Example #1
import os
import glob

import cv2
import numpy as np
from tifffile import imread
from csbdeep.io import save_tiff_imagej_compatible
from csbdeep.utils import Path


def downsample_2D3Ddata(GTdir, Lowdir, SaveGTdir, SaveLowdir, pattern='*.tif',
                        axes='ZYX', smallaxes='YX', downsamplefactor=0.5,
                        interpolationscheme=cv2.INTER_CUBIC):
    """Downsample 2D GT images and 3D low-quality stacks in XY by `downsamplefactor`."""
    Path(SaveGTdir).mkdir(exist_ok=True)
    Path(SaveLowdir).mkdir(exist_ok=True)
    print(Path(SaveGTdir))
    print(Path(SaveLowdir))
    GT_path = os.path.join(GTdir, pattern)
    Low_path = os.path.join(Lowdir, pattern)

    filesGT = glob.glob(GT_path)
    filesGT.sort()
    for fname in filesGT:
        x = imread(fname)
        Name = os.path.basename(os.path.splitext(fname)[0])

        # 2D ground-truth image: resize directly (dsize is (width, height))
        y = cv2.resize(x,
                       dsize=(int(x.shape[1] * downsamplefactor),
                              int(x.shape[0] * downsamplefactor)),
                       interpolation=interpolationscheme)
        save_tiff_imagej_compatible(SaveGTdir + Name, y, smallaxes)
        print('File saved GT: ', Name, 'size:', y.shape)

    filesLow = glob.glob(Low_path)
    filesLow.sort()
    for fname in filesLow:
        x = imread(fname)
        y = np.zeros([x.shape[0],
                      int(x.shape[1] * downsamplefactor),
                      int(x.shape[2] * downsamplefactor)])
        Name = os.path.basename(os.path.splitext(fname)[0])
        # 3D low-quality stack: resize each Z plane separately
        for i in range(x.shape[0]):
            y[i, :] = cv2.resize(x[i, :],
                                 dsize=(int(x.shape[2] * downsamplefactor),
                                        int(x.shape[1] * downsamplefactor)),
                                 interpolation=interpolationscheme)

        save_tiff_imagej_compatible(SaveLowdir + Name, y, axes)
        print('File saved Low: ', Name, 'size:', y.shape)
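A minimal usage sketch of this helper (the directory paths below are hypothetical, not from the source):

# downsample paired data to half size in XY (paths are hypothetical)
downsample_2D3Ddata('GT/', 'Low/', 'GT_downsampled/', 'Low_downsampled/',
                    downsamplefactor=0.5)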
Example #2
 def predict(self, inputFolder, outputFolder):
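     # passing config=None loads the previously trained model `self.name` from `self.path`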
     self.model = N2V(None, self.name, basedir=self.path)
     for r, d, f in os.walk(inputFolder):
         for file in f:
             base_filename = os.path.basename(file)
             input_train = imread(os.path.join(r, file))
             pred_train = self.model.predict(input_train,
                                             axes='YX',
                                             n_tiles=(2, 1))
             save_tiff_imagej_compatible(os.path.join(
                 outputFolder, base_filename),
                                         pred_train,
                                         axes='YX')
     print("Images saved into folder:", outputFolder)
Example #3
    def drift_correction(self, T, file):
        c = self.config
        assert c.channel_drift_correction is not None

        # check if file already exists?
        reg_file = self.registered_dir / ('DRIFTCORRECTED_' + file.name)

        reg_ch = c.channel_order.index(c.channel_drift_correction)
        T_reg = self.register(T, reg_ch)
        T_reg = T_reg.astype(T.dtype)

        # with TiffFile(str(file)) as _file:
        #     imagej_metadata = _file.imagej_metadata
        #     ome_metadata = _file.ome_metadata
        # save_tiff_imagej_compatible(str(reg_file), T_reg, axes=axes, metadata=imagej_metadata)

        save_tiff_imagej_compatible(str(reg_file), T_reg, axes='TCYX')

        return T_reg
Example #4
def main():
    tifs = []
    folders = []
    for root, dirs, files in os.walk(args.indir):
        for file in files:
            if file.endswith('tif') and ('mask' not in file):
                tifs.append(Path(root) / file)
                if Path(root) not in folders:
                    folders.append(Path(root))

    if not args.summarize_only:
        model = StarDist2D(None, name='gcamp-stardist', basedir='models')

        for tif in tifs:
            print(("Analyzing %s..." % str(tif.stem)), end='', flush=True)
            movie = imread(str(tif))
            num_frames, num_ch, dim_y, dim_x = get_movie_dims(movie)
            labels, df = analyze_gcamp(movie, model, num_frames, num_ch, dim_y,
                                       dim_x)
            savedir = tif.parent
            mask_file = savedir / (tif.stem + '_mask.tif')
            data_file = savedir / (tif.stem + '_analysis.csv')
            save_tiff_imagej_compatible(mask_file,
                                        labels.astype("uint8"),
                                        axes="TYX")
            df.to_csv(data_file)
            print("done!")

    for folder in folders:
        print(("Summarizing %s...") % str(folder), end='', flush=True)
        summary_dfs = summarize_folder(folder)
        savedir = folder.parent
        for summary, df in summary_dfs.items():
            df.to_csv(savedir / (folder.stem + '_' + summary + '.csv'))
        print('done!')

    print("Mischief managed :)")
Example #5
                          os.path.basename(fname)) == False or os.path.exists(
                              basedirResults2Dextended + '_' +
                              os.path.basename(fname)) == False:
            print(fname)
            y = imread(fname)
            restored = RestorationModel.predict(
                y, axes, n_tiles=(1, 2, 4)
            )  # n_tiles sets the tiling of the image along (z, y, x); (1, 2, 2) is enough for light images, and fewer tiles means a faster computation
            projection = ProjectionModel.predict(
                restored, axes, n_tiles=(1, 1, 1)
            )  # n_tiles sets the tiling of the image along (z, y, x); the tiles overlap, and the overlap is handled by the program itself
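            # e.g. n_tiles=(1, 2, 4) keeps Z whole and splits each plane into 2 x 4
            # tiles in (y, x); the overlapping tiles are stitched back together automatically.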
            axes_restored = axes.replace(ProjectionModel.proj_params.axis, '')
            # restored = restored.astype('uint8')  # if prediction and projection run at the same time
            restored = restored.astype(
                'uint16'
            )  # if creating a projection training set or waiting for a future projection
            projection = projection.astype('uint8')
            save_tiff_imagej_compatible(
                (basedirResults3Dextended + os.path.basename(fname)), restored,
                axes)
            save_tiff_imagej_compatible(
                (basedirResults2Dextended + '_' + os.path.basename(fname)),
                projection, axes_restored)

# In[]:

from csbdeep.utils import Path

TriggerName = '/home/sancere/NextonDisk_1/TimeTrigger/TTCAREMaria1'
Path(TriggerName).mkdir(exist_ok=True)
ModelName = 'BorialisS1S2FlorisMidNoiseModel'
BaseDir = '/data/u934/service_imagerie/v_kapoor/CurieDeepLearningModels/'
Path(basedirResults3D).mkdir(exist_ok=True)

# In[4]:

model = CARE(config=None, name=ModelName, basedir=BaseDir)

# In[6]:

Raw_path = os.path.join(basedirLow, '*tif')

axes = 'ZYX'
smallaxes = 'YX'
filesRaw = glob.glob(Raw_path)

filesRaw.sort()
print(len(filesRaw))
for fname in filesRaw:

    x = imread(fname)
    print(x.shape)
    print('Saving file ' + (basedirResults3D + '%s_' + os.path.basename(fname)) % model.name)
    restored = model.predict(x, axes, n_tiles=(1, 4, 4))
    projected = np.max(restored, axis=0)

    save_tiff_imagej_compatible(
        (basedirResults3D + '%s_' + os.path.basename(fname)) % model.name,
        restored, axes)

# In[ ]:
Example #7
parser.add_argument("folderlocation",
                    type=str,
                    help="Path of the folder with images")
args = parser.parse_args()

# Making the output folder in the parent directory if it doesn't exist
outputfolder = os.path.dirname(args.folderlocation) + '/labelimages'
if not os.path.exists(outputfolder):
    os.makedirs(outputfolder)

# Instantiating the StarDist model. Using the '2D_versatile_fluo' pre-trained model. I tried several settings on
# the images and found that these parameters work best:
# Normalize the image with a lower threshold of 40 and an upper threshold of 100
# Probability threshold of 0.25, overlap threshold of 0.3

model = StarDist2D.from_pretrained('2D_versatile_fluo')

folder = args.folderlocation
print(f'Working on {folder}')
outdir = outputfolder + '/' + os.path.basename(folder) + '_labels'
os.makedirs(outdir)
greenimagesnames = sorted(glob.glob(folder + '/*.tif'))
greenimages = map(imread, greenimagesnames)
for tif, loc in zip(greenimages, greenimagesnames):
    saveloc = outdir + '/' + os.path.splitext(
        os.path.basename(loc))[0] + '_labels.tif'
    img = normalize(tif, 40, 100, axis=(0, 1))
    labels, _ = model.predict_instances(img, prob_thresh=0.25, nms_thresh=0.3)
    save_tiff_imagej_compatible(saveloc, labels, axes='YX')

print(f'Done with {folder}.')
Example #8
def main():
    if not ('__file__' in locals() or '__file__' in globals()):
        print('running interactively, exiting.')
        sys.exit(0)

    # parse arguments
    parser, args = parse_args()
    args_dict = vars(args)

    # exit and show help if no arguments provided at all
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)

    # check for required arguments manually (because of argparse issue)
    required = ('--input-dir', '--input-axes', '--norm-pmin', '--norm-pmax',
                '--model-basedir', '--model-name', '--output-dir')
    for r in required:
        dest = r[2:].replace('-', '_')
        if args_dict[dest] is None:
            parser.print_usage(file=sys.stderr)
            print("%s: error: the following arguments are required: %s" %
                  (parser.prog, r),
                  file=sys.stderr)
            sys.exit(1)

    # show effective arguments (including defaults)
    if not args.quiet:
        print('Arguments')
        print('---------')
        pprint(args_dict)
        print()
        sys.stdout.flush()

    # logging function
    log = (lambda *a, **k: None) if args.quiet else tqdm.write

    # get list of input files and exit if there are none
    file_list = list(Path(args.input_dir).glob(args.input_pattern))
    if len(file_list) == 0:
        log("No files to process in '%s' with pattern '%s'." %
            (args.input_dir, args.input_pattern))
        sys.exit(0)

    # delay imports until after checking that all required arguments are provided
    from tifffile import imread, imsave
    from csbdeep.utils.tf import keras_import
    K = keras_import('backend')
    from csbdeep.models import CARE
    from csbdeep.data import PercentileNormalizer
    sys.stdout.flush()
    sys.stderr.flush()

    # limit gpu memory
    if args.gpu_memory_limit is not None:
        from csbdeep.utils.tf import limit_gpu_memory
        limit_gpu_memory(args.gpu_memory_limit)

    # create CARE model and load weights, create normalizer
    K.clear_session()
    model = CARE(config=None, name=args.model_name, basedir=args.model_basedir)
    if args.model_weights is not None:
        print("Loading network weights from '%s'." % args.model_weights)
        model.load_weights(args.model_weights)
    normalizer = PercentileNormalizer(pmin=args.norm_pmin,
                                      pmax=args.norm_pmax,
                                      do_after=args.norm_undo)

    n_tiles = args.n_tiles
    if n_tiles is not None and len(n_tiles) == 1:
        n_tiles = n_tiles[0]

    processed = []

    # process all files
    for file_in in tqdm(file_list,
                        disable=args.quiet
                        or (n_tiles is not None and np.prod(n_tiles) > 1)):
        # construct output file name
        file_out = Path(args.output_dir) / args.output_name.format(
            file_path=str(file_in.relative_to(args.input_dir).parent),
            file_name=file_in.stem,
            file_ext=file_in.suffix,
            model_name=args.model_name,
            model_weights=Path(args.model_weights).stem
            if args.model_weights is not None else None)

        # checks
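        # (the `check or _raise(error)` idiom below raises unless the check on the left is truthy)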
        (file_in.suffix.lower() in ('.tif', '.tiff')
         and file_out.suffix.lower() in ('.tif', '.tiff')) or _raise(
             ValueError('only tiff files supported.'))

        # load and predict restored image
        img = imread(str(file_in))
        restored = model.predict(img,
                                 axes=args.input_axes,
                                 normalizer=normalizer,
                                 n_tiles=n_tiles)

        # restored image could be multi-channel even if input image is not
        axes_out = axes_check_and_normalize(args.input_axes)
        if restored.ndim > img.ndim:
            assert restored.ndim == img.ndim + 1
            assert 'C' not in axes_out
            axes_out += 'C'

        # convert data type (if necessary)
        restored = restored.astype(np.dtype(args.output_dtype), copy=False)

        # save to disk
        if not args.dry_run:
            file_out.parent.mkdir(parents=True, exist_ok=True)
            if args.imagej_tiff:
                save_tiff_imagej_compatible(str(file_out), restored, axes_out)
            else:
                imsave(str(file_out), restored)

        processed.append((file_in, file_out))

    # print summary of processed files
    if not args.quiet:
        sys.stdout.flush()
        sys.stderr.flush()
        n_processed = len(processed)
        len_processed = len(str(n_processed))
        log('Finished processing %d %s' %
            (n_processed, 'files' if n_processed > 1 else 'file'))
        log('-' * (26 + len_processed if n_processed > 1 else 26))
        for i, (file_in, file_out) in enumerate(processed):
            len_file = max(len(str(file_in)), len(str(file_out)))
            log(('{:>%d}. in : {:>%d}' % (len_processed, len_file)).format(
                1 + i, str(file_in)))
            log(('{:>%d}  out: {:>%d}' % (len_processed, len_file)).format(
                '', str(file_out)))
Example #9
input_train = imread('data/train.tif')
input_val = imread('data/validation.tif')
pred_train = model.predict(input_train, axes='YX', n_tiles=(2, 1))
pred_val = model.predict(input_val, axes='YX')

#plt.figure(figsize=(16,8))
#plt.subplot(1,2,1)
#plt.imshow(input_train[:1500:,:1500],cmap="magma")
#plt.title('Input');
#plt.subplot(1,2,2)
#plt.imshow(pred_train[:1500,:1500],cmap="magma")
#plt.title('Prediction')
#plt.show()

# Let's look at the results
#plt.figure(figsize=(16,8))
#plt.subplot(1,2,1)
#plt.imshow(input_val,cmap="magma")
#plt.title('Input')
#plt.subplot(1,2,2)
#plt.imshow(pred_val,cmap="magma")
#plt.title('Prediction')
#plt.show()

save_tiff_imagej_compatible('models/n2v_2D_SEM/pred_train.tif',
                            pred_train,
                            axes='YX')
save_tiff_imagej_compatible('models/n2v_2D_SEM/pred_validation.tif',
                            pred_val,
                            axes='YX')
Example #10
    #   "--output_filename",
    #   help="the name of the out"
    # )
    parser.add_argument("-p", "--plot", action="store_true", default=True)
    args = parser.parse_args()

    # Load the trained model
    model = CARE(config=None, name=args.model_name, basedir=args.model_dir)

    # Read the test images
    x = imread(
        os.path.join(args.base_dir, args.path_to_test_image, "low_snr.tif"))
    y = imread(
        os.path.join(args.base_dir, args.path_to_test_image, "high_snr.tif"))

    restored = model.predict(x, axes="YX")  # , n_tiles=(1,4,4))

    # Save the restored image
    save_tiff_imagej_compatible(
        os.path.join(args.base_dir, args.path_to_test_image, "predicted.tif"),
        restored,
        axes="YX")

    # Plot the restored image next to the test pair
    if args.plot:
        plt.figure(figsize=(16, 10))
        plot_some(
            np.stack([x, restored, y]),
            title_list=[['low res', 'CARE', 'target']])
        plt.show()
Example #11
    basedirResults2D = currentdir + '/Projections/'

    Path(basedirResults3D).mkdir(exist_ok=True)

    Path(basedirResults2D).mkdir(exist_ok=True)

    Raw_path = os.path.join(currentdir, '*tif')

    filesRaw = glob.glob(Raw_path)

    for fname in filesRaw:

        y = imread(fname)

        print('Saving file ' + basedirResults3D + '%s_' +
              os.path.basename(fname))

        restored = RestorationModel.predict(y, axes, n_tiles=(1, 4, 8))
        projection = ProjectionModel.predict(restored, axes, n_tiles=(1, 2, 2))
        axes_restored = axes.replace(ProjectionModel.proj_params.axis, '')
        save_tiff_imagej_compatible(
            (basedirResults3D + '%s_' + 'Restored' + os.path.basename(fname)) %
            RestorationModel.name, restored, axes)

        save_tiff_imagej_compatible(
            (basedirResults2D + '%s_' + 'Projected' + os.path.basename(fname))
            % ProjectionModel.name, projection, axes_restored)

# In[ ]:
Example #12
    def predict(self,
                model_and_ch_nro: tuple,
                out_path: str,
                make_overlay: bool = True,
                n_tiles: tuple = None,
                overlay_path: str = None,
                **kwargs) -> Tuple[np.ndarray, dict]:
        """Perform a prediction to one image.
        Parameters
        ----------
        model_and_ch_nro : tuple
            Tuple of (str, int) giving the model name and the index of the channel the model is applied to.
        out_path : str
            Path to the label file output directory.
        make_overlay : bool
            Whether to create a flattened overlay image of the labels.
        n_tiles : [None, tuple]
            Tuple with the number of tiles for each axis. If None, the number of tiles is taken from the
            class attribute default_config or from kwargs.
        overlay_path : str
            Path where the overlay image is saved if created.
        
        Returns
        -------
        labels : np.ndarray
            Image of the labels.
        details : dict
            Descriptions of label polygons/polyhedra.
        """

        config = deepcopy(self.config)
        config.update(kwargs)

        img = normalize(self.image.get_channels(model_and_ch_nro[1]),
                        1,
                        99.8,
                        axis=(0, 1, 2))
        print(
            f"\n{self.image.name}; Model = {model_and_ch_nro[0]} ; Image dims = {self.image.shape}"
        )

        # Define tile number if big image
        if config.get("predict_big") and n_tiles is None:
            n_tiles = self.define_tiles(img.shape)

        # Run prediction
        model = read_model(model_and_ch_nro[0])
        labels, details = self._prediction(model, img, n_tiles, config)

        # Define save paths:
        file_stem = f'{self.name}_{model_and_ch_nro[0]}'
        save_label = pl.Path(out_path).joinpath(f'{file_stem}.labels.tif')

        # Save the label image:
        save_tiff_imagej_compatible(save_label,
                                    labels.astype('int16'),
                                    axes='ZYX',
                                    imagej=True,
                                    resolution=(1. / self.image.voxel_dims[1],
                                                1. / self.image.voxel_dims[2]),
                                    metadata={'spacing': self.image.voxel_dims[0]})

        # Add path to label paths
        if save_label not in self.label_paths:
            self.label_paths.append(save_label)

        if make_overlay and config.get(
                "imagej_path"
        ) is not None:  # Create and save overlay tif of the labels
            ov_save_path = overlay_path if overlay_path is not None else out_path
            overlay_images(
                pl.Path(ov_save_path).joinpath(f'overlay_{file_stem}.tif'),
                self.image.path,
                save_label,
                config.get("imagej_path"),
                channel_n=model_and_ch_nro[1])
        return labels, details
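A hypothetical call of this method (the object name `sample`, the model name, and the paths are assumptions, not taken from the source):

# assumed: `sample` is an instance of the class that defines predict()
labels, details = sample.predict(('stardist_nuclei', 1),
                                 out_path='labels/',
                                 n_tiles=(1, 4, 4))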
Example #13
# default = 32,64,64
patches = datagen.generate_patches_from_list(imgs[:1], shape=(32, 64, 64))

# default = :600
X = patches[:600]
X_val = patches[600:]
numberEpochs = 20
config = N2VConfig(X,
                   unet_kern_size=3,
                   train_steps_per_epoch=int(X.shape[0] / 128),
                   train_epochs=numberEpochs,
                   train_loss='mse',
                   batch_norm=True,
                   train_batch_size=4,
                   n2v_perc_pix=0.198,
                   n2v_patch_shape=(32, 64, 64),
                   n2v_manipulator='uniform_withCP',
                   n2v_neighborhood_radius=5)
vars(config)
model_name = '20epoch'
model = N2V(config=config, name=model_name, basedir=image_path)
history = model.train(X, X_val)
print(sorted(list(history.history.keys())))
model.export_TF()

# Load the image, and predict the denoised image.
img = imread(os.path.join(image_path, image_name))
pred = model.predict(img, axes='ZYX', n_tiles=(2, 4, 4))
save_tiff_imagej_compatible(os.path.join(image_path, 'denoised.tif'), pred,
                            'ZYX')
Example #14
import sys,io,os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
model_name = 'n2v_3D'
basedir = 'models'
model = N2V(config=None, name=model_name, basedir=basedir)


folderName = sys.argv[1]
imList = os.listdir(folderName)

for image in imList:

    jobName = folderName.split('/')
    jobName = str(jobName[1])
    outName = image
    outName = outName.split('.')
    outName = str(outName[0])
    imgName = folderName+'/'+image
    img = imread(imgName)

    pred = model.predict(img, axes='ZYX', n_tiles=(2,4,4))

    if not os.path.isdir('./job_output'):
        os.mkdir('./job_output')
    if not os.path.isdir('./job_output/'+jobName):
        os.mkdir('./job_output/'+jobName)


    save_tiff_imagej_compatible('./job_output/'+jobName+'/'+outName+'_deNoised.tif', pred, 'ZYX')
Example #15
from csbdeep.utils import Path, download_and_extract_zip_file, plot_some
from csbdeep.io import save_tiff_imagej_compatible
from csbdeep.models import ProjectionCARE

# In[2]:

basedirLow = '/local/u934/private/v_kapoor/ProjectionTraining/MasterLow/NotsoLow/'
basedirResults = '/local/u934/private/v_kapoor/ProjectionTraining/MasterLow/NetworkProjections'
ModelName = 'DrosophilaDenoisingProjection'
BaseDir = '/local/u934/private/v_kapoor/CurieDeepLearningModels/'

# In[3]:

model = ProjectionCARE(config=None, name=ModelName, basedir=BaseDir)

# In[4]:

Raw_path = os.path.join(basedirLow, '*tif')
Path(basedirResults).mkdir(exist_ok=True)
axes = 'ZYX'
filesRaw = glob.glob(Raw_path)
filesRaw.sort()
for fname in filesRaw:
    x = imread(fname)
    print('Saving file ' + (basedirResults + '%s_' + os.path.basename(fname)) % model.name)
    restored = model.predict(x, axes, n_tiles=(1, 4, 4))
    axes_restored = axes.replace(model.proj_params.axis, '')
    save_tiff_imagej_compatible(
        (basedirResults + '%s_' + os.path.basename(fname)) % model.name,
        restored, axes_restored)
Example #16
#!/usr/bin/env python
# Noise2Void - 3D Example for Flywing Data
from n2v.models import N2V
import numpy as np
from matplotlib import pyplot as plt
from tifffile import imread
from csbdeep.io import save_tiff_imagej_compatible

model_name = 'n2v_3D'
basedir = 'models'
model = N2V(config=None, name=model_name, basedir=basedir)

img = imread('data/flywing.tif')
pred = model.predict(img, axes='ZYX', n_tiles=(2, 4, 4))

#plt.figure(figsize=(30,30))
#plt.subplot(1,2,1)
#plt.imshow(np.max(img,axis=0),
#           cmap='magma',
#           vmin=np.percentile(img,0.1),
#           vmax=np.percentile(img,99.9))
#plt.title('Input')
#plt.subplot(1,2,2)
#plt.imshow(np.max(pred,axis=0),
#    cmap='magma',
#    vmin=np.percentile(img,0.1),
#    vmax=np.percentile(img,99.9))
#plt.title('Prediction')

save_tiff_imagej_compatible('models/n2v_3D/prediction.tif', pred, 'ZYX')
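The pattern shared by all of these examples, reduced to a minimal sketch (the file names here are hypothetical):

from tifffile import imread
from csbdeep.io import save_tiff_imagej_compatible

img = imread('input.tif')                                   # read a 3D TIFF stack (hypothetical file)
save_tiff_imagej_compatible('output.tif', img, axes='ZYX')  # write it back as an ImageJ-compatible TIFF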