Example #1
# Imports assumed by this snippet; load_images is defined elsewhere in the same script.
import os

from skimage import io
from stardist.models import StarDist2D
from tqdm import tqdm


def main(model_num, img_dir, img_ext, out_dir):
    try:
        model_num_int = int(model_num)
    except ValueError:
        print('Please enter an integer between 1-13 for model number')
        return
    
    if model_num_int < 1 or model_num_int > 13:
        print('Please enter an integer between 1-13 for model number')
        return

    img_names, imgs = load_images(img_dir, img_ext)
    if len(imgs) == 0:
        print('No images loaded.')
        return
    
    # Load model: model 1 is the built-in pretrained model; models 2-13 are
    # expected as local folders ./models/model<N>.
    if model_num_int == 1:
        model = StarDist2D.from_pretrained('2D_versatile_fluo')
    else:
        model = StarDist2D(None, name=f'model{model_num_int}', basedir='./models')

    print('Segmenting nuclei...')
    for i in tqdm(range(len(imgs))):
        img_name = os.path.basename(img_names[i]).split('.')[0] # Get image name without extension
        img = imgs[i]
        seg = model.predict_instances(img, n_tiles=model._guess_n_tiles(img), show_tile_progress=False)
        io.imsave(f'{out_dir}/SEG_{img_name}.tif', seg[0]) # predict_instances returns (label_image, details); save the label image
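A minimal invocation sketch for the function above; the paths are placeholders and load_images is assumed to be defined in the same script.

if __name__ == '__main__':
    # Hypothetical call: segment every .tif in ./images with the built-in
    # pretrained model (model number 1) and write SEG_<name>.tif files to ./output.
    main('1', './images', 'tif', './output')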
Example #2
def load_model(model_to_load, model_path="default"):
    """
    Load a StarDist2D model.

    If model_path is "default", model_to_load is treated as the name of a
    built-in pretrained model (e.g. "2D_versatile_fluo") and loaded via
    StarDist2D.from_pretrained. Otherwise model_to_load is the name of a
    model folder located under model_path.
    """
    if model_path == "default":
        return StarDist2D.from_pretrained(model_to_load)
    # config=None tells StarDist2D to load the existing configuration and
    # weights from <model_path>/<model_to_load> instead of creating a new model.
    return StarDist2D(None, name=model_to_load, basedir=model_path)
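Both branches can then be exercised as follows (a sketch; the folder and model names are placeholders).

# Built-in pretrained model shipped with StarDist
model_pretrained = load_model('2D_versatile_fluo')
# Locally trained model stored under ./models/my_model
model_local = load_model('my_model', model_path='./models')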
Example #3
def _model2d():
    from utils import path_model2d
    from stardist.models import StarDist2D
    model_path = path_model2d()
    return StarDist2D(None,
                      name=model_path.name,
                      basedir=str(model_path.parent))
Example #4
def stardist_segment_nuclei(image: Array,
                            model_str: str = "2D_versatile_fluo") -> Array:
    # Array and eq come from the surrounding module (an ndarray type alias and a
    # preprocessing helper, e.g. histogram equalization).
    from stardist.models import StarDist2D

    model = StarDist2D.from_pretrained(model_str)
    mask, _ = model.predict_instances(eq(image))
    return mask
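A usage sketch, assuming a single-channel nuclear image read with scikit-image (the path is a placeholder).

from skimage.io import imread

dapi = imread('nuclei.tif')           # placeholder path
mask = stardist_segment_nuclei(dapi)  # label image with the same spatial shape as the input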
Example #5
def render_label_pred_example():
    model_path = path_model2d()
    model = StarDist2D(None,
                       name=model_path.name,
                       basedir=str(model_path.parent))
    img, y_gt = real_image2d()
    x = normalize(img, 1, 99.8)
    y, _ = model.predict_instances(x)

    im = render_label_pred(y_gt, y, img=x)
    import matplotlib.pyplot as plt
    plt.figure(1, figsize=(12, 4))
    plt.subplot(1, 4, 1)
    plt.imshow(x)
    plt.title("img")
    plt.subplot(1, 4, 2)
    plt.imshow(render_label(y_gt, img=x))
    plt.title("gt")
    plt.subplot(1, 4, 3)
    plt.imshow(render_label(y, img=x))
    plt.title("pred")
    plt.subplot(1, 4, 4)
    plt.imshow(im)
    plt.title("tp (green) fp (red) fn(blue)")
    plt.tight_layout()
    plt.show()
    return im
Example #6
def test_model(tmpdir, n_rays, grid, n_channel):
    img = circle_image(shape=(160,160))
    imgs = np.repeat(img[np.newaxis], 3, axis=0)

    if n_channel is not None:
        imgs = np.repeat(imgs[...,np.newaxis], n_channel, axis=-1)
    else:
        n_channel = 1

    X = imgs+.6*np.random.uniform(0,1,imgs.shape)
    Y = (imgs if imgs.ndim==3 else imgs[...,0]).astype(int)

    conf = Config2D(
        n_rays                = n_rays,
        grid                  = grid,
        n_channel_in          = n_channel,
        use_gpu               = False,
        train_epochs          = 1,
        train_steps_per_epoch = 2,
        train_batch_size      = 2,
        train_loss_weights    = (4,1),
        train_patch_size      = (128,128),
    )

    model = StarDist2D(conf, name='stardist', basedir=str(tmpdir))
    model.train(X, Y, validation_data=(X[:2],Y[:2]))
Example #7
def stardistmodel_from_folder(modelfolder, mdname='2D_dsb2018'):

    # workaround explained here to avoid errors
    # https://github.com/openai/spinningup/issues/16
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
    sdmodel = StarDist2D(None, name=mdname, basedir=modelfolder)

    return sdmodel
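Loading and applying such a folder-based model might look like this (a sketch; the folder and image path are placeholders, and normalize is csbdeep.utils.normalize as in the other examples).

from csbdeep.utils import normalize
from skimage.io import imread

img = imread('dapi.tif')                        # placeholder path
model = stardistmodel_from_folder('./models')   # expects the model in ./models/2D_dsb2018
labels, details = model.predict_instances(normalize(img, 1, 99.8))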
Example #8
def stardist_model_to_fiji(model_path, model=None):

    if model is None:
        save_root, save_name = os.path.split(model_path)
        model = StarDist2D(None, name=save_name, basedir=save_root)

    fiji_save_path = os.path.join(model_path, 'TF_SavedModel.zip')
    print("Saving model for fiji", fiji_save_path)
    model.export_TF()
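A call sketch; the model folder is a placeholder. export_TF writes TF_SavedModel.zip into the model's own folder, which is the path printed above.

# Export the model stored under ./models/2D_dsb2018 for use with the Fiji/ImageJ plugin
stardist_model_to_fiji('./models/2D_dsb2018')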
Example #9
 def __init__(self,
              from_pretrained='2D_versatile_fluo',
              normalize_func=None):
     if from_pretrained is None:  # Train your own model
         raise NotImplementedError
     self.model = StarDist2D.from_pretrained(from_pretrained)
     self.lbl_cmap = random_label_cmap()
     if normalize_func is None:
         self.normalize_func = self.default_normalize
     else:
         self.normalize_func = normalize_func
Example #10
def load_stardistmodel(modeltype='Versatile (fluorescent nuclei)'):

    # workaround explained here to avoid errors
    # https://github.com/openai/spinningup/issues/16
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

    # define and load the stardist model
    sdmodel = StarDist2D.from_pretrained(modeltype)

    return sdmodel
Example #11
def print_receptive_fields():
    for backbone in ("unet",):
        for n_depth in (1,2,3):
            for grid in ((1,1),(2,2)):
                conf  = Config2D(backbone = backbone,
                                 grid = grid,
                                 unet_n_depth=n_depth)
                model = StarDist2D(conf, None, None)
                fov   = model._compute_receptive_field()
                print(f"backbone: {backbone} \t n_depth: {n_depth} \t grid {grid} -> fov: {fov}")
Example #12
def test_load_and_export_TF():
    model_path = path_model2d()
    model = StarDist2D(None,
                       name=model_path.name,
                       basedir=str(model_path.parent))
    assert any(g > 1 for g in model.config.grid)
    # model.export_TF(single_output=False, upsample_grid=False)
    # model.export_TF(single_output=False, upsample_grid=True)
    model.export_TF(single_output=True, upsample_grid=False)
    model.export_TF(single_output=True, upsample_grid=True)
Example #13
def test_load_and_predict_big():
    model_path = path_model2d()
    model = StarDist2D(None,
                       name=model_path.name,
                       basedir=str(model_path.parent))
    img, _ = real_image2d()
    x = normalize(img, 1, 99.8)
    x = np.tile(x, (8, 8))
    labels, polygons = model.predict_instances(x)
    return labels
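For large inputs like the tiled image above, prediction can also be split into tiles explicitly to bound memory; a sketch assuming the same model and x as in the function above (the tile counts are illustrative).

labels, polygons = model.predict_instances(x, n_tiles=(4, 4))
# or let the model pick a tiling, as in the segmentation script of Example #1:
# labels, polygons = model.predict_instances(x, n_tiles=model._guess_n_tiles(x))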
Example #14
def test_model(tmpdir, n_rays, grid, n_channel):
    img = circle_image(shape=(160, 160))
    imgs = np.repeat(img[np.newaxis], 3, axis=0)

    if n_channel is not None:
        imgs = np.repeat(imgs[..., np.newaxis], n_channel, axis=-1)
    else:
        n_channel = 1

    X = imgs + .6 * np.random.uniform(0, 1, imgs.shape)
    Y = (imgs if imgs.ndim == 3 else imgs[..., 0]).astype(int)

    conf = Config2D(
        n_rays=n_rays,
        grid=grid,
        n_channel_in=n_channel,
        use_gpu=False,
        train_epochs=1,
        train_steps_per_epoch=2,
        train_batch_size=2,
        train_loss_weights=(4, 1),
        train_patch_size=(128, 128),
    )

    model = StarDist2D(conf, name='stardist', basedir=str(tmpdir))
    model.train(X, Y, validation_data=(X[:2], Y[:2]))
    ref = model.predict(X[0])
    res = model.predict(X[0],
                        n_tiles=((2, 3) if X[0].ndim == 2 else (2, 3, 1)))
    # assert all(np.allclose(u,v) for u,v in zip(ref,res))

    # ask to train only with foreground patches when there are none
    # include a constant label image that must trigger a warning
    conf.train_foreground_only = 1
    conf.train_steps_per_epoch = 1
    _X = X[:2]
    _Y = [np.zeros_like(Y[0]), np.ones_like(Y[1])]
    with pytest.warns(UserWarning):
        StarDist2D(conf, name='stardist',
                   basedir=None).train(_X,
                                       _Y,
                                       validation_data=(X[-1:], Y[-1:]))
Example #15
def test_pretrained_integration():
    from stardist.models import StarDist2D
    img = normalize(real_image2d()[0])

    model = StarDist2D.from_pretrained("2D_versatile_fluo")
    prob, dist = model.predict(img)

    y1, res1 = model._instances_from_prediction(img.shape,
                                                prob,
                                                dist,
                                                nms_thresh=.3)
    return y1, res1
Example #16
 def load_models(self):
     c = self.config
     d = c._asdict()
     self.models = {}
     K.clear_session()
     for name in c.channels_segment:
         self.models[name] = dict(
             model=StarDist2D(None,
                              name=d[name + '_model'],
                              basedir=c.model_dir),
             prob_thresh=d[name + '_prob_thresh'],
             nms_thresh=d[name + '_nms_thresh'],
         )
Example #17
def test_load_and_predict():
    model_path = path_model2d()
    model = StarDist2D(None, name=model_path.name, basedir=str(model_path.parent))
    img, mask = real_image2d()
    x = normalize(img,1,99.8)
    prob, dist = model.predict(x, n_tiles=(2,3))
    assert prob.shape == dist.shape[:2]
    assert model.config.n_rays == dist.shape[-1]
    labels, polygons = model.predict_instances(x)
    assert labels.shape == img.shape[:2]
    assert labels.max() == len(polygons['coord'])
    assert len(polygons['coord']) == len(polygons['points']) == len(polygons['prob'])
    stats = matching(mask, labels, thresh=0.5)
    assert (stats.fp, stats.tp, stats.fn) == (1, 48, 17)
Example #18
def render_label_example():
    model_path = path_model2d()
    model = StarDist2D(None,
                       name=model_path.name,
                       basedir=str(model_path.parent))
    img, y_gt = real_image2d()
    x = normalize(img, 1, 99.8)
    y, _ = model.predict_instances(x)
    # im =  render_label(y,img = x, alpha = 0.3, alpha_boundary=1, cmap = (.3,.4,0))
    im = render_label(y, img=x, alpha=0.3, alpha_boundary=1)
    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.imshow(im)
    plt.show()
    return im
Example #19
def test_optimize_thresholds():
    model_path = path_model2d()
    model = StarDist2D(None,
                       name=model_path.name,
                       basedir=str(model_path.parent))
    img, mask = real_image2d()
    x = normalize(img, 1, 99.8)

    res = model.optimize_thresholds([x], [mask],
                                    nms_threshs=[.3, .5],
                                    iou_threshs=[.3, .5],
                                    optimize_kwargs=dict(tol=1e-1),
                                    save_to_json=False)

    np.testing.assert_almost_equal(res["prob"], 0.454617141955, decimal=3)
    np.testing.assert_almost_equal(res["nms"], 0.3, decimal=3)
Example #20
def test_foreground_warning():
    # ask to train only with foreground patches when there are none
    # include a constant label image that must trigger a warning
    conf = Config2D(
        n_rays=32,
        train_patch_size=(96, 96),
        train_foreground_only=1,
        train_steps_per_epoch=1,
        train_epochs=1,
        train_batch_size=2,
    )
    X, Y = np.ones((2, 100, 100), np.float32), np.ones((2, 100, 100),
                                                       np.uint16)

    with pytest.warns(UserWarning):
        StarDist2D(conf, None, None).train(X,
                                           Y,
                                           validation_data=(X[-1:], Y[-1:]))
Example #21
def test_model(n_rays, grid):
    img = circle_image()
    imgs = np.repeat(img[np.newaxis], 10, axis=0)

    X = imgs + .6 * np.random.uniform(0, 1, imgs.shape)
    Y = imgs.astype(int)

    conf = Config2D(n_rays=n_rays,
                    grid=grid,
                    use_gpu=False,
                    train_epochs=1,
                    train_steps_per_epoch=10,
                    train_loss_weights=(4, 1),
                    train_patch_size=(128, 128),
                    n_channel_in=1)

    with tempfile.TemporaryDirectory() as tmp:
        model = StarDist2D(conf, name='stardist', basedir=tmp)
        model.train(X, Y, validation_data=(X[:3], Y[:3]))
Example #22
def _star_convex_polynoms(dapi_path,
                          membrane_dic,
                          model_path,
                          full_output=False):
    """Nuclei segmentation using the stardist algorithm.

    Parameters
    ----------
    dapi_path : string
      path to the DAPI image
    membrane_dic : dict obtained with extract_membranes
      Dictionary containing relevant information about the membranes.
    model_path : string
      path to the stardist model

    Returns
    -------

    clockwise_centers : np.ndarray
      An array of cell centers, sorted in clockwise order.
    """
    images = sorted(glob(dapi_path))
    images = list(map(imread, images))
    img = normalize(images[0], 1, 99.8)
    model_sc = StarDist2D(None,
                          name="stardist_shape_completion",
                          basedir=model_path)
    prob, dist = model_sc.predict(img)
    coord = dist_to_coord(dist)
    points = non_maximum_suppression(coord, prob, prob_thresh=0.4)
    points = np.flip(points, 1)

    rho, phi = _card_coords(points, membrane_dic["center_inside"])
    cleaned = _quick_del_art(points, rho, membrane_dic["rIn"])
    clockwise_centers = _quick_clockwise(cleaned, phi, rho,
                                         membrane_dic["rIn"])

    clockwise_centers = np.subtract(np.float32(clockwise_centers),
                                    np.array(membrane_dic["img_shape"]) / 2.0)
    if full_output:
        return clockwise_centers, (prob, dist, points)

    return clockwise_centers
Example #23
    def run(self,
            input_files,
            output_files,
            gpu_id=None,
            n_jobs=1,
            on_cluster=False):

        # set number of OMP threads to 1, so we can properly parallelize
        # the segmentation via NMS
        os.environ["OMP_NUM_THREADS"] = "1"

        # set additional env variables for gpu / cpu
        if gpu_id is None:
            # need to do this for the conda tensorflow cpu version
            os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
        else:
            if not on_cluster:
                logger.info(
                    f"{self.name}: setting CUDA_VISIBLE_DEVICES to {gpu_id}")
                os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)

            vis_devices = os.environ.get('CUDA_VISIBLE_DEVICES', '')
            logger.info(
                f"{self.name}: CUDA_VISIBLE_DEVICES are set to {vis_devices}")

            # limit the gpu memory demand, so we can run tasks with pytorch later
            # (otherwise tf will block all gpu memory for the rest of the python process)
            limit_gpu_memory(.25)

        from stardist.models import StarDist2D
        model = StarDist2D(None, name=self.model_name, basedir=self.model_root)

        # run prediction for all images
        for in_path, out_path in tqdm(zip(input_files, output_files),
                                      total=len(input_files)):
            self.predict_image(in_path, out_path, model)

        # run segmentation for all images
        _segment = partial(self.segment_image, model=model)
        with futures.ThreadPoolExecutor(n_jobs) as tp:
            list(tqdm(tp.map(_segment, output_files), total=len(output_files)))
Example #24
def test_pretrained_scales():
    from scipy.ndimage import zoom
    from stardist.matching import matching
    from skimage.measure import regionprops

    model = StarDist2D.from_pretrained("2D_versatile_fluo")
    img, mask = real_image2d()
    x = normalize(img, 1, 99.8)

    def pred_scale(scale=2):
        x2 = zoom(x, scale, order=1)
        labels2, _ = model.predict_instances(x2)
        labels = zoom(labels2, tuple(_s1/_s2 for _s1, _s2 in zip(mask.shape, labels2.shape)), order=0)
        return labels

    scales = np.linspace(.5,5,10)
    accs = tuple(matching(mask, pred_scale(s)).accuracy for s in scales)
    print("scales   ", np.round(scales,2))
    print("accuracy ", np.round(accs,2))

    return accs
Example #25
def initialize_model(init_model, model_type, which_model):
    # initialize models
    # returns a dictionary of models that can be called
    mds = {}
    # UNet Cell Profiler
    if model_type == "Cellprofiler_UNet":
        option_dict_conv, option_dict_bn = init_model["UNetSettings"]
        os.environ["KERAS_BACKEND"] = "tensorflow"
        if which_model == "UNet_CP001":
            model = unet_initialize(init_model["UNetShape"],
                                    option_dict_conv,
                                    option_dict_bn,
                                    init_model["UNet_model_file_CP01"],
                                    automated_shape_adjustment=True)
            mds["UNet_CP001"] = model
    # Stardist
    elif model_type == "StarDist":
        if which_model == "SD_2D_dsb2018":
            model = StarDist2D(None,
                               name='2D_dsb2018',
                               basedir=init_model["basedir_StarDist"])
            mds["SD_2D_dsb2018"] = model
    # Cellpose
    elif model_type == "Cellpose":
        # check if GPU working, and if so use it
        use_gpu = cp_utils.use_gpu()
        if use_gpu:
            device = mx.gpu()
            print("GPU found")
        else:
            device = mx.cpu()
            print("CPU only")
        if which_model == "CP_nuclei":
            model = cp_models.Cellpose(device, model_type="nuclei")
            mds["CP_nuclei"] = model
        if which_model == "CP_cyto":
            model = cp_models.Cellpose(device, model_type="cyto")
            mds["CP_cyto"] = model
    print("Model_keys", mds.keys())
    return mds
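A call sketch for the StarDist branch; the init_model dictionary is reduced to the single key that branch reads, and the paths are placeholders.

models = initialize_model({"basedir_StarDist": "./models"}, "StarDist", "SD_2D_dsb2018")
sd_model = models["SD_2D_dsb2018"]
labels, _ = sd_model.predict_instances(normalized_img)  # normalized_img: placeholder image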
Example #26
def train_model(x_train,
                y_train,
                x_val,
                y_val,
                save_path,
                n_channels,
                patch_size,
                n_rays=32):

    # make the model config
    # copied from the stardist training notebook, this is a very weird line ...
    use_gpu = False and gputools_available()
    # predict on subsampled image for increased efficiency
    grid = (2, 2)
    config = Config2D(n_rays=n_rays,
                      grid=grid,
                      use_gpu=use_gpu,
                      n_channel_in=n_channels,
                      train_patch_size=patch_size)

    if use_gpu:
        print("Using a GPU for training")
        # limit gpu memory
        from csbdeep.utils.tf import limit_gpu_memory
        limit_gpu_memory(0.8)
    else:
        print("GPU not found, using the CPU for training")

    save_root, save_name = os.path.split(save_path)
    os.makedirs(save_root, exist_ok=True)
    model = StarDist2D(config, name=save_name, basedir=save_root)

    model.train(x_train,
                y_train,
                validation_data=(x_val, y_val),
                augmenter=augmenter)
    optimal_parameters = model.optimize_thresholds(x_val, y_val)
    return model, optimal_parameters
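Sketch of a call, assuming x_train/y_train/x_val/y_val are lists of normalized images and integer label masks (and that augmenter is defined in the same module); per the optimize_thresholds example above, the returned thresholds are a dict with 'prob' and 'nms' keys.

model, thresholds = train_model(x_train, y_train, x_val, y_val,
                                save_path='./models/my_stardist',  # placeholder path
                                n_channels=1, patch_size=(256, 256))
print('prob threshold:', thresholds['prob'], 'nms threshold:', thresholds['nms'])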
Example #27
def main():
    tifs = []
    folders = []
    for root, dirs, files in os.walk(args.indir):
        for file in files:
            if file.endswith('tif') & ('mask' not in file):
                tifs.append(Path(root) / file)
                if Path(root) not in folders:
                    folders.append(Path(root))

    if not args.summarize_only:
        model = StarDist2D(None, name='gcamp-stardist', basedir='models')

        for tif in tifs:
            print(("Analyzing %s..." % str(tif.stem)), end='', flush=True)
            movie = imread(str(tif))
            num_frames, num_ch, dim_y, dim_x = get_movie_dims(movie)
            labels, df = analyze_gcamp(movie, model, num_frames, num_ch, dim_y,
                                       dim_x)
            savedir = tif.parent
            mask_file = savedir / (tif.stem + '_mask.tif')
            data_file = savedir / (tif.stem + '_analysis.csv')
            save_tiff_imagej_compatible(mask_file,
                                        labels.astype("uint8"),
                                        axes="TYX")
            df.to_csv(data_file)
            print("done!")

    for folder in folders:
        print(("Summarizing %s...") % str(folder), end='', flush=True)
        summary_dfs = summarize_folder(folder)
        savedir = folder.parent
        for summary, df in summary_dfs.items():
            df.to_csv(savedir / (folder.stem + '_' + summary + '.csv'))
        print('done!')

    print("Mischief managed :)")
Example #28
def test_model(tmpdir, n_rays, grid, n_channel, workers, use_sequence):
    img = circle_image(shape=(160, 160))
    imgs = np.repeat(img[np.newaxis], 3, axis=0)

    if n_channel is not None:
        imgs = np.repeat(imgs[..., np.newaxis], n_channel, axis=-1)
    else:
        n_channel = 1

    X = imgs + .6 * np.random.uniform(0, 1, imgs.shape)
    Y = (imgs if imgs.ndim == 3 else imgs[..., 0]).astype(int)

    if use_sequence:
        X, Y = NumpySequence(X), NumpySequence(Y)

    conf = Config2D(n_rays=n_rays,
                    grid=grid,
                    n_channel_in=n_channel,
                    use_gpu=False,
                    train_epochs=2,
                    train_steps_per_epoch=1,
                    train_batch_size=2,
                    train_loss_weights=(4, 1),
                    train_patch_size=(128, 128),
                    train_sample_cache=not use_sequence)

    model = StarDist2D(conf, name='stardist', basedir=str(tmpdir))
    model.train(X, Y, validation_data=(X[:2], Y[:2]), workers=workers)
    ref = model.predict(X[0])
    res = model.predict(X[0],
                        n_tiles=((2, 3) if X[0].ndim == 2 else (2, 3, 1)))

    # deactivate as order of labels might not be the same
    # assert all(np.allclose(u,v) for u,v in zip(ref,res))

    return model
Example #29
def main(argv):
    base_path = "{}".format(os.getenv("HOME"))
    problem_cls = CLASS_OBJSEG

    with BiaflowsJob.from_cli(argv) as bj:
        bj.job.update(status=Job.RUNNING,
                      progress=0,
                      statusComment="Initialization...")

        # 1. Prepare data for workflow
        in_imgs, gt_imgs, in_path, gt_path, out_path, tmp_path = prepare_data(
            problem_cls, bj, is_2d=True, **bj.flags)
        list_imgs = [image.filepath for image in in_imgs]

        # 2. Run Stardist model on input images
        bj.job.update(progress=25, statusComment="Launching workflow...")

        #Loading pre-trained Stardist model
        np.random.seed(17)

        lbl_cmap = random_label_cmap()
        model_fluo = StarDist2D(None,
                                name='2D_versatile_fluo',
                                basedir='/models/')
        model_he = StarDist2D(None, name='2D_versatile_he', basedir='/models/')

        #Go over images
        for img_path in list_imgs:
            fluo = True
            img = imageio.imread(img_path)
            n_channel = 3 if img.ndim == 3 else 1

            if n_channel == 3:
                # Check if 3-channel grayscale image or actually an RGB image
                if np.array_equal(img[:, :, 0],
                                  img[:, :, 1]) and np.array_equal(
                                      img[:, :, 0], img[:, :, 2]):
                    img = skimage.color.rgb2gray(img)
                else:
                    fluo = False

            # axis_norm = (0, 1, 2): normalize channels independently; (0, 1): normalize channels jointly
            axis_norm = (0, 1)
            img = normalize(img,
                            bj.parameters.stardist_norm_perc_low,
                            bj.parameters.stardist_norm_perc_high,
                            axis=axis_norm)

            #Stardist model prediction with thresholds
            if fluo:
                labels, details = model_fluo.predict_instances(
                    img,
                    prob_thresh=bj.parameters.stardist_prob_t,
                    nms_thresh=bj.parameters.stardist_nms_t)
            else:
                labels, details = model_he.predict_instances(
                    img,
                    prob_thresh=bj.parameters.stardist_prob_t,
                    nms_thresh=bj.parameters.stardist_nms_t)

            # Convert labels to uint16 for BIAFLOWS
            labels = labels.astype(np.uint16)
            imageio.imwrite(os.path.join(out_path, os.path.basename(img_path)),
                            labels)

        # 3. Upload data to BIAFLOWS
        upload_data(problem_cls,
                    bj,
                    in_imgs,
                    out_path,
                    **bj.flags,
                    monitor_params={
                        "start": 60,
                        "end": 90,
                        "period": 0.1,
                        "prefix":
                        "Extracting and uploading polygons from masks"
                    })

        # 4. Compute and upload metrics
        bj.job.update(progress=90,
                      statusComment="Computing and uploading metrics...")
        upload_metrics(problem_cls, bj, in_imgs, gt_path, out_path, tmp_path,
                       **bj.flags)

        # 5. Pipeline finished
        bj.job.update(progress=100,
                      status=Job.TERMINATED,
                      statusComment="Finished.")
Example #30
    # (excerpt begins inside a resize_data helper, whose calls appear below)
    for i, im in enumerate(data):
        im = resize(im, (img_size, img_size),
                    anti_aliasing=anti_aliasing,
                    mode='constant',
                    order=order)
        data_rescaled[i] = im

    return data_rescaled


img_size = 512
X = resize_data(X, img_size)

logging.info('Starting mask predictions')

model = StarDist2D(None, name='stardist', basedir="")

# Predict instance segmentation in each image using stardist:
Y = []
for x in X:
    y, details = model.predict_instances(x)
    Y.append(y)

img_size = 1024
Y = np.asarray(Y)
labels_images = resize_data(Y, img_size, anti_aliasing=False, order=0)

# Save the predictions:
np.savez(predicted_npz_path, np.asarray(labels_images),
         np.asarray(gfp_images_names))
os.chmod(predicted_npz_path, 0o664)