Example #1
def test_model(tmpdir, n_rays, grid, n_channel):
    img = circle_image(shape=(160,160))
    imgs = np.repeat(img[np.newaxis], 3, axis=0)

    if n_channel is not None:
        imgs = np.repeat(imgs[...,np.newaxis], n_channel, axis=-1)
    else:
        n_channel = 1

    X = imgs+.6*np.random.uniform(0,1,imgs.shape)
    Y = (imgs if imgs.ndim==3 else imgs[...,0]).astype(int)

    conf = Config2D(
        n_rays                = n_rays,
        grid                  = grid,
        n_channel_in          = n_channel,
        use_gpu               = False,
        train_epochs          = 1,
        train_steps_per_epoch = 2,
        train_batch_size      = 2,
        train_loss_weights    = (4,1),
        train_patch_size      = (128,128),
    )

    model = StarDist2D(conf, name='stardist', basedir=str(tmpdir))
    model.train(X, Y, validation_data=(X[:2],Y[:2]))
Example #2
def print_receptive_fields():
    for backbone in ("unet",):
        for n_depth in (1,2,3):
            for grid in ((1,1),(2,2)):
                conf  = Config2D(backbone = backbone,
                                 grid = grid,
                                 unet_n_depth=n_depth)
                model = StarDist2D(conf, None, None)
                fov   = model._compute_receptive_field()
                print(f"backbone: {backbone} \t n_depth: {n_depth} \t grid {grid} -> fov: {fov}")
Example #3
def test_foreground_warning():
    # ask to train only with foreground patches when there are none
    # include a constant label image that must trigger a warning
    conf = Config2D(
        n_rays=32,
        train_patch_size=(96, 96),
        train_foreground_only=1,
        train_steps_per_epoch=1,
        train_epochs=1,
        train_batch_size=2,
    )
    X, Y = np.ones((2, 100, 100), np.float32), np.ones((2, 100, 100),
                                                       np.uint16)

    with pytest.warns(UserWarning):
        StarDist2D(conf, None, None).train(X,
                                           Y,
                                           validation_data=(X[-1:], Y[-1:]))
Example #4
def test_model(n_rays, grid):
    img = circle_image()
    imgs = np.repeat(img[np.newaxis], 10, axis=0)

    X = imgs + .6 * np.random.uniform(0, 1, imgs.shape)
    Y = imgs.astype(int)

    conf = Config2D(n_rays=n_rays,
                    grid=grid,
                    use_gpu=False,
                    train_epochs=1,
                    train_steps_per_epoch=10,
                    train_loss_weights=(4, 1),
                    train_patch_size=(128, 128),
                    n_channel_in=1)

    with tempfile.TemporaryDirectory() as tmp:
        model = StarDist2D(conf, name='stardist', basedir=tmp)
        model.train(X, Y, validation_data=(X[:3], Y[:3]))
Example #5
def test_model(tmpdir, n_rays, grid, n_channel):
    img = circle_image(shape=(160, 160))
    imgs = np.repeat(img[np.newaxis], 3, axis=0)

    if n_channel is not None:
        imgs = np.repeat(imgs[..., np.newaxis], n_channel, axis=-1)
    else:
        n_channel = 1

    X = imgs + .6 * np.random.uniform(0, 1, imgs.shape)
    Y = (imgs if imgs.ndim == 3 else imgs[..., 0]).astype(int)

    conf = Config2D(
        n_rays=n_rays,
        grid=grid,
        n_channel_in=n_channel,
        use_gpu=False,
        train_epochs=1,
        train_steps_per_epoch=2,
        train_batch_size=2,
        train_loss_weights=(4, 1),
        train_patch_size=(128, 128),
    )

    model = StarDist2D(conf, name='stardist', basedir=str(tmpdir))
    model.train(X, Y, validation_data=(X[:2], Y[:2]))
    ref = model.predict(X[0])
    res = model.predict(X[0],
                        n_tiles=((2, 3) if X[0].ndim == 2 else (2, 3, 1)))
    # assert all(np.allclose(u,v) for u,v in zip(ref,res))

    # ask to train only with foreground patches when there are none
    # include a constant label image that must trigger a warning
    conf.train_foreground_only = 1
    conf.train_steps_per_epoch = 1
    _X = X[:2]
    _Y = [np.zeros_like(Y[0]), np.ones_like(Y[1])]
    with pytest.warns(UserWarning):
        StarDist2D(conf, name='stardist',
                   basedir=None).train(_X,
                                       _Y,
                                       validation_data=(X[-1:], Y[-1:]))
Example #6
def train_model(x_train,
                y_train,
                x_val,
                y_val,
                save_path,
                n_channels,
                patch_size,
                n_rays=32):

    # make the model config
    # copied from the StarDist training notebook: change 'False' to 'True' to let
    # the data generator use OpenCL-based computations (requires 'gputools')
    use_gpu = False and gputools_available()
    # predict on subsampled image for increased efficiency
    grid = (2, 2)
    config = Config2D(n_rays=n_rays,
                      grid=grid,
                      use_gpu=use_gpu,
                      n_channel_in=n_channels,
                      train_patch_size=patch_size)

    if use_gpu:
        print("Using a GPU for training")
        # limit gpu memory
        from csbdeep.utils.tf import limit_gpu_memory
        limit_gpu_memory(0.8)
    else:
        print("GPU not found, using the CPU for training")

    save_root, save_name = os.path.split(save_path)
    os.makedirs(save_root, exist_ok=True)
    model = StarDist2D(config, name=save_name, basedir=save_root)

    model.train(x_train,
                y_train,
                validation_data=(x_val, y_val),
                augmenter=augmenter)
    optimal_parameters = model.optimize_thresholds(x_val, y_val)
    return model, optimal_parameters
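The model.train call above passes an 'augmenter' that is not defined in this snippet. A minimal sketch of what such an augmenter could look like, following the usual flip/rotate-plus-intensity-jitter pattern from StarDist training scripts; the function body below is an assumption, not the author's code.

import numpy as np

def augmenter(x, y):
    # hypothetical augmenter: apply the same random rotation/flips to image and mask,
    # then jitter the intensities of the image only
    k = np.random.randint(4)
    x, y = np.rot90(x, k), np.rot90(y, k)
    for ax in (0, 1):
        if np.random.rand() < 0.5:
            x, y = np.flip(x, axis=ax), np.flip(y, axis=ax)
    x = x * np.random.uniform(0.8, 1.2) + np.random.uniform(-0.05, 0.05)
    return x, y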
Example #7
def test_model(tmpdir, n_rays, grid, n_channel, workers, use_sequence):
    img = circle_image(shape=(160, 160))
    imgs = np.repeat(img[np.newaxis], 3, axis=0)

    if n_channel is not None:
        imgs = np.repeat(imgs[..., np.newaxis], n_channel, axis=-1)
    else:
        n_channel = 1

    X = imgs + .6 * np.random.uniform(0, 1, imgs.shape)
    Y = (imgs if imgs.ndim == 3 else imgs[..., 0]).astype(int)

    if use_sequence:
        X, Y = NumpySequence(X), NumpySequence(Y)

    conf = Config2D(n_rays=n_rays,
                    grid=grid,
                    n_channel_in=n_channel,
                    use_gpu=False,
                    train_epochs=2,
                    train_steps_per_epoch=1,
                    train_batch_size=2,
                    train_loss_weights=(4, 1),
                    train_patch_size=(128, 128),
                    train_sample_cache=not use_sequence)

    model = StarDist2D(conf, name='stardist', basedir=str(tmpdir))
    model.train(X, Y, validation_data=(X[:2], Y[:2]), workers=workers)
    ref = model.predict(X[0])
    res = model.predict(X[0],
                        n_tiles=((2, 3) if X[0].ndim == 2 else (2, 3, 1)))

    # deactivate as order of labels might not be the same
    # assert all(np.allclose(u,v) for u,v in zip(ref,res))

    return model
Example #8
    for img in list(map(io.imread, merfish_X_manual_train_names))
]
merfish_Y_manual_train_names = sorted(
    glob(f'{merfish_dir}/train/masks/manual/*.tif'))
merfish_Y_manual_train = [
    fill_label_holes(img)
    for img in tqdm(list(map(io.imread, merfish_Y_manual_train_names)))
]

# Train validation split
(merfish_X_manual_train, merfish_Y_manual_train,
 merfish_X_manual_valid, merfish_Y_manual_valid) = train_validation_split(
     merfish_X_manual_train, merfish_Y_manual_train)

conf = Config2D(
    n_rays=32,
    grid=(2, 2),
    n_channel_in=1,
)
print(conf)
vars(conf)

# (number of training examples, number of validation examples) to use
num_train = [(5, 2), (10, 3)]

for i in range(len(num_train)):
    num, num_valid = num_train[i]
    # Load pretrained StarDist model for 2d fluorescent images
    pretrained_stardist_model = StarDist2D(None,
                                           name=f'2D_versatile_fluo_{num}',
                                           basedir=model_dir)
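train_validation_split is not a stardist/csbdeep function, so it is presumably a project-local helper. A minimal sketch of such a helper, assuming a seeded random shuffle followed by a fixed validation fraction; the split fraction and seed are assumptions, only the call signature and return order are taken from the snippet above.

import numpy as np

def train_validation_split(X, Y, valid_fraction=0.2, seed=42):
    # hypothetical helper: shuffle the paired lists, then split off a validation subset
    rng = np.random.RandomState(seed)
    idx = rng.permutation(len(X))
    n_valid = max(1, int(round(valid_fraction * len(X))))
    valid_idx, train_idx = idx[:n_valid], idx[n_valid:]
    X_train, Y_train = [X[i] for i in train_idx], [Y[i] for i in train_idx]
    X_valid, Y_valid = [X[i] for i in valid_idx], [Y[i] for i in valid_idx]
    return X_train, Y_train, X_valid, Y_valid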
Example #9
# 32 is a good default choice (see 1_data.ipynb)
n_rays = args.n_rays

# Use OpenCL-based computations for data generator during training (requires 'gputools')
use_gpu = args.use_gpu and gputools_available()

# Predict on subsampled grid for increased efficiency and larger field of view
grid = args.grid

conf = Config2D(
    n_rays       = n_rays,
    grid         = grid,
    use_gpu      = use_gpu,
    n_channel_in = n_channel,
)
print(conf)
vars(conf)


if use_gpu:
    from csbdeep.utils.tf import limit_gpu_memory
    # adjust as necessary: limit GPU memory to be used by TensorFlow to leave some to OpenCL-based computations
    limit_gpu_memory(args.limit_gpu_mem)

model = StarDist2D(conf, name=args.model_name, basedir=args.model_dir)
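This snippet reads its settings from an 'args' namespace created elsewhere (it also assumes 'n_channel' was set earlier, e.g. from the training images). A hypothetical argument parser covering the attributes used above; the flag names and defaults are assumptions, not the original script's interface.

import argparse

parser = argparse.ArgumentParser(description="Train a StarDist2D model")
parser.add_argument("--n_rays", type=int, default=32)
parser.add_argument("--grid", type=int, nargs=2, default=(2, 2))
parser.add_argument("--use_gpu", action="store_true")
parser.add_argument("--limit_gpu_mem", type=float, default=0.8)
parser.add_argument("--model_name", default="stardist")
parser.add_argument("--model_dir", default="models")
args = parser.parse_args()
args.grid = tuple(args.grid)  # keep grid as a tuple, as in the other examples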
Example #10
def _parse(n_classes, classes):
    model = StarDist2D(Config2D(n_classes=n_classes), None, None)
    classes = model._parse_classes_arg(classes, length=1)
    return classes
Example #11
def _test_model_multiclass(n_classes=1,
                           classes="auto",
                           n_channel=None,
                           basedir=None):
    from skimage.measure import regionprops
    img, mask = real_image2d()
    img = normalize(img, 1, 99.8)

    if n_channel is not None:
        img = np.repeat(img[..., np.newaxis], n_channel, axis=-1)
    else:
        n_channel = 1

    X, Y = [img, img, img], [mask, mask, mask]

    conf = Config2D(
        n_rays=48,
        grid=(2, 2),
        n_channel_in=n_channel,
        n_classes=n_classes,
        use_gpu=False,
        train_epochs=1,
        train_steps_per_epoch=1,
        train_batch_size=1,
        train_dist_loss="iou",
        train_patch_size=(128, 128),
    )

    if n_classes is not None and n_classes > 1 and classes == "auto":
        regs = regionprops(mask)
        areas = tuple(r.area for r in regs)
        inds = np.argsort(areas)
        ss = tuple(
            slice(n * len(regs) // n_classes, (n + 1) * len(regs) // n_classes)
            for n in range(n_classes))
        classes = {}
        for i, s in enumerate(ss):
            for j in inds[s]:
                classes[regs[j].label] = i + 1
        classes = (classes, ) * len(X)

    model = StarDist2D(conf,
                       name=None if basedir is None else "stardist",
                       basedir=None if basedir is None else str(basedir))

    val_classes = {k: 1 for k in set(mask[mask > 0])}

    s = model.train(X,
                    Y,
                    classes=classes,
                    epochs=30,
                    validation_data=(X[:1], Y[:1]) if n_classes is None else
                    (X[:1], Y[:1], (val_classes, )))

    img = np.tile(img, (4, 4) if img.ndim == 2 else (4, 4, 1))

    kwargs = dict(prob_thresh=.2)
    labels1, res1 = model.predict_instances(img, **kwargs)
    labels2, res2 = model.predict_instances(img, sparse=True, **kwargs)
    labels3, res3 = model.predict_instances_big(
        img,
        axes="YX" if img.ndim == 2 else "YXC",
        block_size=640,
        min_overlap=32,
        context=96,
        **kwargs)

    assert np.allclose(labels1, labels2)
    assert all([
        np.allclose(res1[k], res2[k])
        for k in set(res1.keys()).union(set(res2.keys()))
        if isinstance(res1[k], np.ndarray)
    ])

    return model, img, res1, res2, res3
Example #12
n_rays = args.n_rays

# Use OpenCL-based computations for data generator during training (requires 'gputools')
use_gpu = args.use_gpu and gputools_available()

# Predict on subsampled grid for increased efficiency and larger field of view
grid = args.grid

stepsEpoch = math.ceil(len(X_trn) / args.train_batch_size)

conf = Config2D(
    n_rays=n_rays,
    grid=grid,
    use_gpu=use_gpu,
    n_channel_in=n_channel,
    #~~
    train_learning_rate=lr,
    train_epochs=args.epochs,
    train_steps_per_epoch=stepsEpoch
    #~~
)
print(conf)
vars(conf)

if use_gpu:
    from csbdeep.utils.tf import limit_gpu_memory
    # adjust as necessary: limit GPU memory to be used by TensorFlow to leave some to OpenCL-based computations
    limit_gpu_memory(args.limit_gpu_mem)

model = StarDist2D(config=conf, name=args.model_name, basedir=args.model_dir)
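The snippet ends once the model is constructed. A hedged sketch of the training step that would typically follow, assuming training/validation splits 'X_trn'/'Y_trn' and 'X_val'/'Y_val' (X_trn is referenced above) and an 'augmenter' defined earlier in the truncated script:

model.train(X_trn, Y_trn,
            validation_data=(X_val, Y_val),
            augmenter=augmenter)
# choose probability/NMS thresholds on the validation data
model.optimize_thresholds(X_val, Y_val)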