Example #1
def em_G_D_001(x, scale=4, upsample=False):
    noise = np.random.normal(0, 3, x.shape)
    x = x + noise
    x = x - x.min()
    x = x/x.max()
    x_down = npzoom(x, 1/scale, order=1)
    x_up = npzoom(x_down, scale, order=1)
    return x_down, x_up
Example #2
def new_crappify(x, scale=4):
    x = random_noise(x, mode='salt', amount=0.005)
    x = random_noise(x, mode='pepper', amount=0.005)
    lvar = filters.gaussian(x, sigma=5)
    x = random_noise(x, mode='localvar', local_vars=lvar * 0.5)
    x_down = npzoom(x, 1 / scale, order=1)
    x_up = npzoom(x_down, scale, order=1)
    return x_down, x_up
Example #3
def em_G_D_002(x, scale=4, upsample=False):
    x = img_as_float(x)
    mu, sigma = 0, 3
    noise = np.random.normal(mu, sigma*0.05, x.shape)
    x = np.clip(x + noise, 0, 1)
    x_down = npzoom(x, 1/scale, order=1)
    x_up = npzoom(x_down, scale, order=1)
    return x_down, x_up
Example #4
def new_crappify(img, add_noise=True, scale=4):
    "a crappifier for our microscope images"
    if add_noise:
        img = random_noise(img, mode='salt', amount=0.005)
        img = random_noise(img, mode='pepper', amount=0.005)
        lvar = filters.gaussian(img, sigma=5)
        img = random_noise(img, mode='localvar', local_vars=lvar * 0.5)
    img_down = npzoom(img, 1 / scale, order=1)
    img_up = npzoom(img_down, scale, order=1)
    return img_down, img_up
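A minimal usage sketch for the crappifier above, assuming npzoom is scipy.ndimage.zoom and random_noise/filters come from scikit-image (the random test frame is an assumption, not part of the original listing):

import numpy as np
from scipy.ndimage import zoom as npzoom
from skimage import filters
from skimage.util import random_noise

hr = np.random.rand(256, 256).astype(np.float32)       # stand-in for a real HR frame in [0, 1]
lr, lr_up = new_crappify(hr, add_noise=True, scale=4)   # degraded pair for training
print(lr.shape, lr_up.shape)                            # (64, 64) (256, 256)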
Example #5
def image_to_synth(img_data, dest, mode, hr_dir, lr_dir, lrup_dir, save_name, single, multi, tiles, n_tiles, n_frames, scale, crappify_func):
    if len(img_data.shape) > 2:
        if len(img_data.shape) == 3:
            img_data = img_data[:,:,0]
        else:
            print(f'skip {save_name} multichannel')
            return

    h,w = img_data.shape
    adjh, adjw = (h//4) * 4, (w//4)*4
    hr_img = img_data[0:adjh, 0:adjw]

    crap_img = crappify_func(hr_img).astype(np.float32).copy() if crappify_func else hr_img
    lr_img = npzoom(crap_img, 1/scale, order=0).astype(np.float32).copy()
    lrup_img = npzoom(lr_img, scale, order=0).astype(np.float32).copy()

    if single:
        hr_name, lr_name, lrup_name = [d / save_name for d in [hr_dir, lr_dir, lrup_dir]]
        save_img(hr_name, hr_img)
        save_img(lr_name, lr_img)
        save_img(lrup_name, lrup_img)

    if tiles:
        for tile_sz in tiles:
            hr_tile_dir = ensure_folder(dest/f'hr_t_{tile_sz}'/mode)
            lr_tile_dir = ensure_folder(dest/f'lr_t_{tile_sz}'/mode)
            lrup_tile_dir = ensure_folder(dest/f'lrup_t_{tile_sz}'/mode)

            tile_id = 0
            tries = 0
            max_tries = 200
            thresh = 0.01
            thresh_pct = (hr_img > thresh).mean() * 1.5
            while tile_id < n_tiles:
                hr_tile, bounds = draw_tile(hr_img, tile_sz)
                if check_tile(hr_tile, thresh, thresh_pct):
                    tile_name = f'{save_name}_{tile_id:03d}'
                    hr_tile_name, lr_tile_name, lrup_tile_name = [d / tile_name for d
                                                                in [hr_tile_dir, lr_tile_dir, lrup_tile_dir]]
                    crap_tile = draw_tile_bounds(crap_img, bounds=bounds)
                    lr_tile = npzoom(crap_tile, 1/scale, order=0).astype(np.float32).copy()
                    lrup_tile = npzoom(lr_tile, scale, order=0).astype(np.float32).copy()
                    save_img(hr_tile_name, hr_tile)
                    save_img(lr_tile_name, lr_tile)
                    save_img(lrup_tile_name, lrup_tile)
                    tile_id += 1
                    tries = 0
                else:
                    tries += 1
                    if tries > (max_tries//2):
                        thresh_pct /= 2
                    if tries > max_tries:
                        print(f'timed out on {save_name}')
                        tries = 0
                        tile_id += 1
Example #6
def em_AG_P_D_001(x, scale=4, upsample=False):
    poisson_noisemap = np.random.poisson(x, size=None)
    #set_trace()
    lvar = filters.gaussian(x, sigma=3)
    x = random_noise(x, mode='localvar', local_vars=lvar*0.05)
    x = x + poisson_noisemap
    #x = x - x.min()
    #x = x/x.max()
    x_down = npzoom(x, 1/scale, order=1)
    x_up = npzoom(x_down, scale, order=1)
    return x_down, x_up
Example #7
def fluo_downsampleonly(x, scale=4, upsample=False):
    xn = np.array(x)
    xorig_max = xn.max()
    xn = xn.astype(np.float32)
    xn /= float(np.iinfo(np.uint8).max)
    new_max = xn.max()
    x = xn
    if new_max > 0:
        xn /= new_max
    xn *= xorig_max
    x_down = npzoom(x, 1/scale, order=1)
    #x_up = npzoom(x_down, scale, order=1)
    return PIL.Image.fromarray(x_down.astype(np.uint8))
Example #8
def fluo_G_D(x, scale=4, upsample=False):
    xn = np.array(x)
    xorig_max = xn.max()
    xn = xn.astype(np.float32)
    xn /= float(np.iinfo(np.uint8).max)

    mu, sigma = 0, 5
    noise = np.random.normal(mu, sigma*0.05, xn.shape)
    # add the noise to the normalized copy (clipping the raw [0, 255] array to
    # [0, 1] would wipe out the image), then rescale as the other fluo_* crappifiers do
    xn = np.clip(xn + noise, 0, 1)
    new_max = xn.max()
    x = xn
    if new_max > 0:
        xn /= new_max
    xn *= xorig_max
    x_down = npzoom(x, 1/scale, order=1)
    #x_up = npzoom(x_down, scale, order=1)
    return PIL.Image.fromarray(x_down.astype(np.uint8))
Example #9
def unet_multi_image_from_tiles(learn, in_img, tile_sz=128, scale=4, wsize=3):
    cur_size = in_img.shape[1:3]
    c = in_img.shape[0]
    new_size = (cur_size[0] * scale, cur_size[1] * scale)
    w, h = cur_size

    in_tile = torch.zeros((c, tile_sz // scale, tile_sz // scale))
    out_img = torch.zeros((1, w * scale, h * scale))
    tile_sz //= scale

    for x_tile in range(math.ceil(w / tile_sz)):
        for y_tile in range(math.ceil(h / tile_sz)):
            x_start = x_tile * tile_sz
            x_end = min(x_start + tile_sz, w)
            y_start = y_tile * tile_sz
            y_end = min(y_start + tile_sz, h)

            in_tile[:, 0:(x_end - x_start),
                    0:(y_end - y_start)] = tensor(in_img[:, x_start:x_end,
                                                         y_start:y_end])

            img_list = [
                Image(tensor(npzoom(in_tile[i], scale, order=1))[None])
                for i in range(wsize)
            ]
            #img_list += img_list

            tlist = MultiImage(img_list)
            out_tile, _, _ = learn.predict(tlist)

            out_x_start = x_start * scale
            out_x_end = x_end * scale
            out_y_start = y_start * scale
            out_y_end = y_end * scale

            #print("out: ", out_x_start, out_y_start, ",", out_x_end, out_y_end)
            in_x_start = 0
            in_y_start = 0
            in_x_end = (x_end - x_start) * scale
            in_y_end = (y_end - y_start) * scale
            #print("tile: ",in_x_start, in_y_start, ",", in_x_end, in_y_end)

            out_img[:, out_x_start:out_x_end,
                    out_y_start:out_y_end] = out_tile.data[:,
                                                           in_x_start:in_x_end,
                                                           in_y_start:in_y_end]
    return out_img
Example #10
def fluo_SP_D(x, scale=4, upsample=False):
    xn = np.array(x)
    xorig_max = xn.max()
    xn = xn.astype(np.float32)
    xn /= float(np.iinfo(np.uint8).max)
    xn = random_noise(xn, mode='salt', amount=0.005)
    xn = random_noise(xn, mode='pepper', amount=0.005)
    new_max = xn.max()
    x = xn
    if new_max > 0:
        xn /= new_max
    xn *= xorig_max
    x_down = npzoom(x, 1/scale, order=1)
    #x_up = npzoom(x_down, scale, order=1)
    return PIL.Image.fromarray(x_down.astype(np.uint8))
Example #11
def fluo_AG_D(x, scale=4, upsample=False):
    xn = np.array(x)
    xorig_max = xn.max()
    xn = xn.astype(np.float32)
    xn /= float(np.iinfo(np.uint8).max)

    lvar = filters.gaussian(xn, sigma=5) + 1e-10
    xn = random_noise(xn, mode='localvar', local_vars=lvar*0.5)
    new_max = xn.max()
    x = xn
    if new_max > 0:
        xn /= new_max
    xn *= xorig_max
    x_down = npzoom(x, 1/scale, order=1)
    #x_up = npzoom(x_down, scale, order=1)
    return PIL.Image.fromarray(x_down.astype(np.uint8))
Example #12
def main(
    load_name: Param("load learner name", str) = "em_save",
    save_dir: Param("dir to save to:",
                    str) = "/scratch/bpho/results/emsynth_crap",
    gpu: Param("GPU to run on", int) = 0,
):
    torch.cuda.set_device(gpu)

    bs = 1
    size = 1920
    data = get_data(bs, size)

    arch = models.resnet34
    wd = 1e-3
    learn = unet_learner(data,
                         arch,
                         wd=wd,
                         loss_func=feat_loss,
                         metrics=superres_metrics,
                         callback_fns=LossMetrics,
                         blur=True,
                         norm_type=NormType.Weight,
                         model_dir=model_path)
    gc.collect()

    learn = learn.load(load_name)

    test_files = Path(
        '/scratch/bpho/datasources/EM_manually_aquired_pairs_01242019/')
    test_hr = list((test_files / 'aligned_hr').glob('*.tif'))
    test_lr = list((test_files / 'aligned_lr').glob('*.tif'))
    results = Path(save_dir)

    if results.exists(): shutil.rmtree(results)
    results.mkdir(parents=True, mode=0o775, exist_ok=True)

    def get_key(fn):
        return fn.stem[0:(fn.stem.find('Region') - 1)]

    hr_map = {get_key(fn): fn for fn in test_hr}
    lr_map = {get_key(fn): fn for fn in test_lr}

    ssims = []
    psnrs = []
    for k in progress_bar(hr_map):
        hr_fn, lr_fn = hr_map[k], lr_map[k]
        hr_img = PIL.Image.open(hr_fn)
        lr_img = PIL.Image.open(lr_fn)
        lr_img_data = img_as_float32(lr_img)
        lr_up_data = npzoom(lr_img_data, 4, order=1)
        lr_up_img = Image(tensor(lr_up_data[None]))
        hr_pred_img, aaa, bbb = learn.predict(lr_up_img)
        pred_img = PIL.Image.fromarray(
            img_as_ubyte(np.array(hr_pred_img.data))[0, :, :])

        lr_img.save(results / f'{k}_orig.tif')
        hr_img.save(results / f'{k}_truth.tif')
        pred_img.save(results / f'{k}_pred.tif')
        hr_img_data = np.array(hr_img)

        ssims.append(
            compare_ssim(img_as_float32(np.array(hr_img)),
                         img_as_float32(np.array(pred_img))))
        psnrs.append(
            compare_psnr(img_as_float32(np.array(hr_img)),
                         img_as_float32(np.array(pred_img))))
    print(np.array(ssims).mean(), np.array(psnrs).mean())

    #target_path = Path('/DATA/Dropbox/bpho_movie_results/emsynth_003/')
    target_path = results

    orig, tru, pred = [
        list(target_path.glob(f'*{tag}*')) for tag in ['orig', 'tru', 'pred']
    ]
    orig.sort()
    tru.sort()
    pred.sort()

    ssims = []
    c_ssims = []
    l_ssims = []
    psnrs = []
    c_psnrs = []
    l_psnrs = []

    for o, t, p in progress_bar(list(zip(orig, tru, pred))):
        oimg, timg, pimg = [img_as_float32(io.imread(fn)) for fn in [o, t, p]]
        if len(pimg.shape) == 3: pimg = pimg[:, :, 0]
        cimg = npzoom(oimg, 4)
        limg = npzoom(oimg, 4, order=1)

        ssims.append(compare_ssim(timg, pimg))
        c_ssims.append(compare_ssim(timg, cimg))
        l_ssims.append(compare_ssim(timg, limg))
        psnrs.append(compare_psnr(timg, pimg))
        c_psnrs.append(compare_psnr(timg, cimg))
        l_psnrs.append(compare_psnr(timg, limg))

    import pandas as pd

    df = pd.DataFrame(
        dict(ssim=ssims,
             psnr=psnrs,
             bicubic_ssim=c_ssims,
             bicubic_psnr=c_psnrs,
             bilinear_ssim=l_ssims,
             bilinear_psnr=l_psnrs))

    print(df.describe())
Example #13
def bilinear(img, **kwargs):
    pred_img = npzoom(img, 4, order=1)
    return pred_img / pred_img.max()
Example #14
def em_crappify(x, scale=4):
    lvar = filters.gaussian(x, sigma=3)
    x = random_noise(x, mode='localvar', local_vars=lvar*0.05)
    x_down = npzoom(x, 1/scale, order=1)
    x_up = npzoom(x_down, scale, order=1)
    return x_down, x_up
Example #15
def em_P_D_001(x, scale=4, upsample=False):
    x = random_noise(x, mode='poisson', seed=1)
    x_down = npzoom(x, 1/scale, order=1)
    x_up = npzoom(x_down, scale, order=1)
    return x_down, x_up
Example #16
def bicubic(img, **kwargs):
    pred_img = npzoom(img, 4, order=3)
    return pred_img
Example #17
def original(img, **kwargs):
    pred_img = npzoom(img, 4, order=0)
    return pred_img / pred_img.max()
Example #18
def em_downsampleonly(x, scale=4, upsample=False):
    x_down = npzoom(x, 1/scale, order=1)
    x_up = npzoom(x_down, scale, order=1)
    return x_down, x_up
Example #19
def czi_movie_to_synth(czi_fn,
                       dest,
                       category,
                       mode,
                       single=True,
                       multi=False,
                       tiles=None,
                       scale=4,
                       n_tiles=5,
                       n_frames=5,
                       crappify_func=None):
    base_name = czi_fn.stem
    if single:
        hr_dir = ensure_folder(dest / 'hr' / mode / category)
        lr_dir = ensure_folder(dest / 'lr' / mode / category)
        lrup_dir = ensure_folder(dest / 'lrup' / mode / category)
        with czifile.CziFile(czi_fn) as czi_f:
            data = czi_f.asarray()
            axes, shape = get_czi_shape_info(czi_f)
            channels = shape['C']
            depths = shape['Z']
            times = shape['T']
            x,y = shape['X'], shape['Y']

            for channel in range(channels):
                for depth in range(depths):
                    for t in range(times):
                        save_name = f'{channel:02d}_{depth:02d}_{t:06d}_{base_name}'
                        idx = build_index( axes, {'T': t, 'C':channel, 'Z': depth, 'X':slice(0,x), 'Y':slice(0,y)})
                        img_data = data[idx].astype(np.float32).copy()
                        img_max = img_data.max()
                        if img_max != 0: img_data /= img_max

                        image_to_synth(img_data, dest, mode, hr_dir, lr_dir, lrup_dir, save_name,
                                    single, multi, tiles, n_tiles, n_frames, scale, crappify_func)

    if multi:
        with czifile.CziFile(czi_fn) as czi_f:
            proc_axes, proc_shape = get_czi_shape_info(czi_f)
            channels = proc_shape['C']
            depths = proc_shape['Z']
            times = proc_shape['T']
            x,y = proc_shape['X'], proc_shape['Y']
            data = czi_f.asarray()
            for channel in range(channels):
                img_max = None
                timerange = list(range(0,times-n_frames+1, n_frames))
                if len(timerange) >= n_frames:
                    hr_mt_dir = ensure_folder(dest / f'hr_mt_{n_frames:02d}' / mode / category)
                    lr_mt_dir = ensure_folder(dest / f'lr_mt_{n_frames:02d}' / mode / category)
                    lrup_mt_dir = ensure_folder(dest / f'lrup_mt_{n_frames:02d}' / mode / category)

                    for time_col in timerange:
                        save_name = f'{channel:02d}_T{time_col:05d}-{(time_col+n_frames-1):05d}_{base_name}'
                        idx = build_index(proc_axes, {'T': slice(time_col,time_col+n_frames), 'C': channel, 'X':slice(0,x),'Y':slice(0,y)})
                        img_data = data[idx].astype(np.float32).copy()
                        img_max = img_data.max()
                        if img_max != 0: img_data /= img_max

                        _,h,w = img_data.shape
                        adjh, adjw = (h//4) * 4, (w//4)*4
                        hr_imgs = img_data[:,0:adjh, 0:adjw]
                        lr_imgs = []
                        lrup_imgs = []

                        for i in range(hr_imgs.shape[0]):
                            hr_img = hr_imgs[i]
                            crap_img = crappify_func(hr_img).astype(np.float32).copy() if crappify_func else hr_img
                            lr_img = npzoom(crap_img, 1/scale, order=0).astype(np.float32).copy()
                            lr_imgs.append(lr_img)
                            lrup_img = npzoom(lr_img, scale, order=0).astype(np.float32).copy()
                            lrup_imgs.append(lrup_img)

                        lr_imgs = np.array(lr_imgs).astype(np.float32).copy()
                        lrup_imgs = np.array(lrup_imgs).astype(np.float32).copy()
                        hr_img = hr_imgs[hr_imgs.shape[0]//2].astype(np.float32).copy()
                        hr_mt_name, lr_mt_name, lrup_mt_name = [d / save_name for d in [hr_mt_dir, lr_mt_dir, lrup_mt_dir]]
                        np.save(hr_mt_name, hr_img)
                        np.save(lr_mt_name, lr_imgs)
                        np.save(lrup_mt_name, lrup_imgs)

                        make_multi_tiles(tiles, category, n_tiles, scale, hr_img, lr_imgs, lrup_imgs,
                                         save_name, dest, n_frames, mode, 't')

                if depths >= n_frames:
                    hr_mz_dir = ensure_folder(dest / f'hr_mz_{n_frames:02d}' / mode / category)
                    lr_mz_dir = ensure_folder(dest / f'lr_mz_{n_frames:02d}' / mode / category)
                    lrup_mz_dir = ensure_folder(dest / f'lrup_mz_{n_frames:02d}' / mode / category)

                    mid_depth = depths // 2
                    start_depth = mid_depth - n_frames//2
                    end_depth = mid_depth + n_frames//2
                    depthrange = slice(start_depth,end_depth+1)
                    save_name = f'{channel:02d}_Z{start_depth:05d}-{end_depth:05d}_{base_name}'
                    idx = build_index(proc_axes, {'Z': depthrange, 'C': channel, 'X':slice(0,x),'Y':slice(0,y)})
                    img_data = data[idx].astype(np.float32).copy()
                    img_max = img_data.max()
                    if img_max != 0: img_data /= img_max

                    _,h,w = img_data.shape
                    adjh, adjw = (h//4) * 4, (w//4)*4
                    hr_imgs = img_data[:,0:adjh, 0:adjw]
                    lr_imgs = []
                    lrup_imgs = []

                    for i in range(hr_imgs.shape[0]):
                        hr_img = hr_imgs[i]
                        crap_img = crappify_func(hr_img).astype(np.float32).copy() if crappify_func else hr_img
                        lr_img = npzoom(crap_img, 1/scale, order=0).astype(np.float32).copy()
                        lr_imgs.append(lr_img)
                        lrup_img = npzoom(lr_img, scale, order=0).astype(np.float32).copy()
                        lrup_imgs.append(lrup_img)

                    lr_imgs = np.array(lr_imgs).astype(np.float32).copy()
                    lrup_imgs = np.array(lrup_imgs).astype(np.float32).copy()
                    hr_img = hr_imgs[hr_imgs.shape[0]//2].astype(np.float32).copy()
                    hr_mz_name, lr_mz_name, lrup_mz_name = [d / save_name for d in [hr_mz_dir, lr_mz_dir, lrup_mz_dir]]
                    np.save(hr_mz_name, hr_img)
                    np.save(lr_mz_name, lr_imgs)
                    np.save(lrup_mz_name, lrup_imgs)

                    make_multi_tiles(tiles, category, n_tiles, scale, hr_img, lr_imgs, lrup_imgs,
                                        save_name, dest, n_frames, mode, 'z')
Example #20
def unet_image_from_tiles_partialsave(learn,
                                      in_img,
                                      tile_sz=(256, 256),
                                      scale=(4, 4),
                                      overlap_pct=(0.50, 0.50),
                                      img_info=None):
    """
    This function run inference on a trained model and removes tiling artifacts.

    Input:
    - learn: learner
    - in_img: input image (2d/3d), floating array
    - tile_sz: XY dimension of the small tile that will be fed into GPU [p q]
    - scale: upsampling scale
    - overlap_pct: overlap percent while cropping the tiles in xy dimension [alpha beta],
                   floating tuple, ranging from 0 to 1
    - img_info: mi, ma, max

    Output:
    - predicted image (2d), ranging from 0 to 1

    """
    n_frames = in_img.shape[0]

    if img_info:
        mi, ma, imax = [img_info[fld] for fld in ['mi', 'ma', 'img_max']]
        in_img = ((in_img - mi) / (ma - mi + 1e-20)).clip(0., 1.)
    else:
        mi, ma = 0., 1.
    in_img = np.stack(
        [npzoom(in_img[i], scale, order=1) for i in range(n_frames)])

    Y, X = in_img.shape[1:3]
    p, q = tile_sz[0:2]
    alpha, beta = overlap_pct[0:2]
    print('Y,X=', Y, X)
    assembled = np.zeros((Y, X))  # rows = Y, cols = X, matching the [y, x] indexing below

    # X = p + (m - 1) * (1 - alpha) * p + epsilonX
    numX, epsX = divmod(X - p, p - int(p * alpha)) if X - p > 0 else (0, X)
    numY, epsY = divmod(Y - q, q - int(q * beta)) if Y - q > 0 else (0, Y)
    numX = int(numX) + 1
    numY = int(numY) + 1

    for i in range(numX + 1):
        for j in range(numY + 1):
            crop_x_start = int(i * (1 - alpha) * p)
            crop_x_end = min(crop_x_start + p, X)
            crop_y_start = int(j * (1 - beta) * q)
            crop_y_end = min(crop_y_start + q, Y)

            src_tile = in_img[:, crop_y_start:crop_y_end,
                              crop_x_start:crop_x_end]

            in_tile = torch.zeros((p, q, n_frames))
            in_x_size = crop_x_end - crop_x_start
            in_y_size = crop_y_end - crop_y_start
            if (in_y_size, in_x_size) != src_tile.shape[1:3]: set_trace()
            in_tile[0:in_y_size,
                    0:in_x_size, :] = tensor(src_tile).permute(1, 2, 0)

            if n_frames > 1:
                img_in = MultiImage(
                    [Image(in_tile[:, :, i][None]) for i in range(n_frames)])
            else:
                img_in = Image(in_tile[:, :, 0][None])
            y, pred, raw_pred = learn.predict(img_in)

            out_tile = pred.numpy()[0]

            tileROI_x_start = int(0.5 *
                                  int(alpha * p)) if crop_x_start != 0 else 0
            tileROI_x_end = int(p - 0.5 * int(alpha * p)
                                ) if crop_x_end != X else int(alpha * p + epsX)
            tileROI_y_start = int(0.5 *
                                  int(beta * q)) if crop_y_start != 0 else 0
            tileROI_y_end = int(
                q - 0.5 * int(beta * q)) if crop_y_end != Y else int(beta * q +
                                                                     epsY)

            tileROI_x_end = X if X - q < 0 else tileROI_x_end
            tileROI_y_end = Y if Y - p < 0 else tileROI_y_end

            out_x_start = int(p - 0.5 * int(alpha * p) + (i - 1) *
                              (p - int(alpha * p))) if crop_x_start != 0 else 0
            out_x_end = int(p - 0.5 * int(alpha * p) + i *
                            (p - int(alpha * p))) if crop_x_end != X else X
            out_y_start = int(q - 0.5 * int(beta * q) + (j - 1) *
                              (q - int(beta * q))) if crop_y_start != 0 else 0
            out_y_end = int(q - 0.5 * int(beta * q) + j *
                            (q - int(beta * q))) if crop_y_end != Y else Y
            assembled[out_y_start:out_y_end, out_x_start:out_x_end] = out_tile[
                tileROI_y_start:tileROI_y_end, tileROI_x_start:tileROI_x_end]

    assembled -= assembled.min()
    assembled /= assembled.max()
    assembled *= (ma - mi)
    assembled += mi

    return assembled.astype(np.float32)
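The tile-grid arithmetic described in the docstring above can be checked by hand; a small sketch using the same divmod convention as the function body (the concrete numbers are illustrative only, not from the original listing):

X, p, alpha = 1000, 256, 0.5                     # image width, tile width, overlap fraction
numX, epsX = divmod(X - p, p - int(p * alpha))   # 5 full steps of 128 px, 104 px left over
numX = int(numX) + 1                             # 6 interior tile positions before the edge tile
print(numX, epsX)                                # 6 104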
Example #21
def em_AG_D_sameas_preprint(x, scale=4, upsample=False):
    lvar = filters.gaussian(x, sigma=3)
    x = random_noise(x, mode='localvar', local_vars=lvar*0.05)
    x_down = npzoom(x, 1/scale, order=1)
    x_up = npzoom(x_down, scale, order=1)
    return x_down, x_up
Example #22
def unet_image_from_tiles_blend(learn,
                                in_img,
                                use_tiles,
                                tile_sz=256,
                                scale=4,
                                overlap_pct=5.0,
                                img_info=None):
    n_frames = in_img.shape[0]
    if img_info:
        mi, ma, imax, real_max = [
            img_info[fld] for fld in ['mi', 'ma', 'img_max', 'real_max']
        ]
        in_img /= real_max
        # in_img = ((in_img - mi) / (ma - mi + 1e-20)).clip(0.,1.)
    else:
        mi, ma, imax, real_max = 0., 1., 1., 1.
    in_img = np.stack(
        [npzoom(in_img[i], scale, order=1) for i in range(n_frames)])
    h, w = in_img.shape[1:3]
    assembled = np.zeros((h, w))

    if not use_tiles: tile_sz = h  #assume it is a square
    overlap = int(tile_sz * (overlap_pct / 100.) // 2 * 2)
    step_sz = tile_sz - overlap

    x_seams = set()
    y_seams = set()

    for x_tile in range(0, math.ceil(w / step_sz)):
        for y_tile in range(0, math.ceil(h / step_sz)):
            x_start = x_tile * step_sz
            x_end = min(x_start + tile_sz, w)
            y_start = y_tile * step_sz
            y_end = min(y_start + tile_sz, h)
            src_tile = in_img[:, y_start:y_end, x_start:x_end]

            in_tile = torch.zeros((tile_sz, tile_sz, n_frames))
            in_x_size = x_end - x_start
            in_y_size = y_end - y_start
            if (in_y_size, in_x_size) != src_tile.shape[1:3]: set_trace()
            in_tile[0:in_y_size,
                    0:in_x_size, :] = tensor(src_tile).permute(1, 2, 0)

            if n_frames > 1:
                img_in = MultiImage(
                    [Image(in_tile[:, :, i][None]) for i in range(n_frames)])
            else:
                img_in = Image(in_tile[:, :, 0][None])
            y, pred, raw_pred = learn.predict(img_in)

            out_tile = pred.numpy()[0]

            half_overlap = overlap // 2
            left_adj = half_overlap if x_start != 0 else 0
            right_adj = half_overlap if x_end != w else 0
            top_adj = half_overlap if y_start != 0 else 0
            bot_adj = half_overlap if y_end != h else 0

            trim_y_start = y_start + top_adj
            trim_x_start = x_start + left_adj
            trim_y_end = y_end - bot_adj
            trim_x_end = x_end - right_adj

            out_x_start = left_adj
            out_y_start = top_adj
            out_x_end = in_x_size - right_adj
            out_y_end = in_y_size - bot_adj
            assembled[trim_y_start:trim_y_end,
                      trim_x_start:trim_x_end] = out_tile[
                          out_y_start:out_y_end, out_x_start:out_x_end]
            if trim_x_start != 0: x_seams.add(trim_x_start)
            if trim_y_start != 0: y_seams.add(trim_y_start)

    blur_rects = []
    blur_size = 5
    for x_seam in x_seams:
        left = x_seam - blur_size
        right = x_seam + blur_size
        top, bottom = 0, h
        blur_rects.append((slice(top, bottom), slice(left, right)))

    for y_seam in y_seams:
        top = y_seam - blur_size
        bottom = y_seam + blur_size
        left, right = 0, w
        blur_rects.append((slice(top, bottom), slice(left, right)))

    for xs, ys in blur_rects:
        assembled[xs, ys] = gaussian(assembled[xs, ys], sigma=1.0)

    # if assembled.min() < 0: assembled -= assembled.min()
    # assembled += imax
    # assembled *= imax
    # assembled *= (ma - mi)
    # assembled += mi

    return assembled.astype(np.float32).clip(0., 1.)
Example #23
def downsample(fn, i, scale=4):
    dest = lr_tifs / fn.relative_to(hr_tifs)
    img = PIL.Image.open(fn)
    down_img = npzoom(np.array(img), 1. / scale, order=1)
    ensure_folder(dest.parent)
    PIL.Image.fromarray(down_img).save(dest)
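The helper above reads the module-level hr_tifs/lr_tifs paths and relies on the ensure_folder helper used elsewhere in this listing; a hedged driver sketch (the directory names are assumptions, not part of the original code):

from pathlib import Path

hr_tifs = Path('datasets/hr_tifs')    # assumed location of the high-resolution tifs
lr_tifs = Path('datasets/lr_tifs')    # destination for the downsampled copies
for i, fn in enumerate(sorted(hr_tifs.glob('**/*.tif'))):
    downsample(fn, i, scale=4)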