Example #1
def immontage(imgs, sz=None):
    ''' Montage color images '''
    # input is required as a list of images
    # assert isinstance(imgs, list)
    
    if sz is not None:
        
        size = sz 
        if sz[1] < 0:
            size = (sz[0], int(numpy.ceil(len(imgs) / float(sz[0]))))

    else:
        imh, imw = imgs[0].shape[:2]
        mgh = int(numpy.sqrt(imw * len(imgs) / imh))
        mgw = int(numpy.sqrt(imh * len(imgs) / imw))
        if mgh*mgw < len(imgs):
            mgw += 1
            mgh += 1
        size = (mgh, mgw) 

    if imgs[0].ndim == 2:
        
        return montage2d(arr_in=numpy.asarray(imgs), grid_shape=size)

    else:

        c1 = [im[:, :, 0] for im in imgs]
        c2 = [im[:, :, 1] for im in imgs]
        c3 = [im[:, :, 2] for im in imgs]
        c1m = montage2d(arr_in=numpy.asarray(c1), grid_shape=size)
        c2m = montage2d(arr_in=numpy.asarray(c2), grid_shape=size)
        c3m = montage2d(arr_in=numpy.asarray(c3), grid_shape=size)
        return numpy.dstack((c1m, c2m, c3m))
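A minimal usage sketch, assuming immontage above is defined in a module that imports numpy and montage2d; newer scikit-image releases expose the montage helper as skimage.util.montage instead of montage2d:

import numpy
try:
    from skimage.util.montage import montage2d     # older scikit-image
except ImportError:
    from skimage.util import montage as montage2d  # scikit-image >= 0.16 dropped montage2d

# six 32x32 RGB tiles laid out on a 2 x 3 grid
imgs = [numpy.full((32, 32, 3), i / 6.0) for i in range(6)]
mosaic = immontage(imgs, sz=(2, 3))
print(mosaic.shape)  # (64, 96, 3)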
Example #2
    def _show_plot(c_subax, cur_var):
        if len(cur_var.shape) >= 2:
            c_mat = cur_var[0, 0] if cur_var.shape[1] == 1 else cur_var[0]
        else:
            c_mat = cur_var

        if normalize:
            if not np.isnan(c_mat.mean()):
                c_mat = c_mat.astype(np.float32) - c_mat.mean()
            if c_mat.std() > 0:
                c_mat = c_mat.astype(np.float32) / c_mat.std()

        if len(c_mat.shape) == 1:
            if len(c_mat) > max_bars:
                c_mat = c_mat[:max_bars]
            ind = np.array(range(len(c_mat)))
            c_subax.bar(ind, c_mat)
        elif len(c_mat.shape) == 2:
            c_subax.imshow(c_mat, **im_settings)
        elif len(c_mat.shape) == 3:
            c_subax.imshow(montage2d(c_mat), **im_settings)
        elif len(c_mat.shape) == 4:
            c_subax.imshow(montage2d(
                np.stack([montage2d(c_layer) for c_layer in c_mat], 0)),
                **im_settings)
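The 4-D branch above tiles a montage of per-filter montages; a small standalone sketch of that idea, assuming montage2d is imported as in the shim at the end of Example #1:

import numpy as np
weights = np.random.rand(8, 4, 5, 5)                       # 8 filters with 4 channels of 5x5
per_filter = np.stack([montage2d(w) for w in weights], 0)  # each filter -> a (10, 10) tile sheet
overview = montage2d(per_filter)                           # -> one (30, 30) overview image
print(overview.shape)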
Example #3
    def canonical_image(self, blob_name, feature_idx, k, tmp_fname):
        # load the image and reconstruction
        act_env = lmdb.open(self.config.max_activation_dbname, map_size=recon.config.config.lmdb_map_size)
        act_key = self._get_key(blob_name, feature_idx)
        with act_env.begin() as txn:
            val = txn.get(act_key)
            if val is None:
                raise Exception('activation for key {} not yet stored'.format(act_key))
            activations = pkl.loads(val)[-k:]
        img_patches = []
        rec_patches = []

        # crop patches from the image and reconstruction
        for i, act in enumerate(activations):
            rec = act['reconstruction']
            img = act['img']
            top_left, bottom_right = act['patch_bbox']
            top, left, bottom, right = top_left + bottom_right
            img = img[top:bottom+1, left:right+1]
            rec = rec[top:bottom+1, left:right+1]
            img_patches.append(img)
            rec_patches.append(rec)

        # display the patches in a grid, then save to a file
        patch_size = [0, 0]
        for img, rec in zip(img_patches, rec_patches):
            patch_size[0] = max(img.shape[0], patch_size[0])
            patch_size[0] = max(rec.shape[0], patch_size[0])
            patch_size[1] = max(img.shape[1], patch_size[1])
            patch_size[1] = max(rec.shape[1], patch_size[1])
        def scale(patch):
            assert len(patch.shape) == 3
            if patch.shape[:2] == tuple(patch_size):
                return patch
            new_patch = np.zeros(tuple(patch_size) + patch.shape[2:3], dtype=patch.dtype)
            new_patch[:patch.shape[0], :patch.shape[1], :patch.shape[2]] = patch[:, :, :]
            return new_patch
        img_patches = (scale(img) for img in img_patches)
        rec_patches = (scale(rec) for rec in rec_patches)
        pad_img_patches = [np.pad(img, ((0, 2), (0, 2), (0, 0)), 'constant')
                                                for img in img_patches]
        pad_rec_patches = [np.pad(rec, ((0, 2), (0, 2), (0, 0)), 'constant')
                                                for rec in rec_patches]
        img_patches = np.vstack([img[np.newaxis] for img in pad_img_patches])
        rec_patches = np.vstack([rec[np.newaxis] for rec in pad_rec_patches])
        img_mon_channels = []
        rec_mon_channels = []
        for channel in range(img_patches.shape[-1]):
            imgs = img_patches[:, :, :, channel]
            mon = montage.montage2d(imgs, fill=0, rescale_intensity=False)
            img_mon_channels.append(mon)
            recs = rec_patches[:, :, :, channel]
            mon = montage.montage2d(recs, fill=0, rescale_intensity=False)
            rec_mon_channels.append(mon)
        img = np.dstack(img_mon_channels)
        rec = np.dstack(rec_mon_channels)
        combined = np.hstack([img, np.zeros([img.shape[0], 5, img.shape[2]], dtype=img.dtype), rec])
        io.imsave(tmp_fname, combined)
Example #4
def montage_by(config,df,timepoint='t2',n=10,size=25,return_index=False,**kwargs):
    clip_edge = int((float(size)/2))+1
    b = config['image']['size']-1
    s = select(select_range(df,ImageX=[clip_edge,b-clip_edge],ImageY=[clip_edge,b-clip_edge]),**kwargs).sample(n)
    arr = np.dstack([slice_well(config,r,timepoint=timepoint,size=size) for i,r in s.iterrows()]).transpose([2,0,1])

    if not return_index:
        return montage.montage2d(arr)
    else:
        return montage.montage2d(arr), s.index
Example #5
def montage_ndimage(ndimg, **kwargs):
    """create montage mpl plot for 3D ndimage
    in gray (ndim == 3)
    or color (ndim == 4)
    """
    if np.ndim(ndimg) == 3:
        m = montage2d(ndimg, fill=0)
    elif np.ndim(ndimg) == 4:  # trailing RGB dimension
        m = np.stack(
            (montage2d(np.squeeze(ndimg[..., 0]), fill=0),
             montage2d(np.squeeze(ndimg[..., 1]), fill=0),
             montage2d(np.squeeze(ndimg[..., 2]), fill=0)),
            axis=2)
    else:
        raise ValueError('montage_ndimage expects a 3D or 4D array, '
                         'got ndim={}'.format(np.ndim(ndimg)))
    im = plt.imshow(m, **kwargs)
    return im
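A minimal usage sketch for montage_ndimage, assuming numpy, matplotlib.pyplot (as plt) and montage2d are imported as in the function's module:

vol = np.random.rand(16, 64, 64)     # 16 grayscale slices
montage_ndimage(vol, cmap='gray')    # one 4 x 4 mosaic of the slices
plt.show()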
Example #6
def calc_montage_layer_view(td_seg_model,
                            layer_id,
                            img_in,
                            border_padding=1,
                            scale_f=2.0,
                            cmap_fun=cm.get_cmap('RdBu'),
                            verbose=False):
    """
    Calculate a RGB representation of the image in the 3rd layer of the neural network
    """
    img_representation = layer_id.get_output_at(0)
    if verbose:
        print('input:', img_in.shape, 'layer:', layer_id.name, 'output:',
              img_representation)
    img_func = K.function(
        [td_seg_model.input, K.learning_phase()], [img_representation])

    rgb_outputs = img_func([img_in,
                            0])[0]  # learning phase 0 = test mode, so dropout is disabled
    if verbose: print('out_shape', rgb_outputs.shape)
    _, _, l_wid, l_height = rgb_outputs.shape
    rgb8_outputs = rgb_outputs
    rgb8_outputs = (rgb8_outputs - np.mean(rgb8_outputs)) / (
        scale_f * rgb8_outputs.std()) + 0.5
    rgb8_outputs = rgb8_outputs.clip(-1, 1).astype(np.float32).reshape(
        (-1, l_wid, l_height))
    if border_padding > 0:
        rgb8_outputs = np.pad(rgb8_outputs, border_padding,
                              mode='constant')[border_padding:-border_padding]
    rgb_montage = montage2d(rgb8_outputs)
    if verbose: print('montage_shape', rgb_montage.shape)
    return cmap_fun(rgb_montage)
Example #7
def montage_wb_ratio(input_image, patch_shape, n_filters, ele_print=False):

    patches = view_as_windows(input_image, patch_shape)
    patches = patches.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
    fb, _ = kmeans2(patches, n_filters, minit='random')
    fb = fb.reshape((-1, ) + patch_shape)
    fb_montage = montage2d(fb, fill=False, rescale_intensity=True)
    shape_var = int(np.ceil(np.sqrt(n_filters)))
    elements = np.split(np.hstack(np.split(fb_montage, shape_var)),
                        shape_var**2,
                        axis=1)
    del elements[n_filters:]
    wb_ratios = []
    bin_elements = []

    for element in elements:
        thresh = threshold_mean(element)
        binary = element > thresh
        ratio = np.sum(binary) / binary.size
        wb_ratios.append(ratio)

        if ele_print:
            bin_elements.append(binary)

    wb_ratios = sorted(wb_ratios, reverse=True)

    if ele_print:
        show_images(elements)
        show_images(bin_elements)

    return wb_ratios
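The np.split/np.hstack step above cuts the montage back into individual filter tiles; a self-contained sketch of just that step, with montage2d as in the shim at the end of Example #1:

import numpy as np
fb = np.arange(6 * 3 * 3, dtype=float).reshape(6, 3, 3)  # six fake 3x3 filters
grid = int(np.ceil(np.sqrt(len(fb))))                    # default montage grid: 3x3 here
mont = montage2d(fb, fill=0)
tiles = np.split(np.hstack(np.split(mont, grid)), grid ** 2, axis=1)
del tiles[len(fb):]                                      # drop the padded grid cells
print(len(tiles), tiles[0].shape)                        # 6 (3, 3)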
Example #8
def test_grid_shape():
    n_images = 6
    height, width = 2, 2
    arr_in = np.arange(n_images * height * width, dtype=np.float32)
    arr_in = arr_in.reshape(n_images, height, width)
    arr_out = montage2d(arr_in, grid_shape=(3, 2))
    correct_arr_out = np.array([[0., 1., 4., 5.], [2., 3., 6., 7.],
                                [8., 9., 12., 13.], [10., 11., 14., 15.],
                                [16., 17., 20., 21.], [18., 19., 22., 23.]])
    assert_array_equal(arr_out, correct_arr_out)
Example #9
def test_shape():
    n_images = 15
    height, width = 11, 7
    arr_in = np.arange(n_images * height * width)
    arr_in = arr_in.reshape(n_images, height, width)

    alpha = int(np.ceil(np.sqrt(n_images)))

    arr_out = montage2d(arr_in)
    assert_equal(arr_out.shape, (alpha * height, alpha * width))
Example #10
def test_shape():
    n_images = 15
    height, width = 11, 7
    arr_in = np.arange(n_images * height * width)
    arr_in = arr_in.reshape(n_images, height, width)

    alpha = int(np.ceil(np.sqrt(n_images)))

    arr_out = montage2d(arr_in)
    assert_equal(arr_out.shape, (alpha * height, alpha * width))
Example #11
def montage_projection(im_dir, trange=None, context=None):
    """
    Generate a montage of x projections.

    im_dir : str, path to a directory containing [x, y, z] data saved as tif

    trange : object usable for linear indexing; the set of timepoints to use

    context : spark context object used for parallelization
    """
    import thunder as td
    import skimage.external.tifffile as tif
    from glob import glob
    from skimage.util.montage import montage2d
    from skimage.exposure import rescale_intensity as rescale
    import numpy as np
    from skimage import io

    exp_name = im_dir.split('/')[-2]

    print('Exp name: {0}'.format(exp_name))
    ims = td.images.fromtif(im_dir + 'TM*.tif', engine=context)
    print('Experiment dims: {0}'.format(ims.shape))
    
    if trange is None:
        trange = np.arange(ims.shape[0])
    
    ims_cropped = ims[trange].median_filter([1,3,3])
    dims = ims_cropped.dims
    
    from scipy.ndimage import percentile_filter
    float_dtype = 'float32'
    
    def my_dff(y, perc, window): 
        baseFunc = lambda x: percentile_filter(x.astype(float_dtype), perc, window, mode='reflect')
        b = baseFunc(y)
        return ((y - b) / (b + .1))

    dff_fun = lambda v: my_dff(v, 15, 800) 
    chop = 16

    reshape_fun = lambda v: v.reshape(dims[0], dims[1], chop, dims[2] // chop)
    montage_fun = lambda v: montage2d(v.T).T

    def im_fun(v):
        return montage_fun(reshape_fun(v).max(3))
    
    out_dtype = 'uint16'
    
    montage_ims = ims_cropped.map_as_series(dff_fun, value_size=ims_cropped.shape[0], dtype=float_dtype, chunk_size='35').map(im_fun)
    dff_lim = montage_ims.map(lambda v: [v.max(), v.min()]).toarray()
    rescale_fun = lambda v: rescale(v, in_range=(dff_lim.min(), dff_lim.max()), out_range=out_dtype).astype(out_dtype)

    montage_rescaled = montage_ims.map(rescale_fun).toarray()[:,-1::-1,:]
    return montage_rescaled
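The core of im_fun above is a plain reshape-then-montage of slab maximum projections; a NumPy-only sketch of that step without thunder/spark, with montage2d as in the shim at the end of Example #1:

import numpy as np
vol = np.random.rand(128, 128, 64)               # an (x, y, z) volume
chop = 16
slabs = vol.reshape(128, 128, chop, 64 // chop)  # split z into 16 slabs
proj = slabs.max(3)                              # max-project each slab -> (128, 128, 16)
mont = montage2d(proj.T).T                       # tile the 16 projections
print(mont.shape)                                # (512, 512)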
Example #12
def plot_batch(image_batch, figure_path, label_batch=None):
    
    all_groups = {label: montage2d(np.stack([img[:, :, 0] for img, lab in img_lab_list], 0))
                  for label, img_lab_list in groupby(sorted(zip(image_batch, label_batch),
                                                            key=lambda x: x[1]),
                                                     lambda x: x[1])}
    fig, c_axs = plt.subplots(1,len(all_groups), figsize=(len(all_groups)*4, 8), dpi = 600)
    for c_ax, (c_label, c_mtg) in zip(c_axs, all_groups.items()):
        c_ax.imshow(c_mtg, cmap='bone')
        c_ax.set_title(c_label)
        c_ax.axis('off')
    fig.savefig(figure_path)
    plt.close()
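A minimal usage sketch for plot_batch with synthetic data, assuming numpy, matplotlib.pyplot, itertools.groupby and montage2d are imported as in the function's module:

import numpy as np
imgs = np.random.rand(8, 28, 28, 1)
labels = np.array(['ship', 'ship', 'ice', 'ice', 'ship', 'ice', 'ship', 'ice'])
plot_batch(imgs, 'preview.png', label_batch=labels)   # one montage per label, saved to disk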
Example #13
def collage(bubbles):
    ex = RGBExtractor()
    ex.shp = (200, 200)
    images = [ex.extract(*p) for p in bubble_params(bubbles)]

    if len(images) == 3:
        return np.vstack(images)

    r, g, b = tuple(montage2d(np.array([a[:, :, i] for a in images]))
                    for i in range(3))
    return np.dstack((r, g, b)).astype(np.uint8)
Example #14
def collage(bubbles):
    ex = RGBExtractor()
    ex.shp = (200, 200)
    images = [ex.extract(*p) for p in bubble_params(bubbles)]

    if len(images) == 3:
        return np.vstack(images)

    r, g, b = tuple(
        montage2d(np.array([a[:, :, i] for a in images])) for i in range(3))
    return np.dstack((r, g, b)).astype(np.uint8)
Example #15
def test_fill():
    n_images = 3
    height, width = 2, 3,
    arr_in = np.arange(n_images * height * width)
    arr_in = arr_in.reshape(n_images, height, width)

    arr_out = montage2d(arr_in, fill=0)

    gt = np.array([[0., 1., 2., 6., 7., 8.], [3., 4., 5., 9., 10., 11.],
                   [12., 13., 14., 0., 0., 0.], [15., 16., 17., 0., 0., 0.]])

    assert_array_equal(arr_out, gt)
Example #16
def show_and_save_montage_of_patches(patches, is_show, is_save, save_filename=None):
    if is_show or is_save:
        if patches.ndim == 3:
            patches_montage = montage2d(patches)
        else:
            patches_montage = patches

        if is_show:
            imshow(patches_montage)
        if is_save and (save_filename is not None):
            save_filename = os.path.join(THIS_FILE_PATH, save_filename)
            imsave(save_filename, patches_montage)
Example #17
def show_montage_filters(imgs_src):
    # show the features extracted from the MRI by the U-Net
    # move the slice axis to the front: (240, 240, 64) -> (64, 240, 240)
    imgs_show = np.moveaxis(imgs_src, -1, 0)
    # print(imgs_tmp.shape)
    slice_montage = montage2d(imgs_show)

    fig, (ax1) = plt.subplots(1, 1, figsize=(12, 12))
    ax1.imshow(slice_montage, cmap='gray')
    ax1.axis('off')
    ax1.set_title('Features')
    plt.show()
Example #18
def show_batch_sample(gen, path=None):
    from matplotlib import pyplot as plt
    from skimage.util.montage import montage2d
    batch_x, batch_y = next(gen)
    x = montage2d(np.squeeze(batch_x[:, :, :, 0]))
    fig = plt.figure(figsize=(15, 15))
    plt.imshow(x, cmap='bone')
    plt.axis('off')
    if path:
        plt.savefig(path)
    else:
        plt.show()
Example #19
def show_and_save_montage_of_patches(patches,
                                     is_show,
                                     is_save,
                                     save_filename=None):
    if is_show or is_save:
        if patches.ndim == 3:
            patches_montage = montage2d(patches)
        else:
            patches_montage = patches

        if is_show:
            imshow(patches_montage)
        if is_save and (save_filename is not None):
            save_filename = os.path.join(THIS_FILE_PATH, save_filename)
            imsave(save_filename, patches_montage)
Example #20
def test_grid_shape():
    n_images = 6
    height, width = 2, 2
    arr_in = np.arange(n_images * height * width, dtype=np.float32)
    arr_in = arr_in.reshape(n_images, height, width)
    arr_out = montage2d(arr_in, grid_shape=(3, 2))
    correct_arr_out = np.array([[0., 1., 4., 5.],
                                [2., 3., 6., 7.],
                                [8., 9., 12., 13.],
                                [10., 11., 14., 15.],
                                [16., 17., 20., 21.],
                                [18., 19., 22., 23.]])
    assert_array_equal(arr_out, correct_arr_out)
Example #21
def test_simple():
    n_images = 3
    height, width = 2, 3,
    arr_in = np.arange(n_images * height * width)
    arr_in = arr_in.reshape(n_images, height, width)

    arr_out = montage2d(arr_in)

    gt = np.array(
        [[  0. ,   1. ,   2. ,   6. ,   7. ,   8. ],
         [  3. ,   4. ,   5. ,   9. ,  10. ,  11. ],
         [ 12. ,  13. ,  14. ,   8.5,   8.5,   8.5],
         [ 15. ,  16. ,  17. ,   8.5,   8.5,   8.5]]
    )

    assert_array_equal(arr_out, gt)
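The 8.5 entries are the default fill: montage2d pads the unused grid cell with the mean of the whole input, and here

print(np.arange(3 * 2 * 3).mean())   # 8.5, the value padding the empty cell above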
Example #22
def montage_wb_ratio(input_image, patch_shape, n_filters, ele_print=False):

    import numpy as np
    from skimage.util.shape import view_as_windows
    from skimage.util.montage import montage2d
    from skimage.filters import threshold_otsu
    from scipy.cluster.vq import kmeans2

    patches = view_as_windows(input_image, patch_shape)
    patches = patches.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
    fb, _ = kmeans2(patches, n_filters, minit='points')
    fb = fb.reshape((-1,) + patch_shape)
    fb_montage = montage2d(fb, fill=False, rescale_intensity=True)

    # cut the montage back into individual filter tiles
    grid = int(np.ceil(np.sqrt(n_filters)))
    elements = np.split(np.hstack(np.split(fb_montage, grid)), grid ** 2, axis=1)
    del elements[n_filters:]

    wb_ratios = []
    bin_elements = []

    for element in elements:
        thresh = threshold_otsu(element)
        binary = element > thresh
        ratio = np.sum(binary) / binary.size
        wb_ratios.append(ratio)

        if ele_print:
            bin_elements.append(binary)

    wb_ratios = sorted(wb_ratios, reverse=True)

    if ele_print:
        show_images(elements)
        show_images(bin_elements)

    return wb_ratios
Example #23
def test_simple():
    n_images = 3
    height, width = 2, 3
    arr_in = np.arange(n_images * height * width)
    arr_in = arr_in.reshape(n_images, height, width)

    arr_out = montage2d(arr_in)

    gt = np.array(
        [
            [0.0, 1.0, 2.0, 6.0, 7.0, 8.0],
            [3.0, 4.0, 5.0, 9.0, 10.0, 11.0],
            [12.0, 13.0, 14.0, 8.5, 8.5, 8.5],
            [15.0, 16.0, 17.0, 8.5, 8.5, 8.5],
        ]
    )

    assert_array_equal(arr_out, gt)
Example #24
def test_rescale_intensity():
    n_images = 4
    height, width = 3, 3
    arr_in = np.arange(n_images * height * width, dtype=np.float32)
    arr_in = arr_in.reshape(n_images, height, width)

    arr_out = montage2d(arr_in, rescale_intensity=True)

    gt = np.array([[0., 0.125, 0.25, 0., 0.125, 0.25],
                   [0.375, 0.5, 0.625, 0.375, 0.5, 0.625],
                   [0.75, 0.875, 1., 0.75, 0.875, 1.],
                   [0., 0.125, 0.25, 0., 0.125, 0.25],
                   [0.375, 0.5, 0.625, 0.375, 0.5, 0.625],
                   [0.75, 0.875, 1., 0.75, 0.875, 1.]])

    assert_equal(arr_out.min(), 0.0)
    assert_equal(arr_out.max(), 1.0)
    assert_array_equal(arr_out, gt)
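With rescale_intensity=True each input image is stretched to [0, 1] on its own before tiling, which is why all four 3x3 tiles above are identical; for instance the second image rescales as

print((np.arange(9, 18, dtype=np.float32).reshape(3, 3) - 9) / 8)   # 0.0 ... 1.0 in steps of 0.125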
Example #25
def check_batch_sample(gen, path=None):
    train_val_df = load_train_csv(cfg)
    train_df, val_df = split_train_val(train_val_df, 0.25)
    train_gen = BaseGenerator(train_df,
                              cfg.train_dir,
                              batch_size=cfg.batch_size,
                              aug_args=cfg.aug_args,
                              target_shape=cfg.input_shape[:2],
                              use_yellow=False)
    batch_x, batch_y = next(train_gen)
    x = montage2d(np.squeeze(batch_x[:, :, :, 0]))
    fig = plt.figure(figsize=(15, 15))
    plt.imshow(x, cmap='bone')
    plt.axis('off')
    if path:
        plt.savefig(path)
    else:
        plt.show()
Example #26
def test_rescale_intensity():
    n_images = 4
    height, width = 3, 3
    arr_in = np.arange(n_images * height * width, dtype=np.float32)
    arr_in = arr_in.reshape(n_images, height, width)

    arr_out = montage2d(arr_in, rescale_intensity=True)

    gt = np.array(
        [[ 0.   ,  0.125,  0.25 ,  0.   ,  0.125,  0.25 ],
         [ 0.375,  0.5  ,  0.625,  0.375,  0.5  ,  0.625],
         [ 0.75 ,  0.875,  1.   ,  0.75 ,  0.875,  1.   ],
         [ 0.   ,  0.125,  0.25 ,  0.   ,  0.125,  0.25 ],
         [ 0.375,  0.5  ,  0.625,  0.375,  0.5  ,  0.625],
         [ 0.75 ,  0.875,  1.   ,  0.75 ,  0.875,  1.   ]]
        )

    assert_equal(arr_out.min(), 0.0)
    assert_equal(arr_out.max(), 1.0)
    assert_array_equal(arr_out, gt)
Example #27
def coronal_montage(img, n_rows=4, n_cols=4, flip_x=False, flip_y=True, flip_z=True):
    """
    Create a montage of all coronal (XZ) slices from a 3D image

    Parameters
    ----------
    img: 3D image to montage
    n_rows: number of montage rows
    n_cols: number of montage columns
    flip_x, flip_y, flip_z: whether to flip the volume along each axis before slicing

    Returns
    -------

    cor_mont: coronal slice montage of img
    """

    # Total number of sections to extract
    n = n_rows * n_cols

    # Source image dimensions
    nx, ny, nz = img.shape

    # Coronal (XZ) sections
    yy = np.linspace(0, ny-1, n).astype(int)
    cors = img[:,yy,:]

    if flip_x:
        cors = np.flip(cors, axis=0)
    if flip_y:
        cors = np.flip(cors, axis=1)
    if flip_z:
        cors = np.flip(cors, axis=2)

    # Move the slice axis to the front for montage2d: (n_slices, nz, nx)
    cor_stack = np.transpose(cors, (1, 2, 0))

    # Construct montage of coronal sections
    cor_mont = montage2d(cor_stack, fill=0, grid_shape=(n_rows, n_cols))

    return cor_mont
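A minimal usage sketch, assuming a NumPy (x, y, z) volume and montage2d imported as in the shim at the end of Example #1:

import numpy as np
vol = np.random.rand(64, 96, 48)                 # synthetic (x, y, z) volume
mont = coronal_montage(vol, n_rows=4, n_cols=4)
print(mont.shape)                                # (4 * 48, 4 * 64) = (192, 256)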
Example #28
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
loaded_model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=opt,
              metrics=['accuracy'])
print("Loaded model from disk")
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
score = loaded_model.evaluate(x_train, y_train, verbose=0)
print('Train loss:', score[0])
print('Train accuracy:', score[1])
#%%
from skimage.util.montage import montage2d
predictions = loaded_model.predict(all_masks_gt, batch_size=32, verbose=1)
#%%
classes = np.argmax(predictions,axis=1)
#%%
pl.imshow(montage2d(all_masks_gt[np.where((classes!=2)&((labels_gt==2)))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((classes==1)&((labels_gt!=1)))[0]].squeeze()))
#%%

cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:,0]<0.5)[0]])).play(gain=3., magnification = 5, fr = 10)
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt==2)&(predictions[:,1]>=0.05))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt==2)&(predictions[:,1]>0.05))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((predictions[:,1]>0.1))[0]].squeeze()))

Example #29
def test_error_ndim():
    arr_error = np.random.randn(1, 2, 3, 4)
    with pytest.raises(AssertionError):
        montage2d(arr_error)
Example #30
                    m1 = cm.movie(np.concatenate([cv2.copyMakeBorder(img.T,xx,xx,yy,yy, cv2.BORDER_CONSTANT,0).T[None,:860,:560] for img in m],0))
                    m1.save('/mnt/ceph/neuro/zebra/05292014Fish1-4/thresholded_components_movie_' + str(ID) + '.hdf5')

#%% load each movie and create a super movie
        movies = []
        for ID in range(1,41):
            print(ID)
            movies.append(cm.load('/mnt/ceph/neuro/zebra/05292014Fish1-4/thresholded_components_movie_' + str(ID) + '.hdf5').astype(np.float32))
        movies = np.array(movies)
        movies = movies.transpose([1,0,3,2])
#%%
        montmov = []
        from skimage.util.montage import montage2d
        for idx,fr in enumerate(movies):
            print(idx)
            montmov.append(montage2d(fr,grid_shape=(5,8)).astype(np.float32))
        movies = []
        montmov = np.array(montmov)
        montmov = cm.movie(montmov)
        #%%
        montmov = cm.load('/mnt/ceph/neuro/zebra/05292014Fish1-4/thresholded_components_movie_all.hdf5')
#%% save frames in avi

        import tifffile as tiff
        for idx,vid_frame in enumerate(montmov):
            print(idx)
            tiff.imsave('/mnt/ceph/neuro/zebra/05292014Fish1-4/frames/'+str(idx)+'_.tif',vid_frame)

#%% Plane 11
if ploton:
    from sklearn.preprocessing import normalize
Example #31
def experiment(plot_path, ds_name, no_aug, affine_std, scale_u_range,
               scale_x_range, scale_y_range, xlat_range, hflip, intens_flip,
               intens_scale_range, intens_offset_range, grid_h, grid_w, seed):
    settings = locals().copy()

    import os
    import sys
    import cmdline_helpers

    intens_scale_range_lower, intens_scale_range_upper = cmdline_helpers.colon_separated_range(
        intens_scale_range)
    intens_offset_range_lower, intens_offset_range_upper = cmdline_helpers.colon_separated_range(
        intens_offset_range)
    scale_u_range = cmdline_helpers.colon_separated_range(scale_u_range)
    scale_x_range = cmdline_helpers.colon_separated_range(scale_x_range)
    scale_y_range = cmdline_helpers.colon_separated_range(scale_y_range)

    import numpy as np
    from skimage.util.montage import montage2d
    from PIL import Image
    from batchup import data_source
    import data_loaders
    import augmentation

    n_chn = 0

    if ds_name == 'mnist':
        d_source = data_loaders.load_mnist(zero_centre=False)
    elif ds_name == 'usps':
        d_source = data_loaders.load_usps(zero_centre=False, scale28=True)
    elif ds_name == 'svhn_grey':
        d_source = data_loaders.load_svhn(zero_centre=False, greyscale=True)
    elif ds_name == 'svhn':
        d_source = data_loaders.load_svhn(zero_centre=False, greyscale=False)
    elif ds_name == 'cifar':
        d_source = data_loaders.load_cifar10()
    elif ds_name == 'stl':
        d_source = data_loaders.load_stl()
    elif ds_name == 'syndigits':
        d_source = data_loaders.load_syn_digits(zero_centre=False,
                                                greyscale=False)
    elif ds_name == 'synsigns':
        d_source = data_loaders.load_syn_signs(zero_centre=False,
                                               greyscale=False)
    elif ds_name == 'gtsrb':
        d_source = data_loaders.load_gtsrb(zero_centre=False, greyscale=False)
    else:
        print('Unknown dataset \'{}\''.format(ds_name))
        return

    # Delete the training ground truths as we should not be using them
    del d_source.train_y

    n_classes = d_source.n_classes

    print('Loaded data')

    src_aug = augmentation.ImageAugmentation(
        hflip,
        xlat_range,
        affine_std,
        intens_flip=intens_flip,
        intens_scale_range_lower=intens_scale_range_lower,
        intens_scale_range_upper=intens_scale_range_upper,
        intens_offset_range_lower=intens_offset_range_lower,
        intens_offset_range_upper=intens_offset_range_upper,
        scale_u_range=scale_u_range,
        scale_x_range=scale_x_range,
        scale_y_range=scale_y_range)

    def augment(X):
        if not no_aug:
            X = src_aug.augment(X)
        return X,

    rampup_weight_in_list = [0]

    print('Rendering...')
    train_ds = data_source.ArrayDataSource([d_source.train_X],
                                           repeats=-1).map(augment)
    n_samples = len(d_source.train_X)

    if seed != 0:
        shuffle_rng = np.random.RandomState(seed)
    else:
        shuffle_rng = np.random

    batch_size = grid_h * grid_w
    display_batch_iter = train_ds.batch_iterator(batch_size=batch_size,
                                                 shuffle=shuffle_rng)

    best_src_test_err = 1.0

    x_batch, = next(display_batch_iter)

    montage = []
    for chn_i in range(x_batch.shape[1]):
        m = montage2d(x_batch[:, chn_i, :, :], grid_shape=(grid_h, grid_w))
        montage.append(m[:, :, None])
    montage = np.concatenate(montage, axis=2)

    if montage.shape[2] == 1:
        montage = montage[:, :, 0]

    lower = min(0.0, montage.min())
    upper = max(1.0, montage.max())
    montage = (montage - lower) / (upper - lower)
    montage = (np.clip(montage, 0.0, 1.0) * 255.0).astype(np.uint8)

    Image.fromarray(montage).save(plot_path)
Example #32
        kernel = gabor_kernel(frequency, theta=theta)
        params = "theta=%d,\nfrequency=%.2f" % (theta * 180 / np.pi, frequency)
        kernel_params.append(params)
        # Save kernel and the power image for each image
        results.append((kernel, [power(image_gray, kernel)]))

output = power(image_gray, kernel)
# astro = color.rgb2gray(data.astronaut())
astro = output

# -- filterbank1 on original image
patches1 = view_as_windows(astro, patch_shape)
patches1 = patches1.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
fb1, _ = kmeans2(patches1, n_filters, minit="points")
fb1 = fb1.reshape((-1,) + patch_shape)
fb1_montage = montage2d(fb1, rescale_intensity=True)

# -- filterbank2 LGN-like image
astro_dog = ndi.gaussian_filter(astro, 0.5) - ndi.gaussian_filter(astro, 1)
patches2 = view_as_windows(astro_dog, patch_shape)
patches2 = patches2.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
fb2, _ = kmeans2(patches2, n_filters, minit="points")
fb2 = fb2.reshape((-1,) + patch_shape)
fb2_montage = montage2d(fb2, rescale_intensity=True)

# --
fig, axes = plt.subplots(2, 2, figsize=(7, 6))
ax0, ax1, ax2, ax3 = axes.ravel()

ax0.imshow(astro, cmap=plt.cm.gray)
ax0.set_title("Image (original)")
Example #33
def plot_weights(weights, num_h_units, num_x_units):
    size = int(math.sqrt(num_x_units))
    hidden_units = weights.reshape(num_h_units, size, size)
    display = montage2d(hidden_units)
    plt.imshow(display, cmap='binary')
    plt.show()
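A minimal usage sketch, assuming math, matplotlib.pyplot (as plt) and montage2d are imported as in the function's module:

import numpy as np
w = np.random.randn(100, 784)                       # e.g. weights of a 784 -> 100 layer
plot_weights(w, num_h_units=100, num_x_units=784)   # shows a 10 x 10 grid of 28x28 weight images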
Example #34
annotated = locate(data)

plt.title("Augmented")
plt.imshow(annotated)
plt.show()
#%% visualize_results
num_sampl = 30000
predictions = model.predict(
    all_masks_gt[:num_sampl, :, :, None], batch_size=32, verbose=1)
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl, 0] >= 0.95)[
         0]])).play(gain=3., magnification=5, fr=10)
#%%
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl, 1] >= 0.95)[
         0]])).play(gain=3., magnification=5, fr=10)
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl] == 0) & (
    predictions[:num_sampl, 1] > 0.95))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl] == 1) & (
    predictions[:num_sampl, 0] > 0.95))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where(
    (predictions[:num_sampl, 0] > 0.95))[0]].squeeze()))
#%% retrieve and test
json_file = open(json_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(model_path)
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
loaded_model.compile(loss=keras.losses.categorical_crossentropy,
                     optimizer=opt,
                     metrics=['accuracy'])
print("Loaded model from disk")
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
score = loaded_model.evaluate(x_train, y_train, verbose=0)
print('Train loss:', score[0])
print('Train accuracy:', score[1])
#%%
from skimage.util.montage import montage2d
predictions = loaded_model.predict(all_masks_gt, batch_size=32, verbose=1)
#%%
classes = np.argmax(predictions, axis=1)
#%%
pl.imshow(montage2d(all_masks_gt[np.where(
    (classes != 2) & ((labels_gt == 2)))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where(
    (classes == 1) & ((labels_gt != 1)))[0]].squeeze()))
#%%

cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:, 0] < 0.5)[0]])).play(
    gain=3., magnification=5, fr=10)
#%%
pl.imshow(montage2d(all_masks_gt[np.where(
    (labels_gt == 2) & (predictions[:, 1] >= 0.05))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where(
    (labels_gt == 2) & (predictions[:, 1] > 0.05))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where(
    (predictions[:, 1] > 0.1))[0]].squeeze()))
Example #37
EDGE_CROP = 16
NB_EPOCHS = 70
GAUSSIAN_NOISE = 0.1
UPSAMPLE_MODE = 'SIMPLE'
# downsampling inside the network
NET_SCALING = None
# downsampling in preprocessing
IMG_SCALING = (1, 1)
# number of validation images to use
VALID_IMG_COUNT = 400
# maximum number of steps_per_epoch in training
MAX_TRAIN_STEPS = 200
AUGMENT_BRIGHTNESS = False
dropout = 0.4
montage_rgb = lambda x: np.stack(
    [montage2d(x[:, :, :, i]) for i in range(x.shape[3])], -1)

# get current working directory
print()
# print(os.getcwd())
print()
ship_dir = '/home/speri/airbus_detection/'
train_image_dir = os.path.join(ship_dir, 'train_v2')
test_image_dir = os.path.join(ship_dir, 'test_v2')


def multi_rle_encode(img):
    labels = label(img[:, :, 0])
    return [rle_encode(labels == k) for k in np.unique(labels[labels > 0])]

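A quick sketch of what the montage_rgb helper defined above produces (synthetic batch; assumes np and montage2d are imported earlier in this script):

batch = np.random.rand(9, 32, 32, 3)   # (n_images, height, width, channels)
rgb_mosaic = montage_rgb(batch)
print(rgb_mosaic.shape)                # (96, 96, 3)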
Example #38
def test_error_ndim():
    arr_error = np.random.randn(1, 2, 3, 4)
    with pytest.raises(AssertionError):
        montage2d(arr_error)
Example #39
    print(ID)
pl.tight_layout()
#%% predictions for Plane 11
from skimage.util.montage import montage2d
predictions, final_crops = cm.components_evaluation.evaluate_components_CNN(
    Ab[()][:, gnb:],
    dims,
    np.array(gSig).astype(int),
    model_name='model/cnn_model',
    patch_size=50,
    loaded_model=None,
    isGPU=False)
#%%
idx = np.argsort(predictions[:, 0])[:10]  # [[0,1,2,3,5,9]]
Ab_part = Ab[()][:, gnb:][:, idx]
pl.imshow(montage2d(final_crops[idx]))
pl.figure()
crd = cm.utils.visualization.plot_contours(
        Ab_part.toarray().reshape(tuple(dims)+(-1,), order='F').transpose([1, 0, 2]).\
        reshape((dims[1]*dims[0], -1), order='F'), cv2.resize(Cn_, tuple(dims[::-1])).T, thr=0.9, vmax=0.95,
        display_numbers=True)
pl.figure()
crd = cm.utils.visualization.plot_contours(
        Ab_part.toarray().reshape(tuple(dims)+(-1,), order='F').transpose([1, 0, 2]).\
        reshape((dims[1]*dims[0], -1), order='F'), img, thr=0.9, display_numbers=True, vmax=.001)
#%%
count = 0
for cf, sp_c in zip(Cf[idx + gnb], final_crops[idx]):
    pl.subplot(10, 2, 2 * count + 1)
    pl.imshow(sp_c[10:-10, 10:-10])
    pl.axis('off')
    pl.subplot(10, 2, 2 * count + 2)
    pl.plot(cf)
Example #40
ax1.set_title('Band 1')
ax2.matshow(train_images[0,:,:,1])
ax2.set_title('Band 2')


# # Training Overview
# Here we use the montage functionality of skimage to make preview tiles of randomly selected icebergs and ships. This helps us get a better idea about the diversity in the data.

# In[ ]:


fig, (ax1s, ax2s) = plt.subplots(2,2, figsize = (8,8))
obj_list = dict(ships = train_df.query('is_iceberg==0').sample(16).index,
     icebergs = train_df.query('is_iceberg==1').sample(16).index)
for ax1, ax2, (obj_type, idx_list) in zip(ax1s, ax2s, obj_list.items()):
    ax1.imshow(montage2d(train_images[idx_list,:,:,0]))
    ax1.set_title('%s Band 1' % obj_type)
    ax1.axis('off')
    ax2.imshow(montage2d(train_images[idx_list,:,:,1]))
    ax2.set_title('%s Band 2' % obj_type)
    ax2.axis('off')


# # Testing Data Overview
# Here we see how different the test data looks from the training data: it is much messier (multiple objects, different skews and angles). Clearly we will need some augmentation to do well here

# In[ ]:


fig, (ax1, ax2) = plt.subplots(1,2, figsize = (12,12))
idx_list = test_df.sample(49).index
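The snippet is cut off here; a plausible continuation that mirrors the training cells above (test_images is an assumed array of test tiles, analogous to train_images, not shown in the snippet):

ax1.imshow(montage2d(test_images[idx_list, :, :, 0]))   # test_images: assumed, not shown above
ax1.set_title('Test Band 1')
ax1.axis('off')
ax2.imshow(montage2d(test_images[idx_list, :, :, 1]))
ax2.set_title('Test Band 2')
ax2.axis('off')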
Example #41
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(model_name + '.h5')
    print("Loaded model from disk")
    return loaded_model.predict(msks, batch_size=32, verbose=1)


predictions = run_classifier(all_masks_gt)
#%% show results classifier
pl.figure(figsize=(20, 30))
is_positive = 0

for a in grouper(100, np.where(predictions[:, is_positive] > .95)[0]):
    a_ = [aa for aa in a if aa is not None]
    img_mont_ = all_masks_gt[np.array(a_)].squeeze()
    shps_img = img_mont_.shape
    img_mont = montage2d(img_mont_)
    shps_img_mont = np.array(img_mont.shape) // 50
    pl.imshow(img_mont)
    inp = pl.pause(.1)
    #     pl.cla()
    break

#%% Curate data. Remove wrong negatives or wrong positives
is_positive = 0  # should be 0 when processing negatives
to_be_checked = np.where(labels_gt == is_positive)[0]

wrong = []
count = 0
for a in grouper(50, to_be_checked):

    a_ = [aa for aa in a if aa is not None]
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(model_name + '.h5')
    print("Loaded model from disk")
    return loaded_model.predict(msks, batch_size=32, verbose=1)


predictions = run_classifier(all_masks_gt)
#%% show results classifier
pl.figure(figsize=(20, 30))
is_positive = 0

for a in grouper(100, np.where(predictions[:, is_positive] > .95)[0]):
    a_ = [aa for aa in a if aa is not None]
    img_mont_ = all_masks_gt[np.array(a_)].squeeze()
    shps_img = img_mont_.shape
    img_mont = montage2d(img_mont_)
    shps_img_mont = np.array(img_mont.shape) // 50
    pl.imshow(img_mont)
    inp = pl.pause(.1)
#     pl.cla()
    break

#%% Curate data. Remove wrong negatives or wrong positives
is_positive = 2  # should be 0 when processing negatives
to_be_checked = np.where(labels_gt == is_positive)[0]
to_be_checked = to_be_checked[to_be_checked < 1000]
wrong = []
count = 0
for a in grouper(50, to_be_checked):

    a_ = [aa for aa in a if aa is not None]
from skimage.util.shape import view_as_windows
from skimage.util.montage import montage2d

np.random.seed(42)

patch_shape = 8, 8
n_filters = 49

astro = color.rgb2gray(data.astronaut())

# -- filterbank1 on original image
patches1 = view_as_windows(astro, patch_shape)
patches1 = patches1.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
fb1, _ = kmeans2(patches1, n_filters, minit='points')
fb1 = fb1.reshape((-1, ) + patch_shape)
fb1_montage = montage2d(fb1, rescale_intensity=True)

# -- filterbank2 LGN-like image
astro_dog = ndi.gaussian_filter(astro, .5) - ndi.gaussian_filter(astro, 1)
patches2 = view_as_windows(astro_dog, patch_shape)
patches2 = patches2.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
fb2, _ = kmeans2(patches2, n_filters, minit='points')
fb2 = fb2.reshape((-1, ) + patch_shape)
fb2_montage = montage2d(fb2, rescale_intensity=True)

# --
fig, axes = plt.subplots(2, 2, figsize=(7, 6))
ax0, ax1, ax2, ax3 = axes.ravel()

ax0.imshow(astro, cmap=plt.cm.gray)
ax0.set_title("Image (original)")
Example #44
			print('The ROI of this picture is positioned too far left, so the last cluster areas exceed the image border. This picture will not be processed into clusters.')
			break_parentloop = True
		    elif row_start_stop_cells[row_number, 0] < 0:
			print('The ROI of this picture is positioned too far right, so the last cluster areas exceed the image border. This picture will not be processed into clusters.')
			break_parentloop = True
	    
	    if break_parentloop:
		continue
	    
	    all_squares = all_squares[1:] # remove first zeroes frame (created just for the vstack to work at the first iteration)
	    all_squares = np.random.permutation(all_squares)
	    	    
	    pad_value = np.mean(im[:cell_size,:cell_size]).astype(int)
	    reconstructed_im = np.ones((leadingzeros_y-rest_y_u,np.shape(im)[1])) * pad_value
	    scrambled_image = np.zeros((1, np.shape(im)[1]))
	    
	    for row_number, sub_im_row in enumerate(sub_im_rows):
		shuffled_squares = montage2d(all_squares[:row_start_stop_cells[row_number, 2]], grid_shape=(1,row_start_stop_cells[row_number, 2]))
		all_squares = all_squares[row_start_stop_cells[row_number, 2]:]
		padded_row = np.pad(shuffled_squares, ((0,0),(row_start_stop_cells[row_number,0],row_start_stop_cells[row_number,1])), 'constant' ,constant_values=pad_value)
		scrambled_image = np.vstack((scrambled_image, padded_row))
		
	    scrambled_image = scrambled_image[1:]
	    scrambled_image = np.pad(scrambled_image, ((leadingzeros_y-rest_y_u, np.shape(im)[0]-(leadingzeros_y+nonzero_y+rest_y_d)),(0,0)), 'constant' ,constant_values=pad_value)
		    
	    #~ imgplot = plt.imshow(scrambled_image, cmap = cm.Greys_r, interpolation='nearest')
	    #~ plt.show()
	    
	    toimage(scrambled_image, cmin=0, cmax=255).save(input_folder+path.splitext(pic)[0]+'_cell'+str("%03d" % (cell_size))+'rand.jpg') # use this instead of imsave to avoid rescaling to maximal dynamic range
	    print('Done!')
loaded_model.load_weights(model_path)
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
loaded_model.compile(loss=keras.losses.categorical_crossentropy,
                     optimizer=opt,
                     metrics=['accuracy'])
print("Loaded model from disk")
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
score = loaded_model.evaluate(x_train, y_train, verbose=0)
print('Train loss:', score[0])
print('Train accuracy:', score[1])
#%%
from skimage.util.montage import montage2d
#%%
predictions = loaded_model.predict(all_masks_gt, batch_size=32, verbose=1)
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:, 1] > 0.1)[0]])).play(
    gain=3., magnification=5, fr=10)
#%%
for i in range(3):
    pl.subplot(2,2,i+1)
    pl.imshow(montage2d(all_masks_gt[np.where((labels_gt ==i) & (
            predictions[:, i] < 0.1) & (predictions[:, i] < 0.1))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where(labels_gt == 3)[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where(
    predictions[:, 2] <= 0.25)[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt == 3) & (
predictions[:, 3] <= 0.25) & (predictions[:, 3] <= 0.25))[0]].squeeze()))
    total_labels = np.array(total_labels)[rand_perm]
    np.savez('use_cases/edge-cutter/residual_crops_all_classes.npz',
             all_masks_gt=total_crops, labels_gt=total_labels)
#%%
# the data, shuffled and split between train and test sets
with np.load('use_cases/edge-cutter/residual_crops_all_classes.npz') as ld:
    all_masks_gt = ld['all_masks_gt']
    labels_gt = ld['labels_gt']
#%%
num_sampl = 30000
predictions = loaded_model.predict(
    all_masks_gt[:num_sampl, :, :, None], batch_size=32, verbose=1)
#cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl,1]<0.1)[0]])).play(gain=3., magnification = 5, fr = 10)
#%%
from skimage.util.montage import montage2d
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl] == 2) & (
    predictions[:num_sampl, 0] <= 0.5))[0]].squeeze()))
#%%
fname_new = '/mnt/ceph/neuro/labeling/neurofinder.03.00.test/images/final_map/Yr_d1_498_d2_467_d3_1_order_C_frames_2250_.mmap'
gSig = [8, 8]
gt_file = os.path.join(os.path.split(fname_new)[0], os.path.split(
    fname_new)[1][:-4] + 'match_masks.npz')
base_name = fname_new.split('/')[5]
maxT = 8100
with np.load(gt_file, encoding='latin1') as ld:
    print(ld.keys())
    locals().update(ld)
    A_gt = scipy.sparse.coo_matrix(A_gt[()])
    dims = (d1, d2)
    C_gt = C_gt[:, :maxT]
    YrA_gt = YrA_gt[:, :maxT]
    f_gt = f_gt[:, :maxT]
Example #47
print("Shape of classes in validation dataset {}".format(
    data.validation.labels.shape))

#sample image
sample = data.train.images[5].reshape(28, 28)
plt.imshow(sample, cmap='gray')
plt.title('Sample image')
plt.axis('off')
plt.show()

#function to display montage of input data
imgs = data.train.images[0:100]
montage_img = np.zeros([100, 28, 28])
for i in range(len(imgs)):
    montage_img[i] = imgs[i].reshape(28, 28)
plt.imshow(montage2d(montage_img), cmap='gray')
plt.title('Sample of input data')
plt.axis('off')
plt.show()

images = data.train.images
images = np.reshape(images, [images.shape[0], 28, 28])
mean_img = np.mean(images, axis=0)
std_img = np.std(images, axis=0)

plt.imshow(mean_img)
plt.title('Mean image of the data')
plt.colorbar()
plt.axis('off')
plt.show()
                     metrics=['accuracy'])
print("Loaded model from disk")
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
score = loaded_model.evaluate(x_train, y_train, verbose=0)
print('Train loss:', score[0])
print('Train accuracy:', score[1])
#%%
from skimage.util.montage import montage2d
#%%
predictions = loaded_model.predict(all_masks_gt, batch_size=32, verbose=1)
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:, 1] > 0.1)[0]])).play(
    gain=3., magnification=5, fr=10)
#%%
for i in range(3):
    pl.subplot(2, 2, i + 1)
    pl.imshow(
        montage2d(
            all_masks_gt[np.where((labels_gt == i) & (predictions[:, i] < 0.1)
                                  & (predictions[:, i] < 0.1))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where(labels_gt == 3)[0]].squeeze()))
#%%
pl.imshow(
    montage2d(all_masks_gt[np.where(predictions[:, 2] <= 0.25)[0]].squeeze()))
#%%
pl.imshow(
    montage2d(
        all_masks_gt[np.where((labels_gt == 3) & (predictions[:, 3] <= 0.25)
                              & (predictions[:, 3] <= 0.25))[0]].squeeze()))
Example #49
    rand_perm = np.random.permutation(len(total_crops))    
    total_crops =  np.array(total_crops)[rand_perm]
    total_labels = np.array(total_labels)[rand_perm]
    np.savez('use_cases/edge-cutter/residual_crops_all_classes.npz',all_masks_gt = total_crops, labels_gt = total_labels)
#%%
# the data, shuffled and split between train and test sets
with np.load('use_cases/edge-cutter/residual_crops_all_classes.npz') as ld:
    all_masks_gt = ld['all_masks_gt']
    labels_gt = ld['labels_gt']    
#%%
num_sampl = 30000
predictions = loaded_model.predict(all_masks_gt[:num_sampl,:,:,None], batch_size=32, verbose=1)
#cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl,1]<0.1)[0]])).play(gain=3., magnification = 5, fr = 10)
#%%
from skimage.util.montage import montage2d
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl]==2)&(predictions[:num_sampl,0]<=0.5))[0]].squeeze()))
#%%
fname_new = '/mnt/ceph/neuro/labeling/neurofinder.03.00.test/images/final_map/Yr_d1_498_d2_467_d3_1_order_C_frames_2250_.mmap'
gSig = [8,8]
gt_file = os.path.join(os.path.split(fname_new)[0], os.path.split(fname_new)[1][:-4] + 'match_masks.npz')
base_name = fname_new.split('/')[5]
maxT = 8100
with np.load(gt_file, encoding = 'latin1') as ld:
    print(ld.keys())
    locals().update(ld)
    A_gt = scipy.sparse.coo_matrix(A_gt[()])
    dims = (d1,d2)
    C_gt = C_gt[:,:maxT]
    YrA_gt = YrA_gt[:,:maxT]
    f_gt = f_gt[:,:maxT]
    try:
Example #50
def test_error_ndim():
    arr_error = np.random.randn(1, 2, 3, 4)
    with pytest.raises(AssertionError):
        montage2d(arr_error)