Example #1
def laplaceFilter(fileName,num):

    workData = getData(fileName,num)
    lapFilter = ndimage.laplace(workData)
    lapFilter = workData + lapFilter
    mfSave = Image.fromarray(lapFilter)
    mfSave = mfSave.convert('1')
    mfSave.save('Laplace Filter.jpg')
    imageGUI.imdisplay('Laplace Filter.jpg','Laplace Filter',1)
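A note on the convention used above: scipy's laplace kernel has a negative centre, so the usual sharpening identity subtracts the response rather than adding it. A minimal standalone sketch (synthetic array, no getData/imageGUI helpers) of the subtractive form:

import numpy as np
from scipy import ndimage

img = np.zeros((64, 64))
img[16:48, 16:48] = 200.0           # bright square on a dark background
sharp = img - ndimage.laplace(img)  # negative-centre kernel: subtract to sharpen
print(sharp.min(), sharp.max())     # over/undershoot appears along the edges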
Example #2
 def vector_laplace(arr, out=None):
     """ apply vector Laplacian operator to array `arr` """
     if out is None:
         out = np.empty(shape_out)
     for i in range(dim):
         ndimage.laplace(arr[i], output=out[i], **args)
     return out * scaling
Example #3
 def transportRates(self,
                    signalRates,
                    signalLevels,
                    boundcond='constant',
                    mode='normal'):
     # Compute diffusion term, laplacian of grid levels in signalLevels,
     # write into signalRates
     #
     # mode='greens' - do not use initLevels as these don't apply!
     signalRatesView = signalRates.reshape(self.gridDim)
     signalLevelsView = signalLevels.reshape(self.gridDim)
     advKernel = numpy.zeros((3, 3, 3))
     advKernel[:, 1, 1] = [-0.5, 0, 0.5]
     for s in range(self.nSignals):
         if boundcond == 'constant' and self.initLevels and mode != 'greens':
             boundval = self.initLevels[s]
         else:
             boundval = 0.0
         if self.advRates:
             # Advection term = du/dx
             # Note: always use 'nearest' edge case, this gives central
             # differences in middle, and forward/backward differences on edges
             convolve(signalLevelsView[s],
                      advKernel * self.advRates[s],
                      output=signalRatesView[s],
                      mode='nearest')
             # Diffusion term = \del^2u
             # Use edge case from boundary conditions for diffusion
             signalRatesView[s] += laplace(signalLevelsView[s], None, mode=boundcond, cval=boundval) * \
                                                                                 self.diffRates[s] / 6.0
         else:
             signalRatesView[s] = laplace(signalLevelsView[s], None, mode=boundcond, cval=boundval) \
                                                                                * self.diffRates[s] / 6.0
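For reference, a standalone check (made-up signal) that the [-0.5, 0, 0.5] stencil in advKernel produces central differences in the interior; correlate1d applies the stencil unflipped, whereas convolve reverses it:

import numpy as np
from scipy import ndimage

x = np.linspace(0.0, 1.0, 11) ** 2
dx = ndimage.correlate1d(x, [-0.5, 0.0, 0.5], mode='nearest')
print(np.allclose(dx[1:-1], np.gradient(x)[1:-1]))  # True: central differences inside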
Example #4
def test_multiple_modes():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying a single mode.
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    mode1 = 'reflect'
    mode2 = ['reflect', 'reflect']

    assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
                 sndi.gaussian_filter(arr, 1, mode=mode2))
    assert_equal(sndi.prewitt(arr, mode=mode1),
                 sndi.prewitt(arr, mode=mode2))
    assert_equal(sndi.sobel(arr, mode=mode1),
                 sndi.sobel(arr, mode=mode2))
    assert_equal(sndi.laplace(arr, mode=mode1),
                 sndi.laplace(arr, mode=mode2))
    assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
                 sndi.gaussian_laplace(arr, 1, mode=mode2))
    assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
                 sndi.maximum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
                 sndi.minimum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
                 sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
    assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
                 sndi.uniform_filter(arr, 5, mode=mode2))
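A small aside, not part of the test suite: the mode list supplies one boundary rule per axis, so identical entries reproduce the scalar mode while mixed entries change only the edge values:

import numpy as np
from scipy import ndimage as sndi

arr = np.array([[1., 0., 0.],
                [1., 1., 0.],
                [0., 0., 0.]])
same = sndi.laplace(arr, mode=['reflect', 'reflect'])
print(np.array_equal(same, sndi.laplace(arr, mode='reflect')))            # True
print(np.array_equal(same, sndi.laplace(arr, mode=['reflect', 'wrap'])))  # False: edges differ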
Example #6
 def transportRates(self, signalRates, signalLevels, boundcond='constant', mode='normal'):
     # Compute diffusion term, laplacian of grid levels in signalLevels, 
     # write into signalRates
     #
     # mode='greens' - do not use initLevels as these don't apply!
     signalRatesView = signalRates.reshape(self.gridDim)
     signalLevelsView = signalLevels.reshape(self.gridDim)
     advKernel = numpy.zeros((3,3,3))
     advKernel[:,1,1] = [-0.5,0,0.5]
     for s in range(self.nSignals):
         if boundcond=='constant' and self.initLevels and mode!='greens':
             boundval = self.initLevels[s]
         else:
             boundval = 0.0 
         if self.advRates:
             # Advection term = du/dx
             # Note: always use 'nearest' edge case, this gives central 
             # differences in middle, and forward/backward differences on edges
             convolve(signalLevelsView[s], advKernel*self.advRates[s], output=signalRatesView[s], mode='nearest')
             # Diffusion term = \del^2u
             # Use edge case from boundary conditions for diffusion
             signalRatesView[s] += laplace(signalLevelsView[s], None, mode=boundcond, cval=boundval) * \
                                                                                 self.diffRates[s] / 6.0
         else:
             signalRatesView[s] = laplace(signalLevelsView[s], None, mode=boundcond, cval=boundval) \
                                                                                * self.diffRates[s] / 6.0
Example #7
def isotropic(img, C=0.1, t=5):
    C = 1 - ((10 - C) / 10)
    lapla = ndimage.laplace(img)
    iso = img + (lapla * C)
    for i in range(0, t - 1):
        iso = iso + (ndimage.laplace(iso) * C)

    return iso
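The first line rescales C to C/10, and each loop pass is one explicit diffusion step, iso <- iso + C*laplace(iso), so the function effectively runs the heat equation for t steps. A compact standalone sketch of the same idea (hypothetical input):

import numpy as np
from scipy import ndimage

img = np.random.default_rng(0).random((32, 32))
iso = img.copy()
for _ in range(5):                     # t = 5 explicit diffusion steps
    iso += 0.1 * ndimage.laplace(iso)  # step size 0.1; stability needs <= 0.25 in 2-D
print(img.std(), iso.std())            # the field gets smoother each step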
Example #8
def meshToBinvox(url,
                 ext="_pre.ply",
                 dims=128,
                 doFilter=False,
                 normVals=None,
                 dimVals=None,
                 axis='xyz'):
    shape = (dims, dims, dims)
    data = np.zeros(shape, dtype=bool)
    translate = (0, 0, 0)
    scale = 1
    axis_order = axis
    bv = binvox_rw.Voxels(data, shape, translate, scale, axis_order)

    print("Reading from: " + url)
    mesh = trimesh.load(url)

    if normVals is not None and dimVals is not None:
        for vert in mesh.vertices:
            vert[0] = remap(vert[0], dimVals[0], dimVals[1],
                            dims * normVals[0], (dims * normVals[1]) - 1)
            vert[1] = remap(vert[1], dimVals[2], dimVals[3],
                            dims * normVals[2], (dims * normVals[3]) - 1)
            vert[2] = remap(vert[2], dimVals[4], dimVals[5],
                            dims * normVals[4], (dims * normVals[5]) - 1)
    else:
        mesh.vertices = scale_numpy_array(mesh.vertices, 0, dims - 1)

    newMeshUrl = changeExtension(url, ext)
    mesh.export(newMeshUrl)

    for vert in mesh.vertices:
        x = dims - 1 - int(vert[0])
        y = int(vert[1])
        z = int(vert[2])
        data[x][y][z] = True

    if doFilter:
        for i in range(0, 1):  # 1
            nd.binary_dilation(bv.data.copy(), output=bv.data)

        for i in range(0, 3):  # 3
            nd.sobel(bv.data.copy(), output=bv.data)

        nd.median_filter(bv.data.copy(), size=4, output=bv.data)  # 4

        for i in range(0, 2):  # 2
            nd.laplace(bv.data.copy(), output=bv.data)

        for i in range(0, 0):  # 0
            nd.binary_erosion(bv.data.copy(), output=bv.data)

    outputUrl = changeExtension(url, ".binvox")
    print("Writing to: " + outputUrl)

    with open(outputUrl, 'wb') as f:
        bv.write(f)
Example #9
def _inpaint_biharmonic_single_channel(mask, out, limits):
    # Initialize sparse matrices
    matrix_unknown = sparse.lil_matrix((np.sum(mask), out.size))
    matrix_known = sparse.lil_matrix((np.sum(mask), out.size))

    # Find indices of masked points in the flattened array
    mask_i = np.ravel_multi_index(np.where(mask), mask.shape)

    # Find masked points and prepare them to be easily enumerated over
    mask_pts = np.stack(np.where(mask), axis=-1)

    # Iterate over masked points
    for mask_pt_n, mask_pt_idx in enumerate(mask_pts):
        # Get bounded neighborhood of selected radius
        b_lo, b_hi = _get_neighborhood(mask_pt_idx, 2, out.shape)

        # Create biharmonic coefficients ndarray
        neigh_coef = np.zeros(b_hi - b_lo)
        neigh_coef[tuple(mask_pt_idx - b_lo)] = 1
        neigh_coef = laplace(laplace(neigh_coef))

        # Iterate over masked point's neighborhood
        it_inner = np.nditer(neigh_coef, flags=['multi_index'])
        for coef in it_inner:
            if coef == 0:
                continue
            tmp_pt_idx = np.add(b_lo, it_inner.multi_index)
            tmp_pt_i = np.ravel_multi_index(tmp_pt_idx, mask.shape)

            if mask[tuple(tmp_pt_idx)]:
                matrix_unknown[mask_pt_n, tmp_pt_i] = coef
            else:
                matrix_known[mask_pt_n, tmp_pt_i] = coef

    # Prepare diagonal matrix
    flat_diag_image = sparse.dia_matrix((out.flatten(), np.array([0])),
                                        shape=(out.size, out.size))

    # Calculate right hand side as a sum of known matrix's columns
    matrix_known = matrix_known.tocsr()
    rhs = -(matrix_known * flat_diag_image).sum(axis=1)

    # Solve linear system for masked points
    matrix_unknown = matrix_unknown[:, mask_i]
    matrix_unknown = sparse.csr_matrix(matrix_unknown)
    result = spsolve(matrix_unknown, rhs)

    # Handle enormous values
    result = np.clip(result, *limits)

    result = result.ravel()

    # Substitute masked points with inpainted versions
    for mask_pt_n, mask_pt_idx in enumerate(mask_pts):
        out[tuple(mask_pt_idx)] = result[mask_pt_n]

    return out
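The laplace(laplace(...)) line above builds the 13-point biharmonic stencil around each masked pixel; printing it for a single centred impulse (standalone, assuming scipy.ndimage.laplace as elsewhere on this page) makes the coefficients explicit:

import numpy as np
from scipy.ndimage import laplace

impulse = np.zeros((5, 5))
impulse[2, 2] = 1.0
print(laplace(laplace(impulse)))  # centre 20, axial neighbours -8, diagonals 2, distance-2 points 1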
Example #10
def main():
    argv = sys.argv
    argv = argv[argv.index("--") + 1:]  # get all args after "--"

    inputPath = argv[0]
    dims = int(argv[1])

    for fileName in os.listdir(inputPath):
        if fileName.endswith(".binvox"):
            inputUrl = os.path.join(inputPath, fileName)

            print("Reading from: " + inputUrl)
            bv = read_binvox(inputUrl)

            outputUrl = ""
            outputPathArray = inputUrl.split(".")
            for i in range(0, len(outputPathArray) - 1):
                outputUrl += outputPathArray[i]
            outputUrl += "_filter.binvox"

            # filters
            if (dilateReps > 0):
                print("Dilating...")
                for i in range(0, dilateReps):
                    nd.binary_dilation(bv.data.copy(), output=bv.data)

            if (sobelReps > 0):
                print("Sobel filter...")
                for i in range(0, sobelReps):
                    nd.sobel(bv.data.copy(), output=bv.data)

            if (gaussianSigma > 0):
                print("Gaussian filter")
                nd.gaussian_filter(bv.data.copy(),
                                   sigma=gaussianSigma,
                                   output=bv.data)

            if (medianSize > 0):
                print("Median filter")
                nd.median_filter(bv.data.copy(),
                                 size=medianSize,
                                 output=bv.data)

            if (laplaceReps > 0):
                print("Laplace filter...")
                for i in range(0, laplaceReps):
                    nd.laplace(bv.data.copy(), output=bv.data)

            if (erodeReps > 0):
                print("Eroding...")
                for i in range(0, erodeReps):
                    nd.binary_erosion(bv.data.copy(), output=bv.data)

            print("Writing to: " + outputUrl)
            write_binvox(bv, outputUrl)
Example #11
def task_5():
    fig, ax = plt.subplots(ncols=3, figsize=(16, 4))
    a_set, _ = ndimage.label(A > A.mean())
    ax[0].imshow(a_set)
    b_mat = ndimage.laplace(ndimage.gaussian_filter(A, 2.2))
    b_set, _ = ndimage.label(b_mat > b_mat.mean())
    ax[1].imshow(b_set)
    c_mat = ndimage.sobel(ndimage.laplace(ndimage.gaussian_filter(A, 1.8)))
    c_set, _ = ndimage.label(c_mat > c_mat.mean())
    ax[2].imshow(c_set)
    plt.show()
Example #12
def main():

    args = parse_args()
    try:
        os.makedirs(args.directory)
    except OSError:
        pass

    target = Volume.fromfile(args.target)
    structure = Structure.fromfile(args.template)
    center = structure.coor.mean(axis=1)
    radius = np.linalg.norm((structure.coor - center.reshape(-1, 1)), axis=0).max() + 0.5 * args.resolution

    template = zeros_like(target)
    rottemplate = zeros_like(target)
    mask = zeros_like(target)
    rotmask = zeros_like(target)
    structure_to_shape(structure.coor, args.resolution, out=template, shape='vol', weights=structure.atomnumber)
    structure_to_shape(structure.coor, args.resolution, out=mask, shape='mask')

    if args.laplace:
        target.array = laplace(target.array, mode='constant')
        template.array = laplace(template.array, mode='constant')
    if args.core_weighted:
        mask.array = determine_core_indices(mask.array)

    # Normalize the template density
    ind = mask.array != 0
    N = ind.sum()
    template.array *= mask.array
    template.array[ind] -= template.array[ind].mean()
    template.array[ind] /= template.array[ind].std()

    rotmat = quat_to_rotmat(proportional_orientations(args.angle)[0])

    lcc_list = []
    center -= target.origin
    center /= template.voxelspacing
    radius /= template.voxelspacing
    time0 = time()
    for n, rot in enumerate(rotmat):
        rotate_grid(template.array, rot, center, radius, rottemplate.array)
        rotate_grid(mask.array, rot, center, radius, rotmask.array, nearest=True)
        lcc = calc_lcc(target.array, rottemplate.array, rotmask.array, N)
        lcc_list.append(lcc)
        print '{:d}              \r'.format(n),

    print 'Searching took: {:.0f}m {:.0f}s'.format(*divmod(time() - time0, 60))
    ind = np.argsort(lcc_list)[::-1]
    with open(os.path.join(args.directory, args.outfile), 'w') as f:
        line = ' '.join(['{:.4f}'] + ['{:7.4f}'] * 9) + '\n'
        for n in xrange(min(args.nsolutions, len(lcc_list))):
            f.write(line.format(lcc_list[ind[n]], *rotmat[ind[n]].ravel()))
Example #13
def _get_neigh_coef(shape, center, dtype=float):
    # Create biharmonic coefficients ndarray
    neigh_coef = np.zeros(shape, dtype=dtype)
    neigh_coef[center] = 1
    neigh_coef = laplace(laplace(neigh_coef))

    # extract non-zero locations and values
    coef_idx = np.where(neigh_coef)
    coef_vals = neigh_coef[coef_idx]

    coef_idx = np.stack(coef_idx, axis=0)
    return neigh_coef, coef_idx, coef_vals
Example #14
 def laplace(arr: np.ndarray, out: np.ndarray) -> None:
     """apply laplace operator to array `arr`"""
     assert arr.shape == grid._shape_full
     valid = (..., ) + (slice(1, -1), ) * grid.dim
     with np.errstate(all="ignore"):  # type: ignore
         # some errors can happen for ghost cells
         out[:] = ndimage.laplace(scaling * arr)[valid]
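The valid tuple above trims one ghost cell from every spatial axis while leaving any leading component axes alone; a tiny standalone illustration of the slicing pattern (dim assumed to be 2):

import numpy as np

dim = 2
full = np.arange(2 * 5 * 5).reshape(2, 5, 5)  # component axis plus two spatial axes with ghosts
valid = (...,) + (slice(1, -1),) * dim
print(full[valid].shape)                      # (2, 3, 3): ghost layers stripped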
Example #15
 def calcDeltaInnerEnergy(self, dx, dy, norm_inv):
     phi_lap = laplace(self.phi)
     norm_dx = dx * norm_inv
     norm_dy = dy * norm_inv
     dxx, dxy = np.gradient(norm_dx)
     dyx, dyy = np.gradient(norm_dy)
     return phi_lap - (dxx + dyy)
Example #16
def plot_avg_laplace(env_name, pcts_to_plot,reps_to_plot):
    fig, ax = plt.subplots(len(reps_to_plot)*2,len(pcts_to_plot))
    for p, pct in enumerate(pcts_to_plot):
        for r, rep in enumerate(reps_to_plot):
            run_id = list(gb.get_group((env_name,rep,cache_limits[env_name][pct])))[0]
            print(run_id)

            with open(f'../../Data/ec_dicts/lifetime_dicts/{run_id}_polarcoord.p', 'rb') as f:
                polar_array = pickle.load(f)

            lpc = []
            for i in range(polar_array.shape[0]):
                lpc.append(laplace(polar_array[i,:]))
            mean_polar = np.mean(polar_array,axis=0)
            mean_laplace = np.mean(np.asarray(lpc),axis=0)
            ax[r*2+0,p].imshow(mean_polar,cmap=fade)
            print(rep, pct, mean_polar[15,2])
            a = ax[r*(2)+1,p].imshow(mean_laplace, cmap=fade_cm,vmin=-1000,vmax=1000)
            ax[r,p].get_xaxis().set_visible(False)
            #ax[r,p].get_yaxis().set_visible(False)
            ax[r,p].set_yticklabels([])
            if r ==0:
                ax[r,p].set_title(pct)
    for r in range(2):
        ax[r,0].set_ylabel(reps_to_plot[r])
        fig.colorbar(a,ax=ax[r,-1])


    plt.show()
Example #17
    def edge_filters(self):
        ''' Plot the original image, its grayscale version, and four edge-filter responses in grayscale
        '''

        self.gray = rgb2gray(self.im)

        self.edges = {
            'Original': self.im,
            'Grayscale': self.gray,
            'Sobel': ndimage.sobel(self.gray),
            'Prewitt': ndimage.prewitt(self.gray),
            'Laplacian': ndimage.laplace(self.gray, mode='reflect'),
            'LoG': ndimage.gaussian_laplace(self.gray, sigma=1, mode='reflect')
        }

        fig, axes = plt.subplots(2, 3, figsize=(18, 10))
        axs = iter(axes.ravel())

        for name, edge in self.edges.items():
            ax = next(axs)
            ax.imshow(edge, cmap='gray')
            ax.set_title(name)

        fig.tight_layout()

        plt.savefig('.'.join(FNAME.split('.')[:-1]) + '_processed.png')
Example #18
def plot_forest(max_depth=1):
    plt.figure()
    ax = plt.gca()
    h = 0.02

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

    if max_depth != 0:
        tree = RandomForestClassifier(n_estimators=200, max_depth=max_depth,
                                      random_state=1).fit(X, y)
        Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        Z = Z.reshape(xx.shape)
        faces = tree.tree_.apply(np.c_[xx.ravel(), yy.ravel()].astype(np.float32))
        faces = faces.reshape(xx.shape)
        border = ndimage.laplace(faces) != 0
        ax.contourf(xx, yy, Z, alpha=.4)
        ax.scatter(xx[border], yy[border], marker='.', s=1)
        ax.set_title("max_depth = %d" % max_depth)
    else:
        ax.set_title("data set")
    ax.scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)
    ax.set_xticks(())
    ax.set_yticks(())
Example #19
def blur_image(img):
    '''Return the blurred image that's used when sampling'''
    blur = np.zeros(list(img.shape)+[2], img.dtype)
    for z in range(img.shape[2]):
        blur[:,:,z, 0] = laplace(gaussian_filter(img[:,:,z], 3))
        blur[:,:,z, 1] = gaussian_filter(img[:,:,z], 5)
    return blur
Example #20
def plot_tree(max_depth=1, ax=None):
    if ax is None:
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
    h = 0.02

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    if max_depth != 0:
        tree = DecisionTreeClassifier(max_depth=max_depth, random_state=1)
        tree.fit(X, y)
        Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        Z = Z.reshape(xx.shape)
        faces = tree.tree_.apply(np.c_[xx.ravel(),
                                       yy.ravel()].astype(np.float32))
        faces = faces.reshape(xx.shape)
        border = ndimage.laplace(faces) != 0
        ax[0].contourf(xx, yy, Z, alpha=.4, cmap='RdBu_r')
        ax[0].scatter(xx[border], yy[border], marker='.', s=1)
        ax[0].set_title("max_depth = %d" % max_depth)
        plot_tree_mpl(tree, ax=ax[1], impurity=False, filled=True)
        # ax[1].axis("off")
    else:
        ax[0].set_title("data set")
        ax[1].set_visible(False)
    ax[0].scatter(X[:, 0],
                  X[:, 1],
                  c=np.array(['tab:blue', 'tab:red'])[y],
                  s=60)
    ax[0].set_xlim(x_min, x_max)
    ax[0].set_ylim(y_min, y_max)
    ax[0].set_xticks(())
    ax[0].set_yticks(())
Example #21
def _get_measurements(folder):
    # list image files
    filenames = os.listdir(folder)
    # sort the image filenames
    filenames = sorted(filenames, key=lambda v: v.upper())
    nl, bs, bs2, ai, dl, di, cl, mz = [], [], [], [], [], [], [], []
    for filename in filenames:
        print(filename)
        filename = os.path.join(folder, filename)
        im = imread(filename)
        im = np.moveaxis(im, 0, -1)
        for i in range(im.shape[2]):
            nl.append(
                estimate_sigma(im[:, :, i],
                               multichannel=False,
                               average_sigmas=True))
            imlap = laplace(im[:, :, i])
            bs.append(imlap.var())  # Blurriness Score
            im2 = gaussian_filter(im[:, :, i], sigma=3)
            bs2.append(im2.var())  # Blurriness Score with Gaussian Filter
            ai.append(im[:, :, i].mean())  # Average Intensity
            dl.append(_get_dark_light(im[:, :, i]))  # Darkness Level
            di.append(_get_dominant_intensity(im[:, :,
                                                 i]))  # Dominant intensity
            imgx, imgy = np.gradient(im[:, :, i])
            img = np.sqrt(np.power(imgx, 2) + np.power(imgy, 2))
            # Contrast Level
            cl.append(np.sum(img) / (im.shape[0] * im.shape[1]))
        for i in range(im.shape[2] - 1):
            _, _, m, _ = _motion_estimation(im[:, :, i], im[:, :, i + 1])
            ali = np.sum(m)
            mz.append(ali)  # Motion Estimation
    return nl, bs, bs2, ai, dl, di, cl, mz
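The bs entries above use the variance of the Laplacian, a classic focus measure: blurring suppresses high frequencies, so the Laplacian response, and with it the variance, drops. A quick standalone check on synthetic data:

import numpy as np
from scipy.ndimage import laplace, gaussian_filter

im = np.random.default_rng(0).random((64, 64))
print(laplace(im).var())                      # sharp image: large variance
print(laplace(gaussian_filter(im, 2)).var())  # blurred image: much smaller variance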
Example #22
def get_spine_from_features(image,
                            features,
                            size,
                            degree,
                            blur=2,
                            threshold=2):
    """
    Generate a mask image just for the spine from spine features
    """
    src, dst, poly_par = get_poly_parameters(features, axis=0, degree=degree)
    win_size = size * 4 + 1
    prof_2d = get_feature_profile(image, features, size=win_size, deg=degree)
    h_centre = prof_2d.shape[1] // 2
    centre_mask = np.zeros(prof_2d.shape)
    centre_mask[:, h_centre] = 1
    centre_mask = ndimage.gaussian_filter1d(centre_mask, size // 2, axis=1)
    lap = ndimage.laplace(ndimage.gaussian_filter(prof_2d, blur))
    lap = lap.max() - lap
    prof_weighted = centre_mask * prof_2d * lap
    prof_mask = prof_weighted > prof_weighted.std() * threshold
    positions = np.array(np.where(prof_mask > 0))
    pos_rec = remap(positions, image, src, dst, poly_par, linewidth=win_size)
    pos_rec = np.flip(pos_rec, axis=0)
    mask_spine = np.zeros(image.shape, dtype=np.uint8)
    mask_spine[tuple(pos_rec.astype(int))] = 1
    mask_spine = ndimage.binary_closing(mask_spine)
    return mask_spine
Example #23
def main():
    jpegs, pngs = imutils.find_local_images()
    print str(len(jpegs)) + " JPEGs Found"
    print str(len(pngs)) + " PNGs Found"

    if '-edges' in sys.argv:
        edges = basic_edge_detector(jpegs)

    if '-test' in sys.argv:
        test_image = choose_random_image(jpegs)

        k1 = [[0, 0, 1], [0, 1, 0], [-1, 0, 1]]

        k2 = [[0, -1, 0], [-1, 2, -1], [0, -1, 0]]

        edge_test = np.zeros(test_image.shape)
        gol0 = ndi.gaussian_laplace(test_image[:, :, 0], sigma=1)
        gol1 = ndi.laplace(ndi.convolve(test_image[:, :, 1], k1))
        gol2 = ndi.gaussian_laplace(ndi.convolve(test_image[:, :, 2], k2),
                                    sigma=1)
        edge_test[:, :, 0] = ndi.convolve(test_image[:, :, 0],
                                          [[0, 0, 0], [0, 2, 0], [0, 0, 0]])
        edge_test[:, :, 1] = gol1
        edge_test[:, :, 2] = gol2 / 255
        f, ax = plt.subplots(1, 4, figsize=(10, 5), sharex=True)
        ax[0].imshow(test_image)
        ax[1].imshow(test_image - edge_test, 'gray')
        ax[2].imshow(gol1, 'gray')
        ax[3].imshow(gol0, 'gray')
        plt.show()
Example #24
def dibuja_arbol_particion(X, y, arbol, ax=None):
    if ax is None:
        ax = plt.gca()
    eps = X.std() / 2.

    x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
    y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
    xx = np.linspace(x_min, x_max, 1000)
    yy = np.linspace(y_min, y_max, 1000)

    X1, X2 = np.meshgrid(xx, yy)
    X_grid = np.c_[X1.ravel(), X2.ravel()]

    Z = arbol.predict(X_grid)
    Z = Z.reshape(X1.shape)
    caras = arbol.apply(X_grid)
    caras = caras.reshape(X1.shape)
    bordes = ndimage.laplace(caras) != 0
    ax.contourf(X1, X2, Z, alpha=.4, cmap=cm2, levels=[0, .5, 1])
    ax.scatter(X1[bordes], X2[bordes], marker='.', s=1)

    dibuja_dispersion_discreta(X[:, 0], X[:, 1], y, ax=ax)
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)
    ax.set_xticks(())
    ax.set_yticks(())
    return ax
Example #25
def preprocessing(features, method):
    """preprocess the sequence data

    Parameters:
        features (numpy array): original data sequence
        method (dictionary): preprocessing methods (filters) and sizes of filters

    Returns:
         features (numpy array): data sequence after preprocessing
    """
    for (key, value) in method.items():
        if key == 'gaussian':
            for i in range(6):
                features[:, i] = gaussian_filter1d(features[:, i],
                                                   sigma=value,
                                                   axis=0)
        elif key == 'median':
            for i in range(6):
                features[:, i] = median_filter(features[:, i], size=value)
        elif key == 'uniform':
            for i in range(6):
                features[:, i] = uniform_filter1d(features[:, i],
                                                  size=value,
                                                  axis=0)
        elif key == 'laplace':
            for i in range(6):
                features[:, i] = laplace(features[:, i])
        elif key == 'gaussian_laplace':
            for i in range(6):
                features[:, i] = gaussian_laplace(features[:, i], sigma=value)
    return features
Example #26
def plot_tree(max_depth=1):
    fig, ax = plt.subplots(1, 2, figsize=(15, 7))
    h = 0.02

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    if max_depth != 0:
        tree = DecisionTreeClassifier(max_depth=max_depth,
                                      random_state=1).fit(X, y)
        Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        Z = Z.reshape(xx.shape)
        faces = tree.tree_.apply(np.c_[xx.ravel(),
                                       yy.ravel()].astype(np.float32))
        faces = faces.reshape(xx.shape)
        border = ndimage.laplace(faces) != 0
        ax[0].contourf(xx, yy, Z, alpha=.4)
        ax[0].scatter(xx[border], yy[border], marker='.', s=1)
        ax[0].set_title("max_depth = %d" % max_depth)
        ax[1].imshow(tree_image(tree))
        ax[1].axis("off")
    else:
        ax[0].set_title("data set")
        ax[1].set_visible(False)
    ax[0].scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
    ax[0].set_xlim(x_min, x_max)
    ax[0].set_ylim(y_min, y_max)
    ax[0].set_xticks(())
    ax[0].set_yticks(())
Example #27
def plot_tree(max_depth=1):
    fig, ax = plt.subplots(1, 2, figsize=(15, 7))
    h = 0.02

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

    if max_depth != 0:
        tree = DecisionTreeClassifier(max_depth=max_depth, random_state=1).fit(X, y)
        Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        Z = Z.reshape(xx.shape)
        faces = tree.tree_.apply(np.c_[xx.ravel(), yy.ravel()].astype(np.float32))
        faces = faces.reshape(xx.shape)
        border = ndimage.laplace(faces) != 0
        ax[0].contourf(xx, yy, Z, alpha=.4)
        ax[0].scatter(xx[border], yy[border], marker='.', s=1)
        ax[0].set_title("max_depth = %d" % max_depth)
        ax[1].imshow(tree_image(tree))
        ax[1].axis("off")
    else:
        ax[0].set_title("data set")
        ax[1].set_visible(False)
    ax[0].scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
    ax[0].set_xlim(x_min, x_max)
    ax[0].set_ylim(y_min, y_max)
    ax[0].set_xticks(())
    ax[0].set_yticks(())
Example #28
def plot_tree_partition(X, y, tree, ax=None):
    if ax is None:
        ax = plt.gca()
    eps = X.std() / 2.

    x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
    y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
    xx = np.linspace(x_min, x_max, 1000)
    yy = np.linspace(y_min, y_max, 1000)

    X1, X2 = np.meshgrid(xx, yy)
    X_grid = np.c_[X1.ravel(), X2.ravel()]

    Z = tree.predict(X_grid)
    Z = Z.reshape(X1.shape)
    faces = tree.apply(X_grid)
    faces = faces.reshape(X1.shape)
    border = ndimage.laplace(faces) != 0
    ax.contourf(X1, X2, Z, alpha=.4, colors=['red', 'blue'], levels=[0, .5, 1])
    ax.scatter(X1[border], X2[border], marker='.', s=1)

    ax.scatter(X[:, 0], X[:, 1], c=np.array(['r', 'b'])[y], s=60)
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)
    ax.set_xticks(())
    ax.set_yticks(())
    return ax
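Several of these tree plots rely on the same trick: tree.apply returns a leaf id per grid point, and ndimage.laplace of that label image is nonzero exactly where neighbouring points land in different leaves, i.e. on region borders. A standalone sketch on a toy label image:

import numpy as np
from scipy import ndimage

labels = np.zeros((5, 8))
labels[:, 4:] = 3.0                    # two constant regions, like leaf ids on a grid
border = ndimage.laplace(labels) != 0  # nonzero only where the label value changes
print(border.astype(int))              # marks the columns on both sides of the boundary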
Example #29
 def velocitychange(self, dt=0.1):
     """
     Work towards full Navier Stokes equation:
     P' = P - m' div(u)
     p Du/Dt = -del(P) + pg + m lap(u) + 1/3 m del(div(u))
     
     """
     ###FORGOT THE DENSITY TERMS HERE BEFORE AAAAAAAAAAAAAAAAAA
     """STICKING THIS HERE FOR NOW, THIS SHOULD BE MODULARISED TO
 MATCH DIFFERENT DYE BEHAVIOURS"""
     total = self.d + self.dye
     #Pressure gradient
     du_dt = -np.array(np.gradient(self.P)) / total
     #Gravity, only in the y direction
     du_dt[0] -= self.g
     #Diffusion term, this doesn't work great
     du_dt += self.m * np.stack(
         [ndimage.laplace(self.u[i]) for i in [0, 1]]) / total
     #Compressible term
     du_dt += 1 / 3 * self.m * np.array(np.gradient(div(self.u))) / total
     #Advection
     du_dt -= np.add.reduce(
         np.stack((self.u, ) * 2, axis=1) *
         np.array(np.gradient(self.u, axis=(1, 2))))
     self.u += du_dt * dt
Example #30
 def heatchange(self, rate_a: float, rate_b: float):
     #add some heat in
     self.T[0, :] += rate_a
     #radiate some heat out
     self.T -= self.T[-1]**4 * rate_b  #???
     #distribute temp
     self.T += ndimage.laplace(self.T) / 4
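The last line is a forward-Euler heat step, T <- T + c*laplace(T) with c = 1/4, which sits at the 2-D stability limit. A standalone sketch of the field relaxing under repeated steps:

import numpy as np
from scipy import ndimage

T = np.random.default_rng(1).random((32, 32))
for _ in range(200):
    T += ndimage.laplace(T) / 4  # explicit diffusion step; c = 1/4 is the 2-D stability limit
print(T.std())                   # the spread shrinks as the temperature equalises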
Example #31
def separate_fields_by_laplace(rate_map, threshold=0, minimum_field_area=None):
    """Separates fields using the laplacian to identify fields separated by
    a negative second derivative.
    Parameters
    ----------
    rate_map : np 2d array
        firing rate in each bin
    threshold : float
        value of laplacian to separate fields by relative to the minima. Should be
        on the interval 0 to 1, where 0 cuts off at 0 and 1 cuts off at
        min(laplace(rate_map)). Default 0.
    minimum_field_area: int
        minimum number of bins to consider it a field. Default None (all fields are kept)
    Returns
    -------
    labels : numpy array, shape like rate_map.
        contains areas all filled with same value, corresponding to fields
        in rate_map. The fill values are in range(1,nFields + 1), sorted by size of the
        field (sum of all field values) with 0 elsewhere.
    :Authors:
        Halvard Sutterud <*****@*****.**>
    """

    l = ndimage.laplace(rate_map)

    l[l > threshold * np.min(l)] = 0

    # Labels areas of the laplacian not connected by values > 0.
    fields, field_count = ndimage.label(l)
    fields = sort_fields_by_rate(rate_map, fields)
    if minimum_field_area is not None:
        fields = remove_fields_by_area(fields, minimum_field_area)
    return fields
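A hypothetical usage sketch for the function above (the two-bump rate map is made up, and the function plus its helpers sort_fields_by_rate and remove_fields_by_area are assumed importable):

import numpy as np

y, x = np.mgrid[0:40, 0:40]
rate_map = (np.exp(-((x - 10) ** 2 + (y - 10) ** 2) / 20.0) +
            np.exp(-((x - 30) ** 2 + (y - 28) ** 2) / 20.0))
fields = separate_fields_by_laplace(rate_map, threshold=0, minimum_field_area=4)
print(np.unique(fields))  # e.g. [0 1 2]: background plus two detected fields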
Example #32
 def heatchange(self, rate_a, rate_b):
     #add some heat in
     self.T[0] += rate_a
     #radiate some heat out
     self.T[-1] -= self.T[-1]**4 * rate_b  #???
     #distribute temp
     self.T -= self.c * ndimage.laplace(self.T) / 2
Example #33
def plot_tree_partition(X, y, tree, ax=None):
    if ax is None:
        ax = plt.gca()
    eps = X.std() / 2.

    x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
    y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
    xx = np.linspace(x_min, x_max, 1000)
    yy = np.linspace(y_min, y_max, 1000)

    X1, X2 = np.meshgrid(xx, yy)
    X_grid = np.c_[X1.ravel(), X2.ravel()]

    Z = tree.predict(X_grid)
    Z = Z.reshape(X1.shape)
    faces = tree.apply(X_grid)
    faces = faces.reshape(X1.shape)
    border = ndimage.laplace(faces) != 0
    ax.contourf(X1, X2, Z, alpha=.4, cmap=cm2, levels=[0, .5, 1])
    ax.scatter(X1[border], X2[border], marker='.', s=1)

    discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)
    ax.set_xticks(())
    ax.set_yticks(())
    return ax
Example #34
def generate_features3D(image, sigma):
    # generate range of sigmas
    sigmas = range(sigma[0], sigma[1] + 1)

    f_values = image.flatten()
    f_sobel = scp.sobel(image).flatten()

    f_gauss = np.zeros([len(image.flatten()), len(sigmas)])
    f_dog = np.zeros([len(image.flatten()), len(sigmas) - 1])

    idx = 0
    for s in range(sigma[0], sigma[1] + 1):
        # consider only Re part for gabor filter
        f_gauss[:, idx] = scp.gaussian_filter(image, s).flatten()
        if (idx != 0):
            f_dog[:, idx - 1] = f_gauss[:, idx] - f_gauss[:, idx - 1]
        idx += 1

    f_max = scp.maximum_filter(image, sigma[0]).flatten()
    f_median = scp.median_filter(image, sigma[0]).flatten() # run median only with the minimal sigma
    f_laplacian = scp.laplace(image).flatten()

    # full set of features
    f_set = np.vstack([f_values, f_max,
                       f_median, f_sobel,
                       f_gauss.T, f_dog.T,
                       f_laplacian]).T
    return f_set
Example #35
def getFilterResponses(im, filterSize=7, DogScales=[3, 5], GaussianScales=[1]):
    """ im: Nx3 channel image , N: number of samples """
    print("Computing Lab images...")
    im = color.rgb2lab(im)
    responses = []
    num_channels = im.shape[3]
    for k in range(num_channels):
        for i in GaussianScales:
            a = ndi.gaussian_filter(im[:, :, :, k], sigma=i)
            responses.append(
                np.reshape(a, (a.shape[0], a.shape[1] * a.shape[2])))
            #            print("responses size: ", np.shape(responses))

            b = ndi.laplace(a)
            responses.append(
                np.reshape(b, (b.shape[0], b.shape[1] * b.shape[2])))

        for i in DogScales:
            a = ndi.gaussian_gradient_magnitude(im[:, :, :, k], sigma=i)
            responses.append(
                np.reshape(a, (a.shape[0], a.shape[1] * a.shape[2])))

        for j in GaussianScales:

            t = ndi.gaussian_filter(im[:, :, :, k], sigma=j)
            a = ndi.sobel(t, axis=0)
            responses.append(
                np.reshape(a, (a.shape[0], a.shape[1] * a.shape[2])))
            b = ndi.sobel(t, axis=1)
            responses.append(
                np.reshape(b, (b.shape[0], b.shape[1] * b.shape[2])))

    return np.array(responses)
Example #36
def task_4():
    sigs = np.logspace(-1, 0.7, 9)
    fig, ax = plt.subplots(ncols=3, nrows=3, figsize=(15, 10))
    for i, s in enumerate(sigs):
        ax[i % 3,
           int(i / 3)].imshow(ndimage.laplace(ndimage.gaussian_filter(A, s)))
        ax[i % 3, int(i / 3)].set_title(r"$\sigma$=%.3f" % s)
Example #37
def test_multiple_modes_laplace():
    # Test laplace filter for multiple extrapolation modes
    arr = np.array([[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 0.0, 0.0]])

    expected = np.array([[-2.0, 2.0, 1.0], [-2.0, -3.0, 2.0], [1.0, 1.0, 0.0]])

    modes = ["reflect", "wrap"]

    assert_equal(expected, sndi.laplace(arr, mode=modes))
Example #38
	def onEdgeFinding(self,evt):	
		if not self.panel.selectiontool.isTargeting('Auto Create Contours'):
			return 
		point = self.panel.view2image((evt.m_x,evt.m_y))

		ndarray = mrc.read(os.path.join(self.appionloop.params['rundir'], self.appionloop.imgtree[self.index]['filename']+'.dwn.mrc'))
		mrc.write(ndarray, os.path.join(self.appionloop.params['rundir'], 'beforefilter'+'.dwn.mrc'))

		negative = False	
		if self.filters:
			ndarray = ndimage.gaussian_filter(ndarray,1)
			ndarray = ndimage.gaussian_gradient_magnitude(ndarray,2)
			markers = []
			for i in range(3):
				for j in range(3):
					if i!=0 or j!=0:
						markers.append((point[0]-1+i,point[1]-1+j))
			markers = (1,2,3,4,5,6,7,8)
			#ndarray = ndimage.watershed_ift(ndarray,markers)
			ndarray = ndimage.laplace(ndarray)
			ndarray = ndimage.gaussian_filter(ndarray,1)
			#ndarray = apImage.preProcessImage(ndarray,params=self.appionloop.params)
			negative = True 
		mrc.write(ndarray, os.path.join(self.appionloop.params['rundir'], 'afterfilter'+'.dwn.mrc'))

		delta = .1
		targets = []
		radius = 20
		size = 50
		rangeSize = 50
		maker = PixelCurveMaker()
		maker._init_(size,rangeSize);
		for theta in range(size):
			theta +=0
			theta*=math.pi*2/rangeSize
			for rad in range(size):
				try:
					if negative:
						maker.addData(theta,rad,127-ndarray[int(point[1]+rad*math.sin(theta))][int(point[0]+rad*math.cos(theta))])
					else:
						maker.addData(theta,rad,ndarray[int(point[1]+rad*math.sin(theta))][int(point[0]+rad*math.cos(theta))])
				except IndexError:
					maker.addData(theta,rad,0)
		maker.makeCalculations()
		s = self.filterSelectorChoices[self.filterSelector.GetSelection()]	
		dilate = 2
		if s == 'Latex Bead':
			dilate = 0
		for theta in range(size):
			theta += 0
			theta*=math.pi*2/rangeSize
			targets.append((point[0]+(dilate+maker.getData(theta))*math.cos(theta),point[1]+(dilate+maker.getData(theta))*math.sin(theta)))
		self.addPolyParticle(targets)
		#this section draws all of the contours that the algorithm considers - useful for debugging
		'''
Example #39
def update_plot():
    global ip_rx, ip_ry, ip_refs, ip_xray, ip_rho
    color_options = [np.array([1,0,0,1]), np.array([0.65,0,1,1]), np.array([0,0.8,1,1]), 
            np.array([1,0.6,0,1]),  np.array([0,1,0,1]), np.array([1,1,0,1]), np.array([1,0,1,1])]
    trho = ip_rho
    maskr = []
    maskx = []
    colors = []
    for rx, ry in zip(ip_rx, ip_ry):
        maskx.append(np.zeros(ip_xray.shape))
        maskr.append(np.ones(ip_xray.shape))
        c = color_options[(len(maskx)-1)%len(color_options)]
        colors.append(c)

        maskx[-1][ry[0]:ry[1],rx[0]] = 1
        maskx[-1][ry[0]:ry[1],rx[1]] = 1
        maskx[-1][ry[1],rx[0]:rx[1]] = 1
        maskx[-1][ry[0],rx[0]:rx[1]] = 1

        maskr[-1] = np.zeros(ip_rho.shape[:2])
        for x in range(rx[0], rx[1]):
            for y in range(ry[0], ry[1]):
                if ip_refs.has_key((x,y)):
                    for tt in ip_refs[(x,y)]:
                        maskr[-1][tt[1],tt[0]] = 1

    ip_fig = pl.gcf()
    ip_fxray = pl.subplot(121)
    ip_freal = pl.subplot(122)

    ip_fxray.cla()
    ip_freal.cla()
    trho = mpl.cm.gray_r(mpl.colors.Normalize()(ip_rho**0.5))
    txry = mpl.cm.jet(mpl.colors.Normalize()(ip_xray**0.5))

    ip_freal.imshow(trho, interpolation='nearest', origin='lower')
    ip_fxray.imshow(txry, interpolation='nearest', origin='lower')
    
    for mask, color in zip(maskr, colors):
        outline = (abs(nd.laplace(mask)) > 0)[...,np.newaxis]*color[np.newaxis,np.newaxis,...]
        filler  = np.ones(mask.shape)[...,np.newaxis]*color[np.newaxis,np.newaxis,...] 
        filler[:,:,3] = 0.7*mask
        ip_freal.imshow(outline, interpolation='nearest', origin='lower', alpha=1.0)
        ip_freal.imshow(filler, interpolation='nearest', origin='lower', alpha=0.4)

    for mask,color in zip(maskx, colors):
        outline = mask[...,np.newaxis]*color[np.newaxis,np.newaxis,...]
        ip_fxray.imshow(outline, interpolation='nearest', origin='lower', alpha=1.0)

    ip_freal.set_xticks([]); ip_freal.set_yticks([])
    ip_fxray.set_xticks([]); ip_fxray.set_yticks([])
    ip_fig.show()
    pl.subplots_adjust(0.,0.,1.,1.,0.01,0.05)
    pl.draw()
Example #40
def update_plot():
    global rod, rho, crod, rx, ry, rz, ps
    gs = 200
    cm = pylab.cm.hot_r
    
    mrodx = ones_like(rod[x])
    mrody = ones_like(rod[y])
    mrodz = ones_like(rod[z])
    
    if rx is not None:
        mrodx = (rod[x] > rx[0]) * (rod[x] < rx[1])
    if ry is not None:
        mrody = (rod[y] > ry[0]) * (rod[y] < ry[1])
    if rz is not None:
        mrodz = (rod[z] > rz[0]) * (rod[z] < rz[1])
    
    """
    trodx_xy = (mrodx * mrody * rod[x]).flatten()
    trody_xy = (mrodx * mrody * rod[y]).flatten()
    trodx_xz = (mrodx * mrodz * rod[x]).flatten()
    trodz_xz = (mrodx * mrodz * rod[z]).flatten()
    trody_yz = (mrody * mrodz * rod[y]).flatten()
    trodz_yz = (mrody * mrodz * rod[z]).flatten()

    fxy.hexbin(trodx_xy[trodx_xy.nonzero()], trody_xy[trodx_xy.nonzero()], gridsize=gs, extent=(-ps,ps,-ps,ps), cmap=cm)
    fxz.hexbin(trodx_xz[trodx_xz.nonzero()], trodz_xz[trodx_xz.nonzero()], gridsize=gs, extent=(-ps,ps,-ps,ps), cmap=cm)
    fyz.hexbin(trody_yz[trody_yz.nonzero()], trodz_yz[trody_yz.nonzero()], gridsize=gs, extent=(-ps,ps,-ps,ps), cmap=cm)
    """
    ex = (-ps, ps)
    ey = (-ps, ps)
    ez = (-ps, ps)
    if rx is not None:
        ex = tuple(rx)
    if ry is not None:
        ey = tuple(ry)
    if rz is not None:
        ez = tuple(rz)

    fxy.cla(); fxz.cla(); fyz.cla()
    fxy.hexbin(rod[x].flatten(), rod[y].flatten(), gridsize=gs, extent=ex+ey, cmap=cm)
    fxz.hexbin(rod[x].flatten(), rod[z].flatten(), gridsize=gs, extent=ex+ez, cmap=cm)
    fyz.hexbin(rod[y].flatten(), rod[z].flatten(), gridsize=gs, extent=ey+ez, cmap=cm)

    mask = ones_like(rod[x]) * mrodx * mrody * mrodz
    mask = (nd.laplace(mask) > 1e-6) == False
    
    tcrod = ones_like(crod)
    trho = rho * mask
    tcrod[:,:,0] = crod[:,:,0] * mask
    tcrod[:,:,1] = crod[:,:,1] * mask
    tcrod[:,:,2] = crod[:,:,2] * mask
    frod.imshow(tcrod)
    frho.imshow(trho, vmin=trho.min(), vmax=trho.max())
    show()
Example #41
def plot_timeslice(filename, N, dim, time, Max=None):
    t, s = FieldInitializer.LoadStateRaw(filename, N, dim, time)

    rod = s.CalculateRotationRodrigues()
    rho = s.CalculateRhoFourier().modulus()
    if len(s.gridShape) == 3:
        rho = rho[:, :, 0]
        rod[x] = rod[x][:, :, 0]
        rod[y] = rod[y][:, :, 0]
        rod[z] = rod[z][:, :, 0]
    crod = OrientationField.RodriguesToUnambiguousColor(rod[x], rod[y], rod[z])

    fig = figure(0)
    fig.clf()
    fxy = subplot(231)
    fxz = subplot(232)
    fyz = subplot(233)
    frho = subplot(234)
    frod = subplot(235)
    fmask = subplot(236)

    fxy.cla()
    fxz.cla()
    fyz.cla()
    if Max is not None:
        fxy.hexbin(rod[x].flatten(), rod[y].flatten(), gridsize=gs, extent=ex + ey, cmap=cm, vmax=Max)
        fxz.hexbin(rod[x].flatten(), rod[z].flatten(), gridsize=gs, extent=ex + ez, cmap=cm, vmax=Max)
        fyz.hexbin(rod[y].flatten(), rod[z].flatten(), gridsize=gs, extent=ey + ez, cmap=cm, vmax=Max)
    else:
        fxy.hexbin(rod[x].flatten(), rod[y].flatten(), gridsize=gs, extent=ex + ey, cmap=cm)
        fxz.hexbin(rod[x].flatten(), rod[z].flatten(), gridsize=gs, extent=ex + ez, cmap=cm)
        fyz.hexbin(rod[y].flatten(), rod[z].flatten(), gridsize=gs, extent=ey + ez, cmap=cm)

    mrodx = ones_like(rod[x])
    mrody = ones_like(rod[y])
    mrodz = ones_like(rod[z])

    mrodx = (rod[x] > ex[0]) * (rod[x] < ex[1])
    mrody = (rod[y] > ey[0]) * (rod[y] < ey[1])
    mrodz = (rod[z] > ez[0]) * (rod[z] < ez[1])

    mask1 = ones_like(rod[x]) * mrodx * mrody * mrodz
    mask = (nd.laplace(mask1) > 1e-6) == False

    tcrod = ones_like(crod)
    trho = rho * mask
    tcrod[:, :, 0] = crod[:, :, 0] * mask
    tcrod[:, :, 1] = crod[:, :, 1] * mask
    tcrod[:, :, 2] = crod[:, :, 2] * mask
    frod.imshow(tcrod)
    frho.imshow(trho, vmin=trho.min(), vmax=trho.max())
    fmask.imshow(mask1, vmin=mask1.min(), vmax=mask1.max())
Example #42
def test_multiple_modes_laplace():
    # Test laplace filter for multiple extrapolation modes
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    expected = np.array([[-2., 2., 1.],
                         [-2., -3., 2.],
                         [1., 1., 0.]])

    modes = ['reflect', 'wrap']

    assert_equal(expected,
                 sndi.laplace(arr, mode=modes))
Example #43
def compute_cell_separation(mat):
    """
    Function creating a space between cells for display.
    Change the voxels shared between two cells to 0 so you can clearly see the separations between cells.
    """
    import scipy.ndimage as nd
    import numpy as np
    import copy
    sep=nd.laplace(mat)
    sep2=copy.copy(sep)
    sep2[np.where(mat==1)]=0
    sep2[np.where(sep==0)]=1
    sep2[np.where(sep!=0)]=0
    mat=mat*sep2
    del sep2,sep
    
    return mat
Example #44
def sharp_laplace1(img):
  """ Sharpen the image using laplacian operator

  Input:
   - img <ndarray>

  Output:
   <ndarray>
  """

  # Sharpening the image with the Laplacian involves subtracting the image
  # convolved with the Laplacian from the original image. Since the Laplace
  # operator can generate negative values we need to use an int type image.
  img = np.asarray(img, dtype=int)

  # Perform the operation
  sharp = img - ndi.laplace(img)

  # Clip, cast and return the result
  return np.asarray(np.clip(sharp, 0, 255), dtype=np.uint8)
Example #45
def img2polydata_complexe(image, list_remove=[], sc=None, verbose=False):
    """
    Convert a |SpatialImage| to a PolyData object with cell surfaces

    : Parameters :
    list_remove : a list of cells to be removed from the tissue before putting it on screen
    sc : if given, it will be used as the scalar field; you need to supply a
    cell->scalar dictionary
    """

    labels_provi = list(np.unique(image))
    # the lists are already filtered at this point
    #~ labels= [i for i in labels_provi if i not in list_remove]
    labels= labels_provi

    try:      labels.remove(0)
    except:   pass
    try:      labels.remove(1)
    except:   pass

    #print image.shape

    xyz = {}
    if verbose:print "on récupère les bounding box"
    bbox = nd.find_objects(image)
    #print labels
    for label in xrange(2,max(labels)+1):
        if not label in labels: continue
        if verbose:print "% until cells are built", label/float(max(labels))*100
        slices = bbox[label-1]
        label_image = (image[slices] == label)
        #here we could add a laplacian function to only have the external shape
        mask = nd.laplace(label_image)
        label_image[mask!=0] = 0
        mask = nd.laplace(label_image)
        label_image[mask==0]=0
        # compute the indices of voxel with adequate label
        a = np.array(label_image.nonzero()).T
        a+=[slices[0].start, slices[1].start, slices[2].start ]
        #print a.shape
        if a.shape[1] == 4:
            #print a
            pass
        else:
            xyz[label] = a


    vx,vy,vz = image.resolution

    polydata = tvtk.AppendPolyData()
    polys = {}
    filtre=[i for i in xyz.keys() if i not in list_remove]
    k=0.0
    for c in filtre:
        if verbose: print "% until first polydata is built", k/float(len(filtre))*100
        k+=1.
        p=xyz[c]
        p=p.astype(np.float)
        pd = tvtk.PolyData(points=xyz[c].astype(np.float))
        if sc:
            try:
                pd.point_data.scalars = [float(sc[c]) for i in xrange(len(xyz[c]))]
            except:
                pd.point_data.scalars = [float(0) for i in xrange(len(xyz[c]))]
        else:
            pd.point_data.scalars = [float(c) for i in xrange(len(xyz[c]))]
        f=tvtk.VertexGlyphFilter(input=pd)
        f2=tvtk.PointDataToCellData(input=f.output)
        polys[c]=f2.output
        polydata.add_input(polys[c])
        polydata.set_input_array_to_process(0,0,0,0,0)


    try:
        labels_not_in_sc = list(set(list(np.unique(image)))-set(sc))
    except TypeError:
        labels_not_in_sc=[]

    if 0 in labels_not_in_sc: labels_not_in_sc.remove(0)
    if 1 in labels_not_in_sc: labels_not_in_sc.remove(1)
    filtre=[i for i in xyz.keys() if i in list_remove or i in labels_not_in_sc]
    if filtre!=[]:
        polydata2 = tvtk.AppendPolyData()
        polys2 = {}    
        k=0.0
        for c in filtre:
            if verbose: print "% until second polydata is built", k/float(len(filtre))*100
            k+=1.
            p=xyz[c]
            p=p.astype(np.float)
            pd = tvtk.PolyData(points=xyz[c].astype(np.float))
            pd.point_data.scalars = [0. for i in xrange(len(xyz[c]))]
            f=tvtk.VertexGlyphFilter(input=pd)
            f2=tvtk.PointDataToCellData(input=f.output)
            polys2[c]=f2.output
            polydata2.add_input(polys2[c])
    else:
        polydata2=tvtk.AppendPolyData()
        polydata2.set_input_array_to_process(0,0,0,0,0)
        polys2 = {}
        pd = tvtk.PolyData()
        polydata2.add_input(pd)
    return polydata, polydata2
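The two successive nd.laplace passes in the loop above first peel off the outer voxel layer of a labelled cell and then keep only the freshly exposed boundary, leaving a one-voxel-thick shell. A standalone 2-D illustration of the trick:

import numpy as np
from scipy import ndimage as nd

cell = np.zeros((7, 7))
cell[1:6, 1:6] = 1.0               # a filled 5x5 "cell"
shell = cell.copy()
shell[nd.laplace(shell) != 0] = 0  # strip the outermost layer of the region
shell[nd.laplace(shell) == 0] = 0  # keep only the newly exposed boundary
print(shell.astype(int))           # a one-pixel ring around the old interior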
Example #46
            else:
                out[:,j] = out[:,j] + np.minimum(f[:,j - 1] - f[:,j], 0)**2
        else:
            if j > 0:
                out[:,j] = out[:,j] + np.minimum(f[:,j] - f[:,j - 1], 0)**2
            else:
                out[:,j] = out[:,j] + np.minimum(f[:,j] - f[:,j + 1], 0)**2
            if j < out.shape[1] - 1:
                out[:,j] = out[:,j] + np.maximum(f[:,j + 1] - f[:,j], 0)**2
            else:
                out[:,j] = out[:,j] + np.maximum(f[:,j - 1] - f[:,j], 0)**2
    return np.sqrt(out)

# S = square(radius=25, spacing=5) # data set of points
S = np.array(Image.open("double1.png"))
S = ndimage.laplace(ndimage.gaussian_filter(S-0.5,1))
S = np.absolute(S) < 30
D = ndimage.distance_transform_edt(S) # distance to data set
[Du, Dv] = np.gradient(D)
# image = np.array(Image.open("cir.png"))
# Phi0 = (image - image.max() / 2) / 255

Phi0 = np.ones(grid_shape)
Phi0[cx, cy] = 0
Phi0 = ndimage.distance_transform_edt(Phi0)
P = Phi0 -  0.65*np.max(Phi0)

# plt.figure()
# plt.imshow(1-S, cmap='gray')
# fig = plt.figure()
# cax = plt.imshow(D)
Example #47
 def _laplace_filter(array):
     """Laplace transform"""
     return laplace(array, mode='constant')
Example #48
def update_plot():
    global rod, rho, crod, rx, ry, rz, ps, point, fpoint
    gs = 100
    cm = pylab.cm.hot_r
    
    mrodx = ones_like(rod[x])
    mrody = ones_like(rod[y])
    mrodz = ones_like(rod[z])
    
    if rx is not None:
        mrodx = (rod[x] > rx[0]) * (rod[x] < rx[1])
    if ry is not None:
        mrody = (rod[y] > ry[0]) * (rod[y] < ry[1])
    if rz is not None:
        mrodz = (rod[z] > rz[0]) * (rod[z] < rz[1])
    
    ex = (-ps, ps)
    ey = (-ps, ps)
    ez = (-ps, ps)
    if rx is not None:
        ex = tuple(rx)
    if ry is not None:
        ey = tuple(ry)
    if rz is not None:
        ez = tuple(rz)

    fxy.cla(); fxz.cla(); fyz.cla()
    exy = array([array(ex),array(ey)])
    exz = array([array(ex),array(ez)])
    eyz = array([array(ey),array(ez)])
    hxy = histogram2d(rod[x].flatten(), rod[y].flatten(), bins=gs, range=exy)
    hxz = histogram2d(rod[x].flatten(), rod[z].flatten(), bins=gs, range=exz)
    hyz = histogram2d(rod[y].flatten(), rod[z].flatten(), bins=gs, range=eyz)
    fxy.imshow(rot90(hxy[0]), cmap=cm, extent=ex+ey); 
    fxz.imshow(rot90(hxz[0]), cmap=cm)#, extent=ex+ez); 
    fyz.imshow(rot90(hyz[0]), cmap=cm)#, extent=ey+ez); 
    fpoint = None

    mask1 = ones_like(rod[x]) * mrodx * mrody * mrodz
    mask = (nd.laplace(mask1) > 1e-6) == False
    
    tcrod = ones_like(crod)
    trho = rho * mask
    tcrod[:,:,0] = crod[:,:,0] * mask
    tcrod[:,:,1] = crod[:,:,1] * mask
    tcrod[:,:,2] = crod[:,:,2] * mask

    frod.imshow(rot90(rod[x]), vmin=rod[x].min(), vmax=rod[x].max(), extent=(0,N,0,N)); 
    frho.imshow(rot90(trho), vmin=trho.min(), vmax=trho.max(), extent=(0,N,0,N)); 
    fmask.imshow(rot90(mask1), vmin=mask1.min(), vmax=mask1.max(), cmap=pylab.cm.gray, extent=(0,N,0,N))

    title("Incoherent X-Ray Patterns")
    fxy.set_title("Scattering intensity x-y", fontsize=20)
    fxz.set_title("Scattering intensity x-z", fontsize=20)
    fyz.set_title("Scattering intensity y-z", fontsize=20)
    frho.set_title("Dislocation Density", fontsize=20)
    frod.set_title("Misorientation", fontsize=20)
    fmask.set_title("Selection mask", fontsize=20)
 
    fxy.set_xticks([]); fxy.set_yticks([])
    fxz.set_xticks([]); fxz.set_yticks([])
    fyz.set_xticks([]); fyz.set_yticks([])
    frod.set_xticks([]); frod.set_yticks([])    
    frho.set_xticks([]); frho.set_yticks([])
    fmask.set_xticks([]); fmask.set_yticks([]);

    subplots_adjust(0.,0.,1.,0.95,0.01,0.05)
    show()
    print "done"
sigma = 4.
k = stats.norm.pdf(x, 0, sigma)
kernel = np.dot(k[:, None], k[None, :])
kernel /= kernel.sum()
# Convolve it with the image
img_lo = ndim.convolve(img, np.expand_dims(kernel, axis=2))
plt.subplot(2, 3, 2)
plt.imshow(img_lo[::-1])
# That convolution was slow because it did not exploit the
# separability of the filter.
#
# However, scipy.ndimage contains many packaged filtering operations
# that are efficient.
#
# high-pass, laplace filter
img_hi = ndim.laplace(img)

# grayscale versions of the images
img_gray = np.mean(img, axis=2)
img_gray_lo = ndim.convolve(img_gray, kernel)
img_gray_hi = ndim.laplace(img_gray)

def plot_image(ax, img, title="", gray=False):
    if gray:
        ax.imshow(img[::-1], cmap='gray')
    else:
        ax.imshow(img[::-1])
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(title)
    plt.draw()
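On the separability comment above: a Gaussian kernel factors as an outer product, so two 1-D passes reproduce the 2-D convolution far more cheaply. A standalone check (hypothetical image; 'reflect' boundaries compose per axis, so the results agree):

import numpy as np
from scipy import ndimage as ndim
from scipy import stats

x1 = np.arange(-12, 13)
k1d = stats.norm.pdf(x1, 0, 4.0)
k1d /= k1d.sum()
img1 = np.random.default_rng(0).random((100, 100))
full = ndim.convolve(img1, np.outer(k1d, k1d))                          # one 2-D pass
sep = ndim.convolve1d(ndim.convolve1d(img1, k1d, axis=0), k1d, axis=1)  # two 1-D passes
print(np.allclose(full, sep))                                           # True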
Example #50
dataDir = "/home/arb/Delme/"
plt.imshow(griddata, origin='lower')

plt.gray()
cb = plt.colorbar()
cb.set_label('Value Range')
plt.xlabel('GridEast')
plt.ylabel('GridNorth')
plt.suptitle('Raw data')
plt.savefig(dataDir + 'r15_raw' + '.png')
plt.show()

#Calculate derivatives
gridSobel = nd.sobel(griddata)
gridLaplace = nd.laplace(griddata)
gridPrewitt = nd.prewitt(griddata)
gridGaussian = nd.gaussian_filter(griddata, 1)
gridMinimum = nd.minimum_filter(griddata, size=(3, 3))

#Plot a derivative
plt.imshow(gridGaussian, origin='lower')
plt.gray()
#show image
cb = plt.colorbar()
cb.set_label('Value Range')
plt.xlabel('GridEast')
plt.ylabel('GridNorth')
plt.suptitle('Raw data')
plt.savefig(dataDir + 'r15_gaussianDerivative' + '.png')
plt.show()
Example #51
    def _estimate(self, dataset):
        """

        Parameters
        ----------

        Returns
        -------
        displacements : array
            (2, num_frames*num_cycles)-array of integers giving the
            estimated displacement of each frame
        """
        params = self._params
        verbose = params['verbose']
        n_processes = params['n_processes']

        if verbose:
            print('Using ' + str(n_processes) + ' worker(s)')

        displacements = []

        for sequence in dataset:
            num_planes = sequence.shape[1]
            num_channels = sequence.shape[4]
            if num_channels > 1:
                raise NotImplementedError("Error: only one colour channel "
                                          "can be used for DFT motion correction.")

            for plane_idx in range(num_planes):
                # load into memory... need to pass numpy array to dftreg.
                # could(should?) rework it to instead accept tiff array
                if verbose:
                    print('Loading plane ' + str(plane_idx + 1) + ' of ' +
                          str(num_planes) + ' into numpy array')
                t0 = time.time()
                # reshape, one plane at a time
                frames = np.array(sequence[:, plane_idx, :, :, 0])
                frames = np.squeeze(frames)
                e1 = time.time() - t0
                if verbose:
                    print('    Loaded in: ' + str(e1) + ' s')

                # do the registering
                # registered_frames return is useless, sima later uses the
                # displacements to shift the image (apply_displacements in
                # sima/sequence.py: _align method of _MotionCorrectedSequence
                # class) but this shifting is only pixel-level, much better
                # results if sub-pixel were possible - replace sima's way of
                # shifting? this may run into problems when sima then crops the
                # final image so no empty rows/columns at edge of any frame in
                # the video (trim_criterion)
                if params['laplace'] > 0:
                    framesl = np.array([
                        np.abs(laplace(gaussian_filter(frame, params['laplace'])))
                        for frame in frames])
                else:
                    framesl = frames
                output = _register(
                    framesl,
                    upsample_factor=params['upsample_factor'],
                    max_displacement=params['max_displacement'],
                    num_images_for_mean=params['num_images_for_mean'],
                    randomise_frames=params['randomise_frames'],
                    err_thresh=params['err_thresh'],
                    max_iterations=params['max_iterations'],
                    n_processes=params['n_processes'],
                    save_fmt=params['save_fmt'],
                    save_name=params['save_name'],
                    verbose=params['verbose'],
                    return_registered=params['return_registered'])

                # sort results
                if params['return_registered']:
                    dy, dx, registered_frames = output
                else:
                    dy, dx = output

                # get results into a shape sima likes
                frame_shifts = np.zeros([len(frames), num_planes, 2])
                for idx, frame in enumerate(sequence):
                    frame_shifts[idx, plane_idx] = [dy[idx], dx[idx]]
            displacements.append(frame_shifts)

            total_time = time.time() - t0
            if verbose:
                print('    Total time for plane ' + str(plane_idx + 1) + ': ' +
                      str(total_time) + ' s')

        return displacements
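
# A self-contained sketch of the sub-pixel shifting discussed in the comments
# above (all names are illustrative, not part of sima's API): ndimage.shift
# interpolates, so non-integer displacements are possible.
import numpy as np
from scipy import ndimage

frame = np.random.rand(64, 64)        # stand-in for one movie frame
dy, dx = 0.4, -1.25                   # hypothetical sub-pixel displacement
shifted = ndimage.shift(frame, shift=(dy, dx), order=1, mode='nearest')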
Example #52
def find_stars(data):
    #If passed a list, stack and median-combine first
    if isinstance(data,list):
        warps,aligned = astt.align(data)
        aligned = np.asarray(aligned)
        im = np.median(aligned,0)
    else:
        im = data
    
    
    #Denoise the image. The original Fourier-filter experiments here were
    #debug code ending in exit() calls; a Gaussian blur is assumed instead
    #so the function actually runs (`smoothed` was otherwise undefined).
    smoothed = ndimage.gaussian_filter(im, 2)

    #Compute the second derivative at every point
    laplace = ndimage.laplace(smoothed)

    #Image should be concave down where there are stars
    stars = laplace < 0

    #Stars should also be a local min in the laplacian, so compare each pixel
    #with its four shifted neighbours (zero padding at the borders)
    row_buffer = np.zeros((1, laplace.shape[1]))
    col_buffer = np.zeros((laplace.shape[0], 1))
    above = np.vstack((laplace[1:], row_buffer))
    below = np.vstack((row_buffer, laplace[:-1]))
    right = np.hstack((laplace[:, 1:], col_buffer))
    left = np.hstack((col_buffer, laplace[:, :-1]))

    stars = stars & (laplace < above) & (laplace < below) & (laplace < right) & (laplace < left)

    #Pick a sky value
    sky = np.median(im)

    #Sigma threshold for sky level (`sky_sigma` was undefined in the
    #original; a 3-sigma cut is assumed)
    sky_sigma = 3.
    signal = im > (sky + sky_sigma*np.sqrt(sky))
    
    #Use binary erosion and propagation to remove isolated points of signal
    eroded_signal = binary_erosion(signal)
    signal = binary_propagation(eroded_signal,mask=signal)
    
    #Stars are only where signal is significant
    stars = stars & signal
    
    return stars
Example #53
def canny(image, mask, sigma, low_threshold, high_threshold, 
          ridge = False, use_image_magnitude = False):
    '''Edge filter an image using the Canny algorithm.
    
    sigma - the standard deviation of the Gaussian used
    low_threshold - threshold for edges that connect to high-threshold
                    edges
    high_threshold - threshold of a high-threshold edge
    
    ridge - detect ridges instead of edges by taking the laplacian of the
            gaussian and using kernels sensitive to the resulting ridges.
            
    use_image_magnitude - if true, use the image's magnitude for thresholding.
            This is appropriate for the ridge detector since you're looking
            to follow the ridge. If this is a number, it's used to smooth
            the image.
    
    Canny, J., A Computational Approach To Edge Detection, IEEE Trans. 
    Pattern Analysis and Machine Intelligence, 8:679-714, 1986
    
    William Green's Canny tutorial
    http://www.pages.drexel.edu/~weg22/can_tut.html
    '''
    #
    # The steps involved:
    #
    # * Smooth using the Gaussian with sigma above.
    #
    # * Apply the horizontal and vertical Sobel operators to get the gradients
    #   within the image. The edge strength is the sum of the magnitudes
    #   of the gradients in each direction.
    #
    # * Find the normal to the edge at each point using the arctangent of the
    #   ratio of the Y sobel over the X sobel - pragmatically, we can
    #   look at the signs of X and Y and the relative magnitude of X vs Y
    #   to sort the points into 4 categories: horizontal, vertical,
    #   diagonal and antidiagonal.
    #
    # * Look in the normal and reverse directions to see if the values
    #   in either of those directions are greater than the point in question.
    #   Use interpolation to get a mix of points instead of picking the one
    #   that's the closest to the normal.
    #
    # * Label all points above the high threshold as edges.
    # * Recursively label any point above the low threshold that is 8-connected
    #   to a labeled point as an edge.
    #
    # Regarding masks, any point touching a masked point will have a gradient
    # that is "infected" by the masked point, so it's enough to erode the
    # mask by one and then mask the output. We also mask out the border points
    # because who knows what lies beyond the edge of the image?
    #
    if ridge:
        smoothed = laplace(gaussian_filter(image, sigma))
        jsobel_kernel = [[-1, 2, -1], [-2, 4, -2], [-1, 2, -1]]
        isobel_kernel = [[-1, -2, -1], [2, 4, 2], [-1, -2, -1]]
    else:
        fsmooth = lambda x: gaussian_filter(x, sigma, mode='constant')
        jsobel_kernel = [[-1,0,1],[-2,0,2],[-1,0,1]]
        isobel_kernel = [[-1,-2,-1],[0,0,0],[1,2,1]]
        smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
    jsobel = convolve(smoothed, jsobel_kernel)
    isobel = convolve(smoothed, isobel_kernel)
    abs_isobel = np.abs(isobel)
    abs_jsobel = np.abs(jsobel)
    if ridge:
        if use_image_magnitude:
            if not isinstance(use_image_magnitude, bool):
                fsmooth = lambda x: \
                    gaussian_filter(x, use_image_magnitude, mode='constant')
                magnitude = smooth_with_function_and_mask(image, fsmooth, mask)
            else:
                magnitude = image
        else:
            magnitude = smoothed
    else:
        magnitude = np.sqrt(isobel*isobel + jsobel*jsobel)
    #
    # Make the eroded mask. Setting the border value to zero will wipe
    # out the image edges for us.
    #
    s = generate_binary_structure(2,2)
    emask = binary_erosion(mask, s, border_value = 0)
    emask = emask & (magnitude > 0)
    #
    #--------- Find local maxima --------------
    #
    # Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
    # 90-135 degrees and 135-180 degrees.
    #
    local_maxima = np.zeros(image.shape,bool)
    #----- 0 to 45 degrees ------
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = (pts_plus | pts_minus) & emask
    # Get the magnitudes shifted left to make a matrix of the points to the
    # right of pts. Similarly, shift left and down to get the points to the
    # top right of pts.
    c1 = magnitude[1:,:][pts[:-1,:]]
    c2 = magnitude[1:,1:][pts[:-1,:-1]]
    m  = magnitude[pts]
    w  = abs_jsobel[pts] / abs_isobel[pts]
    c_plus  = c2 * w + c1 * (1-w) <= m
    c1 = magnitude[:-1,:][pts[1:,:]]
    c2 = magnitude[:-1,:-1][pts[1:,1:]]
    c_minus =  c2 * w + c1 * (1-w) <= m
    local_maxima[pts] = c_plus & c_minus
    #----- 45 to 90 degrees ------
    # Mix diagonal and vertical
    #
    pts_plus = np.logical_and(isobel >= 0, 
                              np.logical_and(jsobel >= 0, 
                                             abs_isobel <= abs_jsobel))
    pts_minus = np.logical_and(isobel <= 0,
                               np.logical_and(jsobel <= 0, 
                                              abs_isobel <= abs_jsobel))
    pts = np.logical_or(pts_plus, pts_minus)
    pts = np.logical_and(emask, pts)
    c1 = magnitude[:,1:][pts[:,:-1]]
    c2 = magnitude[1:,1:][pts[:-1,:-1]]
    m  = magnitude[pts]
    w  = abs_isobel[pts] / abs_jsobel[pts]
    c_plus  = c2 * w + c1 * (1-w) <= m
    c1 = magnitude[:,:-1][pts[:,1:]]
    c2 = magnitude[:-1,:-1][pts[1:,1:]]
    c_minus =  c2 * w + c1 * (1-w) <= m
    local_maxima[pts] = np.logical_and(c_plus, c_minus)
    #----- 90 to 135 degrees ------
    # Mix anti-diagonal and vertical
    #
    pts_plus = np.logical_and(isobel <= 0, 
                              np.logical_and(jsobel >= 0, 
                                             abs_isobel <= abs_jsobel))
    pts_minus = np.logical_and(isobel >= 0,
                               np.logical_and(jsobel <= 0, 
                                              abs_isobel <= abs_jsobel))
    pts = np.logical_or(pts_plus, pts_minus)
    pts = np.logical_and(emask, pts)
    c1a = magnitude[:,1:][pts[:,:-1]]
    c2a = magnitude[:-1,1:][pts[1:,:-1]]
    m  = magnitude[pts]
    w  = abs_isobel[pts] / abs_jsobel[pts]
    c_plus  = c2a * w + c1a * (1.0-w) <= m
    c1 = magnitude[:,:-1][pts[:,1:]]
    c2 = magnitude[1:,:-1][pts[:-1,1:]]
    c_minus =  c2 * w + c1 * (1.0-w) <= m
    local_maxima[pts] = np.logical_and(c_plus, c_minus)
    #----- 135 to 180 degrees ------
    # Mix anti-diagonal and anti-horizontal
    #
    pts_plus = np.logical_and(isobel <= 0, 
                              np.logical_and(jsobel >= 0, 
                                             abs_isobel >= abs_jsobel))
    pts_minus = np.logical_and(isobel >= 0,
                               np.logical_and(jsobel <= 0, 
                                              abs_isobel >= abs_jsobel))
    pts = np.logical_or(pts_plus, pts_minus)
    pts = np.logical_and(emask, pts)
    c1 = magnitude[:-1,:][pts[1:,:]]
    c2 = magnitude[:-1,1:][pts[1:,:-1]]
    m  = magnitude[pts]
    w  = abs_jsobel[pts] / abs_isobel[pts]
    c_plus  = c2 * w + c1 * (1-w) <= m
    c1 = magnitude[1:,:][pts[:-1,:]]
    c2 = magnitude[1:,:-1][pts[:-1,1:]]
    c_minus =  c2 * w + c1 * (1-w) <= m
    local_maxima[pts] = np.logical_and(c_plus, c_minus)
    #
    #---- Create two masks at the two thresholds.
    #
    high_mask = np.logical_and(local_maxima, magnitude >= high_threshold)
    low_mask  = np.logical_and(local_maxima, magnitude >= low_threshold)
    #
    # Segment the low-mask, then only keep low-segments that have
    # some high_mask component in them 
    #
    labels,count = label(low_mask, np.ones((3,3),bool))
    if count == 0:
        return low_mask
    
    sums = np.bincount(labels.flatten(), high_mask.flatten())
    sums[0] = 0
    good_label = np.zeros((count+1,),bool)
    good_label[:len(sums)] = sums > 0
    output_mask = good_label[labels]
    return output_mask  
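
# Hypothetical usage of canny() above (input arrays assumed):
import numpy as np
img = np.random.rand(128, 128)
mask = np.ones(img.shape, bool)
edges = canny(img, mask, sigma=1.4, low_threshold=0.1, high_threshold=0.3)
ridges = canny(img, mask, sigma=1.4, low_threshold=0.1, high_threshold=0.3,
               ridge=True)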
Example #54
def finddd(filefile,\
           timelist=None,\
           dt_out=50.,\
           lt_start=8.,\
           halolim=None,\
           method=1,\
           plotplot=False,\
           filewind=None,\
           filemean=None,\
           save=True):

    if method == 3:
       print("importing additional scikit-image packages")
       # 'filter' was renamed 'filters' in modern scikit-image
       from skimage import filters, transform, feature
       import matplotlib.patches as mpatches
       from scipy import ndimage


    ###############################################################################
    ########################## FOR METHOD 1 FOR METHOD 2 ##########################
    ## FACLIST : how many sigmas below mean we start to consider this could be a vortex
    ## ... < 3.0 dubious low-intensity minima are caught
    ## ... 3 ~ 3.1 is a bit too low but may help, since the size of the pressure drop can underestimate the actual dust devil
    ## ... 3.2 ~ 3.3 is probably right, this is the one we choose for exploration [3.25]
    ## ... NB: values 3.2 to 3.6 yields the same number of devils but measured sizes could vary a bit
    ## ... > 3.6 makes strong minima disappear... especially >3.8
    ## ... IF NEED BE, 3.5-4 is better for problematic cases
    ###############################################################################
    faclist = [3.75]

    ################################ FOR ALL METHODS
    ## (see below) NEIGHBOR_FAC is the multiple of std used to evaluate size
    #### METHOD 1
    #neighbor_fac = 2.7
    ## --> 1: limit not discriminative enough. plus does not separate neighbouring vortices.
    ##        ... but interesting: gives an exponential law (because vortices are artificially merged?)
    ## --> 2.7: very good for method 1. corresponds usually to ~0.3
    ## --> 2: so-so. do not know what to think. but usually too low.
    #### METHOD 3 --> optimizing neighbor_fac with visual checks and superimposing wind friction (max must be at boundaries)
    ##neighbor_fac = 1.5 # too low --> vortices too large + false positives
    neighbor_fac = 2.7 # optimal --> good for separation, only a slight underestimation of size
    ##neighbor_fac = 3.0 # too high --> excellent for separation, but size quite underestimated
    ###############################################################################
    ###############################################################################

    ###############################################################################
    ###############################################################################
    if save:
        myfile1 = open(filefile+'m'+str(method)+'_'+'1.txt', 'w')
        myfile2 = open(filefile+'m'+str(method)+'_'+'2.txt', 'w')
    ###############################################################################
    ###############################################################################

    ## get the resolution within the file
    dx = ncattr(filefile,'DX') ; print("resolution in meters is: ", dx)
    ## if no halolim is given, guess it from resolution
    if halolim is None:
        extentlim = 2000. # the putative maximum extent of a vortex in m
        halolim = extentlim / dx
        print "maximum halo size is: ",halolim

    ## mean and std calculations
    ## -- std is used in both methods for limits
    ## -- mean is only used in method 1
    print "calculate mean and std, please wait."
    ## -- get time series of 2D surface pressure
    ## -- (a different file to calculate mean might be provided)
    if filemean is None:
      psfc = pp(file=filefile,var="PSFC",verbose=True).getf()
    else:
      psfc = pp(file=filemean,var="PSFC",verbose=True).getf()

    ## -- calculate mean and standard deviation
    ## -- ... calculating std at all time is not right!
    ## -- ... for mean value though, similar results with both methods
    mean = np.mean(psfc,dtype=np.float64)
    std = np.std(psfc,dtype=np.float64)
    damax = np.max(psfc)
    damin = np.min(psfc)
    ## some information about inferred limits
    print "**************************************************************"
    print "MEAN",mean
    print "STD",std
    print "LIMIT FOR PRESSURE MINIMUM:",-np.array(faclist)*std
    print "LIMIT FOR EVALUATING SIZE OF A GIVEN LOCAL MINIMUM",-neighbor_fac*std
    print "**************************************************************"
    
    # if no timelist is given, take them all
    if timelist is None:
        test = netCDF4.Dataset(filefile)
        sizet = len(test.dimensions['Time'])
        print "treat all time values: ",sizet
        timelist = range(0,sizet-1,1)

    ## LOOP ON TIME
    for time in timelist:

     ## get 2D surface pressure at a given time
     ## (this is actually so quick we don't use psfc above)
     psfc2d = pp(file=filefile,var="PSFC",t=time).getf()
     if filewind is not None:
       ustm = pp(file=filewind,var="USTM",t=time).getf()

     ## MAIN ANALYSIS. LOOP ON FAC. OR METHOD.
     for fac in faclist:
     ###fac = 3.75
     ###for method in [2,1]:
  
      ## initialize arrays
      tabij = [] ; tabsize = [] ; tabdrop = []
      tabijcenter = [] ; tabijvortex = [] ; tabdim = [] 
      tabwind = []

      ################ FIND RELEVANT POINTS TO BE ANALYZED
      ## lab is 1 for points to be treated by minimum_position routine
      ## we set elements at 1 where pressure is under mean-fac*std
      ## otherwise we set to 0 because this means we are close enough to mean pressure (background)
      lab = np.zeros(psfc2d.shape)
      if method == 1:
          # method 1: standard deviation
          lab[np.where(psfc2d < mean-fac*std)] = 1
      elif method == 2:
          # method 2: polynomial fit
          # ... tried smooth but too difficult, not accurate and too expensive
          deg = 5 # rather good (deg 10 about the same) but misses the big ones (~convective cells)
          #deg = 2 # not bad but false positives (deg 3-4 about the same, if a bit better)
          #        #(OK now with the false positive bug fixed)
          nx = psfc2d.shape[1] ; ny = psfc2d.shape[0]
          xxx = np.array(range(nx)) ; yyy = np.array(range(ny))
          anopsfc2d = psfc2d*0. ; polypsfc2d = psfc2d*0.
          for iii in range(0,nx,1):  # NB: the fits below assume a square grid (nx == ny)
             poly = np.poly1d(np.polyfit(yyy,psfc2d[iii,:],deg))
             polypsfc2d[iii,:] = poly(yyy)
          for jjj in range(0,ny,1):
             poly = np.poly1d(np.polyfit(xxx,psfc2d[:,jjj],deg))
             polypsfc2d[:,jjj] = 0.5*polypsfc2d[:,jjj] + 0.5*poly(xxx)
          ## smooth a little to avoid 'crosses' (plus, this removes PBC problems)
          polypsfc2d = ppcompute.smooth2diter(polypsfc2d,n=deg)
          # compute anomaly and find points to be explored
          anopsfc2d = psfc2d - polypsfc2d
          limlim = fac*std ## same as method 1
          lab[np.where(anopsfc2d < -limlim)] = 1
      elif method == 3:
          # method 3 : find centers of circle features using image processing techniques

          # initialize the array containing point to be further analyzed
          lab = np.zeros(psfc2d.shape)

          # enclose computations in a test to save time when no obvious vortices
          datab = psfc2d[np.where(psfc2d < mean-fac*std)]
          #datab = np.array([42]) #uncomment this to always include search!
          if datab.shape[0] == 0:
            ### if no point has significantly lower value than mean
            ### well, no need to do anything, keep lab filled with 0
            pass
          else:
            ### field to analyze: pressure
            ### --- apply a Laplace transform to highlight drops
            field = ndimage.laplace(psfc2d)
 
            ### prepare the field to be analyzed 
            ### by the Hough transform or Blob detection
            ### --> normalize it in an interval [-1,1]
            ### --> NB: dasigma serves later for Hough transform
            ### --> NB: polynomial de-trending does not seem to help
            # ... test 1. local max / min used for normalization.
            mmax = np.max(field) ; mmin = np.min(field) ; dasigma = 2.5
            ## ... test 2. global max / min used for normalization. unconvincing.
            #mmax = damax ; mmin = damin ; dasigma = 1.0 #1.5 too restrictive
            spec = 2.*((field-mmin)/(mmax-mmin) - 0.5)
 
            #### **** BLOB DETECTION ****
            #### Better than Hough transform for multiple adjacent vortices
            #### log: best / dog or doh: miss small vortices, hence the majority
            #### SITE: http://scikit-image.org/docs/dev/auto_examples/plot_blob.html
            #### PUBLISHED: https://peerj.com/articles/453/
            ### --------------------------------------------        
            ### the parameters below are aimed for efficiency
            ### ... because anyway the actual size is not detected
            ### ... so setting max_sigma to a high value is not needed
            ### --------------------------------------------
            blobs = feature.blob_log(spec, max_sigma=3, num_sigma=3, threshold=0.05)
            ### a plot to check detection
            if plotplot:
              fig, ax = mpl.subplots(1, 1)
              what_I_plot = psfc2d #spec #field
              ax.imshow(what_I_plot, cmap=mpl.cm.gray)
            ### store the detected points in lab
            for blob in blobs:
              center_x, center_y, r = blob
              # blob_log returns floats; cast for array indexing
              center_x = int(center_x) ; center_y = int(center_y)
              #lab[center_x,center_y] = 1
              # a test for faster calculations (at the expense of missing maybe 1% of vortices)
              if psfc2d[center_x,center_y] < mean-fac*std:
                lab[center_x,center_y] = 1
              if plotplot:
                circ = mpatches.Circle((center_y, center_x), r*np.sqrt(2), fill=False, edgecolor='green', linewidth=2)
                ax.add_patch(circ)
            if plotplot: mpl.show()

#################################### BEGIN TEST HOUGH TRANSFORM
#            # perform an edge detection on the field
#            # ... returns an array with True on edges and False outside
#            # http://sciunto.wordpress.com/2013/03/01/detection-de-cercles-par-une-transformation-de-hough-dans-scikit-image/    
#            edges = feature.canny(filters.sobel(spec),sigma=dasigma)
#            # initialize plot for checks
#            if plotplot:
#              fig, ax = mpl.subplots(ncols=1, nrows=1, figsize=(10,8))
#              ax.imshow(field, cmap=mpl.cm.gray)
#            ## detect circle with radius 3dx. works well. 5dx detection pretty similar.
#            ## use an Hough circle transform
#            radii = np.array([2,3])
#            hough_res = transform.hough_circle(edges, radii)
#            # analyze results of the Hough transform
#            nnn = 0 
#            sigselec = neighbor_fac
#            #sigselec = 3.
#            for radius, h in zip(radii, hough_res):
#              # number of circle features to keep
#              # ... quite large. but we want to be sure not to miss anything.
#              nup = 30 
#              maxima = feature.peak_local_max(h, num_peaks=nup)
#              # loop on detected circle features
#              for maximum in maxima:
#                center_x, center_y = maximum #- radii.max()
#                # nup is quite high so there are false positives.
#                # ... but those are easy to detect
#                # ... if pressure drop is unclear (or inexistent)
#                # ... we do not take the point into account for further analysis
#                # ... NB: for inspection give red vs. green color to displayed circles
#                diag = field[center_x,center_y] - (mean-sigselec*std)
#                ## uncomment below to keep all detections
#                #diag = -1
#                if diag < 0:  
#                    col = 'green'
#                    nnn = nnn + 1
#                    lab[center_x,center_y] = 1
#                else:
#                    col = 'red'
#                # draw circles
#                if plotplot:
#                  circ = mpatches.Circle((center_y, center_x), radius,fill=False, edgecolor=col, linewidth=2)
#                  ax.add_patch(circ)
#            if plotplot:
#              mpl.title(str(nnn)+" vortices")
#              if nnn>0: mpl.show()
#              mpl.close()
#################################### END TEST HOUGH TRANSFORM

      ## while there are still points to be analyzed...
      while 1 in lab:
        ## ... get the point with the minimum field values
        ## ... within values of field at labels lab
        if method == 1 or method == 3: 
            ij = minimum_position(psfc2d,labels=lab)
        elif method == 2:
            ij = minimum_position(anopsfc2d,labels=lab)
        ## ... store the indexes of the point in tabij
        tabij.append(ij)
        ## ... remove the point from labels to be further explored by minimum_position
        lab[ij] = 0
    
      ################ GET SIZES BASED ON THOSE FOUND POINTS
      ## reslab is the same as lab
      ## except for scanning purpose we keep the information
      ## about how went the detection
      ## --> and we set a lower fac
      ## --> above a high fac is good to catch only strong vortices
      ## --> but here a casual fac=3 is better to get accurate sizes
      ## --> or even lower as shown by plotting reslab 
      reslab = np.zeros(psfc2d.shape)
      #reslabf = np.zeros(psfc2d.shape) # TESTS
      if method == 1 or method == 3:
          reslab[np.where(psfc2d < mean-neighbor_fac*std)] = 1
          #reslabf[np.where(psfc2d < mean-neighbor_fac_fine*std)] = 1 # TESTS
      elif method == 2:
          reslab[np.where(anopsfc2d < -neighbor_fac*std)] = 1
     
      ## initialize halomax and while loop
      ## HALOMAX : maximum halo defined around a minima to evaluate the size
      ## ... halomax must be large enough to encompass a vortex
      ## ... but not too large otherwise neighboring vortex are caught
      halomax = 3 ; notconv = 9999 ; yorgl = 9999
    
      ## WHILE LOOP on HALOMAX exploration
      while ( notconv > 0 and halomax < halolim and yorgl != 0 ):
       # now browse through all points caught in previous loop
       for ij in tabij:
        ## ... OK. take each indexes found before with minimum_position
        i,j = ij[0],ij[1]
        ## EITHER
        ## ... if reslab is already 0, we do not have to do anything
        ## ... because this means point is already part of detected vortex
        ## OR
        ## ... if the ij couple is already in a previously detected vortex
        ## ... we don't actually need to consider it again
        ## ... this is necessary otherwise (sometimes a lot) of false positives
        if reslab[i,j] <= 0 or ij in tabijvortex:
          pass
        else:
          ## GET HALOS. SEE FUNCTION ABOVE.
          nmesh,maxw,maxh,reslab,tabijvortex=gethalo(ij,reslab,halomax,tabijvortex)

          ## OK. check this is actually a vortex.
          ## get the size. get the drop.
          ## store results in file
          if nmesh is not None:

            ## calculate size
            ## multiply the mesh count by the mesh area, then take the square root
            ## to get the approximate linear size of the vortex
            ## [for the equivalent circular diameter, multiply by 2./np.sqrt(np.pi)]
            size = np.sqrt(nmesh*dx*dx)

            ## check size. if not OK recompute halo with more stringent zone around pressure minimum.
            ## -- NB: reslab and tabijvortex do not need to be changed again, was done just before
            ## --     however, we could have been a little bit more subtle to disentangle twin vortices        
            # if (np.abs(maxw-maxh)*dx/size > 0.33):
            # #if (np.sqrt(maxw*maxh*dx*dx) > size):
            #    #print "asymmetry!",np.abs(maxw-maxh)*dx,size
            #    nmesh,maxw,maxh,dummy,dummy=gethalo(ij,reslabf,halomax,tabijvortex)
            #    if nmesh is not None: size = int(np.sqrt(nmesh*dx*dx))
            #    #print "new values",np.abs(maxw-maxh)*dx,size

            ## calculate drop.
            if method == 1 or method == 3: drop = -psfc2d[i,j]+mean
            else: drop = -anopsfc2d[i,j]

            #############################################################
            ##### Check this is the actual minimum (only tested so far with method=3)
            #if method == 1 or method ==3:
            #  ## ... define a halo around the minimum point
            #  ix,ax,iy,ay = i-maxw,i+maxw+1,j-maxh,j+maxh+1
            #  ## ... treat the boundary case (TBD: periodic boundary conditions)
            #  nx = reslab.shape[1] ; ny = reslab.shape[0]
            #  if ix < 0: ix = 0
            #  if iy < 0: iy = 0
            #  if ax > nx: ax = nx
            #  if ay > ny: ay = ny
            #  ## ... keep real minimal value
            #  ## DOMAINMIN --> does not change a lot results (not worth it)
            #  domainmin = np.max(-psfc2d[ix:ax,iy:ay])+mean
            #  if drop < domainmin:
            #     print "corrected drop",drop,domainmin
            #     drop = domainmin
            #  ### DOMAINDROP --> leads to underestimate drops in most cases
            #  #domaindrop = np.max(psfc2d[ix:ax,iy:ay])-np.min(psfc2d[ix:ax,iy:ay])
            #  #drop = domaindrop
            #############################################################

            ## if available get info on friction velocity
            if filewind is not None:
              ## ... define a halo around the minimum point
              ix,ax,iy,ay = i-maxw,i+maxw+1,j-maxh,j+maxh+1
              ## ... treat the boundary case (TBD: periodic boundary conditions)
              nx = reslab.shape[1] ; ny = reslab.shape[0]
              if ix < 0: ix = 0
              if iy < 0: iy = 0
              if ax > nx: ax = nx
              if ay > ny: ay = ny
              ## WINDMAX
              windmax = np.max(ustm[ix:ax,iy:ay])
              tabwind.append(windmax)
            else:
              tabwind.append(0.) 

            ## store info in dedicated arrays
            tabdim.append((maxw*dx,maxh*dx))
            tabsize.append(size)
            tabdrop.append(drop)
            tabijcenter.append(ij)
            #print "... VORTEX!!!! size %.0f drop %.1f coord %.0f %.0f" % (size,drop,i,j)

       ## count how many points are not converged and left to be analyzed
       notconv = len(np.where(reslab > 1)[0])
       yorgl = len(np.where(reslab == 1)[0])
    
       ## increment halomax
       ## to speed-up, the increment grows slightly with the considered halomax
       halomax = halomax + halomax // 2
    
      ## just for simpler plots.
      reslab[reslab > 2] = 4
      reslab[reslab < -2] = -4

      ## give some info to the user
      if len(tabsize) > 0:
        nvortex = len(tabsize)
        maxsize = np.max(tabsize)
        maxdrop = np.max(tabdrop)
        maxwind = np.max(tabwind)
      else:
        nvortex = 0
        maxsize = 0
        maxdrop = 0.
        maxwind = 0.
      notconv = len(np.where(reslab > 1)[0])
      print "t=%3.0f / n=%2.0f / s_max=%4.0f / d_max=%4.1f / halo_out=%3.0f / notconvp=%3.1f / wind=%4.1f" \
            % (time,nvortex,maxsize,maxdrop,halomax,100.*notconv/float(reslab.size),maxwind)        
    
      ## save results in a text file
      if save:
          # convert model time to local time (one Martian hour is ~3700 s)
          ttt = lt_start + time*dt_out/3700.
          # write files
          myfile2.write( "%7.4f ; %5.0f ; %6.1f ; %8.3f ; %8.3f\n" % (ttt,nvortex,maxsize,maxdrop,maxwind) )
          for iii in range(len(tabsize)):
              myfile1.write( "%7.4f ; %6.1f ; %8.3f ; %5.0f ; %5.0f ; %8.3f\n" \
              % (ttt,tabsize[iii],tabdrop[iii],tabdim[iii][0],tabdim[iii][1],tabwind[iii]) )

      #### PLOT PLOT PLOT PLOT
      damaxsize = 10000.
      #damaxsize = 400.
      if (nvortex>0 and plotplot) or (nvortex>0 and maxsize > damaxsize):
      #if nvortex > 200:
       mpl.figure(figsize=(12,8))
       myplot = plot2d()
       myplot.x = np.array(range(psfc2d.shape[1]))*dx/1000.
       myplot.y = np.array(range(psfc2d.shape[0]))*dx/1000.
       myplot.title = str(nvortex)+" vortices found (indicated diameter / pressure drop)"
       myplot.xlabel = "x distance (km)"
       myplot.ylabel = "y distance (km)"
       if method != 2:
           #myplot.f = ustm 
           myplot.f = psfc2d
           #myplot.vmin = damin
           #myplot.vmax = damax
           myplot.vmin = mean - 6.*std
           myplot.vmax = mean + 6.*std
       else:
           myplot.f = anopsfc2d  # the if-branch above uses `.f`; `.field` here looked like a typo
           myplot.vmin = -1.5
           myplot.vmax = 0.5
       myplot.fmt = "%.1f"
       myplot.div = 20
       myplot.colorbar = "spectral"
       myplot.make()
      
       ### ANNOTATIONS
       for iii in range(len(tabsize)):
        ij = tabijcenter[iii]
        coord1 = ij[1]*dx/1000.
        coord2 = ij[0]*dx/1000.
        txt = "%.0f/%.2f/%.0f" % (tabsize[iii],tabdrop[iii],100*np.abs(tabdim[iii][0]-tabdim[iii][1])/tabsize[iii])
        txt = "%.0f m / %.2f Pa" % (tabsize[iii],tabdrop[iii])
        mpl.annotate(txt,xy=(coord1,coord2),
             xytext=(-10,-30),textcoords='offset points',ha='center',va='bottom',\
             bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.3),\
             arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=-0.3',color='red'),\
             size='small')
    
       ###show detection
       lev = [-4,-2,-1,0,1,2,4] ## all colours for detection cases
       lev = [-1,0] # show dubious areas as detection areas
       lev = [-1,1] # show dubious areas as no-detection areas
       mpl.contourf(myplot.x,myplot.y,reslab,alpha=0.9,cmap=mpl.cm.get_cmap("binary_r"),levels=lev)
    
       ### SHOW OR SAVE IN FILE
       mpl.show()
       #save(mode="png",filename="detectm"+"_"+str(time)+"_"+str(method),folder="detect/",includedate=False)
    
    ## close data files
    if save:
        myfile1.close()
        myfile2.close()
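
# A minimal, self-contained sketch of the detection core used above: label
# pixels significantly below the mean, then repeatedly take the position of
# the deepest remaining minimum (all names are illustrative).
import numpy as np
from scipy.ndimage import minimum_position

field = np.random.randn(50, 50)                  # stand-in for psfc2d
lab = (field < field.mean() - 2.*field.std()).astype(int)
minima = []
while 1 in lab:
    ij = minimum_position(field, labels=lab)     # deepest labelled point
    minima.append(ij)
    lab[ij] = 0                                  # remove it and continue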
Example #55
def img2polydata_simple(image, dictionnaire=None, verbose=True):
    """
    Convert a |SpatialImage| to a PolyData object with cells surface

    : Parameters :
    dictionnaire : cell->scalar dictionary
    """

    labels_provi = list(np.unique(image))
    # the label lists are already filtered here
    #~ labels = [i for i in labels_provi if i not in list_remove]
    labels = labels_provi

    try:      labels.remove(0)
    except ValueError:   pass
    try:      labels.remove(1)
    except ValueError:   pass

    #print image.shape

    xyz = {}
    if verbose:print "on récupère les bounding box"
    bbox = nd.find_objects(image)
    #print labels
    for label in range(2, max(labels)+1):
        if label not in labels: continue
        if verbose: print("% until cells are built", label/float(max(labels))*100)
        slices = bbox[label-1]
        label_image = (image[slices] == label)
        #here we could add a laplacian function to only have the external shape
        #first Laplace pass: non-zero response marks the cell's outer voxel
        #layer, which is stripped off
        mask = nd.laplace(label_image)
        label_image[mask!=0] = 0
        #second Laplace pass: keep only the boundary of what remains, i.e. a
        #one-voxel-thick shell of the cell
        mask = nd.laplace(label_image)
        label_image[mask==0]=0
        # compute the indices of voxel with adequate label
        a = np.array(label_image.nonzero()).T
        a+=[slices[0].start, slices[1].start, slices[2].start ]
        #print a.shape
        if a.shape[1] == 4:
            #print a
            pass
        else:
            xyz[label] = a

    vx,vy,vz = image.resolution

    polydata = tvtk.AppendPolyData()
    polys = {}
    filtre=[i for i in xyz.keys() if i in dictionnaire.keys()]
    k=0.0
    for c in filtre:
        if verbose: print "% until first polydata is built", k/float(len(filtre))*100
        k+=1.
        p = xyz[c].astype(float)
        pd = tvtk.PolyData(points=p)
        pd.point_data.scalars = [float(dictionnaire[c]) for i in range(len(p))]
        f=tvtk.VertexGlyphFilter(input=pd)
        f2=tvtk.PointDataToCellData(input=f.output)
        polys[c]=f2.output
        polydata.add_input(polys[c])
        polydata.set_input_array_to_process(0,0,0,0,0)

    return polydata
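
# Standalone sketch of the two-pass Laplace trick in the function above: the
# first pass strips the cell's outermost voxel layer, the second keeps only
# the boundary of what remains, leaving a one-voxel-thick shell (array names
# are illustrative).
import numpy as np
from scipy import ndimage as nd

region = np.zeros((20, 20, 20), dtype=np.int16)
region[5:15, 5:15, 5:15] = 1
region[nd.laplace(region) != 0] = 0    # erode: drop the outer voxel layer
region[nd.laplace(region) == 0] = 0    # keep only the new boundary layer
shell_voxels = np.array(region.nonzero()).T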
Example #56
    in_sub.setBounds(out_sub.c0*dec, out_sub.r0*dec, out_sub.Nc*dec, out_sub.Nr*dec, update=True)
    z=in_sub.z[0,:,:]
    mask=np.ones_like(in_sub.z[0,:,:])
    mask[np.isnan(in_sub.z[0,:,:])]=0
    mask[in_sub.z[0,:,:]==noData]=0
    out_temp=np.zeros([len(out_bands), stride, stride])

    if np.all(mask.ravel()==0):
        out_temp=out_temp+np.nan
        out_sub.z=out_temp
        out_sub.setBounds(out_sub.c0+pad, out_sub.r0+pad, out_sub.Nc-2*pad, out_sub.Nr-2*pad)
        out_sub.writeSubsetTo(out_bands, out_sub)
        continue

    if (args.R_tol is not None) | (args.facet_tol is not None):
        lap=np.abs(snd.laplace(in_sub.z[0,:,:], mode='constant', cval=0.0))

    if args.R_tol is not None:
        mask[lap>args.R_tol]=0

    if args.facet_tol is not None:
        mask1=mask.copy()
        mask1[lap < args.facet_tol]=0
        mask1=snd.binary_closing(snd.binary_opening(mask1, structure=opening_kernel), structure=closing_kernel)
        #mask1=snd.binary_erosion(mask1, structure=simplify_kernel);
        mask[mask1==0]=0

    if args.smooth_scale is not None:
        zs, mask2 = smooth_corrected(z, mask, w_smooth)
        mask[np.abs(in_sub.z[0,:,:]-zs)>args.smooth_tol]=0.
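
# Standalone sketch of the Laplacian roughness screen used above: pixels whose
# absolute curvature exceeds a tolerance are masked out (threshold assumed).
import numpy as np
from scipy import ndimage as snd

z = np.random.randn(100, 100).cumsum(axis=0)     # synthetic rough surface
lap = np.abs(snd.laplace(z, mode='constant', cval=0.0))
mask = np.ones_like(z)
mask[lap > 5.0] = 0                              # analogue of the R_tol test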
Example #57
		currentImage = io.imread(currentLoadingFolder+image)
		resizedImage = resize(currentImage,(200,200))
		sizeOfImage = np.shape(currentImage)
		croppedImage = currentImage[(sizeOfImage[0]-sizeOfNewImage[0])//2:(sizeOfImage[0]+sizeOfNewImage[0])//2,(sizeOfImage[1]-sizeOfNewImage[1])//2:(sizeOfImage[1]+sizeOfNewImage[1])//2]
		flippedImageH = flipImageHorizontally(croppedImage)
		flippedImageV = flipImageVertically(croppedImage)
		currentFolder.append(resizedImage)
		currentFolderLabels.append(index)
		currentFolder.append(croppedImage)
		currentFolderLabels.append(index)
		currentFolder.append(flippedImageH)
		currentFolderLabels.append(index)
		currentFolder.append(flippedImageV)
		currentFolderLabels.append(index)
		if laplaceImages:
			currentLaplaceImage = scImage.laplace(currentImage)
			currentFolder.append(currentLaplaceImage)
			currentFolderLabels.append(index)
	data.append(currentFolder)
	labels.append(currentFolderLabels)
	index = index + 1
print("DATA READING DONE")
index = 0
for i in labels:
	labels[index] = np_utils.to_categorical(i, 15)
	index = index + 1
print("LABELS WERE CHANGED")

#MAKE TRAINING AND TEST
Xtraining = []
Ytraining = []
Example #58
 def test_laplace_filter(self):
     template = np.random.rand(*self.shape)
     self.corr._template = self.corr._laplace_filter(template)
     self.assertTrue(np.allclose(self.corr._template, laplace(template, mode='constant')))