def phase_stretch_transform(img, LPF, S, W, threshold_min, threshold_max, flag):
    L = 0.5
    x = np.linspace(-L, L, img.shape[0])
    y = np.linspace(-L, L, img.shape[1])
    [X1, Y1] = (np.meshgrid(x, y))
    X = X1.T
    Y = Y1.T
    theta, rho = cart2pol(X, Y)
    orig = ((np.fft.fft2(img)))
    expo = np.fft.fftshift(np.exp(-np.power((np.divide(rho, math.sqrt((LPF ** 2) / np.log(2)))), 2)))
    orig_filtered = np.real(np.fft.ifft2((np.multiply(orig, expo))))
    PST_Kernel_1 = np.multiply(np.dot(rho, W), np.arctan(np.dot(rho, W))) - 0.5 * np.log(
        1 + np.power(np.dot(rho, W), 2))
    PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * S
    temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)), np.fft.fft2(orig_filtered))
    orig_filtered_PST = np.fft.ifft2(temp)
    PHI_features = np.angle(orig_filtered_PST)
    if flag == 0:
        out = PHI_features
    else:
        features = np.zeros((PHI_features.shape[0], PHI_features.shape[1]))
        features[PHI_features > threshold_max] = 1
        features[PHI_features < threshold_min] = 1
        features[img < (np.amax(img) / 20)] = 0

        out = features
        out = mh.thin(out, 1)
        out = mh.bwperim(out, 4)
        out = mh.thin(out, 1)
        out = mh.erode(out, np.ones((1, 1)))
    return out, PST_Kernel
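
# Note: the phase_stretch_transform examples on this page call a cart2pol()
# helper that is not shown here. A minimal sketch, assuming the MATLAB-style
# convention (it matches the np.arctan2 / np.hypot pair used explicitly in
# Example #2 below):
import numpy as np

def cart2pol(x, y):
    # Cartesian -> polar: angle first, then radius
    theta = np.arctan2(y, x)
    rho = np.hypot(x, y)
    return theta, rho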
Example #2
def pst_algorithm(image, LPF, Phase_strength, Warp_strength, Threshold_min,
                  Threshold_max, Morph_flag):
    L = 0.5
    x = np.linspace(-L, L, image.shape[0])
    y = np.linspace(-L, L, image.shape[1])
    [X1, Y1] = (np.meshgrid(x, y))
    X = X1.T
    Y = Y1.T
    [THETA, RHO] = [np.arctan2(Y, X),
                    np.hypot(X, Y)]  # cartesian to polar coordinates

    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = np.fft.fft2(image)
    expo = np.fft.fftshift(
        np.exp(-np.power((np.divide(RHO, math.sqrt((LPF**2) /
                                                   np.log(2)))), 2)))
    Image_orig_filtered = np.real(
        np.fft.ifft2((np.multiply(Image_orig_f, expo))))

    # Constructing the PST Kernel
    PST_Kernel_1 = np.multiply(
        np.dot(RHO, Warp_strength), np.arctan(np.dot(RHO, Warp_strength))
    ) - 0.5 * np.log(1 + np.power(np.dot(RHO, Warp_strength), 2))
    PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * Phase_strength

    # Apply the PST Kernel
    temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)),
                       np.fft.fft2(Image_orig_filtered))
    Image_orig_filtered_PST = np.fft.ifft2(temp)

    # Calculate phase of the transformed image
    PHI_features = np.angle(Image_orig_filtered_PST)

    if Morph_flag == 0:
        out = PHI_features
        out = (out / np.max(out)) * 3
    else:
        # find image sharp transitions by thresholding the phase
        features = np.zeros((PHI_features.shape[0], PHI_features.shape[1]))
        features[PHI_features > Threshold_max] = 1  # Bi-threshold decision
        features[PHI_features < Threshold_min] = 1  # as the output phase has both positive and negative values
        features[image < (np.amax(image) / 20)] = 0  # Removing edges in the very dark areas of the image (noise)

        # apply binary morphological operations to clean the transformed image
        out = features
        out = mh.thin(out, 1)
        out = mh.bwperim(out, 4)
        out = mh.thin(out, 1)
        out = mh.erode(out, np.ones((1, 1)))

    return out
Example #3
def phase_stretch_transform(img, LPF, S, W, Threshold_min, Threshold_max,
                            flag):
    L = 0.5
    x = np.linspace(-L, L, img.shape[0])
    y = np.linspace(-L, L, img.shape[1])
    X, Y = np.meshgrid(x, y)
    p, q = X.T, Y.T
    theta, rho = cart2pol(p, q)

    # Implement the PST formula step by step, from right to left,
    # starting with the FFT of the input image,
    orig = np.fft.fft2(img)

    # build the low-pass localization kernel L[p, q],
    expo = np.fft.fftshift(
        np.exp(-np.power((np.divide(rho, math.sqrt((LPF**2) /
                                                   np.log(2)))), 2)))

    # smooth the image with the low-pass kernel,
    orig_filtered = np.real(np.fft.ifft2((np.multiply(orig, expo))))

    # construct the phase (PST) kernel,
    PST_Kernel_1 = np.multiply(np.dot(rho, W), np.arctan(np.dot(
        rho, W))) - 0.5 * np.log(1 + np.power(np.dot(rho, W), 2))
    PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * S

    # multiply the filtered spectrum by the phase kernel,
    temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)),
                       np.fft.fft2(orig_filtered))

    # take the inverse FFT,
    orig_filtered_PST = np.fft.ifft2(temp)

    # and take the angle to obtain the phase of the transformed image.
    PHI_features = np.angle(orig_filtered_PST)

    if flag == 0:
        out = PHI_features
    else:
        # Threshold the phase,
        features = np.zeros((PHI_features.shape[0], PHI_features.shape[1]))
        features[PHI_features > Threshold_max] = 1
        features[PHI_features < Threshold_min] = 1
        features[img < (np.amax(img) / 20)] = 0

        # then apply binary morphological operations to clean up the result.
        out = features
        out = mh.thin(out, 1)
        out = mh.bwperim(out, 4)
        out = mh.thin(out, 1)
        out = mh.erode(out, np.ones((1, 1)))
    return out, PST_Kernel
Example #4
def count_holes(dir_name, extension='.tif', plotting=False, dpi=300, transparent=True):
    total_holes = []
    i = 0
    for files in sorted(os.listdir(dir_name)):
        if files.endswith(extension):
            scan = os.path.join(dir_name,files)
            img = tf.imread(scan)
            img[img > 0] = -1
            skel = mh.thin(img)
            noholes = mh.morph.close_holes(skel)
            cskel = np.logical_not(skel)
            choles = np.logical_not(noholes)
            holes = np.logical_and(cskel,noholes)
            lab, n = mh.label(holes)
            total_holes.append(n)
            if plotting:
                fig = plt.figure()
                plt.imshow(img)
                plt.tight_layout(pad=0)
                fig.savefig(dir_name + '/binary_{}_{:03d}.png'.format(dir_name,i+1), dpi=dpi, transparent=transparent)
                plt.close(fig)
                fig = plt.figure()
                plt.imshow(skel)
                plt.tight_layout(pad=0)
                fig.savefig(dir_name + '/skel_{}_{:03d}.png'.format(dir_name,i+1), dpi=dpi, transparent=transparent)
                plt.close(fig)
                fig = plt.figure()
                plt.imshow(lab)
                plt.tight_layout(pad=0)
                fig.savefig(dir_name + '/labels_{}_{:03d}.png'.format(dir_name,i+1), dpi=dpi, transparent=transparent)
                plt.close(fig)
            i += 1

    return total_holes
Example #5
 def skeleton(self):
     self.image = removeframe(readgreyimage(self.image))
     self.ip = dealwhitelabel(self.image, what='remove', ontooriginal=False)
     # self.ip=openclose(self.ip,open=False)
     self.sk = mh.thin(self.ip > 127)
     self.sk = pruning(self.sk, size=15)
     return self
Example #6
def chromatids_elements(TopHatedChromosome):
    '''Take a High pass filtered (or top hat) image of a chromosome and label the chromatids elements
    '''
    threshed = TopHatedChromosome > 0
    #threshed = mh.open(threshed)
    labthres, _ = mh.label(threshed)
    labsz = mh.labeled.labeled_size(labthres)
    mh.labeled.remove_regions_where(labthres, labsz < 2, inplace=True)
    threshed = labthres > 0

    skel2 = mh.thin(threshed)
    bp2 = branchedPoints(skel2, showSE=False) > 0
    rem = np.logical_and(skel2, np.logical_not(bp2))
    labskel, _ = mh.labeled.label(rem)
    #print labskel.dtype
    size_sk = mh.labeled.labeled_size(labskel)
    #print size_sk
    skelem = mh.labeled.remove_regions_where(labskel, size_sk < 4)

    distances = mh.stretch(mh.distance(threshed))
    surface = (distances.max() - distances)
    chr_label = mh.cwatershed(surface, skelem)
    #print chr_label.dtype, type(chr_label)
    chr_label *= threshed

    #This conversion is important !!
    chr_label = chr_label.astype(np.intc)
    #-------------------------------
    mh.labeled.relabel(chr_label, inplace=True)
    labsize2 = mh.labeled.labeled_size(chr_label)
    cleaned = mh.labeled.remove_regions_where(chr_label, labsize2 < 8)
    mh.labeled.relabel(cleaned, inplace=True)
    return cleaned
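
# A hedged usage sketch for chromatids_elements(): the file name and the
# structuring-element size are placeholders, and the function also relies on a
# branchedPoints() helper that is not shown on this page.
import numpy as np
import mahotas as mh

chrom = mh.imread('chromosome.png', as_grey=True).astype(np.uint8)  # hypothetical input
background = mh.open(chrom, np.ones((9, 9), bool))  # morphological opening = smooth background
tophat = chrom - background                         # top-hat: keep thin bright structures
labelled = chromatids_elements(tophat)
print('%d chromatid elements found' % labelled.max())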
Example #7
def objectfeatures(img):
    """
    values=objectfeatures(img)

    This implements the object features described in
    "Object Type Recognition for Automated Analysis of Protein Subcellular Location"
    by Ting Zhao, Meel Velliste, Michael V. Boland, and Robert F. Murphy
    in IEEE Transaction on Image Processing
    """

    protimg = img.get("procprotein")
    dnaimg = img.channeldata.get("procdna", None)
    assert (
        dnaimg is None or protimg.shape == dnaimg.shape
    ), "pymorph.objectfeatures: DNA image is not of same size as Protein image."

    labeled, N = ndimage.label(protimg, ones((3, 3)))
    if not N:
        return np.zeros((0, 11))

    sofs = np.zeros((N, 11))
    indices = np.arange(1, N + 1)
    if dnaimg is not None:
        dnacofy, dnacofx = ndimage.center_of_mass(dnaimg)
        bindna = dnaimg > 0
        # According to the documentation, it shouldn't matter if indices is None,
        # but in my version of scipy.ndimage, you *have* to use indices.
        centers = ndimage.center_of_mass(protimg, labeled, indices)
        if N == 1:
            centers = list(centers)
        centers = np.asarray(centers)
        centers -= np.array((dnacofy, dnacofx))
        centers **= 2
        sofs[:, 1] = np.sqrt(centers.sum(1))
    locations = ndimage.find_objects(labeled, N)
    sofs[:, 9] = ndimage.measurements.sum(protimg, labeled, indices)
    for obji in xrange(N):
        slice = locations[obji]
        binobj = (labeled[slice] == (obji + 1)).copy()
        protobj = protimg[slice]
        binskel = thin(binobj)
        objhull = convexhull(binobj)
        no_of_branch_points = fast_sum(find_branch_points(binskel))
        hfeats = hullfeatures(binobj, objhull)
        sofs[obji, 0] = fast_sum(binobj)
        if dnaimg is not None:
            sofs[obji, 2] = fast_sum(binobj & bindna[slice])
        sofs[obji, 3] = hfeats[2]
        sofs[obji, 4] = euler(binobj)
        sofs[obji, 5] = hfeats[1]
        sofs[obji, 6] = fast_sum(binskel)
        sofs[obji, 7] = hfeats[0]
        sofs[obji, 9] /= fast_sum(binskel * protobj)
        sofs[obji, 10] = no_of_branch_points
    sofs[:, 2] /= sofs[:, 0]
    sofs[:, 8] = sofs[:, 6] / sofs[:, 0]
    sofs[:, 10] /= sofs[:, 6]
    return sofs
Example #8
 def skel(self, img):
     # zero the border rows/columns so the distance field and the medial-axis
     # skeleton give consistent results at the image edges
     img[0, :] = 0
     img[-1, :] = 0
     img[:, -1] = 0
     img[:, 0] = 0
     dmap = m.distance(img > 0, metric='euclidean')
     dmap = np.sqrt(dmap) * 2
     skelImg = m.thin(img > 0)

     return skelImg, dmap
Example #9
def _objskelfeats(objimg):
    """
    feats = _objskelfeats(objimg)

    Calculate skeleton features for the object OBJIMG.
    """
    objimg = objimg
    objbin = objimg > 0
    objsize = objbin.sum()

    if objsize == 0:
        return numpy.zeros(5)

    objskel = thin(objbin)
    skellen = objskel.sum()


    skelhull = convexhull(objskel)
    hullsize = skelhull.sum()
    hullsize = max(hullsize, skellen) # Corner cases such as [[1]]

    skel_hull_area_ratio = skellen / hullsize

    skel_obj_area_ratio = skellen/objsize

    skel_fluor = (objimg * objskel).sum()
    obj_fluor = objimg.sum()
    skel_obj_fluor_ratio = skel_fluor/obj_fluor

    branch_points = find_branch_points(objskel)
    no_of_branch_points = branch_points.sum()
    return numpy.array([
            skellen,
            skel_hull_area_ratio,
            skel_obj_area_ratio,
            skel_obj_fluor_ratio,
            no_of_branch_points/skellen])
import numpy as np
import mahotas
from pylab import imshow, savefig
A = np.zeros((100,100), bool)
A[40:60] = 1
W = mahotas.thin(A)
D = mahotas.distance(~W)
imshow(D)
savefig('distance.png')

def n_skeleton_branched_points(image):
    """Number of branched points of skeleton."""
    skeleton = mh.thin(image)
    b_points = branched_points(skeleton)
    return (sum(sum(b_points != False)))
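
# Several snippets on this page lean on helpers such as branched_points(),
# branchedPoints(), find_branch_points() and end_points() that are not shown.
# A minimal neighbour-counting sketch of the same idea (not the original
# implementations): on a thinned skeleton, pixels with three or more
# 8-neighbours are (roughly) branch points and pixels with exactly one
# neighbour are end points.
import numpy as np
from scipy import ndimage as ndi

def _neighbour_count(skeleton):
    # 8-connected neighbour count at every skeleton pixel, zero elsewhere
    sk = skeleton.astype(np.uint8)
    kernel = np.array([[1, 1, 1],
                       [1, 0, 1],
                       [1, 1, 1]], dtype=np.uint8)
    counts = ndi.convolve(sk, kernel, mode='constant', cval=0)
    return counts * sk

def branch_points_sketch(skeleton):
    return _neighbour_count(skeleton) >= 3

def end_points_sketch(skeleton):
    return _neighbour_count(skeleton) == 1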
def processImage(filepath, median_filter_size=15, small_object_size=40, fill_small_holes_n_iterations=2, n_prune=15,
                 bg_greyscale=250, crack_greyscale=245):
    """
    Extract a binary image of pixel-thick cracks from an image of a cracked material.

    For details, see Griffiths et al. (2017) and supplementary materials (provided in ./references folder).

    Griffiths, L., Heap, M.J., Baud, P., Schmittbuhl, J., 2017. Quantification of microcrack characteristics and
    implications for stiffness and strength of granite. International Journal of Rock Mechanics and Mining Sciences 100,
    138–150. https://doi.org/10.1016/j.ijrmms.2017.10.013

    Args:
        filepath (string): path to the image file to be processed
        median_filter_size (int): width (in pixels) of the square median filter window. The value at the center of the
        moving window is replaced by the median of the values within the window. This should be set to a value just
        larger than the crack width.
        small_object_size (int): connected objects in the thresholded image that fit within a square of this size
        (in pixels) will be removed from the image.
        fill_small_holes_n_iterations (int): Before skeletonisation of the segmented image, any holes in the segmented
        cracks must be removed. This value gives the number of times the image will be dilated and eroded, to fill all
        holes. 2 is often enough.
        n_prune: Number of times the skeletonised cracks are pruned i.e., end points are removed. This value should be
        greater than the width of the segmented cracks before skeletonisation, or the median_filter_size, which should
        be similar.
        bg_greyscale: The lower boundary of the image mask defined prior to the watershed segmentation. Any pixel with
        a greyscale value below this value is "background"
        crack_greyscale: The lower boundary of the image mask defined prior to the watershed segmentation. Any pixel
        with a greyscale value above this value is "crack".

    Returns:
        cracks_skeleton_restored (numpy array): Binary image of the skeletonised cracks.


    """

    # img_orig = np.uint8( scipy.misc.imread(filename, flatten = True) )
    image = Image.open(filepath).convert('L')
    img_orig = np.uint8(image)
    
    # img = img[2:-2, 2:-2] # Microscope image borders are all same grayscale
    # img = img[760:780, 923:977]

    """ Filter image """
    img = (255 - img_orig)  # Switch grayscale
    img_median = ndi.median_filter(img, median_filter_size)  # Median filter, good at conserving edges
    img_filtered = (255 - cv2.subtract(img, img_median))  # Subtract filtered image from original (cracks = white)
    
    """ Segmentation """
    markers = np.zeros_like(img_filtered)  # Mark the different regions of the image
    markers[img_filtered > bg_greyscale] = 1  # Minimum crack grayscales
    markers[img_filtered < crack_greyscale] = 2  # Maximum crack grayscales

    # Plot median filtered image and mask
    f, ax = plt.subplots(1, 2, sharex=True, sharey=True)
    ax[0].imshow(img_filtered, cmap='gray')
    ax[1].imshow(markers, cmap='gray')
    plt.show()
    
    elevation_map = filters.sobel(img_filtered)  # Edge detection for watershed
    segmented = np.abs(255 - 255 * watershed(elevation_map, markers))  # Watershed segmentation, white on black

    """ Thin, prune and label cracks """
    cracks = removeSmallObjects(segmented, small_object_size)  # Remove small objects
    cracks = fillSmallHoles(cracks, fill_small_holes_n_iterations)  # Fill small holes in cracks
    cracks = binary_dilation(cracks)  # Dilate before thinning
    cracks_skeleton = 255 * np.int8(mh.thin(cracks > 0))  # Skeletonise image
    cracks_skeleton_pruned = removeEndPoints(cracks_skeleton, n_prune)  # Remove skeletonisation artefacts
    cracks_skeleton_pruned_no_bp = removeBranchPoints(cracks_skeleton_pruned)  # Remove branch points to separate cracks
    cracks_skeleton_pruned_no_bp_2_ep = removeEndPointsIter(
        cracks_skeleton_pruned_no_bp)  # Remove end points until 2 per crack
    cracks_skeleton_restored = restoreBranches(cracks_skeleton_pruned_no_bp_2_ep,
                                               cracks_skeleton)  # Restore branches without creating new endpoints

    # Plot original and final image
    f, ax = plt.subplots(1, 2, sharex=True, sharey=True)
    ax[0].imshow(img_orig, cmap='gray')
    ax[1].imshow(cracks_skeleton_restored, cmap='gray')
    plt.show()
    
    # Save images
    save_image.counter = 0
    save_image(img_orig)
    save_image(255 - img_median)
    save_image(255 - img_filtered)
    
    save_image(255 * elevation_map / np.max(elevation_map))
    save_image(255 * segmented / np.max(segmented))
    save_image(255 * cracks / np.max(cracks))
    save_image(255 * cracks_skeleton / np.max(cracks_skeleton))
    save_image(255 * cracks_skeleton_pruned / np.max(cracks_skeleton_pruned))
    save_image(255 * cracks_skeleton_pruned_no_bp / np.max(cracks_skeleton_pruned_no_bp))
    save_image(cracks_skeleton_pruned_no_bp_2_ep)
    save_image(cracks_skeleton_restored)

    return cracks_skeleton_restored
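
# A hedged usage sketch for processImage(): the file name is a placeholder and
# the helpers it calls (removeSmallObjects, fillSmallHoles, removeEndPoints,
# removeBranchPoints, removeEndPointsIter, restoreBranches, save_image) must be
# defined in the same module.
import numpy as np

skeleton = processImage('granite_section.tif',       # hypothetical micrograph
                        median_filter_size=15,
                        small_object_size=40,
                        n_prune=15)
print('crack skeleton covers %d pixels' % np.count_nonzero(skeleton))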
# import numpy for standard numerical calculations
import numpy as np
# import mahotas (used below as "m") for image I/O, distance transform and thinning
import mahotas as m

# read the image with mahotas as a grey image
img = m.imread('./testimg4.jpg', as_grey=True)
# read the image with mahotas again to obtain a color image where we can draw the ReebGraph in red (vertices) and green (edges)
imgColor = m.imread('./testimg4.jpg')
# Threshold to remove artifacts from the jpg compression
img = (img > 100)
#get the dimensions of the image
x, y = np.shape(img)

#use the distance transform to obtain the distances per pixel of the medial axis
dmap = m.distance(img, metric='manhattan')
#use mathematical morphology to obtain the medial axis (thinning function of mahotas)
skelImg = m.thin(img)

# draw the medial axis in the image
for idx, i in enumerate(skelImg):
    for jdx, j in enumerate(i):
        if skelImg[idx, jdx] == True:
            imgColor[idx, jdx] = (255, 1, 1)
            try:
                imgColor[idx + 1, jdx] = (255, 1, 1)
            except:
                pass
            imgColor[idx - 1, jdx] = (255, 1, 1)
            try:
                imgColor[idx, jdx + 1] = (255, 1, 1)
            except:
                pass
            imgColor[idx, jdx - 1] = (255, 1, 1)
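
# The pixel-by-pixel loop above can be written as a single boolean-indexed
# assignment: dilating the one-pixel skeleton with mahotas' default cross
# structuring element gives essentially the same cross-shaped highlight. A sketch:
thick = m.dilate(skelImg)           # grow the medial axis by one pixel
imgColor[thick] = (255, 1, 1)       # paint every highlighted pixel red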
 def skeletonisation(self):
     #Skeletonisation by thinning
     return mh.thin(self.imgArray)
Example #15
        self.rotatedIm=nd.rotate(self.particuleImage,majorAngle)
        self.rotatedFlag=True
        return majorAngle,compassTable,self.rotatedIm
        
def PST(I,
        LPF=0.21,
        Phase_strength=0.48,
        Warp_strength=12.14,
        Threshold_min=-1,
        Threshold_max=0.0019,
        Morph_flag=1):
    # I: image
    # Gaussian Low Pass Filter
    #	LPF = 0.21
    # PST parameters:
    # 	Phase_strength = 0.48
    #	Warp_strength = 12.14
    # Thresholding parameters (for post processing after the edge is computed)
    #	Threshold_min = -1
    #	Threshold_max = 0.0019
    # To compute analog edge, set Morph_flag = 0 and to compute digital edge, set Morph_flag = 1
    # 	Morph_flag = 1
    I_initial = I
    if (len(I.shape) == 3):
        I = I.mean(axis=2)

    L = 0.5
    x = np.linspace(-L, L, I.shape[0])
    y = np.linspace(-L, L, I.shape[1])
    [X1, Y1] = (np.meshgrid(x, y))
    X = X1.T
    Y = Y1.T
    [THETA, RHO] = cart2pol(X, Y)

    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = ((np.fft.fft2(I)))
    expo = np.fft.fftshift(
        np.exp(-np.power((np.divide(RHO, math.sqrt((LPF**2) /
                                                   np.log(2)))), 2)))
    Image_orig_filtered = np.real(
        np.fft.ifft2((np.multiply(Image_orig_f, expo))))
    # Constructing the PST Kernel
    PST_Kernel_1 = np.multiply(
        np.dot(RHO, Warp_strength), np.arctan(np.dot(RHO, Warp_strength))
    ) - 0.5 * np.log(1 + np.power(np.dot(RHO, Warp_strength), 2))
    PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * Phase_strength
    # Apply the PST Kernel
    temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)),
                       np.fft.fft2(Image_orig_filtered))
    Image_orig_filtered_PST = np.fft.ifft2(temp)

    # Calculate phase of the transformed image
    PHI_features = np.angle(Image_orig_filtered_PST)

    if Morph_flag == 0:
        out = PHI_features
        return out
    else:
        #   find image sharp transitions by thresholding the phase
        features = np.zeros((PHI_features.shape[0], PHI_features.shape[1]))
        features[PHI_features > Threshold_max] = 1  # Bi-threshold decision
        features[PHI_features < Threshold_min] = 1  # as the output phase has both positive and negative values
        features[I < (np.amax(I) / 20)] = 0  # Removing edges in the very dark areas of the image (noise)

        # apply binary morphological operations to clean the transformed image
        out = features
        out = mh.thin(out, 1)
        out = mh.bwperim(out, 4)
        out = mh.thin(out, 1)
        out = mh.erode(out, np.ones((1, 1)))

        Overlay = mh.overlay(I, out)
        return (out, Overlay)
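
# A hedged usage sketch for PST(): the file name is a placeholder; the default
# parameters defined above are used as-is.
import mahotas as mh

I = mh.imread('test_image.jpg', as_grey=True)   # hypothetical input image

analog_edges = PST(I, Morph_flag=0)             # phase map (analog edge)
digital_edges, overlay = PST(I, Morph_flag=1)   # binary edge map + overlay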
Example #19
def python_nuvolatools(timg, tdrawable, mangaoptype=0, lowlevelnumber=0, highlevelnumber=255,
                     samplethreshold=90, minblobarea=1, maxblobarea=10, otsuon=0, fancyproc=0, cbrregistry=0, numberOfDilations=1):


####CBR Registry section
###note: the CBR registry is meant to be a very compact way of specifying multiple values (a bitmask, one flag per operation). It should be decoded in this section to initialize the necessary variables, and it takes priority over everything else passed as arguments to the present function.



####variables init

    width = tdrawable.width
    height = tdrawable.height
    swidth = tdrawable.mask_bounds[2] - tdrawable.mask_bounds[0]
    sheight = tdrawable.mask_bounds[3] - tdrawable.mask_bounds[1]
    soffsetX = tdrawable.mask_bounds[0]
    soffsetY = tdrawable.mask_bounds[1]
    sselectionBox = tdrawable.mask_bounds


    shapedetfillratio = 0.0
    shapedetfillrange = 0.0
    shapedethwratio = 0.0
    shapedethwrange = 0.0
    shapeselectex = 1
    shapeinvertpic = 0
    radiusoperation = 0
    radiuspixel = 1
    uniqueExtension = ""
    if ((cbrregistry & 16)== 16):
    	dateNow = datetime.datetime.now()
    	uniqueExtension = str(time.mktime(dateNow.timetuple()))


    if ((mangaoptype==2) or (mangaoptype==3) or (mangaoptype==4)):

    	if ((cbrregistry & 4)== 4):
    		extradata = readdialogdatafromshm()		

    	root = Tk()
    	if ((cbrregistry & 4)!= 4):
    		d = MyDialog(root)
	    	extradata = d.result
    		print "Extra parameters input result:", extradata

	print "EXTRA PARAMETERS RAW", extradata
    	shapedetfillratio = float(extradata[0])
    	shapedethwratio = float(extradata[1])
    	shapedetfillrange = float(extradata[2])
    	shapedethwrange = float(extradata[3])
    	shapeselectex = int(extradata[4])
    	shapeinvertpic = int(extradata[5])
   	radiusoperation = int(extradata[6])
   	radiuspixel = int(extradata[7])
    	#basically, invertpic means setting the whitepores operation... so:
    	# (shapeinvertpic) complements the whitepores condition ~_~
    		


    scropBox = (swidth,sheight,soffsetX,soffsetY)

####creating work image (currently the imgg is NOT USED)

    imgg = gimp.Image(width, height, GRAY)
    imgg.disable_undo()
    layer_imgg = gimp.Layer(imgg, "work layer", width, height, GRAY_IMAGE, 100, NORMAL_MODE)
    layer_imgg.add_alpha()
    imgg.add_layer(layer_imgg, 0)
    pdb.gimp_image_crop(imgg,swidth,sheight,soffsetX,soffsetY)
    ##pdb.gimp_edit_copy(tdrawable)
    ##imggpastelayer = pdb.gimp_edit_paste(layer_imgg, True)
    ##imgg.add_layer(imggpastelayer, 1)
    ##imggreturnlayer = pdb.gimp_image_flatten(imgg)

    wdrawable = tdrawable.copy()
    if (fancyproc):
    	timg.add_layer(wdrawable, 0)
    	wdrawable.add_alpha()
    	pdb.gimp_layer_set_name(wdrawable,"nuvola work layer")

#### FANCY OPERATIONS (work layer)

    if (fancyproc):
	### this is a preset known to work well for me ~_~ ...doing two passes! (I should make this optional tho)
	## excluding the gaussian blur, useless for now.
	#pdb.plug_in_gauss_rle(timg, wdrawable, maxblurrad***, 1, 1)  #mablurrad will give error since this variable has been removed
    	pdb.gimp_levels(wdrawable, 0, 0, highlevelnumber, 1, 0, 255) #first let's minimize the background
    	pdb.plug_in_unsharp_mask(timg, wdrawable, 5, 0.5, 0) # only then, more crispness
    	#pdb.plug_in_unsharp_mask(timg, wdrawable, 5, 0.5, 0) ###maybe this is excessive, let's comment it
    	pdb.gimp_levels(wdrawable, 0, lowlevelnumber, 255, 1, 0, 255) #then, further darken the blacks
    	pdb.plug_in_antialias(timg, wdrawable) # a touch of antialias~





####conversion to numpy array
    #pdb.file_png_save(timg, tdrawable, "/dev/shm/"+uniqueExtension+"nuvolatools-pngtimgfile.png", "pngtimgfile.png", 0, 9, 1, 1, 1, 1, 1)


    #img = makenparrayfromfile("/dev/shm/"+uniqueExtension+"nuvolatools-pngtimgfile.png", sselectionBox, uniqueExtension)
 
    #pdb.file_gif_save(timg, tdrawable, "/dev/shm/"+uniqueExtension+"nuvolatools-pngtimgfile.gif", "pngtimgfile.gif", 0, 0, 0, 0)


    #img = makenparrayfromfile("/dev/shm/"+uniqueExtension+"nuvolatools-pngtimgfile.gif", sselectionBox, uniqueExtension)


    pdb.file_tiff_save(timg, tdrawable, "/dev/shm/"+uniqueExtension+"nuvolatools-pngtimgfile.tif", "pngtimgfile.tif", 1)

    print "calling nparrayfrom pngtimgfile"
    img = makenparrayfromfile("/dev/shm/"+uniqueExtension+"nuvolatools-pngtimgfile.tif", sselectionBox, uniqueExtension)


    #pdb.file_png_save(timg, wdrawable, "/dev/shm/"+uniqueExtension+"nuvolatools-wpngtimgfile.png", "wpngtimgfile.png", 0, 9, 1, 1, 1, 1, 1)


    #wimg = makenparrayfromfile("/dev/shm/"+uniqueExtension+"nuvolatools-wpngtimgfile.png", sselectionBox, uniqueExtension) <-- only if fancyproc
    if (fancyproc):
    	print "calling nparrayfrom wpngtimgfile"
    	pdb.file_tiff_save(timg, wdrawable, "/dev/shm/"+uniqueExtension+"nuvolatools-wpngtimgfile.tif", "wpngtimgfile.tif",1)
    	wimg = makenparrayfromfile("/dev/shm/"+uniqueExtension+"nuvolatools-wpngtimgfile.tif", sselectionBox, uniqueExtension)

#### thresholding and labeling


    imglabel, labelnum, imgbin, imgbinneg, img, imgneg, imgotsu, imgtosaveotsu, imgotsuneg = imagepreprocesslabel(img, mangaoptype, otsuon, samplethreshold, shapeinvertpic)

    if (fancyproc):
    	wimglabel, wlabelnum, wimgbin, wimgbinneg, wimg, wimgneg, wimgotsu, wimgtosaveotsu, wimgotsuneg = imagepreprocesslabel(wimg, mangaoptype, otsuon, samplethreshold, shapeinvertpic)


#### special for the shapedetector: we are actually using the work layer since it should give a much better shape separation 

    if ((mangaoptype==2) and (fancyproc)):
    	imglabel = wimglabel
    	labelnum = wlabelnum
    	imgbin = wimgbin
    	imgbinneg = wimgbinneg
    	img = wimg
    	imgneg = wimgneg
    	imgotsu = wimgotsu
    	imgtosaveotsu = wimgtosaveotsu
    	imgotsuneg = wimgotsuneg






####blob measurement via pymorph, parameter maxblobarea

    pdb.gimp_progress_set_text('Measuring specks sizes.. (may take some time)')
    
    imgblobdata = measurefromlabel(imglabel, tdrawable, fancyproc)
    print imgblobdata, len(imgblobdata)
    #len of data == number of labels, of course.

#### speckles operation start
    if ((mangaoptype==0) or (mangaoptype==1)):
	#print "making measurement from labels"
    	imgspeckles = img; #create a copy of the original nparray
	imgspecklesres = makefromlabel(imgbinneg, imgspeckles, imglabel, imgblobdata,minblobarea,maxblobarea,fancyproc, shapedetfillratio, shapedetfillrange, shapedethwratio, shapedethwrange, shapeselectex, radiusoperation, radiuspixel, mangaoptype, uniqueExtension)
    if (mangaoptype==2):
	imgspeckles = img; #create a copy of the original nparray
	### for now, fancyproc is needed true if we want to match specific shapes
    	fancyproc = True
	print "pre-call shape det parameters", shapedetfillratio, shapedetfillrange, shapedethwratio, shapedethwrange, shapeselectex, radiusoperation, radiuspixel
    	imgspecklesres = makefromlabel(imgbinneg, imgspeckles, imglabel, imgblobdata,minblobarea,maxblobarea,fancyproc, shapedetfillratio, shapedetfillrange, shapedethwratio, shapedethwrange, shapeselectex, radiusoperation, radiuspixel, mangaoptype, uniqueExtension)


#### Fancy operation: dilation 

    #if (mangaoptype==1):
    #	propagate_mode=1
    print "processing dilations"
    for i in range(numberOfDilations):
    	imgspecklesresdil = mahotas.dilate(imgspecklesres)
    	imgspecklesres = imgspecklesresdil



#### Fancy operation: thinning skeleton from mahotas when processing lineart

    print "thinning skeleton, this may take some minutes for very large pictures!"

    if ((fancyproc) and (mangaoptype == 3)):
    #if ((mangaoptype == 0)):
	minsegment=256
	minoverlap=64
    	#if (not fancyproc): # wimgbinneg is not declared if fancyproc is false, so for debugging purposes we assign it
	#	wimgbinneg = imgbinneg #preliminary results show a *less crisp* starting picture is MUCH desirable ~_~
	
	
    	wbinneglen=len(wimgbinneg)
    	wbinneglenintparts=wbinneglen/minsegment
	wbinneglenintrem=wbinneglen-wbinneglenintparts*minsegment
	wbinnegindex=0

    	#wimgbinneg=mahotas.erode(wimgbinneg)  ###this has a very positive effect of stabilizing certain aspects of the picture, however it's also altering it greatly. Commented for the time being.
    	#wimgbinneg=mahotas.erode(wimgbinneg)


    	print "processing segment at", wbinnegindex, "/",wbinneglen
    	imgthin=mahotas.thin(wimgbinneg[wbinnegindex:wbinnegindex+minsegment])
    	wbinnegindex=wbinnegindex+minsegment
	
	while (wbinnegindex<=wbinneglen):
    		pdb.gimp_progress_set_text("processing segment at "+str(wbinnegindex)+"/"+str(wbinneglen))
		wbinnegindex=wbinnegindex-minoverlap ###makes it 25% more computationally expensive, but pays with no artifacts
    		if (wbinnegindex+minsegment <= wbinneglen):
       			imgthinpart=mahotas.thin(wimgbinneg[wbinnegindex:wbinnegindex+minsegment])
    		if (wbinnegindex+minsegment > wbinneglen):
       			imgthinpart=mahotas.thin(wimgbinneg[wbinnegindex:])
    		#print imgthin[0]
    		#print imgthinpart[0]
    		imgthin=np.append(imgthin[0:wbinnegindex+minoverlap/2],imgthinpart[minoverlap/2:], axis=0)
    		wbinnegindex=wbinnegindex+minsegment


    	print "skeleton thinned"

    	io.imsave('/dev/shm/'+uniqueExtension+'nuvolatools-imgthin.png', imgthin*255)

	### a word of wisdom: this damn fails with "memory error" for VERY large pictures ~_~

    	###skel, distance = medial_axis(wimgbinneg, return_distance=True)

    	###print "medialaxis done"

    	###skeldist=skel*distance

    	###io.imsave('/dev/shm/'+uniqueExtension+'nuvolatools-medialaxis2.png', skeldist)




#### if whitepores = True or (shapeinvertpic), invert the picture
    if ((mangaoptype==1) or (shapeinvertpic)):
        imgnegres = pymorph.neg(imgspecklesres)
        imgspecklesres = imgnegres


####saving numpy intermediates
    #io.imsave('/dev/shm/'+uniqueExtension+'nuvolatools-imgtosaveotsu.png', imgtosaveotsu)
    #io.imsave('/dev/shm/'+uniqueExtension+'nuvolatools-imgbinneg.png', imgbinneg*255)
    io.imsave('/dev/shm/'+uniqueExtension+'nuvolatools-imgbin.png', imgbin*255)
    if (otsuon == 1):
    	io.imsave('/dev/shm/'+uniqueExtension+'nuvolatools-imgotsuneg.png',imgotsuneg*255)

    io.imsave('/dev/shm/'+uniqueExtension+'nuvolatools-imglabel.png',imglabel)
    io.imsave('/dev/shm/'+uniqueExtension+'nuvolatools-imgspecklesres.png',imgspecklesres)


####creating spare GIMP layers
    layer_specks = gimp.Layer(timg, "negative specks", swidth, sheight, GRAY_IMAGE, 100, NORMAL_MODE)


####loading specks as layer
    layer_specks = pdb.gimp_file_load_layer(timg, '/dev/shm/'+uniqueExtension+'nuvolatools-imgspecklesres.png')
    #pdb.gimp_layer_add_alpha(layer_specks)
    timg.add_layer(layer_specks, 0)
    layer_specks.add_alpha()
 
    pdb.gimp_layer_set_name(layer_specks,"speckles remover")
    #### if whitepores = True or (shapeinvertpic), use the correct layer name
    if ((mangaoptype==1) or (shapeinvertpic)):
    	pdb.gimp_layer_set_name(layer_specks,"white pores filler")

    print tdrawable.mask_bounds, tdrawable.mask_bounds[2]-tdrawable.mask_bounds[0], tdrawable.mask_bounds[3]-tdrawable.mask_bounds[1], tdrawable.mask_bounds[0], tdrawable.mask_bounds[1]



#### removing background and publishing

    #layer_specks.resize(tdrawable.mask_bounds[2]-tdrawable.mask_bounds[0], tdrawable.mask_bounds[3]-tdrawable.mask_bounds[1], tdrawable.mask_bounds[0], tdrawable.mask_bounds[1])

    pdb.gimp_by_color_select(layer_specks, gimpcolor.RGB(0,0,0), 0, CHANNEL_OP_REPLACE, False, False, 0, False)

#### if whitepores = True, invert the selection
    if ((mangaoptype==1) or (shapeinvertpic)):
        pdb.gimp_by_color_select(layer_specks, gimpcolor.RGB(255,255,255), 0, CHANNEL_OP_REPLACE, False, False, 0, False)



    pdb.gimp_edit_clear(layer_specks)
    pdb.gimp_selection_none(timg) 
    #print dir(layer_specks)
    #print dir(pdb)
    ##layer_two = layer_one.copy()
    ##layer_two.mode = MULTIPLY_MODE
    ##layer_two.name = "Y Dots"
    ##timg.add_layer(layer_two, 0)


    imgg.flatten()

    ##bump_layer = imgg.active_layer

    layer_specks.translate(soffsetX, soffsetY)
####sending back layers

    #layer_one.image = imgtosaveotsu
    #timg.add_layer(layer_one, 0)

    if ((cbrregistry & 1)== 1):
    	wdrawable.add_alpha()
	pdb.gimp_selection_all(timg)
	print "matched cbr registry 1 -> clearing wdrawable"
	#pdb.gimp_drawable_delete(wdrawable)
    	pdb.gimp_edit_clear(wdrawable)
    	pdb.gimp_selection_none(timg) 

    if ((cbrregistry & 2)== 2):
	print "saving current picture in xcf format"
	timgfilename=pdb.gimp_image_get_filename(timg)
    	timgname=pdb.gimp_image_get_name(timg)
	print timgname, timgfilename
    	
	timgfilenames=timgfilename+str(cbrregistry)+"op"+str(mangaoptype)+".xcf"
	timgnames=timgname+str(cbrregistry)+"op"+str(mangaoptype)+".xcf"
	print timgnames,timgfilenames
    	pdb.gimp_xcf_save(2,timg,wdrawable,timgfilenames,timgnames)
	#pdb.file_xjt_save(timg,wdrawable,timgfilenames,timgnames,1,0,1,0) 
    	#pdb.gimp_file_save(timg,wdrawable,timgfilenames,timgnames)
	 

#### Final


    if ((cbrregistry & 8)== 8):
    	os.system("rm -v /dev/shm/"+uniqueExtension+"nuvolatools-*")
    	os.system("rm -f /dev/shm/"+uniqueExtension+"nuvolatools-*")

    gimp.delete(imgg)
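
# The banded thinning loop above (thin 256-row segments with a 64-row overlap,
# then stitch on the middle of the overlap) can be factored into a small
# stand-alone helper. A sketch of that idea, not the plug-in's own code:
import numpy as np
import mahotas

def thin_in_segments(mask, seg=256, overlap=64):
    # Thin a large boolean mask in overlapping row bands to bound memory use.
    h = mask.shape[0]
    if h <= seg:
        return mahotas.thin(mask)
    out = mahotas.thin(mask[:seg])
    start = seg
    while start < h:
        start -= overlap                     # back up so neighbouring bands overlap
        band = mahotas.thin(mask[start:start + seg])
        keep = overlap // 2                  # stitch on the middle of the overlap
        out = np.vstack([out[:start + keep], band[keep:]])
        start += seg
    return out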
Example #20
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 23:26:47 2018

@author: raja
"""

import numpy as np
import mahotas as mh
import matplotlib.pyplot as plt

# "img" is assumed to have been loaded earlier (e.g. with mh.imread)
img = np.array(img)
im = img[:, 0:50, 0]
im = im < 128
skel = mh.thin(im)
noholes = mh.morph.close_holes(skel)
plt.subplot(311)
plt.imshow(im)
plt.subplot(312)
plt.imshow(skel)
plt.subplot(313)
cskel = np.logical_not(skel)
choles = np.logical_not(noholes)
holes = np.logical_and(cskel, noholes)
lab, n = mh.label(holes)
print('B has %s holes' % n)
plt.imshow(lab)
Example #21
def compute_skeleton(input_map):
    '''Returns a skeleton image of the free space'''
    return mahotas.thin(input_map)
Example #22
        return majorAngle, compassTable, self.rotatedIm


user = os.path.expanduser("~")
#modify the path to your image
workdir = os.path.join(user, "Applications", "ImagesTest", "CytoProject",
                       "Jpp48", "8", "DAPI", "particles")
file = "part3.png"
complete_path = os.path.join(workdir, file)
if __name__ == "__main__":
    im = readmagick.readimg(complete_path)
    im0 = np.copy(im)

    hip = pymorph.subm(im, nd.gaussian_filter(im, 5))
    hip0 = np.copy(hip)
    im = mahotas.thin(im)
    #im=mahotas.bwperim(im>0)
    hip = mahotas.thin(hip)
    #print im.dtype#print uint16
    p1 = particle(im)
    print "particle 1", p1.cvxhull_area()
    p2 = particle(hip)
    contour = mahotas.bwperim(im > 0)
    p3 = particle(contour)
    print "particle 1 hi pass", p2.cvxhull_area()
    theta1, rosedesvents, VImage = p1.orientationByErosion(5)
    theta2, rdv, VHip = p2.orientationByErosion(5)
    x = rosedesvents[0, :]
    y = rosedesvents[1, :]
    xh = rdv[0, :]
    yh = rdv[1, :]
def n_skeleton_branches(image):
    """Number of branches of skeleton."""
    skeleton = mh.thin(image)
    branches = end_points(skeleton)
    return (sum(sum(branches != False)))
Example #24
def compute_skeleton_map(boolean_map):
    """
    Returns a skeletonised version of a binary image via thinning.
    This makes use of the mahotas library.
    """
    return mahotas.thin(boolean_map)