Code example #1
def _region_features_for(histone, dna, region):
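    # Per-region feature vector: correlation/overlap statistics between the
    # histone and DNA channels, Sobel-gradient statistics on the eroded
    # region, and Haralick textures of the masked channels and of their
    # contrast-stretched Sobel responses.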
    pixels0 = histone[region].ravel()
    pixels1 = dna[region].ravel()
    bin0 = pixels0 > histone.mean()
    bin1 = pixels1 > dna.mean()
    overlap = [np.corrcoef(pixels0, pixels1)[0, 1], (bin0 & bin1).mean(), (bin0 | bin1).mean()]

    spi = mh.sobel(histone, just_filter=1)
    sp = spi[mh.erode(region)]
    sdi = mh.sobel(dna, just_filter=1)
    sd = sdi[mh.erode(region)]
    sobels = [
        np.dot(sp, sp) / len(sp),
        np.abs(sp).mean(),
        np.dot(sd, sd) / len(sd),
        np.abs(sd).mean(),
        np.corrcoef(sp, sd)[0, 1],
        np.corrcoef(sp, sd)[0, 1] ** 2,
        sp.std(),
        sd.std(),
    ]

    return np.concatenate(
        [
            [region.sum()],
            haralick(histone * region, ignore_zeros=True).mean(0),
            haralick(dna * region, ignore_zeros=True).mean(0),
            overlap,
            sobels,
            haralick(mh.stretch(sdi * region), ignore_zeros=True).mean(0),
            haralick(mh.stretch(spi * region), ignore_zeros=True).mean(0),
        ]
    )
Code example #2
def _region_features_for(histone, dna, region):
    pixels0 = histone[region].ravel()
    pixels1 = dna[region].ravel()
    bin0 = pixels0 > histone.mean()
    bin1 = pixels1 > dna.mean()
    overlap = [
        np.corrcoef(pixels0, pixels1)[0, 1],
        (bin0 & bin1).mean(),
        (bin0 | bin1).mean(),
    ]

    spi = mh.sobel(histone, just_filter=1)
    sp = spi[mh.erode(region)]
    sdi = mh.sobel(dna, just_filter=1)
    sd = sdi[mh.erode(region)]
    sobels = [
        np.dot(sp, sp) / len(sp),
        np.abs(sp).mean(),
        np.dot(sd, sd) / len(sd),
        np.abs(sd).mean(),
        np.corrcoef(sp, sd)[0, 1],
        np.corrcoef(sp, sd)[0, 1]**2,
        sp.std(),
        sd.std(),
    ]

    return np.concatenate([
        [region.sum()],
        haralick(histone * region, ignore_zeros=True).mean(0),
        haralick(dna * region, ignore_zeros=True).mean(0),
        overlap,
        sobels,
        haralick(mh.stretch(sdi * region), ignore_zeros=True).mean(0),
        haralick(mh.stretch(spi * region), ignore_zeros=True).mean(0),
    ])
Code example #3
def test_stretch_rgb():
    r = np.arange(256).reshape((32,-1))
    g = 255-r
    b = r/2
    s = mh.stretch(np.dstack([r,g,b]))
    s_rgb = mh.stretch_rgb(np.dstack([r,g,b]))
    assert not np.all(s == s_rgb)
    assert np.all(s[:,:,0] == s_rgb[:,:,0])
    assert np.all(mh.stretch(b) == mh.stretch_rgb(b))
Code example #4
def test_stretch_rgb():
    r = np.arange(256).reshape((32, -1))
    g = 255 - r
    b = r / 2
    s = mh.stretch(np.dstack([r, g, b]))
    s_rgb = mh.stretch_rgb(np.dstack([r, g, b]))
    assert not np.all(s == s_rgb)
    assert np.all(s[:, :, 0] == s_rgb[:, :, 0])
    assert np.all(mh.stretch(b) == mh.stretch_rgb(b))
Code example #5
def calc_img_haralick(im, bins=32):
    """
    Calculates Haralick features for an RGB input image.
    Linearly quantizes each channel to `bins` grey levels and averages the
    Haralick features across the 4 directions.
    :param im: RGB image (H x W x 3 array)
    :param bins: number of grey levels used for the quantization
    :return: concatenated list of R, G and B Haralick features
    """

    r_feats = mh.features.haralick(mh.stretch(im[:,:,0], 0, bins), return_mean=True)
    g_feats = mh.features.haralick(mh.stretch(im[:,:,1], 0, bins), return_mean=True)
    b_feats = mh.features.haralick(mh.stretch(im[:,:,2], 0, bins), return_mean=True)

    return r_feats.tolist() + g_feats.tolist() + b_feats.tolist()
Code example #6
def method2(image, sigma):
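    # Read the first channel, smooth it with a Gaussian, stretch the contrast
    # to 0-255, threshold with Otsu and label the connected components.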
    image = mh.imread(image)[:, :, 0]
    image = mh.gaussian_filter(image, sigma)
    image = mh.stretch(image)
    binimage = image > mh.otsu(image)
    labeled, _ = mh.label(binimage)
    return labeled
Code example #7
File: jugfile.py  Project: jpedraza/luispedro_org
def method2(image, sigma):
    image = mh.imread(image)[:, :, 0]
    image = mh.gaussian_filter(image, sigma)
    image = mh.stretch(image)
    binimage = (image > mh.otsu(image))
    labeled, _ = mh.label(binimage)
    return labeled
Code example #8
File: test_watershed.py  Project: cskv/CellProfiler
def test_run_distance(image, module, image_set, workspace):
    module.use_advanced.value = False

    module.operation.value = "Distance"

    module.x_name.value = "binary"

    module.y_name.value = "watershed"

    module.footprint.value = 3

    data = image.pixel_data

    if image.multichannel:
        data = skimage.color.rgb2gray(data)

    threshold = skimage.filters.threshold_otsu(data)

    binary = data > threshold

    image_set.add(
        "binary",
        cellprofiler_core.image.Image(
            image=binary, convert=False, dimensions=image.dimensions
        ),
    )

    module.run(workspace)

    original_shape = binary.shape

    distance = scipy.ndimage.distance_transform_edt(binary)

    distance = mahotas.stretch(distance)

    surface = distance.max() - distance

    if image.volumetric:
        footprint = numpy.ones((3, 3, 3))
    else:
        footprint = numpy.ones((3, 3))

    peaks = mahotas.regmax(distance, footprint)

    if image.volumetric:
        markers, _ = mahotas.label(peaks, numpy.ones((16, 16, 16)))
    else:
        markers, _ = mahotas.label(peaks, numpy.ones((16, 16)))

    expected = mahotas.cwatershed(surface, markers)

    expected = expected * binary

    expected = skimage.measure.label(expected)

    actual = workspace.get_objects("watershed")

    actual = actual.segmented

    numpy.testing.assert_array_equal(expected, actual)
Code example #9
    def perform_watershed(threshed, maxima):
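        # Invert the stretched distance transform so object centres become
        # basins, then flood from the labelled maxima, masked to the threshold.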

        distances = mh.stretch(mh.distance(threshed))
        spots, n_spots = mh.label(maxima, Bc=np.ones((3, 3)))
        surface = (distances.max() - distances)

        return sk.morphology.watershed(surface, spots, mask=threshed)
Code example #10
def test_run_distance(image, module, image_set, workspace):
    module.operation.value = "Distance"

    module.x_name.value = "binary"

    module.y_name.value = "watershed"

    module.connectivity.value = 3

    data = image.pixel_data

    if image.multichannel:
        data = skimage.color.rgb2gray(data)

    threshold = skimage.filters.threshold_otsu(data)

    binary = data > threshold

    image_set.add(
        "binary",
        cellprofiler.image.Image(
            image=binary,
            convert=False,
            dimensions=image.dimensions
        )
    )

    module.run(workspace)

    original_shape = binary.shape

    distance = scipy.ndimage.distance_transform_edt(binary)

    distance = mahotas.stretch(distance)

    surface = distance.max() - distance

    if image.volumetric:
        footprint = numpy.ones((3, 3, 3))
    else:
        footprint = numpy.ones((3, 3))

    peaks = mahotas.regmax(distance, footprint)

    if image.volumetric:
        markers, _ = mahotas.label(peaks, numpy.ones((16, 16, 16)))
    else:
        markers, _ = mahotas.label(peaks, numpy.ones((16, 16)))

    expected = mahotas.cwatershed(surface, markers)

    expected = expected * binary

    expected = skimage.measure.label(expected)

    actual = workspace.get_objects("watershed")

    actual = actual.segmented

    numpy.testing.assert_array_equal(expected, actual)
Code example #11
    def watershed(self, Ta=0):
        """
        Identification of particles through inverted slope comparisons
        
        Parameters
        -----------
        Ta : int
            Threshold value used to identify the particles
        """
        self.Ta = Ta
        dist = mh.distance(self.image > 0.05 * self.Ta)
        dist1 = dist
        dist = dist.max() - dist
        dist -= dist.min()  # inverting color
        dist = dist / float(dist.ptp()) * 255
        dist = dist.astype(np.uint8)
        self.dist = mh.stretch(dist, 0, 255)
        self.labels, self.n_particles = mh.label(self.image > 0.7 * self.Ta)

        thr = np.median(dist)

        # not accurate to particles detected(?)
        # but matches dist graph well
        thresh = (dist < thr)
        areas = 0.9 * mh.cwatershed(dist, self.labels)
        self.areas = areas * thresh
        return
Code example #12
def get_Haralick(im_arr, dist=1, grid_size=1, j=10):
    """
    Haralick features for a single image.
    :param im_arr:
    :param dist:
    :param grid_size:
    :return: array of length 13*4*grid_size^2
    """

    # if j % 30 == 0:
    #     print("|", end = "", flush = True)
    img = np.asarray(im_arr).astype(int)
    img = mahotas.stretch(img, 31)
    window_size = (np.asarray([img.shape]) / grid_size).astype(int)[0]
    im_grid = np.asarray(skimage.util.view_as_blocks(img, tuple(window_size)))
    windows = []
    for i in range(grid_size):
        for j in range(grid_size):
            windows.append(im_grid[i, j])
    haralick_features = []
    for i in range(len(windows)):
        try:
            h = haralick(windows[i], distance=dist)
        except ValueError:
            # report the offending window before re-raising
            print('error in Haralick!')
            print(windows[i].shape)
            print(windows[i])
            raise
        h = np.ravel(np.asarray(h))
        haralick_features.append(h)
    out = np.ravel(np.asarray(haralick_features))
    return out
Code example #13
    def create_ring(self):
        im = self._image
        # This breaks up the image into RGB channels
        r, g, b = im.transpose(2, 0, 1)
        h, w = r.shape
        
        # smooth the image per channel:
        r12 = mh.gaussian_filter(r, 12.)
        g12 = mh.gaussian_filter(g, 12.)
        b12 = mh.gaussian_filter(b, 12.)
        
        # build back the RGB image
        im12 = mh.as_rgb(r12, g12, b12)
        
        X, Y = np.mgrid[:h, :w]
        X = X - h / 2.
        Y = Y - w / 2.
        X /= X.max()
        Y /= Y.max()
        
        # Array C will have the highest values in the center, fading out to the edges:
        
        C = np.exp(-2. * (X ** 2 + Y ** 2))
        C -= C.min()
        C /= C.ptp()
        C = C[:, :, None]
        
        # The final result is sharp in the centre and smooths out to the borders:
        ring = mh.stretch(im * C + (1 - C) * im12)

        return ring
Code example #14
    def ChooseFile(self):
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "",
                                                        "Excel Files (*.xls *.xlsx *.xlsm);; "
                                                        "Tiff Image Files (*.tif);;"
                                                        "Other Image Files (*.jpg *png);;"
                                                        " All Files (*)", options=options)

        if fileName:
            if fileName.endswith('.xls') or fileName.endswith('.xlsx') or fileName.endswith('.xlsm'):
                self.Line_filename.setText('File Uploaded from: '+str(fileName))
                self.seq_dataset = pd.read_excel(fileName, index_col=0);
                self.Button_Run.setEnabled(True); self.Button_ShowImage.setEnabled(False);
                self.Slider_Sigma.setEnabled(False); self.Slider_Dilate.setEnabled(False);
            elif fileName.endswith('.tif') or fileName.endswith('.png') or fileName.endswith('.jpg'):
                self.Line_filename.setText('File Uploaded from: '+str(fileName))
                img = pil.open(fileName); self.img_array = mh.stretch(np.array(img)); img.close()
                self.Button_ShowImage.setEnabled(True); self.Button_Run.setEnabled(False)
                # Load basic threshold params
                self.T_otsu = mh.otsu(self.img_array); self.T_mean = self.img_array.mean()
            else:
                self.Label_status.setText('There appears to be something wrong with your input file')

            self.Button_SNR.setEnabled(False); self.Check_TissueMaxima.setEnabled(False)
            self.Button_DAPI.setEnabled(False);
        else:
            self.Label_status.setText('File not uploaded')
Code example #15
File: segmentation.py  Project: zhou0919/DeepFISH
def chromatids_elements(TopHatedChromosome):
    '''Take a high-pass filtered (or top-hat) image of a chromosome and label the chromatid elements.
    '''
    threshed = TopHatedChromosome > 0
    #threshed = mh.open(threshed)
    labthres, _ = mh.label(threshed)
    labsz = mh.labeled.labeled_size(labthres)
    mh.labeled.remove_regions_where(labthres, labsz < 2, inplace=True)
    threshed = labthres > 0

    skel2 = mh.thin(threshed)
    bp2 = branchedPoints(skel2, showSE=False) > 0
    rem = np.logical_and(skel2, np.logical_not(bp2))
    labskel, _ = mh.labeled.label(rem)
    #print labskel.dtype
    size_sk = mh.labeled.labeled_size(labskel)
    #print size_sk
    skelem = mh.labeled.remove_regions_where(labskel, size_sk < 4)

    distances = mh.stretch(mh.distance(threshed))
    surface = (distances.max() - distances)
    chr_label = mh.cwatershed(surface, skelem)
    #print chr_label.dtype, type(chr_label)
    chr_label *= threshed

    # This conversion is important!
    chr_label = chr_label.astype(np.intc)
    #-------------------------------
    mh.labeled.relabel(chr_label, inplace=True)
    labsize2 = mh.labeled.labeled_size(chr_label)
    cleaned = mh.labeled.remove_regions_where(chr_label, labsize2 < 8)
    mh.labeled.relabel(cleaned, inplace=True)
    return cleaned
Code example #16
File: test_stretch.py  Project: langner/mahotas
def test_overlay():
    im = mh.demos.load('luispedro', as_grey=1)
    im = mh.stretch(im)
    assert np.all(mh.overlay(im).max(2) == im)
    edges = mh.sobel(im)

    im3 = mh.overlay(im, green=edges)
    assert np.all(im3[:,:,0] == im)
    assert np.all(im3[:,:,2] == im)
    assert np.all(im3[:,:,1] >= im )
Code example #17
def segment(fname):
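    # Smooth the DNA channel, threshold at the mean, then split touching
    # nuclei with a seeded watershed on the inverted distance transform,
    # using dilated regional maxima of the smoothed image as seeds.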
    dna = mh.imread(fname)
    dna = dna[:,:,0]

    sigma = 12.
    dnaf = mh.gaussian_filter(dna, sigma)

    T_mean = dnaf.mean()
    bin_image = dnaf > T_mean
    labeled, nr_objects = mh.label(bin_image)

    maxima = mh.regmax(mh.stretch(dnaf))
    maxima = mh.dilate(maxima, np.ones((5,5)))
    maxima,_ = mh.label(maxima)
    dist = mh.distance(bin_image)
    dist = 255 - mh.stretch(dist)
    watershed = mh.cwatershed(dist, maxima)
    watershed *= bin_image
    return watershed
Code example #18
def test_overlay():
    im = mh.demos.load('luispedro', as_grey=1)
    im = mh.stretch(im)
    assert np.all(mh.overlay(im).max(2) == im)
    edges = mh.sobel(im)

    im3 = mh.overlay(im, green=edges)
    assert np.all(im3[:, :, 0] == im)
    assert np.all(im3[:, :, 2] == im)
    assert np.all(im3[:, :, 1] >= im)
Code example #19
def hypersegmented_features(histone, dna, rois):
    """\
    features,labels = hypersegmented_features(histone, dna, rois)
    features = hypersegmented_features(histone, dna, None)

    Computes hyper-segmented features on (histone/dna). If ``rois`` is not
    ``None``, returns the label for each region (fraction of NETs).

    Parameters
    ----------
    histone : ndarray
    dna : ndarray
    rois : ndarray or None

    Returns
    -------
    features : ndarray
        2D array features for each region
    labels : ndarray (only if ``rois is not None``)
        for the corresponding region, returns the fraction of NETs
    """
    if rois is not None:
        interest = rois > 0

    regions, n_regions = _segment(histone)
    histone = mh.stretch(histone)
    dna = mh.stretch(dna)
    features = []
    labels = []
    for ni in range(n_regions):
        region = regions == (ni + 1)
        if region.sum() < 16:
            continue
        features.append(_region_features_for(histone, dna, region))
        if rois is not None:
            fraction = interest[region].mean()
            labels.append(fraction)
    if rois is not None:
        return np.array(features), np.array(labels)
    return np.array(features)
Code example #20
def hypersegmented_features(histone, dna, rois):
    '''\
    features,labels = hypersegmented_features(histone, dna, rois)
    features = hypersegmented_features(histone, dna, None)

    Computes hyper-segmented features on (histone/dna). If ``rois`` is not
    ``None``, returns the label for each region (fraction of NETs).

    Parameters
    ----------
    histone : ndarray
    dna : ndarray
    rois : ndarray or None

    Returns
    -------
    features : ndarray
        2D array features for each region
    labels : ndarray (only if ``rois is not None``)
        for the corresponding region, returns the fraction of NETs
    '''
    if rois is not None:
        interest = (rois > 0)

    regions, n_regions = _segment(histone)
    histone = mh.stretch(histone)
    dna = mh.stretch(dna)
    features = []
    labels = []
    for ni in range(n_regions):
        region = (regions == (ni + 1))
        if region.sum() < 16:
            continue
        features.append(_region_features_for(histone, dna, region))
        if rois is not None:
            fraction = interest[region].mean()
            labels.append(fraction)
    if rois is not None:
        return np.array(features), np.array(labels)
    return np.array(features)
Code example #21
def watershed_single_channel(np_array, threshold=10, blur_factor=9, max_bb_size=13000, min_bb_size=1000, footprint=10):
# INPUT:
#     np_array: numpy image of single channel
#     threshold: minimum value to be analysed
#     blur_factor: sigma for the gaussian blur.  some blur is good; too much or too little is bad.
#     max_bb_size: set maximum size for a bounding box.
#     min_bb_size: set minimum size for a bounding box.
#     footprint: box of size footprint x footprint used to find regions of maximum intensity.
# OUTPUT:
#     return: array of bounding boxes for the channel image given
# NOTE: the input values should be tuned (currently by hand); this algorithm is useless if the arguments are not optimized for the data.  Otherwise just leave the defaults.

    # nuclear is a blurred version of the original image
    nuclear = mh.gaussian_filter(np_array, blur_factor)
    # calculate a minimum threshold using otsu method
    otsu_thresh = threshold_otsu(nuclear)
    
    # determine the threshold from otsu and the input argument; if there is little/no signal in the image, the otsu value can be way too low
    set_thresh = None
    if threshold>otsu_thresh:
        set_thresh = threshold
    else:
        set_thresh = otsu_thresh
    
    # set values lower than set_thresh to zero
    index_otsu = nuclear < set_thresh
    nuclear[index_otsu] = 0
    
    # determine areas of maximum intensity and the distance between them
    thresh = (nuclear > nuclear.mean())
    dist = mh.stretch(mh.distance(thresh))
    Bc = np.ones((footprint, footprint))

    # the code to generate region_props from the watershed algorithm
    maxima = mh.morph.regmax(dist, Bc=Bc)
    spots, n_spots = mh.label(maxima, Bc=Bc)
    sizes = mh.labeled.labeled_size(spots)
    too_big = np.where(sizes > max_bb_size)
    spots = mh.labeled.remove_regions(spots, too_big)
    spots = mh.labeled.remove_bordering(spots)
    spots, n_left = mh.labeled.relabel(spots)
    surface = (dist.max() - dist)
    areas = mh.cwatershed(surface, spots)
    areas *= thresh

    # get the list of region properties from the watershed.  there is a lot of information in region_props, a lot of which is inaccurate.  NEVER TRUST REGIONPROPS!
    region_props=regionprops(areas,intensity_image=nuclear)

    # generate array of bounding boxes from measured region properties. call bbs_from_rprops()
    watershed_bb_array = bbs_from_rprops(region_props, max_bb_size, min_bb_size)

    return watershed_bb_array
Code example #22
def nuclei_regions(comp_map):
    """
    NUCLEI_REGIONS: extract "support regions" for nuclei. This function
    expects as input a "tissue components map" (as returned, for example,
    by segm.tissue_components) where values of 1 indicate pixels having
    a color corresponding to nuclei.
    It returns a set of compact support regions corresponding to the
    nuclei.


    :param comp_map: numpy.ndarray
       A mask identifying different tissue components, as obtained
       by classification in RGB space. The value 0

       See segm.tissue.tissue_components()

    :return:
    """
    # Deprecated:...
    # img_hem, _ = rgb2he(img0, normalize=True)

    # img_hem = denoise_tv_bregman(img_hem, HE_OPTS['bregm'])

    # Get a mask of nuclei regions by unsupervised clustering:
    # Vector Quantization: background, mid-intensity Hem and high intensity Hem
    # -train the quantizer for 3 levels
    # vq = KMeans(n_clusters=3)
    # vq.fit(img_hem.reshape((-1,1)))
    # -the level of interest is the brightest:
    # k = np.argsort(vq.cluster_centers_.squeeze())[2]
    # mask_hem = (vq.labels_ == k).reshape(img_hem.shape)
    # ...end deprecated

    # Final mask:
    mask = (comp_map == 1)   # use the components classified by color

    # mask = morph.closing(mask, selem=HE_OPTS['strel1'])
    # mask = morph.opening(mask, selem=HE_OPTS['strel1'])
    # morph.remove_small_objects(mask, in_place=True)
    # mask = (mask > 0)

    mask = mahotas.close_holes(mask)
    morph.remove_small_objects(mask, in_place=True)

    dst  = mahotas.stretch(mahotas.distance(mask))
    Bc=np.ones((9,9))
    lmax = mahotas.regmax(dst, Bc=Bc)
    spots, _ = mahotas.label(lmax, Bc=Bc)
    regions = mahotas.cwatershed(lmax.max() - lmax, spots) * mask

    return regions
# end NUCLEI_REGIONS
Code example #23
    def get_salt_pepper_image(self):
        im     = self._gray_image
        salt   = np.random.random(im.shape) > .975
        pepper = np.random.random(im.shape) > .975
        
        # salt is 170 & pepper is 30
        # Some playing around showed that setting these to more extreme values looks
        # very artificial. These look nicer
        
        im = np.maximum(salt * 170, mh.stretch(im))
        im = np.minimum(pepper * 30 + im * (~pepper), im)

        return im
Code example #24
def _segment(cell):
    # takes a numpy array of a microscopy image
    # segments it based on filtering the image then applying a distance transform and
    # a watershed method to get the proper segmentation

    import mahotas as mh
    filt_cell = mh.gaussian_filter(cell, 2)
    T = mh.thresholding.otsu((np.rint(filt_cell).astype('uint8')))
    dist = mh.stretch(mh.distance(filt_cell > T))

    Bc = np.ones((3, 3))
    rmax = mh.regmin((dist))
    rmax = np.invert(rmax)
    labels, num_cells = mh.label(rmax, Bc)
    surface = (dist.max() - dist)
    areas = mh.cwatershed(dist, labels)
    areas *= T
    return areas
Code example #25
def _segment(cell):
    # takes a numpy array of a microscopy image
    # segments it based on filtering the image then applying a distance transform and
    # a watershed method to get the proper segmentation

    import mahotas as mh
    filt_cell = mh.gaussian_filter(cell, 2)
    T = mh.thresholding.otsu((np.rint(filt_cell).astype('uint8')))
    dist = mh.stretch(mh.distance(filt_cell > T))
    
    Bc = np.ones((3,3))
    rmax = mh.regmin((dist))
    rmax = np.invert(rmax)
    labels, num_cells = mh.label(rmax, Bc)
    surface = (dist.max() - dist)
    areas = mh.cwatershed(dist, labels)
    areas *= T
    return areas
Code example #26
def center_focus(im,f):
  r, g, b = im.transpose(2,0,1)
  r12 = mh.gaussian_filter(r, 12.0)
  g12 = mh.gaussian_filter(g, 12.0)
  b12 = mh.gaussian_filter(b, 12.0)
  im12 = mh.as_rgb(r12,g12,b12)
  h, w = r.shape 
  Y, X = np.mgrid[:h,:w]
  Y = Y-h/2.0
  Y = Y/Y.max()
  X = X-w/2.0
  X = X/X.max()
  W = np.exp(-2.0*(X**2 + Y**2)/0.5)
  W = W - W.min()
  W = W/W.ptp()
  W = W[:, :, None]
  ringed = mh.stretch(im*W + (1.0-W)*im12)
  plt.imshow(ringed)
  plt.savefig(f)
  plt.show()
Code example #27
def watershed_from_image(image):
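    # Binarise with a Niblack local threshold, drop regions smaller than
    # 50 pixels, then run a watershed on the negated distance transform,
    # seeded by regional maxima of the Gaussian-smoothed intensity image.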
    bin_image = threshold_niblack(image)
    bin_image = image > bin_image / 0.8
    bin_image = mh.label(bin_image)[0]

    sizes = mh.labeled.labeled_size(bin_image)
    bin_image = mh.labeled.remove_regions_where(bin_image, sizes < 50)
    bin_image = (bin_image > 0) * 1

    distance = ndi.distance_transform_edt(bin_image)

    imagef = mh.gaussian_filter(image.astype(float), 2)

    maxima = mh.regmax(mh.stretch(imagef))
    maxima, _ = mh.label(maxima)

    markers = erode(bin_image, 1)
    markers = mh.label(markers + 1)[0]

    labels = sk_watershed(-distance, maxima, watershed_line=1)

    watershed = labels * bin_image
    return watershed
Code example #28
def get_Haralick(im_arr, dist=1, grid_size=1):
    """
    Haralick features for a single image.
    :param im_arr:
    :param dist:
    :param grid_size:
    :return: array of length 13*4*grid_size^2
    """
    img = np.asarray(im_arr).astype(int)
    img = mahotas.stretch(img, 31)
    window_size = (np.asarray([img.shape]) / grid_size).astype(int)[0]
    im_grid = np.asarray(skimage.util.view_as_blocks(img, tuple(window_size)))
    windows = []
    for i in range(grid_size):
        for j in range(grid_size):
            windows.append(im_grid[i, j])
    haralick_features = []
    for i in range(len(windows)):
        h = haralick(windows[i], distance=dist)
        h = np.ravel(np.asarray(h))
        haralick_features.append(h)
    out = np.ravel(np.asarray(haralick_features))
    return out
Code example #29
def process_image(im, d, test=False, remove_bordering=False):
    plt.figure(1, frameon=False)
    sigma = 75
    blurred = mh.gaussian_filter(im.astype(float), sigma)
    T_mean = blurred.mean()
    bin_image = im > T_mean

    maxima = mh.regmax(mh.stretch(blurred))
    maxima, _ = mh.label(maxima)

    dist = mh.distance(bin_image)

    dist = 255 - mh.stretch(dist)
    watershed = mh.cwatershed(dist, maxima)

    _, old_nr_objects = mh.labeled.relabel(watershed)

    sizes = mh.labeled.labeled_size(watershed)
    min_size = 100000
    filtered = mh.labeled.remove_regions_where(
        watershed * bin_image, sizes < min_size)

    _, nr_objects = mh.labeled.relabel(filtered)
    print('Removed', old_nr_objects - nr_objects, 'small regions')
    old_nr_objects = nr_objects

    if (remove_bordering):
        filtered = mh.labeled.remove_bordering(filtered)
    labeled, nr_objects = mh.labeled.relabel(filtered)

    print('Removed', old_nr_objects - nr_objects, 'bordering cells')

    print("Number of cells: {}".format(nr_objects))
    fin_weights = mh.labeled_sum(im.astype(np.uint32), labeled)
    fin_sizes = mh.labeled.labeled_size(labeled)
    fin_result = fin_weights / fin_sizes
    if (test):
        f, axarr = plt.subplots(2, 2)
        for i in range(2):
            for j in range(2):
                axarr[i][j].axis('off')
        axarr[0, 0].imshow(im)
        axarr[0, 0].set_title('Source')
        axarr[0, 1].imshow(labeled)
        axarr[0, 1].set_title('Labeled')
        axarr[1, 0].imshow(watershed)
        axarr[1, 0].set_title('Watershed')
        axarr[1, 1].imshow(blurred)
        axarr[1, 1].set_title('Blurred')
        for i in range(1, nr_objects + 1):
            print("Cell {} average luminescence is {}".format(
                i, fin_result[i]))
            bbox = mh.bbox((labeled == i))
            plt.text((bbox[2] + bbox[3]) / 2, (bbox[0] + bbox[1]
                                               ) / 2, str(i), fontsize=20, color='black')
        # plt.show()
        plt.savefig("test" + str(nr_objects) + ".svg",
                    format='svg', bbox_inches='tight', dpi=1200)
    else:
        for i in range(1, nr_objects + 1):
            bbox = mh.bbox((labeled == i))
            cell = (im * (labeled == i))[bbox[0]:bbox[1], bbox[2]:bbox[3]]
            hashed = hashlib.sha1(im).hexdigest()
            imsave(d + data_dir + hashed + '-' + str(i) +
                   '.png', imresize(cell, (img_rows, img_cols)))
Code example #30
import mahotas as mh
from jugfile import method1
from matplotlib import cm
import numpy as np

im = mh.imread('images/dna-21.jpg')
mh.imsave('image_stretched.jpeg', mh.stretch(im.astype(float)**.01))
m1 = method1.f('images/dna-21.jpg', 2)
m1 = m1.astype(np.uint8)
color = ((cm.rainbow(m1.astype(float)/m1.max())[:,:,:3]).reshape(m1.shape+(3,)))
color[m1 == 0] = (0,0,0)
mh.imsave('image_method1.jpeg', mh.stretch(color))

ref = mh.imread('references/dna-21.png')
color = ((cm.rainbow(ref.astype(float)/ref.max())[:,:,:3]).reshape(m1.shape+(3,)))
color[ref == 0] = (0,0,0)
mh.imsave('image_reference.jpeg', mh.stretch(color))


Code example #31
from imread import imread
import numpy as np
import mahotas as mh
from matplotlib import cm
from matplotlib.figure import Figure

lenna = imread('../DATA/Lenna.png', as_grey=True)

views = []
view_titles = []

views.append(lenna)
view_titles.append('Lenna, grayscale')

salt = np.random.random(lenna.shape) > .975
pepper = np.random.random(lenna.shape) > .975
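# Salt and pepper pixels are chosen independently with probability 0.025;
# salt pixels are pushed towards 170 and pepper pixels towards 30 below.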

lenna = mh.stretch(lenna)
lenna = np.maximum(salt*170, lenna)
lenna = np.minimum(pepper*30 + lenna*(~pepper), lenna)

views.append(lenna)
view_titles.append('Lenna, salt & pepper')

# Initial view
index = 0
f = Figure()
a = f.add_subplot(111)

a.imshow(views[index], cmap = cm.Greys_r)
a.set_title(view_titles[index])
Code example #32
import mahotas
import numpy as np
from matplotlib import pyplot as plt
import random
from matplotlib import colors as c

nuclear = mahotas.imread('data/flower.png')
nuclear = nuclear[:, :, 0]
nuclear = mahotas.gaussian_filter(nuclear, 1.)
threshed = (nuclear > nuclear.mean())
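# Seeded watershed: stretch the distance transform, take its regional maxima
# as seeds, and flood the inverted distance map, masked to the foreground.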
distances = mahotas.stretch(mahotas.distance(threshed))
Bc = np.ones((9, 9))

maxima = mahotas.morph.regmax(distances, Bc=Bc)
spots, n_spots = mahotas.label(maxima, Bc=Bc)
surface = (distances.max() - distances)
areas = mahotas.cwatershed(surface, spots)
areas *= threshed

plt.jet()

rmap = c.ListedColormap(np.random.rand(256, 3))

plt.imshow(areas, cmap=rmap)
plt.show()
Code example #33
def main(image_1, image_2, weight_1, weight_2, plot=False):
    '''Combines `image_1` with `image_2`.

    Parameters
    ----------
    image_1: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D unsigned integer array
    image_2: numpy.ndarray[numpy.uint8 or numpy.uint16]
        2D unsigned integer array
    weight_1: int
        weight for `image_1`
    weight_2: int
        weight for `image_2`

    Returns
    -------
    jtmodules.combine_channels.Output

    Raises
    ------
    ValueError
        when `weight_1` or `weight_2` are not positive integers
    ValueError
        when `image_1` and `image_2` don't have the same dimensions
        and data type and if they don't have unsigned integer type
    '''
    if not isinstance(weight_1, int):
        raise TypeError('Weight #1 must have integer type.')
    if not isinstance(weight_2, int):
        raise TypeError('Weight #2 must have integer type.')
    if weight_1 < 1:
        raise ValueError('Weight #1 must be a positive integer.')
    if weight_2 < 1:
        raise ValueError('Weight #2 must be a positive integer.')
    logger.info('weight for first image: %d', weight_1)
    logger.info('weight for second image: %d', weight_2)

    if image_1.shape != image_2.shape:
        raise ValueError('The two images must have identical dimensions.')
    if image_1.dtype != image_2.dtype:
        raise ValueError('The two images must have identical data type.')

    if image_1.dtype == np.uint8:
        max_val = 2**8 - 1
    elif image_1.dtype == np.uint16:
        max_val = 2**16 - 1
    else:
        raise ValueError('The two images must have unsigned integer type.')

    logger.info('cast images to type float for arithmetic')
    img_1 = mh.stretch(image_1, 0, 1, float)
    img_2 = mh.stretch(image_2, 0, 1, float)
    logger.info('combine images using the provided weights')
    combined_image = img_1 * weight_1 + img_2 * weight_2
    logger.info('cast combined image back to correct data type')
    combined_image = mh.stretch(combined_image, 0, max_val, image_1.dtype)

    if plot:
        from jtlib import plotting
        plots = [
            plotting.create_intensity_image_plot(image_1, 'ul'),
            plotting.create_intensity_image_plot(image_2, 'ur'),
            plotting.create_intensity_image_plot(combined_image, 'll')
        ]
        figure = plotting.create_figure(plots, title='combined image')
    else:
        figure = str()

    return Output(combined_image, figure)
Code example #34
def region_growing(labelImg):
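    # Grow the labelled seeds over the inverted, contrast-stretched distance
    # transform of the foreground mask (labelImg > 0).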
    distances = mahotas.stretch(mahotas.distance(labelImg > 0))
    surface = numpy.int32(distances.max() - distances)
    areas = mahotas.cwatershed(surface, labelImg)
    return areas
Code example #35
File: Review 2.py  Project: trixtun/IP-project
def check_sigma(sigma):
    dnaf = mh.gaussian_filter(dna.astype(float), sigma) 
    maxima = mh.regmax(mh.stretch(dnaf))
    maxima = mh.dilate(maxima, np.ones((5,5))) 
    plt.imshow(mh.as_rgb(np.maximum(255*maxima, dnaf), dnaf, dna > T_mean))
Code example #36
File: lenna-ring.py  Project: Axighi/Scripts
import mahotas as mh
import numpy as np
im = mh.imread('lenna.jpg')
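# Blend the sharp image with a heavily blurred copy using a Gaussian weight
# map centred on the image: sharp in the middle, soft towards the edges.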
r,g,b = im.transpose(2,0,1)
h,w = r.shape
r12 = mh.gaussian_filter(r, 12.)
g12 = mh.gaussian_filter(g, 12.)
b12 = mh.gaussian_filter(b, 12.)
im12 = mh.as_rgb(r12,g12,b12)

X,Y = np.mgrid[:h,:w]
X = X-h/2.
Y = Y-w/2.
X /= X.max()
Y /= Y.max()
C = np.exp(-2.*(X**2+ Y**2))
C -= C.min()
C /= C.ptp()
C = C[:,:,None]

ring = mh.stretch(im*C + (1-C)*im12)
mh.imsave('lenna-ring.jpg', ring)
Code example #37
    def extract(self):
        '''Extracts Gabor texture features by filtering the intensity image with
        Gabor kernels for a defined range of `frequency` and `theta` values and
        then calculating a score for each object.

        Returns
        -------
        pandas.DataFrame
            extracted feature values for each object in `label_image`
        '''
        # Create an empty dataset in case no objects were detected
        logger.info('extract texture features')
        features = list()
        for obj in self.object_ids:
            mask = self.get_object_mask_image(obj)
            label = mask.astype(np.int32)
            img = self.get_object_intensity_image(obj)
            img[~mask] = 0
            values = list()
            # Gabor
            logger.debug('extract Gabor features for object #%d', obj)
            for freq in self.frequencies:
                best_score = 0
                for angle in range(self.theta_range):
                    theta = np.pi * angle / self.theta_range
                    g = gabor(img, label, freq, theta)
                    score_r = ndi.measurements.sum(
                        g.real, label, np.arange(1, dtype=np.int32) + 1
                    )
                    score_i = ndi.measurements.sum(
                        g.imag, label, np.arange(1, dtype=np.int32) + 1
                    )
                    score = np.sqrt(score_r**2 + score_i**2)
                    best_score = np.max([best_score, score])
                values.append(best_score)
            # Threshold Adjacency Statistics
            logger.debug('extract TAS features for object #%d', obj)
            tas_values = mh.features.pftas(img, T=self._threshold)
            values.extend(tas_values)
            # Hu
            logger.debug('extract Hu moments for object #%d', obj)
            region = self.object_properties[obj]
            hu_values = region.weighted_moments_hu
            values.extend(hu_values)
            # Local Binary Pattern
            logger.debug('extract Local Binary Patterns for object #%d', obj)
            for r in self.radius:
                # We may want to use more points, but the number of features
                # increases exponentially with the number of neighbourhood
                # points.
                vals = mh.features.lbp(img, radius=r, points=8)
                values.extend(vals)
            if self.compute_haralick:
                # Haralick
                logger.debug('extract Haralick features for object #%d', obj)
                # NOTE: Haralick features are computed on 8-bit images.
                clipped_img = np.clip(img, 0, self._clip_value)
                rescaled_img = mh.stretch(clipped_img)
                haralick_values = mh.features.haralick(
                    rescaled_img, ignore_zeros=False, return_mean=True
                )
                if not isinstance(haralick_values, np.ndarray):
                    # NOTE: setting `ignore_zeros` to True creates problems for some
                    # objects, when all values of the adjacency matrices are zeros
                    haralick_values = np.empty((len(self.names), ), dtype=float)
                    haralick_values[:] = np.NAN
                values.extend(haralick_values)
            features.append(values)
        return pd.DataFrame(features, columns=self.names, index=self.object_ids)
Code example #38
File: Review 2.py  Project: trixtun/IP-project
bin_image = dnaf > T_mean 
plt.imshow(bin_image)
labeled, nr_objects = mh.label(bin_image) 
print(nr_objects)
plt.imshow(labeled) 
plt.jet()
@interact(sigma=(1.,16.)) 
def check_sigma(sigma):
    dnaf = mh.gaussian_filter(dna.astype(float), sigma) 
    maxima = mh.regmax(mh.stretch(dnaf))
    maxima = mh.dilate(maxima, np.ones((5,5))) 
    plt.imshow(mh.as_rgb(np.maximum(255*maxima, dnaf), dnaf, dna > T_mean))
sigma = 12.0

dnaf = mh.gaussian_filter(dna.astype(float),sigma) 
maxima = mh.regmax(mh.stretch(dnaf)) 
maxima,_= mh.label(maxima) 
plt.imshow(maxima)
dist = mh.distance(bin_image) 
plt.imshow(dist)
dist = 255 - mh.stretch(dist)
watershed = mh.cwatershed(dist,maxima) 
plt.imshow(watershed)
watershed *= bin_image 
plt.imshow(watershed)
watershed = mh.labeled.remove_bordering(watershed) 
plt.imshow(watershed)
sizes = mh.labeled.labeled_size(watershed)
# The conversion below is not necessary in newer versions of mahotas:
watershed = watershed.astype(np.intc)
@interact(min_size=(100,4000,20)) 
def do_plot(min_size):
Code example #39
File: watershed.py  Project: nathanin/CellProfiler
    def run(self, workspace):
        x_name = self.x_name.value

        y_name = self.y_name.value

        images = workspace.image_set

        x = images.get_image(x_name)

        dimensions = x.dimensions

        x_data = x.pixel_data

        if self.operation.value == "Distance":
            original_shape = x_data.shape

            factor = self.downsample.value

            if factor > 1:
                if x.volumetric:
                    factors = (1, factor, factor)
                else:
                    factors = (factor, factor)

                x_data = skimage.transform.downscale_local_mean(
                    x_data, factors)

            threshold = skimage.filters.threshold_otsu(x_data)

            x_data = x_data > threshold

            distance = scipy.ndimage.distance_transform_edt(x_data)

            distance = mahotas.stretch(distance)

            surface = distance.max() - distance

            if x.volumetric:
                footprint = numpy.ones(
                    (self.connectivity.value, self.connectivity.value,
                     self.connectivity.value))
            else:
                footprint = numpy.ones(
                    (self.connectivity.value, self.connectivity.value))

            peaks = mahotas.regmax(distance, footprint)

            if x.volumetric:
                markers, _ = mahotas.label(peaks, numpy.ones((16, 16, 16)))
            else:
                markers, _ = mahotas.label(peaks, numpy.ones((16, 16)))

            y_data = mahotas.cwatershed(surface, markers)

            y_data = y_data * x_data

            if factor > 1:
                y_data = skimage.transform.resize(y_data,
                                                  original_shape,
                                                  mode="edge",
                                                  order=0,
                                                  preserve_range=True)

                y_data = numpy.rint(y_data).astype(numpy.uint16)
        else:
            markers_name = self.markers_name.value

            markers = images.get_image(markers_name)

            markers_data = markers.pixel_data

            if x.multichannel:
                x_data = skimage.color.rgb2gray(x_data)

            if markers.multichannel:
                markers_data = skimage.color.rgb2gray(markers_data)

            mask_data = None

            if not self.mask_name.is_blank:
                mask_name = self.mask_name.value

                mask = images.get_image(mask_name)

                mask_data = mask.pixel_data

            y_data = skimage.morphology.watershed(image=x_data,
                                                  markers=markers_data,
                                                  mask=mask_data)

        y_data = skimage.measure.label(y_data)

        objects = cellprofiler.object.Objects()

        objects.segmented = y_data

        objects.parent_image = x

        workspace.object_set.add_objects(objects, y_name)

        self.add_measurements(workspace)

        if self.show_window:
            workspace.display_data.x_data = x.pixel_data

            workspace.display_data.y_data = y_data

            workspace.display_data.dimensions = dimensions
Code example #40
File: test_watershed.py  Project: cskv/CellProfiler
def test_run_distance_declump_intensity(
    image, module, image_set, workspace, connectivity, compactness, watershed_line
):
    module.use_advanced.value = True

    module.operation.value = "Distance"

    module.x_name.value = "binary"

    module.y_name.value = "watershed"

    module.connectivity.value = connectivity

    module.footprint.value = 3

    data = image.pixel_data

    if image.multichannel:
        data = skimage.color.rgb2gray(data)

    threshold = skimage.filters.threshold_otsu(data)

    binary = data > threshold

    image_set.add(
        "binary",
        cellprofiler_core.image.Image(
            image=binary, convert=False, dimensions=image.dimensions
        ),
    )

    module.declump_method.value = "Intensity"

    module.reference_name.value = "gradient"

    module.gaussian_sigma.value = 1

    # must pass pixel data into image set for intensity declumping
    gradient = image.pixel_data

    image_set.add(
        "gradient",
        cellprofiler_core.image.Image(
            image=gradient, convert=False, dimensions=image.dimensions
        ),
    )

    # set the structuring element, used for declumping
    if image.dimensions == 3:
        module.structuring_element.value = "Ball,1"
        selem = skimage.morphology.ball(1)

    else:
        module.structuring_element.value = "Disk,1"
        selem = skimage.morphology.disk(1)


    # run the module
    module.run(workspace)

    # distance-based watershed
    distance = scipy.ndimage.distance_transform_edt(binary)

    distance = mahotas.stretch(distance)

    surface = distance.max() - distance

    if image.volumetric:
        footprint = numpy.ones((3, 3, 3))
    else:
        footprint = numpy.ones((3, 3))

    peaks = mahotas.regmax(distance, footprint)

    if image.volumetric:
        markers, _ = mahotas.label(peaks, numpy.ones((16, 16, 16)))
    else:
        markers, _ = mahotas.label(peaks, numpy.ones((16, 16)))

    watershed_distance = mahotas.cwatershed(surface, markers)

    watershed_distance = watershed_distance * binary

    # intensity-based declumping
    peak_image = scipy.ndimage.distance_transform_edt(watershed_distance > 0)

    # Set the image as a float and rescale to full bit depth
    watershed_image = skimage.img_as_float(gradient, force_copy=True)
    watershed_image -= watershed_image.min()
    watershed_image = 1 - watershed_image

    if image.multichannel:
        watershed_image = skimage.color.rgb2gray(watershed_image)

    watershed_image = skimage.filters.gaussian(watershed_image, sigma=module.gaussian_sigma.value)

    seed_coords = skimage.feature.peak_local_max(peak_image,
                                                 min_distance=module.min_dist.value,
                                                 threshold_rel=module.min_intensity.value,
                                                 exclude_border=module.exclude_border.value,
                                                 num_peaks=module.max_seeds.value if module.max_seeds.value != -1
                                                 else numpy.inf)

    seeds = numpy.zeros_like(peak_image, dtype=bool)
    seeds[tuple(seed_coords.T)] = True

    seeds = skimage.morphology.binary_dilation(seeds, selem)

    number_objects = skimage.measure.label(watershed_distance, return_num=True)[1]

    seeds_dtype = (numpy.uint16 if number_objects < numpy.iinfo(numpy.uint16).max else numpy.uint32)

    seeds = scipy.ndimage.label(seeds)[0]
    markers = numpy.zeros_like(seeds, dtype=seeds_dtype)
    markers[seeds > 0] = -seeds[seeds > 0]

    expected = skimage.segmentation.watershed(
        connectivity=connectivity,
        image=watershed_image,
        markers=markers,
        mask=binary !=0
    )

    zeros = numpy.where(expected==0)
    expected += numpy.abs(numpy.min(expected)) + 1
    expected[zeros] = 0

    expected = skimage.measure.label(expected)

    actual = workspace.get_objects("watershed")

    numpy.testing.assert_array_equal(expected, actual.segmented)
Code example #41
File: segmentation.py  Project: dvischi/TissueMAPS
def separate_clumped_objects(clumps_image, min_cut_area, min_area, max_area,
        max_circularity, max_convexity):
    '''Separates objects in `clumps_image` based on morphological criteria.

    Parameters
    ----------
    clumps_image: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        objects that should be separated
    min_cut_area: int
        minimal area an object must have (prevents cuts that would result
        in too small objects)
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump

    Returns
    -------
    numpy.ndarray[numpy.uint32]
        separated objects

    See also
    --------
    :class:`jtlib.features.Morphology`
    '''

    logger.info('separate clumped objects')
    label_image, n_objects = mh.label(clumps_image)
    if n_objects == 0:
        logger.debug('no objects')
        return label_image

    pad = 1
    cutting_pass = 1
    separated_image = label_image.copy()
    while True:
        logger.info('cutting pass #%d', cutting_pass)
        cutting_pass += 1
        label_image = mh.label(label_image > 0)[0]

        f = Morphology(label_image)
        values = f.extract()
        index = (
            (min_area < values['Morphology_Area']) &
            (values['Morphology_Area'] <= max_area) &
            (values['Morphology_Convexity'] <= max_convexity) &
            (values['Morphology_Circularity'] <= max_circularity)
        )
        clumped_ids = values[index].index.values
        not_clumped_ids = values[~index].index.values

        if len(clumped_ids) == 0:
            logger.debug('no more clumped objects')
            break

        mh.labeled.remove_regions(label_image, not_clumped_ids, inplace=True)
        mh.labeled.relabel(label_image, inplace=True)
        bboxes = mh.labeled.bbox(label_image)
        for oid in np.unique(label_image[label_image > 0]):
            bbox = bboxes[oid]
            logger.debug('process clumped object #%d', oid)
            obj_image = extract_bbox(label_image, bboxes[oid], pad=pad)
            obj_image = obj_image == oid

            # Rescale distance intensities to make them independent of clump size
            dist = mh.stretch(mh.distance(obj_image))

            # Find peaks that can be used as seeds for the watershed transform
            thresh = mh.otsu(dist)
            peaks = dist > thresh
            n = mh.label(peaks)[1]
            if n == 1:
                logger.debug(
                    'only one peak detected - perform iterative erosion'
                )
                # Iteratively shrink the peaks until we have two peaks that we
                # can use to separate the clump.
                while True:
                    tmp = mh.morph.open(mh.morph.erode(peaks))
                    n = mh.label(tmp)[1]
                    if n == 2 or n == 0:
                        if n == 2:
                            peaks = tmp
                        break
                    peaks = tmp

            # Select the two biggest peaks, since we want only two objects.
            peaks = mh.label(peaks)[0]
            sizes = mh.labeled.labeled_size(peaks)
            index = np.argsort(sizes)[::-1][1:3]
            for label in np.unique(peaks):
                if label not in index:
                    peaks[peaks == label] = 0
            peaks = mh.labeled.relabel(peaks)[0]
            regions = mh.cwatershed(np.invert(dist), peaks)

            # Use the line separating watershed regions to make the cut
            se = np.ones((3,3), np.bool)
            line = mh.labeled.borders(regions, Bc=se)
            line[~obj_image] = 0
            line = mh.morph.dilate(line)

            # Ensure that cut is reasonable given user-defined criteria
            test_cut_image = obj_image.copy()
            test_cut_image[line] = False
            subobjects, n_subobjects = mh.label(test_cut_image)
            sizes = mh.labeled.labeled_size(subobjects)
            smaller_object_area = np.min(sizes)
            smaller_id = np.where(sizes == smaller_object_area)[0][0]
            smaller_object = subobjects == smaller_id

            do_cut = (
                (smaller_object_area > min_cut_area) &
                (np.sum(line) > 0)
            )
            if do_cut:
                logger.debug('cut object #%d', oid)
                y, x = np.where(line)
                y_offset, x_offset = bboxes[oid][[0, 2]] - pad - 1
                y += y_offset
                x += x_offset
                label_image[y, x] = 0
                separated_image[y, x] = 0
            else:
                logger.debug('don\'t cut object #%d', oid)
                mh.labeled.remove_regions(label_image, oid, inplace=True)

    return mh.label(separated_image)[0]
Code example #42
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License

from matplotlib import pyplot as plt
import numpy as np
import mahotas as mh
image = mh.imread('../1400OS_10_01.jpeg')
image = mh.colors.rgb2gray(image, dtype=np.uint8)
image = image[::4, ::4]
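# mh.sobel thresholds the gradient magnitude by default; with just_filter=True
# it returns the raw (unthresholded) filter response instead.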
thresh = mh.sobel(image)
filtered = mh.sobel(image, just_filter=True)

thresh = mh.dilate(thresh, np.ones((7, 7)))
filtered = mh.dilate(mh.stretch(filtered), np.ones((7, 7)))

h, w = thresh.shape
canvas = 255 * np.ones((h, w * 2 + 64), np.uint8)
canvas[:, :w] = thresh * 255
canvas[:, -w:] = filtered

mh.imsave('../1400OS_10_09+.jpg', canvas)
Code example #43
def features_for(imfile):
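    # Stretch to 0-255, quantise to 64 grey levels (//4), average the 13
    # Haralick features over the four directions and keep features 1 and 12.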
    return mh.features.haralick(mh.stretch(mh.imread(imfile))//4).mean(0)[[1,12]]
Code example #44
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License

from matplotlib import pyplot as plt
import numpy as np
import mahotas as mh
image = mh.imread('../1400OS_10_01.jpeg')
image = mh.colors.rgb2gray(image, dtype=np.uint8)
image = image[::4, ::4]
thresh = mh.sobel(image)
filtered = mh.sobel(image, just_filter=True)

thresh = mh.dilate(thresh, np.ones((7, 7)))
filtered = mh.dilate(mh.stretch(filtered), np.ones((7, 7)))


h, w = thresh.shape
canvas = 255 * np.ones((h, w * 2 + 64), np.uint8)
canvas[:, :w] = thresh * 255
canvas[:, -w:] = filtered

mh.imsave('../1400OS_10_09+.jpg', canvas)
Code example #45
sizes = mahotas.labeled.labeled_size(colonies)
# print(sizes)

too_small = np.where(sizes < 100)
colonies = mahotas.labeled.remove_regions(colonies, too_small)
#colonies = mahotas.labeled.remove_bordering(colonies)
colonies, n_colonies = mahotas.labeled.relabel(colonies)
print('Found {} colonies.'.format(n_colonies))
# plt.imshow(colonies)
# print(colonies)


# Investigate nuclei within cell clusters

# Now, we compute the distance transform:
distances = mahotas.stretch(mahotas.distance(local_threshed))

# We find and label the regional maxima:
Bc = np.ones((9,9))

maxima = mahotas.morph.regmax(distances, Bc=Bc)
spots,n_spots = mahotas.label(maxima, Bc=Bc)
print('Found {} maxima.'.format(n_spots))
# plt.imshow(spots)

# Finally, to obtain the image above, we invert the distance transform
# (because of the way that cwatershed is defined) and compute the watershed:
surface = (distances.max() - distances)
areas = mahotas.cwatershed(surface, spots)
areas *= local_threshed
Code example #46
from os import path

import mahotas
import numpy as np
from matplotlib import pyplot as plt

try:
    nuclear_path = path.join(
                    path.dirname(path.abspath(__file__)),
                    'data',
                    'nuclear.png')
except NameError:
    nuclear_path = path.join('data', 'nuclear.png')

nuclear = mahotas.imread(nuclear_path)
nuclear = nuclear[:,:,0]
nuclear = mahotas.gaussian_filter(nuclear, 1.)
threshed  = (nuclear > nuclear.mean())
distances = mahotas.stretch(mahotas.distance(threshed))
Bc = np.ones((9,9))

maxima = mahotas.morph.regmax(distances, Bc=Bc)
spots,n_spots = mahotas.label(maxima, Bc=Bc)
surface = (distances.max() - distances)
areas = mahotas.cwatershed(surface, spots)
areas *= threshed



import random
from matplotlib import colors as c
colors = list(map(plt.cm.jet, range(0, 256, 4)))  # list() so shuffle works on Python 3
random.shuffle(colors)
colors[0] = (0., 0., 0., 1.)
Code example #47
File: vsk_utils.py  Project: thouis/icon
def region_growing(labelImg):
    distances = mahotas.stretch(mahotas.distance(labelImg>0))
    surface = numpy.int32(distances.max() - distances)
    areas = mahotas.cwatershed(surface, labelImg)
    return areas
Code example #48
im = mh.imread('lenna.jpg')

# This breaks up the image into RGB channels
r, g, b = im.transpose(2, 0, 1)
h, w = r.shape

# smooth the image per channel:
r12 = mh.gaussian_filter(r, 12.)
g12 = mh.gaussian_filter(g, 12.)
b12 = mh.gaussian_filter(b, 12.)

# build back the RGB image
im12 = mh.as_rgb(r12, g12, b12)

X, Y = np.mgrid[:h, :w]
X = X - h / 2.
Y = Y - w / 2.
X /= X.max()
Y /= Y.max()

# Array C will have the highest values in the center, fading out to the edges:

C = np.exp(-2. * (X**2 + Y**2))
C -= C.min()
C /= C.ptp()
C = C[:, :, None]

# The final result is sharp in the centre and smooths out to the borders:
ring = mh.stretch(im * C + (1 - C) * im12)
mh.imsave('lenna-ring.jpg', ring)
Code example #49
def separate_clumped_objects(clumps_image, min_cut_area, min_area, max_area,
        max_circularity, max_convexity):
    '''Separates objects in `clumps_image` based on morphological criteria.

    Parameters
    ----------
    clumps_image: numpy.ndarray[Union[numpy.int32, numpy.bool]]
        objects that should be separated
    min_cut_area: int
        minimal area an object must have (prevents cuts that would result
        in too small objects)
    min_area: int
        minimal area an object must have to be considered a clump
    max_area: int
        maximal area an object can have to be considered a clump
    max_circularity: float
        maximal circularity an object can have to be considered a clump
    max_convexity: float
        maximal convexity an object can have to be considered a clump

    Returns
    -------
    numpy.ndarray[numpy.uint32]
        separated objects

    See also
    --------
    :class:`jtlib.features.Morphology`
    '''

    logger.info('separate clumped objects')
    label_image, n_objects = mh.label(clumps_image)
    if n_objects == 0:
        logger.debug('no objects')
        return label_image

    pad = 1
    cutting_pass = 1
    separated_image = label_image.copy()
    while True:
        logger.info('cutting pass #%d', cutting_pass)
        cutting_pass += 1
        label_image = mh.label(label_image > 0)[0]

        f = Morphology(label_image)
        values = f.extract()
        index = (
            (min_area < values['Morphology_Area']) &
            (values['Morphology_Area'] <= max_area) &
            (values['Morphology_Convexity'] <= max_convexity) &
            (values['Morphology_Circularity'] <= max_circularity)
        )
        clumped_ids = values[index].index.values
        not_clumped_ids = values[~index].index.values

        if len(clumped_ids) == 0:
            logger.debug('no more clumped objects')
            break

        mh.labeled.remove_regions(label_image, not_clumped_ids, inplace=True)
        mh.labeled.relabel(label_image, inplace=True)
        bboxes = mh.labeled.bbox(label_image)
        for oid in np.unique(label_image[label_image > 0]):
            bbox = bboxes[oid]
            logger.debug('process clumped object #%d', oid)
            obj_image = extract_bbox(label_image, bboxes[oid], pad=pad)
            obj_image = obj_image == oid

            # Rescale distance intensities to make them independent of clump size
            dist = mh.stretch(mh.distance(obj_image))

            # Find peaks that can be used as seeds for the watershed transform
            thresh = mh.otsu(dist)
            peaks = dist > thresh
            n = mh.label(peaks)[1]
            if n == 1:
                logger.debug(
                    'only one peak detected - perform iterative erosion'
                )
                # Iteratively shrink the peaks until we have two peaks that we
                # can use to separate the clump.
                while True:
                    tmp = mh.morph.open(mh.morph.erode(peaks))
                    n = mh.label(tmp)[1]
                    if n == 2 or n == 0:
                        if n == 2:
                            peaks = tmp
                        break
                    peaks = tmp

            # Keep the two largest peak regions (index 0, the background, is
            # normally the largest), since we want exactly two objects.
            peaks = mh.label(peaks)[0]
            sizes = mh.labeled.labeled_size(peaks)
            index = np.argsort(sizes)[::-1][1:3]
            for label in np.unique(peaks):
                if label not in index:
                    peaks[peaks == label] = 0
            peaks = mh.labeled.relabel(peaks)[0]
            regions = mh.cwatershed(np.invert(dist), peaks)

            # Use the line separating watershed regions to make the cut
            se = np.ones((3, 3), bool)  # the np.bool alias was removed from NumPy
            line = mh.labeled.borders(regions, Bc=se)
            line[~obj_image] = 0
            line = mh.morph.dilate(line)

            # Ensure that cut is reasonable given user-defined criteria
            test_cut_image = obj_image.copy()
            test_cut_image[line] = False
            subobjects, n_subobjects = mh.label(test_cut_image)
            sizes = mh.labeled.labeled_size(subobjects)
            smaller_object_area = np.min(sizes)
            smaller_id = np.where(sizes == smaller_object_area)[0][0]
            smaller_object = subobjects == smaller_id

            do_cut = (
                (smaller_object_area > min_cut_area) &
                (np.sum(line) > 0)
            )
            if do_cut:
                logger.debug('cut object #%d', oid)
                y, x = np.where(line)
                y_offset, x_offset = bboxes[oid][[0, 2]] - pad - 1
                y += y_offset
                x += x_offset
                label_image[y, x] = 0
                separated_image[y, x] = 0
            else:
                logger.debug('don\'t cut object #%d', oid)
                mh.labeled.remove_regions(label_image, oid, inplace=True)

    return mh.label(separated_image)[0]
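The core of the cutting step above is: distance-transform the clump, find two peaks, run a seeded watershed on the inverted distance, and erase the boundary between the two regions. A stripped-down sketch of that idea on a synthetic clump; the seed selection is simplified to a fixed fraction of the peak distance rather than the Otsu-plus-erosion logic used in the function:

import numpy as np
import mahotas as mh

# Two overlapping discs form one connected "clump".
Y, X = np.mgrid[:80, :120]
clump = ((Y - 40) ** 2 + (X - 42) ** 2 < 20 ** 2) | ((Y - 40) ** 2 + (X - 74) ** 2 < 20 ** 2)

dist = mh.stretch(mh.distance(clump))     # rescaled distance transform
peaks = dist > 0.7 * dist.max()           # crude seeds (assumption, not the Otsu step above)
markers = mh.label(peaks)[0]

regions = mh.cwatershed(dist.max() - dist, markers)
line = mh.labeled.borders(regions)        # boundary between the watershed regions
line &= clump                             # keep only the part that crosses the clump

cut = clump.copy()
cut[line] = False
print(mh.label(cut)[1])                   # two separated objects expected for this geometry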
Code example #50
plt.show()

# Gaussian filter
im8 = mh.gaussian_filter(image, 8)
plt.imshow(im8)
plt.savefig('Lenna_gaussian_filter_8.jpg')
plt.show()


# Salt and pepper noise
image = mh.colors.rgb2gray(image)
plt.gray()
salt = np.random.random(image.shape) > .8
pepper = np.random.random(image.shape) > .8

image = mh.stretch(image)
image = np.maximum(salt * 170, image)
image = np.minimum(pepper * 30 + image * (~pepper), image)
plt.imshow(image)
plt.savefig('Lenna_salt_pepper.jpg')
plt.show()



# Putting center in focus
def center_focus(im,f):
  r, g, b = im.transpose(2,0,1)
  r12 = mh.gaussian_filter(r, 12.0)
  g12 = mh.gaussian_filter(g, 12.0)
  b12 = mh.gaussian_filter(b, 12.0)
  im12 = mh.as_rgb(r12,g12,b12)
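The snippet above is cut off before the blending step. A possible completion, mirroring code example #48 and assuming f controls how sharply the focus mask falls off toward the edges (the parameter's meaning in the original script is not shown; example #48 additionally rescales the mask to the full [0, 1] range, which is omitted here):

import numpy as np
import mahotas as mh

def center_focus(im, f):
  r, g, b = im.transpose(2, 0, 1)
  r12 = mh.gaussian_filter(r, 12.0)
  g12 = mh.gaussian_filter(g, 12.0)
  b12 = mh.gaussian_filter(b, 12.0)
  im12 = mh.as_rgb(r12, g12, b12)
  h, w = r.shape
  Y, X = np.mgrid[:h, :w]
  Y = (Y - h / 2.) / (h / 2.)
  X = (X - w / 2.) / (w / 2.)
  C = np.exp(-f * (X ** 2 + Y ** 2))[:, :, None]   # f = 2. matches example #48
  return mh.stretch(im * C + (1 - C) * im12)

# e.g. ring = center_focus(mh.imread('lenna.jpg'), 2.)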
Code example #51
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License

import mahotas as mh
from mahotas.colors import rgb2grey
import numpy as np

im = mh.imread('lenna.jpg')
im = rgb2grey(im)

salt = np.random.random(im.shape) > .975
pepper = np.random.random(im.shape) > .975

im = np.maximum(salt * 170, mh.stretch(im))
im = np.minimum(pepper * 30 + im * (~pepper), im)

mh.imsave('../1400OS_10_13+.jpg', im.astype(np.uint8))
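The same corruption pattern, wrapped as a small reusable helper; the function name and parameters below are ours, not from the book code:

import numpy as np
import mahotas as mh

def add_salt_pepper(im, p=.025, salt_value=170, pepper_value=30):
    '''Return a uint8 copy of `im` with random salt & pepper noise.'''
    salt = np.random.random(im.shape) > (1. - p)
    pepper = np.random.random(im.shape) > (1. - p)
    noisy = np.maximum(salt * salt_value, mh.stretch(im))
    noisy = np.minimum(pepper * pepper_value + noisy * (~pepper), noisy)
    return noisy.astype(np.uint8)

# e.g. noisy = add_salt_pepper(mh.colors.rgb2grey(mh.imread('lenna.jpg')))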
Code example #52
import numpy as np
import mahotas as mh
image = mh.imread('../SimpleImageDataset/building05.jpg')
image = mh.colors.rgb2gray(image)

# Compute Gaussian filtered versions with increasing kernel widths
im8  = mh.gaussian_filter(image,  8)
im16 = mh.gaussian_filter(image, 16)
im32 = mh.gaussian_filter(image, 32)

# We now build a composite image with three panels:
#
# [ IM8 | | IM16 | | IM32 ]

h, w = im8.shape
canvas = np.ones((h, 3 * w + 256), np.uint8)
canvas *= 255
canvas[:, :w] = im8
canvas[:, w + 128:2 * w + 128] = im16
canvas[:, -w:] = im32
mh.imsave('../1400OS_10_05+.jpg', canvas[:, ::2])

# Threshold the image
# mh.otsu expects an integer image, so stretch the float result to uint8 first
im32 = mh.stretch(im32)
ot32 = mh.otsu(im32)

# Convert to a 0/255 np.uint8 image to match the other images
im255 = 255 * (im32 > ot32).astype(np.uint8)
mh.imsave('../1400OS_10_06+.jpg', im255)
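The stretch before mh.otsu matters because Otsu thresholding in mahotas operates on integer images while gaussian_filter returns floats; a tiny self-contained illustration:

import numpy as np
import mahotas as mh

blurred = mh.gaussian_filter(np.random.rand(64, 64) * 255, 4)  # float-valued result
blurred8 = mh.stretch(blurred)          # rescale to uint8 in [0, 255]
binary = blurred8 > mh.otsu(blurred8)   # otsu needs the integer image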
Code example #53
File: watershed.py Project: zindy/CellProfiler
    def run(self, workspace):
        x_name = self.x_name.value

        y_name = self.y_name.value

        images = workspace.image_set

        x = images.get_image(x_name)

        dimensions = x.dimensions

        x_data = x.pixel_data

        if self.operation.value == "Distance":
            original_shape = x_data.shape

            if x.volumetric:
                x_data = skimage.transform.resize(x_data, (original_shape[0], 256, 256), order=0, mode="edge")

            distance = scipy.ndimage.distance_transform_edt(x_data)

            distance = mahotas.stretch(distance)

            surface = distance.max() - distance

            if x.volumetric:
                footprint = numpy.ones((self.connectivity.value, self.connectivity.value, self.connectivity.value))
            else:
                footprint = numpy.ones((self.connectivity.value, self.connectivity.value))

            peaks = mahotas.regmax(distance, footprint)

            if x.volumetric:
                markers, _ = mahotas.label(peaks, numpy.ones((16, 16, 16)))
            else:
                markers, _ = mahotas.label(peaks, numpy.ones((16, 16)))

            y_data = mahotas.cwatershed(surface, markers)

            y_data = y_data * x_data

            if x.volumetric:
                y_data = skimage.transform.resize(y_data, original_shape, order=0, mode="edge")
        else:
            markers_name = self.markers_name.value

            markers = images.get_image(markers_name)

            data = x_data

            markers_data = markers.pixel_data

            mask_data = None

            if not self.mask_name.is_blank:
                mask_name = self.mask_name.value

                mask = images.get_image(mask_name)

                mask_data = mask.pixel_data

            y_data = skimage.morphology.watershed(
                image=data,
                markers=markers_data,
                mask=mask_data
            )

        y_data = skimage.measure.label(y_data)

        objects = cellprofiler.object.Objects()

        objects.segmented = y_data

        objects.parent_image = x

        workspace.object_set.add_objects(objects, y_name)

        self.add_measurements(workspace.measurements, y_data)

        if self.show_window:
            workspace.display_data.x_data = x.pixel_data

            workspace.display_data.y_data = y_data

            workspace.display_data.dimensions = dimensions
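The "Distance" branch above reduces to: distance transform, regional maxima as seeds, then a seeded watershed on the inverted distance. A minimal 2-D sketch of the same pipeline without the CellProfiler plumbing; the blob geometry and the small footprint below are arbitrary stand-ins for the module's image and connectivity setting:

import numpy
import scipy.ndimage
import mahotas

# Binary foreground with two touching blobs (stand-in for x_data above).
Y, X = numpy.mgrid[:80, :120]
x_data = ((Y - 40) ** 2 + (X - 45) ** 2 < 18 ** 2) | ((Y - 40) ** 2 + (X - 74) ** 2 < 18 ** 2)

distance = mahotas.stretch(scipy.ndimage.distance_transform_edt(x_data))
surface = distance.max() - distance

peaks = mahotas.regmax(distance, numpy.ones((3, 3)))   # small footprint in place of the connectivity setting
markers, _ = mahotas.label(peaks)                      # the module uses a wide 16x16 Bc here to merge nearby peaks

y_data = mahotas.cwatershed(surface, markers) * x_data # zero out the background, as y_data * x_data above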
Code example #54
File: image.py Project: icaoberg/pyslic
 def getchannel(channel):
     if type(self.channels['protein']) == list:
         orig = self.channeldata[channel][idx, :, :]
     else:
         orig = self.channeldata[channel]
     return mahotas.stretch(orig)
Code example #55
File: imageout.py Project: thanasi/imaging
def overlay_thresh(im):
    return overlay_withcolor(im, stretch(im) < double_otsu_smaller(stretch(im)))
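overlay_withcolor and double_otsu_smaller are project-specific helpers that are not shown here; a rough stand-in using plain mahotas, with a single Otsu threshold in place of the double-Otsu variant:

import mahotas as mh
from mahotas import stretch

def overlay_thresh_simple(im):
    im8 = stretch(im)
    mask = im8 < mh.otsu(im8)            # below-threshold pixels
    return mh.overlay(im8, red=mask)     # paint them red over the grey image

# e.g. overlay_thresh_simple(mh.imread('lenna.jpg', as_grey=True))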