Example #1
def pk_pos(img_, make_sparse=False, nsigs=7, sig_G=None, thresh=1):
    """
    function for detecting peaks with a little flexibility... 

    Parameters
    ==========
    img_, np.ndarray
        an image, it will be copied
    
    make_sparse, bool
        whether to threshold the image according to argument nsigs or not.
        if True, all pixels below mean + nsigs*std (computed over the positive pixels) will be set to 0

    nsigs, float
        how many standard deviations above the mean should a pixel be to be considered
        as a peak

    sig_G, float
        gaussian sigma (standard deviation) for smoothing the image prior to peak detection;
        if None, no smoothing is applied

    thresh, float
        minimum pixel value at a detected position for the peak to be kept

    Returns
    =======
    pos, list of tuples
        peak positions [(y0,x0), (y1,x1), ...]
        where y,x correspond to the image slow,fast scan directions, respectively
    intens, list
        the intensities of the peaks (the pixel value at each detected position)
    
    
    Note: if make_sparse is False, nsigs is ignored.

    """
    if make_sparse:
        img = img_.copy()
        m = img[img > 0].mean()
        s = img[img > 0].std()
        img[img < m + nsigs * s] = 0
        if sig_G is not None:
            img = gaussian_filter(img, sig_G)
        lab_img, nlab = measurements.label(detect_peaks(img))
        locs = measurements.find_objects(lab_img)
        pos = [(int((y.start + y.stop) / 2.), int((x.start + x.stop) / 2.))
               for y, x in locs]
        pos = [p for p in pos if img[p[0], p[1]] > thresh]
        intens = [img[p[0], p[1]] for p in pos]
    else:
        if sig_G is not None:
            lab_img, nlab = measurements.label(
                detect_peaks(gaussian_filter(img_, sig_G)))
        else:
            lab_img, nlab = measurements.label(detect_peaks(img_))
        locs = measurements.find_objects(lab_img)
        pos = [(int((y.start + y.stop) / 2.), int((x.start + x.stop) / 2.))
               for y, x in locs]
        pos = [p for p in pos if img_[p[0], p[1]] > thresh]
        intens = [img_[p[0], p[1]] for p in pos]
    return pos, intens
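For reference, the core pattern used in the example above (label a thresholded peak mask, take bounding boxes with find_objects, and treat the box centres as peak coordinates) can be exercised in isolation; a minimal self-contained sketch using only numpy and scipy.ndimage, not part of the original example:

import numpy as np
from scipy.ndimage import label, find_objects

toy = np.zeros((16, 16))
toy[4, 5] = 3.0       # two isolated bright pixels
toy[10, 12] = 7.0
lab_img, nlab = label(toy > 1)
centers = [(int((y.start + y.stop) / 2.), int((x.start + x.stop) / 2.))
           for y, x in find_objects(lab_img)]
print(centers)                            # [(4, 5), (10, 12)]
print([float(toy[p]) for p in centers])   # [3.0, 7.0]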
Example #2
def find_objects(image, **kw):
    """Redefine the scipy.ndimage.measurements.find_objects function to
    work with a wider range of data types.  The default function
    is inconsistent about the data types it accepts on different
    platforms.
    
    Return a list of slice tuples for each label (except 0/bg),
    or None for missing labels between 0 and max_label+1.
    """
    # This OpenCV based approach is MUCH slower:
    # objects = list()
    # for label in range(max_label+1 if max_label else amax(image)):
    #     mask = array(image==(label+1), uint8)
    #     if mask.any():
    #         x, y, w, h = cv2.boundingRect(mask)
    #         objects.append(sl.box(y,y+h,x,x+w))
    #     else:
    #         objects.append(None)
    # return objects
    try:
        return measurements.find_objects(image, **kw)
    except:
        pass
    types = ["int32", "uint32", "int64", "uint64", "int16", "uint16"]
    for t in types:
        try:
            return measurements.find_objects(array(image, dtype=t), **kw)
        except:
            pass
    # let it raise the same exception as before
    return measurements.find_objects(image, **kw)
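A quick usage sketch for the wrapper above; this assumes the wrapper is in scope together with numpy's array and scipy.ndimage's measurements module, as in the source project:

import numpy as np

labels = np.array([[0, 1, 1, 0],
                   [0, 0, 0, 2],
                   [0, 0, 0, 2]])
print(find_objects(labels))
# [(slice(0, 1, None), slice(1, 3, None)), (slice(1, 3, None), slice(3, 4, None))]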
Example #3
def _filter_grouplen(arr, minsize=3):
    """Filter out the groups of grid points smaller than minsize

    Parameters
    ----------
    arr : the array to filter (should be an array of Falses and Trues)
    minsize : the minimum size of the group

    Returns
    -------
    the array, with small groups removed
    """

    # Do it with trues
    r, nr = label(arr)
    nr = [
        i + 1 for i, o in enumerate(find_objects(r)) if (len(r[o]) >= minsize)
    ]
    arr = np.asarray([ri in nr for ri in r])

    # and with Falses
    r, nr = label(~arr)
    nr = [
        i + 1 for i, o in enumerate(find_objects(r)) if (len(r[o]) >= minsize)
    ]
    arr = ~np.asarray([ri in nr for ri in r])

    return arr
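A quick behavioural check of _filter_grouplen on a 1-D boolean array, assuming numpy is imported as np and that label and find_objects come from scipy.ndimage (as in the surrounding project):

import numpy as np

mask = np.array([True, True, False, True, True, True, True, False, True])
print(_filter_grouplen(mask, minsize=3))
# [False False False  True  True  True  True  True  True]
# runs of equal values shorter than minsize are absorbed by their neighbours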
Example #4
def find_objects(image, **kw):
    """Expand scipy.ndimage.measurements.find_objects to work w/ more data.

    The default function is inconsistent about the data types it accepts on
    different platforms.

    # Arguments
        image [np array]: the image

    # Returns
        [list of slice tuples]: the bounding box of each labelled connected
            component (None for labels missing from the image)
    """
    try:
        return measurements.find_objects(image, **kw)
    except:
        pass

    types = ["int32", "uint32", "int64", "uint64", "int16", "uint16"]
    for t in types:
        try:
            return measurements.find_objects(array(image, dtype=t), **kw)
        except:
            pass

    # let it raise the same exception as before
    return measurements.find_objects(image, **kw)
Example #5
def find_objects(image,**kw):
    """Redefine the scipy.ndimage.measurements.find_objects function to
    work with a wider range of data types.  The default function
    is inconsistent about the data types it accepts on different
    platforms."""
    try: return measurements.find_objects(image,**kw)
    except: pass
    types = ["int32","uint32","int64","uint64","int16","uint16"]
    for t in types:
        try: return measurements.find_objects(array(image,dtype=t),**kw)
        except: pass
    # let it raise the same exception as before
    return measurements.find_objects(image,**kw)
Example #7
def pk_pos(img_,
           make_sparse=False,
           nsigs=7,
           sig_G=None,
           thresh=1,
           min_dist=None):
    if make_sparse:
        img = img_.copy()
        m = img[img > 0].mean()
        s = img[img > 0].std()
        img[img < m + nsigs * s] = 0
        if sig_G is not None:
            img = gaussian_filter(img, sig_G)
        lab_img, nlab = measurements.label(detect_peaks(img))
        locs = measurements.find_objects(lab_img)
        pos = [(int((y.start + y.stop) / 2.), int((x.start + x.stop) / 2.))
               for y, x in locs]
        pos = [p for p in pos if img[p[0], p[1]] > thresh]
        intens = [img[p[0], p[1]] for p in pos]
    else:
        if sig_G is not None:
            lab_img, nlab = measurements.label(
                detect_peaks(gaussian_filter(img_, sig_G)))
        else:
            lab_img, nlab = measurements.label(detect_peaks(img_))
        locs = measurements.find_objects(lab_img)
        pos = [(int((y.start + y.stop) / 2.), int((x.start + x.stop) / 2.))
               for y, x in locs]
        pos = [p for p in pos if img_[p[0], p[1]] > thresh]
        intens = [img_[p[0], p[1]] for p in pos]
    npeaks = len(pos)
    if min_dist and npeaks > 1:
        y, x = list(map(np.array, list(zip(*pos))))
        K = cKDTree(pos)
        XX = x.copy()
        II = np.array(intens)
        YY = y.copy()
        vals = list(K.query_pairs(min_dist))
        while vals:
            inds = [v[np.argmax(II[list(v)])] for v in vals]
            inds = np.unique(
                [i for i in range(len(II)) if i not in np.unique(inds)])
            K = cKDTree(list(zip(XX[inds], YY[inds])))
            vals = list(K.query_pairs(min_dist))
            XX = XX[inds]
            YY = YY[inds]
            II = II[inds]
        pos = list(zip(YY, XX))
        intens = II
    return pos, intens
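For comparison, the min_dist suppression step can also be written compactly with cKDTree.query_pairs; the helper below is a hypothetical sketch that keeps the brighter member of each close pair, not the original author's code:

import numpy as np
from scipy.spatial import cKDTree

def suppress_close_peaks(pos, intens, min_dist):
    # hypothetical helper: drop the dimmer peak of every pair closer than min_dist
    pos = np.asarray(pos, dtype=float)
    intens = np.asarray(intens, dtype=float)
    keep = np.ones(len(pos), dtype=bool)
    for i, j in cKDTree(pos).query_pairs(min_dist):
        if keep[i] and keep[j]:
            keep[j if intens[i] >= intens[j] else i] = False
    return pos[keep], intens[keep]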
Example #8
def deskew(args,image, image_param):
    # Deskew the given image based on the horizontal line
    # Calculate the angle of the points between 20% and 80% of the line
    uintimage = get_uintimg(image)
    binary = get_binary(args, uintimage)
    for x in range(0,args.binary_dilation):
        binary = ski.morphology.binary_dilation(binary,selem=np.ones((3, 3)))
    labels, numl = measurements.label(binary)
    objects = measurements.find_objects(labels)
    deskew_path = None
    for i, b in enumerate(objects):
        linecoords = Linecoords(image, i, b)
        # The line has to be wider than minwidth, narrower than maxwidth, stay in the top (30%) of the img,
        # only one obj is allowed and the line must not touch the top border of the image
        if int(args.minwidthhor * image_param.width) < get_width(b) < int(args.maxwidthhor * image_param.width) \
                and int(image_param.height * args.minheighthor) < get_height(b) < int(image_param.height * args.maxheighthor) \
                and int(image_param.height * args.minheighthormask) < (linecoords.height_start+linecoords.height_stop)/2 < int(image_param.height * args.maxheighthormask) \
                and linecoords.height_start != 0:

            pixelwidth = set_pixelground(binary[b].shape[1])
            #arr = np.arange(1, pixelwidth(args.deskewlinesize) + 1)
            mean_y = []
            #Calculate the mean value for every y-array
            old_start = None
            for idx in range(pixelwidth(args.deskewlinesize)):
                value_y = measurements.find_objects(labels[b][:, idx + pixelwidth((1.0-args.deskewlinesize)/2)] == i + 1)[0]
                if old_start is None:
                    old_start = value_y[0].start
                #mean_y.append((value_y[0].stop + value_y[0].start) / 2)
                if abs(value_y[0].start-old_start) < 5:
                    mean_y.append(value_y[0].start)
                    old_start = value_y[0].start
            #stuff = range(1, len(mean_y) - 1)
            polyfit_value = np.polyfit(range(0,len(mean_y)), mean_y, 1)
            deskewangle = np.arctan(polyfit_value[0]) * (360 / (2 * np.pi))
            args.ramp = True
            deskew_image = transform.rotate(image, deskewangle, mode="edge")
            create_dir(image_param.pathout+os.path.normcase("/deskew/"))
            deskew_path = "%s_deskew.%s" % (image_param.pathout+os.path.normcase("/deskew/")+image_param.name, args.extension)
            deskewinfo = open(image_param.pathout+os.path.normcase("/deskew/")+image_param.name + "_deskewangle.txt", "w")
            deskewinfo.write("Deskewangle:\t%f" % deskewangle)
            deskewinfo.close()
            image_param.deskewpath = deskew_path
            with warnings.catch_warnings():
                # transform.rotate converts the img to float; convert it back when saving
                warnings.simplefilter("ignore")
                misc.imsave(deskew_path, deskew_image)
            break
    return deskew_path
Example #9
    def findVerticalAlternative(self):
        # This is an alternative method, a bit more expensive
        # than the first version, and is called on failure of
        # the previous findVertical. It uses Scipy labelling to segment a strip
        # of data from the ROI
        self.found = False
        cx = self.ROIwh[0]//2
        expectedW, expectedH = self.expectedSize

        win = (expectedW - (expectedW*self.sizeMargin) )//2 
        #take a vertical section of pixels from the ROI and threshold it
        vROI = self.ROIimg[:,cx-win:cx+win]

        #Make a single pixel wide strip, with the median of all the rows 
        vROI = np.median(vROI,axis=1)
        threshVal = int(vROI.max() * self.thresholdVal)
        vROIthres = vROI >= threshVal
        candidate = None
        if vROIthres.min() != vROIthres.max(): 
            # Prevent a divide by zero because roi is all the same value. 
            # e.g. we have a frame completely white or black
            lbl,numLbl = nd.label(vROIthres)
            obj = nd.find_objects(lbl)
            brightest = 0
            for s in obj:
                print(s)
                # s is an np.slice object
                sBright = np.mean(vROI[s]) 
                sHeight = s[0].stop - s[0].start
                if (self.heightRange[0] <= sHeight <= self.heightRange[1]) and sBright > brightest:
                    candidate = s[0]
                    brightest = sBright
        if candidate:
            self.setPerfPosition( self.ROIcentrexy[0], self.ROIxy[1]+candidate.start + ((candidate.stop-candidate.start)/2 )) 
            self.found = True
Example #10
def file_fetch_trends_extents(trends_path, geo_extent):
    """
    Retrieve the patches of trends data present inside of the given GeoExtent

    :param trends_path:
    :param geo_extent:
    :return: tuple of (blocks, geo extents)
    """
    affine = geo_utils.get_raster_affine(trends_path)

    block_arr = geo_utils.array_from_rasterband(trends_path, geo_extent=geo_extent)
    labels, _ = ndimage.label(block_arr)

    slices = find_objects(labels)

    geo_exts = tuple(geo_utils.rowcolext_to_geoext(affine, geo_utils.RowColumnExtent(start_row=y.start,
                                                                                     start_col=x.start,
                                                                                     end_row=y.stop,
                                                                                     end_col=x.stop))
                     for y, x in slices)

    blocks = tuple(block_arr[s] for s in slices)

    # return geo extents so that they can be used with other data sets
    return blocks, geo_exts
Example #11
 def addFeatures(self, lbl, num_lbls, shrink_size):
   slices = find_objects(lbl) # index regions of each object in targets matrix
   for i, coords in ((i, np.where(lbl == i+1)) for i in range(num_lbls)): # coordinates list for each feature
     mass = len(coords[0]) # number of points in each object
     if mass >= self.minTargSize: # only continue if object is of reasonable size
       bounds = [slices[i][j].indices(shrink_size)[:2] for j in (0,1)] # slice.indices takes maximum index as argument
       com = np.mean(coords, axis=1, dtype=int) # scipy's center_of_mass is too heavy (for no reason)
Example #12
    def plot_labels(self, withfits=False, diameter=None, **kwargs):
        '''
        Generate a plot of the found peaks, individually
        '''

        # check if the fitting has been performed yet, warn user if it hasn't
        if withfits:
            if self._fits is None:
                withfits = False
                warnings.warn('Blobs have not been fit yet, cannot show fits',
                              UserWarning)
            else:
                fits = self._fits

        # pull the labels and the data from the object
        labels = self._labels
        data = self.data

        # check to see if data has been labelled
        if labels is None:
            labels = self.label_blobs(diameter=diameter)
            if labels is None:
                warnings.warn('Labels were not available', UserWarning)

                return None

        # find objects from labelled data
        my_objects = find_objects(labels)

        # generate a nice layout
        nb_labels = len(my_objects)

        nrows = int(np.ceil(np.sqrt(nb_labels)))
        ncols = int(np.ceil(nb_labels / nrows))

        fig, axes = plt.subplots(nrows, ncols, figsize=(3 * ncols, 3 * nrows))

        for n, (obj, ax) in enumerate(zip(my_objects, axes.ravel())):
            ex = (obj[1].start, obj[1].stop - 1, obj[0].stop - 1, obj[0].start)
            ax.matshow(data[obj], extent=ex, **kwargs)
            if withfits:
                # generate the model fit to display, from parameters.
                dict_params = dict(fits.loc[n].dropna())

                # recenter
                dict_params['x0'] -= obj[1].start
                dict_params['y0'] -= obj[0].start
                params = Gauss2D.dict_to_params(dict_params)
                fake_data = Gauss2D.gen_model(data[obj], *params)
                ax.contour(fake_data, extent=ex, colors='w', origin='image')

        # # Remove empty plots
        for ax in axes.ravel():
            if not(len(ax.images)) and not(len(ax.lines)):
                fig.delaxes(ax)

        fig.tight_layout()

        # return the fig and axes handles to user for later manipulation.
        return fig, axes
Example #13
def measure_bounding_box(mask, margin):
    """Determine the bounding box of a mask.
    
    This should give the same result as the ``bbox`` attribute of
    `skimage.measure.regionprops <http://scikit-image.org/docs/dev/api/skimage.measure.html#regionprops>`_:

    >>> from skimage.measure import regionprops
    >>> regionprops(mask).bbox    
    
    Parameters
    ----------
    mask : array_like
        Input mask
    margin : float
        Margin to add to bounding box
    
    Returns
    -------
    bounding_box : BoundingBox
        Bounding box
    """
    from scipy.ndimage.measurements import find_objects

    box = find_objects(mask.astype(int))[0]
    ny, nx = mask.shape
    xmin = max(0, int(box[1].start - margin)) + 1
    xmax = min(nx - 1, int(box[1].stop + margin)) + 1
    ymin = max(0, int(box[0].start - margin)) + 1
    ymax = min(ny - 1, int(box[0].stop + margin)) + 1
    bbox = BoundingBox(xmin, xmax, ymin, ymax)

    return bbox
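The BoundingBox type is project-specific, but the find_objects call at the heart of this helper can be checked directly on a toy mask; a minimal sketch:

import numpy as np
from scipy.ndimage import find_objects

mask = np.zeros((10, 10), dtype=int)
mask[2:5, 3:7] = 1
box = find_objects(mask)[0]
print(box)                         # (slice(2, 5, None), slice(3, 7, None))
print(box[0].start, box[0].stop)   # rows (y): 2 5
print(box[1].start, box[1].stop)   # cols (x): 3 7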
Example #14
def manual_split(probs, seg, body, seeds, connectivity=1, boundary_seeds=None):
    """Manually split a body from a segmentation using seeded watershed.

    Input:
        - probs: the probability of boundary in the volume given.
        - seg: the current segmentation.
        - body: the label to be split.
        - seeds: the seeds for the splitting (should be just two labels).
        [-connectivity: the connectivity to use for watershed.]
        [-boundary_seeds: if not None, these locations become inf in probs.]
    Value:
        - the segmentation with the selected body split.
    """
    struct = generate_binary_structure(seg.ndim, connectivity)
    body_pixels = seg == body
    bbox = find_objects(body_pixels)[0]
    body_pixels = body_pixels[bbox]
    body_boundary = binary_dilation(body_pixels, struct) ^ body_pixels
    non_body_pixels = ~(body_pixels | body_boundary)
    probs = probs.copy()[bbox]
    probs[non_body_pixels] = probs.min()-1
    if boundary_seeds is not None:
        probs[boundary_seeds[bbox]] = probs.max()+1
    probs[body_boundary] = probs.max()+1
    seeds = label(seeds.astype(bool)[bbox], struct)[0]
    outer_seed = seeds.max()+1 # should be 3
    seeds[non_body_pixels] = outer_seed
    seg_new = watershed(probs, seeds, 
        dams=(seg==0).any(), connectivity=connectivity, show_progress=True)
    seg = seg.copy()
    new_seeds = unique(seeds)[:-1]
    for new_seed, new_label in zip(new_seeds, [0, body, seg.max()+1]):
        seg[bbox][seg_new == new_seed] = new_label
    return seg
Example #15
    def __init__(self, image, perc):
        threshold = np.percentile(image.ravel(), perc)
        a = image.copy()
        # Keep only tail of image values distribution with signal
        a[a < threshold] = 0
        s = generate_binary_structure(2, 2)
        # Label image
        labeled_array, num_features = label(a, structure=s)
        # Find objects
        objects = find_objects(labeled_array)
        # Container of object's properties
        _objects = np.empty(num_features, dtype=[('label', 'int'),
                                                 ('dx', '<f8'),
                                                 ('dy', '<f8'),
                                                 ('max_pos', 'int',
                                                  (2,))])

        labels = np.arange(num_features) + 1
        dx = [int(obj[1].stop - obj[1].start) for obj in objects]
        dy = [int(obj[0].stop - obj[0].start) for obj in objects]

        # Filling objects structured array
        _objects['label'] = labels
        _objects['dx'] = dx
        _objects['dy'] = dy
        self.objects = _objects
        self._classify(image, labeled_array)
        # Fetch positions of only successfully classified objects
        self.max_pos = self._find_positions(image, labeled_array)
        self._sort()
Example #16
def detect_sources(snmap, threshold):
    hot = (snmap > threshold)
    hot = binary_dilation(hot, iterations=2)
    hot = binary_fill_holes(hot)
    blobs,nblobs = label(hot)
    print(nblobs, 'blobs')
    #print('blobs min', blobs.min(), 'max', blobs.max())
    slices = find_objects(blobs)
    px,py = [],[]
    for i,slc in enumerate(slices):
        blob_loc = blobs[slc]
        sn_loc = snmap[slc]
        imax = np.argmax((blob_loc == (i+1)) * sn_loc)
        y,x = np.unravel_index(imax, blob_loc.shape)
        y0,x0 = slc[0].start, slc[1].start
        px.append(x0+x)
        py.append(y0+y)
        #if i == 0:
        #    plt.subplot(2,2,1)
        #    plt.imshow(blob_loc, interpolation='nearest', origin='lower')
        #    plt.colorbar()
        #    plt.subplot(2,2,2)
        #    plt.imshow((blob_loc==(i+1))*sn_loc, interpolation='nearest', origin='lower')
        #    plt.subplot(2,2,3)
        #    plt.plot(x, y, 'ro')
    return np.array(px),np.array(py)
Example #17
def find_blobsV2(G, cent, prom, ratio = 0.5, verbose = True):
    '''
    G- the big field
    cent- blob center whose bounds we want
    
    Very naive implementation - can be made significantly faster
    '''    
    from scipy.ndimage.measurements import find_objects, label

    
    #modify G with prom and ratio
    pr_th = G[cent]-prom*ratio
    G_ = G.copy()
    G_[G<=pr_th] = 0 #only regions with half prom available
    if G[cent] == 0:
        print('huhh'); print(pr_th); print(np.sum(G_))
        return -1
    G_obs = find_objects(label(G_)[0])
    #print(len(G_obs))
    for i in G_obs:
        #print(i)
        test  = np.zeros(G.shape)
        test[i] = 1
        if test[cent]:
            #this blob contains the cnt
            if verbose:
                plt.figure()
                plt.imshow(test)
                plt.colorbar()
            test[cent] = 2
            dims = getdims(test[i])
            return dims
    return -1
Example #18
 def setSegmentation(self, segmentation, cseg=0):
     """Set the line segmentation."""
     # reorder the labels by the x center of bounding box
     segmentation = common.renumber_labels_by_boxes(
         segmentation, key=lambda x: mean((x[1].start, x[1].stop)))
     # compute the bounding boxes in order
     boxes = [None] + measurements.find_objects(segmentation)
     n = len(boxes)
     # now consider groups of boxes
     groups = []
     for i in range(1, n):
         for r in range(1, self.maxrange + 1):
             box = None
             gap = 0
             labels = []
             for j in range(i, min(n, i + r)):
                 if box is not None:
                     gap = max(gap, boxes[j][1].start - box[1].stop)
                 box = sl.union(box, boxes[j])
                 labels.append(j)
             # skip if two constituent boxes have too large a gap between them
             if gap > self.maxdist: continue
             a = sl.aspect(box)
             # skip if the aspect ratio is wrong
             if 1.0 / a > self.maxaspect: continue
             groups.append((box, labels))
     # compute some statistics
     mw = median([sl.dim0(g[0]) for g in groups])
     # now select based on statistics
     groups = [g for g in groups if sl.dim1(g[0]) < self.maxwidth * mw]
     # now we have a list of candidate groups
     self.segmentation = segmentation
     self.groups = groups
     self.clearLattice()
     return len(self.groups)
Example #19
def Pi():
    # L = 100
    for L in (50, 100, 200):
        p = linspace(0.5, 0.7, 50)
        nx = len(p)
        Ni = zeros(nx)
        N = 1000
        for i in range(N):
            z = rand(L, L)
            for ip in range(nx):
                m = z < p[ip]
                lw, num = measurements.label(m)
                labelList = arange(lw.max() + 1)
                area = measurements.sum(m, lw, labelList)
                maxLabel = labelList[where(area == area.max())]
                sliced = measurements.find_objects(lw == maxLabel)
                if (len(sliced) > 0):
                    sliceX = sliced[0][1]
                    sliceY = sliced[0][0]
                    dx = sliceX.stop - sliceX.start
                    dy = sliceY.stop - sliceY.start
                    maxsize = max(dx, dy)
                    if (maxsize >= L):  # Percolation
                        Ni[ip] = Ni[ip] + 1
        Pi = Ni / N
        plot(p, Pi)
    show()
Example #20
def update(p):
    p = pSlider.val
    z = r<p
    im1.set_data(z)
    im1.set_clim(z.min(), z.max())
    lw, num = measurements.label(z)

    # labeled clusters
    b = arange(lw.max() + 1) # create an array of values from 0 to lw.max() + 1
    shuffle(b) # shuffle this array
    shuffledLw = b[lw] # replace all values with values from b
    im2.set_data(shuffledLw) # show image clusters as labeled by a shuffled lw    
    im2.set_clim(shuffledLw.min(), shuffledLw.max())
    
    # calculate area
    area = (measurements.sum(z, lw, index=range(lw.max() + 1))).astype(int)
    areaImg = area[lw]
    im3.set_data(areaImg)
    im3.set_clim(areaImg.min(), areaImg.max())
    sliced = measurements.find_objects(areaImg == areaImg.max())
    if(len(sliced) > 0):
        sliceX = sliced[0][1]
        sliceY = sliced[0][0]
        ontopplot.set_xdata([sliceX.start, sliceX.start, sliceX.stop, sliceX.stop, sliceX.start])
        ontopplot.set_ydata([sliceY.start, sliceY.stop, sliceY.stop, sliceY.start, sliceY.start])
    else:        
        ontopplot.set_xdata([0])
        ontopplot.set_ydata([0])
    
    
    draw()
Example #21
def get_chunks(array, area, output, tag):
    ## define connected bins to check, in this case, all the surrounding bins (8-d)
    neighborhood = generate_binary_structure(2, 2)
    ## keep only the pixels that are the maximal value in their neighborhood
    local_max = maximum_filter(array, footprint=neighborhood) == array
    ## detect background of the array
    background = (array == 0)
    ## remove background from array, to only keep peaks
    eroded_background = binary_erosion(background,
                                       structure=neighborhood,
                                       border_value=1)
    detected_peaks = local_max ^ eroded_background
    ## in order to extract the chunks, fill gaps
    dilation = binary_dilation(detected_peaks, structure=np.ones(
        (area, area))).astype(int)
    ## extraction of loops and write results
    labeled_array, num_features = label(dilation, structure=neighborhood)
    positions = find_objects(labeled_array, max_label=num_features + 1)
    w = open(output + '/%s_chunk_loops.tsv' % (tag), 'w')
    for n, p in enumerate(positions):
        try:
            xstart, xend = p[0].start, p[0].stop
            ystart, yend = p[1].start, p[1].stop
            position_anchor1 = min(xstart, xend) + start_bin
            position_anchor2 = min(ystart, yend) + start_bin
            w.write('{}\t{}\t{}\n'.format(crm, position_anchor1,
                                          position_anchor2))
        except TypeError:
            continue
    w.close()
Example #22
def find_objects_bb(mask):
    '''Returns the bounding boxes of objects found in mask, sorted by size.'''

    labels, _ = nd_label(mask)
    labels = relabel_size_sorted(labels)

    return find_objects(labels)
Example #23
def findProbObjects(data, data_threshold, prob_threshold):
    from scipy.ndimage.measurements import label, find_objects
    connectivity = np.ones((3, 3, 3), dtype=np.int32)

    ens_prob = (data >= data_threshold).sum(axis=0) / float(data.shape[0])
    labels, n_objs = label(ens_prob >= prob_threshold, structure=connectivity)
    return find_objects(labels)
Example #24
def _filter_small_slopes(hgt, dx, min_slope=0):
    """Masks out slopes with NaN until the slope if all valid points is at 
    least min_slope (in degrees).
    """

    min_slope = np.deg2rad(min_slope)
    slope = np.arctan(-np.gradient(hgt, dx))  # beware the minus sign
    # slope at the end always OK
    slope[-1] = min_slope

    # Find the locs where it doesn't work and expand till we got everything
    slope_mask = np.where(slope >= min_slope, slope, np.nan)
    r, nr = label(~np.isfinite(slope_mask))
    for objs in find_objects(r):
        obj = objs[0]
        i = 0
        while True:
            i += 1
            i0 = objs[0].start-i
            if i0 < 0:
                break
            ngap =  obj.stop - i0 - 1
            nhgt = hgt[[i0, obj.stop]]
            current_slope = np.arctan(-np.gradient(nhgt, ngap * dx))
            if i0 <= 0 or current_slope[0] >= min_slope:
                break
        slope_mask[i0:obj.stop] = np.nan
    out = hgt.copy()
    out[~np.isfinite(slope_mask)] = np.nan
    return out
Example #25
def edt_prob(lbl_img):
    """Perform EDT on each labeled object and normalize."""
    def grow(sl, interior):
        return tuple(
            slice(s.start - int(w[0]), s.stop + int(w[1]))
            for s, w in zip(sl, interior))

    def shrink(interior):
        return tuple(
            slice(int(w[0]), (-1 if w[1] else None)) for w in interior)

    objects = find_objects(lbl_img)
    prob = np.zeros(lbl_img.shape, np.float32)
    for i, sl in enumerate(objects, 1):
        # i: object label id, sl: slices of object in lbl_img
        if sl is None: continue
        interior = [(s.start > 0, s.stop < sz)
                    for s, sz in zip(sl, lbl_img.shape)]
        # 1. grow object slice by 1 for all interior object bounding boxes
        # 2. perform (correct) EDT for object with label id i
        # 3. extract EDT for object of original slice and normalize
        # 4. store edt for object only for pixels of given label id i
        shrink_slice = shrink(interior)
        grown_mask = lbl_img[grow(sl, interior)] == i
        mask = grown_mask[shrink_slice]
        edt = distance_transform_edt(grown_mask)[shrink_slice][mask]
        prob[sl][mask] = edt / np.max(edt)
    return prob
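A small check of edt_prob on a toy label image, assuming numpy is imported as np and that find_objects and distance_transform_edt come from scipy.ndimage (as the surrounding project does):

import numpy as np

lbl = np.zeros((8, 8), dtype=int)
lbl[1:4, 1:4] = 1     # a 3x3 object
lbl[5:7, 5:8] = 2     # a 2x3 object touching the image border
prob = edt_prob(lbl)
print(prob.shape, prob.dtype)        # (8, 8) float32
print(float(prob.max()))             # 1.0 -> the deepest pixel of each object is normalised to 1
print(float(prob[lbl == 0].max()))   # 0.0 -> background stays zero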
Example #26
def edt_prob(lbl_img, anisotropy=None):
    """Perform EDT on each labeled object and normalize."""
    def grow(sl,interior):
        return tuple(slice(s.start-int(w[0]),s.stop+int(w[1])) for s,w in zip(sl,interior))
    def shrink(interior):
        return tuple(slice(int(w[0]),(-1 if w[1] else None)) for w in interior)
    constant_img = lbl_img.min() == lbl_img.max() and lbl_img.flat[0] > 0
    if constant_img:
        lbl_img = np.pad(lbl_img, ((1,1),)*lbl_img.ndim, mode='constant')
        warnings.warn("EDT of constant label image is ill-defined. (Assuming background around it.)")
    dist_func = _edt_dist_func(anisotropy)
    objects = find_objects(lbl_img)
    prob = np.zeros(lbl_img.shape,np.float32)
    for i,sl in enumerate(objects,1):
        # i: object label id, sl: slices of object in lbl_img
        if sl is None: continue
        interior = [(s.start>0,s.stop<sz) for s,sz in zip(sl,lbl_img.shape)]
        # 1. grow object slice by 1 for all interior object bounding boxes
        # 2. perform (correct) EDT for object with label id i
        # 3. extract EDT for object of original slice and normalize
        # 4. store edt for object only for pixels of given label id i
        shrink_slice = shrink(interior)
        grown_mask = lbl_img[grow(sl,interior)]==i
        mask = grown_mask[shrink_slice]
        edt = dist_func(grown_mask)[shrink_slice][mask]
        prob[sl][mask] = edt/(np.max(edt)+1e-10)
    if constant_img:
        prob = prob[(slice(1,-1),)*lbl_img.ndim].copy()
    return prob
Example #27
def spanning_cluster_density(perc_matrix):
	# Calculate and return the spanning cluster density
	total_area = 0
	Lx, Ly = perc_matrix.shape
	Lmin = min(Lx, Ly)


	lw, num = measurements.label(perc_matrix)
	labels = arange(lw.max()+1)

	area = measurements.sum(perc_matrix, lw, index=labels)
	
	for l in labels:
		if area[l] > Lmin:
		 	sliced = measurements.find_objects(lw == l)
		 	sliceX = sliced[0][1]
		 	sliceY = sliced[0][0]

		 	width = sliceX.stop - sliceX.start
		 	height = sliceY.stop - sliceY.start

		 	if width == Lx or height == Ly:
		 		total_area += area[l]

	return total_area/Lx/Ly
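A usage sketch for the function above, assuming it is in scope together with from scipy.ndimage import measurements and from numpy import arange (the original relies on star imports for these):

import numpy as np

rng = np.random.default_rng(0)
perc_matrix = (rng.random((64, 64)) < 0.62).astype(int)
# density of the spanning cluster(s); 0.0 if no cluster spans the lattice
print(spanning_cluster_density(perc_matrix))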
Example #28
def __load(picklefile, label):
    """
    Load a pickled testbed as well as the original and label image for further processing.
    The label image will be relabeled to start with region id 1.
    @param picklefile the testbed pickle file name
    @param label the label image file name
    @return a tuple containing:
        label_image_d: the relabelled label image data as ndarray
        label_image: the original label image object
        bounding_boxes: the bounding boxes around the label image regions (Note that the bounding box of a region with id rid is accessed using bounding_boxes[rid - 1])
        model_fg_ids: the region ids of all regions to create the foreground model from
        model_bg_ids: the region ids of all regions to create the background model from
        truth_fg: subset of the evaluation regions that are foreground according to the ground-truth
        truth_bg: subset of the evaluation regions that are background according to the ground-truth
    """
    # load and preprocess images
    label_image = load(label)
    
    label_image_d = scipy.squeeze(label_image.get_data())
    
    # relabel the label image to start from 1
    label_image_d = medpy.filter.relabel(label_image_d, 1)
    
    # extracting bounding boxes
    bounding_boxes = find_objects(label_image_d)
    
    # load testbed
    with open(picklefile, 'r') as f:
        model_fg_ids = cPickle.load(f)
        model_bg_ids = cPickle.load(f)
        cPickle.load(f) # eval ids
        truth_fg = cPickle.load(f)
        truth_bg = cPickle.load(f)
            
    return label_image_d, label_image, bounding_boxes, model_fg_ids, model_bg_ids, truth_fg, truth_bg
Example #29
 def split(self, text):  # noqa: D102
     a = _string_to_array(text)
     if not a.size:
         return []
     b = np.copy(a)
     b[b == ord(' ')] = 0
     if self.margin != (1, 1):
         # Dilate the image
         structure = np.zeros(
             (2 * (self.margin[1] - 1) + 1, 2 * (self.margin[0] - 1) + 1))
         structure[self.margin[1] - 1:, self.margin[0] - 1:] = 1
         labels = binary_dilation(b, structure=structure).astype(b.dtype)
     else:
         labels = b
     label(labels, structure=np.ones((3, 3)), output=labels)
     objects = find_objects(labels)
     parts = []
     for i, obj in enumerate(objects):
         mask = labels[obj] != i + 1
         region = np.copy(a[obj])
         region[mask] = ord(' ')
         part = '\n'.join(''.join(unichr(c or ord(' ')) for c in row)
                          for row in region.tolist())
         if part.strip():
             parts.append(part)
     return parts
Example #30
def bbox(mask, margin):
    """Determine the bounding box of a mask.
    
    This should give the same result as the ``bbox`` attribute of
    `skimage.measure.regionprops <http://scikit-image.org/docs/dev/api/skimage.measure.html#regionprops>`_:

    >>> from skimage.measure import regionprops
    >>> regionprops(mask).bbox    
    
    Parameters
    ----------
    mask : array_like
        Input mask
    margin : float
        Margin to add to bounding box
    
    Returns
    -------
    xmin, xmax, ymin, ymax : int
        Bounding box parameters
    """
    from scipy.ndimage.measurements import find_objects
    box = find_objects(mask.astype(int))[0]
    ny, nx = mask.shape
    xmin = max(0, int(box[1].start - margin)) + 1
    xmax = min(nx - 1, int(box[1].stop + margin)) + 1
    ymin = max(0, int(box[0].start - margin)) + 1
    ymax = min(ny - 1, int(box[0].stop + margin)) + 1
    # box_string = '[{xmin}:{xmax},{ymin}:{ymax}]'.format(**locals())
    bbox = xmin, xmax, ymin, ymax
    return bbox  # , box_string
Example #31
def validate_battlefield(field):
    field = np.array(field)
    return sorted(
        ship.size if min(ship.shape) == 1 else 0
        for ship in (field[pos]
                     for pos in find_objects(label(field, np.ones((
                         3, 3)))[0]))) == [1, 1, 1, 1, 2, 2, 2, 3, 3, 4]
Example #32
 def addFeatures(self, lbl, num_lbls, shrink_size):
   slices = find_objects(lbl)
   for i, coords in ((i, np.where(lbl == i)) for i in range(1, num_lbls+1)): # coordinates list for each feature
     mass = len(coords[0])
     if mass >= self.minTargSize:
       bounds = [slices[i][j].indices(shrink_size)[:2] for j in (0,1)]
       com = np.mean(coords, axis=1, dtype=int) # scipy's center_of_mass is too heavy
Example #33
def select_biggest_segment(image, image_labels, mask_classes, buffer=10):

    copy = np.zeros(image.shape[:2], np.uint8)

    for i, class_value in enumerate(mask_classes):
        equality = np.equal(image_labels, class_value)
        class_map = np.all(equality, axis=-1)
        copy[class_map] = i

    regions = find_objects(copy)

    print(regions)

    if len(regions) == 0:
        return None, None

    region_sizes = [region_size(i) for i in regions]
    biggest_region = regions[np.argmax(region_sizes)]

    image_shape = image.shape[:2]

    bounds = [0, image_shape[0], 0, image_shape[1]]

    region_with_buffer = expand_slice(biggest_region, buffer, buffer, bounds)

    print("region with buffer", region_with_buffer)
    return image[region_with_buffer], image_labels[region_with_buffer]
Example #34
def collapse_small_area(labelled_image, minimum_area):
    """Collapse labelled image removing areas with too low are.


    Parameters
    ----------
    labelled_image: array_like
                    An image with labels
    minimum_area: float
                  Areas with this and above area are retained
    Returns
    -------
    label_collapsed_image: array_like
                     Image with contigous labels

    """
    collapsed_image = labelled_image.copy()
    collapsed_image = collapse_labels(collapsed_image)

    pixel_count, edges = np.histogram(collapsed_image,
                                      bins=collapsed_image.max() + 1)
    positions = ms.find_objects(collapsed_image)
    for i in range(1, pixel_count.size):
        if pixel_count[i] < minimum_area:
            patch = collapsed_image[positions[i - 1]]
            # Blacken out that patch
            patch[patch == i] = 0
    collapsed_image = collapse_labels(collapsed_image)
    return collapsed_image
Example #36
def select_segments(image, image_labels, mask_classes, buffer=10):
    from scipy.ndimage.measurements import find_objects

    copy = np.zeros(image.shape[:2], np.uint8)

    for i, class_value in enumerate(mask_classes):
        print(i, class_value)
        equality = np.equal(image_labels, class_value)
        class_map = np.all(equality, axis=-1)
        copy[class_map] = i

    regions = find_objects(copy)

    print(regions)

    regions = [i for i in regions if i is not None]

    if len(regions) == 0:
        return []

    image_shape = image.shape[:2]
    bounds = [0, image_shape[0], 0, image_shape[1]]

    images = []
    for roi in regions:
        roi_with_buffer = expand_slice(roi, buffer, buffer, bounds)
        images.append([image[roi_with_buffer], image_labels[roi_with_buffer]])
    return images
Example #37
def _filter_small_slopes(hgt, dx, min_slope=1):
    """Masks out slopes with NaN until the slope if all valid points is at 
    least min_slope (in degrees).
    """

    min_slope = np.deg2rad(min_slope)
    slope = np.arctan(-np.gradient(hgt, dx))  # beware the minus sign
    # slope at the end always OK
    slope[-1] = min_slope

    # Find the locs where it doesn't work and expand till we got everything
    slope_mask = np.where(slope >= min_slope, slope, np.nan)
    r, nr = label(~np.isfinite(slope_mask))
    for objs in find_objects(r):
        obj = objs[0]
        i = 0
        while True:
            i += 1
            i0 = objs[0].start-i
            if i0 < 0:
                break
            ngap =  obj.stop - i0 - 1
            nhgt = hgt[[i0, obj.stop]]
            current_slope = np.arctan(-np.gradient(nhgt, ngap * dx))
            if i0 <= 0 or current_slope[0] >= min_slope:
                break
        slope_mask[i0:obj.stop] = np.nan
    out = hgt.copy()
    out[~np.isfinite(slope_mask)] = np.nan
    return out
Example #38
def keep_only_middle_patch(image):
    
    middle_y, middle_x = (image.shape[0]/2, image.shape[1]/2)
    lw, num = measurements.label(image)
    slice_tuples = measurements.find_objects(lw)
    current_min_dist = image.shape[0]
    print(current_min_dist)
    current_min_tuple = 0
    for i, t in enumerate(slice_tuples):
        rows = (t[0].start, t[0].stop)
        cols = (t[1].start, t[1].stop)
        y_slice = (rows[1]+rows[0])/2
        x_slice = (cols[1]+cols[0])/2
        print(x_slice, y_slice)
        dist = np.sqrt((middle_x-x_slice)**2+(middle_y-y_slice)**2)
        if dist < current_min_dist:
            current_min_dist = dist
            current_min_tuple = t
    
    rows = (current_min_tuple[0].start, current_min_tuple[0].stop)
    cols = (current_min_tuple[1].start, current_min_tuple[1].stop)
    
    image[:rows[0],:] = False
    image[rows[1]:,:] = False
    image[:,:cols[0]] = False
    image[:,cols[1]:] = False
Example #39
def clusterNumberDensity(L,p,nSamples=100,bins=None):
    maxBinArea = L*L
    if bins is None:
        bins = array(sorted(set(logspace(0, log10(maxBinArea), nBins).astype(int64).tolist())))

    totalBins = zeros(len(bins) - 1)
    for sample in range(nSamples):
        system = PercolationSystem(L,p)
        lw = system.lw
        area = system.area
        maxArea = system.maxArea
        maxLabels = system.maxLabels
        for label in maxLabels:
            sliced = measurements.find_objects(lw == label)
            if(len(sliced) > 0):
                sliceX = sliced[0][1]
                sliceY = sliced[0][0]
                if sliceX.stop - sliceX.start >= L or sliceY.stop - sliceY.start >= L:
                    area[where(area == maxArea)] = 0 # remove the percolating cluster
        
#            currentBins = histogram(area, bins=bins, weights=area)[0]
        binAreas = diff(bins)
        currentBins = histogram(area, bins=bins)[0].astype(float)
        currentBins /= binAreas # normalize to values of 1x1 s bins
        currentBins /= maxBinArea
        totalBins += currentBins
#        totalBins += totalAreas / float(maxBinArea)
    totalBins /= nSamples
    return bins, totalBins
Example #40
    def get_coordinates(self, cluster_labels, max_width, max_height):
        ratio = 1 / self.__scale
        coordinates = []
        structure = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
        bbox_slices = {}
        for i in range(1, cluster_labels.max() + 1):
            B = cluster_labels.copy()
            B[B != i] = 0
            bbox_slices[i] = mnts.find_objects(
                mnts.label(B, structure=structure)[0])

        for key in bbox_slices.keys():
            for width_slice, height_slice in bbox_slices[key]:
                rectangle_coordinates = list()
                rectangle_coordinates.append((int(height_slice.start * ratio),
                                              int(width_slice.start * ratio)))
                rectangle_coordinates.append(
                    (int(height_slice.start * ratio),
                     min(int(width_slice.stop * ratio), max_width - 1)))
                rectangle_coordinates.append(
                    (min(int(height_slice.stop * ratio), max_height - 1),
                     min(int(width_slice.stop * ratio), max_width - 1)))
                rectangle_coordinates.append(
                    (min(int(height_slice.stop * ratio),
                         max_height - 1), int(width_slice.start * ratio)))
                coordinates.append(rectangle_coordinates)

        return coordinates
Example #41
def bounding_box(a):
    a = array(a > 0, 'i')
    l = measurements.find_objects(a)
    ys, xs = l[0]
    # y0,x0,y1,x1
    return (ys.start, xs.start, ys.stop, xs.stop)
Example #42
    def _make_floor_ceil(self, cabin_voxel):
        """
        Alternate method of ceiling detection: get the label in a region containing troops (
        assumed to be the cabin interior volume), then find min and max points where that label
        is found, everywhere in the vehicle. Floor and ceiling are the endpoints of the longest
        continuous gap between floor and ceiling.

        :param cabin_voxel: 3-tuple containing the ijk indices of a voxel known to be cabin-
            determine this from the position of a troop manikin in the vehicle model
        """
        # Default value = bottom of vehicle box. Easy to spot meaningless ceiling points.

        labels = self.get_labels(mask_from_voxel=cabin_voxel)

        self.ceiling = np.zeros((labels.shape[0], labels.shape[1]), dtype=np.int16)
        self.floor = np.zeros((labels.shape[0], labels.shape[1]), dtype=np.int16)
        for i in xrange(labels.shape[0]):
            for j in xrange(labels.shape[1]):
                labs, isl = meas.label(labels[i, j, :])
                if isl == 0:
                    continue
                slices = meas.find_objects(labs)
                lrgst = np.argmax(np.array([sli[0].stop - sli[0].start for sli in slices]))
                
                self.floor[i, j] = slices[lrgst][0].start - 1
                self.ceiling[i, j] = slices[lrgst][0].stop
        # Hack: postprocess so that floor and ceiling arrays have the default values assumed
        # by rest of test bench
        self.floor[self.floor == -1] = 0
        self.ceiling[self.ceiling == labels.shape[2]] = 0
Example #43
 def detect_sources(self, detsn, thresh, ps):
     from scipy.ndimage.measurements import label, find_objects
     # HACK -- Just keep the brightest pixel in each blob!
     peaks = (detsn > thresh)
     blobs, nblobs = label(peaks)
     slices = find_objects(blobs)
     return slices
Example #44
def extracts_minima_areas(arr):
    neighborhood = morphology.generate_binary_structure(len(arr.shape),2)
    local_min = (filters.minimum_filter(arr, footprint=neighborhood)==arr)
    labels = measurements.label(local_min)[0]
    objects = measurements.find_objects(labels)
    areas_and_indices_and_bounding_boxes = []
    for idx, sl in enumerate(objects):
        areas_and_indices_and_bounding_boxes.append((len(arr[sl][labels[sl] == idx + 1]), idx + 1, sl)) # first area, then index, then bounding box
    return sorted(areas_and_indices_and_bounding_boxes), labels
Example #45
def __distinct_binary_object_correspondences(reference, result, connectivity=1):
    """
    Determines all distinct (where connectivity is defined by the connectivity parameter
    passed to scipy's `generate_binary_structure`) binary objects in both of the input
    parameters and returns a 1to1 mapping from the labelled objects in reference to the
    corresponding (whereas a one-voxel overlap suffices for correspondence) objects in
    result.

    All of this stems from the problem that the relationship is a non-surjective many-to-many mapping.

    @return (labelmap1, labelmap2, n_labels1, n_labels2, labelmapping2to1)
    """
    result = np.atleast_1d(result.astype(bool))
    reference = np.atleast_1d(reference.astype(bool))

    # binary structure
    footprint = generate_binary_structure(result.ndim, connectivity)

    # label distinct binary objects
    labelmap1, n_obj_result = label(result, footprint)
    labelmap2, n_obj_reference = label(reference, footprint)

    # find all overlaps from labelmap2 to labelmap1; collect one-to-one relationships and store all one-two-many for later processing
    slicers = find_objects(labelmap2)  # get windows of labelled objects
    mapping = dict()  # mappings from labels in labelmap2 to corresponding object labels in labelmap1
    used_labels = set()  # set to collect all already used labels from labelmap2
    one_to_many = list()  # list to collect all one-to-many mappings
    for l1id, slicer in enumerate(slicers):  # iterate over object in labelmap2 and their windows
        l1id += 1  # labelled objects have ids starting from 1
        bobj = (l1id) == labelmap2[slicer]  # find binary object corresponding to the label1 id in the segmentation
        l2ids = np.unique(labelmap1[slicer][
                                 bobj])  # extract all unique object identifiers at the corresponding positions in the reference (i.e. the mapping)
        l2ids = l2ids[0 != l2ids]  # remove background identifiers (=0)
        if 1 == len(
                l2ids):  # one-to-one mapping: if target label not already used, add to final list of object-to-object mappings and mark target label as used
            l2id = l2ids[0]
            if l2id not in used_labels:
                mapping[l1id] = l2id
                used_labels.add(l2id)
        elif 1 < len(l2ids):  # one-to-many mapping: store relationship for later processing
            one_to_many.append((l1id, set(l2ids)))

    # process one-to-many mappings, always choosing the one with the least labelmap2 correspondences first
    while True:
        one_to_many = [(l1id, l2ids - used_labels) for l1id, l2ids in
                       one_to_many]  # remove already used ids from all sets
        one_to_many = [x for x in one_to_many if x[1]]  # remove empty sets
        one_to_many = sorted(one_to_many, key=lambda x: len(x[1]))  # sort by set length
        if 0 == len(one_to_many):
            break
        l2id = one_to_many[0][1].pop()  # select an arbitrary target label id from the shortest set
        mapping[one_to_many[0][0]] = l2id  # add to one-to-one mappings
        used_labels.add(l2id)  # mark target label as used
        one_to_many = one_to_many[1:]  # delete the processed set from all sets

    return labelmap1, labelmap2, n_obj_result, n_obj_reference, mapping
Example #46
def area_open(im_label, min_area):
    """Removes small objects from label image.

    Parameters
    ----------
    im_label : array_like
        A uint32 type label image generated by segmentation methods.
    min_area : int
        minimum area threshold for objects. Objects with fewer than 'min_area'
        pixels will be zeroed to merge with background.

    Returns
    -------
    im_open : array_like
        A uint32 label where objects with pixels < min_area are removed.

    Notes
    -----
    Objects are assumed to have positive nonzero values. im_label image will be
    condensed during processing.

    See Also
    --------
    histomicstk.segmentation.label.condense,
    histomicstk.segmentation.label.shuffle,
    histomicstk.segmentation.label.split,
    histomicstk.segmentation.label.width_open
    """

    # copy input image
    im_open = im_label.copy()

    # condense label image
    if np.unique(im_open).size-1 != im_open.max():
        im_open = condense(im_open)

    # count pixels in each object
    Counts, Edges = np.histogram(im_open, bins=im_open.max()+1)

    # get locations of objects in initial image
    Locations = ms.find_objects(im_open)

    # iterate through objects, zeroing where needed
    for i in np.arange(1, Counts.size):
        if Counts[i] < min_area:

            # extract object from label image
            Template = im_open[Locations[i-1]]

            # label mask of object 'i'
            Template[Template == i] = 0

    # condense to fill gaps
    im_open = condense(im_open)

    return im_open
Example #47
 def find(self, in_location):
     image = in_location.image
     bin_image = self.binary_image_function(image)
     for y_slice, x_slice in find_objects(*label(bin_image)):
         yield Location(
             x_slice.start,
             y_slice.start,
             x_slice.stop - x_slice.start,
             y_slice.stop - y_slice.start,
             parent=in_location,
         )
Example #48
def ocropy_degrade(im, distort=1.0, dsigma=20.0, eps=0.03, delta=0.3, degradations=[(0.5, 0.0, 0.5, 0.0)]):
    """
    Degrades and distorts a line using the same noise model used by ocropus.

    Args:
        im (PIL.Image): Input image
        distort (float):
        dsigma (float):
        eps (float):
        delta (float): 
        degradations (list): list returning 4-tuples corresponding to
                             the degradations argument of ocropus-linegen.

    Returns:
        PIL.Image in mode 'L'
    """
    w, h = im.size
    # XXX: determine correct output shape from transformation matrices instead
    # of guesstimating.
    image = Image.new('L', (int(1.5*w), 4*h), 255)
    image.paste(im, (int((image.size[0] - w) / 2), int((image.size[1] - h) / 2)))
    a = pil2array(image.convert('L'))
    (sigma,ssigma,threshold,sthreshold) = degradations[np.random.choice(len(degradations))]
    sigma += (2*np.random.rand()-1)*ssigma
    threshold += (2*np.random.rand()-1)*sthreshold
    a = a*1.0/np.amax(a)
    if sigma>0.0:
        a = gaussian_filter(a,sigma)
    a += np.clip(np.random.randn(*a.shape)*0.2,-0.25,0.25)
    m = np.array([[1+eps*np.random.randn(),0.0],[eps*np.random.randn(),1.0+eps*np.random.randn()]])
    w,h = a.shape
    c = np.array([w/2.0,h/2])
    d = c-np.dot(m, c)+np.array([np.random.randn()*delta, np.random.randn()*delta])
    a = affine_transform(a, m, offset=d, order=1, mode='constant', cval=a[0,0])
    a = np.array(a>threshold,'f')
    [[r,c]] = find_objects(np.array(a==0,'i'))
    r0 = r.start
    r1 = r.stop
    c0 = c.start
    c1 = c.stop
    a = a[r0-5:r1+5,c0-5:c1+5]
    if distort > 0:
        h,w = a.shape
        hs = np.random.randn(h,w)
        ws = np.random.randn(h,w)
        hs = gaussian_filter(hs, dsigma)
        ws = gaussian_filter(ws, dsigma)
        hs *= distort/np.amax(hs)
        ws *= distort/np.amax(ws)
        def f(p):
            return (p[0]+hs[p[0],p[1]],p[1]+ws[p[0],p[1]])
        a = geometric_transform(a, f, output_shape=(h,w), order=1, mode='constant', cval=np.amax(a))
    im = array2pil(a).convert('L')
    return im
Example #49
def get_conn_comp(imgarr, sort=True):
    labelled_image, n_components = meas.label(imgarr)
    slices = meas.find_objects(labelled_image)
    components = []

    for islice, slaiss in enumerate(slices):
        components.append(Component(labelled_image, slaiss, islice+1))

    if sort:
        components = sorted(components)

    return components, labelled_image
Example #50
def AreaOpenLabel(Label, Area):
    """Removes small objects from label image.

    Parameters:
    -----------
    Label : array_like
        A uint32 type label image generated by segmentation methods.
    Area : int
        Area threshold for objects. Objects with fewer than 'Area' pixels will
        be zeroed to merge with background.

    Notes:
    ------
    Objects are assumed to have positive nonzero values. Label image will be
    condensed during processing.

    Returns:
    --------
    Split : array_like
        A uint32 label where objects with pixels < Area are removed.

    See Also:
    ---------
    CondenseLabel, ShuffleLabel, SplitLabel, MaxwidthOpenLabel
    """

    # copy input image
    Opened = Label.copy()

    # condense label image
    if np.unique(Opened).size-1 != Opened.max():
        Opened = htk.CondenseLabel(Opened)

    # count pixels in each object
    Counts, Edges = np.histogram(Opened, bins=Opened.max()+1)

    # get locations of objects in initial image
    Locations = ms.find_objects(Opened)

    # iterate through objects, zeroing where needed
    for i in np.arange(1, Counts.size):
        if Counts[i] < Area:

            # extract object from label image
            Template = Opened[Locations[i-1]]

            # label mask of object 'i'
            Template[Template == i] = 0

    # condense to fill gaps
    Opened = htk.CondenseLabel(Opened)

    return Opened
Example #51
 def match_positions(self, shape, list_of_coords):
     """ In cases where we have multiple matches, each highlighted by a region of coordinates,
     we need to separate matches, and find mean of each to return as match position
     """
     match_array = np.zeros(shape)
     # exception hit on this line if nothing in list_of_coords, i.e. no matches
     match_array[list_of_coords[:,0],list_of_coords[:,1]] = 1
     labelled = label(match_array)
     objects = find_objects(labelled[0])
     coords = [{'x':(slice_x.start, slice_x.stop),'y':(slice_y.start, slice_y.stop)} for (slice_y,slice_x) in objects]
     final_positions = [MatchRegion(slice(int(np.mean(coords[i]['y'])), int(np.mean(coords[i]['y'])) + self.th),slice(int(np.mean(coords[i]['x'])), int(np.mean(coords[i]['x'])+self.tw))) for i in range(len(coords))]
     return final_positions
Example #52
def bbox(mask, margin, binsz):
    """Determine the bounding box of a mask"""
    from scipy.ndimage.measurements import find_objects
    box = find_objects(mask.astype(int))[0]
    ny, nx = mask.shape
    xmin = max(0, int(box[1].start - margin / binsz)) + 1
    xmax = min(nx - 1, int(box[1].stop + margin / binsz)) + 1
    ymin = max(0, int(box[0].start - margin / binsz)) + 1
    ymax = min(ny - 1, int(box[0].stop + margin / binsz)) + 1
    box_string = '[{xmin}:{xmax},{ymin}:{ymax}]'.format(**locals())
    logging.info('box = {box}, box_string = {box_string}'
                 ''.format(**locals()))
    box = xmin, xmax, ymin, ymax
    return box, box_string
Example #53
def spanningClusterMass(L,p,nSamples=100):
    percolatingMass = 0
    for sample in range(nSamples):
        system = PercolationSystem(L,p)
        lw = system.lw
        for label in system.maxLabels:
            sliced = measurements.find_objects(lw == label)
            if(len(sliced) > 0):
                sliceX = sliced[0][1]
                sliceY = sliced[0][0]
                if sliceX.stop - sliceX.start >= L or sliceY.stop - sliceY.start >= L:
                    percolatingMass += system.maxArea
    percolatingMass /= nSamples
    return percolatingMass
Example #54
def bbox(mask, margin, binsz):
    """Determine the bounding box of a mask.

    TODO: this is an old utility function ... put it into the BoundingBox class.
    """
    from scipy.ndimage.measurements import find_objects
    box = find_objects(mask.astype(int))[0]
    ny, nx = mask.shape
    xmin = max(0, int(box[1].start - margin / binsz)) + 1
    xmax = min(nx - 1, int(box[1].stop + margin / binsz)) + 1
    ymin = max(0, int(box[0].start - margin / binsz)) + 1
    ymax = min(ny - 1, int(box[0].stop + margin / binsz)) + 1
    box_string = '[{xmin}:{xmax},{ymin}:{ymax}]'.format(**locals())
    box = xmin, xmax, ymin, ymax
    return box, box_string
Example #55
def _filter_grouplen(arr, minsize=3):
    """Filter out the groups of grid points smaller than minsize

    Parameters
    ----------
    arr : the array to filter (should be an array of Falses and Trues)
    minsize : the minimum size of the group

    Returns
    -------
    the array, with small groups removed
    """

    # Do it with trues
    r, nr = label(arr)
    nr = [i+1 for i, o in enumerate(find_objects(r)) if (len(r[o]) >= minsize)]
    arr = np.asarray([ri in nr for ri in r])

    # and with Falses
    r, nr = label(~ arr)
    nr = [i+1 for i, o in enumerate(find_objects(r)) if (len(r[o]) >= minsize)]
    arr = ~ np.asarray([ri in nr for ri in r])

    return arr
Example #56
def group_pixels(regions):
  from scipy.ndimage.measurements import find_objects

  # Get the bounding box of each object
  objects = find_objects(regions)

  new_objects = []
  ref_index = []
  for i, obj in enumerate(objects):
    if obj is not None:
      new_objects.append(obj)
      ref_index.append(i)

  # Return the list of objects
  return new_objects, ref_index
Example #57
def group_pixels(mask):
  from scipy.ndimage.measurements import label, find_objects

  # Label the indices in the mask
  regions, nregions = label(mask)#, structure)

#    from matplotlib import pylab, cm
#    for f in range(9):
#        pylab.imshow(regions[f,:,:], cmap=cm.Greys_r)
#        pylab.show()

  # Get the bounding box of each object
  objects = find_objects(regions)

  # Return the list of objects
  return objects
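A short usage sketch for the mask-based variant above (it performs its own scipy imports, so only numpy and the function definition are needed):

import numpy as np

mask = np.zeros((8, 8), dtype=bool)
mask[1:3, 1:4] = True
mask[5:7, 5:8] = True
print(group_pixels(mask))
# [(slice(1, 3, None), slice(1, 4, None)), (slice(5, 7, None), slice(5, 8, None))]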