Example #1
def thresholdCenterlines(nms, tLow=0.012, tHigh=0.12, bimodal=True):
    """ Uses a continuity-preserving hysteresis thresholding to classify
    centerlines.
    
    Inputs:
    nms -- Non-maxima suppressed singularity index response

    Keyword Arguments:
    bimodal -- true if the areas of rivers in the image are sufficiently
               large that the distribution of ψ is bimodal
    tLow -- lower threshold (automatically set if bimodal=True)
    tHigh -- higher threshold (automatically set if bimodal=True)

    Returns:
    centerlines -- a binary matrix that indicates centerline locations
    """

    if bimodal:
        # Otsu's algorithm
        nms = preprocess.double2im(nms, 'uint8')
        tHigh, _ = cv2.threshold(nms, nms.min(), nms.max(), cv2.THRESH_OTSU)
        tLow = tHigh * 0.1

    strongCenterline    = nms >= tHigh
    centerlineCandidate = nms >= tLow

    # Find connected components that have at least one strong centerline pixel
    strel = np.ones((3, 3), dtype=bool)
    cclabels, numcc = ndlabel(centerlineCandidate, strel)
    sumstrong = ndsum(strongCenterline, cclabels, list(range(1, numcc+1)))
    centerlines = np.hstack((0, sumstrong > 0)).astype('bool')
    centerlines = centerlines[cclabels]

    return centerlines
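The imports for this snippet are not shown; a minimal sketch of what they are assumed to be, with `ndlabel` and `ndsum` as the usual aliases for `scipy.ndimage.label` and `scipy.ndimage.sum`, and `preprocess` assumed to be a project module providing `double2im`:

import numpy as np
import cv2
from scipy.ndimage import label as ndlabel, sum as ndsum
# `preprocess` is assumed to be a sibling project module whose double2im(array, dtype)
# rescales the floating-point response into the requested integer image type.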
Example #2
    def process_clusters1(self):
        # throw away clusters smaller than min_C and relabel
        # get the size of each cluster
        clus_sz = ndsum(self.den_th,
                        self.clus_label,
                        index=np.arange(1, self.clus_nb + 1))
        clus_rej = np.where(clus_sz < self.min_C)[0]
        for i in [self.clus_slice[x] for x in clus_rej]:
            self.den_th[i] = False
        self.clus_label, self.clus_nb = label(self.den_th)

        # cluster sizes in pixels
        self.clus_sz = ndsum(self.den_th,
                             self.clus_label,
                             index=np.arange(1, self.clus_nb + 1)).astype(int)

        self.keep_clus = range(-1, self.clus_nb + 1)
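For context, `ndsum` is `scipy.ndimage.sum`: summing a boolean mask over each label yields the pixel count of every connected component, which is how the cluster sizes above are computed. A minimal, self-contained sketch of that pattern (the small array is illustrative only):

import numpy as np
from scipy.ndimage import label, sum as ndsum

mask = np.array([[1, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 1]], dtype=bool)
labels, n = label(mask)                                  # two 4-connected components
sizes = ndsum(mask, labels, index=np.arange(1, n + 1))
print(sizes)                                             # [2. 3.] -- pixels per cluster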
Example #3
def getLargestMaskObject(mask):
    '''Given a numpy array `mask' containing a binary mask, returns an equivalent array with only the largest mask object.'''
    labeled, numfeatures = label(mask)  # generate a feature label
    sums = ndsum(mask, labeled, list(range(numfeatures + 1)))  # sum the pixels under each label
    maxfeature = np.where(sums == max(sums))  # choose the maximum sum whose index will be the label number

    return mask * (labeled == maxfeature)
Example #4
def cleanSegment(seg, fillHoles=True, keepLargest=True, minSize=0):
    numUniqueVals = np.unique(seg).shape[0]
    assert numUniqueVals > 1

    foreground = seg != 0
    fillable = binary_fill_holes(foreground) != foreground

    seg1h = oneHot(seg, numUniqueVals)
    seg1h[..., 0] = 0  # zero out background so that argmax does not pick it as the first non-zero index

    # clean each segmentation channel separately
    for n in range(1, numUniqueVals):
        segn = seg1h[..., n]

        if np.unique(segn).shape[0] > 1:
            labeled, numfeatures = label(segn)  # label each separate object with a different number
            sums = ndsum(segn, labeled, range(numfeatures + 1))  # sum the pixels under each label

            # keep the largest feature or eliminate ones smaller than the minimum size
            if keepLargest:
                maxfeature = np.where(sums == max(sums))[0]  # choose the maximum sum whose index will be the label number
                if len(maxfeature) > 0:
                    segn = (labeled == maxfeature[0]).astype(segn.dtype)
            elif minSize > 0:
                segn = segn.copy()
                for i, s in enumerate(sums[1:]):  # skip background
                    if s < minSize:
                        segn[labeled == (i + 1)] = 0

            # fill holes, that is background areas surrounded by segmentation
            if fillHoles:
                # choose pixels that are background and are in areas filled by binary fill
                holes = fillable * (binary_fill_holes(segn) != segn)
                segn[holes] = 1

        seg1h[..., n] = segn

    seg = np.argmax(seg1h, seg1h.ndim - 1)

    # eliminating smaller features from segmentation channels may create holes so fill these in
    if fillHoles:
        seg = greyFillHoles(seg)

    return seg
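The helpers `oneHot` and `greyFillHoles` are project utilities not included here (`label`, `ndsum` and `binary_fill_holes` come from `scipy.ndimage`). A minimal sketch of what `oneHot` is assumed to do:

import numpy as np

def oneHot(labels, numClasses):
    # Hypothetical stand-in: one-hot encode an integer label image along a new
    # last axis, producing an array of shape labels.shape + (numClasses,).
    return np.eye(numClasses, dtype=np.float32)[labels]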
Example #5
def isolateLargestMask(mask):
    '''Label the binary images in `mask' and return an image retaining only the largest.'''
    labeled, numfeatures = label(mask)  # label each separate object with a different number

    if numfeatures > 1:  # if there's more than one object in the segmentation, keep only the largest as the best guess
        sums = ndsum(mask, labeled, range(numfeatures + 1))  # sum the pixels under each label
        maxfeature = np.where(sums == max(sums))  # choose the maximum sum whose index will be the label number
        mask = mask * (labeled == maxfeature)  # mask out the prediction under the largest label

    return mask
Example #6
    def _calculate_focus_measure(self, src, operator, roi):
        '''
            see
            IMPLEMENTATION OF A PASSIVE AUTOMATIC FOCUSING ALGORITHM
            FOR DIGITAL STILL CAMERA
            DOI 10.1109/30.468047
            and
            http://cybertron.cg.tu-berlin.de/pdci10/frankencam/#autofocus
        '''

        # need to resize to 640,480. this is the space the roi is in
#        s = resize(grayspace(pychron), 640, 480)
        src = grayspace(src)
        v = crop(src, *roi)

        di = dict(var=lambda x: variance(x),
                  laplace=lambda x: get_focus_measure(x, 'laplace'),
                  sobel=lambda x: ndsum(generic_gradient_magnitude(x, sobel, mode='nearest')))

        func = di[operator]
        return func(v)
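Here `ndsum` (`scipy.ndimage.sum`) is called with no labels, so it simply sums the whole array: the `sobel` branch scores sharpness as the total gradient magnitude of the cropped grayscale frame. A standalone sketch of that measure, assuming a plain NumPy image:

import numpy as np
from scipy.ndimage import generic_gradient_magnitude, sobel, sum as ndsum

def sobel_focus_measure(gray):
    # Sharper frames have stronger edges, so the summed Sobel gradient
    # magnitude is larger when the image is in focus.
    return ndsum(generic_gradient_magnitude(gray, sobel, mode='nearest'))

# Illustrative usage; in the snippet above the input is the cropped ROI.
score = sobel_focus_measure(np.random.rand(480, 640))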
Example #7
def closeSmallGaps(source_raster_path, output_dir):
    # Read the image
    with rasterio.open(source_raster_path) as src:
        im = src.read(1)
    image = im.copy()
    logger.debug('Creating small cloud mask')
    # Create a cloud mask
    mask = im == FLAG_CLOUD
    labels, count = label(mask)
    areas = np.array(ndsum(mask, labels, np.arange(labels.max() + 1)))
    # identify the clusters bigger than the maximum size we want to interpolate
    new_mask = areas > MAX_AREA_SIZE_TO_INTERPOLATE
    big_clusters = new_mask[labels.ravel()].reshape(labels.shape)
    # Remove the big clusters from the cloud mask to get the small cloud mask
    small_cluster_mask = np.logical_xor(big_clusters, mask)
    logger.debug('Small cloud mask created')

    logger.debug('Applying inpainting algorithm')
    # Apply OpenCV's inpaint algorithm to the image so it fills the small clouds
    image = opencvInpaint(image, small_cluster_mask)

    logger.debug('Saving the image')
    # Save the image in the same output directory
    saveInterpolatedImage(image, source_raster_path, output_dir)
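`opencvInpaint` and `saveInterpolatedImage` are project helpers that are not part of the snippet, and `FLAG_CLOUD` / `MAX_AREA_SIZE_TO_INTERPOLATE` are module-level constants. A minimal sketch of what the inpainting step is assumed to look like, given that cv2.inpaint expects an 8-bit image and an 8-bit single-channel mask:

import cv2
import numpy as np

def opencvInpaint(image, mask, radius=3):
    # Hypothetical stand-in for the helper used above. cv2.inpaint needs uint8
    # input, so cast here; the real helper may handle dtype scaling differently.
    img8 = image.astype(np.uint8)
    mask8 = mask.astype(np.uint8)
    return cv2.inpaint(img8, mask8, radius, cv2.INPAINT_TELEA)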