def buildSeedPoints(image, mode='com'):
    """ Successive set of filters to take a still image, isolate cell-like ROIs, and return a
    list of points representing center points to pass into core.segmentation.pickCells.

    Filters are, in order:  thresholding, morphological closing, and then a connected pixel cutoff filter.

    Often, the best thing to pass into this image as a high pass filtered version of your field of view.

    :param image: a 2d numpy array to build points from
    :param mode: an optional string, either: 'centriod' or 'com'
    :returns: tuple, (seedPoints, seedPointImage) 2d and 3d numpy array, containing coordinates and an image of points, respectively
    """
    #    seedMask = ipg.binaryErode(ipg.connectedPixelFilter(pymorph.label(ipg.threshold(ipg.subGaussian(image))>0)))
    #    binarySeedMask = ipg.connectedPixelFilter(pymorph.label(pymorph.close(ipg.threshold(ipg.subGaussian(image))>0)))
    binarySeedMask = ipg.connectedPixelFilter(pymorph.label(pymorph.close(ipg.threshold(image))))
    seedMask = pymorph.label(binarySeedMask)
    seedingRegionProps = regionProps(image, seedMask)

    if mode == 'centroid':
        seedPoints = [r['centroid'] for r in sorted(seedingRegionProps, key=lambda x: x['meanIntensity'], reverse=True)]
    elif mode == 'com':
        seedPoints = [r['com'] for r in sorted(seedingRegionProps, key=lambda x: x['meanIntensity'], reverse=True)]
    else:
        raise ValueError("mode must be 'centroid' or 'com'")

    #    pdb.set_trace()

    seedPoints = np.floor(np.array(seedPoints)).astype(int)
    seedPointImage = np.zeros_like(seedMask)
    for point in seedPoints:
        seedPointImage[tuple(point)] = 255

    return seedPoints, seedPointImage
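
# Usage sketch for buildSeedPoints (assumptions: numpy, pymorph, and the ipg /
# regionProps helpers used above are importable, and the input file name is
# hypothetical; a high-pass filtered mean image of the field of view works well).
import numpy as np

fov = np.load('mean_fov_highpass.npy')               # hypothetical preprocessed image
seedPoints, seedPointImage = buildSeedPoints(fov, mode='com')
print('found %d seed points' % seedPoints.shape[0])  # sorted brightest -> dimmest
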
def extract2(self):
    im = self.image
    invimage = np.ones((np.size(im, 0), np.size(im, 1))) - im
    bloblabels = pymorph.label(im)
    holelabels = pymorph.label(invimage)
    print("Image is ", (np.size(im, 0), np.size(im, 1)))
    self.blobs = []  # pixels belonging to objects
    self.holes = []  # pixels belonging to holes
    for i in range(0, bloblabels.max()):
        self.blobs.append([])
    for i in range(0, holelabels.max()):
        self.holes.append([])
    for i in range(0, np.size(im, 0)):
        for j in range(0, np.size(im, 1)):
            if im[i][j] == 1:
                self.blobs[bloblabels[i][j] - 1].append([i, j])  #
            else:                                                # Separate holes from objects
                self.holes[holelabels[i][j] - 1].append([i, j])  #
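
# Usage sketch for extract2 (assumption: it is a method of a small analysis
# class that stores a binary image in self.image; the class name below is
# hypothetical and the test image is synthetic).
import numpy as np
import pymorph

class BlobHoleAnalysis(object):
    def __init__(self, binary_image):
        self.image = binary_image

BlobHoleAnalysis.extract2 = extract2   # attach the method defined above

analysis = BlobHoleAnalysis((np.random.rand(64, 64) > 0.7).astype(int))
analysis.extract2()
print('%d blobs, %d holes' % (len(analysis.blobs), len(analysis.holes)))
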
Example #3
    def updateImage(self, image, mask=None, labels=False):
        """image is a 2d image, mask is a 2d RGBA image"""
        if np.any(np.not_equal(self.mask, mask)):
            labels_changed = True
        else:
            labels_changed = False

        if len(self.labels) == 0:
            labels_empty = True
        else:
            labels_empty = False

        self.image = image
        self.mask = mask
        self.image_ax.set_data(image)
        self.mask_ax.set_data(mask)

        if labels is True and (labels_changed or labels_empty):
            for item in self.labels:
                item.remove()
            self.labels = []

            labeled_mask = pymorph.label((mask[:, :, 3] > 0).astype(int))

            for cell in range(1, labeled_mask.max() + 1):
                xx, yy = zip(*np.argwhere(labeled_mask == cell))

                # NOTE INVERSION
                y_loc = np.median(np.unique(xx))
                x_loc = np.median(np.unique(yy))

                self.labels.append(
                    self.axes.text(
                        x_loc,
                        y_loc,
                        str(cell),
                        verticalalignment="center",
                        horizontalalignment="center",
                        color="yellow",
                        size=9,
                    )
                )
        elif labels is False:
            for item in self.labels:
                item.remove()
            self.labels = []
        else:
            pass
        self.draw()
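
# A self-contained sketch of the labelling step used in updateImage: number
# each connected region of an RGBA mask's alpha channel at its median location
# (assumptions: pymorph, numpy, and matplotlib are available; the mask here is
# synthetic).
import numpy as np
import pymorph
import matplotlib.pyplot as plt

mask = np.zeros((64, 64, 4))
mask[10:20, 10:20, 3] = 1.0            # two fake ROIs in the alpha channel
mask[40:50, 30:45, 3] = 1.0

fig, ax = plt.subplots()
ax.imshow(mask[:, :, 3], cmap='gray')
labeled_mask = pymorph.label((mask[:, :, 3] > 0).astype(int))
for cell in range(1, labeled_mask.max() + 1):
    rows, cols = zip(*np.argwhere(labeled_mask == cell))
    # note the row/column -> y/x inversion, as in updateImage above
    ax.text(np.median(np.unique(cols)), np.median(np.unique(rows)), str(cell),
            verticalalignment='center', horizontalalignment='center',
            color='yellow', size=9)
plt.show()
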
Example #4
def bright_object_detection(image):
    """ Perform bright object detection on an array image."""

    # Store all intermediate steps in a dictionary. Useful for debugging.
    steps = dict()
    steps['input'] = image

    # Reduce noise using a median filter.
    med_filter_size = (MED_SIZE, MED_SIZE, MED_SIZE)
    steps['median'] = ndimg.median_filter(steps['input'], med_filter_size)

    # Convert median filtered image to grayscale.
    steps['luminance'] = scikits.image.color.rgb2gray(steps['median']) * 255.
    
    # Compute local pixel average.
    k_avg = np.ones((AVG_SIZE, AVG_SIZE)) / AVG_SIZE**2
    steps['average'] = ndimg.convolve(steps['luminance'], k_avg)

    # Compute local pixel variance.
    steps['diff_mean'] = steps['luminance'] - steps['average']
    steps['diff_mean_sq'] = steps['diff_mean'] * steps['diff_mean']
    steps['variance'] = ndimg.convolve(steps['diff_mean_sq'], k_avg)
    
    # Compute binary threshold image using the Mahalanobis distance. Use the sign
    # of the difference between the pixel and its local mean to ignore dark
    # pixels.
    steps['maha_sq'] = (steps['diff_mean'] > 0) * steps['diff_mean_sq'] / \
                       steps['variance']
    steps['thresh_maha'] = (steps['maha_sq'] > (NUM_STDDEV * NUM_STDDEV))
    
    # Integrate global illumination effects by taking a top percentage of
    # intensities from the detected light regions.
    steps['masked_regions_lum'] = steps['thresh_maha'] * steps['luminance']
    steps['masked_regions_hist'] = pymorph.histogram(steps['masked_regions_lum'])
    steps['global_bright_thresh'] = int((len(steps['masked_regions_hist']) * \
                                         (1.0 - GLOBAL_BRIGHT_PCT)) + 0.5)
    steps['thresh_global'] = steps['masked_regions_lum'] >= \
                             steps['global_bright_thresh']

    # Morphological operations on detected blobs.
    steps['detect_erode'] = pymorph.erode(steps['thresh_global'])
    steps['detect_dilate'] = pymorph.dilate(steps['detect_erode'])
    
    # Count bright objects. Connected components and raw pixels.
    steps['detect_labels'] = pymorph.label(steps['detect_dilate'])
    steps['bright_blob_count'] = steps['detect_labels'].max()
    steps['bright_pixel_count'] = sum(steps['masked_regions_hist']
                                           [steps['global_bright_thresh']:])
    return steps
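
# Usage sketch for bright_object_detection (assumptions: the module constants
# MED_SIZE, AVG_SIZE, NUM_STDDEV, and GLOBAL_BRIGHT_PCT are defined, the imports
# used above (ndimg, scikits.image, pymorph) are available, and the image file
# name is hypothetical).
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

rgb = mpimg.imread('night_scene.jpg')       # hypothetical RGB test image
steps = bright_object_detection(rgb)
print('bright blobs: %d, bright pixels: %d' %
      (steps['bright_blob_count'], steps['bright_pixel_count']))
plt.imshow(steps['detect_labels'])          # labelled detections
plt.show()
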
def pickCellsMatlab(stack, seedPoints=None):
    """
    This is a wrapper for semi-automated cell picking via MATLAB code from the Reid lab.
    It depends on a simple MATLAB function which opens the hdf5 file, aligns the array,
    and writes the result back out to another part of the hdf5 file.

    The algorithm uses the average across time (axis=2) to form the image to pick cells
    with; generally this is the average of the green channel.

    The optional parameter seedPoints is an N x 2 np.array with coordinates of points to
    'pre-click', generated by core.morphProcessing.buildSeedPoints, for example.  Ideally
    it is sorted from brightest to dimmest object.

    Returns two 2d arrays: one binary array and one labeled array.

    :param stack: 3d nparray to use for picking.
    :param seedPoints: optional, N x 2 numpy array
    :returns: (bwOut, mask): tuple of binary mask and labeled mask, both 2d numpy arrays
    """

    fovimage = np.mean(np.atleast_3d(stack),axis=2)

    temp_dir = tempfile.mkdtemp()
    
    f = h5py.File(os.path.join(temp_dir, 'temp.hdf5'))
    f.create_dataset('fovimage',data=fovimage)
    if seedPoints is not None:
        f.create_dataset('seedPoints',data=seedPoints)
        #    else:
        #        f.create_dataset('seedPoints',data=np.array([]))
    f.close()

    # call picking code (external matlab function, yuck)
    print 'Launching MATLAB to pick cells...\n'
    handle = subprocess.Popen('matlab -nodesktop -r \'imCellEditinteractiveExternal\'',stdin=open('/dev/null'), shell=True, executable="/bin/bash", cwd=temp_dir)
    handle.wait()

    # import the masks and delete temporary files
    f = h5py.File(os.path.join(temp_dir, 'temp.hdf5'),'r')
    bwOut = np.array(f.get('bwOut')[:])
    #    mask = np.array(f.get('mask')[:])
    f.close()

    os.system("rm -rf " + temp_dir)

    return bwOut, pymorph.label(bwOut)
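
# Usage sketch for pickCellsMatlab (assumptions: `stack` is a (y, x, t) imaging
# stack already loaded as a numpy array, MATLAB and the Reid lab
# imCellEditinteractiveExternal function are on the path, and buildSeedPoints
# is the function defined earlier in this module).
seedPoints, _ = buildSeedPoints(np.mean(np.atleast_3d(stack), axis=2))
bwOut, mask = pickCellsMatlab(stack, seedPoints=seedPoints)
print('picked %d cells' % mask.max())
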
plab.imshow(pymorph.overlay(steps['luminance'].astype('uint8'),
                            steps['thresh_global']))

###############################################################################
# Morphological operations on detected blobs.

# <demo> stop
# <demo> auto

steps['detect_erode'] = pymorph.erode(steps['thresh_global'])
steps['detect_dilate'] = pymorph.dilate(steps['detect_erode'])
print "Morphed mask (erode, dilate):"
plab.imshow(pymorph.overlay(steps['luminance'].astype('uint8'),
                            steps['detect_dilate']))

# <demo> stop
# <demo> auto

# Count bright objects. Connected components and raw pixels.
steps['detect_labels'] = pymorph.label(steps['detect_dilate'])
steps['bright_blob_count'] = steps['detect_labels'].max()
print "Bright blob count:", steps['bright_blob_count']
steps['bright_pixel_count'] = sum(steps['masked_regions_hist']
                                       [steps['global_bright_thresh']:])
print "Bright pixel count:", steps['bright_pixel_count']

print "Input image:"
plab.imshow(steps['input'])

# <demo> stop

def tracker(frames, event, thresh_amp, thresh_dis, blob_ext,
            direction='forward', plots=False, verbose=False):
    """
    Track the blob in a dynamic interval forward or backward in time, as long
    as its amplitude is above a given threshold and the peak has moved less
    than a given distance threshold between consecutive frames.
    The maximum number of frames the blob is tracked for is given by dimension 0
    of frames.

    Input:
        frames:     ndarray, sequence of frames to track the blob through;
                    dimension 0 limits how many frames can be tracked
        event:      ndarray, [I0, t0, z0, R0]: amplitude and indices of the
                    original feature to track
        thresh_amp: Threshold for amplitude decay relative to frame 0
        thresh_dis: Threshold for blob movement relative to the previous frame
        blob_ext:   Extent of the blob used for determining its average shape
        direction:  Traverse frames 'forward' or 'backward' in dimension 0

    Returns:
        numframes:      Number of frames the blob was tracked
        amp:            Blob amplitude in each frame
        xycom:          COM position of the blob in each frame
        xymax:          Position of the blob peak in each frame
        fwhm_rad_idx:   Indices that mark the left and right FWHM of the blob
        fwhm_pol_idx:   Indices that mark the lower and upper FWHM of the blob
    """
    if (verbose is True):
        print 'Called tracker with '
        print '\tevent = ', event
        print '\tthresh_amp = ', thresh_amp
        print '\tthresh_dis = ', thresh_dis
        print '\tblob_ext = ', blob_ext
        print '\tplots = ', plots

    assert (direction in ['forward', 'backward'])
    assert (blob_ext % 2 == 0)

    # Maximum number of frames the blob is tracked for is given by
    # dimension 0 of frames
    # tau_max = np.shape(frames)[0]
    tau_max = frames.shape[0]
    I0, z0_last, R0_last = event[0], event[2], event[3]

    # I0 is the threshold amplitude we use for detecting blobs
    # i.e. for blob tracking, we identify later all connected regions that are larger than 
    # I0 * thresh_amp

    if (direction == 'forward'):
        f_idx = 0  # Index used to access frames
        tau = 0  # Start with zero offset
    elif (direction == 'backward'):
        f_idx = -1  # Start at the second to last frame, 0 based indexing
        tau = 1  # Start with one frame offset

    if (verbose):
        print 'Tracking blob %s, t_idx %d x = %d, y = %d, I0 = %f' %\
            (direction, tau, R0_last, z0_last, I0)
        print 'thresh_amp = %f, thresh_dis = %f' %\
            (thresh_amp * I0, thresh_dis)
    xycom = np.zeros([tau_max, 2])  # Return values: COM position of peak
    xymax = np.zeros([tau_max, 2])  # Position of the blob peak
    fwhm_pol_idx = np.zeros([tau_max, 2], dtype='int')  # Poloidal FWHM
    fwhm_rad_idx = np.zeros([tau_max, 2], dtype='int')  # Radial FWHM
    amp = np.zeros([tau_max])  # Amplitude at COM position

    good_blob = True
    while (good_blob and tau < tau_max):
        if (verbose):
            print 'f_idx %d, blob from x = %d, y = %d, I0 = %f' %\
                (f_idx, R0_last, z0_last, frames[f_idx, z0_last, R0_last])

        event_frame = frames[f_idx, :, :]
        #plt.figure()
        #plt.contourf(event_frame, 64)
        #plt.title('direction: %s, fidx=%d' % (direction, f_idx))
        #plt.colorbar()
        
        # Label all contiguous regions brighter than thresh_amp * I0
        labels = pm.label(event_frame > thresh_amp * I0)
        # Get the area of all contiguous regions
        blob_area = pm.blob(labels, 'area', output='data')
        # Get the centroid of all contiguous regions
        blob_cent = pm.blob(labels, 'centroid', output='data')
        if (verbose):
            print 'Centroid analysis:'
            print '    -> blob_cent = ', blob_cent
            print '    -> shape = ', blob_cent.shape

        if (blob_cent.size < 1):
            # No peak here, quit tracking
            good_blob = False
            print 'Frame %d, %ss: lost track of blob' % (f_idx, direction)
            break

        # We now have a bunch of contiguous regions.
        # Loop over the regions that are at least 10% of the largest region
        # and find the one whose centroid is closest to the last known position
        # of the blob

        loop_area = np.where(blob_area > 0.1 * blob_area.max())[0]
        min_idx = -1                        #
        min_dist_frame = np.sqrt(event_frame.shape[0] * event_frame.shape[1])     # Maximal distance on a 64x64 grid
        for d_idx, i in enumerate(loop_area):
            dist = np.sqrt((blob_cent[i, 1] - R0_last) ** 2 +
                           (blob_cent[i, 0] - z0_last) ** 2)
            if (verbose):
                print 'Region %d, distance to last peak: %f' % (d_idx, dist)
            if (dist < min_dist_frame and dist < thresh_dis):
                min_dist_frame = dist
                min_idx = i
                if (verbose):
                    print 'Accepted'

        # If min_dist_frame is unchanged, no region was selected and the
        # blob could not be tracked successfully
        if (min_dist_frame == np.sqrt(event_frame.shape[0] * event_frame.shape[1])):
            print 'No peak satisfying criteria.'
            print '\tFound: dist = %f, Stopping %s tracking after %d frames' %\
                (min_dist_frame, direction, tau)
            break
        if (min_idx == -1):
            print 'This should not happen'
            raise ValueError

        # Compute the x and y COM coordinates of the blob, store
        blob_mask = labels != (min_idx + 1)
        event_masked = np.ma.MaskedArray(event_frame,
                                         mask=blob_mask,
                                         fill_value=0)

        # When used to index frames[:,:,:]:
        #      xymax[tau,:] = [index for axis 2, index for axis 1]
        # Maximum in the blob mask
        xymax[tau, :] = np.unravel_index(event_masked.argmax(),
                                         np.shape(labels))
        # When used to index frames[:,:,:]:
        #     xycom[tau,:] = [index for axis 1, index for axis 2]
        # To be consistent with indexing from xymax, flip this array
        # COM returns com along second dimension at index 0
        xycom[tau, ::-1] = com(event_masked)
        ycom_off, xcom_off = xycom[tau, :].round().astype('int')

        if (verbose):
            print 'Peak at (%d,%d), COM at (%d,%d)' %\
                (xymax[tau, 0], xymax[tau, 1],
                 xycom[tau, 0], xycom[tau, 1])

        amp[tau] = event_frame[z0_last, R0_last]
        # Follow the peak
        z0_last, R0_last = xymax[tau, :].astype('int')

        if (plots):
            plt.figure()
            plt.title('%s, frame %d' % (direction, f_idx))
            plt.contour(event_frame, 16, colors='k', linewidth=0.5)
            plt.contourf(event_frame, 16, cmap=plt.cm.hot)
            plt.plot(xycom[tau, 1], xycom[tau, 0], 'wo')
            plt.plot(xymax[tau, 1], xymax[tau, 0], 'w^')
            plt.colorbar()
            plt.xlabel('x / px')
            plt.ylabel('y / px')

        if (direction == 'forward'):
            tau += 1                              # Frame accepted, advance indices
            f_idx += 1
        elif (direction == 'backward'):
            # We started at tau=1, subtract one to return correct number of frame
            # tracked in one direction, ignoring the starting frame
            # Ignore this for forward frame as we count the original frame here
            tau -= 1
            f_idx -= 1


    if (plots):
        plt.show()

    return tau, amp, xycom, xymax, fwhm_rad_idx, fwhm_pol_idx
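
# Usage sketch for tracker (assumptions: `frames` is a (t, z, R) ndarray
# centered on a single blob event, the seed indices below are illustrative,
# and the event list is ordered [I0, t0, z0, R0] as the function unpacks it).
z0, R0 = 32, 40
event = [frames[0, z0, R0], 0, z0, R0]
ntracked, amp, xycom, xymax, fwhm_rad, fwhm_pol = tracker(
    frames, event, thresh_amp=0.6, thresh_dis=8.0, blob_ext=8,
    direction='forward', plots=False, verbose=False)
print('blob tracked over %d frames' % ntracked)
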
list_mask = glob.glob('data_papers/*_m.*')
count = 0
for file_mask in list_mask:
    file_image = glob.glob(file_mask[:-6] + '.*')[0]
    print "currently processing " + file_image
    dummy, filename = os.path.split(file_image)
    mask = cv2.imread(file_mask)
    mask = cv2.resize(
        mask,
        (mask.shape[1] / resizing_factor, mask.shape[0] / resizing_factor))
    height, width, c = mask.shape
    ground_truth_ = np.zeros((height, width))
    # get text bounding boxes
    ground_truth_text = ground_truth_ + np.where(
        np.linalg.norm(mask - [255, 0, 0], axis=2) < 10, 1, 0)
    label_text = label(ground_truth_text)
    bbox_text = blob(label_text, measurement='boundingbox', output="data")
    if bbox_text.size > 0:
        anno_text_x1 = bbox_text[:, 0].tolist()
        anno_text_y1 = bbox_text[:, 1].tolist()
        anno_text_x2 = bbox_text[:, 2].tolist()
        anno_text_y2 = bbox_text[:, 3].tolist()
        anno_text_label = list('t' * len(anno_text_x1))
    else:
        anno_text_x1 = []
        anno_text_y1 = []
        anno_text_x2 = []
        anno_text_y2 = []
        anno_text_label = []
    # get illustration bounding boxes
    ground_truth_illu = ground_truth_ + np.where(
Example #9
def find_closest_region(frame, thresh_amp, x0, max_dist=2.0, verbose=False):
    """
    Returns the contiguous region above a threshold in a frame whose centroid
    is closest to x0

    Input:
        frame       : Input frame
        thresh_amp  : Threshold for region separation
        x0          : Last known centroid position to compare against
        max_dist    : Maximal allowed distance between x0 and the centroid

    Output:
        (x_centroid, event_masked): centroid of the selected region and the
        frame masked to that region
    """

    # Label all contiguous regions above the threshold thresh_amp
    labels = pm.label(frame > thresh_amp)
    # Get the area of all contiguous regions
    blob_area = pm.blob(labels, 'area', output='data')
    # Get the centroid of all contiguous regions
    blob_cent = pm.blob(labels, 'centroid', output='data')
    if (verbose):
        print 'x0 = (%f, %f)' % (x0[0], x0[1])
        print 'Labelling found %d regions: ' % labels.max()
        for i in np.arange(labels.max()):
            print 'Region: %d, centroid at %d, %d, area: %d' % (i, blob_cent[i, 1], blob_cent[i, 0], blob_area[i])

    if (blob_cent.size < 1):
        raise TrackingError

    # We now have a bunch of contiguous regions.
    # Loop over the regions that are at least 10% of the largest region
    # and find the one, whose centroid is closest to the  last known position 
    # of the blob

    min_idx = -1   
    min_dist_frame = np.sqrt(frame.shape[0] * frame.shape[1])     # Maximal distance on a 64x64 grid

    for d_idx, i in enumerate(blob_area):
        # Compute distance of current areas centroid to the last centroids position
        dist = np.sqrt((blob_cent[d_idx, 1] - x0[1]) ** 2 +
                       (blob_cent[d_idx, 0] - x0[0]) ** 2)
        if (verbose):
            print 'Region %d, center: x=%d, y=%d, A=%f, distance to last centroid: %f' %\
                (d_idx, blob_cent[d_idx, 0], blob_cent[d_idx, 1], i, dist)

        # Skip areas who are less than 10% of the original
        if (i < 0.1 * blob_area.max()):
            if(verbose):
                print 'passing blob with area %f, d_idx = %d' % (i, d_idx)
            continue

        if (dist < min(max_dist, min_dist_frame)):
            min_dist_frame = dist
            min_idx = d_idx
            if (verbose):
                print 'Accepted'

    # If min_idx is still -1, no region was selected and the
    # blob could not be tracked successfully
    if (min_idx == -1):
        print 'No peak satisfying criteria.'
        raise TrackingError

    x_centroid = blob_cent[min_idx]

    # Compute the x and y COM coordinates of the blob, store
    blob_mask = labels != (min_idx + 1)
    event_masked = np.ma.MaskedArray(frame,
                                     mask=blob_mask,
                                     fill_value=0)
#    plt.figure()
#    plt.subplot(131)
#    plt.contourf(labels)
#    plt.colorbar()
#
#    plt.subplot(132)
#    plt.contourf(frame, 64)
#    plt.colorbar()
#
#    plt.subplot(133)
#    plt.contourf(event_masked)
#    plt.colorbar()
#
#    plt.show()

    return (x_centroid, event_masked)
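
# Usage sketch for find_closest_region (assumptions: `frame` is a 2d ndarray,
# x0 is the last known (z, R) centroid from a previous frame, the threshold
# below is illustrative, and TrackingError is defined in this module).
try:
    x_centroid, masked = find_closest_region(frame, 0.6 * frame.max(),
                                             x0=(32, 40), max_dist=4.0)
    print('closest region centroid: (%.1f, %.1f)' % (x_centroid[0], x_centroid[1]))
except TrackingError:
    print('no region close enough to x0')
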
Example #10
import pymorph as m
import mahotas
from numpy import where, reshape

image = mahotas.imread('B.png') # Load image

b1 = image[:,:,0] < 100 # Make a binary image from the thresholded red channel
b2 = m.erode(b1, m.sedisk(4)) # Erode to enhance contrast of the bridge
b3 = m.open(b2,m.sedisk(4)) # Remove the bridge
b4 = b2 & ~b3 # Bridge plus small noise (boolean set difference)
b5 = m.areaopen(b4,1000) # Remove small areas leaving only a thinned bridge
b6 = m.dilate(b3)*b5 # Extend the non-bridge area slightly and get intersection with the bridge.

#b6 is image of end of bridge, now find single points
b7 = m.thin(b6, m.endpoints('homotopic')) # Narrow regions to single points.
labelled = m.label(b7) # Label endpoints.

x1, y1 = reshape(where(labelled == 1),(1,2))[0]
x2, y2 = reshape(where(labelled == 2),(1,2))[0]

outputimage = m.overlay(b1, m.dilate(b7,m.sedisk(5)))
mahotas.imsave('output.png', outputimage)
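
# A small follow-on sketch of the same endpoint-extraction step as a reusable
# helper (assumptions: `m`, `where`, and `reshape` are the imports from the
# script above, and each labelled region is a single pixel, as produced by the
# thinning to endpoints).
def endpoint_coordinates(binary_endpoints):
    """Return one (row, col) coordinate per labelled endpoint pixel."""
    lab = m.label(binary_endpoints)
    return [tuple(reshape(where(lab == k), (1, 2))[0])
            for k in range(1, lab.max() + 1)]

print(endpoint_coordinates(b7))   # e.g. the two bridge endpoints found above
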
Example #11
def test_label():
    assert (h,w) == pymorph.label(pieces > 0).shape
Example #12
def test_randomcolor():
    assert (h,w,3) == pymorph.randomcolor(pymorph.label(pieces > 0)).shape