Example #1
def align_image_to_ellipse(coeffs, image):
    """
    Given the coefficients of an ellipse in 2D and a binary 
    image, return the angle required to align the image to the
    principal axes of the ellipse (with the longest axis
    as the first major 'hump' on the left).
    """
    
    coeff_a, coeff_b, coeff_c = coeffs[:3]
        
    # Calculate tan(angle) for the angle of rotation of the major axis
    preangle = coeff_b / (coeff_a - coeff_c)
    
    if not np.isinf(preangle):
        # Take the arctan and convert to degrees, which is what 
        # ndimage.rotate uses.
        angle = radians_to_degrees(-0.5 * np.arctan(preangle))
        
        # Order = 0 prevents interpolation from being done and screwing 
        # with our object boundaries.
        rotated = ndimage.rotate(image, angle, order=0)
        
        # Pull out the height/width of just the object.
        try:    
            height, width = rotated[ndimage.find_objects(rotated)[0]].shape
        except IndexError:
            raise EllipseAlignmentError("Can't find object after " \
                + "initial rotation.")
    else:
        angle = 0.
        # Keep `rotated` defined so the checks below cannot hit a NameError.
        rotated = image
        height, width = image.shape
    
    # we want the height (first axis) to be the major axis.
    if width > height:
        angle -= 90.0
        rotated = ndimage.rotate(image, angle, order=0)
    
    # Correct so that in budding cells, the "major" hump is always
    # in the top half of the image.
    if np.argmax(rotated.sum(axis=1)) > rotated.shape[0] // 2:
        angle -= 180.0
        rotated = ndimage.rotate(image, angle, order=0)
    
    # Do a find_objects on the resultant array after rotation in 
    # order to _just_ get the object and not any of the extra 
    # space that's been added.
    try:
        bounds = ndimage.find_objects(rotated)[0]
    except IndexError:
        raise EllipseAlignmentError("Can't find object after final rotation.")
    
    return rotated[bounds], angle
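The `preangle` expression above is the standard conic identity tan(2θ) = B/(A − C). A minimal sketch (synthetic coefficients, not from the original repository) that checks the identity on a known rotation:

import numpy as np

theta = np.deg2rad(30.0)                       # known major-axis rotation
a, b = 4.0, 1.0                                # semi-axes, a > b
# Conic coefficients A*x^2 + B*x*y + C*y^2 = 1 of the rotated ellipse.
A = (np.cos(theta) / a) ** 2 + (np.sin(theta) / b) ** 2
C = (np.sin(theta) / a) ** 2 + (np.cos(theta) / b) ** 2
B = 2.0 * np.sin(theta) * np.cos(theta) * (1.0 / a ** 2 - 1.0 / b ** 2)
print(np.rad2deg(0.5 * np.arctan(B / (A - C))))    # -> 30.0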
Example #2
 def assembleimage(patches, pmasks, gridids):
     r"""
     Assemble an image from a number of patches, patch masks and their grid ids.
     
     Parameters
     ----------
     patches : sequence
         Sequence of patches.
     pmasks : sequence
         Sequence of associated patch masks.
     gridids
         Sequence of associated grid ids.
         
     Returns
     -------
     image : ndarray
         The patches assembled back into an image of the original proportions.
         
     Examples
     --------
     Two-dimensional example:
     >>> import numpy
     >>> from medpy.iterators import CentredPatchIterator
     >>> arr = numpy.arange(0, 25).reshape((5,5))
     >>> arr
     array([[ 0,  1,  2,  3,  4],
            [ 5,  6,  7,  8,  9],
            [10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19],
            [20, 21, 22, 23, 24]])
     >>> patches, pmasks, gridids, _ = zip(*CentredPatchIterator(arr, 2))
     >>> result = CentredPatchIterator.assembleimage(patches, pmasks, gridids)
     >>> numpy.all(arr == result)
     True
     
     Five-dimensional example:
     >>> arr = numpy.random.randint(0, 10, range(5, 10))
     >>> patches, pmasks, gridids, _ = zip(*CentredPatchIterator(arr, range(2, 7)))
     >>> result = CentredPatchIterator.assembleimage(patches, pmasks, gridids)
     >>> numpy.all(arr == result)
     True            
     """
     for d in range(patches[0].ndim):
         groups = {}
         for patch, pmask, gridid in zip(patches, pmasks, gridids):
             groupid = gridid[1:]
             if groupid not in groups:
                 groups[groupid] = []
             groups[groupid].append((patch, pmask, gridid[0]))
         patches = []
         gridids = []
         pmasks = []
         for groupid, group in groups.items():  # Python 3: dict.items()
             patches.append(numpy.concatenate([p for p, _, _ in sorted(group, key=itemgetter(2))], d))
             pmasks.append(numpy.concatenate([m for _, m, _ in sorted(group, key=itemgetter(2))], d))
             gridids.append(groupid)
     objs = find_objects(pmasks[0])
     if len(objs) != 1:
         raise ValueError('The assembled patch masks contain more than one binary object.')
     return patches[0][objs[0]]
Example #3
	def quantify_fibers2(self, args, vis_pix, thresholded_copix, im_co, rgb_pix): #label feature with number (2)
		vis_pix_matrix = vis_pix.reshape(im_co.size[1],im_co.size[0])
		all_array, num_features = si.label(vis_pix_matrix,structure=[[1,1,1],[1,1,1],[1,1,1]])
		co_fiber_sizes = []
		for fiber_slice in si.find_objects(all_array):
			fiber_pix = (thresholded_copix[fiber_slice].flatten()) > 0
			fiber_types = collections.Counter(all_array[fiber_slice].flatten())
			max_type = None
			if len(fiber_types) > 2:
				max_count = 0
				for ft in fiber_types:
					if ft > 0 and fiber_types[ft] > max_count:
						max_type = ft
						max_count = fiber_types[ft]
			if float(np.sum(fiber_pix))/float(fiber_pix.size) > .3: #if > 30% of the pixels are colocalized call entire fiber slow
				slow_fiber_size = 0
				for i in range(fiber_slice[0].start,fiber_slice[0].stop):
					for j in range(fiber_slice[1].start,fiber_slice[1].stop):
						if vis_pix_matrix[i][j] == 1:
							if max_type:
								if all_array[i][j] == max_type:
									vis_pix_matrix[i][j] = 2
									rgb_pix[i*im_co.size[0] + j] = (255, 255, 0)
									slow_fiber_size += 1
							else:
								vis_pix_matrix[i][j] = 2
								rgb_pix[i*im_co.size[0] + j] = (255, 255, 0)
								slow_fiber_size += 1
				co_fiber_sizes.append(slow_fiber_size)				
		vis_pix = vis_pix_matrix.flatten()
		return np.array(co_fiber_sizes)
						
Example #4
    def find_bright_peaks(self, data, threshold=None, sigma=5, radius=5):
        """
        Find bright peak candidates in (data).  (threshold) specifies a
        threshold value below which an object is not considered a candidate.
        If threshold is blank, a default is calculated using (sigma).
        (radius) defines a pixel radius for determining local maxima--if the
        desired objects are larger in size, specify a larger radius.

        The routine returns a list of candidate object coordinate tuples
        (x, y) in data.
        """
        if threshold is None:
            # set threshold to default if none provided
            threshold = self.get_threshold(data, sigma=sigma)
            self.logger.debug("threshold defaults to %f (sigma=%f)" % (
                threshold, sigma))

        data_max = filters.maximum_filter(data, radius)
        maxima = (data == data_max)
        diff = data_max > threshold
        maxima[diff == 0] = 0

        labeled, num_objects = ndimage.label(maxima)
        slices = ndimage.find_objects(labeled)
        peaks = []
        for dy, dx in slices:
            xc = (dx.start + dx.stop - 1)/2.0
            yc = (dy.start + dy.stop - 1)/2.0

            # This is only an approximate center; use FWHM or centroid
            # calculation to refine further
            peaks.append((xc, yc))

        return peaks
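A self-contained toy run of the same maximum-filter pattern (synthetic data, independent of the class above):

import numpy as np
from scipy import ndimage

data = np.zeros((32, 32))
data[8, 8], data[20, 25] = 5.0, 7.0
data = ndimage.gaussian_filter(data, 2)            # two blurred "stars"

data_max = ndimage.maximum_filter(data, size=5)
maxima = (data == data_max) & (data_max > 0.01)    # threshold chosen for the toy

labeled, _ = ndimage.label(maxima)
for dy, dx in ndimage.find_objects(labeled):
    print((dx.start + dx.stop - 1) / 2.0,          # x center
          (dy.start + dy.stop - 1) / 2.0)          # y center -> (8, 8) and (25, 20)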
Example #5
  def find_local_maxima(self, data, neighborhood_size):
    """ 
     find local maxima within neighborhood 
      idea from http://stackoverflow.com/questions/9111711
      (get-coordinates-of-local-maxima-in-2d-array-above-certain-value)
    """

    # find local maxima in image (width specified by neighborhood_size)
    data_max = filters.maximum_filter(data, neighborhood_size)
    maxima   = (data == data_max)
    assert np.sum(maxima) > 0        # we should always find local maxima

    # remove connected pixels (plateaus), keeping one pixel per plateau
    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    maxima[:] = False
    for dx, dy in slices:
      maxima[(dx.start + dx.stop - 1) // 2, (dy.start + dy.stop - 1) // 2] = True

    # calculate difference between local maxima and lowest
    # pixel in neighborhood (will be used in select_local_maxima)
    data_min = filters.minimum_filter(data, neighborhood_size)
    diff     = data_max - data_min
    self._maxima = maxima
    self._diff   = diff

    return maxima,diff
Example #6
    def find_albino_features(self, T, im):
        import scipy.ndimage as ndi

        binarized = zeros_like(T)
        binarized[T > self.albino_threshold] = True
        (labels, nlabels) = ndi.label(binarized)
        slices = ndi.find_objects(labels)

        intensities = []
        transform_means = []

        if len(slices) < 2:
            return (None, None)

        for s in slices:

            transform_means.append(mean(T[s]))
            intensities.append(mean(im[s]))

        sorted_transform_means = argsort(transform_means)
        candidate1 = sorted_transform_means[-1]
        candidate2 = sorted_transform_means[-2]

        c1_center = array(ndi.center_of_mass(im, labels, candidate1 + 1))
        c2_center = array(ndi.center_of_mass(im, labels, candidate2 + 1))

        if intensities[candidate1] > intensities[candidate2]:
            return (c2_center, c1_center)
        else:
            return (c1_center, c2_center)
Example #7
def test_find_objects06():
    "find_objects 6"
    data = np.array([1, 0, 2, 2, 0, 3])
    out = ndimage.find_objects(data)
    assert_equal(out, [(slice(0, 1, None),),
                       (slice(2, 4, None),),
                       (slice(5, 6, None),)])
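A closely related behaviour worth knowing (a sketch, not part of the test suite): label values absent from the input produce None entries, which callers such as Example #32 below skip explicitly:

import numpy as np
from scipy import ndimage

data = np.array([1, 0, 0, 4])
print(ndimage.find_objects(data))
# [(slice(0, 1, None),), None, None, (slice(3, 4, None),)]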
Example #8
def precomputestats(image):
    image.lazy_load()
    image.temp['bgsubprotein'] = bgsub(image.channeldata['protein'].copy())
    if 'dna' in image.channeldata:
        image.temp['bgsubdna'] = bgsub(image.channeldata['dna'].copy())
    if image.regions is not None:
        image.temp['region_ids'] = ndimage.find_objects(image.regions)
Example #9
def find_object_centers(image):
    """Find score from black and white image.

    Image should contain 12 white dots placed from left to right.
    Two outermost dots are not counted, they keep the score blocks in place.
    """
    # Find connected components
    labeled, nr_objects = ndimage.label(image)
    slices = ndimage.find_objects(labeled)

    # Center coordinates of objects
    objects = []
    for dy, dx in slices:
        # Skip too small or big regions
        area = abs((dx.stop - dx.start) * (dy.stop - dy.start))
        logging.debug('Found possible score block. Area: %s' % area)
        if area < MIN_SCORE_AREA or area > MAX_SCORE_AREA:
            logging.info('Skip object with area %s' % area)
            continue

        x_center = (dx.start + dx.stop - 1) / 2
        y_center = (dy.start + dy.stop - 1) / 2

        objects.append((x_center, y_center))

    logging.debug('Found %s objects which have correct area' % len(objects))

    if len(objects) != 12:
        err = 'Cannot find correct amount of score blocks. '
        err += 'Expected 12, but found %s' % len(objects)
        raise ValueError(err)

    return objects
Example #10
    def make_profile_map(self, order_map, slitpos_map, lsf,
                         slitoffset_map=None):
        """
        lsf : callable object which takes (o, x, slit_pos)

        o : order (integer)
        x : detector position in dispersion direction
        slit_pos : 0..1

        x and slit_pos can be array.
        """

        iy, ix = np.indices(slitpos_map.shape)

        if slitoffset_map is not None:
            ix = ix - slitoffset_map

        profile_map = np.empty(slitpos_map.shape, "d")
        profile_map.fill(np.nan)

        slices = ni.find_objects(order_map)
        for o in self.orders:
            sl = slices[o-1][0], slice(0, 2048)
            msk = (order_map[sl] == o)

            profile1 = np.zeros(profile_map[sl].shape, "d")
            profile1[msk] = lsf(o, ix[sl][msk], slitpos_map[sl][msk])
            # TODO: make sure that renormalization is a good thing to do.
            profile_sum = np.abs(profile1).sum(axis=0)
            profile1 /= profile_sum

            profile_map[sl][msk] = profile1[msk]

        return profile_map
Example #11
def DrawRectangle(img, dst, nb_labels, color=(255,0,0)):
    """
        Function definition
        +++++++++++++++++++
            
        .. py:function:: DrawRectangle(img, dst, nb_labels, color=(255,0,0))
            
            This method finds objects of interest and draws a rectangle around each one of them. 

            :param numpy_array img: image on which the objects of interest are detected.
            :param numpy_array dst: image on which the rectangles are drawn.
            :param int nb_labels: number of objects to detect. Usually, the number of labels in a
                                  labeled image.
            :param tuple color: the color of the rectangles' outline.
            :return: regions of interest - each one of them contains an object of interest.
            :rtype: list of lists. **Example:** rois[i][0] contains slice_x and rois[i][1]
                                   contains slice_y.
        """
    
    rois = []
    
    for i in range(nb_labels):
        slice_x, slice_y = ndimage.find_objects(img==i+1)[0]
        cv2.rectangle(dst,(slice_y.start,slice_x.start),(slice_y.stop,slice_x.stop),color,1)
        rois.append([slice_x, slice_y])
        
    return rois
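A hedged usage sketch (synthetic labels; assumes cv2, ndimage and the function above are in scope). find_objects slices come back as (row, column) while cv2.rectangle expects (x, y) points, which is why slice_y leads in the drawing call:

import numpy as np

labeled = np.zeros((20, 20), dtype=np.int32)
labeled[3:8, 4:10] = 1
labeled[12:18, 2:6] = 2
canvas = np.zeros((20, 20, 3), dtype=np.uint8)

rois = DrawRectangle(labeled, canvas, nb_labels=2)
print(rois[0])    # [slice(3, 8, None), slice(4, 10, None)]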
Example #12
 def segment_with_label(self, img):
     # Next-nearest neighbors
     struct_nnn = np.ones((3, 3), dtype=int)
     labels, _ = ndimage.label(img, structure=struct_nnn)
     # np.savetxt(c.temp_path('labels.txt'), labels, fmt='%d')
     object_slices = ndimage.find_objects(labels)
     return labels, object_slices
Example #13
def reg_median_cont(data,mask,min_size):
    emask = ndimage.morphology.binary_erosion(mask,iterations=2)
    cmask = mask.copy()  # a real copy, not a view, so the caller's mask is untouched
    cmask[emask==1]=0
    label_im, nb_labels = ndimage.label(cmask)
    sizes = ndimage.sum(cmask,label_im,range(nb_labels+1))
    mask_size = sizes < min_size
    remove_pixel = mask_size[label_im]
    label_im[remove_pixel] = 0
    labels = np.unique(label_im)
    label_im = np.searchsorted(labels, label_im)
    labels = np.unique(label_im)
    out = np.array(label_im, dtype=float)
    for lab in labels:
        if( lab==0 ): continue
        try:
            slice_x, slice_y = ndimage.find_objects(label_im==lab)[0]
        except IndexError:
            print ("Bad index: "%lab)
            continue
#        print lab
        rois = data[slice_x, slice_y]
        tmask = label_im==lab 
        roim = tmask[slice_x, slice_y]
        roio = out[slice_x, slice_y]
        mean = np.ma.median(np.ma.array(rois,mask=~roim))
        roio[roim] = mean

    return out
Example #14
    def extract_slit_profile(self, order_map, slitpos_map, data,
                             x1, x2, bins=None):

        x1, x2 = int(x1), int(x2)

        slices = ni.find_objects(order_map)
        slit_profile_list = []
        if bins is None:
            bins = np.linspace(0., 1., 40)

        for o in self.orders:
            sl = slices[o-1][0], slice(x1, x2)
            msk = (order_map[sl] == o)

            #ss = slitpos_map[sl].copy()
            #ss[~msk] = np.nan

            d = data[sl][msk]
            finite_mask = np.isfinite(d)
            hh = np.histogram(slitpos_map[sl][msk][finite_mask],
                              weights=d[finite_mask], bins=bins,
                              )
            slit_profile_list.append(hh[0])

        return bins, slit_profile_list
Example #15
def get_forecast_objects(model_grid, ew_params, min_size, gaussian_window):
        ew = EnhancedWatershed(*ew_params)
        model_objects = []
        print "Find model objects Hour:",
        for h in range(int((model_grid.end_date - model_grid.start_date).total_seconds()/deltat.total_seconds())+1):
                print(h, end=" ")
                hour_labels = ew.size_filter(ew.label(gaussian_filter(model_grid.data[h], gaussian_window)), min_size)
                obj_slices = find_objects(hour_labels)
                num_slices = len(obj_slices)
                model_objects.append([])
                if num_slices > 0:
                        fig, ax = plt.subplots()
                        add_grid(basemap)
                        t = basemap.contourf(model_grid.lon,model_grid.lat,hour_labels,np.arange(0,num_slices+1)+0.5,extend="max",cmap="Set1",latlon=True,title=str(run_date)+" "+field+" "+str(h))
                        ret = mysavfig(odir+"enh_watershed_ex/ew{0:02d}.png".format(h))
                        for s, sl in enumerate(obj_slices): 
                                model_objects[-1].append(STObject(model_grid.data[h][sl],
                                                #np.where(hour_labels[sl] > 0, 1, 0),
                                                # For some objects (especially long, diagonal ones), the rectangular
                                                # slice encompasses part of other objects (i.e. non-zero elements of slice).
                                                # We don't want them in our mask.
                                                np.where(hour_labels[sl] == s+1, 1, 0),
                                                model_grid.x[sl], 
                                                model_grid.y[sl], 
                                                model_grid.i[sl], 
                                                model_grid.j[sl],
                                                h,
                                                h,
                                                dx=model_grid.dx))
                                if h > 0:
                                        dims = model_objects[-1][-1].timesteps[0].shape
                                        model_objects[-1][-1].estimate_motion(h, model_grid.data[h-1], dims[1], dims[0])
        return model_objects
Example #16
    def sum_peaks(self, width):
        """
        Find peaks, then sum area around them for whole stack.

        If we're going to do this _properly_ we need a way to find areas that
        _don't_ have any beads nearby in order to calculate noise and offset.
        """
        # fit the blobs first to find valid spots
        my_peaks = self.peakfinder

        peakfits = my_peaks.fit_blobs(diameter=width)
        # now reset the blobs to the fit values
        my_peaks.blobs = peakfits[['y0', 'x0', 'sigma_x', 'amp']].values

        # label again
        my_labels = my_peaks.label_blobs(diameter=width)

        # find all the objects.
        my_objects = ndi.find_objects(my_labels)

        my_medians = np.median(self.data, axis=(1, 2))

        my_sums = np.array([self.data[:, obj[0], obj[1]].sum((1, 2))
                            for obj in my_objects])

        self.sums = my_sums - my_medians
        # reset blobs to original
        self.peakfinder.find_blobs()
Example #17
def extractPeople(im, mask, minPersonPixThresh=500, gradThresh=100, gradientFilter=True):

	if not gradientFilter:
		grad_bin = mask
	else:
		grad_g = np.max(np.abs(np.gradient(im.astype(np.int16))), 0)
		grad_bin = (np.abs(grad_g) < gradThresh)
		# grad_bin = nd.binary_erosion(grad_bin, iterations=1)
		mask = mask*grad_bin# np.logical_and(mask[:,:-1],grad_bin)# np.logical_not(grad_bin))

	labelIm, maxLabel = nd.label(im*mask)
	connComps = nd.find_objects(labelIm, maxLabel)

	# Only extract if there are sufficient pixels
	usrTmp = [(c,l) for c,l in zip(connComps,range(1, maxLabel+1)) if minPersonPixThresh < nd.sum(labelIm[c]==l)]
	if len(usrTmp) > 0:
		userBoundingBoxes, userLabels = zip(*usrTmp)
	else:
		userBoundingBoxes = []
		userLabels = []
	userCount = len(userLabels)

	#Relabel foreground mask with multiple labels
	mask = im.astype(np.uint8)*0
	for i,i_new in zip(userLabels, range(1, userCount+1)):
		mask[labelIm==i] = i_new

	return mask, userBoundingBoxes, userLabels
Example #18
def get_mask_bounds(mask, affine):
    """ Return the world-space bounds occupied by a mask given an affine.

        Notes
        -----

        The mask should have only one connected component.

        The affine should be diagonal or diagonal-permuted.
    """
    (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(mask.shape, affine)
    slices = ndimage.find_objects(mask)
    if len(slices) == 0:
        warnings.warn("empty mask", stacklevel=2)
    else:
        x_slice, y_slice, z_slice = slices[0]
        x_width, y_width, z_width = mask.shape
        xmin, xmax = (xmin + x_slice.start*(xmax - xmin)/x_width,
                    xmin + x_slice.stop *(xmax - xmin)/x_width)
        ymin, ymax = (ymin + y_slice.start*(ymax - ymin)/y_width,
                    ymin + y_slice.stop *(ymax - ymin)/y_width)
        zmin, zmax = (zmin + z_slice.start*(zmax - zmin)/z_width,
                    zmin + z_slice.stop *(zmax - zmin)/z_width)

    return xmin, xmax, ymin, ymax, zmin, zmax
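A hedged sketch of the voxel-to-world scaling performed above, assuming a purely diagonal affine (spacings on the diagonal, translation in the last column):

import numpy as np
from scipy import ndimage

mask = np.zeros((10, 10, 10), dtype=bool)
mask[2:5, 3:7, 1:9] = True
affine = np.diag([2.0, 2.0, 3.0, 1.0])             # 2 x 2 x 3 mm voxels, no offset

x_sl, y_sl, z_sl = ndimage.find_objects(mask)[0]
spacing, offset = np.diag(affine)[:3], affine[:3, 3]
mins = offset + spacing * np.array([x_sl.start, y_sl.start, z_sl.start])
maxs = offset + spacing * np.array([x_sl.stop, y_sl.stop, z_sl.stop])
print(mins, maxs)                                  # [4. 6. 3.] [10. 14. 27.]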
Example #19
def get_symbols(image):
  dil_eros = bin_search(dilatation_cross_numb, [image], (1, 16), 1.0, "dec")
  block_size = 50
  binary_adaptive_image = erosion(dilation(threshold_adaptive(
    array(image.convert("L")), block_size, offset=10),
      square(dil_eros)), square(dil_eros))

  all_labels = label(binary_adaptive_image, background = True)
  objects = find_objects(all_labels)

  av_width = av_height = 0
  symbols = []

  for obj in objects:
    symb = (binary_adaptive_image[obj], (obj[0].start, obj[1].start))
    symbols.append(symb)
    av_height += symb[0].shape[0]
    av_width += symb[0].shape[1]

  av_width /= float(len(objects))
  av_height /= float(len(objects))

  symbols = [symb for symb in symbols
    if symb[0].shape[0] >= av_height and symb[0].shape[1] >= av_width]

  return symbols
Example #20
def findSources(image):
    """Return sources sorted by brightness.
    """

    img1 = image.copy()
    src_mask = makeSourcesMask(img1)
    img1[~src_mask] = img1[src_mask].min()
    img1 = exposure.rescale_intensity(img1)
    img1[~src_mask] = 0.
    img1.set_fill_value(0.)

    def obj_params_with_offset(img, labels, aslice, label_idx):
        y_offset = aslice[0].start
        x_offset = aslice[1].start
        thumb = img[aslice]
        lb = labels[aslice]
        yc, xc = ndimage.center_of_mass(thumb, labels=lb, index=label_idx)
        br = thumb[lb == label_idx].sum() #the intensity of the source
        return [br, xc + x_offset, yc + y_offset]

    srcs_labels, num_srcs = ndimage.label(img1)

    if num_srcs < 10:
        print("WARNING: Only %d sources found." % (num_srcs))

    #Eliminate here all 1 pixel sources
    all_objects = [[ind + 1, aslice] for ind, aslice in enumerate(ndimage.find_objects(srcs_labels))
                                                if srcs_labels[aslice].shape != (1,1)]
    lum = np.array([obj_params_with_offset(img1, srcs_labels, aslice, lab_idx)
                for lab_idx, aslice in all_objects])

    lum = lum[lum[:,0].argsort()[::-1]]  #sort by brightness, highest to lowest

    return lum[:,1:]
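A toy check (separate from findSources) of the offset trick in obj_params_with_offset: a centroid computed inside the bounding-box thumbnail is shifted back to full-image coordinates by adding the slice starts:

import numpy as np
from scipy import ndimage

img = np.zeros((10, 10))
img[6:9, 3:6] = 1.0                    # uniform 3x3 source
labels, _ = ndimage.label(img > 0)
aslice = ndimage.find_objects(labels)[0]
yc, xc = ndimage.center_of_mass(img[aslice], labels[aslice], 1)
print(yc + aslice[0].start, xc + aslice[1].start)    # 7.0 4.0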
Example #21
    def __iter__(self):
        """
        Return generator of (source, target, void) tuples.

        Source and target are views into a larger array. Void is a newly
        created array containing the footprint of the void.
        """
        if progress:  # pragma: no cover
            gdal.TermProgress_nocb(0)

        # analyze
        mask = (self.source == self.no_data_value)
        labels, total = ndimage.label(mask)
        items = ndimage.find_objects(labels)

        # iterate the objects
        for label, item in enumerate(items, 1):
            index = self._grow(item)       # to include the edge
            source = self.source[index]    # view into source array
            target = self.target[index]    # view into target array
            void = labels[index] == label  # the footprint of this void
            yield source, target, void

            if progress:  # pragma: no cover
                gdal.TermProgress_nocb(label / total)
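A minimal sketch of the same void-iteration pattern on toy 2-D data; _grow is defined elsewhere in the class, so this sketch assumes it expands each object slice by one pixel, clipped at the array edge:

import numpy as np
from scipy import ndimage

no_data_value = -9999.0
source = np.arange(25.0).reshape(5, 5)
source[1:3, 1:3] = no_data_value

labels, total = ndimage.label(source == no_data_value)
for lab, item in enumerate(ndimage.find_objects(labels), 1):
    grown = tuple(slice(max(s.start - 1, 0), s.stop + 1) for s in item)
    void = labels[grown] == lab                    # footprint of this void
    print(source[grown].shape, int(void.sum()))    # (4, 4) 4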
Example #22
def precomputestats(image):
    image.lazy_load()
    image.temp["bgsubprotein"] = bgsub(image.channeldata["protein"].copy())
    if "dna" in image.channeldata:
        image.temp["bgsubdna"] = bgsub(image.channeldata["dna"].copy())
    if image.regions is not None:
        image.temp["region_ids"] = ndimage.find_objects(image.regions)
Example #23
def label_components(img):
    # label connected components in image.
    labeled_img, count = spimg.label(img)
    # obtain list of tuple, which are slices of array with distinct label
    slices = spimg.find_objects(labeled_img)

    return labeled_img, slices
Example #24
 def autofocus(self, data, zsp, zsu, zind, inclusionList,
               inclusionDict, lastLabelData):
     labelData = ndimage.label(zsp.binary[zind], output=numpy.int32)[0]
     slicess = ndimage.find_objects(labelData)
     for label, slices in enumerate(slicess, start=1):
         slices = tuple(slice(max(sli.start - 4, 0), sli.stop + 4)
                        for sli in slices)  # tuple: list indices are invalid in modern numpy
         dataDetail = data[slices].astype(float)
         footprint = labelData[slices] == label
         newInclusion = Inclusion(zind, label, slices, dataDetail,
                                  footprint, zsp, zsu)
         if not newInclusion.valid:
             labelData[slices] = numpy.where(
                 footprint, 0, labelData[slices]
                 )
             continue
         inclusionList.append(newInclusion)
         inclusionDict[newInclusion.index] = newInclusion
         if lastLabelData is None:
             continue
         lastLabelDetail = numpy.where(footprint,
                                       lastLabelData[slices], 0)
         for l in numpy.unique(lastLabelDetail)[1:]:
             inclusionDict[(zind - 1, l)].append(newInclusion)
     return labelData
Example #25
    def edges(cls):
        from scipy import ndimage, misc
        import numpy as np
        from skimage import feature
        col = Image.open("f990.jpg")
        gray = col.convert('L')

        # Let numpy do the heavy lifting for converting pixels to pure black or white
        bw = np.asarray(gray).copy()

        # Pixel range is 0...255; threshold just below pure white
        bw[bw < 245]  = 0    # Black
        bw[bw >= 245] = 255 # White
        bw[bw == 0] = 254
        bw[bw == 255] = 0
        im = bw
        im = ndimage.gaussian_filter(im, 1)
        edges2 = feature.canny(im, sigma=2)
        labels, numobjects = ndimage.label(im)
        slices = ndimage.find_objects(labels)
        print('\n'.join(map(str, slices)))
        misc.imsave('f990_sob.jpg', im)
        return

        #im = misc.imread('f990.jpg')
        #im = ndimage.gaussian_filter(im, 8)
        sx = ndimage.sobel(im, axis=0, mode='constant')
        sy = ndimage.sobel(im, axis=1, mode='constant')
        sob = np.hypot(sx, sy)
        misc.imsave('f990_sob.jpg', edges2)
Example #26
def short_branches():
    """
    Visualization of short branches of the skeleton.
    
    """
    data1_sk = glob.glob('/backup/yuliya/vsi05/skeletons_largdom/*.h5')
    data1_sk.sort()

    for i,j, k in zip(d[1][37:47], data1_sk[46:56], ell[1][37:47]):
        g = nx.read_gpickle(i)
        dat = tb.openFile(j)
        skel = np.copy(dat.root.skel)
        bra = np.copy(dat.root.branches)
        mask = np.zeros_like(skel)    
        dat.close()
    
        length = nx.get_edge_attributes(g, 'length')
        number = nx.get_edge_attributes(g, 'number')
        num_dict = {}
        for m in number:
            for v in number[m]:
                num_dict.setdefault(v, []).append(m)
        find_br = ndimage.find_objects(bra)
        for l in list(length.keys()):
            if length[l]<0.5*k: #Criteria
                for b in number[l]:
                    mask[find_br[b-1]] = bra[find_br[b-1]]==b
        mlab.figure(bgcolor=(1,1,1), size=(1200,1200))
        mlab.contour3d(skel, colormap='hot')
        mlab.contour3d(mask)
        mlab.savefig('/backup/yuliya/vsi05/skeletons/short_bran/'+ i[42:-10] + '.png')
        mlab.close()
Example #27
def get_stomata(max_proj_image, min_obj_size=200, max_obj_size=1000):
    """Performs image segmentation from a max_proj_image.
     Disposes of objects in range min_obj_size to
    max_obj_size

    :param max_proj_image: the maximum projection image
    :type max_proj_image: numpy.ndarray, uint16
    :param min_obj_size: minimum size of object to keep
    :type min_obj_size: int
    :param max_obj_size: maximum size of object to keep
    :type max_obj_size: int
    :returns: list of [ [coordinates of kept objects - list of slice objects],
                        binary object image - numpy.ndarray,
                        labelled object image - numpy.ndarray
                     ]

    """

    # pore_margin = 10
    # max_obj_size = 1000
    # min_obj_size = 200
    # for prop, value in segment_options:
    #     if prop == 'pore_margin':
    #         pore_margin = value
    #     if prop == 'max_obj_size':
    #         max_obj_size = value
    #     if prop == 'min_obj_size':
    #         min_obj_size = value
    #
    # print(pore_margin)
    # print(max_obj_size)
    # print(min_obj_size)

    #rescale_min = 50
    #rescale_max= 100
    #rescaled = exposure.rescale_intensity(max_proj_image, in_range=(rescale_min,rescale_max))
    rescaled = max_proj_image
    seed = np.copy(rescaled)
    seed[1:-1, 1:-1] = rescaled.max()
    #mask = rescaled
    #if gamma != None:
    #    rescaled = exposure.adjust_gamma(max_proj_image, gamma)
    #filled = reconstruction(seed, mask, method='erosion')
    closed = dilation(rescaled)
    seed = np.copy(closed)
    seed[1:-1, 1:-1] = closed.max()
    mask = closed


    filled = reconstruction(seed, mask, method='erosion')
    label_objects, nb_labels = ndimage.label(filled)
    sizes = np.bincount(label_objects.ravel())
    mask_sizes = sizes
    mask_sizes = (sizes > min_obj_size) & (sizes < max_obj_size)
    #mask_sizes = (sizes > 200) & (sizes < 1000)
    mask_sizes[0] = 0
    big_objs = mask_sizes[label_objects]
    stomata, _ = ndimage.label(big_objs)
    obj_slices = ndimage.find_objects(stomata)
    return [obj_slices, big_objs, stomata]
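The size filter above is the classic bincount-over-labels pattern; a small standalone sketch (toy data, toy thresholds):

import numpy as np
from scipy import ndimage

img = np.zeros((8, 8), dtype=int)
img[0, 0] = 1                    # 1-pixel speck
img[2:6, 2:6] = 1                # 16-pixel blob

labels, _ = ndimage.label(img)
sizes = np.bincount(labels.ravel())
keep = (sizes > 4) & (sizes < 100)
keep[0] = False                  # never keep the background
filtered = keep[labels]          # boolean image of the surviving objects
print(int(filtered.sum()))       # 16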
Example #28
    def GetRegion(self, index, objects=None):
        if not objects:
            objects = ndimage.find_objects(self.image.labels)

        o = objects[index]

        mask = self.image.labels[o] == (index + 1)

        slx, sly, slz = o

        X, Y, Z = np.ogrid[slx, sly, slz]
        vs = (
            1e3 * self.image.mdh["voxelsize.x"],
            1e3 * self.image.mdh["voxelsize.y"],
            1e3 * self.image.mdh["voxelsize.z"],
        )

        return [
            DataBlock(
                np.maximum(self.image.data[slx, sly, slz, j] - self.image.data[slx, sly, slz, j].min(), 0) * mask,
                X,
                Y,
                Z,
                vs,
            )
            for j in range(self.image.data.shape[3])
        ]
Example #29
    def RetrieveObjects(self, masterChan=0, orientChan=1, orient_dir=-1):
        objs = ndimage.find_objects(self.image.labels)

        self.objects = [
            BlobObject(self.GetRegion(i, objs), masterChan, orientChan, orient_dir)
            for i in range(self.image.labels.max())
        ]
Example #30
def myfindChessboardCorners(im,dim):
    gr=30
    patern=np.zeros((gr,gr),dtype='uint8')
    patern[:gr//2,:gr//2]=255
    patern[gr//2:,gr//2:]=255
    m1=cv2.matchTemplate(im,patern,cv2.TM_CCORR_NORMED)
    patern=np.ones((gr,gr),dtype='uint8')*255
    patern[:gr//2,:gr//2]=0
    patern[gr//2:,gr//2:]=0
    m2=cv2.matchTemplate(im,patern,cv2.TM_CCORR_NORMED)
    #m=np.bitwise_or(m1>0.9,m2>0.9)
    #import pdb;pdb.set_trace()
    tresh=0.95
    labels=ndimage.label(np.bitwise_or(m1>tresh,m2>tresh))
    if labels[1]!=dim[0]*dim[1]:
        return False,[]
    objs=ndimage.find_objects(labels[0])
    corners=[]
    for xx,yy in objs:
        xpos=(xx.start+xx.stop)/2.0#+gr/2-0.5
        ypos=(yy.start+yy.stop)/2.0#+gr/2-0.5
        se=5
        xi, yi = int(xpos), int(ypos)  # integer indices for slicing
        #import pdb;pdb.set_trace()
        minVal, maxVal, minLoc, maxLoc=cv2.minMaxLoc(m2[xi-se:xi+se,yi-se:yi+se])
        if maxVal<tresh:
            minVal, maxVal, minLoc, maxLoc=cv2.minMaxLoc(m1[xi-se:xi+se,yi-se:yi+se])
        xpos+=-se+maxLoc[0]+gr/2-0.5
        ypos+=-se+maxLoc[1]+gr/2-0.5
        
        #xpos=xx.start+gr/2
        #ypos=yy.start+gr/2
        corners.append((ypos,xpos) )
    return True,np.array(corners)
Example #31
def pre_process_digits(cut_numbers, structuring_element, filter_invalids=True):
    for number in cut_numbers:
        digits = number["digits"]
        for i, digit in enumerate(digits):

            ret, thresholded = cv2.threshold(digit,
                                             image_threshold,
                                             1,
                                             type=cv2.THRESH_BINARY_INV)

            # do connected component analysis
            digits[i], nr_of_objects = ndimage.measurements.label(
                thresholded, structuring_element)
            # determine the sizes of the objects
            sizes = np.bincount(np.reshape(digits[i], -1).astype(np.int64))
            selected_object = -1
            max_size = 0

            log = ""
            for j in range(1, nr_of_objects + 1):
                if sizes[j] < 11:
                    if filter_invalids:
                        log += str(i) + ": too small"
                        continue  # this is too small to be a number
                maxy, miny, maxx, minx = get_bounding_box(digits[i], j)
                # commented out because 1's were detected as vertical borders
                if (maxy - miny < 3 and
                    (miny < 2 or maxy > 59)) or (maxx - minx < 3 and
                                                 (minx < 2 or maxx > 25)):
                    # if maxy - miny < 3 and (miny < 2 or maxy > 59):
                    if filter_invalids:
                        log += str(i) + ": on border"
                        continue  # this is likely a border artifact
                border_dist = get_avg_border_distance(digits[i], j)
                # print borderdist
                if border_dist > 0.2:
                    if filter_invalids:
                        log += str(i) + ": too close to border"
                        continue  # this is likely a border artifact

                if sizes[j] > max_size:
                    max_size = sizes[j]
                    selected_object = j

            if selected_object == -1 and filter_invalids:
                digits[i] = None
                logging.info(log)
                continue

            if selected_object == -1 and not filter_invalids:
                loc = (slice(25, 42, None), slice(8, 13, None))
            else:
                loc = ndimage.find_objects(digits[i])[selected_object - 1]

            cropped = digits[i][loc]

            # replace the shape number by 255
            cropped[cropped == selected_object] = 255

            output_image = process_image(cropped)

            image_array = np.array(output_image)
            if isMinus(image_array):
                digits[i] = None
                continue
            digits[i] = image_array
Example #32
def regionprops(label_image,
                intensity_image=None,
                cache=True,
                coordinates=None):
    """Measure properties of labeled image regions.

    Parameters
    ----------
    label_image : (N, M) ndarray
        Labeled input image. Labels with value 0 are ignored.

        .. versionchanged:: 0.14.1
            Previously, ``label_image`` was processed by ``numpy.squeeze`` and
            so any number of singleton dimensions was allowed. This resulted in
            inconsistent handling of images with singleton dimensions. To
            recover the old behaviour, use
            ``regionprops(np.squeeze(label_image), ...)``.
    intensity_image : (N, M) ndarray, optional
        Intensity (i.e., input) image with same size as labeled image.
        Default is None.
    cache : bool, optional
        Determine whether to cache calculated properties. The computation is
        much faster for cached properties, whereas the memory consumption
        increases.
    coordinates : 'rc' or 'xy', optional
        Coordinate conventions for 2D images. (Only 'rc' coordinates are
        supported for 3D images.)

    Returns
    -------
    properties : list of RegionProperties
        Each item describes one labeled region, and can be accessed using the
        attributes listed below.

    Notes
    -----
    The following properties can be accessed as attributes or keys:

    **area** : int
        Number of pixels of region.
    **bbox** : tuple
        Bounding box ``(min_row, min_col, max_row, max_col)``.
        Pixels belonging to the bounding box are in the half-open interval
        ``[min_row; max_row)`` and ``[min_col; max_col)``.
    **bbox_area** : int
        Number of pixels of bounding box.
    **centroid** : array
        Centroid coordinate tuple ``(row, col)``.
    **convex_area** : int
        Number of pixels of convex hull image.
    **convex_image** : (H, J) ndarray
        Binary convex hull image which has the same size as bounding box.
    **coords** : (N, 2) ndarray
        Coordinate list ``(row, col)`` of the region.
    **eccentricity** : float
        Eccentricity of the ellipse that has the same second-moments as the
        region. The eccentricity is the ratio of the focal distance
        (distance between focal points) over the major axis length.
        The value is in the interval [0, 1).
        When it is 0, the ellipse becomes a circle.
    **equivalent_diameter** : float
        The diameter of a circle with the same area as the region.
    **euler_number** : int
        Euler characteristic of region. Computed as the number of objects
        (= 1) minus the number of holes (8-connectivity).
    **extent** : float
        Ratio of pixels in the region to pixels in the total bounding box.
        Computed as ``area / (rows * cols)``
    **filled_area** : int
        Number of pixels of filled region.
    **filled_image** : (H, J) ndarray
        Binary region image with filled holes which has the same size as
        bounding box.
    **image** : (H, J) ndarray
        Sliced binary region image which has the same size as bounding box.
    **inertia_tensor** : (2, 2) ndarray
        Inertia tensor of the region for the rotation around its mass.
    **inertia_tensor_eigvals** : tuple
        The two eigen values of the inertia tensor in decreasing order.
    **intensity_image** : ndarray
        Image inside region bounding box.
    **label** : int
        The label in the labeled input image.
    **local_centroid** : array
        Centroid coordinate tuple ``(row, col)``, relative to region bounding
        box.
    **major_axis_length** : float
        The length of the major axis of the ellipse that has the same
        normalized second central moments as the region.
    **max_intensity** : float
        Value with the greatest intensity in the region.
    **mean_intensity** : float
        Value with the mean intensity in the region.
    **min_intensity** : float
        Value with the least intensity in the region.
    **minor_axis_length** : float
        The length of the minor axis of the ellipse that has the same
        normalized second central moments as the region.
    **moments** : (3, 3) ndarray
        Spatial moments up to 3rd order::

            m_ji = sum{ array(x, y) * x^j * y^i }

        where the sum is over the `x`, `y` coordinates of the region.
    **moments_central** : (3, 3) ndarray
        Central moments (translation invariant) up to 3rd order::

            mu_ji = sum{ array(x, y) * (x - x_c)^j * (y - y_c)^i }

        where the sum is over the `x`, `y` coordinates of the region,
        and `x_c` and `y_c` are the coordinates of the region's centroid.
    **moments_hu** : tuple
        Hu moments (translation, scale and rotation invariant).
    **moments_normalized** : (3, 3) ndarray
        Normalized moments (translation and scale invariant) up to 3rd order::

            nu_ji = mu_ji / m_00^[(i+j)/2 + 1]

        where `m_00` is the zeroth spatial moment.
    **orientation** : float
        In 'rc' coordinates, angle between the 0th axis (rows) and the major
        axis of the ellipse that has the same second moments as the region,
        ranging from `-pi/2` to `pi/2` counter-clockwise.

        In `xy` coordinates, as above but the angle is now measured from the
        "x" or horizontal axis.
    **perimeter** : float
        Perimeter of object which approximates the contour as a line
        through the centers of border pixels using a 4-connectivity.
    **slice** : tuple of slices
        A slice to extract the object from the source image.
    **solidity** : float
        Ratio of pixels in the region to pixels of the convex hull image.
    **weighted_centroid** : array
        Centroid coordinate tuple ``(row, col)`` weighted with intensity
        image.
    **weighted_local_centroid** : array
        Centroid coordinate tuple ``(row, col)``, relative to region bounding
        box, weighted with intensity image.
    **weighted_moments** : (3, 3) ndarray
        Spatial moments of intensity image up to 3rd order::

            wm_ji = sum{ array(x, y) * x^j * y^i }

        where the sum is over the `x`, `y` coordinates of the region.
    **weighted_moments_central** : (3, 3) ndarray
        Central moments (translation invariant) of intensity image up to
        3rd order::

            wmu_ji = sum{ array(x, y) * (x - x_c)^j * (y - y_c)^i }

        where the sum is over the `x`, `y` coordinates of the region,
        and `x_c` and `y_c` are the coordinates of the region's weighted
        centroid.
    **weighted_moments_hu** : tuple
        Hu moments (translation, scale and rotation invariant) of intensity
        image.
    **weighted_moments_normalized** : (3, 3) ndarray
        Normalized moments (translation and scale invariant) of intensity
        image up to 3rd order::

            wnu_ji = wmu_ji / wm_00^[(i+j)/2 + 1]

        where ``wm_00`` is the zeroth spatial moment (intensity-weighted area).

    Each region also supports iteration, so that you can do::

      for prop in region:
          print(prop, region[prop])

    See Also
    --------
    label

    References
    ----------
    .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
           Core Algorithms. Springer-Verlag, London, 2009.
    .. [2] B. Jähne. Digital Image Processing. Springer-Verlag,
           Berlin-Heidelberg, 6th edition, 2005.
    .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
           Features, from Lecture notes in computer science, p. 676. Springer,
           Berlin, 1993.
    .. [4] https://en.wikipedia.org/wiki/Image_moment

    Examples
    --------
    >>> from skimage import data, util
    >>> from skimage.measure import label
    >>> img = util.img_as_ubyte(data.coins()) > 110
    >>> label_img = label(img, connectivity=img.ndim)
    >>> props = regionprops(label_img)
    >>> # centroid of first labeled object
    >>> props[0].centroid
    (22.729879860483141, 81.912285234465827)
    >>> # centroid of first labeled object
    >>> props[0]['centroid']
    (22.729879860483141, 81.912285234465827)

    """

    if label_image.ndim not in (2, 3):
        raise TypeError('Only 2-D and 3-D images supported.')

    if not np.issubdtype(label_image.dtype, np.integer):
        raise TypeError('Label image must be of integer type.')

    regions = []

    objects = ndi.find_objects(label_image)
    for i, sl in enumerate(objects):
        if sl is None:
            continue

        label = i + 1

        props = _RegionProperties(sl,
                                  label,
                                  label_image,
                                  intensity_image,
                                  cache,
                                  coordinates=coordinates)
        regions.append(props)

    return regions
Example #33
def test_label_default_dtype():
    test_array = np.random.rand(10, 10)
    label, no_features = ndimage.label(test_array > 0.5)
    assert_(label.dtype in (np.int32, np.int64))
    # Shouldn't raise an exception
    ndimage.find_objects(label)
Example #34
 def SE(img, thresh=.7, size=4):
     mask = img > thresh
     rank = len(mask.shape)
     la, co = ndimage.label(mask,
                            ndimage.generate_binary_structure(rank, rank))
     _ = ndimage.find_objects(la)
Example #35
var = np.where(var >= low_thrs, 1, 0)

for time_ind in range(nb_time_stamps):
    print("time stamp : ", time_ind, file=f)
    ### detect and label objects
    print("detect and label objects...", file=f)
    labeled_array[time_ind, :, :, :], nb = label(
        var[time_ind, :, :, :])  # shape (72,502,602); label once, reuse the count
    nb_features.append(nb)
    print("number of objects found : ", nb_features[time_ind], file=f)
    print("...OK", file=f)

    ### find location of objects
    print("find locations of objects...", file=f)
    objects_location += [
        ndimage.find_objects(labeled_array[time_ind, :, :, :])
    ]
    obj_nb = 0
    nb_obj_solo = 0
    num_features_solo_time_ind = []

    ### find objects composed of only 1 particle
    for obj in objects_location[time_ind]:
        vertical_extension = np.shape(labeled_array[time_ind, :, :, :][obj])[0]
        y_width = np.shape(labeled_array[time_ind, :, :, :][obj])[1]
        x_width = np.shape(labeled_array[time_ind, :, :, :][obj])[2]
        if (vertical_extension == 1) & (y_width == 1) & (x_width == 1):
            num_features_solo_time_ind.append(obj_nb + 1)
            nb_obj_solo += 1
        obj_nb += 1
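A compact sketch of the single-particle test above: an object counts as "solo" when its bounding-box slice has extent 1 along every axis:

import numpy as np
from scipy.ndimage import label, find_objects

var = np.zeros((3, 5, 5), dtype=int)
var[1, 2, 2] = 1                 # isolated particle
var[0, 0:2, 0:3] = 1             # extended object

labeled, nb = label(var)
solo = [i + 1 for i, sl in enumerate(find_objects(labeled))
        if all(s.stop - s.start == 1 for s in sl)]
print(solo)                      # [2]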
Example #36
def BuildDB():
    # create  custom data frame database type
    mydatabasetype = [('dataid', int), ('axialliverbounds', bool),
                      ('axialtumorbounds', bool), ('imagepath', 'S128'),
                      ('imagedata', '(%d,%d)int16' %
                       (options.trainingresample, options.trainingresample)),
                      ('truthpath', 'S128'),
                      ('truthdata', '(%d,%d)uint8' %
                       (options.trainingresample, options.trainingresample))]

    # initialize empty dataframe
    numpydatabase = np.empty(0, dtype=mydatabasetype)

    # load all data from csv
    totalnslice = 0
    with open(options.dbfile, 'r') as csvfile:
        myreader = csv.DictReader(csvfile, delimiter=',')
        for row in myreader:
            imagelocation = '%s/%s' % (options.rootlocation, row['image'])
            truthlocation = '%s/%s' % (options.rootlocation, row['label'])
            print(imagelocation, truthlocation)

            # load nifti file
            imagedata = nib.load(imagelocation)
            numpyimage = imagedata.get_data().astype(IMG_DTYPE)
            # error check
            assert numpyimage.shape[0:2] == (_globalexpectedpixel,
                                             _globalexpectedpixel)
            nslice = numpyimage.shape[2]
            resimage = skimage.transform.resize(
                numpyimage,
                (options.trainingresample, options.trainingresample, nslice),
                order=0,
                mode='constant',
                preserve_range=True).astype(IMG_DTYPE)

            # load nifti file
            truthdata = nib.load(truthlocation)
            numpytruth = truthdata.get_data().astype(SEG_DTYPE)
            # error check
            assert numpytruth.shape[0:2] == (_globalexpectedpixel,
                                             _globalexpectedpixel)
            assert nslice == numpytruth.shape[2]
            restruth = skimage.transform.resize(
                numpytruth,
                (options.trainingresample, options.trainingresample, nslice),
                order=0,
                mode='constant',
                preserve_range=True).astype(SEG_DTYPE)

            # bounding box for each label
            if (np.max(restruth) == 1):
                (liverboundingbox, ) = ndimage.find_objects(restruth)
                tumorboundingbox = None
            else:
                (liverboundingbox,
                 tumorboundingbox) = ndimage.find_objects(restruth)

            if (nslice == restruth.shape[2]):
                # custom data type to subset
                datamatrix = np.zeros(nslice, dtype=mydatabasetype)

                # custom data type to subset
                datamatrix['dataid'] = np.repeat(row['dataid'], nslice)
                # id the slices within the bounding box
                axialliverbounds = np.repeat(False, nslice)
                axialtumorbounds = np.repeat(False, nslice)
                axialliverbounds[liverboundingbox[2]] = True
                if tumorboundingbox is not None:
                    axialtumorbounds[tumorboundingbox[2]] = True
                datamatrix['axialliverbounds'] = axialliverbounds
                datamatrix['axialtumorbounds'] = axialtumorbounds
                datamatrix['imagepath'] = np.repeat(imagelocation, nslice)
                datamatrix['truthpath'] = np.repeat(truthlocation, nslice)
                datamatrix['imagedata'] = resimage.transpose(2, 1, 0)
                datamatrix['truthdata'] = restruth.transpose(2, 1, 0)
                numpydatabase = np.hstack((numpydatabase, datamatrix))
                # count total slice for QA
                totalnslice = totalnslice + nslice
            else:
                print('training data error image[2] = %d , truth[2] = %d ' %
                      (nslice, restruth.shape[2]))

    # save numpy array to disk
    np.save(_globalnpfile, numpydatabase)
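A toy sketch (separate from BuildDB) of the per-label unpacking and the axial-flag trick: find_objects returns one bounding box per label value, and indexing with the third slice marks the axial planes the liver box spans:

import numpy as np
from scipy import ndimage

seg = np.zeros((4, 4, 10), dtype=np.uint8)
seg[1:3, 1:3, 2:7] = 1           # "liver" (label 1)
seg[2, 2, 4:6] = 2               # "tumor" (label 2) inside it

liverboundingbox, tumorboundingbox = ndimage.find_objects(seg)
axialliverbounds = np.repeat(False, seg.shape[2])
axialliverbounds[liverboundingbox[2]] = True
print(np.flatnonzero(axialliverbounds))    # [2 3 4 5 6]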
Example #37
def eval_localization(dataloader,
                      model,
                      LABEL,
                      map_thresholds,
                      percentiles,
                      ior_threshold=0.1,
                      method='ior'):

    num_correct_pred = 0
    num_images_examined = 0

    def compute_ior(activated_masks, gt_mask):
        intersection_masks = np.logical_and(activated_masks, gt_mask)

        detected_region_areas = np.sum(activated_masks, axis=(1, 2))
        intersection_areas = np.sum(intersection_masks, axis=(1, 2))

        ior = np.divide(intersection_areas, detected_region_areas)

        return ior

    def compute_iou(activated_masks, gt_mask):
        intersection_masks = np.logical_and(activated_masks, gt_mask)
        union_masks = np.logical_or(activated_masks, gt_mask)

        intersection_areas = np.sum(intersection_masks, axis=(1, 2))
        union_areas = np.sum(union_masks, axis=(1, 2))

        iou = np.divide(intersection_areas, union_areas)

        return iou

    map_thresholds = np.array(map_thresholds)
    map_thresholds = map_thresholds[:, np.newaxis, np.newaxis]

    for data in dataloader:

        inputs, labels, filename, bbox = data
        num_images_examined += 1

        # get cam map
        inputs = inputs.to(device)

        raw_cam = calc_cam(inputs, LABEL, model)
        raw_cam = np.array(
            Image.fromarray(raw_cam.squeeze()).resize((224, 224),
                                                      Image.NEAREST))

        raw_cams = np.broadcast_to(raw_cam,
                                   shape=(len(map_thresholds),
                                          raw_cam.shape[0], raw_cam.shape[1]))
        activation_masks = np.greater_equal(raw_cams, map_thresholds)

        # bounding box as a mask
        bbox = bbox.type(torch.cuda.IntTensor)
        bbox_mask = np.zeros(raw_cam.shape, dtype=bool)
        bbox_mask[bbox[0, 1]:bbox[0, 1] + bbox[0, 3],
                  bbox[0, 0]:bbox[0, 0] + bbox[0, 2]] = True

        if method == 'iobb':

            object_masks_union_all_thresholds = []
            for activation_mask in activation_masks:

                label_im, nb_labels = ndimage.label(activation_mask)
                object_slices = ndimage.find_objects(label_im)

                object_masks = []
                for object_slice in object_slices:
                    object_mask = np.zeros(label_im.shape, dtype=bool)
                    object_mask[object_slice[0], object_slice[1]] = True
                    object_masks.append(object_mask)
                object_masks = np.array(object_masks)

                object_masks_union = np.logical_or.reduce(object_masks)
                object_masks_union_all_thresholds.append(object_masks_union)
            object_masks_union_all_thresholds = np.array(
                object_masks_union_all_thresholds)

            iobb = compute_ior(object_masks_union_all_thresholds, bbox_mask)
            num_correct_pred += np.greater_equal(iobb, ior_threshold)

        if method == 'ior':
            ior = compute_ior(activation_masks, bbox_mask)
            num_correct_pred += np.greater_equal(ior, ior_threshold)

        if method == 'ior_percentile_dynamic':
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam,
                                                       (100 - bbox_area_ratio))
            intersection = np.logical_and(activation_mask, bbox_mask)
            ior = intersection.sum() / activation_mask.sum()
            num_correct_pred += np.greater_equal(ior, ior_threshold)

        if method == 'ior_percentile_static':
            activation_masks = []
            for percentile in percentiles:
                activation_mask = raw_cam >= np.percentile(
                    raw_cam, 100 - percentile)
                activation_masks.append(activation_mask)
            activation_masks = np.array(activation_masks)
            ior = compute_ior(activation_masks, bbox_mask)
            num_correct_pred += np.greater_equal(ior, ior_threshold)

        if method == 'iou':
            iou = compute_iou(activation_masks, bbox_mask)
            num_correct_pred += np.greater_equal(iou, ior_threshold)

        if method == 'iou_percentile_dynamic':
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam,
                                                       (100 - bbox_area_ratio))
            intersection = np.logical_and(activation_mask, bbox_mask)
            union = np.logical_or(activation_mask, bbox_mask)
            iou = intersection.sum() / union.sum()
            num_correct_pred += np.greater_equal(iou, ior_threshold)

        if method == 'iou_percentile_static':
            activation_masks = []
            for percentile in percentiles:
                activation_mask = raw_cam >= np.percentile(
                    raw_cam, 100 - percentile)
                activation_masks.append(activation_mask)
            activation_masks = np.array(activation_masks)
            iou = compute_iou(activation_masks, bbox_mask)
            num_correct_pred += np.greater_equal(iou, ior_threshold)

        if method == 'iou_percentile_bb_dynamic':
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam,
                                                       (100 - bbox_area_ratio))

            label_im, nb_labels = ndimage.label(activation_mask)
            object_slices = ndimage.find_objects(label_im)

            object_masks = []
            for object_slice in object_slices:
                object_mask = np.zeros(label_im.shape, dtype=bool)
                object_mask[object_slice[0], object_slice[1]] = True

                if (np.logical_and(object_mask, bbox_mask)).sum() > 0:
                    object_masks.append(object_mask)

            object_masks = np.array(object_masks)
            object_masks = np.logical_or.reduce(object_masks)

            intersection = np.logical_and(object_masks, bbox_mask)
            union = np.logical_or(object_masks, bbox_mask)
            iou = intersection.sum() / union.sum()
            num_correct_pred += np.greater_equal(iou, ior_threshold)

        if method == 'iou_percentile_bb_dynamic_nih':
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam,
                                                       (100 - bbox_area_ratio))

            label_im, nb_labels = ndimage.label(activation_mask)
            object_slices = ndimage.find_objects(label_im)

            object_masks = []
            for object_slice in object_slices:
                object_mask = np.zeros(label_im.shape, dtype=bool)
                object_mask[object_slice[0], object_slice[1]] = True

                if (np.logical_and(object_mask, bbox_mask)).sum() > 0:
                    object_masks.append(object_mask)

            if len(object_masks) > 0:
                object_masks = np.array(object_masks)
                # object_masks = np.logical_or.reduce(object_masks)

                intersection = np.logical_and(object_masks, bbox_mask)
                union = np.logical_or(object_masks, bbox_mask)
                iou = intersection.sum(axis=(1, 2)) / union.sum(axis=(1, 2))
                iou = np.amax(iou)
                num_correct_pred += np.greater_equal(iou, ior_threshold)

        if method == 'ior_percentile_bb_dynamic_nih':
            bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
            activation_mask = raw_cam >= np.percentile(raw_cam,
                                                       (100 - bbox_area_ratio))

            label_im, nb_labels = ndimage.label(activation_mask)
            object_slices = ndimage.find_objects(label_im)

            object_masks = []
            for object_slice in object_slices:
                object_mask = np.zeros(label_im.shape, dtype=bool)
                object_mask[object_slice[0], object_slice[1]] = True

                if (np.logical_and(object_mask, bbox_mask)).sum() > 0:
                    object_masks.append(object_mask)

            if len(object_masks) > 0:
                object_masks = np.array(object_masks)
                # object_masks = np.logical_or.reduce(object_masks)

                intersection = np.logical_and(object_masks, bbox_mask)
                # union = np.logical_or(object_masks, bbox_mask)
                # note: despite the name, this ratio is IoR (intersection over
                # the detected region area), computed per connected component
                iou = intersection.sum(axis=(1, 2)) / object_masks.sum(
                    axis=(1, 2))
                iou = np.amax(iou)
                num_correct_pred += np.greater_equal(iou, ior_threshold)

    accuracy = num_correct_pred / num_images_examined
    return accuracy
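
The compute_iou and compute_ior helpers used by the percentile methods above are not included in this excerpt; the sketch below is consistent with how they are called (boolean masks, optionally stacked along a leading percentile axis), but the signatures are assumptions:

import numpy as np

def compute_iou(activation_mask, gt_mask):
    # Sum over the two trailing spatial axes so a stack of masks
    # (percentiles, H, W) yields one IoU per mask.
    axes = tuple(range(activation_mask.ndim - 2, activation_mask.ndim))
    intersection = np.logical_and(activation_mask, gt_mask).sum(axis=axes)
    union = np.logical_or(activation_mask, gt_mask).sum(axis=axes)
    return intersection / union

def compute_ior(activation_mask, gt_mask):
    # Intersection over the detected region, not over the union.
    axes = tuple(range(activation_mask.ndim - 2, activation_mask.ndim))
    intersection = np.logical_and(activation_mask, gt_mask).sum(axis=axes)
    return intersection / activation_mask.sum(axis=axes)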
Example #38
0
def show_next(cxr, model, label, inputs, filename, bbox):
    """
    Plots CXR, activation map of CXR, and shows model probabilities of findings

    Args:
        dataloader: dataloader of test CXRs
        model: fine-tuned torchvision densenet-121
        LABEL: finding we're interested in seeing heatmap for
    Returns:
        None (plots output)
    """

    raw_cam = calc_cam(inputs, label, model)
    print('range:')
    print(np.ptp(raw_cam))
    print('percentile:')
    print(np.percentile(raw_cam, 4))
    print('avg:')
    print(np.mean(raw_cam))

    raw_cam = np.array(
        Image.fromarray(raw_cam.squeeze()).resize((224, 224), Image.NEAREST))

    # bounding box as a mask
    bbox_mask = np.zeros(raw_cam.shape, dtype=bool)
    bbox_mask[bbox[0, 1]:bbox[0, 1] + bbox[0, 3],
              bbox[0, 0]:bbox[0, 0] + bbox[0, 2]] = True

    bbox_area_ratio = (bbox_mask.sum() / bbox_mask.size) * 100
    activation_mask = np.logical_or(raw_cam >= 180, raw_cam <= 60)
    heat_mask = np.logical_and(raw_cam < 180, raw_cam > 60)

    # finding components in heatmap
    label_im, nb_labels = ndimage.label(activation_mask)
    # print('nb_labels:')
    # print(nb_labels)
    # print('label_im:')
    # print(label_im)

    # heat_mask = label_im == 0
    #
    # components_masks = []
    # for label in range(1, nb_labels + 1):
    #     component_mask = label_im == label
    #     components_masks.append(component_mask)
    object_slices = ndimage.find_objects(label_im)
    detected_patches = []
    for object_slice in object_slices:
        y_slice = object_slice[0]
        x_slice = object_slice[1]
        xy_corner = (x_slice.start, y_slice.start)
        x_length = x_slice.stop - x_slice.start
        y_length = y_slice.stop - y_slice.start
        detected_patch = patches.Rectangle(xy_corner,
                                           x_length,
                                           y_length,
                                           linewidth=2,
                                           edgecolor='m',
                                           facecolor='none',
                                           zorder=2)
        detected_patches.append(detected_patch)

        print(object_slice)

    object_masks = []
    for object_slice in object_slices:
        object_mask = np.zeros(label_im.shape, dtype=bool)
        object_mask[object_slice[0], object_slice[1]] = True
        object_masks.append(object_mask)
    object_masks = np.array(object_masks)

    object_masks_union = np.logical_or.reduce(object_masks)

    def compute_ior(activated_mask, gt_mask):
        intersection_mask = np.logical_and(activated_mask, gt_mask)
        detected_region_area = np.sum(activated_mask)
        # print('detected_area:')
        # print(detected_region_area)
        intersection_area = np.sum(intersection_mask)
        # print('intersection:')
        # print(intersection_area)
        ior = intersection_area / detected_region_area
        return ior

    ior = compute_ior(activation_mask, bbox_mask)
    print('ior:')
    print(ior)
    iobb = compute_ior(object_masks_union, bbox_mask)
    print('iobb:')
    print(iobb)

    fig, (showcxr, heatmap) = plt.subplots(ncols=2, figsize=(14, 5))

    hmap = sns.heatmap(
        raw_cam.squeeze(),
        cmap='viridis',
        # vmin= -200, vmax=100,
        mask=heat_mask,
        # alpha = 0.8, # whole heatmap is translucent
        annot=False,
        zorder=2,
        linewidths=0)

    hmap.imshow(cxr, zorder=1)  # put the map under the heatmap
    hmap.axis('off')
    hmap.set_title('Own Implementation for category {}'.format(label),
                   fontsize=8)

    rect = patches.Rectangle((bbox[0, 0], bbox[0, 1]),
                             bbox[0, 2],
                             bbox[0, 3],
                             linewidth=2,
                             edgecolor='r',
                             facecolor='none',
                             zorder=2)
    hmap.add_patch(rect)

    for patch in detected_patches:
        hmap.add_patch(patch)

    rect_original = patches.Rectangle((bbox[0, 0], bbox[0, 1]),
                                      bbox[0, 2],
                                      bbox[0, 3],
                                      linewidth=2,
                                      edgecolor='r',
                                      facecolor='none',
                                      zorder=2)

    showcxr.imshow(cxr)
    showcxr.axis('off')
    showcxr.set_title(filename[0])
    showcxr.add_patch(rect_original)
    # plt.savefig(str(LABEL+"_P"+str(predx[label_index])+"_file_"+filename[0]))
    plt.show()
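
The slice-to-Rectangle conversion used above is a reusable pattern; a self-contained sketch with a toy mask standing in for the thresholded CAM:

import numpy as np
from scipy import ndimage
import matplotlib.patches as patches

toy_mask = np.zeros((10, 10), dtype=bool)
toy_mask[2:5, 3:7] = True
toy_mask[7:9, 1:3] = True

label_im, _ = ndimage.label(toy_mask)
rects = []
for y_slice, x_slice in ndimage.find_objects(label_im):
    # Rectangle expects the (x, y) corner first, then width and height.
    rects.append(patches.Rectangle((x_slice.start, y_slice.start),
                                   x_slice.stop - x_slice.start,
                                   y_slice.stop - y_slice.start,
                                   fill=False, edgecolor='m', linewidth=2))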
Example #39
0
coords = pp.Z, pp.X, pp.T
data = pp.U, pp.V, pp.W

all_coords = np.concatenate([c[None] for c in coords])

data = pp.U[:, :, 2000:-2000]

invalid = np.isnan(data)
invalid_with_shell = ndi.binary_dilation(invalid,
                                         iterations=1,
                                         structure=np.ones((3, 3, 3)))
complete_valid_shell = invalid_with_shell & ~invalid

volumes, n = ndi.label(invalid_with_shell)
slices = ndi.find_objects(volumes)


def interpolate_region(region_slice):
    nans = invalid[region_slice]
    shell = complete_valid_shell[region_slice]

    # np.vstack needs a sequence, not a bare generator, in current numpy
    valid_points = np.vstack([c[region_slice][shell] for c in coords]).T
    valid_values = data[region_slice][shell]

    interpolator = interp.LinearNDInterpolator(valid_points, valid_values)

    invalid_points = np.vstack([c[region_slice][nans] for c in coords]).T
    invalid_values = interpolator(invalid_points).astype(valid_values.dtype)

    return region_slice, invalid_values
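
The snippet ends at the return, so how the interpolated values are written back is not shown; a plausible completion (an assumption) fills the NaNs of each labelled region in place:

for sl in slices:
    sl, values = interpolate_region(sl)
    # basic slicing returns a view, so this writes through to `data`
    data[sl][invalid[sl]] = values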
Example #40
0
def prepare_scenenet_data(
    data_path: str, protobuf_path: str
) -> Tuple[List[List[str]], List[List[str]], List[List[List[dict]]]]:
    """
    Prepares the SceneNet RGB-D data and returns it in Python format.
    
    Args:
        data_path: path to the SceneNet RGB-D data set
        protobuf_path: path to the SceneNet RGB-D protobuf
    Returns:
        file names photos, file names instances, instances
    """
    from twomartens.masterthesis import definitions
    from twomartens.masterthesis import scenenet_pb2

    trajectories = scenenet_pb2.Trajectories()
    with open(protobuf_path, 'rb') as file:
        trajectories.ParseFromString(file.read())

    sorted_trajectories = sorted(trajectories.trajectories,
                                 key=lambda k: k.render_path)
    file_names_photos = []
    file_names_instances = []
    instances = []
    for trajectory in tqdm.tqdm(sorted_trajectories,
                                desc="preparing trajectories"):
        path = f"{data_path}/{trajectory.render_path}"
        file_names_photos_traj = []
        file_names_instances_traj = []
        instances_traj = []
        instances_traj_dict = {}

        for instance in trajectory.instances:
            instance_type = instance.instance_type
            instance_id = instance.instance_id
            instance_dict = {}
            if instance_type != scenenet_pb2.Instance.BACKGROUND:
                wnid = instance.semantic_wordnet_id
                wn_class = instance.semantic_english
                instance_dict['wordnet_id'] = wnid
                instance_dict['wordnet_class_name'] = wn_class
                if wnid in definitions.WNID_TO_COCO:
                    instance_dict['coco_id'] = definitions.WNID_TO_COCO[wnid]
                else:
                    continue  # only save instances that are positive instances and not background

                instances_traj_dict[instance_id] = instance_dict

        # iterate through images/frames
        for view in trajectory.views:
            frame_num = view.frame_num
            instance_file = f"{path}/instance/{frame_num}.png"
            file_names_photos_traj.append(f"{path}/photo/{frame_num}.jpg")
            file_names_instances_traj.append(instance_file)
            instances_view = []

            # load instance file (note: scipy.misc.imread was removed in
            # SciPy >= 1.2; imageio.imread is the usual replacement)
            instance_image = scipy.misc.imread(instance_file)
            for instance_id in instances_traj_dict:
                instance_local = np.copy(instance_image)
                instance_local[instance_local != instance_id] = 0
                instance_local[instance_local == instance_id] = 1
                coordinates = ndimage.find_objects(instance_local)
                if coordinates is None or not coordinates:  # the current instance was not in this frame
                    continue
                else:
                    coordinates = coordinates[
                        0]  # extract the coords of the one object

                x = coordinates[1]
                y = coordinates[0]
                xmin, xmax = x.start, x.stop
                ymin, ymax = y.start, y.stop
                instance = instances_traj_dict[instance_id].copy()
                instance['bbox'] = (xmin, ymin, xmax, ymax)
                instances_view.append(instance)

            instances_traj.append(instances_view)

        file_names_photos.append(file_names_photos_traj)
        file_names_instances.append(file_names_instances_traj)
        instances.append(instances_traj)

    return file_names_photos, file_names_instances, instances
Example #41
0
def find_xyz_cut_coords(img, mask=None, activation_threshold=None):
    """ Find the center of the largest activation connected component.

        Parameters
        -----------
        img : 3D Nifti1Image
            The brain map.
        mask : 3D ndarray, boolean, optional
            An optional brain mask.
        activation_threshold : float, optional
            The lower threshold to the positive activation. If None, the
            activation threshold is computed using the 80% percentile of
            the absolute value of the map.

        Returns
        -------
        x : float
            the x world coordinate.
        y : float
            the y world coordinate.
        z : float
            the z world coordinate.
    """
    # if a pseudo-4D image or several images were passed (cf. #922),
    # we reduce to a single 3D image to find the coordinates
    img = check_niimg_3d(img)
    data = _safe_get_data(img)

    # To speed up computations, we work with partial views of the array,
    # and keep track of the offset
    offset = np.zeros(3)

    # Deal with masked arrays:
    if hasattr(data, 'mask'):
        not_mask = np.logical_not(data.mask)
        if mask is None:
            mask = not_mask
        else:
            mask *= not_mask
        data = np.asarray(data)

    # Get rid of potential memmapping
    data = as_ndarray(data)
    my_map = data.copy()
    if mask is not None:
        # check against empty mask
        if mask.sum() == 0.:
            warnings.warn(
                "Provided mask is empty. Returning center of mass instead.")
            cut_coords = ndimage.center_of_mass(np.abs(my_map)) + offset
            x_map, y_map, z_map = cut_coords
            return np.asarray(
                coord_transform(x_map, y_map, z_map,
                                img.affine)).tolist()
        slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
        my_map = my_map[slice_x, slice_y, slice_z]
        mask = mask[slice_x, slice_y, slice_z]
        my_map *= mask
        offset += [slice_x.start, slice_y.start, slice_z.start]

    # Testing min and max is faster than np.all(my_map == 0)
    if (my_map.max() == 0) and (my_map.min() == 0):
        return .5 * np.array(data.shape)
    if activation_threshold is None:
        activation_threshold = fast_abs_percentile(my_map[my_map != 0].ravel(),
                                                   80)
    mask = np.abs(my_map) > activation_threshold - 1.e-15
    # mask may be zero everywhere in rare cases
    if mask.max() == 0:
        return .5 * np.array(data.shape)
    mask = largest_connected_component(mask)
    slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
    my_map = my_map[slice_x, slice_y, slice_z]
    mask = mask[slice_x, slice_y, slice_z]
    my_map *= mask
    offset += [slice_x.start, slice_y.start, slice_z.start]

    # For the second threshold, we use a mean, as it is much faster,
    # although it is less robust
    second_threshold = np.abs(np.mean(my_map[mask]))
    second_mask = (np.abs(my_map) > second_threshold)
    if second_mask.sum() > 50:
        my_map *= largest_connected_component(second_mask)
    cut_coords = ndimage.center_of_mass(np.abs(my_map))
    x_map, y_map, z_map = cut_coords + offset

    # Return as a list of scalars
    return np.asarray(coord_transform(x_map, y_map, z_map,
                                      img.affine)).tolist()
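
largest_connected_component is imported from elsewhere in the package; a minimal sketch of the behaviour assumed above (keep only the most populous label):

import numpy as np
from scipy import ndimage

def largest_connected_component(mask):
    labels, n_labels = ndimage.label(mask)
    if n_labels == 0:
        return mask
    # pixel count per label, then keep the biggest one
    sizes = ndimage.sum(mask, labels, index=range(1, n_labels + 1))
    return labels == (np.argmax(sizes) + 1)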
Example #42
0
    def FindObjects(self, thresholdFactor, numThresholdSteps="default", blurRadius=1.5, blurRadiusZ=1.5, mask=None):
        """Finds point-like objects by subjecting the data to a band-pass filtering (as defined when 
        creating the identifier) followed by z-projection and a thresholding procedure where the 
        threshold is progressively decreased from a maximum value (half the maximum intensity in the image) to a 
        minimum defined as [thresholdFactor]*the mode (most frequently occuring value, 
        should correspond to the background) of the image. The number of steps can be given as 
        [numThresholdSteps], with defualt being 5 when filterMode="fast" and 10 for filterMode="good".
        At each step the thresholded image is blurred with a Gaussian of radius [blurRadius] to 
        approximate the image of the points found in that step, and subtracted from the original, thus
        removing the objects from the image such that they are not detected at the lower thresholds.
        This allows the detection of objects which are relatively close together and spread over a 
        large range of intenstities. A binary mask [mask] may be applied to the image to specify a region
        (e.g. a cell) in which objects are to be detected.

        A copy of the filtered image is saved such that subsequent calls to FindObjects with, e.g., a
        different thresholdFactor are faster."""
        
        #save a copy of the parameters.
        self.thresholdFactor = thresholdFactor
        self.estSN = False
        
        if (numThresholdSteps == "default"):
            self.numThresholdSteps = 10
        elif (numThresholdSteps == 'Estimate S/N'):
            self.numThresholdSteps = 0
            self.estSN = True
        else:
            self.numThresholdSteps = int(numThresholdSteps)
            
        self.blurRadius = blurRadius
        self.blurRadiusZ = blurRadiusZ
        self.mask = mask

        #clear the list of previously found points
        del self[:]
        
        #do filtering
        filteredData = np.maximum(self.__FilterData(), 0)
        
        #apply mask
        if not (self.mask is None):
            maskedFilteredData = filteredData*self.mask
        else:
            maskedFilteredData = filteredData

        #manually mask the edge pixels
        if maskedFilteredData.ndim == 3 and maskedFilteredData.shape[2] > 1:
            maskedFilteredData[0:5, 0:5, :] = 0
            maskedFilteredData[0:5, -5:,:] = 0
            maskedFilteredData[-5:, -5:,:] = 0
            maskedFilteredData[-5:, 0:5,:] = 0
            maskedFilteredData[:,:,:3] = 0
            maskedFilteredData[:,:,-3:] = 0
        else:
            maskedFilteredData[0:5, 0:5] = 0
            maskedFilteredData[0:5, -5:] = 0
            maskedFilteredData[-5:, -5:] = 0
            maskedFilteredData[-5:, 0:5] = 0
        
        if self.numThresholdSteps > 0:
            #determine (approximate) mode
            #N, bins = scipy.histogram(maskedFilteredData, bins=200)
            #posMax = N.argmax() #find bin with maximum number of counts
            #modeApp = bins[posMax:(posMax+1)].mean() #bins contains left-edges - find middle of most frequent bin
            
            #modeApp = np.median(maskedFilteredData)

            #catch the corner case where the mode could be zero - this is highly unlikely, but if it were
            #to occur one would no longer be able to influence the threshold with threshFactor
            #if (abs(modeApp) < 1): 
            #    modeApp = 1
            
            modeApp = maskedFilteredData.max()/100.

            #calc thresholds
            self.lowerThreshold = modeApp*self.thresholdFactor 
            self.upperThreshold = maskedFilteredData.max()/2 
            
        else:
            if self.estSN:
                self.lowerThreshold = self.thresholdFactor*np.sqrt(np.median(self.data.ravel()))
            else:
                self.lowerThreshold = self.thresholdFactor

        
        X,Y,Z = scipy.mgrid[0:maskedFilteredData.shape[0], 0:maskedFilteredData.shape[1],0:maskedFilteredData.shape[2]]
    
        if (self.numThresholdSteps == 0): #don't do threshold scan - just use lower threshold (faster)
            im = maskedFilteredData
            #View3D(im)
            (labeledPoints, nLabeled) = ndimage.label(im > self.lowerThreshold)
            
            objSlices = ndimage.find_objects(labeledPoints)
            
            #loop over objects
            for i in range(nLabeled):
                #measure position
                #x,y = ndimage.center_of_mass(im, labeledPoints, i)
                imO = im[objSlices[i]]
                x = (X[objSlices[i]]*imO).sum()/imO.sum()
                y = (Y[objSlices[i]]*imO).sum()/imO.sum()
                z = (Z[objSlices[i]]*imO).sum()/imO.sum()
                #and add to list
                self.append(OfindPoint(x,y,z,detectionThreshold=self.lowerThreshold))
        else: #do threshold scan (default)

            #generate threshold range - note slightly awkward specification of lower and upper bounds as the stop bound is excluded from arange
            #self.thresholdRange = scipy.arange(self.upperThreshold, self.lowerThreshold - (self.upperThreshold - self.lowerThreshold)/(self.numThresholdSteps -1), - (self.upperThreshold - self.lowerThreshold)/(self.numThresholdSteps))
            self.thresholdRange = np.logspace(np.log10(self.upperThreshold), np.log10(self.lowerThreshold), self.numThresholdSteps)
            print(('Thresholds:', self.thresholdRange))

            #get a working copy of the filtered data
            im = maskedFilteredData.copy()

        

            #use for quickly determining the number of pixels in a slice (there must be a better way)
            corrWeightRef = np.ones(im.shape)
            

        
            for threshold in self.thresholdRange:
                #View3D(im)
                #apply threshold and label regions
                (labeledPoints, nLabeled) = ndimage.label(im > threshold)

                #initialise correction weighting mask
                corrWeights = np.zeros(im.shape, 'f')
            
                #get 'address' of each object
                objSlices = ndimage.find_objects(labeledPoints)

                #loop over objects (objSlices is 0-indexed over labels 1..nLabeled)
                for i in range(nLabeled):
                    #measure position
                    #x,y = ndimage.center_of_mass(im, labeledPoints, i)
                    nPixels = corrWeightRef[objSlices[i]].sum()
                    imO = im[objSlices[i]]
                    x = (X[objSlices[i]]*imO).sum()/imO.sum()
                    y = (Y[objSlices[i]]*imO).sum()/imO.sum()
                    z = (Z[objSlices[i]]*imO).sum()/imO.sum()
                    #and add to list
                    self.append(OfindPoint(x,y,z,detectionThreshold=threshold))

                    #now work out weights for correction image (N.B. this is somewhat empirical)
                    corrWeights[objSlices[i]] = 1.0/np.sqrt(nPixels)

                #calculate correction matrix
                corr = ndimage.gaussian_filter(((2*self.blurRadius)**2)*(2*self.blurRadiusZ)*1.5*im*corrWeights, [self.blurRadius, self.blurRadius, self.blurRadiusZ])
                #View3D([im, corr, corrWeights])

                #subtract from working image
                im  = np.maximum(im - corr, 0)

                #pylab.figure()
                #pylab.imshow(corr)
                #pylab.colorbar()

                #pylab.figure()
                #pylab.imshow(im)
                #pylab.colorbar()

                #clip border pixels again
                if maskedFilteredData.ndim == 3 and maskedFilteredData.shape[2] > 1:
                    im[0:5, 0:5, :] = 0
                    im[0:5, -5:,:] = 0
                    im[-5:, -5:,:] = 0
                    im[-5:, 0:5,:] = 0
    
                    im[:,:,:3]  = 0
                    im[:,:,-3:] = 0
                else:
                    im[0:5, 0:5] = 0
                    im[0:5, -5:] = 0
                    im[-5:, -5:] = 0
                    im[-5:, 0:5] = 0

                print((len(self)))

        #create pseudo lists to allow indexing along the lines of self.x[i]
        self.x = PseudoPointList(self, 'x')
        self.y = PseudoPointList(self, 'y')
        self.z = PseudoPointList(self, 'z')
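
The manual centroid computed from the X, Y, Z grids above is an intensity-weighted centre of mass; the same measurement can be sketched with ndimage directly (a simplification, not the actual implementation used here):

import numpy as np
from scipy import ndimage

im = np.random.rand(32, 32, 8)
labeled, n_labeled = ndimage.label(im > 0.95)
# one intensity-weighted centre per labelled object
centres = ndimage.center_of_mass(im, labeled, index=range(1, n_labeled + 1))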
Example #43
0
def _findobjs(data,
              threshold,
              kernel,
              min_separation=None,
              exclude_border=False,
              local_peaks=True):
    """
    Find sources in an image by convolving the image with the input
    kernel and selecting connected pixels above a given threshold.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    threshold : float
        The absolute image value above which to select sources.  Note
        that this threshold is not the same threshold input to
        ``daofind`` or ``irafstarfind``.  It should be multiplied by the
        kernel relerr.

    kernel : `_FindObjKernel`
        The convolution kernel.  The dimensions should match those of
        the cutouts.  The kernel should be normalized to zero sum.

    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders.  The default is
        `False`, which is the mode used by `DAOFIND`_ and `starfind`_.

    local_peaks : bool, optional
        Set to `True` to exactly match the `DAOFIND`_ method of finding
        local peaks.  If `False`, then only one peak per thresholded
        segment will be used.

    Returns
    -------
    objects : list of `_ImgCutout`
        A list of `_ImgCutout` objects containing the image cutout for
        each source.


    .. _DAOFIND: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind
    .. _starfind: http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
    """

    from scipy import ndimage

    x_kernradius = kernel.kern.shape[1] // 2
    y_kernradius = kernel.kern.shape[0] // 2

    if not exclude_border:
        # create a larger image padded by zeros
        ysize = int(data.shape[0] + (2. * y_kernradius))
        xsize = int(data.shape[1] + (2. * x_kernradius))
        data_padded = np.zeros((ysize, xsize))
        data_padded[y_kernradius:y_kernradius + data.shape[0],
                    x_kernradius:x_kernradius + data.shape[1]] = data
        data = data_padded

    convolved_data = filter_data(data,
                                 kernel.kern,
                                 mode='constant',
                                 fill_value=0.0,
                                 check_normalization=False)

    if not exclude_border:
        # keep border=0 in convolved data
        convolved_data[:y_kernradius, :] = 0.
        convolved_data[-y_kernradius:, :] = 0.
        convolved_data[:, :x_kernradius] = 0.
        convolved_data[:, -x_kernradius:] = 0.

    selem = ndimage.generate_binary_structure(2, 2)
    object_labels, nobjects = ndimage.label(convolved_data > threshold,
                                            structure=selem)
    objects = []
    if nobjects == 0:
        return objects

    # find object peaks in the convolved data
    if local_peaks:
        # footprint overrides min_separation in find_peaks
        if min_separation is None:  # daofind
            footprint = kernel.mask.astype(bool)
        else:
            from skimage.morphology import disk
            footprint = disk(min_separation)
        tbl = find_peaks(convolved_data, threshold, footprint=footprint)
        coords = np.transpose([tbl['y_peak'], tbl['x_peak']])
    else:
        object_slices = ndimage.find_objects(object_labels)
        coords = []
        for object_slice in object_slices:
            # thresholded_object is not the same size as the kernel
            thresholded_object = convolved_data[object_slice]
            ypeak, xpeak = np.unravel_index(thresholded_object.argmax(),
                                            thresholded_object.shape)
            xpeak += object_slice[1].start
            ypeak += object_slice[0].start
            coords.append((ypeak, xpeak))

    for (ypeak, xpeak) in coords:
        # now extract the object from the data, centered on the peak
        # pixel in the convolved image, with the same size as the kernel
        x0 = xpeak - x_kernradius
        x1 = xpeak + x_kernradius + 1
        y0 = ypeak - y_kernradius
        y1 = ypeak + y_kernradius + 1
        if x0 < 0 or x1 > data.shape[1]:
            continue  # pragma: no cover (isolated continue is never tested)
        if y0 < 0 or y1 > data.shape[0]:
            continue  # pragma: no cover (isolated continue is never tested)
        object_data = data[y0:y1, x0:x1]
        object_convolved_data = convolved_data[y0:y1, x0:x1].copy()
        if not exclude_border:
            # correct for image padding
            x0 -= x_kernradius
            y0 -= y_kernradius
        imgcutout = _ImgCutout(object_data, object_convolved_data, x0, y0)
        objects.append(imgcutout)
    return objects
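
generate_binary_structure(2, 2) above makes the labelling 8-connected, so diagonally touching pixels merge into a single source; a two-pixel demonstration of the difference:

import numpy as np
from scipy import ndimage

mask = np.array([[1, 0],
                 [0, 1]], dtype=bool)
_, n4 = ndimage.label(mask)  # default 4-connectivity: 2 objects
selem = ndimage.generate_binary_structure(2, 2)
_, n8 = ndimage.label(mask, structure=selem)  # 8-connectivity: 1 object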
Example #44
0
def regionprops(label_image,
                intensity_image=None,
                cache=True,
                coordinates=None,
                *,
                extra_properties=None):
    r"""Measure properties of labeled image regions.

    Parameters
    ----------
    label_image : (M, N[, P]) ndarray
        Labeled input image. Labels with value 0 are ignored.

        .. versionchanged:: 0.14.1
            Previously, ``label_image`` was processed by ``numpy.squeeze`` and
            so any number of singleton dimensions was allowed. This resulted in
            inconsistent handling of images with singleton dimensions. To
            recover the old behaviour, use
            ``regionprops(np.squeeze(label_image), ...)``.
    intensity_image : (M, N[, P][, C]) ndarray, optional
        Intensity (i.e., input) image with same size as labeled image, plus
        optionally an extra dimension for multichannel data. Currently,
        this extra channel dimension, if present, must be the last axis.
        Default is None.

        .. versionchanged:: 0.18.0
            The ability to provide an extra dimension for channels was added.
    cache : bool, optional
        Determine whether to cache calculated properties. The computation is
        much faster for cached properties, whereas the memory consumption
        increases.
    coordinates : DEPRECATED
        This argument is deprecated and will be removed in a future version
        of scikit-image.

        See :ref:`Coordinate conventions <numpy-images-coordinate-conventions>`
        for more details.

        .. deprecated:: 0.16.0
            Use "rc" coordinates everywhere. It may be sufficient to call
            ``numpy.transpose`` on your label image to get the same values as
            0.15 and earlier. However, for some properties, the transformation
            will be less trivial. For example, the new orientation is
            :math:`\frac{\pi}{2}` plus the old orientation.
    extra_properties : Iterable of callables
        Add extra property computation functions that are not included with
        skimage. The name of the property is derived from the function name,
        the dtype is inferred by calling the function on a small sample.
        If the name of an extra property clashes with the name of an existing
        property, the extra property will not be visible and a UserWarning is
        issued. A property computation function must take a region mask as its
        first argument. If the property requires an intensity image, it must
        accept the intensity image as the second argument.

    Returns
    -------
    properties : list of RegionProperties
        Each item describes one labeled region, and can be accessed using the
        attributes listed below.

    Notes
    -----
    The following properties can be accessed as attributes or keys:

    **area** : int
        Number of pixels of the region.
    **area_bbox** : int
        Number of pixels of bounding box.
    **area_convex** : int
        Number of pixels of convex hull image, which is the smallest convex
        polygon that encloses the region.
    **area_filled** : int
        Number of pixels of the region with all the holes filled in. Describes
        the area of the image_filled.
    **axis_major_length** : float
        The length of the major axis of the ellipse that has the same
        normalized second central moments as the region.
    **axis_minor_length** : float
        The length of the minor axis of the ellipse that has the same
        normalized second central moments as the region.
    **bbox** : tuple
        Bounding box ``(min_row, min_col, max_row, max_col)``.
        Pixels belonging to the bounding box are in the half-open interval
        ``[min_row; max_row)`` and ``[min_col; max_col)``.
    **centroid** : array
        Centroid coordinate tuple ``(row, col)``.
    **centroid_local** : array
        Centroid coordinate tuple ``(row, col)``, relative to region bounding
        box.
    **centroid_weighted** : array
        Centroid coordinate tuple ``(row, col)`` weighted with intensity
        image.
    **centroid_weighted_local** : array
        Centroid coordinate tuple ``(row, col)``, relative to region bounding
        box, weighted with intensity image.
    **coords** : (N, 2) ndarray
        Coordinate list ``(row, col)`` of the region.
    **eccentricity** : float
        Eccentricity of the ellipse that has the same second-moments as the
        region. The eccentricity is the ratio of the focal distance
        (distance between focal points) over the major axis length.
        The value is in the interval [0, 1).
        When it is 0, the ellipse becomes a circle.
    **equivalent_diameter_area** : float
        The diameter of a circle with the same area as the region.
    **euler_number** : int
        Euler characteristic of the set of non-zero pixels.
        Computed as number of connected components subtracted by number of
        holes (input.ndim connectivity). In 3D, number of connected
        components plus number of holes subtracted by number of tunnels.
    **extent** : float
        Ratio of pixels in the region to pixels in the total bounding box.
        Computed as ``area / (rows * cols)``
    **feret_diameter_max** : float
        Maximum Feret's diameter computed as the longest distance between
        points around a region's convex hull contour as determined by
        ``find_contours``. [5]_
    **image** : (H, J) ndarray
        Sliced binary region image which has the same size as bounding box.
    **image_convex** : (H, J) ndarray
        Binary convex hull image which has the same size as bounding box.
    **image_filled** : (H, J) ndarray
        Binary region image with filled holes which has the same size as
        bounding box.
    **image_intensity** : ndarray
        Image inside region bounding box.
    **inertia_tensor** : ndarray
        Inertia tensor of the region for the rotation around its mass.
    **inertia_tensor_eigvals** : tuple
        The eigenvalues of the inertia tensor in decreasing order.
    **intensity_max** : float
        Value with the greatest intensity in the region.
    **intensity_mean** : float
        Value with the mean intensity in the region.
    **intensity_min** : float
        Value with the least intensity in the region.
    **label** : int
        The label in the labeled input image.
    **moments** : (3, 3) ndarray
        Spatial moments up to 3rd order::

            m_ij = sum{ array(row, col) * row^i * col^j }

        where the sum is over the `row`, `col` coordinates of the region.
    **moments_central** : (3, 3) ndarray
        Central moments (translation invariant) up to 3rd order::

            mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j }

        where the sum is over the `row`, `col` coordinates of the region,
        and `row_c` and `col_c` are the coordinates of the region's centroid.
    **moments_hu** : tuple
        Hu moments (translation, scale and rotation invariant).
    **moments_normalized** : (3, 3) ndarray
        Normalized moments (translation and scale invariant) up to 3rd order::

            nu_ij = mu_ij / m_00^[(i+j)/2 + 1]

        where `m_00` is the zeroth spatial moment.
    **moments_weighted** : (3, 3) ndarray
        Spatial moments of intensity image up to 3rd order::

            wm_ij = sum{ array(row, col) * row^i * col^j }

        where the sum is over the `row`, `col` coordinates of the region.
    **moments_weighted_central** : (3, 3) ndarray
        Central moments (translation invariant) of intensity image up to
        3rd order::

            wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j }

        where the sum is over the `row`, `col` coordinates of the region,
        and `row_c` and `col_c` are the coordinates of the region's weighted
        centroid.
    **moments_weighted_hu** : tuple
        Hu moments (translation, scale and rotation invariant) of intensity
        image.
    **moments_weighted_normalized** : (3, 3) ndarray
        Normalized moments (translation and scale invariant) of intensity
        image up to 3rd order::

            wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1]

        where ``wm_00`` is the zeroth spatial moment (intensity-weighted area).
    **orientation** : float
        Angle between the 0th axis (rows) and the major
        axis of the ellipse that has the same second moments as the region,
        ranging from `-pi/2` to `pi/2` counter-clockwise.
    **perimeter** : float
        Perimeter of object which approximates the contour as a line
        through the centers of border pixels using a 4-connectivity.
    **perimeter_crofton** : float
        Perimeter of object approximated by the Crofton formula in 4
        directions.
    **slice** : tuple of slices
        A slice to extract the object from the source image.
    **solidity** : float
        Ratio of pixels in the region to pixels of the convex hull image.

    Each region also supports iteration, so that you can do::

      for prop in region:
          print(prop, region[prop])

    See Also
    --------
    label

    References
    ----------
    .. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
           Core Algorithms. Springer-Verlag, London, 2009.
    .. [2] B. Jähne. Digital Image Processing. Springer-Verlag,
           Berlin-Heidelberg, 6. edition, 2005.
    .. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
           Features, from Lecture notes in computer science, p. 676. Springer,
           Berlin, 1993.
    .. [4] https://en.wikipedia.org/wiki/Image_moment
    .. [5] W. Pabst, E. Gregorová. Characterization of particles and particle
           systems, pp. 27-28. ICT Prague, 2007.
           https://old.vscht.cz/sil/keramika/Characterization_of_particles/CPPS%20_English%20version_.pdf

    Examples
    --------
    >>> from skimage import data, util
    >>> from skimage.measure import label, regionprops
    >>> img = util.img_as_ubyte(data.coins()) > 110
    >>> label_img = label(img, connectivity=img.ndim)
    >>> props = regionprops(label_img)
    >>> # centroid of first labeled object
    >>> props[0].centroid
    (22.72987986048314, 81.91228523446583)
    >>> # centroid of first labeled object
    >>> props[0]['centroid']
    (22.72987986048314, 81.91228523446583)

    Add custom measurements by passing functions as ``extra_properties``

    >>> from skimage import data, util
    >>> from skimage.measure import label, regionprops
    >>> import numpy as np
    >>> img = util.img_as_ubyte(data.coins()) > 110
    >>> label_img = label(img, connectivity=img.ndim)
    >>> def pixelcount(regionmask):
    ...     return np.sum(regionmask)
    >>> props = regionprops(label_img, extra_properties=(pixelcount,))
    >>> props[0].pixelcount
    7741
    >>> props[1]['pixelcount']
    42

    """

    if label_image.ndim not in (2, 3):
        raise TypeError('Only 2-D and 3-D images supported.')

    if not np.issubdtype(label_image.dtype, np.integer):
        if np.issubdtype(label_image.dtype, bool):
            raise TypeError('Non-integer image types are ambiguous: '
                            'use skimage.measure.label to label the connected '
                            'components of label_image, '
                            'or label_image.astype(np.uint8) to interpret '
                            'the True values as a single label.')
        else:
            raise TypeError('Non-integer label_image types are ambiguous')

    if coordinates is not None:
        if coordinates == 'rc':
            msg = ('The coordinates keyword argument to skimage.measure.'
                   'regionprops is deprecated. All features are now computed '
                   'in rc (row-column) coordinates. Please remove '
                   '`coordinates="rc"` from all calls to regionprops before '
                   'updating scikit-image.')
            warn(msg, stacklevel=2, category=FutureWarning)
        else:
            msg = ('Values other than "rc" for the "coordinates" argument '
                   'to skimage.measure.regionprops are no longer supported. '
                   'You should update your code to use "rc" coordinates and '
                   'stop using the "coordinates" argument, or use skimage '
                   'version 0.15.x or earlier.')
            raise ValueError(msg)

    regions = []

    objects = ndi.find_objects(label_image)
    for i, sl in enumerate(objects):
        if sl is None:
            continue

        label = i + 1

        props = RegionProperties(sl,
                                 label,
                                 label_image,
                                 intensity_image,
                                 cache,
                                 extra_properties=extra_properties)
        regions.append(props)

    return regions
Example #45
0
    def calculateHomologyRank(self, ids=None, dim=None):
        """
        Calculates the rank of the homology group for dimensionality dim.

        Currently implemented for dim = 0, self.ndim-1 and self.ndim (trivial).
        If dim is None, the ranks are calculated for the above three dim's 
        and the data is saved in self.homologyRank.

        Arguments:
          - ids: segment ids, if None self.ids is used
          - dim: dimensionality, 0 - self.ndim or None to calculate all

        Returns (ndarray) rank[id, dim].
        """

        # check ndim
        if dim is not None:
            if (dim > 0) and (dim < self.ndim - 1):
                raise NotImplementedError(
                    "Sorry, don't know how to calculate the rank of the " +
                    str(dim) + "-Homology group for an " + str(self.ndim) +
                    "-dimensional object.")

        # ids
        ids, max_id = self.findIds(ids=ids)

        # deal with no ids
        if max_id == 0:
            return numpy.zeros(shape=(max_id + 1, self.ndim + 1), dtype='int')

        if self.segments is not None:

            # single segments array
            if dim is None:

                # recursively calculate all faces
                h_rank = numpy.zeros(shape=(max_id + 1, self.ndim + 1),
                                     dtype='int')
                for i_dim in [0, self.ndim - 1, self.ndim]:
                    h_rank_i = self.calculateHomologyRank(ids=ids, dim=i_dim)
                    h_rank[:, i_dim] = h_rank_i
                return h_rank

            # get objects and expand them if a "non-existing" id > max id
            objects = ndimage.find_objects(self.segments)
            len_obj = len(objects)
            no_slice = self.ndim * [slice(0, 0)]
            if len_obj <= max_id:
                for id_ in range(len_obj, max_id + 1):
                    objects.append(tuple(no_slice))

            if dim == 0:

                # find separate segments
                h_rank = numpy.zeros(shape=(max_id + 1), dtype='int')
                h_rank[ids] = [
                    (ndimage.label(self.segments[objects[id_ - 1]] == id_,
                                   structure=self.structEl))[1] for id_ in ids
                ]
                h_rank[0] = sum(h_rank[id_] for id_ in ids)

            elif dim == self.ndim - 1:

                # find holes
                h_rank = numpy.zeros(shape=(max_id + 1), dtype='int')
                for id_ in ids:
                    data_inset = self.segments[objects[id_ - 1]]
                    filled = ndimage.binary_fill_holes(
                        data_inset == id_, structure=self.invStructEl)
                    inter = (filled == True) & (data_inset == 0)
                    h_rank[id_] = (ndimage.label(input=inter,
                                                 structure=self.structEl))[1]
                h_rank[0] = sum(h_rank[id_] for id_ in ids)

            elif dim == self.ndim:

                h_rank = numpy.zeros(shape=max_id + 1, dtype='int')

            return h_rank

        else:
            raise NotImplementedError("Sorry, dealing with Hierarchy objects",
                                      " hasn't been implemented yet.")
Example #46
0
    neighborhood_size = 6
    threshold = 6
    ref_min=43
    
    data_max = filters.maximum_filter(data, neighborhood_size)
    print(data_max.max())
    maxima = (data == data_max)
    data_min = filters.minimum_filter(data, neighborhood_size)
    print(data_min.max())
    diff = ((data_max - data_min) > threshold)
    #print "diff: ", diff
    maxima[diff == False] = 0
    maxima[data_max < ref_min] = 0
    
    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    x, y = [], []
    for dy,dx in slices:
        x_center = (dx.start + dx.stop - 1)/2
        x.append(x_center)
        y_center = (dy.start + dy.stop - 1)/2    
        y.append(y_center)

    plot_plt=True
    if plot_plt:
        plt.imshow(data, vmin=0, vmax=0.9*data_max.max())
        #plt.imshow(data, vmin=0, vmax=50)
        #plt.imshow(data)
        plt.autoscale(False)
        outputFile = outputDir+'odd_'+prop_str+'-'+area+'_'+yearS[2:]+monthS+dayS+hourS+minS +'.png'
        plt.savefig(outputFile, bbox_inches = 'tight')
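
The local-maximum detection above is a standard pattern; the same logic wrapped as a self-contained function (a sketch with the plotting stripped out):

import numpy as np
from scipy import ndimage

def find_peaks_2d(data, size=6, threshold=6, ref_min=43):
    data_max = ndimage.maximum_filter(data, size)
    data_min = ndimage.minimum_filter(data, size)
    maxima = (data == data_max)
    maxima &= (data_max - data_min) > threshold  # reject flat plateaus
    maxima &= data_max >= ref_min                # reject weak maxima
    labeled, _ = ndimage.label(maxima)
    # centre of each labelled peak from its bounding-box slices
    return [((dx.start + dx.stop - 1) / 2, (dy.start + dy.stop - 1) / 2)
            for dy, dx in ndimage.find_objects(labeled)]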
Example #47
0
if (options.imagefile is not None and options.labelfile is not None and options.output is not None):
    # load nifti file
    imagedata = nib.load(options.imagefile)
    numpyimage= imagedata.get_data().astype(IMG_DTYPE )

    # load nifti file
    truthdata = nib.load(options.labelfile )
    numpytruth= truthdata.get_data().astype(SEG_DTYPE)

    # error check
    assert numpyimage.shape == numpytruth.shape

    # bounding box for each label
    assert np.max(numpytruth) > 0 
    if( np.max(numpytruth) ==1 ) :
      (liverboundingbox,)  = ndimage.find_objects(numpytruth)
      tumorboundingbox  = None
    else:
      boundingboxes = ndimage.find_objects(numpytruth)
      liverboundingbox = boundingboxes[0]

    print(imagedata.shape,numpytruth.shape,liverboundingbox  )
    npimagebb = numpyimage[:,:, liverboundingbox[2] ]
    nptruthbb = numpytruth[:,:, liverboundingbox[2] ]

    imagebbcmd = 'c3d -verbose %s -dup %s -info -copy-transform -info -binarize -foreach -region 0x0x%dvox %dx%dx%dvox -info -type short -endfor -omc %s/image.nii -multiply -o %s/maskimage.nii ' % (options.imagefile, options.labelfile, int(liverboundingbox[2].start), imagedata.shape[0],imagedata.shape[1],int(liverboundingbox[2].stop-liverboundingbox[2].start),options.output,options.output )
    labelbbcmd = 'c3d -verbose %s -info -region 0x0x%dvox %dx%dx%dvox -info -type uchar -o %s/label.nii  ' % (options.labelfile, int(liverboundingbox[2].start), imagedata.shape[0],imagedata.shape[1],int(liverboundingbox[2].stop-liverboundingbox[2].start),options.output )
    print(imagebbcmd )
    os.system(imagebbcmd )
    print(labelbbcmd )
    os.system(labelbbcmd )
Example #48
0
def find_clusters(x, threshold, tail=0, connectivity=None):
    """For a given 1d-array (test statistic), find all clusters which
    are above/below a certain threshold. Returns a list of 2-tuples.

    Parameters
    ----------
    x: 1D array
        Data
    threshold: float
        Where to threshold the statistic
    tail : -1 | 0 | 1
        Type of comparison
    connectivity : sparse matrix in COO format
        Defines connectivity between features. The matrix is assumed to
        be symmetric and only the upper triangular half is used.
        Defaut is None, i.e, no connectivity.

    Returns
    -------
    clusters: list of slices or list of arrays (boolean masks)
        We use slices for 1D signals and mask to multidimensional
        arrays.

    sums: array
        Sum of x values in clusters
    """
    if tail not in [-1, 0, 1]:
        raise ValueError('invalid tail parameter')

    x = np.asanyarray(x)

    if tail == -1:
        x_in = x <= threshold
    elif tail == 1:
        x_in = x >= threshold
    else:
        x_in = np.abs(x) >= threshold

    if connectivity is None:
        labels, n_labels = ndimage.label(x_in)

        if x.ndim == 1:
            clusters = ndimage.find_objects(labels, n_labels)
            sums = ndimage.measurements.sum(x,
                                            labels,
                                            index=range(1, n_labels + 1))
        else:
            clusters = list()
            sums = np.empty(n_labels)
            for l in range(1, n_labels + 1):
                c = labels == l
                clusters.append(c)
                sums[l - 1] = np.sum(x[c])
    else:
        if x.ndim > 1:
            raise Exception("Data should be 1D when using a connectivity "
                            "to define clusters.")
        if np.sum(x_in) == 0:
            return [], np.empty(0)
        components = _get_components(x_in, connectivity)
        labels = np.unique(components)
        clusters = list()
        sums = list()
        for l in labels:
            c = (components == l)
            if np.any(x_in[c]):
                clusters.append(c)
                sums.append(np.sum(x[c]))
        sums = np.array(sums)
    return clusters, sums
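
A usage sketch for the 1D path without connectivity, showing the slice-based output described in the docstring:

import numpy as np

x = np.array([0., 2., 3., 0., 0., 4., 0.])
clusters, sums = find_clusters(x, threshold=1., tail=1)
# clusters -> [(slice(1, 3, None),), (slice(5, 6, None),)]
# sums     -> array([5., 4.])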
Example #49
0
def test_find_objects01():
    data = np.ones([], dtype=int)
    out = ndimage.find_objects(data)
    assert_(out == [()])
Example #50
0
def get_centroid_largest_blob(seg_mask):
    labeled_blobs = ndi.label(seg_mask)
    objs = ndi.find_objects(labeled_blobs[0])
    largest_obj = find_largest_obj(seg_mask, objs)
    return np.array(get_centroid(seg_mask, largest_obj))
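
find_largest_obj and get_centroid are defined elsewhere; minimal sketches consistent with how they are called above (the signatures are assumptions):

import numpy as np

def find_largest_obj(seg_mask, objs):
    # pick the bounding-box slice containing the most foreground pixels
    return max(objs, key=lambda sl: np.count_nonzero(seg_mask[sl]))

def get_centroid(seg_mask, obj_slice):
    # centroid in full-image coordinates
    ys, xs = np.nonzero(seg_mask[obj_slice])
    return (ys.mean() + obj_slice[0].start, xs.mean() + obj_slice[1].start)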
Example #51
0
def test_find_objects05():
    data = np.ones([5], dtype=int)
    out = ndimage.find_objects(data)
    assert_equal(out, [(slice(0, 5, None), )])
Example #52
0
def segment(image):
    # Resize the image, convert to gray and equalize histogram
    image = cv2.resize(image, (int(image.shape[1] * (100 / image.shape[0])), 100))
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image_gray = cv2.equalizeHist(image_gray)
    
    # Apply gaussian filter
    blur1 = cv2.GaussianBlur(image_gray, (5, 5), cv2.BORDER_DEFAULT)
    
    # Find elevation map from sobel edge detection
    elevation_map = sobel(blur1)
    
    # Place the markers in the interest points
    # less than 35 is black and should be the characters
    # more than 80 should be the background
    markers = np.zeros_like(blur1)
    markers[image_gray < 35] = 2
    markers[image_gray > 80] = 1
    
    # Segments the characters using morphology watershed operation
    # Which behaves as a water shed in real life
    # Takes the elevation map from Sobel edge detection and markers
    # For what to consider objects and what to consider background.
    segmentation = morphology.watershed(elevation_map, markers)
    
    
    # Fill the holes
    segmentation_fill = ndi.binary_fill_holes(segmentation - 1)
    
    # Change to greyscale image, since it is now a boolean array containing
    # True where an object was detected
    # False where background was found
    segmentation_s = np.where(segmentation_fill, 0, 255)
    segmentation_s = segmentation_s.astype(np.uint8)
    
    # Make the characters thinner
    segmentation_s = cv2.dilate(segmentation_s, np.ones((5, 5)), iterations=1)
    segmentation_s = cv2.erode(segmentation_s, np.ones((3, 3)), iterations=1)
    segmentation_s = cv2.dilate(segmentation_s, np.ones((3, 3)), iterations=1)
    segmentation_s = cv2.erode(segmentation_s, np.ones((5, 5)), iterations=1)
    
    # Get labeled image segments
    segmentation_fill = np.where(segmentation_s == 255, False, True)
    
    # Label the coins based on regions where a 'whole' object was detected
    labeled_coins, n_objects = ndi.label(segmentation_fill)
    
    # Sort the objects by x coordinate
    slices = ndi.find_objects(labeled_coins)
    slices = sorted(slices, key=lambda slice: slice[1].start)
    possible_chars = []
    last_end = 0
    
    index = 0
    ends = []
    for i in range(n_objects):
        curr_segment = segmentation[slices[i]]
        curr_segment = np.where(curr_segment > 1, 255, 0)
        curr_segment = curr_segment.astype(np.uint8)
        (height, width) = curr_segment.shape
        if width / height < 0.9 and height / width > 1.1 and height > image.shape[0] * 0.5:
            if last_end == 0:
                last_end = slices[i][1].stop
            elif last_end + 20 < slices[i][1].start:
                ends.append(index)
                last_end = slices[i][1].stop
                possible_chars.append(None)
            else:
                last_end = slices[i][1].stop
            possible_chars.append(curr_segment)
            index += 1
    if len(possible_chars) == 8 and len(ends) == 2:
        if ends[0] == 1 and ends[1] == 4:
            return get_characters(possible_chars, 0)
        elif ends[0] == 2:
            if ends[1] == 5:
                return get_characters(possible_chars, 1)
            elif ends[1] == 4:
                return get_characters(possible_chars, 2)
        elif ends[0] == 3 and ends[1] == 5:
            return get_characters(possible_chars, 3)
    
    return None, None
Example #53
0
def extract_storm_objects(label_grid,
                          data,
                          x_grid,
                          y_grid,
                          times,
                          dx=1,
                          dt=1,
                          obj_buffer=0):
    """
    After storms are labeled, this method extracts the storm objects from the grid and places them into STObjects.
    The STObjects contain intensity, location, and shape information about each storm at each timestep.

    Args:
        label_grid: 2D or 3D array output by label_storm_objects.
        data: 2D or 3D array used as input to label_storm_objects.
        x_grid: 2D array of x-coordinate data, preferably on a uniform spatial grid with units of length.
        y_grid: 2D array of y-coordinate data.
        times: List or array of time values, preferably as integers
        dx: grid spacing in same units as x_grid and y_grid.
        dt: period elapsed between times
        obj_buffer: number of extra pixels beyond bounding box of object to store in each STObject

    Returns:
        storm_objects: list of lists containing STObjects identified at each time.
    """
    storm_objects = []
    if len(label_grid.shape) == 3:
        ij_grid = np.indices(label_grid.shape[1:])
        for t, time in enumerate(times):
            storm_objects.append([])
            object_slices = list(
                find_objects(label_grid[t], label_grid[t].max()))
            if len(object_slices) > 0:
                for o, obj_slice in enumerate(object_slices):
                    if obj_buffer > 0:
                        obj_slice_buff = [
                            slice(
                                np.maximum(0, osl.start - obj_buffer),
                                np.minimum(osl.stop + obj_buffer,
                                           label_grid.shape[l + 1]))
                            for l, osl in enumerate(obj_slice)
                        ]
                    else:
                        obj_slice_buff = obj_slice
                    storm_objects[-1].append(
                        STObject(data[t][obj_slice_buff],
                                 np.where(
                                     label_grid[t][obj_slice_buff] == o + 1, 1,
                                     0),
                                 x_grid[obj_slice_buff],
                                 y_grid[obj_slice_buff],
                                 ij_grid[0][obj_slice_buff],
                                 ij_grid[1][obj_slice_buff],
                                 time,
                                 time,
                                 dx=dx,
                                 step=dt))
                    if t > 0:
                        dims = storm_objects[-1][-1].timesteps[0].shape
                        storm_objects[-1][-1].estimate_motion(
                            time, data[t - 1], dims[1], dims[0])
    else:
        ij_grid = np.indices(label_grid.shape)
        storm_objects.append([])
        object_slices = list(find_objects(label_grid, label_grid.max()))
        if len(object_slices) > 0:
            for o, obj_slice in enumerate(object_slices):
                if obj_buffer > 0:
                    obj_slice_buff = [
                        slice(
                            np.maximum(0, osl.start - obj_buffer),
                            np.minimum(osl.stop + obj_buffer,
                                       label_grid.shape[l + 1]))
                        for l, osl in enumerate(obj_slice)
                    ]
                else:
                    obj_slice_buff = obj_slice
                storm_objects[-1].append(
                    STObject(data[obj_slice_buff],
                             np.where(label_grid[obj_slice_buff] == o + 1, 1,
                                      0),
                             x_grid[obj_slice_buff],
                             y_grid[obj_slice_buff],
                             ij_grid[0][obj_slice_buff],
                             ij_grid[1][obj_slice_buff],
                             times[0],  # single timestep: start and end times coincide
                             times[0],
                             dx=dx,
                             step=dt))
    return storm_objects
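A minimal sketch of the scipy.ndimage.find_objects call at the core of the function above, using only numpy and scipy with a synthetic label grid. Passing max_label returns one entry per label value from 1 to max_label (None where a label is missing), which is why slices[o] pairs with the o + 1 comparison in the code above.

import numpy as np
from scipy.ndimage import find_objects

label_grid = np.array([[1, 1, 0, 0],
                       [1, 1, 0, 2],
                       [0, 0, 0, 2]])
# One tuple of slices per label value 1..max_label (None for missing labels).
slices = find_objects(label_grid, label_grid.max())
print(slices[0])  # (slice(0, 2, None), slice(0, 2, None)) -> bounding box of label 1
print(slices[1])  # (slice(1, 3, None), slice(3, 4, None)) -> bounding box of label 2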
Example #54
0
def all_preprocess():
    training_paths = pathlib.Path("../data/stage1_train").glob(
        "*/images/*.png")
    training_sorted = sorted(training_paths)
    im_path = training_sorted[sample_index]
    im = imageio.imread(str(im_path))

    print("Original image shape: {}".format(im.shape))
    im_gray = rgb2gray(im)
    print("Grayed image shape: {}".format(im_gray.shape))

    # binarize the image using Otsu's threshold
    thresh_val = threshold_otsu(im_gray)
    mask = np.where(im_gray > thresh_val, 1, 0)
    # imageio.imwrite("tmp2.png", mask)
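    # ensure the foreground (1s) is the minority class; invert the mask if needed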
    if np.sum(mask == 0) < np.sum(mask == 1):
        mask = np.where(mask, 0, 1)

    # label connected components: each value in `labels` is an object id,
    # and each entry of label_arrays is the binary mask of one object
    labels, nlabels = ndimage.label(mask)
    label_arrays = []
    for label_num in range(1, nlabels + 1):
        label_mask = np.where(labels == label_num, 1, 0)
        label_arrays.append(label_mask)
    imageio.imwrite("tmp3.png", label_arrays[0])

    print(
        "There are {} separate components / objects detected.".format(nlabels))

    rand_cmap = ListedColormap(np.random.rand(256, 3))
    print(rand_cmap)
    labels_for_display = np.where(labels > 0, labels, np.nan)
    # show the grayscale image as the background
    plt.imshow(im_gray, cmap="gray")
    plt.imshow(labels_for_display, cmap=rand_cmap)
    plt.axis("off")
    plt.title("Labeled Cells ({} cells)".format(nlabels))
    # plt.show()
    # find_objects returns the bounding-box slices of each labeled object.
    # Labels start at 1, so enumerate from 1 to match the label values.
    for label_ind, label_coords in enumerate(ndimage.find_objects(labels), start=1):
        cell = im_gray[label_coords]
        print(label_coords)
        # remove too small nuclei
        if np.prod(cell.shape) < 10:
            print('Label {} is too small! Setting to 0.'.format(label_ind))
            mask = np.where(labels == label_ind, 0, mask)

    # regenerate the labels
    labels, nlabels = ndimage.label(mask)
    print("There are now {} separate components / objects detected.".format(
        nlabels))

    # get the object slices and perform a binary opening procedure,
    # i.e. separate objects that have merged together
    two_cell_indices = ndimage.find_objects(labels)[1]
    cell_mask = mask[two_cell_indices]
    cell_mask_opened = ndimage.binary_opening(cell_mask, iterations=8)

    # RLE-encode a label mask (here the last one produced in the loop above)
    print("RLE Encoding for the current mask is: {}".format(
        rle_encoding(label_mask)))
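rle_encoding is not defined in this snippet; a plausible sketch for the Kaggle Data Science Bowl submission format (pairs of 1-indexed start position and run length, in column-major order) is:

import numpy as np

def rle_encoding(mask):
    # flatten in column-major order, as the Kaggle format requires
    dots = np.where(mask.T.flatten() == 1)[0]
    run_lengths = []
    prev = -2
    for b in dots:
        if b > prev + 1:           # start of a new run
            run_lengths.extend((b + 1, 0))
        run_lengths[-1] += 1       # extend the current run
        prev = b
    return run_lengths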
Example #55
0
        print('Normalizing automatically')
        norm = normalize(image, 1, 99.8, axis=(0, 1))

    images.append(norm)

# Run CellPose
masks, flows, styles, diams = model.eval(images,
                                         diameter=diameter,
                                         flow_threshold=None,
                                         channels=channels,
                                         net_avg=net_avg)

for one_mask, image_file in zip(masks, image_files):
    polygons = []

    slices = find_objects(one_mask.astype(int))
    for i, si in enumerate(slices):
        if si is not None:
            coords = [[], []]
            sr, sc = si
            mask = (one_mask[sr, sc] == (i + 1)).astype(np.uint8)
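            # cv.findContours returns (contours, hierarchy) in OpenCV >= 4 and
            # (image, contours, hierarchy) in OpenCV 3; indexing with [-2]
            # below picks the contour list under either version.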
            contours = cv.findContours(mask, cv.RETR_EXTERNAL,
                                       cv.CHAIN_APPROX_NONE)
            pvc, pvr = np.concatenate(contours[-2], axis=0).squeeze().T
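            # shift contour coordinates from the cropped patch back to full-image indices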
            vr, vc = pvr + sr.start, pvc + sc.start
            coords[0] = vr
            coords[1] = vc

            # sub_polygons.append(coords)
            polygons.append(coords)
Example #56
0
def wavenq(dat, thresh=caCYT_init * 2, verbose=False):
    nq = NQS("widx", "startt", "endt", "y", "speed", "durt", "overlap", "up")
    lab, nwaves = wavecut(dat, thresh)
    nq.clear(len(dat[0]) * nwaves)
    for widx in range(1, nwaves + 1):
        slicey, slicex = ndimage.find_objects(lab == widx)[0]
        midy = int(slicey.start + (slicey.stop - slicey.start) / 2.0)
        lastyup, lastydown, speed = midy, midy, 0
        lastyuph, lastydownh = midy, midy
        if verbose:
            print "slicex:", slicex.start, slicex.stop
            print "slicey:", slicey.start, slicey.stop
        # y0 + (y1-y0) * (threshold-f(y0)) /  (f(y1)-f(y0))
        x, y = slicex.start, midy
        if lab[y][x] == widx:
            endx, speed = scanx(lab, thresh, widx, x, y, slicex, y, lastyup)
            nq.append(widx, x * recdt, endx * recdt, y * spaceum, speed,
                      recdt * (endx - x), 0, 1)
        lastendxup, lastendxdown = -1, -1
        while x < slicex.stop:  # traverse through time
            found = False
            y = slicey.stop - 1  # look for highest point
            if verbose: print "x:", x, "y:", y, " = ", lab[y][x]
            while y >= slicey.start:  # starting above and going down until hit it
                if lab[y][x] == widx:
                    if x > 0:
                        if lab[y][x - 1] != widx:  # make sure on outer edge (avoids overlap)
                            found = True
                            break
                    else:
                        found = True
                        break
                y -= 1
            if found:
                yh = y
                if y + 1 < len(dat):
                    y0, y1 = y, y + 1  # y0 is part of wave, y1 is above its top at t=x so dat[y1][x] <= thresh
                    yh = y0 + (thresh - dat[y1][x]) / (dat[y0][x] - dat[y1][x])
                if verbose: print "found u ", x, y, yh
                endx, speed = scanx(lab, thresh, widx, x, y, slicex, yh,
                                    lastyuph)
                olap = 0
                if y == lastyup and x < lastendxup: olap = 1
                nq.append(widx, x * recdt, endx * recdt, yh * spaceum, speed,
                          recdt * (endx - x), olap, 1)
                lastyup, lastendxup, lastyuph = y, endx, yh
            found = False
            y = slicey.start  # look for lowest point
            while y < slicey.stop:  # starting below and going up until hit it
                if lab[y][x] == widx:
                    if x > 0:
                        if lab[y][x - 1] != widx:  # make sure on outer edge (avoids overlap)
                            found = True
                            break
                    else:
                        found = True
                        break
                y += 1
            if found:
                yh = y
                if y - 1 >= 0:
                    y0, y1 = y, y - 1  # y0 is part of wave, y1 is below its bottom at t=x so dat[y1][x] <= thresh
                    yh = y0 - (thresh - dat[y1][x]) / (dat[y0][x] - dat[y1][x])
                if verbose: print "found d ", x, y, yh
                endx, speed = scanx(lab, thresh, widx, x, y, slicex, yh,
                                    lastydownh)
                olap = 0
                if y == lastydown and x < lastendxdown: olap = 1
                nq.append(widx, x * recdt, endx * recdt, yh * spaceum, speed,
                          recdt * (endx - x), olap, 0)
                lastydown, lastendxdown, lastydownh = y, endx, yh
            x += 1  # move to next time-point
    return nq
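The integer wave edges found above are refined to fractional positions yh by linear interpolation; the comment inside the loop gives the crossing-point formula. As a standalone sketch (cross_y is a hypothetical helper, not part of the original code):

def cross_y(y0, y1, f0, f1, thresh):
    # y where the line through (y0, f0) and (y1, f1) crosses thresh:
    # y0 + (y1 - y0) * (thresh - f0) / (f1 - f0)
    return y0 + (y1 - y0) * (thresh - f0) / (f1 - f0)

print(cross_y(3, 4, 1.0, 0.0, 0.25))  # 3.75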
Example #57
0
def test_find_objects04():
    data = np.zeros([1], dtype=int)
    out = ndimage.find_objects(data)
    assert_equal(out, [])
Example #58
0
    def FindObjects(self,
                    thresholdFactor,
                    numThresholdSteps="default",
                    blurRadius=1.5,
                    mask=None,
                    splitter=None,
                    debounceRadius=4,
                    maskEdgeWidth=5,
                    upperThreshFactor=0.5,
                    discardClumpRadius=0):
        """Finds point-like objects by subjecting the data to a band-pass filtering (as defined when 
        creating the identifier) followed by z-projection and a thresholding procedure where the 
        threshold is progressively decreased from a maximum value (half the maximum intensity in the image) to a 
        minimum defined as [thresholdFactor]*the mode (most frequently occurring value,
        should correspond to the background) of the image. The number of steps can be given as
        [numThresholdSteps], with default being 5 when filterMode="fast" and 10 for filterMode="good".
        At each step the thresholded image is blurred with a Gaussian of radius [blurRadius] to 
        approximate the image of the points found in that step, and subtracted from the original, thus
        removing the objects from the image such that they are not detected at the lower thresholds.
        This allows the detection of objects which are relatively close together and spread over a 
        large range of intensities. A binary mask [mask] may be applied to the image to specify a region
        (e.g. a cell) in which objects are to be detected.

        A copy of the filtered image is saved such that subsequent calls to FindObjects with, e.g., a
        different thresholdFactor are faster."""

        #save a copy of the parameters.
        self.thresholdFactor = thresholdFactor
        self.estSN = False

        if (numThresholdSteps == "default"):
            if (self.filterMode == "fast"):
                self.numThresholdSteps = 5
            else:
                self.numThresholdSteps = 10
        elif (numThresholdSteps == 'Estimate S/N'):
            self.numThresholdSteps = 0
            self.estSN = True
        else:
            self.numThresholdSteps = int(numThresholdSteps)

        self.blurRadius = blurRadius
        self.mask = mask

        #clear the list of previously found points
        del self[:]

        #do filtering
        filteredData = self.__FilterData()

        #apply mask
        if self.mask is not None:
            maskedFilteredData = filteredData * self.mask
        else:
            maskedFilteredData = filteredData

        #manually mask the edge pixels
        if maskEdgeWidth and filteredData.shape[1] > maskEdgeWidth:
            maskedFilteredData[:, :maskEdgeWidth] = 0
            maskedFilteredData[:, -maskEdgeWidth:] = 0
            maskedFilteredData[-maskEdgeWidth:, :] = 0
            maskedFilteredData[:maskEdgeWidth, :] = 0

        if self.numThresholdSteps > 0:
            #determine (approximate) mode
            N, bins = numpy.histogram(maskedFilteredData, bins=200)
            posMax = N.argmax()  #find bin with maximum number of counts
            #bins contains left edges - take the middle of the most frequent bin
            modeApp = bins[posMax:(posMax + 2)].mean()

            #catch the corner case where the mode could be zero - this is highly unlikely, but if it were
            #to occur one would no longer be able to influence the threshold with threshFactor
            if (abs(modeApp) < 1):
                modeApp = 1

            #calc thresholds
            self.lowerThreshold = modeApp * self.thresholdFactor
            self.upperThreshold = maskedFilteredData.max() * upperThreshFactor

        else:
            if self.estSN:
                self.lowerThreshold = self.thresholdFactor * numpy.sqrt(
                    numpy.median(self.data.ravel()))
            else:
                self.lowerThreshold = self.thresholdFactor

        X, Y = numpy.mgrid[0:maskedFilteredData.shape[0],
                           0:maskedFilteredData.shape[1]]
        #X = X.astype('f')
        #Y = Y.astype('f')

        #store x, y, and thresholds
        xs = []
        ys = []
        ts = []

        if self.numThresholdSteps == 0:  #don't do threshold scan - just use lower threshold (faster)
            im = maskedFilteredData
            imt = im > self.lowerThreshold
            #imt = ndimage.binary_erosion(im >self.lowerThreshold)
            (labeledPoints, nLabeled) = ndimage.label(imt)

            objSlices = ndimage.find_objects(labeledPoints)

            #loop over objects
            for i in range(nLabeled):
                #measure position
                #x,y = ndimage.center_of_mass(im, labeledPoints, i)
                imO = im[objSlices[i]]
                x = (X[objSlices[i]] * imO).sum() / imO.sum()
                y = (Y[objSlices[i]] * imO).sum() / imO.sum()

                #and add to list
                #self.append(OfindPoint(x,y,detectionThreshold=self.lowerThreshold))
                xs.append(x)
                ys.append(y)
                ts.append(self.lowerThreshold)
        else:  #do threshold scan (default)

            #generate threshold range - note the slightly awkward specification of lower and upper bounds, as the stop bound is excluded from arange
            self.thresholdRange = numpy.arange(
                self.upperThreshold, self.lowerThreshold -
                (self.upperThreshold - self.lowerThreshold) /
                (self.numThresholdSteps - 1),
                -(self.upperThreshold - self.lowerThreshold) /
                (self.numThresholdSteps))

            #get a working copy of the filtered data
            im = maskedFilteredData.copy()

            #use for quickly determining the number of pixels in a slice (there must be a better way)
            corrWeightRef = numpy.ones(im.shape)

            for threshold in self.thresholdRange:
                #apply threshold and label regions
                (labeledPoints, nLabeled) = ndimage.label(im > threshold)

                #initialise correction weighting mask
                corrWeights = numpy.zeros(im.shape, 'f')

                #get 'adress' of each object
                objSlices = ndimage.find_objects(labeledPoints)

                #loop over objects
                for i in range(nLabeled):  # objSlices is 0-indexed for labels 1..nLabeled
                    #measure position
                    #x,y = ndimage.center_of_mass(im, labeledPoints, i)
                    nPixels = corrWeightRef[objSlices[i]].sum()
                    imO = im[objSlices[i]]
                    x = (X[objSlices[i]] * imO).sum() / imO.sum()
                    y = (Y[objSlices[i]] * imO).sum() / imO.sum()
                    #and add to list
                    #self.append(OfindPoint(x,y,detectionThreshold=threshold))
                    xs.append(x)
                    ys.append(y)
                    ts.append(threshold)

                    #now work out weights for the correction image (N.B. this is somewhat empirical)
                    corrWeights[objSlices[i]] = 1.0 / numpy.sqrt(nPixels)

                #calculate correction matrix
                corr = ndimage.gaussian_filter(
                    2 * self.blurRadius * numpy.sqrt(2 * numpy.pi) * 1.7 * im *
                    corrWeights, self.blurRadius)

                #subtract from working image
                im -= corr

                #pylab.figure()
                #pylab.imshow(corr)
                #pylab.colorbar()

                #pylab.figure()
                #pylab.imshow(im)
                #pylab.colorbar()

                #clip border pixels again
                im[0:5, 0:5] = 0
                im[0:5, -5:] = 0
                im[-5:, -5:] = 0
                im[-5:, 0:5] = 0

                print(len(xs))

        xs = numpy.array(xs)
        ys = numpy.array(ys)

        # if splitter:
        #     ys = ys + (ys > im.shape[1]/2)*(im.shape[1] - 2*ys)

        if splitter and (len(xs) > 0):
            xs, ys = splitter(xs, ys)

            xs = numpy.clip(xs, 0, self.filteredData.shape[0] - 1)
            ys = numpy.clip(ys, 0, self.filteredData.shape[1] - 1)

        if discardClumpRadius > 0:
            print('ditching clumps')
            xs, ys = self.__discardClumped(xs, ys, discardClumpRadius)

        xs, ys = self.__Debounce(xs, ys, debounceRadius)

        for x, y, t in zip(xs, ys, ts):
            self.append(OfindPoint(x, y, t))

        #create pseudo lists to allow indexing along the lines of self.x[i]
        self.x = PseudoPointList(self, 'x')
        self.y = PseudoPointList(self, 'y')
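A self-contained sketch of the intensity-weighted centroid measurement used in both branches of FindObjects, on synthetic data; ndimage.center_of_mass computes the same quantity in a single call.

import numpy
from scipy import ndimage

im = numpy.zeros((8, 8))
im[2:5, 3:6] = [[1, 2, 1],
                [2, 5, 2],
                [1, 2, 1]]
labeledPoints, nLabeled = ndimage.label(im > 0)
X, Y = numpy.mgrid[0:im.shape[0], 0:im.shape[1]]
for objSlice in ndimage.find_objects(labeledPoints):
    imO = im[objSlice]
    x = (X[objSlice] * imO).sum() / imO.sum()  # intensity-weighted row centre
    y = (Y[objSlice] * imO).sum() / imO.sum()  # intensity-weighted column centre
    print(x, y)  # 3.0 4.0 for this symmetric blob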
Example #59
0
def test_find_objects02():
    data = np.zeros([], dtype=int)
    out = ndimage.find_objects(data)
    assert_(out == [])
Example #60
0
def test_find_objects06():
    data = np.array([1, 0, 2, 2, 0, 3])
    out = ndimage.find_objects(data)
    assert_equal(out, [(slice(0, 1, None), ), (slice(2, 4, None), ),
                       (slice(5, 6, None), )])