def dice(img, y_true, y_pred):
    """Match each predicted region to a ground-truth region and compute its Dice score."""
    h, w = img.shape
    im_true = y_true.reshape(h, w)
    im_pred = y_pred.reshape(h, w)

    labels_true = measure.label(im_true)
    regions_true = regionprops(labels_true)

    labels_pred = measure.label(im_pred)
    regions_pred = regionprops(labels_pred)

    features = ['coords', 'area', 'dice']
    df = pd.DataFrame(columns=features)

    i = 0
    for x_pred in regions_pred:
        centroid = np.array(x_pred.centroid).astype(int)
        if im_true[centroid[0], centroid[1]] == 1:
            for x_true in regions_true:
                # Row-wise membership test: is the centroid a pixel of this true region?
                if (x_true.coords == centroid).all(axis=1).any():
                    A = np.zeros((h, w))
                    B = np.zeros((h, w))
                    A[x_pred.coords[:, 0], x_pred.coords[:, 1]] = 1
                    B[x_true.coords[:, 0], x_true.coords[:, 1]] = 1
                    # Dice coefficient: 2 * |A ∩ B| / (|A| + |B|)
                    intersect = float(np.logical_and(A, B).sum())
                    D = 2 * intersect / (A.sum() + B.sum())
                    df.loc[i] = [x_pred.coords, x_pred.area, D]
                    break
        i += 1
    return df
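For context, a minimal usage sketch with synthetic masks (illustrative only, not from the original source; it assumes numpy, pandas, and skimage.measure are imported as in the snippet above):

import numpy as np

img = np.zeros((64, 64), dtype=np.uint8)

# Ground truth: one 10x10 square object.
y_true = np.zeros((64, 64), dtype=np.uint8)
y_true[10:20, 10:20] = 1

# Prediction: the same object shifted down by two pixels.
y_pred = np.zeros((64, 64), dtype=np.uint8)
y_pred[12:22, 10:20] = 1

df = dice(img, y_true.ravel(), y_pred.ravel())
print(df)  # one row per matched predicted region, Dice = 0.8 here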
Example #2
    def detect_tc_in_step(self, nc, i, ecc_th=0.75):
        mask = self.ocean_mask.copy()
        uas = nc.variables["uas"][i].squeeze()
        vas = nc.variables["vas"][i].squeeze()
        wind_speed = numpy.sqrt(uas**2 + vas**2)
        wind_mask = logical_and(self.ocean_mask, wind_speed > 20.)
        temp = nc.variables["ts"][i].squeeze()
        temp_mask = logical_and(self.ocean_mask, temp > 298.15)
        ps = nc.variables["ps"][i].squeeze()
        ps_mask = logical_and(self.ocean_mask, ps < 1005)
        mask = logical_or(wind_mask, logical_and(temp_mask, ps_mask))
        mask = remove_small_objects(mask, 20)
        lbl = label(mask)
        props_windspeed = regionprops(lbl, wind_speed)
        props_pressure = regionprops(lbl, ps)
        centroids = []
        for windspeed, pressure in zip(props_windspeed, props_pressure):
            max_wind_speed = windspeed["max_intensity"]
            min_pressure = pressure["min_intensity"]
            if windspeed["eccentricity"] > ecc_th or max_wind_speed < 20.:
                lbl[lbl == windspeed["label"]] = 0
            else:
                y, x = windspeed["centroid"]
                lon = float(self.idx_to_lon(x, y))
                lat = float(self.idx_to_lat(x, y))
                centroids.append([lon, lat, max_wind_speed, min_pressure])
        mask = lbl > 0
        return mask, centroids
def extract_cell_stats(img1_path, img2_path):

    # Function reads in the images and labels the cells. The features are
    # extracted from these labelled images.
    #
    # Inputs:   img1_path - path to previous image
    #           img2_path - path to current image
    #
    # Outputs:  out -   dict containing the relevant information
    #

    # TODO: be more accommodating with image types, RGB etc, tifffile warning
    # read image data
    img1 = skimage.io.imread(img1_path)
    img2 = skimage.io.imread(img2_path)

    # Image shape
    if img1.shape != img2.shape:
        warnings.warn('Caution: Comparing image frames of different sizes.')
    img_shape = img1.shape

    # Label pre-segmented images
    l_label, l_cell_total = label(img1, return_num=True)
    r_label, r_cell_total = label(img2, return_num=True)

    # Collect cell features if cell is of minimum size (not segmented debris)
    # TODO: clever way of setting this number
    l_cells = [cell for cell in regionprops(l_label) if cell['filled_area'] > 50]
    r_cells = [cell for cell in regionprops(r_label) if cell['filled_area'] > 50]

    # Output
    out = {'img1': l_cells, 'img2': r_cells, 'img_shape': img_shape}
    return out
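A hedged usage sketch for the function above; the file names are placeholders, not paths from the original source, and the imports (skimage.io, warnings, label, regionprops) are assumed as in the snippet:

stats = extract_cell_stats('frame_000.tif', 'frame_001.tif')  # hypothetical file names
print(len(stats['img1']), 'cells in previous frame')
print(len(stats['img2']), 'cells in current frame')
print(stats['img_shape'])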
Example #4
def get_segmented_lungs(im, plot=False):
    # Step 1: Convert into a binary image.
    binary = im < -400
    # Step 2: Remove the blobs connected to the border of the image.
    cleared = clear_border(binary)
    # Step 3: Label the image.
    label_image = label(cleared)
    # Step 4: Keep the labels with 2 largest areas.
    areas = [r.area for r in regionprops(label_image)]
    areas.sort()
    if len(areas) > 2:
        for region in regionprops(label_image):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    label_image[coordinates[0], coordinates[1]] = 0
    binary = label_image > 0
    # Step 5: Erosion operation with a disk of radius 2. This separates the lung nodules attached to the blood vessels.
    selem = disk(2)
    binary = binary_erosion(binary, selem)
    # Step 6: Closure operation with a disk of radius 10. This keeps nodules attached to the lung wall.
    selem = disk(10)
    binary = binary_closing(binary, selem)
    # Step 7: Fill in the small holes inside the binary mask of lungs.
    edges = roberts(binary)
    binary = ndi.binary_fill_holes(edges)
    # Step 8: Superimpose the binary mask on the input image.
    get_high_vals = binary == 0
    im[get_high_vals] = -2000
    return im, binary
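A hedged usage sketch for the function above, using a synthetic slice in place of a real CT slice in Hounsfield units (real inputs would come from a DICOM/NIfTI loader; the skimage/scipy names are assumed to be imported as in the snippet):

import numpy as np

ct_slice = np.full((512, 512), 40.0)     # soft-tissue "body" (above -400 HU)
ct_slice[150:360, 120:240] = -800.0      # crude left "lung"
ct_slice[150:360, 280:400] = -800.0      # crude right "lung"

masked_im, lung_mask = get_segmented_lungs(ct_slice.copy())
print(lung_mask.sum(), 'pixels kept in the lung mask')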
Example #5
def filter_segments(labels, max_ecc, min_area, max_area, max_detect=None,
                    circ=None, intensity=None, **extra_args):
    """ filter_segments(labels, max_ecc=0.5, min_area=15, max_area=200) -> [Segment]
        Returns a list of Particles and masks out labels for
        particles not meeting acceptance criteria.
    """
    pts = []
    strengths = []
    centroid = 'Centroid' if intensity is None else 'WeightedCentroid'
    if skversion < version('0.10'):
        rprops = regionprops(labels, ['Area', 'Eccentricity', centroid], intensity)
    else:
        rprops = regionprops(labels, intensity)
    for rprop in rprops:
        area = rprop['area']
        if area < min_area or area > max_area:
            continue
        ecc = rprop['eccentricity']
        if ecc > max_ecc:
            continue
        x, y = rprop[centroid]
        if circ:
            co, ro = circ
            if (x - co[0])**2 + (y - co[1])**2 > ro**2:
                continue
        pts.append(Segment(x, y, rprop.label, ecc, area))
        if max_detect is not None:
            strengths.append(rprop['mean_intensity'])
    if max_detect is not None:
        # sort detections by descending intensity before truncating
        order = np.argsort(-np.asarray(strengths))
        pts = [pts[k] for k in order]
    return pts[:max_detect]
Example #6
    def clean_by_area(self, binary_image):
        image = binary_image.copy()
        image = ndi.binary_fill_holes(image)

        label_image = label(binary_image)
        initial_label = regionprops(label_image[0, :, :])[0].label

        for z in range(0, image.shape[0]):
            regions = regionprops(label_image[z, :, :])
            for region in regions:
                if region.label != initial_label:
                    for coords in region.coords:
                        image[z, coords[0], coords[1]] = 0

        for z in range(0, image.shape[0]):
            label_image = label(image[z, :, :], connectivity=1)
            regions = regionprops(label_image)
            if len(regions) > 1:
                max_area = np.max([r.area for r in regions])
                for region in regions:
                    if region.centroid[1] > 120 and region.area < max_area:
                        for coords in region.coords:
                            image[z, coords[0], coords[1]] = 0

        return image
Example #7
def test_orientation():
    orientation = regionprops(SAMPLE, ['Orientation'])[0]['Orientation']
    # determined with MATLAB
    assert_almost_equal(orientation, 0.10446844651921)
    # test correct quadrant determination
    orientation2 = regionprops(SAMPLE.T, ['Orientation'])[0]['Orientation']
    assert_almost_equal(orientation2, math.pi / 2 - orientation)
Example #8
def get_segmented_lungs(im):

    binary = im < -320
    cleared = clear_border(binary)
    cleared = morph(cleared, 5)
    label_image = label(cleared)
  
    areas = [r.area for r in regionprops(label_image)]
    areas.sort()
    if len(areas) > 2:
        for region in regionprops(label_image):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    label_image[coordinates[0], coordinates[1]] = 0
    binary = label_image > 0  
    selem = disk(2)
    binary = binary_erosion(binary, selem)
 
    selem = disk(10)
    binary = binary_closing(binary, selem)
    edges = roberts(binary)
    binary = ndi.binary_fill_holes(edges)
 
    get_high_vals = binary == 0
    im[get_high_vals] = 0
  
    binary = morphology.dilation(binary, np.ones([5, 5]))
    return binary
Example #9
def get_segmentation_features(im):
    dilwindow = [4, 4]
    imthr = np.where(im > np.mean(im), 0.0, 1.0)
    imdil = morphology.dilation(imthr, np.ones(dilwindow))
    labels = measure.label(imdil)
    labels = imthr * labels
    labels = labels.astype(int)
    regions = measure.regionprops(labels)
    numregions = len(regions)
    while len(regions) < 1:
        dilwindow[0] = dilwindow[0] - 1
        dilwindow[1] = dilwindow[1] - 1
        if dilwindow == [0, 0]:
            regions = None
            break
        imthr = np.where(im > np.mean(im), 0.0, 1.0)
        imdil = morphology.dilation(imthr, np.ones(dilwindow))
        labels = measure.label(imdil)
        labels = imthr * labels
        labels = labels.astype(int)
        regions = measure.regionprops(labels)
    regionmax = get_largest_region(regions, labels, imthr)

    if regionmax is None:
        return (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan)
    eccentricity = regionmax.eccentricity
    convex_area = regionmax.convex_area
    convex_to_total_area = regionmax.convex_area / regionmax.area
    extent = regionmax.extent
    filled_area = regionmax.filled_area
    return (eccentricity, convex_area, convex_to_total_area, extent,
            filled_area, numregions)
Example #10
def test_bbox():
    bbox = regionprops(SAMPLE, ['BoundingBox'])[0]['BoundingBox']
    assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]))

    SAMPLE_mod = SAMPLE.copy()
    SAMPLE_mod[:, -1] = 0
    bbox = regionprops(SAMPLE_mod, ['BoundingBox'])[0]['BoundingBox']
    assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]-1))
Example #11
def test_euler_number():
    en = regionprops(SAMPLE, ['EulerNumber'])[0]['EulerNumber']
    assert en == 0

    SAMPLE_mod = SAMPLE.copy()
    SAMPLE_mod[7, -3] = 0
    en = regionprops(SAMPLE_mod, ['EulerNumber'])[0]['EulerNumber']
    assert en == -1
Example #12
def test_filled_area():
    area = regionprops(SAMPLE, ['FilledArea'])[0]['FilledArea']
    assert area == np.sum(SAMPLE)

    SAMPLE_mod = SAMPLE.copy()
    SAMPLE_mod[7, -3] = 0
    area = regionprops(SAMPLE_mod, ['FilledArea'])[0]['FilledArea']
    assert area == np.sum(SAMPLE)
Example #13
def _properties2d(image, dim):
    """
    Compute shape property of the input 2D image. Accounts for partial volume information.
    :param image: 2D input image in uint8 or float (weighted for partial volume) that has a single object.
    :param dim: [px, py]: Physical dimension of the image (in mm). X,Y respectively correspond to AP,RL.
    :return:
    """
    upscale = 5  # upscale factor for resampling the input image (for better precision)
    pad = 3  # padding used for cropping
    # Check if slice is empty
    if not image.any():
        logging.debug('The slice is empty.')
        return None
    # Normalize between 0 and 1 (also check if slice is empty)
    image_norm = (image - image.min()) / (image.max() - image.min())
    # Convert to float64
    image_norm = image_norm.astype(np.float64)
    # Binarize image using a threshold of 0.5 (necessary input for measure.regionprops)
    image_bin = np.array(image_norm > 0.5, dtype='uint8')
    # Get all closed binary regions from the image (normally there is only one)
    regions = measure.regionprops(image_bin, intensity_image=image_norm)
    # Check number of regions
    if len(regions) > 1:
        logging.debug('There is more than one object on this slice.')
        return None
    region = regions[0]
    # Get bounding box of the object
    minx, miny, maxx, maxy = region.bbox
    # Use those bounding box coordinates to crop the image (for faster processing)
    image_crop = image_norm[np.clip(minx-pad, 0, image_bin.shape[0]): np.clip(maxx+pad, 0, image_bin.shape[0]),
                 np.clip(miny-pad, 0, image_bin.shape[1]): np.clip(maxy+pad, 0, image_bin.shape[1])]
    # Oversample image to reach sufficient precision when computing shape metrics on the binary mask
    image_crop_r = transform.pyramid_expand(image_crop, upscale=upscale, sigma=None, order=1)
    # Binarize the resampled image using a threshold of 0.5 (necessary input for measure.regionprops)
    image_crop_r_bin = np.array(image_crop_r > 0.5, dtype='uint8')
    # Get all closed binary regions from the image (normally there is only one)
    regions = measure.regionprops(image_crop_r_bin, intensity_image=image_crop_r)
    region = regions[0]
    # Compute area with weighted segmentation and adjust area with physical pixel size
    area = np.sum(image_crop_r) * dim[0] * dim[1] / upscale ** 2
    # Compute ellipse orientation, rotated by 90deg because image axis are inverted, modulo pi, in deg, and between [0, 90]
    orientation = _fix_orientation(region.orientation)
    # Find RL and AP diameters based on the major/minor axes and the cord orientation
    [diameter_AP, diameter_RL] = \
        _find_AP_and_RL_diameter(region.major_axis_length, region.minor_axis_length, orientation,
                                 [i / upscale for i in dim])
    # TODO: compute major_axis_length/minor_axis_length by summing weighted voxels along axis
    # Fill up dictionary
    properties = {'area': area,
                  'diameter_AP': diameter_AP,
                  'diameter_RL': diameter_RL,
                  'centroid': region.centroid,
                  'eccentricity': region.eccentricity,
                  'orientation': orientation,
                  'solidity': region.solidity  # convexity measure
    }

    return properties
def test_get_boundaries_of_image_3d():
    # Test that the equivalent diameter of the maximum-intensity projection of the object's
    # edges matches that of the input sphere (skimage's measure.regionprops does not
    # implement a 3D perimeter property)
    radius = 4
    binary = morphology.ball(radius)
    boundary = radius_skeleton.get_boundaries_of_image(binary)
    maxip = np.amax(boundary, 0)
    nose.tools.assert_almost_equal(measure.regionprops(binary)[0].equivalent_diameter,
                                   measure.regionprops(maxip)[0].equivalent_diameter, places=1)
Example #15
def bin_analyser(RGB_image, bin_image, list_feature, marge=None, pandas_table=False, do_label=True):
    bin_image_copy = bin_image.copy()

    p = 0
    for feat in list_feature:
        p += feat.size

    if marge is not None and marge != 0:
        seed = np.zeros_like(bin_image_copy)
        seed[marge:-marge, marge:-marge] = 1
        mask = bin_image_copy.copy()
        mask[ mask > 0 ] = 1
        mask[marge:-marge, marge:-marge] = 1
        reconstructed = reconstruction(seed, mask, 'dilation')
        bin_image_copy[reconstructed == 0] = 0
    if do_label:
        bin_image_copy = label(bin_image_copy)

    if len(np.unique(bin_image_copy)) != 2:
        if len(np.unique(bin_image_copy)) == 1:
            if 0 in bin_image_copy:
                print "Return blank matrix. Change this shit"
                white_npy = np.zeros(shape=(1, p))
                if not pandas_table:
                    return white_npy
                else:
                    names = GetNames(list_feature) 
                    return pd.DataFrame(white_npy, columns=names)
            else:
                print "Error, must give a bin image."
    
    GrowRegion_N = NeededGrownRegion(list_feature)
    img = {0: bin_image_copy}
    RegionProp = {0: regionprops(bin_image_copy)}
    for val in GrowRegion_N:
        if val != 0:
            img[val] = GrowRegion(bin_image_copy, val)
            RegionProp[val] = regionprops(img[val])
    
    n = len(RegionProp[0])

    TABLE = np.zeros(shape=(n,p))
    for i in range(n):
        offset_ALL = 0
        for j, feat in enumerate(list_feature):
            tmp_regionprop = RegionProp[feat._return_n_extension()][i]
            off_tmp = feat.size      
            TABLE[i, (j + offset_ALL):(j + offset_ALL + off_tmp)] = feat._apply_region(tmp_regionprop, RGB_image)

            offset_ALL += feat.size - 1

    if pandas_table:
        names = GetNames(list_feature)
        return pd.DataFrame(TABLE, columns=names)
    else:
        return TABLE
Example #16
def extractSegmentPropertiesRGB(segments,img):
	"""Function to extract 6 properties from each segment of a segmented image.

    Returns a numpy array with one row per segment and 6 columns:
    [:, 0] : Mean red (r) intensity of segment
    [:, 1] : Mean green (g) intensity of segment
    [:, 2] : Mean blue (b) intensity of segment
    [:, 3] : (b-r)/(b+r)
    [:, 4] : (b-g)/(b+g)
    [:, 5] : (g-r)/(2b - g - r)

    Parameters
    ----------
    segments : ndarray, shape(M, N)
        The segmented image
    img : ndarray, shape(M, N, 3)
        The RGB image that has been segmented.


    Returns
    -------
    properties : ndarray, shape(n, 6)
        A numpy array of per-segment colour properties.
    """

	# Extract feature properties
	# Mean RGB intensities
	props = measure.regionprops(segments,img[:,:,0], cache=False)
	r=[prop.mean_intensity for prop in props]
	props = measure.regionprops(segments,img[:,:,1], cache=False)
	g=[prop.mean_intensity for prop in props]
	props = measure.regionprops(segments,img[:,:,2], cache=False)
	b=[prop.mean_intensity for prop in props]

	# RGB Ratios
	denom = np.add(b,r)
	denom[denom==0] = 0.05
	Ratio1 = np.divide(np.subtract(b,r) , denom)

	denom = np.add(b,g)
	denom[denom==0] = 0.05
	Ratio2 = np.divide(np.subtract(b,g) , denom)

	denom = np.subtract(np.subtract(np.multiply(b,2),g),r)
	denom[denom==0] = 0.05
	Ratio3 = np.divide(np.subtract(g,r) , denom)

	# Stack properties to array
	properties = np.column_stack((r,g,b,Ratio1,Ratio2,Ratio3))

	return properties
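A brief usage sketch, assuming a recent scikit-image (slic is called with start_label=1 so that regionprops, which skips label 0, returns one row per superpixel) and that measure is imported as in the snippet; the test image is just skimage's bundled astronaut photo:

from skimage import data
from skimage.segmentation import slic

img = data.astronaut()                              # RGB test image
segments = slic(img, n_segments=50, start_label=1)  # labels start at 1
props = extractSegmentPropertiesRGB(segments, img)
print(props.shape)                                  # (number_of_segments, 6)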
Example #17
def rag_solidity(labels, connectivity=2):

    graph = RAG()

    # The footprint is constructed in such a way that the first
    # element in the array being passed to _add_edge_filter is
    # the central value.
    fp = ndi.generate_binary_structure(labels.ndim, connectivity)
    for d in range(fp.ndim):
        fp = fp.swapaxes(0, d)
        fp[0, ...] = 0
        fp = fp.swapaxes(0, d)

    # For example
    # if labels.ndim = 2 and connectivity = 1
    # fp = [[0,0,0],
    #       [0,1,1],
    #       [0,1,0]]
    #
    # if labels.ndim = 2 and connectivity = 2
    # fp = [[0,0,0],
    #       [0,1,1],
    #       [0,1,1]]

    ndi.generic_filter(
        labels,
        function=_add_edge_filter,
        footprint=fp,
        mode='nearest',
        output=np.zeros(labels.shape, dtype=np.uint8),
        extra_arguments=(graph,))

    regions = {r.label: r for r in regionprops(labels)}

    graph.remove_node(0)

    for n in graph:
        region = regions[n]
        graph.node[n].update({'labels': [n],
                              'solidity': region['solidity'],
                              'mask': labels == region.label})

    for x, y, d in graph.edges_iter(data=True):
        new_mask = np.logical_or(graph.node[x]['mask'], graph.node[y]['mask'])
        new_solidity = 1. * new_mask.sum() / convex_hull_image(new_mask).sum()
        org_solidity = np.mean([graph.node[x]['solidity'],
                                graph.node[y]['solidity']])
        d['weight'] = org_solidity / new_solidity

    return graph
Example #18
def remove_abnormal_samples(seg, sigma=0.9):
    '''removes abnormal samples based on sig. deviation from mean centroid,
       then removes based on mean size'''
    # Get centroids of samples
    centroids = []
    for idx, lab in enumerate(measure.regionprops(scipy.ndimage.label(seg)[0])):
        centroids.append(lab.centroid)    
    row_vals = [x[0] for x in centroids]
    # Get relevant stats of row values
    mean = np.mean(row_vals)
    std = np.std(row_vals)
    hi = mean + sigma*std
    lo = mean - sigma*std
    # Eliminate sig. deviation from mean
    for idx, lab in enumerate(measure.regionprops(scipy.ndimage.label(seg)[0])):
        centroid = lab.centroid
        if centroid[0] < lo or centroid[0] > hi:
            seg = floodfill_fast(
                    seg, 
                    x=int(centroid[0]), 
                    y=int(centroid[1]), 
                    value=0, 
                    border_color=0, 
                    dtype=np.uint8
                    )[0]
              
    # Get sizes of samples
    areas = []
    for idx, lab in enumerate(measure.regionprops(scipy.ndimage.label(seg)[0])):
        areas.append(lab.filled_area) 
    mean = np.mean(areas)
    std = np.std(areas)
    hi = mean + 3*sigma*std
    lo = mean - 3*sigma*std  
    # Eliminate sig. deviation from mean
    for idx, lab in enumerate(measure.regionprops(scipy.ndimage.label(seg)[0])):
        area = lab.filled_area
        centroid = lab.centroid
        if area < lo or area > hi:
            seg = floodfill_fast(
                    seg, 
                    x=int(centroid[0]), 
                    y=int(centroid[1]), 
                    value=0, 
                    border_color=0, 
                    dtype=np.uint8
                    )[0]      
    return seg
def centres_of_mass_2D(image):
    """
    Calculates centres of mass
    for all the labels
    """
    centroids = []
    bords = []
    areas = []
    radius = []

    for info in measure.regionprops(image, ['Centroid', 'BoundingBox', 'equivalent_diameter']): 
        
        centre = info['Centroid']
        minr, minc, maxr, maxc = info['BoundingBox']
        D = info['equivalent_diameter']
    
        
        margin = 0
        
        radius.append((D / 2.0))
        bords.append((minr-margin, minc-margin, maxr+margin, maxc+margin))
        areas.append(image[minr-margin:maxr+margin,minc-margin:maxc+margin].copy())
        centroids.append(centre)
        
    return centroids, areas, bords, radius
Example #20
def getLabelImFeats(lsim,center,orgim):
    """Compute object geometry features.

    Parameters
    ----------
    lsim:
        Segmented binary image
    center:
        Center coordinate(x,y) of the object
    orgim:
        Original image

    """
    
    label_img = skimage.measure.label(lsim)
    regions = regionprops(label_img)
    index = label_img[center[0],center[1]]-1

    # direct features
    Area = regions[index].area
    CentralMoments = regions[index].moments_central
    Eccentricity = regions[index].eccentricity
    Perimeter = regions[index].perimeter

    skewx=np.mean(stats.skew(lsim, axis=0, bias=True))
    skewy=np.mean(stats.skew(lsim, axis=1, bias=True))
    
    # derived features
    compact = Area/Perimeter**2
    skewness = np.sqrt(skewx**2 + skewy**2)
    cen_skew = getCentSkewness(label_img,Area, index,regions[index].centroid)
    numBranch = getRBSTim(label_img,orgim)

    return np.hstack((Area, Eccentricity, Perimeter, compact, skewness, cen_skew, numBranch))
Example #21
def getImgData(img):
    labelPos = np.array((img < 0.9), dtype=int)
    # labelNeg = np.array((img >= 0.9),dtype=int)
    props = [
        "Area",
        "Centroid",
        "WeightedCentroid",
        "WeightedMoments",
        "WeightedHuMoments",
        "HuMoments",
        "EulerNumber",
        "Eccentricity",
        "EquivDiameter",
        "Extent",
        "MeanIntensity",
        "MinIntensity",
        "MaxIntensity",
        "Orientation",
        "Solidity",
    ]
    # props = ['Centroid','WeightedCentroid']
    dataPos = regionprops(labelPos, properties=props, intensity_image=img)[0]
    del dataPos["Label"]
    # dataNeg = regionprops(labelNeg,properties=props,intensity_image=img)[0]
    # del dataNeg['Label']
    return dataPos  # ,dataNeg
Example #22
    def find_peaks(self):
        """Identify candidate sources that match the PSF.

        Updates self.corrmap and self.pix.
        """
        data = self.data
        psf_grid = self.psf_grid
        psf_size = psf_grid.psf_size
        grid_size = psf_grid.grid_size
        box_size = psf_grid.box_size

        # map the correlation coefficient
        data_grid = blockshaped(data, grid_size, grid_size)
        corrmap = np.array([match_template(image, template, pad_input=True)
            for image, template in zip(data_grid, psf_grid.to_array())])
        corrmap = unblockshaped(corrmap, *self.shape)

        # find peak positions in the correlation map
        mask = ((corrmap > self.corrmin)
              & (data > self.adumin)
              & (data < self.adumax))
        pix = np.array([bbox_max(corrmap, region.bbox)
            for region in regionprops(label(mask, neighbors=4))])

        # exclude peaks near the edges
        fov = 0, 0, self.shape[0], self.shape[1]
        d_max = np.max([box_size, psf_size])/2
        pix = pix[np.array([bbox_inside(p[0], p[1], fov, d_max) for p in pix])]

        if len(pix) == 0:
            warn("Found no candidate sources.")

        self.corrmap = corrmap
        self.pix = pix
def computeITCList(evaluation_mask, resolution, level):
    """Compute the list of labels containing Isolated Tumor Cells (ITC)
    
    Description:
        A region is considered ITC if its longest diameter is below 200µm.
        As we expanded the annotations by 75µm, the major axis of the object 
        should be less than 275µm to be considered as ITC (Each pixel is 
        0.243µm*0.243µm in level 0). Therefore the major axis of the object 
        in level 5 should be less than 275/(2^5*0.243) = 35.36 pixels.
        
    Args:
        evaluation_mask:    The evaluation mask
        resolution:         Pixel resolution of the image at level 0
        level:              The level at which the evaluation mask was made
        
    Returns:
        Isolated_Tumor_Cells: list of labels containing Isolated Tumor Cells
    """
    max_label = np.amax(evaluation_mask)    
    properties = measure.regionprops(evaluation_mask)
    Isolated_Tumor_Cells = [] 
    threshold = 275/(resolution * pow(2, level))
    for i in range(0, max_label):
        if properties[i].major_axis_length < threshold:
            Isolated_Tumor_Cells.append(i+1)
    return Isolated_Tumor_Cells
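A quick sanity check of the threshold arithmetic quoted in the docstring (values taken from the docstring itself: 0.243 µm/pixel at level 0, mask made at level 5):

resolution, level = 0.243, 5
threshold = 275 / (resolution * 2 ** level)
print(threshold)  # ≈ 35.4 pixels, consistent with the ~35.36 quoted in the docstring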
def do_evaluate(model):
    print('Model evaluating')
    X, y_true = next(get_seg_batch(1, from_train=False, random_choice=True))
    y_pred = model.predict(X)

    X, y_true, y_pred = X[0,:,:,:,0], y_true[0,:,:,:,0], y_pred[0,:,:,:,0]
    intersection = y_true * y_pred
    recall = (np.sum(intersection) + SMOOTH) / (np.sum(y_true) + SMOOTH)
    precision = (np.sum(intersection) + SMOOTH) / (np.sum(y_pred) + SMOOTH)
    print('Average recall {:.4f}, precision {:.4f}'.format(recall, precision))

    for threshold in range(0, 10, 2):
        threshold = threshold / 10.0
        pred_mask = (y_pred > threshold).astype(np.uint8)
        intersection = y_true * pred_mask
        recall = (np.sum(intersection) + SMOOTH) / (np.sum(y_true) + SMOOTH)
        precision = (np.sum(intersection) + SMOOTH) / (np.sum(y_pred) + SMOOTH)
        print("Threshold {}: recall {:.4f}, precision {:.4f}".format(threshold, recall, precision))

    regions = measure.regionprops(measure.label(y_pred))
    print('Num of pred regions {}'.format(len(regions)))

    if DEBUG_PLOT_WHEN_EVALUATING_SEG:
        plot_comparison(X, y_true, y_pred)
        plot_slices(X)
        plot_slices(y_true)
        plot_slices(y_pred)
def single_out_annotation(base_image, small_cc_image):
    """ extracting individual annotations :
    starting from potential annotation + noise, we remove the noise and
     consolidate annotation area, then return the coordinates of center of
     potential annotations"""
    import numpy as np

    # remove small stuff
    filtered_small_cc, removed_small_cc_small = remove_small_ccomponents(
        small_cc_image, size_closing=5, hist_thres=120)
    # plot_image(removed_small_cc_small)

    # dilate
    from skimage.morphology import binary_dilation, disk
    dilation_radius = 10
    small_cc_cleaned_mask = binary_dilation(filtered_small_cc, disk(dilation_radius))
    # plot_image(small_cc_cleaned_mask)

    # label connected components
    from skimage.morphology import label
    from skimage.measure import regionprops

    markers, n_label = label(small_cc_cleaned_mask, connectivity=1, background=0, return_num=True)

    # for each cc, defines a region
    image_for_region = (base_image*255).astype(np.uint8)
    region_prop = regionprops(markers, image_for_region)

    # for each region, do something

    return region_prop
Example #26
def analyze_image(parent_conn, frame, image, bgnd_im, bgnd_mask):
    image = cv2.cvtColor(image, cv2.cv.CV_RGB2GRAY)
    im_diff = np.abs(bgnd_im - image.astype(np.double))
    #mask = im_diff >20;
    #mask2 = cv2.adaptiveThreshold(image.astype(np.uint8),1,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,61,35)
    mask2 = cv2.adaptiveThreshold(im_diff.astype(np.uint8),1,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,61,-10)
    L = label(mask2 | bgnd_mask)
    L = morphology.remove_small_objects(L, min_size=10, connectivity=2)
    image[L==0] = 0
    
    props = regionprops(L)

    coord_x = [x.centroid[0] for x in props]
    coord_y = [x.centroid[1] for x in props]
    area = [x.area for x in props]
    perimeter = [x.perimeter for x in props]
    major_axis = [x.major_axis_length for x in props]
    minor_axis = [x.minor_axis_length for x in props]
    eccentricity = [x.eccentricity for x in props]
    compactness = [x.perimeter**2 / x.area for x in props]
    orientation = [x.orientation for x in props]
    solidity = [x.solidity for x in props]
    
    props_list = [coord_x, coord_y, area, perimeter, 
               major_axis, minor_axis, eccentricity, compactness, 
               orientation, solidity]

    
    parent_conn.send({'frame': frame, 'image': image, 'props_list' :props_list})
    parent_conn.close()
    def get_properties(self, imbin, img):
        props = {}
        cell_labels = label(imbin, neighbors=4, background=0)
        cell_labels = cell_labels - cell_labels.min()
        properties = measure.regionprops(cell_labels, img)
        areas = [0] + [pr.area for pr in properties]
        convex_areas = [1] + [pr.convex_area for pr in properties]
        mean_intensities = [0.0] + [pr.mean_intensity for pr in properties]
        eccentricities = [0.0] + [pr.eccentricity for pr in properties]
        centers = [(0.0, 0.0)] + [pr.centroid for pr in properties]
        perimeters = [1.0] + [pr.perimeter for pr in properties]
        a = np.array(areas)
        b = np.array(perimeters)
        b[b == 0.0] = 1.0
        circ = 2 * np.sqrt(np.pi) * a / b
        c = np.array(convex_areas)
        cc_ar = a.astype(np.dtype('float')) / c.astype(np.dtype('float'))
        for i in range(1, cell_labels.max() + 1):
            props[i] = {
                'area': areas[i],
                'mean_intensity': mean_intensities[i],
                'eccentricity': eccentricities[i],
                'center': centers[i],
                'circularity': circ[i],
                'cc_ar': cc_ar[i],
            }
        return props
Example #28
def largest_region(imData):

    belowMeanFilter = np.where(imData > np.mean(imData), 0., 1.0)
    dilated = morphology.dilation(belowMeanFilter, np.ones((3, 3)))
    regionLabels = (belowMeanFilter * measure.label(dilated)).astype(int)

    # calculate common region properties for each region within the segmentation
    regions = measure.regionprops(regionLabels)
    areas = [(None
              if sum(belowMeanFilter[regionLabels == region.label]) * 1.0 / region.area < 0.50
              else region.filled_area)
             for region in regions]

    if len(areas) > 0:

        regionMax = regions[np.argmax(areas)]

        # trim image to the max region
        regionMaxImg = trim_image(
            np.minimum(
                imData*np.where(regionLabels == regionMax.label, 1, 255),
                255))

        # rotate
        angle = intertial_axis(regionMaxImg)[2]
        rotatedRegionMaxImg = ndimage.rotate(regionMaxImg, np.degrees(angle))
        rotatedRegionMaxImg = trim_image(trim_image(rotatedRegionMaxImg, 0), 255)

    else:
        regionMax = None
        rotatedRegionMaxImg = None
        angle = 0

    return regionMax, rotatedRegionMaxImg, angle, regionLabels, regions, areas, belowMeanFilter, dilated
Example #29
def mouse_centroid(im, previous_centroid):
    """
    Find mouse's centroid in a single image
    
    Parameters:
        im = image to analyze (numpy array)
        
        previous_centroid = coordinates of the mouse in the previous frame
        
    Returns:
        Coordinates of the mouse's centroid
    """
    original = copy(im)
    im = im < filters.threshold_otsu(im) * 0.2
    distance = ndimage.distance_transform_edt(im)
    centers = (distance > 0.8 * distance.max())
    if not centers.any():
        return previous_centroid
    labels = label(centers)
    centroids = [r.weighted_centroid for r in regionprops(labels, original)]
    if len(centroids) == 1:
        return list(centroids[0])
    elif len(centroids) > 1:
        d = lambda a, b: ((a[0] - b[0])**2 + (a[1] - b[1])**2)**0.5
        dists = [d(c, previous_centroid) for c in centroids]
        d_idx = np.array(dists == np.min(dists))
        return list(np.array(centroids)[d_idx][0])
Example #30
    def locate(i):
        """
        Median subtract each hologram, convolve with Mexican hat kernel,
        then smooth the absolute value of the convolution, and use
        Otsu's thresholding to segment the image into specimens.
        Record the time, x and y centroids, and some intensity features
        within each segment.
        """
        median_sub_holo = hologram_cube[i, ...] - median_holo
        conv_holo = convolve_fft(median_sub_holo,
                                 MexicanHat2DKernel(convolution_kernel_radius),
                                 fftn=fft2, ifftn=ifft2)
        smooth_abs_conv = gaussian_filter(np.abs(conv_holo),
                                          gaussian_kernel_radius)

        thresh = threshold_otsu(smooth_abs_conv - np.median(smooth_abs_conv))
        # thresh = threshold_yen(smooth_abs_conv - np.median(smooth_abs_conv))

        masked = np.ones_like(smooth_abs_conv)
        masked[smooth_abs_conv <= thresh] = 0

        label_image = label(masked)
        regions = regionprops(label_image, smooth_abs_conv)
        for region in regions:
            centroid = region.weighted_centroid
            pos = (i, centroid[0], centroid[1],
                   region.max_intensity, region.mean_intensity)
            positions.append(pos)
Example #31
            mu_merge_cutoff=mu_merge_cutoff,
            del_mu=del_mu,
            mu_cutoff=mu_cutoff,
            EUV_CHD_sep=EUV_CHD_sep)
        # add synchronic clustering method to final map
        synchronic_map.append_method_info(cluster_method)

        # area constraint
        chd_labeled = measure.label(synchronic_map.chd,
                                    connectivity=2,
                                    background=0,
                                    return_num=True)

        # get area
        chd_area = [
            props.area for props in measure.regionprops(chd_labeled[0])
        ]
        chd_area_ezseg[date_ind] = np.sum(chd_area)

        #### STEP SIX: K-MEANS DETECTION ####
        map = np.where(synchronic_map.data == -9999, 0, synchronic_map.data)
        map2 = np.log(map)
        map2 = np.where(map2 == -np.inf, 0, map2)

        arr = np.zeros((full_map_nxcoord * full_map_nycoord, 3))
        arr[:, 0] = idx_col_flt * weight
        arr[:, 1] = idx_row_flt * weight
        arr[:, 2] = map2.flatten() * 2
        psi_chd_map, psi_ar_map, chd_labeled, ar_labeled = \
            ml_funcs.kmeans_detection(synchronic_map.data, map2, arr, N_CLUSTERS,
                                      full_map_nycoord, full_map_nxcoord, full_map_x, full_map_y)
img_gray = cv2.resize(
    img_gray,
    (int(img_gray.shape[1] * 40 / 100), int(img_gray.shape[0] * 40 / 100)))
img_rgb = cv2.resize(
    img_rgb,
    (int(img_rgb.shape[1] * 40 / 100), int(img_rgb.shape[0] * 40 / 100)))
# print(img_rgb.shape)

# -----

segments = slic(img_rgb,
                n_segments=100,
                compactness=20,
                sigma=5,
                convert2lab=True)
regions = regionprops(segments)

for index, props in enumerate(regions):
    cx, cy = props.centroid  # centroid of the superpixel
    # changes will be made here
# ------

# show the output of SLIC
# draw the segment boundaries with matplotlib using the information from segments
fig = plt.figure("Superpixels -- %d segments" % (100))
ax = fig.add_subplot(1, 1, 1)
ax.imshow(mark_boundaries(img_rgb, segments, color=(0, 0, 0)))
plt.axis("off")

# show the plots
plt.show()
def edge_detection(frames, n_samples, method='canny', track=False):
    """
    To detect the edges of the wells, fill and label them to
    determine their centroids.

    Parameters
    -----------
    frames : Array
        The frames to be processed and determine the
        sample temperature from.
    n_samples : Int
        The number of samples in the input video.
    method : String
        Edge detection algorithm to be used
    track : Boolean
        enables spatial tracking (real-time support to be implemented in the future)

    Returns
    --------
    labeled_samples : Array
        All the samples in the frame are labeled
        so that they can be used as props to get pixel data.
    """

    # when enable spatial tracking
    if track:
        # type cast to ndarray
        if not isinstance(frames, np.ndarray):
            frames_array = np.array(frames)
        else:
            frames_array = frames

        video_length = len(frames_array)
        video_with_label = np.empty(frames_array.shape, dtype=int)
        background = frames_array.mean(0)
        alpha = 2  # intensity threshold
        counter = 0
        missing = 0
        boolean_mask = None
        for time in range(video_length):
            # remove background proportional to time in video
            img_lin_bg = frames_array[time] - background * time / (
                video_length - 1)
            # apply sobel filter
            edges_lin_bg = filters.sobel(img_lin_bg)
            #  booleanize with certain threshold alpha
            edges_lin_bg = edges_lin_bg > edges_lin_bg.mean() * alpha
            # erode edges, fill in holes
            edges_lin_bg = ndimage.binary_erosion(edges_lin_bg,
                                                  mask=boolean_mask)
            edges_lin_bg = binary_fill_holes(edges_lin_bg)

            # find progressive background
            if time == 0:
                progressive_background = 0
            else:
                progressive_background = frames_array[0:time].mean(0)
            # remove background
            img_prog_bg = frames_array[time] - progressive_background
            # apply sobel filter
            edges_prog_bg = filters.sobel(img_prog_bg)
            #  booleanize with certain threshold alpha
            edges_prog_bg = edges_prog_bg > edges_prog_bg.mean() * alpha
            # erode edges, fill in holes
            edges_prog_bg = ndimage.binary_erosion(edges_prog_bg,
                                                   mask=boolean_mask)
            edges_prog_bg = binary_fill_holes(edges_prog_bg)

            # combining
            combined_samples = edges_lin_bg + edges_prog_bg
            # make the boolean mask from the first frame
            if time == 0:
                boolean_mask = ~ndimage.binary_erosion(combined_samples)
                # boolean_mask = ~combined_samples

            # labeled_samples = ndimage.binary_erosion(labeled_samples, mask=boolean_mask)
            # labeled_samples = binary_fill_holes(labeled_samples, structure=np.ones((2,2)))

            # remove stray pixels and label
            combined_samples = remove_small_objects(combined_samples,
                                                    min_size=2)
            labeled_samples = label(combined_samples)

            # confirm matching labels vs n_samples
            unique, counts = np.unique(labeled_samples, return_counts=True)
            label_dict = dict(zip(unique, counts))

            #  in case of missing label
            if len(label_dict) < n_samples + 1:
                trial = 0
                # keep eroding to separate the samples
                while len(label_dict) < n_samples + 1 and trial < 10:
                    labeled_samples = ndimage.binary_erosion(labeled_samples,
                                                             mask=boolean_mask)
                    labeled_samples = label(labeled_samples)
                    unique, counts = np.unique(labeled_samples,
                                               return_counts=True)
                    label_dict = dict(zip(unique, counts))
                    trial += 1
                # print('missing:', time)
                missing += 1

            # in case of extra labels identified
            if len(label_dict) > n_samples + 1:
                trial = 0
                # keep removing smaller labels until matching with n_samples
                while len(label_dict) > n_samples + 1 and trial < 10:
                    temp = min(label_dict.values())
                    labeled_samples = remove_small_objects(labeled_samples,
                                                           min_size=temp + 1)
                    unique, counts = np.unique(labeled_samples,
                                               return_counts=True)
                    label_dict = dict(zip(unique, counts))
                    trial += 1

                # print('excess:', time, val)
                counter += 1

            video_with_label[time] = labeled_samples
        # print(counter)
        # print(missing)
        return video_with_label

    # when disable spatial tracking (default)
    else:
        labeled_samples = None
        size = None
        thres = None
        props = None

        # use canny edge detection method
        if method == 'canny':
            for size in range(15, 9, -1):
                for thres in range(1500, 900, -100):
                    edges = feature.canny(frames[0] / thres)

                    # fig = plt.figure(2)  # for debugging
                    # plt.imshow(edges)
                    # plt.show()

                    filled_samples = binary_fill_holes(edges)
                    cl_samples = remove_small_objects(filled_samples,
                                                      min_size=size)
                    labeled_samples = label(cl_samples)
                    props = regionprops(labeled_samples,
                                        intensity_image=frames[0])

                    # fig = plt.figure(3)
                    # plt.imshow(filled_samples)  # for debugging

                    if len(props) == n_samples:
                        break
        #             if thres == 1000 and len(props) != n_samples:
        #                 print('Not all the samples are being recognized with
        #                 the set threshold range for size ',size)
                if len(props) == n_samples:
                    break
            if size == 10 and thres == 1000 and len(props) != n_samples:
                print('Not all the samples are being recognized with the set \
                    minimum size and threshold range')
            # plt.show()  # for debugging
            return labeled_samples

        # use sobel edge detection method
        if method == 'sobel':
            for size in range(15, 9, -1):
                # use sobel
                edges = filters.sobel(frames[0])
                edges = edges > edges.mean() * 3  # booleanize data

                # fig = plt.figure(2)  # for debugging
                # plt.imshow(edges)
                # plt.colorbar()

                #  fill holes and remove noise
                filled_samples = binary_fill_holes(edges)
                cl_samples = remove_small_objects(filled_samples,
                                                  min_size=size)
                labeled_samples = label(cl_samples)
                props = regionprops(labeled_samples, intensity_image=frames[0])

                # fig = plt.figure(3)
                # plt.imshow(filled_samples)  # for debugging

                if len(props) == n_samples:
                    break
            if size == 10 and len(props) != n_samples:
                print('Not all the samples are being recognized with the set \
                    minimum size and threshold range')
            # plt.show()  # for debugging
            return labeled_samples
Example #34
def findalltags(im, im_name, DEBUG):
    print('start find IP tags..........................')
    lower_hue_low = [23, 100, 100]
    lower_hue_high = [32, 255, 255]

    hsv_image = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    kernel_size = (5, 5)
    mask_lower = create_hue_mask(hsv_image, lower_hue_low, lower_hue_high,
                                 kernel_size)
    if DEBUG:
        result_image_path = os.path.join(DEBUG_DIR, im_name + "_IPtags.jpg")
        cv2.imwrite(result_image_path, mask_lower)
    labels = measure.label(mask_lower, connectivity=2)
    pro = measure.regionprops(labels)

    # find all tags
    tagboxes = []
    tagimages = []
    tagmasks = []
    uimages = []
    uboxes = []
    umasks = []

    im_copy = im.copy()
    for p in pro:
        (x1, y1, x2, y2) = p.bbox
        #print('tagcccccc:', (x1, y1, x2, y2), (y2 - y1), (x2 - x1), p.area*1.0/((x2-x1)*(y2-y1)))
        if 230 >= (y2 - y1) >= 100 and 40 >= (
                x2 - x1) >= 20 and p.area * 1.0 / ((x2 - x1) *
                                                   (y2 - y1)) >= 0.58:
            print('tag:', (x1, y1, x2, y2), (y2 - y1), (x2 - x1),
                  p.area * 1.0 / ((x2 - x1) * (y2 - y1)))
            tagboxes.append(p.bbox)
        if 230 >= (y2 - y1) >= 100 and 75 >= (
                x2 - x1) > 40 and 0.4 < p.area * 1.0 / ((x2 - x1) *
                                                        (y2 - y1)) < 0.9:
            print('IP tag width!!!!!!!!!!!!!!!!!!!!!!!')
            x1, x2 = findminbox(mask_lower[x1:x2, y1:y2], x1, x2, 'ip')
            if 51 >= (x2 - x1) >= 20:
                print('tag:', (x1, y1, x2, y2), (y2 - y1), (x2 - x1),
                      p.area * 1.0 / ((x2 - x1) * (y2 - y1)))
                tagboxes.append((x1, y1, x2, y2))

    for i, box in enumerate(tagboxes):
        (x1, y1, x2, y2) = box
        x1 = 0 if x1 - 1 < 0 else x1 - 1
        x2 = im.shape[0] if x2 + 1 > im.shape[0] else x2 + 1
        tagimages.append(im[x1:x2, y1 - 1:y2 + 1, :])
        tagmasks.append(mask_lower[x1:x2, y1 - 1:y2 + 1])
        # tagimages.append(im[x1:x2, y1:y2, :])
        # tagmasks.append(mask_lower[x1:x2, y1:y2])
        if DEBUG:
            result_image_path = os.path.join(
                DEBUG_DIR, im_name + '_' + str(i) + '_' + 'tag.jpg')
            # cv2.imwrite(result_image_path, im[x1:x2, y1-5:y2+5, :])
            cv2.imwrite(result_image_path, im[x1:x2, y1:y2, :])
        cv2.rectangle(im_copy, (y1, x1), (y2, x2), (0, 0, 255), 3)

    print('start find U tags...........................')
    lower_hue_low = [23, 50, 45]
    lower_hue_high = [33, 255, 255]

    hsv_image = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    kernel_size = (5, 5)
    mask_lower = create_hue_mask(hsv_image, lower_hue_low, lower_hue_high,
                                 kernel_size)
    if DEBUG:
        result_image_path = os.path.join(DEBUG_DIR, im_name + "_Utags.jpg")
        cv2.imwrite(result_image_path, mask_lower)
    labels = measure.label(mask_lower, connectivity=2)
    pro = measure.regionprops(labels)
    i = 0
    for p in pro:
        (x1, y1, x2, y2) = p.bbox
        if y1 > 80 and 39 <= y2 - y1 <= 80 and 31 <= x2 - x1 <= 45 and 2.3 > (
                y2 - y1) * 1.0 / (x2 - x1) > 0.9 and p.area * 1.0 / (
                    (x2 - x1) * (y2 - y1)) >= 0.65:
            print('u:', (x1, y1, x2, y2), y2 - y1, x2 - x1,
                  p.area * 1.0 / ((x2 - x1) * (y2 - y1)))
        elif y1 > 80 and 45 <= y2 - y1 <= 80 and 45 < x2 - x1 <= 75 and 1.7 > (
                y2 - y1) * 1.0 / (x2 - x1) > 0.6 and 1 > p.area * 1.0 / (
                    (x2 - x1) * (y2 - y1)) >= 0.4:
            print('U tag width!!!!!!!!!!!!!!!!!!!!!!!')
            x1, x2 = findminbox(mask_lower[x1:x2, y1:y2], x1, x2, 'u')
            if (x2 - x1) <= 30 or (x2 - x1) >= 60:
                continue
            else:
                print('u:', (x1, y1, x2, y2), y2 - y1, x2 - x1,
                      p.area * 1.0 / ((x2 - x1) * (y2 - y1)))
        else:
            continue
        i += 1
        uboxes.append((x1, y1, x2, y2))
        x = 0 if x1 - 1 <= 0 else x1 - 1
        y = 0 if y1 - 1 <= 0 else y1 - 1
        uimages.append(im[x:x2 + 1, y:y2 + 1, :])
        umasks.append(mask_lower[x:x2 + 1, y:y2 + 1])
        # uimages.append(im[x1:x2, y1:y2, :])
        # umasks.append(mask_lower[x1:x2, y1:y2])
        cv2.rectangle(im_copy, (y1, x1), (y2, x2), (0, 0, 255), 3)

    if DEBUG:
        result_image_path = os.path.join(DEBUG_DIR, im_name + ".jpg")
        cv2.imwrite(result_image_path, im_copy)

    return tagimages, tagmasks, tagboxes, uimages, umasks, uboxes
def figure_7():
    """
    Figure 7. Visual representation of each track labeled by the
    segmentation algorithm, when using the ISODATA binarization
    (threshold: ~0.475). The numbers show how many tracks were counted
    in each region. Magenta lines: regions representing only one track.
    Green dots: extremity pixels. Green lines: Euclidean distance
    between extremity pixels. Blue paths: route between extremity
    pixels.
    """

    image = imread('orig_figures/dur_grain1apatite01.tif', as_grey=True)
    img_bin = _processed_image(image)

    _, x_px = image.shape
    x_um = _calibrate_aux(len_px=x_px)

    props = regionprops(label(img_bin))
    img_skel = skeletonize_3d(img_bin)
    rows, cols = np.where(img_skel != 0)

    img_rgb = gray2rgb(img_as_ubyte(image))
    img_rgb[rows, cols] = [255, 0, 255]

    # Checking if the folder 'figures' exists.
    if not os.path.isdir('./figures'):
        os.mkdir('./figures')

    fig = plt.figure(figsize=(12, 10))
    host = host_subplot(111, axes_class=mpl_aa.Axes)
    plt.subplots_adjust(bottom=0.2)

    for prop in props:
        obj_info = []
        aux = skeletonize_3d(prop.image)
        trk_area, trk_px = ds.tracks_classify(aux)
        count_auto = ds.count_by_region(ds.regions_and_skel(prop.image))

        x_min, y_min, x_max, y_max = prop.bbox
        obj_info.append(
            [prop.centroid[0], prop.centroid[1],
             str(count_auto[2][0][0])])
        for obj in obj_info:
            host.text(obj[1],
                      obj[0],
                      obj[2],
                      family='monospace',
                      color='yellow',
                      size='x-small',
                      weight='bold')

        if trk_area is not None:
            for px in trk_px:
                route, _ = route_through_array(~aux, px[0], px[1])

                for rx in route:
                    host.scatter(y_min + rx[1],
                                 x_min + rx[0],
                                 c='b',
                                 s=SCATTER_SIZE + 25)
                for p in px:
                    host.scatter(y_min + p[1],
                                 x_min + p[0],
                                 c='g',
                                 s=SCATTER_SIZE + 25)

                rows, cols = line(x_min + px[0][0], y_min + px[0][1],
                                  x_min + px[1][0], y_min + px[1][1])
                img_rgb[rows, cols] = [0, 255, 0]

    host.imshow(img_rgb, cmap='gray')
    host.axis['bottom', 'left'].toggle(all=False)

    guest = host.twiny()
    new_fixed_ax = guest.get_grid_helper().new_fixed_axis
    guest.axis['bottom'] = new_fixed_ax(loc='bottom',
                                        axes=guest,
                                        offset=(0, OFFSET))
    guest.axis['top'].toggle(all=False)
    guest.set_xlabel(r'$\mu m$')
    guest.set_xlim(0, x_um)

    plt.savefig('figures/Fig_07' + SAVE_FIG_FORMAT, bbox_inches='tight')
    plt.close()

    return None
Example #36
    def segment_chars(self, filename):

        plate_detector = PlateDetector2()
        plate_detector.detect(filename)

        characters_list = []
        column_list = []
        image_outputs_list = []

        counter = 0
        for plate in plate_detector.get_found_plates():
            characters = []
            column = []
            # The invert was done to convert black pixels to white and vice versa
            license_plate = np.invert(plate)

            labelled_plate = measure.label(license_plate)

            fig, ax1 = plt.subplots(1)
            ax1.imshow(license_plate, cmap="gray")
            # the character_dimensions tuple below assumes that a character's
            # height is between 30% and 90% of the plate's height and its width
            # is between 3.5% and 14% of the plate's width;
            # this eliminates some noise regions
            character_dimensions = (0.3 * license_plate.shape[0],
                                    0.90 * license_plate.shape[0],
                                    0.035 * license_plate.shape[1],
                                    0.14 * license_plate.shape[1])
            min_height, max_height, min_width, max_width = character_dimensions

            for regions in regionprops(labelled_plate):
                y0, x0, y1, x1 = regions.bbox
                region_height = y1 - y0
                region_width = x1 - x0

                if min_height < region_height < max_height and min_width < region_width < max_width:
                    roi = license_plate[y0:y1, x0:x1]

                    # draw a red bordered rectangle over the character.
                    rect_border = patches.Rectangle((x0, y0),
                                                    x1 - x0,
                                                    y1 - y0,
                                                    edgecolor="red",
                                                    linewidth=2,
                                                    fill=False)
                    ax1.add_patch(rect_border)

                    # resize the characters to 20X20 and then append each character into the characters list
                    resized_char = resize(roi, (20, 20),
                                          mode='constant',
                                          anti_aliasing=False)
                    characters.append(resized_char)

                    # this is just to keep track of the arrangement of the characters
                    column.append(x0)
            # print(characters)
            characters_list.append(characters)
            column_list.append(column)
            output_file_name = "output/%s" % datetime.now().strftime(
                "%Y_%m_%d %H_%M_%S")
            if counter != 0:
                output_file_name += "_%d" % counter
            image_outputs_list.append(output_file_name)
            plt.savefig("%s.jpg" % output_file_name, cmap='gray')
            plt.show()
            counter += 1

        return characters_list, column_list, image_outputs_list
Example #37
def label_image(folder, image_file, area_thresh=50, figsize=(10, 10),
                show=False, imname=None):
    """Filters out small objects from binary image and finds cell features.

    Similar to clean_image, but operates on an input binary image rather than the
    raw image file. Run binary_image on raw image file before feeding into
    label_image.

    Parameters
    ----------
    folder : string
        Directory containing image_file
    image_file : string
        Filename of image to be analyzed.
    figsize : tuple of int or float
        Size of output figure
    show : bool
        If True, outputs image to Jupyter notebook display
    area_thresh : int or float
        Minimum square pixels for object to be included in final image
    imname : string
        Desired name of output file. Defaults to 'short_<image_file>'

    Returns
    -------
    short_image : numpy.ndarray
        Output binary image. All small objects (area < area_thresh) are
        filtered out
    short_props : list of RegionProperties
        Properties of every object retained in the output image.

    Examples
    --------

    """

    fname = '{}/{}'.format(folder, image_file)
    test_image = sio.imread(fname)
    labels = label(test_image)
    props = regionprops(labels)

    short_image = np.zeros(labels.shape)
    counter = 0
    skip = 0
    short_props = []
    for i in range(0, len(props)):
        area = props[i]['area']
        if area < area_thresh:
            skip = skip + 1
        else:
            short_props.append(props[i])
            test_coords = props[i]['coords'].tolist()
            for coord in test_coords:
                short_image[coord[0], coord[1]] = True
            counter = counter + 1

    if show:
        fig, ax = plt.subplots(figsize=figsize)
        ax.imshow(short_image, cmap='gray')
        ax.axis('off')

    short_image = short_image.astype('uint8')*255

    if imname is None:
        output = "short_{}".format(image_file)
    else:
        output = imname

    sio.imsave(folder+'/'+output, short_image)

    return short_image, short_props
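
# Hedged usage sketch for label_image (placeholder folder and filename, not
# from the original project): feed it a pre-binarized image and keep only
# objects of at least 50 square pixels.
short_img, short_props = label_image('data', 'cells_binary.tif',
                                     area_thresh=50, show=False)
print('objects kept after size filtering:', len(short_props))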
Example #38
def clean_image(folder, image_file, threshold=2, figsize=(10, 10),
                ajar=False, close=False, show=False,
                area_thresh=50, channel=None, imname=None):
    """Create binary image from input image with optional opening step.

    Parameters
    ----------
    folder : string
        Directory containing image_file
    image_file : string
        Filename of image to be analyzed.
    threshold : int or float
        Intensity threshold of binary image.
    figsize : tuple of int or float
        Size of output figure
    ajar : bool
        If True, opens the binary image by performing an erosion followed by
        a dilation.
    close : bool
        If True, closes the binary image by performing a dilation followed by
        an erosion.
    show : bool
        If True, outputs image to Jupyter notebook display
    area_thresh : int or float
        Minimum square pixels for object to be included in final image
    channel : int
        Channel of image to read in for multichannel images e.g.
        testim[:, :, channel]
    imname : string
        Desired name of output file. Defaults to 'short_{image_file}'.

    Returns
    -------
    short_image : numpy.ndarray
        Output binary image. All small objects (area < area_thresh) are
        filtered out
    short_props : list of RegionProperties
        Properties of every object retained in the output image.

    Examples
    --------

    """

    fname = '{}/{}'.format(folder, image_file)
    if channel is None:
        test_image = sio.imread(fname)
    else:
        test_image = sio.imread(fname)[:, :, channel]
    bi_image = test_image > threshold

    if ajar is True:
        op_image = opening(bi_image, square(3))
    else:
        op_image = bi_image

    if close is True:
        op_image = closing(op_image, square(3))

    op_image = op_image.astype('uint8')*255

#     if default_name:
#         output = "clean_{}.png".format(image_file.split('.')[0])
#     else:
#         output = fname

#     sio.imsave(folder+'/'+output, op_image)

    # Labelling and cleaning up image.
    test_image = op_image
    labels = label(test_image)
    props = regionprops(labels)

    short_image = np.zeros(labels.shape)
    counter = 0
    skip = 0
    short_props = []
    for i in range(0, len(props)):
        area = props[i]['area']
        if area < area_thresh:
            skip = skip + 1
        else:
            short_props.append(props[i])
            test_coords = props[i]['coords'].tolist()
            for coord in test_coords:
                short_image[coord[0], coord[1]] = True
            counter = counter + 1

    if show:
        fig, ax = plt.subplots(figsize=figsize)
        ax.imshow(short_image, cmap='gray')
        ax.axis('off')

    short_image = short_image.astype('uint8')*255

    if imname is None:
        output = "short_{}".format(image_file)
    else:
        output = imname

    sio.imsave(folder+'/'+output, short_image)

    return short_image, short_props
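
# Hedged usage sketch for clean_image (placeholder folder and filename):
# threshold channel 0 of a raw multichannel image at intensity 2, open the
# mask to detach small debris, and drop objects under 50 square pixels.
clean_img, clean_props = clean_image('data', 'cells_raw.tif', threshold=2,
                                     ajar=True, close=False,
                                     area_thresh=50, channel=0)
print('objects kept:', len(clean_props))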
Example #39
cv2.imshow('Image after morphological transformation1', sample_step1)
cv2.imshow('Image after morphological transformation2', sample_step2)
cv2.imshow('Image after morphological transformation3', sample_step3)
cv2.imshow('Image after morphological transformation4', sample_step4)

cv2.waitKey(0)
cv2.destroyAllWindows()

# Find connected pixels and compose them into objects
labels = measure.label(sample_step4, connectivity=2)  # 8-connectivity

# Calculate features for each object;
# For task3, since we want to differentiate
# between circular and oval shapes, the major and minor axes may help; we
# will use also the centroid to annotate the final result
properties = measure.regionprops(labels, intensity_image=sample_h)

# *** Calculate features for each object:
# - some geometrical feature 1 (dimension 1)
# - some intensity/color-based feature 2 (dimension 2)
features = np.zeros((len(properties), 2))

for i in range(0, len(properties)):
    """
    if properties[i].perimeter > 2000:
        print(properties[i].perimeter)
        print(properties[i].bbox)
        print(i)
    """
    features[i, 0] = properties[i].perimeter
    # img = properties[i].intensity_image
    plt.plot(threshs,d)
    plt.show()
    plt.close()

    plt.plot(threshs[:-1],np.diff(d))
    plt.show()
    plt.close()

    thresh = threshs[np.diff(d).argmax()+1]*1.1
    bin = fdata > thresh
    labeled, n = ndimage.label(bin)

    xy = np.zeros((0, 2))
    areas = np.zeros((0, 1))
    for region in regionprops(labeled):
        if region.area > 100:
            xy = np.vstack((xy, region.centroid))
            areas = np.vstack((areas, region.area*nmpx**2))

    num = xy.shape[0]
    particles[i] = num

    if num == 2:
        c1 = measure.find_contours(labeled == 2, 0)[0]
        c2 = measure.find_contours(labeled == 1, 0)[0]
        d = find_shortest_distance(c1,c2)


    fig = plt.figure()
    #fig.set_size_inches(1, 1)
Example #41
    # Grayscale and Adaptive threshold
    img_blood_th = 1 - threshold_adaptive(
        image=rgb2gray(img_blood), block_size=29, offset=0.02)
    plot_image(img_blood_th)

    # Closing applied to bridge small gaps in the thresholded cells
    img_blood_opening = closing(image=img_blood_th, selem=disk(1))

    img_filled = ndi.binary_fill_holes(img_blood_opening)
    # plot_image(img_blood_opening)
    plot_image(img_filled)

    # Make image labeled and get regions
    img_blood_lab = label(img_filled)
    regions = regionprops(img_blood_lab)
    """
    ratios = []
    for region in regions:
        ratios.append(region.area)

    n, bins, patches = plt.hist(ratios, bins=10)
    plt.show()
    """

    # Extract only bigger circles and half-circles
    regions_blood = []
    for region in regions:
        if region.area > 200:
            regions_blood.append(region)
Example #42
    def generate_submission(self):
        # load and shuffle filenames
        test_filenames = os.listdir(TEST_IMAGES_PATH)

        try:
            test_filenames = test_filenames[:int(self.debug_sample_size / 10)]
            logging.warning("This submission file is incomplete for debug purpose.")
        except Exception:
            logging.info('n test samples: %d', len(test_filenames))
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # create test generator with predict flag set to True
            test_gen = generator(TEST_IMAGES_PATH,
                                 test_filenames,
                                 None,
                                 batch_size=20,
                                 image_size=self.image_size,
                                 shuffle=False,
                                 predict=True)

            logging.info("Generating submission...")
            # create submission dictionary
            submission_dict = {}
            # loop through testset
            for imgs, filenames in test_gen:
                # predict batch of images
                preds = self.model.predict(imgs)
                # loop through batch
                for pred, filename in zip(preds, filenames):
                    # resize predicted mask
                    pred = resize(pred, (1024, 1024), mode='reflect')
                    # threshold predicted mask
                    comp = pred[:, :, 0] > 0.5
                    # apply connected components
                    comp = measure.label(comp)
                    # apply bounding boxes
                    predictionString = ''
                    for region in measure.regionprops(comp):
                        # retrieve x, y, height and width
                        y, x, y2, x2 = region.bbox
                        height = y2 - y
                        width = x2 - x
                        # proxy for confidence score
                        conf = np.mean(pred[y:y + height, x:x + width])
                        # add to predictionString
                        predictionString += str(conf) + ' ' + str(x) + ' ' + str(y) + ' ' + str(width) + ' ' + str(
                            height) + ' '
                    # add filename and predictionString to dictionary
                    filename = filename.split('.')[0]
                    submission_dict[filename] = predictionString
                # stop if we've got them all
                if len(submission_dict) >= len(test_filenames):
                    break

            # save dictionary as csv file
            logging.info("Persisting submission...")
            sub = pd.DataFrame.from_dict(submission_dict, orient='index')
            sub.index.names = ['patientId']
            sub.columns = ['PredictionString']
            sub.to_csv(SUBMISSIONS_FOLDER_PATH + self.model_name + '_submission.csv')
            logging.info("Submission file is ready, good luck!")
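
# Illustrative helper (an assumption, not part of the original class) that
# parses the space-separated "confidence x y width height" groups composed
# above back into a list of boxes; handy for sanity-checking a submission row.
def parse_prediction_string(prediction_string):
    tokens = prediction_string.split()
    boxes = []
    for j in range(0, len(tokens), 5):
        conf, x, y, width, height = (float(t) for t in tokens[j:j + 5])
        boxes.append({'conf': conf, 'x': x, 'y': y,
                      'width': width, 'height': height})
    return boxes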
def figure_10():
    """
    Figure 10. Counting tracks in Figure 2(a). MLSS binarization creates
    artifacts in the resulting binary image, thus misleading the track
    counting algorithm, which counts 115 tracks. (a) MLSS binary image
    obtained from Figure 2, presenting the generated artifacts. (b)
    Results of the automatic counting algorithm. Manual counting: 54
    tracks. Automatic counting using ISODATA, Li, Otsu, and Yen
    binarizations, respectively: 41, 43, 41, and 44 tracks.
    """

    image = imread('orig_figures/dur_grain1mica01.tif', as_grey=True)

    filename = 'auto_count/mlss/dur_grain1mica01.csv'
    aux = pd.read_csv(filename)
    img_bin = binary_fill_holes(
        remove_small_objects(np.asarray(aux, dtype='bool')))

    _, x_px = img_bin.shape
    x_um = _calibrate_aux(len_px=x_px)

    # Checking if the folder 'figures' exists.
    if not os.path.isdir('./figures'):
        os.mkdir('./figures')

    # Figure 10(a).
    fig = plt.figure(figsize=(12, 10))
    host = host_subplot(111, axes_class=mpl_aa.Axes)
    plt.subplots_adjust(bottom=0.2)

    host.imshow(img_bin, cmap='gray')
    host.axis['bottom', 'left'].toggle(all=False)

    guest = host.twiny()
    new_fixed_ax = guest.get_grid_helper().new_fixed_axis
    guest.axis['bottom'] = new_fixed_ax(loc='bottom',
                                        axes=guest,
                                        offset=(0, OFFSET))
    guest.axis['top'].toggle(all=False)
    guest.set_xlabel('$\mu m$')
    guest.set_xlim(0, x_um)
    plt.savefig('figures/Fig_10a' + SAVE_FIG_FORMAT, bbox_inches='tight')
    plt.close()

    # Figure 10(b).
    fig = plt.figure(figsize=(12, 10))
    host = host_subplot(111, axes_class=mpl_aa.Axes)
    plt.subplots_adjust(bottom=0.2)

    props = regionprops(label(img_bin))
    img_skel = skeletonize_3d(img_bin)
    rows, cols = np.where(img_skel != 0)

    img_rgb = gray2rgb(img_as_ubyte(image))
    img_rgb[rows, cols] = [255, 0, 255]

    for prop in props:
        obj_info = []
        aux = skeletonize_3d(prop.image)
        trk_area, trk_px = ds.tracks_classify(aux)
        count_auto = ds.count_by_region(ds.regions_and_skel(prop.image))

        x_min, y_min, x_max, y_max = prop.bbox
        obj_info.append(
            [prop.centroid[0], prop.centroid[1],
             str(count_auto[2][0][0])])
        for obj in obj_info:
            host.text(obj[1],
                      obj[0],
                      obj[2],
                      family='monospace',
                      color='yellow',
                      size='x-small',
                      weight='bold')

        if trk_area is not None:
            for px in trk_px:
                route, _ = route_through_array(~aux, px[0], px[1])

                for rx in route:
                    host.scatter(y_min + rx[1],
                                 x_min + rx[0],
                                 c='b',
                                 s=SCATTER_SIZE + 25)
                for p in px:
                    host.scatter(y_min + p[1],
                                 x_min + p[0],
                                 c='g',
                                 s=SCATTER_SIZE + 25)

                rows, cols = line(x_min + px[0][0], y_min + px[0][1],
                                  x_min + px[1][0], y_min + px[1][1])
                img_rgb[rows, cols] = [0, 255, 0]

    host.imshow(img_rgb, cmap='gray')
    host.axis['bottom', 'left'].toggle(all=False)

    guest = host.twiny()
    new_fixed_ax = guest.get_grid_helper().new_fixed_axis
    guest.axis['bottom'] = new_fixed_ax(loc='bottom',
                                        axes=guest,
                                        offset=(0, OFFSET))
    guest.axis['top'].toggle(all=False)
    guest.set_xlabel('$\mu m$')
    guest.set_xlim(0, x_um)

    plt.savefig('figures/Fig_10b' + SAVE_FIG_FORMAT, bbox_inches='tight')
    plt.close()

    return None
Example #44
    cv2.imshow('Colored Grains', img2)
    cv2.waitKey()

    #View just by making mask=threshold and also mask = dilation (after morph operations)
    #Some grains are well separated after morph operations

    #Now each object had a unique number in the image.
    #Total number of labels found are...
    #print(num_labels)

    #Step 5: Measure the properties of each grain (object)

    # regionprops function in skimage measure module calculates useful parameters for each object.

    clusterlist.append(measure.regionprops(labeled_mask, eroded))

propList = [
    'Area',
    'equivalent_diameter',  #Added... verify if it works
    'orientation',  #Added, verify if it works. Angle btwn x-axis and major axis.
    'MajorAxisLength',
    'MinorAxisLength',
    'Perimeter',
    'MinIntensity',
    'MeanIntensity',
    'MaxIntensity'
]

output_file = open('image_measurements.csv', 'w')
output_file.write(
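# The write call above is truncated in this excerpt. A hedged sketch of a
# typical continuation (not the author's exact code): write the property names
# as a CSV header and one row per measured grain, using the lowercase
# attribute names that regionprops exposes for the same quantities.
# output_file.write('Label,' + ','.join(propList) + '\n')
# for regions in clusterlist:
#     for region in regions:
#         row = [region.label, region.area, region.equivalent_diameter,
#                region.orientation, region.major_axis_length,
#                region.minor_axis_length, region.perimeter,
#                region.min_intensity, region.mean_intensity,
#                region.max_intensity]
#         output_file.write(','.join(str(value) for value in row) + '\n')
# output_file.close()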
def figure_6():
    """
    Figure 6. Track candidates chosen by the algorithm for the region in
    Figure 2(b). Green dots: extremity pixels. Green line: Euclidean
    distance between extremity pixels. Blue dots: route between
    extremity pixels.
    """

    image = imread('orig_figures/dur_grain1apatite01.tif', as_grey=True)
    img_bin = _processed_image(image)

    props = regionprops(label(img_bin))

    x_min, y_min, x_max, y_max = props[TEST_REGION].bbox
    img_skel = skeletonize_3d(props[TEST_REGION].image)
    _, x_px = img_skel.shape
    x_um = _calibrate_aux(len_px=x_px)

    _, trk_pts = ds.tracks_classify(img_skel)

    # Checking if the folder 'figures' exists.
    if not os.path.isdir('./figures'):
        os.mkdir('./figures')

    # Generating all figures at once.
    figures = ['a', 'b']

    for idx, pt in enumerate(trk_pts):
        fig = plt.figure(figsize=(9, 8))
        host = host_subplot(111, axes_class=mpl_aa.Axes)
        plt.subplots_adjust(bottom=0.2)

        img_rgb = gray2rgb(image[x_min:x_max, y_min:y_max])

        # calculating route and distances.
        route, _ = route_through_array(~img_skel, pt[0], pt[1])
        distances, _, _ = ds.track_parameters(pt[0], pt[1], route)

        # generating minimal distance line.
        rows, cols = line(pt[0][0], pt[0][1], pt[1][0], pt[1][1])
        img_rgb[rows, cols] = [False, True, False]

        # plotting minimal distance and route.
        host.imshow(img_rgb, cmap='gray')
        host.axis['bottom', 'left'].toggle(all=False)

        guest = host.twiny()
        new_fixed_ax = guest.get_grid_helper().new_fixed_axis
        guest.axis['bottom'] = new_fixed_ax(loc='bottom',
                                            axes=guest,
                                            offset=(0, OFFSET))
        guest.axis['top'].toggle(all=False)
        guest.set_xlabel('$\mu m$')
        guest.set_xlim(0, x_um)

        for rt_pt in route:
            host.scatter(rt_pt[1], rt_pt[0], c='b', s=SCATTER_SIZE)

        # plotting extreme points.
        for p in pt:
            host.scatter(p[1], p[0], c='g', s=SCATTER_SIZE)

        filename = 'figures/Fig_06' + figures[idx] + SAVE_FIG_FORMAT
        plt.savefig(filename, bbox_inches='tight')
        plt.close()

    return None
Example #46
def cluster_analysis(_imagefile, _anomalyfile, _matfile, write_dir, _junk,
                     file_name, permanent_dir):
    _feature_data = []
    _hist_data = []
    prefix = re.split('IR_|.pgm', _imagefile)[0]
    # print(prefix);
    postfix = re.split('IR_|.pgm', _imagefile)[1]
    # print(postfix);
    # _image = imread(_imagefile)
    # Get the region properties of the whole fruit
    # We need these to express the properties of anomaly region as ratios
    # of the whole fruit surface
    (_image, mask) = segment.segment(_imagefile, _matfile)
    _image = np.asarray(_image)
    mask = np.asarray(mask)
    mask = mask.astype(int)
    _props = measure.regionprops(mask, _image)
    plt.imshow(_image, cmap='gray')
    plt.close()
    if len(_props) == 0:
        return None
    else:
        _props = measure.regionprops(mask, _image)[0]
        # To store the sample points (pixel coordinates + pixel value)
        _datapoints = []
        # To store only the pixel coordinates
        _coords = []

        # turn on interactive mode. Required in VS for displaying figures interactively
        # during script execution
        # plt.ion()

        # Read the file
        with open(_anomalyfile, 'r') as inp:
            reader = csv.reader(inp)
            for row in reader:
                _datapoints.append([row[1], row[2], row[0]])
                _coords.append([row[1], row[2]])

        # Convert the values from strings to numbers using this hack I found
        # on Stack Overflow (list() keeps it working where map returns an iterator)
        _datapoints = list(map(myFloat, _datapoints))
        _coords = list(map(myFloat, _coords))
        _coords = list(map(myInt, _coords))

        # Convert the lists into arrays
        _datapoints = np.asarray(_datapoints)
        _coords = np.asarray(_coords)

        # Normalize the data points (0 mean and 1 standard deviation)
        _center_xform = StandardScaler().fit_transform(_datapoints)

        # Do the clustering
        db = DBSCAN(eps=0.3, min_samples=20).fit(_center_xform)

        labels = db.labels_
        labels_set = set(labels)
        #print labels_set
        # Remove the anomalies label
        labels_set.discard(-1)

        # Non-empty clusters found
        nclusters = 0

        for k in labels_set:
            # Get points in the current cluster
            members = (labels == k)
            members = _coords[members]

            # Form a binary image representing the cluster as points with value 1
            bw = np.zeros((480, 640), dtype=bool)
            for c in members:
                # Array indexing needs a tuple lists don't work
                xy = tuple(c)
                bw[xy] = 1

            # Merge the points into one large region
            bw = morphology.binary_closing(bw, np.ones((3, 3)), iterations=6)
            bw = morphology.binary_opening(bw, np.ones((3, 3)), iterations=3)
            bw = morphology.binary_fill_holes(bw)
            # Remove very small regions
            skimorph.remove_small_objects(bw, in_place=True)

            # Need to do this to avoid error in latest skimage library
            bw = bw.astype(int)

            # Binary image contains a region?
            if bw.any():
                nclusters += 1

                points = bw.nonzero()
                values = _image[points]
                cluster_props = measure.regionprops(bw, _image)[0]

                features = {}

                # These two are not features; they are only used for plotting
                features['points'] = points
                features['values'] = values

                # Eccentricity of the ellipse
                features['eccentricity'] = cluster_props.eccentricity

                # Diameter of the circle with the same area as the region
                # Normalized using image width
                features[
                    'eq_diameter'] = cluster_props.equivalent_diameter / 640

                # Number of objects - number of holes (8 connectivity)
                features['euler_number'] = cluster_props.euler_number
                # Fraction of area of entire fruit occupied
                features['area'] = 1. * cluster_props.area / _props.area

                # Ratio of pixels in the region to pixels of the convex hull
                features['solidity'] = cluster_props.solidity

                # Ellipse properties
                features[
                    'major_axis'] = 1. * cluster_props.major_axis_length / _props.major_axis_length
                features[
                    'minor_axis'] = 1. * cluster_props.minor_axis_length / _props.minor_axis_length

                # Normalized mean pixel value and standard deviation
                features[
                    'mean_value'] = 1. * cluster_props.mean_intensity / _props.max_intensity
                features['std'] = np.std(values) / _props.max_intensity

                hist = values.copy()
                hist = hist - _props.min_intensity
                hist = 256. * hist / _props.max_intensity
                hist = hist.astype(int)
                bins = np.bincount(hist, minlength=256)
                hist = []
                for i in range(0, 32):
                    start = i * 4
                    end = start + 4
                    v = 1. * sum(bins[start:end]) / values.size
                    hist.append(v)
                    features['hist' + str(i)] = v

                plt.figure()
                plt.bar(np.arange(32), hist)
                #plt.savefig(prefix + postfix + "_Histogram_" + str(nclusters) + ".png")
                print("->->->->->", _junk + file_name + postfix)
                plt.savefig(_junk + file_name + "_" + postfix + "_Histogram_" +
                            str(nclusters) + ".png")
                _feature_data.append(features)

            plt.close()
    # for cluster in _feature_data:
    #    points = cluster['points']
    #    im = np.zeros((480, 640), dtype=int)
    #    im[points] = cluster['values']
    #    plt.figure()
    #    plt.imshow(im, cmap='gray')

    # plt.show()
    n1 = csvwrite(_imagefile, _feature_data, permanent_dir)
    # n2 = csvwrite_histo(_imagefile, _hist_data);
    # mergeCSV(n1, n2);
    return _feature_data
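
# Hedged sketch of the 32-bin intensity histogram assembled inside
# cluster_analysis above, in vectorized form: the loop maps cluster pixel
# values to 256 levels, then sums the first 128 levels in groups of 4 and
# normalises by the number of pixels in the cluster (np is the module's
# numpy import).
def coarse_histogram(values, min_intensity, max_intensity):
    levels = ((values - min_intensity) * 256. / max_intensity).astype(int)
    bins = np.bincount(levels, minlength=256)
    return bins[:128].reshape(32, 4).sum(axis=1) / float(values.size)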
Example #47
label_image = measure.label(img_bw)

# print(label_image.shape[0]) #width of car img

# getting the maximum width, height and minimum width and height that a license plate can be
plate_dimensions = (0.03*label_image.shape[0], 0.08*label_image.shape[0], 0.15*label_image.shape[1], 0.3*label_image.shape[1])
plate_dimensions2 = (0.08*label_image.shape[0], 0.2*label_image.shape[0], 0.15*label_image.shape[1], 0.4*label_image.shape[1])
min_height, max_height, min_width, max_width = plate_dimensions
plate_objects_cordinates = []
plate_like_objects = []

fig, (ax1) = plt.subplots(1)
ax1.imshow(img_gray, cmap="gray")
flag =0
# regionprops creates a list of properties of all the labelled regions
for region in regionprops(label_image):
    # print(region)
    if region.area < 50:
        # if the region is this small, it's likely not a license plate
        continue

    # the bounding box coordinates
    min_row, min_col, max_row, max_col = region.bbox

    region_height = max_row - min_row
    region_width = max_col - min_col

    # ensuring that the region identified satisfies the condition of a typical license plate
    if region_height >= min_height and region_height <= max_height and region_width >= min_width and region_width <= max_width and region_width > region_height:
        flag = 1
        plate_like_objects.append(img_bw[min_row:max_row,
                                  min_col:max_col])
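        # The excerpt ends here; plate_objects_cordinates and flag are defined
        # above but not yet used. A plausible continuation (hedged sketch,
        # mirroring the character-extraction snippet earlier in this listing,
        # and assuming matplotlib.patches is imported as patches) records the
        # box and outlines the candidate on the displayed image:
        plate_objects_cordinates.append((min_row, min_col, max_row, max_col))
        rect_border = patches.Rectangle((min_col, min_row),
                                        max_col - min_col,
                                        max_row - min_row,
                                        edgecolor="red",
                                        linewidth=2,
                                        fill=False)
        ax1.add_patch(rect_border)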
Example #48
def find_crystals(img, magnification, spread=2.0, plot=False, **kwargs):
    """Function for finding crystals in a low contrast images.
    Used adaptive thresholds to find local features.
    Edges are detected, and rejected, on the basis of a histogram.
    Kmeans clustering is used to spread points over the segmented area.
    
    img: 2d np.ndarray
        Input image to locate crystals on
    magnification: float
        value indicating the magnification used, needed in order to determine the size of the crystals
    spread: float
        Value in micrometer to roughly indicate the desired spread of centroids over individual regions
    plot: bool
        Whether to plot the results or not
    **kwargs:
        keywords to pass to segment_crystals
    """
    img, scale = autoscale(img, maxdim=256)  # scale down for faster processing

    # segment the image, and find objects
    arr, seg = segment_crystals(img, **kwargs)

    labels, numlabels = ndimage.label(seg)
    props = measure.regionprops(labels, img)

    # calculate the pixel dimensions in micrometer
    px, py = calibration.pixelsize_mag1[magnification] / 1000  # nm -> um

    iters = 20

    crystals = []
    for prop in props:
        area = prop.area * px * py
        bbox = np.array(prop.bbox)

        # origin of the prop
        origin = bbox[0:2]

        # edge detection
        if isedge(prop):
            continue

        # number of centroids for kmeans clustering
        nclust = int(area // spread) + 1

        if nclust > 1:
            # use kmeans clustering to segment large blobs
            coordinates = np.argwhere(prop.image)

            # kmeans needs normalized data (w), store std to calculate coordinates after
            w, std = whiten(coordinates)

            # nclust must be an integer for some reason
            cluster_centroids, closest_centroids = kmeans2(w,
                                                           nclust,
                                                           iter=iters,
                                                           minit='points')

            # convert to image coordinates
            xy = (cluster_centroids * std + origin[0:2]) / scale
            crystals.extend([
                CrystalPosition(x, y, False, nclust, area, prop.area)
                for x, y in xy
            ])
        else:
            x, y = prop.centroid
            crystals.append(
                CrystalPosition(x / scale, y / scale, True, nclust, area,
                                prop.area))

    if plot:
        plt.imshow(img)
        plt.contour(seg, [0.5], linewidths=1.2, colors="yellow")
        if len(crystals) > 0:
            x, y = np.array([(crystal.x * scale, crystal.y * scale)
                             for crystal in crystals]).T
            plt.scatter(y, x, color="red")
        ax = plt.axes()
        ax.set_axis_off()
        plt.show()

    return crystals
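
# Self-contained sketch of the normalise / cluster / denormalise step used in
# find_crystals above. The original unpacks `w, std = whiten(coordinates)`,
# which suggests a local helper that also returns the per-axis standard
# deviation (scipy's own whiten returns only the whitened array); with plain
# scipy the same effect can be written out explicitly (assumes no
# zero-variance axis).
import numpy as np
from scipy.cluster.vq import kmeans2

def spread_centroids(coordinates, nclust, iters=20):
    std = coordinates.std(axis=0)
    w = coordinates / std                      # normalise for kmeans
    centers, _ = kmeans2(w, nclust, iter=iters, minit='points')
    return centers * std                       # back to pixel coordinates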
Example #49
def getMinorMajorRatio(image):
    image = image.copy()
    # Create the thresholded image to eliminate some of the background
    imagethr = np.where(image > np.mean(image), 0., 1.0)
    imagethr2 = np.where(image > np.mean(image) - 2 * np.std(image), 0., 1.0)
    coords = corner_peaks(corner_harris(imagethr), min_distance=5)
    coords_subpix = corner_subpix(imagethr, coords, window_size=13)
    cornercentercoords = np.nanmean(coords_subpix, axis=0)
    cornerstdcoords = np.nanstd(coords_subpix, axis=0)

    #Dilate the image
    imdilated = morphology.dilation(imagethr, np.ones((4, 4)))

    # Create the label list
    label_list = measure.label(imdilated)
    label_list2 = imagethr2 * label_list
    label_list = imagethr * label_list
    label_list2 = label_list2.astype(int)
    label_list = label_list.astype(int)

    region_list = measure.regionprops(label_list)
    region_list2 = measure.regionprops(label_list2)
    maxregion, max2ndregion = getLargestRegions(region_list, label_list,
                                                imagethr)
    maxregion2, max2ndregion2 = getLargestRegions(region_list2, label_list2,
                                                  imagethr2)

    # guard against cases where the segmentation fails by providing zeros
    ratio = 0.0
    fillratio = 0.0
    largeeigen = 0.0
    smalleigen = 0.0
    eigenratio = 0.0
    solidity = 0.0
    perimratio = 0.0
    arearatio = 0.0
    orientation = 0.0
    centroid = (0.0, 0.0)
    cornercenter = 0.0
    cornerstd = 0.0
    hu1 = hu2 = hu3 = hu12 = hu13 = hu23 = 0.0
    if ((not maxregion is None) and (maxregion.major_axis_length != 0.0)):
        ratio = 0.0 if maxregion is None else maxregion.minor_axis_length * 1.0 / maxregion.major_axis_length
        largeeigen = 0.0 if maxregion is None else maxregion.inertia_tensor_eigvals[
            0]
        smalleigen = 0.0 if maxregion is None else maxregion.inertia_tensor_eigvals[
            1]
        fillratio = 0.0 if (
            maxregion2 is None or maxregion2.minor_axis_length == 0.0
        ) else maxregion2.filled_area / (maxregion2.minor_axis_length *
                                         maxregion2.major_axis_length)
        solidity = 0.0 if maxregion2 is None else maxregion2.solidity
        hu1 = 0.0 if maxregion is None else maxregion.moments_hu[1]
        hu2 = 0.0 if maxregion is None else maxregion.moments_hu[2]
        hu3 = 0.0 if maxregion is None else maxregion.moments_hu[3]
        hu12 = 0.0 if (maxregion is None or hu1 == 0.0) else hu2 / hu1
        hu13 = 0.0 if (maxregion is None or hu1 == 0.0) else hu3 / hu1
        hu23 = 0.0 if (maxregion is None or hu2 == 0.0) else hu3 / hu2
        perimratio = 0.0 if (
            maxregion is None or maxregion.minor_axis_length == 0.0
        ) else maxregion.perimeter / (maxregion.minor_axis_length * 4.0 +
                                      maxregion.major_axis_length * 4.0)
        eigenratio = 0.0 if largeeigen == 0.0 else smalleigen / largeeigen
        orientation = 0.0 if maxregion is None else maxregion.orientation
        centroid = (0.0, 0.0) if maxregion is None else maxregion.centroid
        cornercentercoords = np.absolute(
            cornercentercoords -
            centroid) if maxregion.major_axis_length == 0.0 else np.absolute(
                cornercentercoords - centroid) / maxregion.major_axis_length
        cornercenter = np.linalg.norm(cornercentercoords)
        if maxregion.major_axis_length != 0.0:
            cornerstdcoords = np.absolute(
                cornerstdcoords) / maxregion.major_axis_length
        cornerstd = np.linalg.norm(cornerstdcoords)
    if ((not maxregion is None) and (not max2ndregion is None)):
        arearatio = max2ndregion.area / maxregion.area
    #print perimratio
    if np.isnan(cornercenter):
        cornercenter = 0.0
    if sum(np.isnan(cornercentercoords)) > 0.0:
        cornercentercoords = np.array([0.0, 0.0])
    if math.isnan(cornerstd):
        cornerstd = 0.0
    if sum(np.isnan(cornerstdcoords)) > 0.0:
        cornerstdcoords = np.array([0.0, 0.0])
    return cornercenter, cornercentercoords, cornerstd, cornerstdcoords, ratio, fillratio, eigenratio, solidity, hu1, hu2, hu3, hu12, hu13, hu23, perimratio, arearatio, orientation, centroid
Example #50
# capture the video again
cap = cv2.VideoCapture(filename)

# init dictionary for neurons and the neurons with background subtraction
neurons = {}
neurons_subtracted = {}
frame_number = 0

while True:
    ret, frame = cap.read()
    if ret:
        # convert current frame to grayscale
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = frame.astype(float)
        # get the properties of the labeled regions
        neuropil_regions = regionprops(neuropils, frame)
        for i, region in enumerate(regionprops(label_image, frame)):
            key = f'Neuron {i}'
            if key not in neurons.keys():
                neurons[key] = np.array([])
                neurons_subtracted[key] = np.array([])
            neurons[key] = np.append(neurons[key], [region.max_intensity])
            neurons_subtracted[key] = np.append(neurons_subtracted[key], [(region.max_intensity - neuropil_regions[i].min_intensity)])
        frame_number += 1
    else:
        print(f'Captured {frame_number} frames')
        break

# init dictionary for dF_f
neurons_df_f = {}
# neurons_deconv = {}
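
# Hedged sketch (not from the original script) of how neurons_df_f could be
# filled from the traces gathered above: ΔF/F against a baseline taken as a
# low percentile of each neuron's trace. The 10th percentile and the non-zero
# baseline are assumptions for illustration only.
for key, trace in neurons.items():
    baseline = np.percentile(trace, 10)
    neurons_df_f[key] = (trace - baseline) / baseline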
def figure_5():
    """
    Figure 5. Choosing track candidates in the region presented in
    Figure 2(b), obtaining extremity points two by two. Green pixels:
    Euclidean distance. Blue dots: route between the two extremity
    points. Yellow pixels: inner area of the region formed by Euclidean
    distance and route.
    """

    image = imread('orig_figures/dur_grain1apatite01.tif', as_grey=True)
    img_bin = _processed_image(image)
    props = regionprops(label(img_bin))

    img_skel = skeletonize_3d(props[TEST_REGION].image)
    _, x_px = img_skel.shape
    x_um = _calibrate_aux(len_px=x_px)

    # checking if the folder 'figures' exists.
    if not os.path.isdir('./figures'):
        os.mkdir('./figures')

    px_ext, _ = ds.pixels_interest(img_skel)

    # Generating all figures at once.
    figures = ['a', 'b', 'c']

    for idx, pts in enumerate(combinations(px_ext, r=2)):
        px, py = pts

        img_aux = gray2rgb(img_skel)
        region_area = np.zeros(img_skel.shape)

        route, _ = route_through_array(~img_skel, px, py)
        distances, _, _ = ds.track_parameters(px, py, route)

        rows, cols = line(px[0], px[1], py[0], py[1])
        img_aux[rows, cols] = [0, 255, 0]
        region_area[rows, cols] = True

        fig = plt.figure(figsize=(9, 8))
        host = host_subplot(111, axes_class=mpl_aa.Axes)
        plt.subplots_adjust(bottom=0.2)

        for pt in route:
            host.scatter(pt[1], pt[0], c='b', s=SCATTER_SIZE)
            region_area[pt[0], pt[1]] = True

        # extremity points.
        host.scatter(px[1], px[0], c='g', s=SCATTER_SIZE)
        host.scatter(py[1], py[0], c='g', s=SCATTER_SIZE)

        region_area = binary_fill_holes(region_area)

        for pt in route:
            region_area[pt[0], pt[1]] = False
            region_area[rows, cols] = False

        img_aux[region_area] = [255, 255, 0]

        host.imshow(img_aux, cmap='gray')
        host.axis['bottom', 'left'].toggle(all=False)

        guest = host.twiny()
        new_fixed_ax = guest.get_grid_helper().new_fixed_axis
        guest.axis['bottom'] = new_fixed_ax(loc='bottom',
                                            axes=guest,
                                            offset=(0, OFFSET))
        guest.axis['top'].toggle(all=False)
        guest.set_xlabel('$\mu m$')
        guest.set_xlim(0, x_um)

        filename = 'figures/Fig_05' + figures[idx] + SAVE_FIG_FORMAT
        plt.savefig(filename, bbox_inches='tight')
        plt.close()

    return None
def getAnswers(im, i):

	r, thresh = cv2.threshold(im, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
	thresh = 255-thresh;
	thresh =  cv2.morphologyEx(thresh, cv2.MORPH_OPEN, np.ones((3,3)))

	a, b = im.shape[:2]
	bound1 = b // 4
	bound2 = bound1*2
	bound3 = bound1*3
	bound4 = b

	height = a // 15
	width = b

	lowerbound = 1
	upperbound = height

	results = []
	for n in range(i, (i+15)):
		img = thresh[lowerbound:upperbound, 1:width]
		# cv2.imshow(str(n), img)

		label, count = measure.label(img, background=0, return_num=True)
		props = measure.regionprops(label)
		# colord = plt.imshow(label, cmap='spectral')
		# plt.axis('off')
		# plt.tight_layout()
		# plt.show()

		shaded = []
		for p in props:
			# print(p.area)
			if p.area > 700:
				shaded.append(p)

		shadeCount = len(shaded)

		if shadeCount > 1:
			results.append([n, 'Shade Error (more than 1 shade)'])
		elif shadeCount == 0:
			results.append([n, 'Shade Error (no shade)'])
		else:
			# results.append([n, 'Good Shade! Good job <3'])
			letter = None
			x, y = shaded[0].centroid
			if y >= 0 and y <= bound1:
				letter = 'A'
			elif y > bound1 and y <= bound2:
				letter = 'B'
			elif y > bound2 and y <= bound3:
				letter = 'C'
			elif y > bound3 and y <= bound4:
				letter = 'D'
			results.append([n, letter])

		lowerbound += height
		upperbound += height

	# for r in results:
	# 	print(r[0], r[1])

	return results
Example #53
def test(test_loader, model, args):
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        bar = tqdm.tqdm(test_loader)
        iter_num = len(test_loader.dataset)
        ftime = 0
        ntime = 0
        for i, data in enumerate(bar):
            t = time.time()
            images, names, size = data

            images = images.cuda(device=CONFIGS["TRAIN"]["GPU_ID"])
            # size = (size[0].item(), size[1].item())
            key_points = model(images)

            key_points = torch.sigmoid(key_points)
            ftime += (time.time() - t)
            t = time.time()
            visualize_save_path = os.path.join(CONFIGS["MISC"]["TMP"],
                                               'visualize_test')
            os.makedirs(visualize_save_path, exist_ok=True)

            binary_kmap = key_points.squeeze().cpu().numpy() > CONFIGS['MODEL']['THRESHOLD']
            kmap_label = label(binary_kmap, connectivity=1)
            props = regionprops(kmap_label)
            plist = []
            for prop in props:
                plist.append(prop.centroid)

            size = (size[0][0], size[0][1])
            b_points = reverse_mapping(plist,
                                       numAngle=CONFIGS["MODEL"]["NUMANGLE"],
                                       numRho=CONFIGS["MODEL"]["NUMRHO"],
                                       size=(400, 400))
            scale_w = size[1] / 400
            scale_h = size[0] / 400
            for i in range(len(b_points)):
                y1 = int(np.round(b_points[i][0] * scale_h))
                x1 = int(np.round(b_points[i][1] * scale_w))
                y2 = int(np.round(b_points[i][2] * scale_h))
                x2 = int(np.round(b_points[i][3] * scale_w))
                if x1 == x2:
                    angle = -np.pi / 2
                else:
                    angle = np.arctan((y1 - y2) / (x1 - x2))
                (x1, y1), (x2,
                           y2) = get_boundary_point(y1, x1, angle, size[0],
                                                    size[1])
                b_points[i] = (y1, x1, y2, x2)

            vis = visulize_mapping(b_points, size, names[0])

            cv2.imwrite(join(visualize_save_path, names[0].split('/')[-1]),
                        vis)
            np_data = np.array(b_points)
            np.save(
                join(visualize_save_path,
                     names[0].split('/')[-1].split('.')[0]), np_data)

            if CONFIGS["MODEL"]["EDGE_ALIGN"] and args.align:
                for i in range(len(b_points)):
                    b_points[i] = edge_align(b_points[i],
                                             names[0],
                                             size,
                                             division=5)
                vis = visulize_mapping(b_points, size, names[0])
                cv2.imwrite(
                    join(visualize_save_path,
                         names[0].split('/')[-1].split('.')[0] + '_align.png'),
                    vis)
                np_data = np.array(b_points)
                np.save(
                    join(visualize_save_path,
                         names[0].split('/')[-1].split('.')[0] + '_align'),
                    np_data)
            ntime += (time.time() - t)
    print('forward time for total images: %.6f' % ftime)
    print('post-processing time for total images: %.6f' % ntime)
    return ftime + ntime
Example #54
sub2.set_title("Thresholded Image")

imdilated = morphology.dilation(imthr, np.ones((4, 4)))
sub3 = plt.subplot(1, 4, 3)
plt.imshow(imdilated, cmap=cm.gray_r)
sub3.set_title("Dilated Image")

labels = measure.label(imdilated)
labels = imthr * labels
labels = labels.astype(int)
sub4 = plt.subplot(1, 4, 4)
sub4.set_title("Labeled Image")
plt.imshow(labels)

# calculate common region properties for each region within the segmentation
regions = measure.regionprops(labels)


# find the largest nonzero region
def getLargestRegions(props=regions, labelmap=labels, imagethres=imthr):
    regionmaxprop = None
    region2ndmaxprop = None
    for regionprop in props:
        # check to see if the region is at least 50% nonzero
        if sum(imagethres[labelmap ==
                          regionprop.label]) * 1.0 / regionprop.area < 0.50:
            continue
        if regionmaxprop is None:
            regionmaxprop = regionprop
        elif region2ndmaxprop is None:
            region2ndmaxprop = regionprop
Example #55
l = 1

while l == 1:
	segments_slic = slic(img, n_segments=initSegments, compactness=25, sigma=3)
	edges = getEdges(segments_slic, 5)

	def rpl(target):
		return 1 if target not in edges else 0
	rpl_v = np.vectorize(rpl)

	segments_noedge = rpl_v(segments_slic)
	l = len(np.unique(segments_noedge))
	initSegments += 1

lesionprops = measure.regionprops(segments_noedge)[0]
bbox = lesionprops.bbox
lesionimg = img[bbox[0]:bbox[2], bbox[1]:bbox[3]]
lesionfilter = lesionprops.convex_image
strictfilter = lesionprops.image
lesionimg = (lesionimg * lesionfilter.reshape(lesionfilter.shape[0], lesionfilter.shape[1], 1))
strictimg = (lesionimg * strictfilter.reshape(strictfilter.shape[0], strictfilter.shape[1], 1))
hflip = np.fliplr(lesionimg)
vflip = np.flipud(lesionimg)
rot = np.flipud(hflip)
h_mse = np.mean(np.square(lesionimg-hflip))
v_mse = np.mean(np.square(lesionimg-vflip))
r_mse = np.mean(np.square(lesionimg-rot))
ellipseArea = lesionprops.major_axis_length * lesionprops.minor_axis_length * math.pi
area = lesionprops.area
lesionAvgColor = imgAvg(strictimg) * ((bbox[2]-bbox[0])*(bbox[3]-bbox[1])) / area
Example #56
def area_measure(label_img):
    mask_area = 0  # avoids UnboundLocalError when no regions are present
    for region in measure.regionprops(label_img):
        mask_area = region.area  # keeps the area of the last labelled region
    return mask_area
def figure_2():
    """
    Figure 2. Binarizing and skeletonizing the region highlighted in an
    input photomicrograph [1]. (a) Input photomicrograph. (b) Example
    highlighted region. (c) Binarizing the example region using the
    ISODATA algorithm (threshold: 133). (d) Skeletonizing the binary
    region in (c). Colormap: gray.

    [1] Image sample1_01.jpg, from the folder `orig_figures`. Available
    in the Supplementary Material.
    """

    image = imread('orig_figures/dur_grain1apatite01.tif', as_grey=True)
    img_bin = _processed_image(image)

    props = regionprops(label(img_bin))

    x_min, y_min, x_max, y_max = props[TEST_REGION].bbox

    img_orig = image[x_min:x_max, y_min:y_max]
    img_reg = props[TEST_REGION].image
    img_skel = skeletonize_3d(props[TEST_REGION].image)

    _, x_px = img_skel.shape
    x_um = _calibrate_aux(len_px=x_px)

    # checking if the folder 'figures' exists.
    if not os.path.isdir('./figures'):
        os.mkdir('./figures')

    # Figure 2(a).
    image_arrow = imread('misc/Fig01a.tif')
    _, xarr_px, _ = image_arrow.shape

    xarr_um = _calibrate_aux(len_px=xarr_px)

    fig = plt.figure(figsize=(12, 10))
    host = host_subplot(111, axes_class=mpl_aa.Axes)
    plt.subplots_adjust(bottom=0.2)
    host.imshow(image_arrow, cmap='gray')
    host.axis['bottom', 'left'].toggle(all=False)

    guest = host.twiny()
    new_fixed_ax = guest.get_grid_helper().new_fixed_axis
    guest.axis['bottom'] = new_fixed_ax(loc='bottom',
                                        axes=guest,
                                        offset=(0, OFFSET))
    guest.axis['top'].toggle(all=False)
    guest.set_xlabel('$\mu m$')
    guest.set_xlim(0, xarr_um)

    plt.savefig('figures/Fig_02a' + SAVE_FIG_FORMAT, bbox_inches='tight')
    plt.close()

    # Figure 2(b).
    fig = plt.figure(figsize=(12, 10))
    host = host_subplot(111, axes_class=mpl_aa.Axes)
    plt.subplots_adjust(bottom=0.2)
    host.imshow(img_orig, cmap='gray')
    host.axis['bottom', 'left'].toggle(all=False)

    guest = host.twiny()
    new_fixed_ax = guest.get_grid_helper().new_fixed_axis
    guest.axis['bottom'] = new_fixed_ax(loc='bottom',
                                        axes=guest,
                                        offset=(0, OFFSET))
    guest.axis['top'].toggle(all=False)
    guest.set_xlabel('$\mu m$')
    guest.set_xlim(0, x_um)

    plt.savefig('figures/Fig_02b' + SAVE_FIG_FORMAT, bbox_inches='tight')
    plt.close()

    # Figure 2(c).
    fig = plt.figure(figsize=(12, 10))
    host = host_subplot(111, axes_class=mpl_aa.Axes)
    plt.subplots_adjust(bottom=0.2)
    host.imshow(img_reg, cmap='gray')
    host.axis['bottom', 'left'].toggle(all=False)

    guest = host.twiny()
    new_fixed_ax = guest.get_grid_helper().new_fixed_axis
    guest.axis['bottom'] = new_fixed_ax(loc='bottom',
                                        axes=guest,
                                        offset=(0, OFFSET))
    guest.axis['top'].toggle(all=False)
    guest.set_xlabel('$\mu m$')
    guest.set_xlim(0, x_um)

    plt.savefig('figures/Fig_02c' + SAVE_FIG_FORMAT, bbox_inches='tight')
    plt.close()

    # Figure 2(d).
    fig = plt.figure(figsize=(12, 10))
    host = host_subplot(111, axes_class=mpl_aa.Axes)
    plt.subplots_adjust(bottom=0.2)
    host.imshow(img_skel, cmap='gray')
    host.axis['bottom', 'left'].toggle(all=False)

    guest = host.twiny()
    new_fixed_ax = guest.get_grid_helper().new_fixed_axis
    guest.axis['bottom'] = new_fixed_ax(loc='bottom',
                                        axes=guest,
                                        offset=(0, OFFSET))
    guest.axis['top'].toggle(all=False)
    guest.set_xlabel('$\mu m$')
    guest.set_xlim(0, x_um)

    plt.savefig('figures/Fig_02d' + SAVE_FIG_FORMAT, bbox_inches='tight')
    plt.close()

    return None
Example #58
def centroids(label_img):
    centroids = []
    for region in measure.regionprops(label_img):
        centroids.append(region.centroid)
    return centroids  # list of (row, col) tuples
labelled_plate = measure.label(license_plate)

fig, ax1 = plt.subplots(1)
ax1.imshow(license_plate, cmap="gray")

character_dimensions = (0.35*license_plate.shape[0], 0.60*license_plate.shape[0],
	0.05*license_plate.shape[1], 0.15*license_plate.shape[1])

min_height, max_height, min_width, max_width = character_dimensions

characters = []
counter = 0
column_list = []

for regions in regionprops(labelled_plate):
	y0, x0, y1, x1 = regions.bbox
	region_height = y1 - y0
	region_width = x1 - x0

	if min_height < region_height < max_height and min_width < region_width < max_width:
		roi = license_plate[y0:y1, x0:x1]

		rect_border = patches.Rectangle((x0, y0), x1 - x0, y1 - y0, edgecolor = "red", 
			linewidth = 2, fill = False)
		ax1.add_patch(rect_border)

		resized_char = resize(roi, (20, 20))
		characters.append(resized_char)

		column_list.append(x0)
def regprop(labeled_samples, frames, n_rows, n_columns):
    '''
    Determines the area and centroid of all samples.

    Parameters
    -----------
    labeled_samples: Array
        An array with labeled samples.

    frames : Array
        Original intensity image to determine
        the intensity at sample centroids.
    n_rows: Int
        Number of rows of sample
    n_columns: Int
        Number of columns of sample

    Returns
    --------
    regprops: Dict
        A dictionary of dataframes with information about samples in every
        frame of the video.
    '''
    regprops = {}
    n_samples = n_rows * n_columns
    unique_index = random.sample(range(100), n_samples)

    missing = 0
    index = 0

    for i in range(len(frames)):
        if len(labeled_samples.shape) == 3:
            props = regionprops(labeled_samples[i], intensity_image=frames[i])
        elif len(labeled_samples.shape) == 2:
            props = regionprops(labeled_samples, intensity_image=frames[i])
        else:
            raise ValueError('Invalid labeled samples dimension')

        # Initializing arrays for all sample properties obtained from regprops.
        row = np.zeros(len(props)).astype(int)
        column = np.zeros(len(props)).astype(int)
        area = np.zeros(len(props))
        radius = np.zeros(len(props))
        perim = np.zeros(len(props))
        intensity = np.zeros(len(props), dtype=np.float64)
        plate = np.zeros(len(props), dtype=np.float64)
        plate_coord = np.zeros(len(props))

        unsorted_label = np.zeros((len(props), 5)).astype(int)
        sorted_label = np.zeros((len(props), 4)).astype(int)

        # collect data on centroid
        for item in range(len(props)):
            unsorted_label[item, 0] = int(props[item].centroid[0])
            unsorted_label[item, 1] = int(props[item].centroid[1])
            unsorted_label[item, 3] = item
            unsorted_label[item, 4] = np.unique(labeled_samples[i])[item + 1]

        # sort label based on euclidean distance
        for item in range(len(props)):
            unsorted_label[item, 2] = np.power(
                unsorted_label[item, 0] + unsorted_label[:, 0].min(),
                2) + np.power(
                    unsorted_label[item, 1] - unsorted_label[:, 1].min(), 2)
            sorted_label = unsorted_label[unsorted_label[:, 2].argsort()]

        c = 0
        for item in range(len(props)):
            prop = props[sorted_label[item, 3]]

            row[c] = int(prop.centroid[0])
            column[c] = int(prop.centroid[1])
            area[c] = prop.area

            loc_index = np.argwhere(labeled_samples[i] == sorted_label[item,
                                                                       4])
            left_side_column = min(loc_index[:, 0]) - 1
            right_side_column = max(loc_index[:, 0]) + 1
            left_side_row = min(loc_index[:, 1]) - 1
            right_side_row = max(loc_index[:, 1]) + 1

            # This part gets the total temperature, then the average temperature of each sample
            sample_temp = []
            for loc_index_len in range(len(loc_index)):
                x_coordinate = loc_index[loc_index_len].tolist()[0]
                y_coordinate = loc_index[loc_index_len].tolist()[1]

                result = frames[i][x_coordinate][y_coordinate]
                sample_temp.append(result)
            sum_temp_sample = np.sum(sample_temp)
            intensity[c] = sum_temp_sample / area[c]

            # This part is getting the environment temperature
            envir_area = (right_side_column - left_side_column +
                          1) * (right_side_row - left_side_row + 1) - area[c]

            # First, get the total temperature inside the cropped rectangle
            total_rectangle_temp_list = []
            for j in range(right_side_column - left_side_column + 1):
                for k in range(right_side_row - left_side_row + 1):
                    crop_temp = frames[i][left_side_column + j][left_side_row +
                                                                k]
                    total_rectangle_temp_list.append(crop_temp)

            # Next, subtract sum_temp_sample from that total to get sum_temp_envir
            total_rectangle_temp = np.sum(total_rectangle_temp_list)
            sum_temp_envir = total_rectangle_temp - sum_temp_sample
            plate[c] = sum_temp_envir / envir_area

            c = c + 1

        try:
            regprops[index] = pd.DataFrame(
                {
                    'Row': row,
                    'Column': column,
                    'Plate_temp(cK)': plate,
                    'Radius': radius,
                    'Plate_coord': plate_coord,
                    'Area': area,
                    'Perim': perim,
                    'Sample_temp(cK)': intensity,
                    'unique_index': unique_index
                },
                dtype=np.float64)
            regprops[index].sort_values(['Column', 'Row'], inplace=True)
            index += 1
        except ValueError:
            # print('Wrong number of samples detected in frame %d' % i)
            missing += 1
            continue

        if len(intensity) != n_samples:
            print('Wrong number of samples are being detected in frame %d' % i)

    if missing > 0:
        print(str(missing) + ' frames skipped due to missing samples')

    return regprops
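
# Hedged usage sketch for regprop with synthetic data (not from the original
# project): a 3 x 3 grid of square "samples" with constant fake temperatures,
# and the same label image replicated for every frame. Relies on the module's
# own imports (numpy as np, pandas, random, skimage.measure.regionprops).
if __name__ == '__main__':
    frames = np.zeros((5, 120, 160))
    label_img = np.zeros((120, 160), dtype=int)
    lab = 1
    for r in range(3):
        for c in range(3):
            rr, cc = 20 + 35 * r, 25 + 45 * c
            label_img[rr:rr + 10, cc:cc + 10] = lab
            frames[:, rr:rr + 10, cc:cc + 10] = 300.0 + lab  # fake temperature
            lab += 1

    labeled_samples = np.stack([label_img] * len(frames))  # one label image per frame
    stats = regprop(labeled_samples, frames, n_rows=3, n_columns=3)
    print('frames with all nine samples detected:', len(stats))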