def dice(img, y_true, y_pred):
    # Per-region Dice scores between predicted regions and the ground-truth
    # regions that contain their centroids.
    h, w = img.shape
    im_true = y_true.reshape(h, w)
    im_pred = y_pred.reshape(h, w)

    labels_true = measure.label(im_true)
    regions_true = regionprops(labels_true)

    labels_pred = measure.label(im_pred)
    regions_pred = regionprops(labels_pred)
    features = ['coords', 'area', 'dice']
    df = pd.DataFrame(columns=features)

    i = 0
    for x_pred in regions_pred:
        centroid = np.array(x_pred.centroid).astype(int)
        if im_true[centroid[0], centroid[1]] == 1:
            for x_true in regions_true:
                # match the true region whose coords contain the predicted centroid
                if (x_true.coords == centroid).all(axis=1).any():
                    A = np.zeros((img.shape[0], img.shape[1]))
                    B = np.zeros((img.shape[0], img.shape[1]))

                    A[x_pred.coords[:, 0], x_pred.coords[:, 1]] = 1
                    B[x_true.coords[:, 0], x_true.coords[:, 1]] = 1

                    # Dice coefficient: 2 * |A ∩ B| / (|A| + |B|)
                    intersect = float(np.sum(A * B))
                    D = 2.0 * intersect / (np.sum(A) + np.sum(B))
                    df.loc[i] = [x_pred.coords, x_pred.area, D]
                    break
        i += 1
    return df
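A minimal smoke test for dice() on synthetic masks (assuming numpy as np, pandas as pd, and skimage.measure/regionprops imported as the snippet implies):

import numpy as np
img = np.zeros((64, 64))
gt = np.zeros((64, 64), dtype=int)
pr = np.zeros((64, 64), dtype=int)
gt[10:30, 10:30] = 1   # one ground-truth blob
pr[12:32, 12:32] = 1   # an overlapping prediction
scores = dice(img, gt.ravel(), pr.ravel())
print(scores['dice'].values)  # ~0.81 for this overlap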
def extract_cell_stats(img1_path, img2_path):

    # Function reads in the images and labels the cells. The features are
    # extracted from these labelled images.
    #
    # Inputs:   img1_path - path to previous image
    #           img2_path - path to current image
    #
    # Outputs:  out -   dict containing the relevant information
    #

    # TODO: be more accommodating with image types, RGB etc, tifffile warning
    # read image data
    img1 = skimage.io.imread(img1_path)
    img2 = skimage.io.imread(img2_path)

    # Image shape
    if img1.shape != img2.shape:
        warnings.warn('Caution: Comparing image frames of different sizes.')
    img_shape = img1.shape

    # Label pre-segmented images
    l_label, l_cell_total = label(img1, return_num=True)
    r_label, r_cell_total = label(img2, return_num=True)

    # Collect cell features if cell is of minimum size (not segmented debris)
    # TODO: clever way of setting this number
    l_cells = [cell for cell in regionprops(l_label) if cell['filled_area'] > 50]
    r_cells = [cell for cell in regionprops(r_label) if cell['filled_area'] > 50]

    # Output
    out = {'img1': l_cells, 'img2': r_cells, 'img_shape': img_shape}
    return out
Example #3
def incorporate_cells(binary_image):    
    # invert input binary image
    inv_image = np.invert(binary_image)
    
    # matrix for binary_dilation
    struct = generate_binary_structure(2, 1)

    # do binary dilation until the colony count evens out
    plate_bin_dil = binary_dilation(inv_image, structure=struct)
    plate_dil_labels = label(plate_bin_dil)
    labels_number = len(np.unique(plate_dil_labels))  # initial number of colonies
    new_labels_number = labels_number - 1  # starting value
    cycle_number = 0  # starting value for dilation cycles
    while True:
        cycle_number += 1
        if cycle_number >= 30:
            break  # defence against infinite cycling
        else:
            if new_labels_number >= labels_number:
                break   # further dilation is useless (in theory)
            else:
                labels_number = new_labels_number
                plate_bin_dil = binary_dilation(plate_bin_dil, structure=struct)
                plate_dil_labels = label(plate_bin_dil)
                new_labels_number = len(np.unique(plate_dil_labels))
                
    return plate_bin_dil
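A hedged sketch of incorporate_cells() on a synthetic plate, assuming scipy.ndimage's generate_binary_structure/binary_dilation and a label function (e.g. skimage.measure.label) are in scope as the snippet implies:

import numpy as np
plate = np.ones((40, 40), dtype=bool)   # white plate; colonies are False
plate[10:14, 10:14] = False
plate[16:20, 16:20] = False             # a second colony two pixels away
merged = incorporate_cells(plate)
print(len(np.unique(label(merged))) - 1)  # nearby colonies merge into one component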
Example #4
    def start(self):
        """Segment the frame.

        The returned value is a labeled uint16 image.
        """
        background = np.bincount(self._frame.ravel()).argmax()  # Most common value.
        I_label = measure.label(self._frame, background=background)
        I_label += 1  # Background is labeled as -1, make it 0.
        I_bin = I_label > 0

        # Remove cells which are too small (leftovers).
        if self._a_min:
            I_label = mh.label(I_bin)[0]
            sizes = mh.labeled.labeled_size(I_label)
            too_small = np.where(sizes < self._a_min)
            I_cleanup = mh.labeled.remove_regions(I_label, too_small)
            I_bin = I_cleanup > 0

        # Fill holes.
        if self._fill:
            I_bin = ndimage.morphology.binary_fill_holes(I_bin)  # Small holes.
            # Bigger holes.
            labels = measure.label(I_bin)
            label_count = np.bincount(labels.ravel())
            background = np.argmax(label_count)
            I_bin[labels != background] = True

        I_label = mh.label(I_bin)[0].astype('uint16')
        return I_label
 def get_rough_detection(self, img, bigsize=40.0, smallsize=4.0, thresh = 0):
     diff = self.difference_of_gaussian(-img, bigsize, smallsize)
     diff[diff>thresh] = 1
     
     se = morphology.square(4)
     ero = morphology.erosion(diff, se)
     
     labimage = label(ero)
     #rec = morphology.reconstruction(ero, img, method='dilation').astype(np.dtype('uint8'))
     
     # connectivity=1 corresponds to 4-connectivity.
     morphology.remove_small_objects(labimage, min_size=600, connectivity=1, in_place=True)
     #res = np.zeros(img.shape)
     ero[labimage==0] = 0
     ero = 1 - ero
     labimage = label(ero)
     morphology.remove_small_objects(labimage, min_size=400, connectivity=1, in_place=True)
     ero[labimage==0] = 0
     res = 1 - ero
     res[res>0] = 255
     
     #temp = 255 - temp
     #temp = morphology.remove_small_objects(temp, min_size=400, connectivity=1, in_place=True)
     #res = 255 - temp
     
     return res
Example #6
    def test_background(self):
        x = np.zeros((2, 3, 3), int)
        x[0] = np.array([[1, 0, 0],
                         [1, 0, 0],
                         [0, 0, 0]])
        x[1] = np.array([[0, 0, 0],
                         [0, 1, 5],
                         [0, 0, 0]])

        lnb = x.copy()
        lnb[0] = np.array([[0, 1, 1],
                           [0, 1, 1],
                           [1, 1, 1]])
        lnb[1] = np.array([[1, 1, 1],
                           [1, 0, 2],
                           [1, 1, 1]])
        lb = x.copy()
        lb[0] = np.array([[0,  BG, BG],
                          [0,  BG, BG],
                          [BG, BG, BG]])
        lb[1] = np.array([[BG, BG, BG],
                          [BG, 0,   1],
                          [BG, BG, BG]])

        with expected_warnings(['`background`']):
            assert_array_equal(label(x), lnb)

        assert_array_equal(label(x, background=0), lb)
Example #7
    def test_return_num(self):
        x = np.array([[1, 0, 6],
                      [0, 0, 6],
                      [5, 5, 5]])

        assert_array_equal(label(x, return_num=True)[1], 3)
        assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
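For reference, a standalone check of the modern skimage.measure.label API these tests exercise (current releases treat 0 as background by default and use connectivity instead of the old positional neighbors argument); this block is illustrative and not part of the original test suite:

import numpy as np
from skimage.measure import label

x = np.array([[1, 0, 6],
              [0, 0, 6],
              [5, 5, 5]])
labeled, num = label(x, background=0, return_num=True)
print(num)  # 3, matching the assertion above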
Example #8
    def test_background(self):
        x = np.zeros((2, 3, 3), int)
        x[0] = np.array([[1, 0, 0],
                         [1, 0, 0],
                         [0, 0, 0]])
        x[1] = np.array([[0, 0, 0],
                         [0, 1, 5],
                         [0, 0, 0]])

        lnb = x.copy()
        lnb[0] = np.array([[1, 2, 2],
                           [1, 2, 2],
                           [2, 2, 2]])
        lnb[1] = np.array([[2, 2, 2],
                           [2, 1, 3],
                           [2, 2, 2]])
        lb = x.copy()
        lb[0] = np.array([[1,  BG, BG],
                          [1,  BG, BG],
                          [BG, BG, BG]])
        lb[1] = np.array([[BG, BG, BG],
                          [BG, 1,   2],
                          [BG, BG, BG]])

        assert_array_equal(label(x), lb)
        assert_array_equal(label(x, background=-1), lnb)
Example #9
    def clean_by_area(self, binary_image):
        image = binary_image.copy()
        image = ndi.binary_fill_holes(image)

        label_image = label(binary_image)
        initial_label = regionprops(label_image[0, :, :])[0].label

        for z in range(0, image.shape[0]):
            regions = regionprops(label_image[z, :, :])
            for region in regions:
                if region.label != initial_label:
                    for coords in region.coords:
                        image[z, coords[0], coords[1]] = 0

        for z in range(0, image.shape[0]):
            label_image = label(image[z, :, :], connectivity=1)
            regions = regionprops(label_image)
            if len(regions) > 1:
                max_area = np.max([r.area for r in regions])
                for region in regions:
                    if region.centroid[1] > 120 and region.area < max_area:
                        for coords in region.coords:
                            image[z, coords[0], coords[1]] = 0

        return image
Example #10
def get_segmentation_features(im):
    dilwindow = [4, 4]
    imthr = np.where(im > np.mean(im), 0.0, 1.0)
    imdil = morphology.dilation(imthr, np.ones(dilwindow))
    labels = measure.label(imdil)
    labels = imthr * labels
    labels = labels.astype(int)
    regions = measure.regionprops(labels)
    numregions = len(regions)
    while len(regions) < 1:
        dilwindow[0] = dilwindow[0] - 1
        dilwindow[1] = dilwindow[1] - 1
        if dilwindow == [0, 0]:
            regions = None
            break
        imthr = np.where(im > np.mean(im), 0.0, 1.0)
        imdil = morphology.dilation(imthr, np.ones(dilwindow))
        labels = measure.label(imdil)
        labels = imthr * labels
        labels = labels.astype(int)
        regions = measure.regionprops(labels)
    regionmax = get_largest_region(regions, labels, imthr)

    if regionmax is None:
        return (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan)
    eccentricity = regionmax.eccentricity
    convex_area = regionmax.convex_area
    convex_to_total_area = regionmax.convex_area / regionmax.area
    extent = regionmax.extent
    filled_area = regionmax.filled_area
    return (eccentricity, convex_area, convex_to_total_area, extent,
            filled_area, numregions)
Example #11
 def test_4_vs_8(self):
     x = np.zeros((2, 2, 2), int)
     x[0, 1, 1] = 1
     x[1, 0, 0] = 1
     label4 = x.copy()
     label4[1, 0, 0] = 2
     assert_array_equal(label(x, 4), label4)
     assert_array_equal(label(x, 8), x)
Example #12
 def test_4_vs_8(self):
     x = np.zeros((2, 2, 2), int)
     x[0, 1, 1] = 1
     x[1, 0, 0] = 1
     label4 = x.copy()
     label4[1, 0, 0] = 2
     with expected_warnings(['`background`']):
         assert_array_equal(label(x, 4), label4)
         assert_array_equal(label(x, 8), x)
Example #13
    def test_return_num(self):
        x = np.array([[1, 0, 6],
                      [0, 0, 6],
                      [5, 5, 5]])

        with expected_warnings(['`background`']):
            assert_array_equal(label(x, return_num=True)[1], 4)

        assert_array_equal(label(x, background=0, return_num=True)[1], 3)
Example #14
 def test_4_vs_8(self):
     x = np.array([[0, 1],
                   [1, 0]], dtype=int)
     assert_array_equal(label(x, 4),
                        [[0, 1],
                         [2, 0]])
     assert_array_equal(label(x, 8),
                        [[0, 1],
                         [1, 0]])
Example #15
    def test_basic(self):
        assert_array_equal(label(self.x), self.labels)

        # Make sure data wasn't modified
        assert self.x[0, 2] == 3

        # Check that everything works if there is no background
        assert_array_equal(label(self.x, background=99), self.labels_nobg)
        # Check that everything works if background value != 0
        assert_array_equal(label(self.x, background=9), self.labels_bg_9)
Example #16
 def test_4_vs_8(self):
     x = np.array([[0, 1],
                   [1, 0]], dtype=int)
     with expected_warnings(['`background`']):
         assert_array_equal(label(x, 4),
                            [[0, 1],
                             [2, 3]])
         assert_array_equal(label(x, 8),
                            [[0, 1],
                             [1, 0]])
Example #17
def eval_area(pred_ror, true_ror):
    '''
        Evaluate the detections against the true rock areas and return the
        average precision and recall over all detected regions.

        For each region i:
            T     : true size of the rock region
            P     : total size of the overlapping pred regions
            TnorP : rock pixels that were missed (false negatives)
            TandP : correctly detected pixels (true positives)
            PnorT : falsely detected pixels (false positives)
    '''
    detect = 0
    total_pre = 0
    total_recall = 0

    pred = sk.label(pred_ror, return_num = False, background=None)
    true = sk.label(true_ror, return_num = False, background=0)

    for i in range(0, np.max(true_ror)+1):

        TandP = np.count_nonzero(pred[true == i]) # nonzero pred pixels within region i (out of the region's size)
        
        if TandP != 0: # i.e. the region was detected

            ## Get P
            non = np.nonzero(pred[true == i]) 
            p = np.unique(pred[true == i][non]) ## labels of the pred regions overlapping region i
            P = 0 # Initialization
            for i2 in p:
                P += (pred == i2).sum()

            ## Get others
            T = (true == i).sum()
            TnorP = (true == i).sum() - np.count_nonzero(pred[true == i])
            PnorT = P - TandP

            ## Get score
            pre = 1. * TandP / P
            recall = 1. * TandP / T
            
            ## renew total score
            total_pre += pre
            total_recall += recall
            detect += 1
            
            ## Draw
            plt.scatter(pre, recall, color = 'b')
            # print T,P,TandP,TnorP,PnorT

    pre_ave    = 1. * total_pre   / detect
    recall_ave = 1. * total_recall/ detect

    return pre_ave, recall_ave
Example #18
    def test_background(self):
        x = np.array([[1, 0, 0],
                      [1, 1, 5],
                      [0, 0, 0]])

        assert_array_equal(label(x), [[1, 0, 0],
                                      [1, 1, 2],
                                      [0, 0, 0]])

        assert_array_equal(label(x, background=0),
                           [[1, 0, 0],
                            [1, 1, 2],
                            [0, 0, 0]])
Example #19
    def test_background(self):
        x = np.array([[1, 0, 0],
                      [1, 1, 5],
                      [0, 0, 0]])

        with expected_warnings(['`background`']):
            assert_array_equal(label(x), [[0, 1, 1],
                                          [0, 0, 2],
                                          [3, 3, 3]])

        assert_array_equal(label(x, background=0),
                           [[0, -1, -1],
                            [0,  0,  1],
                            [-1, -1, -1]])
Example #20
def largest_region(imData):

    belowMeanFilter = np.where(imData > np.mean(imData), 0., 1.0)
    dilated = morphology.dilation(belowMeanFilter, np.ones((3, 3)))
    regionLabels = (belowMeanFilter * measure.label(dilated)).astype(int)

    # calculate common region properties for each region within the segmentation
    regions = measure.regionprops(regionLabels)
    areas = [(-1  # sentinel instead of None, so np.argmax below stays well defined
              if np.sum(belowMeanFilter[regionLabels == region.label]) / region.area < 0.50
              else region.filled_area)
             for region in regions]

    if len(areas) > 0:

        regionMax = regions[np.argmax(areas)]

        # trim image to the max region
        regionMaxImg = trim_image(
            np.minimum(
                imData*np.where(regionLabels == regionMax.label, 1, 255),
                255))

        # rotate
        angle = intertial_axis(regionMaxImg)[2]
        rotatedRegionMaxImg = ndimage.rotate(regionMaxImg, np.degrees(angle))
        rotatedRegionMaxImg = trim_image(trim_image(rotatedRegionMaxImg, 0), 255)

    else:
        regionMax = None
        rotatedRegionMaxImg = None
        angle = 0

    return regionMax, rotatedRegionMaxImg, angle, regionLabels, regions, areas, belowMeanFilter, dilated
Example #21
def get_segmented_lungs(im, plot=False):
    # Step 1: Convert into a binary image.
    binary = im < -400
    # Step 2: Remove the blobs connected to the border of the image.
    cleared = clear_border(binary)
    # Step 3: Label the image.
    label_image = label(cleared)
    # Step 4: Keep the labels with 2 largest areas.
    areas = [r.area for r in regionprops(label_image)]
    areas.sort()
    if len(areas) > 2:
        for region in regionprops(label_image):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    label_image[coordinates[0], coordinates[1]] = 0
    binary = label_image > 0
    # Step 5: Erosion operation with a disk of radius 2. This operation separates the lung nodules attached to the blood vessels.
    selem = disk(2)
    binary = binary_erosion(binary, selem)
    # Step 6: Closure operation with a disk of radius 10. This operation keeps nodules attached to the lung wall.
    selem = disk(10)
    binary = binary_closing(binary, selem)
    # Step 7: Fill in the small holes inside the binary mask of lungs.
    edges = roberts(binary)
    binary = ndi.binary_fill_holes(edges)
    # Step 8: Superimpose the binary mask on the input image.
    get_high_vals = binary == 0
    im[get_high_vals] = -2000
    return im, binary
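A hedged usage sketch; ct_slice below is a synthetic stand-in for a CT slice in Hounsfield units (the -400 threshold and -2000 fill value assume HU data), with the function's skimage/scipy imports as the snippet assumes:

import numpy as np
ct_slice = np.zeros((512, 512), dtype=np.int16)  # soft tissue at 0 HU
ct_slice[100:400, 60:230] = -700                 # synthetic left "lung"
ct_slice[100:400, 280:450] = -700                # synthetic right "lung"
masked, lung_mask = get_segmented_lungs(ct_slice.copy())
print(lung_mask.sum())  # pixel count of the recovered lung mask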
Example #22
def get_largest_cc(u,v):
    """
    Return mask with largest connected component in u,v

    """

    if not skimage_available:
        print('*** skimage is not available. get_largest_cc() will not work. ***')
        return np.ones_like(u).astype('bool')
    
    fxx = np.array([[1,-2.0,1.0]])
    fxy = np.array([[-0.25,0,0.25],[0.0,0,0],[0.25,0,-0.25]])
    fyy = fxx.T

    u_ = u.astype('float32')
    v_ = v.astype('float32')
    uxx = cv2.filter2D(u_,-1,fxx)
    uxy = cv2.filter2D(u_,-1,fxy)
    uyy = cv2.filter2D(u_,-1,fyy)

    vxx = cv2.filter2D(v_,-1,fxx)
    vxy = cv2.filter2D(v_,-1,fxy)
    vyy = cv2.filter2D(v_,-1,fyy)

    THRESH=0.1
    ue = np.logical_or(np.logical_or(np.abs(uxx)>THRESH, np.abs(uxy)>THRESH),np.abs(uyy)>THRESH)
    ve = np.logical_or(np.logical_or(np.abs(vxx)>THRESH, np.abs(vxy)>THRESH),np.abs(vyy)>THRESH)
    edg = np.logical_or(ue,ve)
    
    L = measure.label(edg.astype('int32'),neighbors=4)
    
    sums = np.bincount(L.ravel())
    biggest_cc = L==np.argmax(sums)
    return biggest_cc
Example #23
    def get_maxima(self, src):
        '''
        Split the input image into labeled regions and compute each region's maximum.
        src: single-channel image
        dst: image where only each region's maximum pixel keeps its value
        '''

        img = copy.deepcopy(src)
        img[img!=0] = 255

        # label each region
        labels, num = sk.label(img, return_num = True)

        seed_img = np.zeros_like(src)
        
        # find the maximum of each region
        for i in range(1,num+1):

            # keep only region i
            img = copy.deepcopy(src)  # reset to the original image
            img[labels != i] = 0  # the max of what remains is this region's maximum

            # locate the maximum; unravel the flat argmax into (row, col)
            y, x = np.unravel_index(np.argmax(img), img.shape)

            if img[y, x] != 0:  # skip holes inside regions; otherwise use as a seed
                seed_img[y,x] = src[y,x]
        
        return seed_img
Example #24
def noise_removal(mask):
    dil_kern = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
    dilated = cv2.dilate(mask, dil_kern)
    log_img('segmask-dilate', dilated)

    L = measure.label(dilated)
    ccs = []
    for k in range(1, L.max() + 1):  # label 0 is the background
        pts = np.nonzero(L==k)
        pts = np.asarray(pts).transpose().astype('float32')
        pts = pts[:,::-1]
        val = dilated[pts[0,1],pts[0,0]]
        if val == 0:
            continue
        ccs.append(pts.reshape(pts.shape[0],1,2))
    boxes = [cv2.boundingRect(k) for k in ccs]
    rects = [Rect(b[0], b[1], b[2] + 1, b[3] + 1) for b in boxes]
    #maxw = max([r.w for r in rects])
    #maxh = max([r.h for r in rects])
    #rects = [r for r in rects if r.w != maxw or r.h != maxh]
    maxh = max([r.h for r in rects])
    valid_rects = [r for r in rects if \
        r.h > maxh * 0.5 \
       and r.w > maxh * 0.5] # minimum allowed size for a box
    logger.info("Valid bounding boxes: " + str(valid_rects))

    valid_mask = np.zeros(mask.shape, mask.dtype)
    for r in valid_rects:
        roi = r.roi(valid_mask)
        roi.fill(1)
    mask = mask * valid_mask
    log_img('segmask-filtered', mask)
    return mask
Example #25
def class_and_replace(org_label, classes):
    """
    CLASSIFIES AND REPLACES VALUES IN NUMPY ARRAY
    
    org_label = numpy array with original labels
    classes = array of same length as len of org_label with new labels
    
    classified = numpy array with new classified values
    classified_ID = merged regions with a unique ID
    
    """
    # Classify and replace
    keys = np.unique(org_label)
    values = classes #0 for non veg, 1 for veg
    dictionary = dict(zip(keys, values))
    classified = replace(org_label, dictionary)
    
    # Label merged regions with unique ID
    labeld = label(classified)
    number_of_labels = np.unique(labeld)
    newvals = np.asarray(list(range(1,(len(number_of_labels) + 1))))
    keys_ID = number_of_labels
    values_ID = newvals
    dictionary_ID = dict(zip(keys_ID, values_ID))
    classified_ID = replace(labeld, dictionary_ID)
    
    del(labeld)
    
    return classified, classified_ID
Example #26
def get_labels(ror, minsize=20):
    """
        ・各領域にラベルをつける
        ・0の領域にはラベルをつけない
        ・minsizeより小さい領域は削除する
    """
    labels = sk.label(ror, return_num=False, background=0)

    for i in range(1, np.max(labels) + 1):
        if np.count_nonzero(labels[labels == i]) < minsize:
            ror[labels == i] = 0

    ## Generate label images from only large regions
    large_labels = sk.label(ror, return_num=False, background=0)
    # print np.nonzero(large_labels==0)
    return large_labels
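A quick check of get_labels() on a synthetic map, again assuming sk is skimage.measure:

import numpy as np
ror = np.zeros((30, 30), dtype=int)
ror[2:5, 2:5] = 1        # 9 px: below minsize, removed
ror[10:20, 10:20] = 1    # 100 px: kept
print(np.max(get_labels(ror, minsize=20)))  # 1 surviving region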
Example #27
def label_image(image):
    
    # crop the 470x400x3 region of interest (rows 50:520, cols 240:640)
    ROI = image[50:520, 240:640, :3].astype(np.uint8)

    
    gray_ROI = cv2.cvtColor(ROI,cv2.COLOR_BGR2GRAY)
    
    ROI_flou = cv2.medianBlur((ROI).astype('uint8'),3)
    
    Laser = Detecte_laser.Detect_laser(ROI_flou)
    
    open_laser = cv2.morphologyEx(Laser, cv2.MORPH_DILATE, disk(3))
    
    skel = skeletonize(open_laser > 0)
    
    tranche = Detecte_laser.tranche(skel,90,30)    
    
    ret, thresh = cv2.threshold(gray_ROI*tranche.astype('uint8'),0,1,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    thresh01 = thresh<1.0
    
    open_thresh = cv2.morphologyEx(thresh01.astype('uint8'), cv2.MORPH_OPEN, disk(10))
    
    labelised = (label(open_thresh,8,0))+1
    
    return gray_ROI,labelised
Example #28
def ComputeMetrics(prob, batch_labels, p1, p2, rgb=None, save_path=None, ind=0):
    GT = label(batch_labels.copy())
    PRED = PostProcess(prob, p1, p2)
    lbl = GT.copy()
    pred = PRED.copy()
    aji = AJI_fast(lbl, pred)
    lbl[lbl > 0] = 1
    pred[pred > 0] = 1 
    l, p = lbl.flatten(), pred.flatten()
    acc = accuracy_score(l, p)
    roc = roc_auc_score(l, p)
    jac = jaccard_similarity_score(l, p)
    f1 = f1_score(l, p)
    recall = recall_score(l, p)
    precision = precision_score(l, p)
    if rgb is not None:
        xval_n = join(save_path, "xval_{}.png").format(ind)
        yval_n = join(save_path, "yval_{}.png").format(ind)
        prob_n = join(save_path, "prob_{}.png").format(ind)
        pred_n = join(save_path, "pred_{}.png").format(ind)
        c_gt_n = join(save_path, "C_gt_{}.png").format(ind)
        c_pr_n = join(save_path, "C_pr_{}.png").format(ind)
        ## CHECK PLOT FOR PROB AS IT MIGHT BE ILL ADAPTED

        imsave(xval_n, rgb)
        imsave(yval_n, color_bin(GT))
        imsave(prob_n, prob)
        imsave(pred_n, color_bin(PRED))
        imsave(c_gt_n, add_contours(rgb, GT))
        imsave(c_pr_n, add_contours(rgb, PRED))

    return acc, roc, jac, recall, precision, f1, aji
Example #29
def edge_curvature(mask, min_sep=5, average_over=3):
    '''
    Compute the menger curvature along the edges of the contours in the mask.
    '''

    labels = me.label(mask, neighbors=8, connectivity=2)

    edges = find_boundaries(labels, connectivity=2, mode='outer')

    pts = integer_boundaries(mask, edges, 0.5)

    curvature_mask = np.zeros_like(mask, dtype=float)

    for cont_pts in pts:
        # Last one is a duplicate
        cont_pts = cont_pts[:-1]

        num = cont_pts.shape[0]

        for i in xrange(num):

            curv = 0.0
            for j in xrange(min_sep, min_sep+average_over+1):
                curv += menger_curvature(cont_pts[i-j], cont_pts[i],
                                         cont_pts[(i+j) % num])

            y, x = cont_pts[i]

            if np.isnan(curv):
                curv = 0.0
            curvature_mask[y, x] = curv / average_over

    return curvature_mask
def do_evaluate(model):
    print('Model evaluating')
    X, y_true = next(get_seg_batch(1, from_train=False, random_choice=True))
    y_pred = model.predict(X)

    X, y_true, y_pred = X[0,:,:,:,0], y_true[0,:,:,:,0], y_pred[0,:,:,:,0]
    intersection = y_true * y_pred
    recall = (np.sum(intersection) + SMOOTH) / (np.sum(y_true) + SMOOTH)
    precision = (np.sum(intersection) + SMOOTH) / (np.sum(y_pred) + SMOOTH)
    print('Average recall {:.4f}, precision {:.4f}'.format(recall, precision))

    for threshold in range(0, 10, 2):
        threshold = threshold / 10.0
        pred_mask = (y_pred > threshold).astype(np.uint8)
        intersection = y_true * pred_mask
        recall = (np.sum(intersection) + SMOOTH) / (np.sum(y_true) + SMOOTH)
        precision = (np.sum(intersection) + SMOOTH) / (np.sum(y_pred) + SMOOTH)
        print("Threshold {}: recall {:.4f}, precision {:.4f}".format(threshold, recall, precision))

    regions = measure.regionprops(measure.label(y_pred))
    print('Num of pred regions {}'.format(len(regions)))

    if DEBUG_PLOT_WHEN_EVALUATING_SEG:
        plot_comparison(X, y_true, y_pred)
        plot_slices(X)
        plot_slices(y_true)
        plot_slices(y_pred)
Example #31
    def writeExample(self, scene_id):
        filename_glob = ROOT_FOLDER + scene_id + '/' + 'dataset/mesh/*.obj'
        filename = list(glob.glob(filename_glob))[0]
        points = load_pnt(filename)
        points = np.array(points)

        #segmentation = segmentation[sampledInds[:NUM_POINTS]]
        filename = ROOT_FOLDER + scene_id + '/annotation/metadata.t7'
        metadata = torchfile.load(filename)
        topDownViewTransformation = metadata["topDownTransformation"]

        #degree = metadata["topDownViewAngle"]
        rotMat = getZRotationMatrix(-metadata["topDownViewAngle"])
        #print(rotMat, topDownViewTransformation)
        #exit(1)

        #XYZ_rotated = np.transpose(np.dot(rotMat, np.transpose(points[:, :3])))
        #XYZ = np.tensordot(np.concatenate([points[:, :3], np.ones((points.shape[0], 1))], axis=1), topDownViewTransformation, axes=((1), (1)))
        #XYZ[:, 2] = points[:, 2]

        #ratio_1 = (XYZ[:, 0].max() - XYZ[:, 0].min()) / (XYZ_rotated[:, 0].max() - XYZ_rotated[:, 0].min())
        #ratio_2 = (XYZ[:, 1].max() - XYZ[:, 1].min()) / (XYZ_rotated[:, 1].max() - XYZ_rotated[:, 1].min())

        #XYZ[2, 2] *= np.sqrt(ratio_1 * ratio_2)

        #ratio = pow(np.abs(rotMat[0][0] / topDownViewTransformation[0][0] * rotMat[1][0] / topDownViewTransformation[1][0] * rotMat[0][1] / topDownViewTransformation[0][1] * rotMat[1][1] / topDownViewTransformation[1][1]), 0.25)
        ratio = 0
        for i in xrange(2):
            for j in xrange(2):
                if rotMat[i][j] != 0:
                    ratio = max(
                        ratio,
                        np.abs(topDownViewTransformation[i][j] / rotMat[i][j]))
                    pass
                continue
            continue

        globalTransformation = topDownViewTransformation
        globalTransformation[2, 2] = ratio
        globalTransformation[2, 3] = 0
        globalTransformation = np.concatenate(
            [globalTransformation, np.zeros((1, 4))], axis=0)
        globalTransformation[3, 3] = 1

        XYZ = np.tensordot(np.concatenate(
            [points[:, :3], np.ones((points.shape[0], 1))], axis=1),
                           globalTransformation,
                           axes=((1), (1)))
        XYZ = XYZ[:, :3] / XYZ[:, 3:]

        mins = XYZ.min(0, keepdims=True)
        maxs = XYZ.max(0, keepdims=True)
        maxRange = (maxs - mins)[:, :2].max()
        padding = maxRange * 0.05
        mins = (maxs + mins) / 2 - maxRange / 2
        mins -= padding
        maxRange += padding * 2
        minXY = mins[:, :2]

        #XYZ[:, :2] = (XYZ[:, :2] - minXY) / maxRange
        XYZ = (XYZ - mins) / maxRange
        points[:, :3] = XYZ

        originalWidth = 700.

        if points.shape[0] < NUM_POINTS:
            indices = np.arange(points.shape[0])
            points = np.concatenate([
                points, points[np.random.choice(indices,
                                                NUM_POINTS - points.shape[0])]
            ],
                                    axis=0)
        elif points.shape[0] > NUM_POINTS:
            sampledInds = np.arange(points.shape[0])
            np.random.shuffle(sampledInds)
            points = points[sampledInds[:NUM_POINTS]]
            pass

        points[:, 3:] = points[:, 3:] / 255 - 0.5

        coordinates = np.clip(
            np.round(points[:, :2] * HEIGHT).astype(np.int32), 0, HEIGHT - 1)

        self.indicesMaps = np.zeros((NUM_POINTS), dtype=np.int64)
        self.projectIndices(
            np.concatenate([coordinates,
                            np.arange(NUM_POINTS).reshape(-1, 1)],
                           axis=1), 0, WIDTH, 0, HEIGHT)

        filename = ROOT_FOLDER + scene_id + '/annotation/floorplan.txt'
        walls = []
        doors = []
        windows = []
        semantics = {}

        def transformPoint(v, c):
            return max(
                min(round((float(v) - minXY[0, c]) / maxRange * WIDTH),
                    WIDTH - 1), 0)

        with open(filename) as info_file:
            line_index = 0
            for line in info_file:
                line = line.split('\t')
                if line[4] == 'wall':
                    walls.append(
                        ((transformPoint(line[0],
                                         0), transformPoint(line[1], 1)),
                         (transformPoint(line[2],
                                         0), transformPoint(line[3], 1))))
                elif line[4] == 'door':
                    doors.append(
                        ((transformPoint(line[0],
                                         0), transformPoint(line[1], 1)),
                         (transformPoint(line[2],
                                         0), transformPoint(line[3], 1))))
                elif line[4] == 'window':
                    windows.append(
                        ((transformPoint(line[0],
                                         0), transformPoint(line[1], 1)),
                         (transformPoint(line[2],
                                         0), transformPoint(line[3], 1))))
                else:
                    if line[4] not in semantics:
                        semantics[line[4]] = []
                        pass
                    semantics[line[4]].append(
                        ((transformPoint(line[0],
                                         0), transformPoint(line[1], 1)),
                         (transformPoint(line[2],
                                         0), transformPoint(line[3], 1))))
                    pass
                continue
            pass

        roomSegmentation = np.zeros((HEIGHT, WIDTH), dtype=np.uint8)
        for line in walls:
            cv2.line(roomSegmentation,
                     (int(round(line[0][0])), int(round(line[0][1]))),
                     (int(round(line[1][0])), int(round(line[1][1]))),
                     color=15 + calcLineDirection(line),
                     thickness=self.gap)
            #cv2.line(roomSegmentation, (int(round(line[0][0])), int(round(line[0][1]))), (int(round(line[1][0])), int(round(line[1][1]))), color = 15, thickness=self.gap)
            continue

        rooms = measure.label(roomSegmentation == 0, background=0)

        corners = lines2Corners(walls, gap=self.gap)
        #corner_gt = np.zeros((HEIGHT, WIDTH), dtype=np.uint8)
        corner_gt = []
        for corner in corners:
            #corner_gt[int(round(corner[0][1])), int(round(corner[0][0]))] = corner[1] + 1
            corner_gt.append((int(round(corner[0][0])),
                              int(round(corner[0][1])), corner[1] + 1))
            continue

        openingCornerMap = [[3, 1], [0, 2]]
        for openingType, openings in enumerate([doors, windows]):
            for opening in openings:
                direction = calcLineDirection(opening)
                for cornerIndex, corner in enumerate(opening):
                    #corner_gt[int(round(corner[1])), int(round(corner[0]))] = 14 + openingCornerMap[direction][cornerIndex]
                    corner_gt.append(
                        (int(round(corner[0])), int(round(corner[1])),
                         14 + openingCornerMap[direction][cornerIndex]))
                    continue
                continue
            continue

        wallIndex = rooms.min()
        for pixel in [(0, 0), (0, HEIGHT - 1), (WIDTH - 1, 0),
                      (WIDTH - 1, HEIGHT - 1)]:
            backgroundIndex = rooms[pixel[1]][pixel[0]]
            if backgroundIndex != wallIndex:
                break
            continue

        iconSegmentation = np.zeros((HEIGHT, WIDTH), dtype=np.uint8)
        for line in doors:
            cv2.line(iconSegmentation,
                     (int(round(line[0][0])), int(round(line[0][1]))),
                     (int(round(line[1][0])), int(round(line[1][1]))),
                     color=self.labelMap['door'],
                     thickness=self.gap - 1)
            continue
        for line in windows:
            cv2.line(iconSegmentation,
                     (int(round(line[0][0])), int(round(line[0][1]))),
                     (int(round(line[1][0])), int(round(line[1][1]))),
                     color=self.labelMap['window'],
                     thickness=self.gap - 1)
            continue

        roomLabelMap = {}
        for semantic, items in semantics.iteritems():
            group, label = self.labelMap[semantic]
            for corners in items:
                if group == 'icons':
                    if label == 0:
                        continue
                    cv2.rectangle(
                        iconSegmentation,
                        (int(round(corners[0][0])), int(round(corners[0][1]))),
                        (int(round(corners[1][0])), int(round(corners[1][1]))),
                        color=label,
                        thickness=-1)
                    # corner_gt[int(round(corners[0][1])), int(round(corners[0][0]))] = 18 + 2
                    # corner_gt[int(round(corners[1][1])), int(round(corners[0][0]))] = 18 + 1
                    # corner_gt[int(round(corners[0][1])), int(round(corners[1][0]))] = 18 + 3
                    # corner_gt[int(round(corners[1][1])), int(round(corners[1][0]))] = 18 + 0
                    corner_gt.append((int(round(corners[0][0])),
                                      int(round(corners[0][1])), 18 + 2))
                    corner_gt.append((int(round(corners[0][0])),
                                      int(round(corners[1][1])), 18 + 1))
                    corner_gt.append((int(round(corners[1][0])),
                                      int(round(corners[0][1])), 18 + 3))
                    corner_gt.append((int(round(corners[1][0])),
                                      int(round(corners[1][1])), 18 + 0))
                else:
                    roomIndex = rooms[int(
                        round((corners[0][1] + corners[1][1]) / 2))][int(
                            round((corners[0][0] + corners[1][0]) / 2))]
                    if roomIndex == wallIndex or roomIndex == backgroundIndex:
                        print('label on background')
                        exit(1)
                        pass
                    if roomIndex in roomLabelMap:
                        print('room has more than one label', label)
                        exit(1)
                        pass
                    roomLabelMap[roomIndex] = label
                    roomSegmentation[rooms == roomIndex] = label
                    pass
                continue
            continue
        for roomIndex in xrange(rooms.min(), rooms.max() + 1):
            if roomIndex == wallIndex or roomIndex == backgroundIndex:
                continue
            if roomIndex not in roomLabelMap:
                print('room has no label')
                print(roomIndex, rooms.max())
                pass
            continue
        flags = np.zeros(2, np.int64)
        flags[0] = 1

        corner_feature_file = ROOT_FOLDER + scene_id + '/corner_acc.npy'
        icon_feature_file = ROOT_FOLDER + scene_id + '/topdown_acc.npy'
        image_features = [[], []]
        if os.path.exists(corner_feature_file) and os.path.exists(
                icon_feature_file):
            flags[1] = 1
            corner_feature = np.load(corner_feature_file).reshape(
                (HEIGHT, WIDTH, -1))
            icon_feature = np.load(icon_feature_file).reshape(
                (HEIGHT, WIDTH, -1))
            for featureIndex, feature in enumerate(
                [corner_feature, icon_feature]):
                for size, numChannels in zip(SIZES, NUM_CHANNELS)[1:]:
                    feature = cv2.resize(feature, (size, size))
                    image_features[featureIndex].append(
                        feature.reshape((size, size, numChannels,
                                         -1)).mean(-1).reshape(-1))
                    continue
                image_features[featureIndex] = np.concatenate(
                    image_features[featureIndex], axis=0)
                continue
            image_features = image_features[0] + image_features[1]
        else:
            image_features = np.zeros(
                sum([
                    size * size * numChannels
                    for size, numChannels in zip(SIZES, NUM_CHANNELS)[1:]
                ]))
            pass

        if False:
            cv2.imwrite('test/density.png',
                        drawDensityImage(getDensity(points, HEIGHT, WIDTH)))
            cv2.imwrite(
                'test/density_indices.png',
                drawDensityImage(
                    getDensityFromIndices(self.indicesMaps, HEIGHT, WIDTH)))
            cv2.imwrite('test/icon_segmentation.png',
                        drawSegmentationImage(iconSegmentation))
            cv2.imwrite('test/room_segmentation.png',
                        drawSegmentationImage(roomSegmentation))
            cv2.imwrite('test/corner_segmentation.png',
                        drawSegmentationImage(corner_gt, blackIndex=0))
            if flags[1]:
                cv2.imwrite(
                    'test/topdown_corner.png',
                    cv2.imread(ROOT_FOLDER + scene_id + '/corner_pred.png'))
                cv2.imwrite(
                    'test/topdown_icon.png',
                    cv2.imread(ROOT_FOLDER + scene_id +
                               '/topdown_pred_nonzero.png'))
                exit(1)
                pass
            pass

        corner_gt = np.array(corner_gt, dtype=np.int64)
        numCorners = len(corner_gt)
        print('num corners', numCorners)
        if numCorners > MAX_NUM_CORNERS:
            exit(1)
        elif numCorners < MAX_NUM_CORNERS:
            corner_gt = np.concatenate([
                corner_gt,
                np.zeros((MAX_NUM_CORNERS - numCorners, 3), dtype=np.int64)
            ],
                                       axis=0)
            pass

        example = tf.train.Example(features=tf.train.Features(
            feature={
                'image_path': _bytes_feature(scene_id),
                'points': _float_feature(points.reshape(-1)),
                'point_indices': _int64_feature(self.indicesMaps.reshape(-1)),
                'corner': _int64_feature(corner_gt.reshape(-1)),
                'num_corners': _int64_feature([numCorners]),
                'icon': _bytes_feature(iconSegmentation.tostring()),
                'room': _bytes_feature(roomSegmentation.tostring()),
                'image': _float_feature(image_features.reshape(-1)),
                'flags': _int64_feature(flags),
            }))
        self.writer.write(example.SerializeToString())
        return
Example #32
def all_slice_analysis(bw,
                       spacing,
                       cut_num=0,
                       vol_limit=[0.68, 8.2],
                       area_th=6e3,
                       dist_th=62):
    # in some cases, several top layers need to be removed first
    if cut_num > 0:
        bw0 = np.copy(bw)
        bw[-cut_num:] = False
    label = measure.label(bw, connectivity=1)
    # remove components that touch the corners (background)
    mid = int(label.shape[2] / 2)
    bg_label = set([label[0, 0, 0], label[0, 0, -1], label[0, -1, 0], label[0, -1, -1], \
                    label[-1-cut_num, 0, 0], label[-1-cut_num, 0, -1], label[-1-cut_num, -1, 0], label[-1-cut_num, -1, -1], \
                    label[0, 0, mid], label[0, -1, mid], label[-1-cut_num, 0, mid], label[-1-cut_num, -1, mid]])
    for l in bg_label:
        label[label == l] = 0

    # select components based on volume
    properties = measure.regionprops(label)
    for prop in properties:
        if prop.area * spacing.prod() < vol_limit[
                0] * 1e6 or prop.area * spacing.prod() > vol_limit[1] * 1e6:
            label[label == prop.label] = 0

    # prepare a distance map for further analysis
    x_axis = np.linspace(-label.shape[1] / 2 + 0.5, label.shape[1] / 2 - 0.5,
                         label.shape[1]) * spacing[1]
    y_axis = np.linspace(-label.shape[2] / 2 + 0.5, label.shape[2] / 2 - 0.5,
                         label.shape[2]) * spacing[2]
    x, y = np.meshgrid(x_axis, y_axis)
    d = (x**2 + y**2)**0.5
    vols = measure.regionprops(label)
    valid_label = set()
    # select components based on their area and distance to center axis on all slices
    for vol in vols:
        single_vol = label == vol.label
        slice_area = np.zeros(label.shape[0])
        min_distance = np.zeros(label.shape[0])
        for i in range(label.shape[0]):
            slice_area[i] = np.sum(single_vol[i]) * np.prod(spacing[1:3])
            min_distance[i] = np.min(single_vol[i] * d +
                                     (1 - single_vol[i]) * np.max(d))

        if np.average([
                min_distance[i]
                for i in range(label.shape[0]) if slice_area[i] > area_th
        ]) < dist_th:
            valid_label.add(vol.label)

    bw = np.in1d(label, list(valid_label)).reshape(label.shape)

    # fill back the parts removed earlier
    if cut_num > 0:
        # bw1 is bw with removed slices, bw2 is a dilated version of bw, part of their intersection is returned as final mask
        bw1 = np.copy(bw)
        bw1[-cut_num:] = bw0[-cut_num:]
        bw2 = np.copy(bw)
        bw2 = scipy.ndimage.binary_dilation(bw2, iterations=cut_num)
        bw3 = bw1 & bw2
        label = measure.label(bw, connectivity=1)
        label3 = measure.label(bw3, connectivity=1)
        l_list = list(set(np.unique(label)) - {0})
        valid_l3 = set()
        for l in l_list:
            indices = np.nonzero(label == l)
            l3 = label3[indices[0][0], indices[1][0], indices[2][0]]
            if l3 > 0:
                valid_l3.add(l3)
        bw = np.in1d(label3, list(valid_l3)).reshape(label3.shape)

    return bw, len(valid_label)
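A rough smoke test under assumed imports (numpy as np, scipy.ndimage, skimage.measure as measure); the vol_limit bounds look like liters scaled by 1e6 to mm^3, so the synthetic component is sized to land inside them:

import numpy as np
bw = np.zeros((50, 200, 200), dtype=bool)
bw[5:45, 25:175, 25:175] = True       # ~1.1e6 mm^3 at the spacing below
spacing = np.array([2.5, 0.7, 0.7])   # (z, y, x) voxel size in mm
mask, n_components = all_slice_analysis(bw, spacing)
print(n_components)  # 1: central, large enough, and within the volume limits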
Example #33
def Workflow_dsp(struct_img,
                 rescale_ratio,
                 output_type,
                 output_path,
                 fn,
                 output_func=None):
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [8000]
    gaussian_smoothing_sigma = 1
    gaussian_smoothing_truncate_range = 3.0
    dot_3d_sigma = 1
    dot_3d_cutoff = 0.012
    minArea = 4
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img,
                                         scaling_param=intensity_norm_param)

    out_img_list.append(struct_img.copy())
    out_name_list.append('im_norm')

    # rescale if needed
    if rescale_ratio > 0:
        struct_img = resize(struct_img, [1, rescale_ratio, rescale_ratio],
                            method="cubic")
        struct_img = (struct_img - struct_img.min() +
                      1e-8) / (struct_img.max() - struct_img.min() + 1e-8)
        gaussian_smoothing_truncate_range = gaussian_smoothing_truncate_range * rescale_ratio

    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_slice_by_slice(
        struct_img,
        sigma=gaussian_smoothing_sigma,
        truncate_range=gaussian_smoothing_truncate_range)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append('im_smooth')

    ###################
    # core algorithm
    ###################

    # step 1: 3D Laplacian of Gaussian (LoG)
    response = dot_3d(structure_img_smooth, log_sigma=dot_3d_sigma)
    bw = response > dot_3d_cutoff
    bw = remove_small_objects(bw > 0,
                              min_size=minArea,
                              connectivity=1,
                              in_place=False)

    out_img_list.append(bw.copy())
    out_name_list.append('interm_mask')

    # step 2: 'local_maxi + watershed' for cell cutting
    local_maxi = peak_local_max(struct_img,
                                labels=label(bw),
                                min_distance=2,
                                indices=False)

    out_img_list.append(local_maxi.copy())
    out_name_list.append('interm_local_max')

    distance = distance_transform_edt(bw)
    im_watershed = watershed(-distance,
                             label(dilation(local_maxi, selem=ball(1))),
                             mask=bw,
                             watershed_line=True)

    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(im_watershed,
                               min_size=minArea,
                               connectivity=1,
                               in_place=False)

    # output
    seg = seg > 0
    seg = seg.astype(np.uint8)
    seg[seg > 0] = 255

    out_img_list.append(seg.copy())
    out_name_list.append('bw_final')

    if output_type == 'default':
        # the default final output
        save_segmentation(seg, False, output_path, fn)
    elif output_type == 'AICS_pipeline':
        # pre-defined output function for pipeline data
        save_segmentation(seg, True, output_path, fn)
    elif output_type == 'customize':
        # the hook for passing in a customized output function
        output_func(out_img_list, out_name_list, output_path, fn)
    else:
        # the hook for pre-defined RnD output functions (AICS internal)
        img_list, name_list = DSP_output(out_img_list, out_name_list,
                                         output_type, output_path, fn)
        if output_type == 'QCB':
            return img_list, name_list
Example #34
plot_3d(pix_resampled, 400)

image = pix_resampled
print(np.unique(image))
fill_lung_structures = True
# not actually binary, but 1 and 2.
# 0 is treated as background, which we do not want
binary_image = np.array(image > -320, dtype=np.int8) + 1
print(binary_image.shape)
print(image.shape)

print(np.unique(binary_image))
print(np.unique(image))

labels = measure.label(binary_image)
print(len([x.shape for x in labels]))

print(len(np.unique(labels)))
#print(labels[1,1,1])
print(binary_image[labels == 100])

# Pick the pixel in the very corner to determine which label is air.
#   Improvement: Pick multiple background labels from around the patient
#   More resistant to "trays" on which the patient lays cutting the air
#   around the person in half
background_label = labels[0, 0, 0]
print(background_label)

#Fill the air around the person
binary_image[background_label == labels] = 2
def add_bright_blob(fundus, seg):
    img_h, img_w, _ = fundus.shape

    # hyperparameters
    pos_bb_margin = 0.5
    n_aug_blobs = np.random.randint(0, 6)

    # get bounding box of od, format : [(top_left.x,top_left.y), (bottom_right.x, bottom_right.y)]
    # mask image has values of 0 and positive value
    labels, n_labels = measure.label(seg > 200, return_num=True)
    assert n_labels == 1
    # get coordinates of top left and bottom right
    row_inds, col_inds = np.where(labels == 1)
    top_left = np.array((np.min(col_inds), np.min(row_inds)))
    bottom_right = np.array((np.max(col_inds), np.max(row_inds)))
    # enlarge by 'margin'
    center = (top_left + bottom_right) / 2
    top_left = (center - (center - top_left) *
                (1 + 1. * pos_bb_margin)).astype(int)
    bottom_right = (center + (bottom_right - center) *
                    (1 + 1. * pos_bb_margin)).astype(int)
    # adjust if outbound
    top_left = coord_image(top_left, [img_h, img_w])
    bottom_right = coord_image(bottom_right, [img_h, img_w])
    bbox_pos = [top_left, bottom_right]

    # superimpose patch when not overlapped
    n_current = 0
    while n_current < n_aug_blobs:
        # augment od patch
        patch = np.copy(fundus[top_left[1]:bottom_right[1],
                               top_left[0]:bottom_right[0]])
        r_angle = random.randint(0, 359)
        x_scale, y_scale = random.uniform(1. / 1.05, 1.05), random.uniform(
            1. / 1.05, 1.05)
        shear = random.uniform(-5 * np.pi / 180, 5 * np.pi / 180)
        x_translation, y_translation = random.randint(-10, 10), random.randint(
            -10, 10)
        tform = AffineTransform(scale=(x_scale, y_scale),
                                shear=shear,
                                translation=(x_translation, y_translation))
        patch_warped = warp(patch,
                            tform.inverse,
                            output_shape=(patch.shape[0], patch.shape[1]))
        patch_warped *= 255
        patch = rotate(patch_warped,
                       r_angle,
                       axes=(0, 1),
                       order=1,
                       reshape=False)

        h, w, _ = patch.shape
        top_left_x = np.random.randint(0, img_w - w)
        top_left_y = np.random.randint(0, img_h - h)
        candi = [[top_left_x, top_left_y], [top_left_x + w, top_left_y + h]]

        # check intersection with positive bounding boxes
        if not intersects(candi, bbox_pos):
            fundus[top_left_y:top_left_y + h,
                   top_left_x:top_left_x + w, :] = patch
            n_current += 1

    return fundus
def center_blob(mask):
    gt_labels = measure.label(mask > 0, return_num=False)
    gt_row_inds, gt_col_inds = np.where(gt_labels == 1)
    h_center = (np.min(gt_row_inds) + np.max(gt_row_inds)) // 2
    w_center = (np.min(gt_col_inds) + np.max(gt_col_inds)) // 2
    return h_center, w_center
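A quick sanity check of center_blob() (assuming skimage.measure as measure):

import numpy as np
mask = np.zeros((20, 20), dtype=np.uint8)
mask[5:11, 8:14] = 1
print(center_blob(mask))  # (7, 10): the midpoint of the blob's bounding box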
Example #37
def crossSectionFlux(mask, quslab, qvslab, axis_rdp):
    '''Compute setion-wise orientation differences and cross-section fluxes
    in an AR

    Args:
        mask (ndarray): CROPPED (see cropMask and applyCropIdx) 2D binary map
                        showing the location of an AR with 1s.
        quslab (cdms.TransientVariable): CROPPED (n * m) 2D array of u-flux,
                                       in kg/m/s.
        qvslab (cdms.TransientVariable): CROPPED (n * m) 2D array of v-flux,
                                       in kg/m/s.
        axis_rdp (ndarray): Nx2 array storing the (lat, lon) coordinates of
                           rdp-simplified AR axis.

    Returns:
        angles (TransientVariable): 2D map with the same shape as <mask>,
                                    showing section-wise orientation
                                    differences between horizontal flux (as
                                    in <quslab>, <qvslab>) and the AR axis of
                                    that section. In degrees. Regions outside
                                    of AR (0s in <mask>) are masked.
        anglesmean (float): area-weighted averaged of <angles> inside <mask>.
        crossflux (TransientVariable): 2D map with the same shape as <mask>,
                                       the section-wise cross-section fluxes
                                       in the AR, defined as the projection
                                       of fluxes onto the AR axis, i.e. flux
                                       multiplied by the cos of <angles>.
        seg_thetas (list): list of (x, y, z) Cartesian coordinates of the
                           tangent vectors along section boundaries.
    '''
    # get coordinates
    axislist = quslab.getAxisList()
    lats = quslab.getLatitude()[:]
    lons = quslab.getLongitude()[:]
    lonss, latss = np.meshgrid(lons, lats)

    # convert to cartesian coordinates
    carts = spherical2Cart(latss, lonss)
    vs = wind2Cart(quslab, qvslab, latss, lonss)
    vsnorm = np.linalg.norm(vs, axis=0)
    vsnorm = vs / vsnorm[None, :, :]

    # loop through segments to get orientation differences
    nsegs = len(axis_rdp) - 1
    seg_thetas = []
    angles = np.zeros(mask.shape)

    for ii in range(nsegs):

        pic = spherical2Cart(*axis_rdp[ii])
        pi1c = spherical2Cart(*axis_rdp[ii + 1])

        if ii == 0:
            setL = 1.
            thetai = 0  # dummy place holder
        else:
            # get evenly dividing angle theta and normal vector to theta
            normi, thetai = getNormalVectors(axis_rdp, ii)
            # dot products between normal vector and grid coordinates
            dotsi = (normi[:, None, None] * carts).sum(axis=0)
            setL = np.where(dotsi * (normi.dot(pi1c)) >= 0, 1, 0)

        if ii == nsegs - 1:
            setR = 1.
            thetai = 0  # dummy place holder
        else:
            normi1, thetai = getNormalVectors(axis_rdp, ii + 1)
            dotsi1 = (normi1[:, None, None] * carts).sum(axis=0)
            setR = np.where(dotsi1 * (normi1.dot(pic)) > 0, 1, 0)

        segii = setL * setR * mask
        seg_thetas.append(thetai)

        # select the correct region if the shape is too curvy
        segregii = measure.label(segii)
        if segregii.max() > 1:
            piidx=[funcs.findIndex(axis_rdp[ii][0],lats),\
                    funcs.findIndex(axis_rdp[ii][1],lons)]
            for jj in range(segregii.max()):
                segjj = np.where(segregii == jj + 1, 1, 0)
                if segjj[piidx[0], piidx[1]] == 1:
                    segii = segjj
                    break

        # mean orientation of AR axis segment
        meanori = np.cross(pic, pi1c)
        meanori = meanori / np.linalg.norm(meanori)

        # orientation of flux vectors
        '''
        fluxori=np.cross(carts,vs,axisa=0,axisb=0) # output: ny,nx,3
        norms=np.linalg.norm(fluxori,axis=2)
        fluxori=fluxori/(1e-6+norms[:,:,None])

        # get angles as arccos of the dot product of meanori and fluxori
        anglesii=(meanori[None,None,:]*fluxori).sum(axis=-1)
        anglesii=anglesii*segii
        '''

        # get sin(angle of flux vector and axis segment plane)
        # sign of sin() is: >0: flux vector aligns with meanori, and it
        # is pointing towards the cold side (according to thermal wind).
        # sin() <0: flux vector aligns against meanori, and it is pointing
        # towards the warm side.
        anglesii = (meanori[:, None, None] * vsnorm).sum(axis=0)
        #anglesii=np.sqrt(1-cos_alphaii**2)*np.where(cos_alphaii<0,-1,1)
        anglesii = anglesii * segii

        angles = angles + anglesii

    # compute cross section flux
    angles = np.array(angles)
    cos_angles = np.sqrt(1 - angles**2)
    crossflux_c = cos_angles[None, :, :] * vs
    # convert to local tangent winds
    crossflux_u, crossflux_v = cart2Wind(crossflux_c, latss, lonss)
    crossflux = np.sqrt(crossflux_u**2 + crossflux_v**2)
    crossflux = MV.masked_where(mask == 0, crossflux)
    crossflux.setAxisList(axislist)

    # convert sin(angle) to angle in degrees
    angles = np.arcsin(angles) / np.pi * 180
    angles = MV.masked_where(mask == 0, angles)
    angles.setAxisList(axislist)

    anglesmean = cdutil.averager(angles,
                                 axis='xy',
                                 weights=['generate', 'generate'])

    return angles, anglesmean, crossflux, seg_thetas
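
# The helpers spherical2Cart, wind2Cart and getNormalVectors are assumed by the
# function above; a minimal sketch of the lat/lon -> Cartesian conversion (the
# convention here is an assumption, not the original implementation):
import numpy as np

def spherical2Cart_sketch(lat, lon):
    # unit-sphere Cartesian coordinates from latitude/longitude in degrees;
    # components are stacked along axis 0 to match the (3, ny, nx) arrays above
    phi = np.deg2rad(lat)
    lam = np.deg2rad(lon)
    return np.array([np.cos(phi) * np.cos(lam),
                     np.cos(phi) * np.sin(lam),
                     np.sin(phi)])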
Example #38
         filename_split = input_name.split('\\')[-1]
         filename_split = filename_split.split('.')[0]
             
         plt.subplot(122); plt.imshow(seg_train); plt.title('Output');                            
         plt.savefig(sav_dir + filename_split + '_' + str(i) + '_compare_output.png', bbox_inches='tight')
               
         batch_x = []; batch_y = []; weights = [];
               
         #plt.imsave(sav_dir + filename_split + '_' + str(i) + '_output_mask.tif', (seg_train), cmap='binary_r')
         
 
         """ Compute accuracy """
         if truth:
             overlap_im = seg_train + truth_im[:, :, 1]
             binary_overlap = overlap_im > 0
             labelled = measure.label(binary_overlap)
             cc_overlap = measure.regionprops(labelled, intensity_image=overlap_im)
 
             """ (1) Find # True Positives identified (overlapped) """
             masked = np.zeros(seg_train.shape)
             all_no_overlap = np.zeros(seg_train.shape)
             truth_no_overlap = np.zeros(seg_train.shape)   # ALL False Negatives
             seg_no_overlap = np.zeros(seg_train.shape)     # All False Positives
             for M in range(len(cc_overlap)):
                 overlap_val = cc_overlap[M]['MaxIntensity']
                 overlap_coords = cc_overlap[M]['coords']
                 if overlap_val > 1:    # if there is overlap
                     for T in range(len(overlap_coords)):
                         masked[overlap_coords[T,0], overlap_coords[T,1]] = overlap_im[overlap_coords[T,0], overlap_coords[T,1]]   # TRUE POSITIVES
                 else:  # no overlap
                     for T in range(len(overlap_coords)):
Example #39
def entropy_conn(name_in):
    print('Start calculating ECM...')
    with open('./dataset/data_split.json', 'r') as jf:
        json_data = json.load(jf)['test']
    img_list = [x + '.png' for x in json_data]
    ECM = 0
    naive = 0
    with tqdm(total=len(img_list), unit='img') as pbar:
        for i, img in enumerate(img_list):
            gt_image = np.array(Image.open(os.path.join(args.mask_dir,
                                                        img)))[:, :, 0]
            pre_image = np.array(
                Image.open(os.path.join(predicted_skel_dir, img)))[:, :, 0]
            # find instances of the gt map
            gt_instance_map = measure.label(gt_image / 255, background=0)
            gt_instance_indexes = np.unique(gt_instance_map)[1:]
            # record lengths of all predicted instances assigned to each gt instance
            gt_assigned_lengths = [[] for x in range(len(gt_instance_indexes))]
            # record gt-instance length and vertices of this instance
            gt_instance_length = []
            gt_instance_points = []
            # record gt-instance pixels covered by projected predicted instances (measure completion)
            gt_covered = []
            # each unique label in gt_instance_map is a gt instance
            for index in gt_instance_indexes:
                instance_map = (gt_instance_map == index)
                instance_points = np.where(instance_map == 1)
                instance_points = [[
                    instance_points[0][i], instance_points[1][i]
                ] for i in range(len(instance_points[0]))]
                gt_instance_length.append(len(instance_points))
                gt_covered.append(np.zeros((len(instance_points))))
                gt_instance_points.append(instance_points)
            # find instances of the predicted graph map
            pre_instance_map = measure.label(pre_image / 255, background=0)
            pre_instance_indexes = np.unique(pre_instance_map)[1:]
            # all gt pixel points
            gt_points = np.where(gt_image != 0)
            gt_points = [[gt_points[0][i], gt_points[1][i]]
                         for i in range(len(gt_points[0]))]
            tree = cKDTree(gt_points)
            # each pre_index is a predicted instance
            for index in pre_instance_indexes:
                votes = []
                instance_map = (pre_instance_map == index)
                instance_points = np.where(instance_map == 1)
                instance_points = [[
                    instance_points[0][i], instance_points[1][i]
                ] for i in range(len(instance_points[0]))]
                if instance_points:
                    # Each predicted point of the current pre-instance finds its closest gt point and votes
                    # to the gt-instance that the closest gt point belongs to.
                    _, iis = tree.query(instance_points, k=[1])
                    closest_gt_points = [
                        [gt_points[x[0]][0], gt_points[x[0]][1]] for x in iis
                    ]
                    votes = [
                        gt_instance_map[x[0], x[1]] for x in closest_gt_points
                    ]
                # count the voting results
                votes_summary = np.zeros((len(gt_instance_indexes)))
                for j in range(len(gt_instance_indexes)):
                    # the number of votes made to gt-instance j+1
                    votes_summary[j] = votes.count(j + 1)
                # find the gt-instance winning the most vote and assign the current pre-instance to it
                if np.max(votes_summary):
                    vote_result = np.where(
                        votes_summary == np.max(votes_summary))[0][0]
                    # the length (pixel count) of the pre-instance assigned to
                    # the corresponding gt-instance
                    gt_assigned_lengths[vote_result].append(
                        len(instance_points))
                    # calculate projection of the predicted instance to corresponding gt-instance
                    instance_tree = cKDTree(gt_instance_points[vote_result])
                    _, iis = instance_tree.query(instance_points, k=[1])
                    gt_covered[vote_result][np.min(iis):np.max(iis) + 1] = 1
            # calculate ECM
            entropy_conn = 0
            naive_conn = 0
            # iterate all gt-instances, calculate connectivity of each of them
            for j, lengths in enumerate(gt_assigned_lengths):
                # lengths are the length of assigned pre-instances to the current gt-instance
                if len(lengths):
                    lengths = np.array(lengths)
                    # contribution of each assigned pre-instance
                    probs = (lengths / np.sum(lengths)).tolist()
                    C_j = 0
                    for p in probs:
                        C_j += -p * np.log2(p)
                    entropy_conn += np.exp(-C_j) * np.sum(
                        gt_covered[j]) / len(gt_points)
                    naive_conn += 1 / len(lengths)
            if len(gt_assigned_lengths):
                naive_conn = naive_conn / len(gt_assigned_lengths)
            # running mean of the per-image scores
            ECM = (ECM * i + entropy_conn) / (i + 1)
            naive = (naive * i + naive_conn) / (i + 1)
            pbar.update()
        output_json = {'ECM': np.array(ECM).tolist(), 'naive': naive}
    with open('./{}_connectivity.json'.format(name_in), 'w') as jf:
        json.dump(output_json, jf)
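
# A worked check of the entropy-connectivity term above (toy numbers, not from
# the dataset): a gt-instance covered by two pre-instances of lengths 60 and 40
# yields probabilities 0.6/0.4 and entropy C_j ~= 0.971, so its weight
# exp(-C_j) ~= 0.379; a single covering pre-instance gives C_j = 0, weight 1.
import numpy as np

lengths = np.array([60, 40])
probs = lengths / lengths.sum()
C_j = -(probs * np.log2(probs)).sum()
print(np.exp(-C_j))  # ~0.379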
Example #40
def main():
    grid = generate_grid('stpzcrnm')
    print('Solution to problem 1 is', grid.sum())
    print('Solution to problem 2 is', label(grid, connectivity=1).max())  # 4-connectivity
def get_region_props(heatmap_threshold_2d, heatmap_prob_2d):
    labeled_img = label(heatmap_threshold_2d)
    return regionprops(labeled_img, intensity_image=heatmap_prob_2d)
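
# A minimal usage sketch for get_region_props (the heatmap arrays here are
# hypothetical): threshold a probability map, then rank the candidate regions
# by their mean probability.
import numpy as np

heatmap_prob_2d = np.random.rand(64, 64)        # hypothetical probability map
heatmap_threshold_2d = heatmap_prob_2d > 0.9    # binary candidate mask
regions = get_region_props(heatmap_threshold_2d, heatmap_prob_2d)
regions = sorted(regions, key=lambda r: r.mean_intensity, reverse=True)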
Example #42
    def run(
        self,
        intensities: IntensityTable,
        n_processes: Optional[int] = None
    ) -> Tuple[IntensityTable, ConnectedComponentDecodingResult]:
        """
        Execute the combine_adjacent_features method on an IntensityTable containing pixel
        intensities

        Parameters
        ----------
        intensities : IntensityTable
            Pixel intensities of an imaging experiment
        n_processes : Optional[int]
            Number of processes to use when computing spot attributes

        Returns
        -------
        IntensityTable :
            Table whose features comprise sets of adjacent pixels that decoded to the same target
        ConnectedComponentDecodingResult :
            NamedTuple containing :
                region_properties :
                    the properties of each connected component, in the same order as the
                    IntensityTable
                label_image : np.ndarray
                    An image where all pixels of a connected component share the same integer ID
                decoded_image : np.ndarray
                    Image whose pixels correspond to the targets that the given position in the
                    ImageStack decodes to.

        """

        # map target molecules to integers so they can be reshaped into an image that can
        # be subjected to a connected-component algorithm to find adjacent pixels with the
        # same targets
        targets = intensities[Features.TARGET].values
        target_map = TargetsMap(targets)

        # create the decoded_image
        decoded_image = self._intensities_to_decoded_image(
            intensities,
            target_map,
            self._mask_filtered,
        )

        # label the decoded image to extract connected component features
        label_image: np.ndarray = label(decoded_image,
                                        connectivity=self._connectivity)

        # calculate properties of each feature
        props: List = regionprops(np.squeeze(label_image))

        # calculate mean intensities across the pixels of each feature
        mean_pixel_traces = self._calculate_mean_pixel_traces(
            label_image,
            intensities,
        )

        # Create SpotAttributes and determine feature filtering outcomes
        spot_attributes, passes_filter = self._create_spot_attributes(
            props, decoded_image, target_map, n_processes=n_processes)

        # augment the SpotAttributes with filtering results and distances from nearest codes
        spot_attributes.data[Features.DISTANCE] = mean_pixel_traces[
            Features.DISTANCE]
        spot_attributes.data[Features.PASSES_THRESHOLDS] = passes_filter

        # create new indexes for the output IntensityTable
        channel_index = mean_pixel_traces.indexes[Axes.CH]
        round_index = mean_pixel_traces.indexes[Axes.ROUND]
        coords = IntensityTable._build_xarray_coords(spot_attributes,
                                                     channel_index,
                                                     round_index)

        # create the output IntensityTable
        dims = (Features.AXIS, Axes.CH.value, Axes.ROUND.value)
        intensity_table = IntensityTable(data=mean_pixel_traces,
                                         coords=coords,
                                         dims=dims)

        # combine the various non-IntensityTable results into a NamedTuple before returning
        ccdr = ConnectedComponentDecodingResult(props, label_image,
                                                decoded_image)

        return intensity_table, ccdr
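
# The heart of run() above is plain connected-component labeling of the decoded
# image; a self-contained sketch of that step on toy data (not the starfish
# API): pixels that decoded to the same target and touch become one feature.
import numpy as np
from skimage.measure import label, regionprops

decoded = np.array([[1, 1, 0, 2],
                    [1, 0, 0, 2],
                    [0, 0, 2, 2]])
lab = label(decoded, connectivity=1)
for r in regionprops(lab):
    print(r.label, r.area, r.coords.tolist())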
Example #43
def get_segmented_lungs(im,plot=False):
    if plot == True:
        f, plots = plt.subplots(8,1,figsize=(5,40))

    binary = im < 604
    if plot == True:
        plots[0].axis('off')
        plots[0].set_title('binary image')
        plots[0].imshow(binary, cmap=plt.cm.bone)

    cleared = clear_border(binary)
    if plot == True:
        plots[1].axis('off')
        plots[1].set_title('after clear border')
        plots[1].imshow(cleared,cmap=plt.cm.bone)

    label_image = label(cleared)
    if plot == True:
        plots[2].axis('off')
        plots[2].set_title('labelled connected components')
        plots[2].imshow(label_image,cmap=plt.cm.bone)

    areas = [r.area for r in regionprops(label_image)]
    areas.sort()
    if len(areas) > 2:
        for region in regionprops(label_image):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    label_image[coordinates[0], coordinates[1]] = 0
    binary = label_image > 0
    if plot == True:
        plots[3].axis('off')
        plots[3].set_title('keep the labels with the 2 largest areas')
        plots[3].imshow(binary, cmap=plt.cm.bone)

    selem = disk(2)
    binary = binary_erosion(binary, selem)
    if plot == True:
        plots[4].axis('off')
        plots[4].set_title('separate the lung nodules attached to the blood vessels')
        plots[4].imshow(binary,cmap=plt.cm.bone)

    selem = disk(10)
    binary = binary_closing(binary, selem)
    if plot == True:
        plots[5].axis('off')
        plots[5].set_title('keep nodules attached to the lung wall')
        plots[5].imshow(binary,cmap=plt.cm.bone)

    edges = roberts(binary)
    binary = ndi.binary_fill_holes(edges)
    if plot == True:
        plots[6].axis('off')
        plots[6].set_title('Fill in the small holes inside the binary mask of lungs')
        plots[6].imshow(binary, cmap=plt.cm.bone)

    get_high_vals = binary == 0
    im[get_high_vals] = 0
    if plot == True:
        plots[7].axis('off')
        plots[7].set_title('Superimpose the binary mask on the input image')
        plots[7].imshow(binary, cmap=plt.cm.bone)

    return im
    def segmentation(self, LpRegion):
        LpRegion = self.clean_border(LpRegion)
        # cv2.imshow("edge", edged)

        V = cv2.split(cv2.cvtColor(LpRegion, cv2.COLOR_BGR2HSV))[2]
        # adaptive threshold
        T = threshold_local(V, 15, offset=10, method="gaussian")
        thresh = (V > T).astype("uint8") * 255
        # convert black pixel of digits to white pixel
        thresh = cv2.bitwise_not(thresh)
        thresh = imutils.resize(thresh, width=400)
        thresh = clear_border(thresh)
        # cv2.imwrite("step2_2.png", thresh)
        cv2.imshow("thresh", thresh)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        # try:
        #     lines = cv2.HoughLinesP(image=thresh,rho=1,theta=np.pi/180, threshold=200,lines=np.array([]), minLineLength=200,maxLineGap=20)
        #     angle = 0
        #     num = 0
        #     thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
        #     for line in lines:
        #         my_degree = math.degrees(math.atan2(line[0][3]-line[0][1], line[0][2]-line[0][0]))
        #         if -45 < my_degree < 45:
        #             angle += my_degree
        #             num += 1
        #         cv2.line(thresh, (line[0][0], line[0][1]), (line[0][2], line[0][3]), (255, 0, 0))
        #     angle /= num

        #     cv2.imshow("draw", thresh)
        #     cv2.waitKey(0)
        #     cv2.destroyAllWindows()
        #     # cv2.imwrite("draw.png", thresh)
        #     # Rotate image to deskew
        #     (h, w) = thresh.shape[:2]
        #     center = (w // 2, h // 2)
        #     M = cv2.getRotationMatrix2D(center, angle, 1.0)
        #     thresh = cv2.warpAffine(thresh, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
        # except:
        #     pass

        # edges = cv2.Canny(thresh,100,200)
        # thresh = cv2.medianBlur(thresh, 5)
        # cv2.imshow("thresh", edges)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # cv2.imwrite("thresh.png", thresh)
        # connected components analysis
        labels = measure.label(thresh, connectivity=2, background=0)

        # loop over the unique components
        for label in np.unique(labels):
            # if this is background label, ignore it
            if label == 0:
                continue

            # init mask to store the location of the character candidates
            mask = np.zeros(thresh.shape, dtype="uint8")
            mask[labels == label] = 255
            # find contours from mask
            contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            if len(contours) > 0:
                contour = max(contours, key=cv2.contourArea)
                (x, y, w, h) = cv2.boundingRect(contour)

                # rule to determine characters
                aspectRatio = w / float(h)
                solidity = cv2.contourArea(contour) / float(w * h)
                heightRatio = h / float(LpRegion.shape[0])

                if h * w > MIN_PIXEL_AREA and 0.25 < aspectRatio < 1.0 and solidity > 0.2 and 0.35 < heightRatio < 2.0:
                    # extract characters
                    candidate = np.array(mask[y:y + h, x:x + w])
                    square_candidate = convert2Square(candidate)
                    square_candidate = cv2.resize(square_candidate, (28, 28),
                                                  cv2.INTER_AREA)
                    # cv2.imwrite('./characters/' + str(y) + "_" + str(x) + ".png", cv2.resize(square_candidate, (56, 56), cv2.INTER_AREA))
                    square_candidate = square_candidate.reshape((28, 28, 1))
                    # cv2.imshow("square_candidate", square_candidate)
                    # cv2.waitKey(0)
                    # cv2.destroyAllWindows()
                    self.candidates.append((square_candidate, (y, x)))
Example #45
### Normalize and threshold ###

# Gaussian blurring to reduce high frequency noise
blurred_img = cv2.GaussianBlur(rescaled, (7, 7), 1.2)
mean_img, SD_img = cv2.meanStdDev(blurred_img)
min_img, max_img = np.amin(blurred_img), np.amax(blurred_img)

# threshold
thresh_img = cv2.threshold(blurred_img, max_img - 2 * SD_img, 255,
                           cv2.THRESH_BINARY)[1]

cv2.imshow("Image", thresh_img)
cv2.waitKey(0)

labels = measure.label(thresh_img, background=0)
mask = np.zeros(thresh_img.shape, dtype="uint8")

# loop over the unique components
for label in np.unique(labels):

    # if this is the background label, ignore it
    if label == 0:
        continue

    # otherwise, construct the label mask and count the
    # number of pixels
    labelMask = np.zeros(thresh_img.shape, dtype="uint8")
    labelMask[labels == label] = 255
    numPixels = cv2.countNonZero(labelMask)
    print(numPixels)
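
# The loop above only inspects component sizes; a typical follow-up (a sketch;
# the 300-pixel cutoff is an assumption) keeps the large components in `mask`:
for label in np.unique(labels):
    if label == 0:
        continue
    labelMask = np.zeros(thresh_img.shape, dtype="uint8")
    labelMask[labels == label] = 255
    if cv2.countNonZero(labelMask) > 300:
        mask = cv2.add(mask, labelMask)
cv2.imshow("Large components", mask)
cv2.waitKey(0)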
Example #46
def extractFeatures():
    imageNames = ['a', 'd', 'f', 'h', 'k', 'm', 'n', 'o', 'p', 'q', 'r', 's', 'u', 'w', 'x', 'z']
    #imageNames = ['a']
    Features = []
    featuresLabels = []
    for name in imageNames:
        # Reading an Image File
        img = io.imread(name + '.bmp')
        #print img.shape
        # Visualizing an Image/Matrix
        '''
        io.imshow(img)
        plt.title('Original Image')
        io.show()
        '''
        # Image Histogram
        '''
        hist = exposure.histogram(img)
        plt.bar(hist[1], hist[0])
        plt.title('Histogram')
        plt.show()
        '''
        # Binarization by Thresholding
        ret, binary = cv.threshold(img, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
        #ret, binary = cv.threshold(img, 0, 255, cv.THRESH_BINARY | cv.THRESH_TRIANGLE)
        #print ret
        th = ret
        img_binary = (img < th).astype(np.double)
        img_dilation = morphology.binary_dilation(img_binary, selem=None)
        img_erosion = morphology.binary_erosion(img_binary, selem=None)
        # Displaying Binary Image
        '''
        io.imshow(img_binary)
        plt.title('Binary Image')
        io.show()
        '''
        # Connected Component Analysis
        img_label = label(img_binary, background=0)
        '''
        io.imshow(img_label)
        plt.title('Labeled Image')
        io.show()
        print np.amax(img_label)
        '''
        # Displaying Component Bounding Boxes
        regions = regionprops(img_label)
        io.imshow(img_binary)
        ax = plt.gca()
        thresholdR = 15
        thresholdC = 15
        for props in regions:
            minr, minc, maxr, maxc = props.bbox
            if (maxr - minr) >= thresholdR and (maxc - minc) >= thresholdC:
                # Computing Hu Moments and Removing Small Components
                roi = img_binary[minr:maxr, minc:maxc]
                m = moments(roi)
                cr = m[0, 1] / m[0, 0]
                cc = m[1, 0] / m[0, 0]
                mu = moments_central(roi, cr, cc)
                nu = moments_normalized(mu)
                hu = moments_hu(nu)
                Features.append(hu)
                featuresLabels.append(name)
                plt.text(maxc, minr, name, bbox=dict(facecolor='white', alpha=0.5))
                ax.add_patch(Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='red', linewidth=1))
        plt.title('Bounding Boxes')
        #plt.savefig('report/' + name + '.png')
        io.show()
    '''
    D = cdist(Features, Features)
    print D
    io.imshow(D)
    plt.title('Distance Matrix')
    io.show()
    '''
    return Features, featuresLabels
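
# test() further below calls normalization(trainFeatures), which is not shown
# in this excerpt; a minimal sketch under the obvious assumption that it
# returns the per-feature mean and standard deviation over the 7 Hu moments:
import numpy as np

def normalization(features):
    features = np.asarray(features, dtype=float)
    return features.mean(axis=0), features.std(axis=0)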
Example #47
# load time series data
store = mm.data().load_data(data_path, True)
max_t = store.get_max_indices().get_t()
cb = mm.data().get_coords_builder()
cb.t(0).p(0).c(0).z(0)

# ------------------------------
# IMAGE ANALYSIS based on position
# ------------------------------
print("### Image analysis: calculate SG mask/pd ...")
# test image of position
temp = store.get_image(cb.t(data_t).c(data_c).z(0).p(data_p).build())
pix = np.reshape(temp.get_raw_pixels(), newshape=[temp.get_height(), temp.get_width()])

sg = find_organelle(pix, thresholding, min_size=min_size, max_size=max_size, local_param=21)
label_sg = label(sg, connectivity=1)
sg_pd = organelle_analysis(pix, sg, 'sg', 0)

# --------------------------
# COLOR CODED IMAGES
# --------------------------
print("### Calculate color coded circ/ecce/int image ...")
# circ image
cmap1 = 'YlOrRd'
cmap1_napari = dis.num_color_colormap(cmap1, 255)[0]
cmap1_plt = dis.num_color_colormap(cmap1, 255)[1]
sg_circ = obj.obj_display_in_circularity(label_sg)

# ecce image
cmap2 = 'Blues'
cmap2_napari = dis.num_color_colormap(cmap2, 255)[0]
Example #48
def test():
    trainFeatures, trainLabels = extractFeatures()
    trainMeans, trainDeviations = normalization(trainFeatures)
    testNames = ['test1', 'test2']
    #testNames = ['test1']
    testFeatures = []
    testLabels = []
    testTruth = []
    correct = 0
    D_copy = np.array(0)
    #textPosition = []
    for i in range(len(testNames)):
        classes, locations = readPkl(testNames[i])
        img = io.imread(testNames[i] + '.bmp')
        #testTruth = ['a']*7+['d']*7+['m']*7+['n']*7+['o']*7+['p']*7+['q']*7+['r']*7+['u']*7+['w']*7
        ret, binary = cv.threshold(img, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
        #ret, binary = cv.threshold(img, 0, 255, cv.THRESH_BINARY | cv.THRESH_TRIANGLE)
        #print ret
        th = ret
        img_binary = (img < th).astype(np.double)
        img_dilation = morphology.binary_dilation(img_binary, selem=None)
        img_erosion = morphology.binary_erosion(img_binary, selem=None)
        img_label = label(img_binary, background=0)
        regions = regionprops(img_label)
        io.imshow(img_binary)
        ax = plt.gca()
        thresholdR = 15
        thresholdC = 15
        for props in regions:
            minr, minc, maxr, maxc = props.bbox
            # Computing Hu Moments and Removing Small Components
            if (maxr - minr) >= thresholdR and (maxc - minc) >= thresholdC:
                #textPosition.append((maxc, minr))
                roi = img_binary[minr:maxr, minc:maxc]
                m = moments(roi)
                cr = m[0, 1] / m[0, 0]
                cc = m[1, 0] / m[0, 0]
                mu = moments_central(roi, cr, cc)
                nu = moments_normalized(mu)
                hu = moments_hu(nu)
                testFeatures.append(hu)
                
                for j in range(7):
                    testFeatures[-1][j] = (testFeatures[-1][j] - trainMeans[j]) / trainDeviations[j]
                D = cdist(testFeatures, trainFeatures)
                #D_copy = copy.deepcopy(D)
                D_index = np.argsort(D, axis=1)
                testLabels.append(trainLabels[D_index[-1][0]])
                
                indexFix = locationFix(locations, minr, minc, maxr, maxc)
                if indexFix is not None:
                    if testLabels[-1] == classes[indexFix]:
                        correct += 1
                
                plt.text(maxc, minr, testLabels[-1], bbox=dict(facecolor='white', alpha=0.5))
                ax.add_patch(Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='red', linewidth=1))
        plt.title('Bounding Boxes')
        io.show()
    print(correct, len(testLabels))
    correctRate = correct / len(testLabels)
    print(correctRate)
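
# readPkl and locationFix are assumed by test() and not shown; a hypothetical
# sketch of locationFix consistent with its use: return the index of the
# ground-truth location falling inside the detected bounding box, else None.
def locationFix(locations, minr, minc, maxr, maxc):
    for idx, (r, c) in enumerate(locations):
        if minr <= r <= maxr and minc <= c <= maxc:
            return idx
    return None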
Example #49
def predict(request):
    f = request.files.get('file')

    # Save the file to ./uploads
    basepath = os.path.dirname(__file__)
    file_path = os.path.join(basepath, 'uploads', secure_filename(f.name))
    with open(file_path, 'wb') as out_file:
        out_file.write(f.body)
    k_ = []
    x_ = []
    y_ = []
    w_ = []
    h_ = []
    t_ = []
    area = []
    # create test generator with predict flag set to True
    test_gen = generator('uploads', [f.name],
                         None,
                         batch_size=1,
                         image_size=512,
                         shuffle=False,
                         predict=True)
    for imgs, filenames in test_gen:
        # predict batch of images
        model = create_network(input_size=512,
                               channels=32,
                               n_blocks=2,
                               depth=4)
        model.load_weights("model/model.h5")
        preds = model.predict(imgs)
        for pred, filename in zip(preds, filenames):
            # resize predicted mask
            pred = resize(pred, (1024, 1024), mode='reflect')
            # threshold predicted mask
            comp = pred[:, :, 0] > 0.3
            # apply connected components
            comp = measure.label(comp)
            # apply bounding boxes
            for region in measure.regionprops(comp):
                # retrieve x, y, height and width
                y, x, y2, x2 = region.bbox
                height = y2 - y
                k_.append(filename.split('.')[0])
                x_.append(x)
                y_.append(y)
                h_.append(height)
                width = x2 - x
                w_.append(width)
                conf = np.mean(pred[y:y + height, x:x + width])
                area.append(width * height)
                t_.append(conf)
        # if len(x_) >= len(test_filenames):
        #     break
    test_predictions = pd.DataFrame()
    test_predictions['patientId'] = k_
    test_predictions['x'] = x_
    test_predictions['y'] = y_
    test_predictions['width'] = w_
    test_predictions['height'] = h_
    test_predictions['Target'] = t_
    test_predictions['area'] = area
    return test_predictions
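
# A short follow-up sketch (the request object and the 0.5 score cutoff are
# assumptions): keep only confident boxes from the returned DataFrame.
predictions = predict(request)  # hypothetical request carrying an image file
confident = predictions[predictions['Target'] > 0.5]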
Example #50
def BoundaryPoints(Mask, XYcalibration, Zcalibration):

    ndim = len(Mask.shape)

    TimedMask = {}
    #YX shaped object
    if ndim == 2:
        Mask = label(Mask)
        Label = []
        VolumeLabel = []
        tree = []
        properties = measure.regionprops(Mask, Mask)
        for prop in properties:

            LabelImage = prop.image
            regionlabel = prop.label
            sizeY = abs(prop.bbox[0] - prop.bbox[2]) * XYcalibration
            sizeX = abs(prop.bbox[1] - prop.bbox[3]) * XYcalibration
            Boundary = find_boundaries(LabelImage)
            Indices = np.where(Boundary > 0)
            Indices = np.transpose(np.asarray(Indices))
            RealIndices = Indices.copy()
            for j in range(0, len(RealIndices)):

                RealIndices[j][0] = RealIndices[j][0] * XYcalibration
                RealIndices[j][1] = RealIndices[j][1] * XYcalibration

            tree.append(spatial.cKDTree(RealIndices))

            if regionlabel not in Label:
                Label.append(regionlabel)
                VolumeLabel.append(
                    math.sqrt(sizeX * sizeX + sizeY * sizeY) / 4)
        # map the time point to the boundary KD-trees, boundary indices,
        # label ids and volume estimate of every label in the Mask image
        TimedMask[str(0)] = [tree, Indices, Label, VolumeLabel]

    #TYX shaped object
    if ndim == 3:

        Boundary = np.zeros([Mask.shape[0], Mask.shape[1], Mask.shape[2]])
        for i in range(0, Mask.shape[0]):

            Mask[i, :] = label(Mask[i, :])
            properties = measure.regionprops(Mask[i, :], Mask[i, :])
            Label = []
            VolumeLabel = []
            tree = []
            for prop in properties:

                LabelImage = prop.image
                regionlabel = prop.label
                sizeY = abs(prop.bbox[0] - prop.bbox[2]) * XYcalibration
                sizeX = abs(prop.bbox[1] - prop.bbox[3]) * XYcalibration
                Boundary[i, :LabelImage.shape[0], :LabelImage.
                         shape[1]] = find_boundaries(LabelImage)
                Indices = np.where(Boundary[i, :, :] > 0)
                Indices = np.transpose(np.asarray(Indices))
                RealIndices = Indices.copy()
                for j in range(0, len(RealIndices)):

                    RealIndices[j][0] = RealIndices[j][0] * XYcalibration
                    RealIndices[j][1] = RealIndices[j][1] * XYcalibration

                tree.append(spatial.cKDTree(RealIndices))
                if regionlabel not in Label:
                    Label.append(regionlabel)
                    VolumeLabel.append(
                        math.sqrt(sizeX * sizeX + sizeY * sizeY) / 4)

            TimedMask[str(i)] = [tree, Indices, Label, VolumeLabel]

    #TZYX shaped object
    if ndim == 4:

        Boundary = np.zeros(
            [Mask.shape[0], Mask.shape[1], Mask.shape[2], Mask.shape[3]])

        #Loop over time
        for i in range(0, Mask.shape[0]):

            Mask[i, :] = label(Mask[i, :])
            properties = measure.regionprops(Mask[i, :], Mask[i, :])
            Label = []
            VolumeLabel = []
            tree = []
            for prop in properties:

                LabelImage = prop.image
                regionlabel = prop.label
                sizeZ = abs(prop.bbox[0] - prop.bbox[3]) * Zcalibration
                sizeY = abs(prop.bbox[1] - prop.bbox[4]) * XYcalibration
                sizeX = abs(prop.bbox[2] - prop.bbox[5]) * XYcalibration
                #Loop over Z
                if regionlabel > 1:
                    for j in range(int(prop.bbox[0]), int(prop.bbox[3])):

                        Boundary[i, j, :LabelImage.shape[1], :LabelImage.
                                 shape[2]] = find_boundaries(
                                     LabelImage[j, :, :])
                else:
                    for j in range(int(prop.bbox[0]), int(prop.bbox[3])):

                        Boundary[i, j, :, :] = find_boundaries(Mask[i,
                                                                    j, :, :])

                Indices = np.where(Boundary[i, :] > 0)

                Indices = np.transpose(np.asarray(Indices))
                RealIndices = Indices.copy()
                for j in range(0, len(RealIndices)):

                    RealIndices[j][0] = RealIndices[j][0] * Zcalibration
                    RealIndices[j][1] = RealIndices[j][1] * XYcalibration
                    RealIndices[j][2] = RealIndices[j][2] * XYcalibration

                tree.append(spatial.cKDTree(RealIndices))
                if regionlabel not in Label:
                    Label.append(regionlabel)
                    VolumeLabel.append(
                        math.sqrt(sizeX * sizeX + sizeY * sizeY) / 4)

            TimedMask[str(i)] = [tree, Indices, Label, VolumeLabel]

    return TimedMask
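
# A minimal usage sketch for BoundaryPoints (the mask, calibrations and query
# point are hypothetical): look up the KD-trees at time 0 and find the distance
# from a real-world (y, x) point to the nearest boundary of the first label.
import numpy as np

toy_mask = np.zeros((64, 64), dtype=np.uint16)
toy_mask[20:30, 20:30] = 1
timed_mask = BoundaryPoints(toy_mask, XYcalibration=0.5, Zcalibration=1.0)
trees, indices, label_ids, volume_labels = timed_mask[str(0)]
dist, idx = trees[0].query([12.5, 10.0])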
Example #51
def main():
    opt = Options(isTrain=False)
    opt.parse()
    opt.save_options()

    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        str(x) for x in opt.test['gpus'])

    img_dir = opt.test['img_dir']
    label_dir = opt.test['label_dir']
    save_dir = opt.test['save_dir']
    model_path = opt.test['model_path']
    save_flag = opt.test['save_flag']

    # data transforms
    test_transform = get_transforms(opt.transform['test'])

    model = ResUNet34(pretrained=opt.model['pretrained'])
    model = torch.nn.DataParallel(model)
    model = model.cuda()
    cudnn.benchmark = True

    # ----- load trained model ----- #
    print("=> loading trained model")
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded model at epoch {}".format(checkpoint['epoch']))
    model = model.module

    # switch to evaluate mode
    model.eval()
    counter = 0
    print("=> Test begins:")

    img_names = os.listdir(img_dir)

    if save_flag:
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
        strs = img_dir.split('/')
        prob_maps_folder = '{:s}/{:s}_prob_maps'.format(save_dir, strs[-1])
        seg_folder = '{:s}/{:s}_segmentation'.format(save_dir, strs[-1])
        if not os.path.exists(prob_maps_folder):
            os.mkdir(prob_maps_folder)
        if not os.path.exists(seg_folder):
            os.mkdir(seg_folder)

    metric_names = ['acc', 'p_F1', 'p_recall', 'p_precision', 'dice', 'aji']
    test_results = dict()
    all_result = utils.AverageMeter(len(metric_names))

    for img_name in img_names:
        # load test image
        print('=> Processing image {:s}'.format(img_name))
        img_path = '{:s}/{:s}'.format(img_dir, img_name)
        img = Image.open(img_path)
        ori_h = img.size[1]
        ori_w = img.size[0]
        name = os.path.splitext(img_name)[0]
        label_path = '{:s}/{:s}_label.png'.format(label_dir, name)
        gt = misc.imread(label_path)

        input = test_transform((img, ))[0].unsqueeze(0)
        print('\tComputing output probability maps...')
        prob_maps = get_probmaps(input, model, opt)
        pred = np.argmax(prob_maps, axis=0)  # prediction

        pred_labeled = measure.label(pred)
        pred_labeled = morph.remove_small_objects(pred_labeled,
                                                  opt.post['min_area'])
        pred_labeled = ndi_morph.binary_fill_holes(pred_labeled > 0)
        pred_labeled = measure.label(pred_labeled)

        print('\tComputing metrics...')
        metrics = compute_metrics(pred_labeled, gt, metric_names)

        # save result for each image
        test_results[name] = [
            metrics['acc'], metrics['p_F1'], metrics['p_recall'],
            metrics['p_precision'], metrics['dice'], metrics['aji']
        ]

        # update the average result
        all_result.update([
            metrics['acc'], metrics['p_F1'], metrics['p_recall'],
            metrics['p_precision'], metrics['dice'], metrics['aji']
        ])

        # save image
        if save_flag:
            print('\tSaving image results...')
            misc.imsave('{:s}/{:s}_pred.png'.format(prob_maps_folder, name),
                        pred.astype(np.uint8) * 255)
            misc.imsave('{:s}/{:s}_prob.png'.format(prob_maps_folder, name),
                        prob_maps[1, :, :])
            final_pred = Image.fromarray(pred_labeled.astype(np.uint16))
            final_pred.save('{:s}/{:s}_seg.tiff'.format(seg_folder, name))

            # save colored objects
            pred_colored_instance = np.zeros((ori_h, ori_w, 3))
            for k in range(1, pred_labeled.max() + 1):
                pred_colored_instance[pred_labeled == k, :] = np.array(
                    utils.get_random_color())
            filename = '{:s}/{:s}_seg_colored.png'.format(seg_folder, name)
            misc.imsave(filename, pred_colored_instance)

        counter += 1
        if counter % 10 == 0:
            print('\tProcessed {:d} images'.format(counter))

    print('=> Processed all {:d} images'.format(counter))
    print('Average Acc: {r[0]:.4f}\nF1: {r[1]:.4f}\nRecall: {r[2]:.4f}\n'
          'Precision: {r[3]:.4f}\nDice: {r[4]:.4f}\nAJI: {r[5]:.4f}\n'.format(
              r=all_result.avg))

    header = metric_names
    utils.save_results(header, all_result.avg, test_results,
                       '{:s}/test_results.txt'.format(save_dir))
Example #52
    def __call__(self, oriImg):
        scale_search = [0.5, 1.0, 1.5, 2.0]
        # scale_search = [0.5]
        boxsize = 368
        stride = 8
        padValue = 128
        thre = 0.05
        multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
        heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22))
        # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))

        for m in range(len(multiplier)):
            scale = multiplier[m]
            imageToTest = cv2.resize(oriImg, (0, 0),
                                     fx=scale,
                                     fy=scale,
                                     interpolation=cv2.INTER_CUBIC)
            imageToTest_padded, pad = util.padRightDownCorner(
                imageToTest, stride, padValue)
            im = np.transpose(
                np.float32(imageToTest_padded[:, :, :, np.newaxis]),
                (3, 2, 0, 1)) / 256 - 0.5
            im = np.ascontiguousarray(im)

            data = torch.from_numpy(im).float()
            if torch.cuda.is_available():
                data = data.cuda()
            # data = data.permute([2, 0, 1]).unsqueeze(0).float()
            with torch.no_grad():
                output = self.model(data).cpu().numpy()
                # output = self.model(data).numpy()

            # extract outputs, resize, and remove padding
            heatmap = np.transpose(np.squeeze(output),
                                   (1, 2, 0))  # output 1 is heatmaps
            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=stride,
                                 fy=stride,
                                 interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:imageToTest_padded.shape[0] -
                              pad[2], :imageToTest_padded.shape[1] - pad[3], :]
            heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)

            heatmap_avg += heatmap / len(multiplier)

        all_peaks = []
        for part in range(21):
            map_ori = heatmap_avg[:, :, part]
            one_heatmap = gaussian_filter(map_ori, sigma=3)
            binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8)
            # all values below threshold
            if np.sum(binary) == 0:
                all_peaks.append([0, 0])
                continue
            label_img, label_numbers = label(binary,
                                             return_num=True,
                                             connectivity=binary.ndim)
            max_index = np.argmax([
                np.sum(map_ori[label_img == i])
                for i in range(1, label_numbers + 1)
            ]) + 1
            label_img[label_img != max_index] = 0
            map_ori[label_img == 0] = 0

            y, x = util.npmax(map_ori)
            all_peaks.append([x, y])
        return np.array(all_peaks)
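
# util.npmax is assumed above and not shown; a minimal sketch consistent with
# its use (return the row and column of the array maximum):
import numpy as np

def npmax(array):
    y, x = np.unravel_index(np.argmax(array), array.shape)
    return y, x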
Example #53
from skimage import measure
from skimage import filters
import matplotlib.pyplot as plt
import numpy as np

n = 12
l = 256
np.random.seed(1)
im = np.zeros((l, l))
points = l * np.random.random((2, n**2))
im[(points[0]).astype(int), (points[1]).astype(int)] = 1
im = filters.gaussian(im, sigma=l / (4. * n))
blobs = im > 0.7 * im.mean()

all_labels = measure.label(blobs)
blobs_labels = measure.label(blobs, background=0)

plt.figure(figsize=(9, 3.5))
plt.subplot(131)
plt.imshow(blobs, cmap='gray')
plt.axis('off')
plt.subplot(132)
plt.imshow(all_labels, cmap='nipy_spectral')
plt.axis('off')
plt.subplot(133)
plt.imshow(blobs_labels, cmap='nipy_spectral')
plt.axis('off')

plt.tight_layout()
plt.show()
    def detectCharacterCandidates(self, region):
        # apply a 4-point transform to extract the license plate
        plate = perspective.four_point_transform(self.image, region)
        cv2.imshow("Perspective Transform", imutils.resize(plate, width=400))

        # extract the Value component from the HSV color space and apply adaptive thresholding
        # to reveal the characters on the license plate
        V = cv2.split(cv2.cvtColor(plate, cv2.COLOR_BGR2HSV))[2]
        # threshold_local (threshold_adaptive was removed from skimage)
        T = threshold_local(V, 29, offset=15)
        thresh = (V > T).astype("uint8") * 255
        thresh = cv2.bitwise_not(thresh)

        # resize the license plate region to a canonical size
        plate = imutils.resize(plate, width=400)
        thresh = imutils.resize(thresh, width=400)
        cv2.imshow("LP Threshold", thresh)

        # perform a connected components analysis and initialize the mask to store the locations
        # of the character candidates
        labels = measure.label(thresh, connectivity=2, background=0)  # neighbors=8 in older skimage
        charCandidates = np.zeros(thresh.shape, dtype="uint8")

        # loop over the unique components
        for label in np.unique(labels):
            # if this is the background label, ignore it
            if label == 0:
                continue

            # otherwise, construct the label mask to display only connected components for the
            # current label, then find contours in the label mask
            labelMask = np.zeros(thresh.shape, dtype="uint8")
            labelMask[labels == label] = 255
            (cnts, _) = cv2.findContours(labelMask, cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)

            # ensure at least one contour was found in the mask
            if len(cnts) > 0:
                # grab the largest contour which corresponds to the component in the mask, then
                # grab the bounding box for the contour
                c = max(cnts, key=cv2.contourArea)
                (boxX, boxY, boxW, boxH) = cv2.boundingRect(c)

                # compute the aspect ratio, solidity, and height ratio for the component
                aspectRatio = boxW / float(boxH)
                solidity = cv2.contourArea(c) / float(boxW * boxH)
                heightRatio = boxH / float(plate.shape[0])

                # determine if the aspect ratio, solidity, and height of the contour pass
                # the rules tests
                keepAspectRatio = aspectRatio < 1.0
                keepSolidity = solidity > 0.15
                keepHeight = heightRatio > 0.4 and heightRatio < 0.95

                # check to see if the component passes all the tests
                if keepAspectRatio and keepSolidity and keepHeight:
                    # compute the convex hull of the contour and draw it on the character
                    # candidates mask
                    hull = cv2.convexHull(c)
                    cv2.drawContours(charCandidates, [hull], -1, 255, -1)

        # clear pixels that touch the borders of the character candidates mask and detect
        # contours in the candidates mask
        charCandidates = segmentation.clear_border(charCandidates)
        (cnts, _) = cv2.findContours(charCandidates.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
        cv2.imshow("Original Candidates", charCandidates)

        # if there are more character candidates than the supplied number, then prune
        # the candidates
        if len(cnts) > self.numChars:
            (charCandidates, cnts) = self.pruneCandidates(charCandidates, cnts)
            cv2.imshow("Pruned Candidates", charCandidates)

        # take bitwise AND of raw thresholded image and character candidates to get a more
        # clean segmentation of the characters
        thresh = cv2.bitwise_and(thresh, thresh, mask=charCandidates)
        cv2.imshow("Char Threshold", thresh)

        # return the license plate region object containing the license plate, the thresholded
        # license plate, and the character candidates
        return LicensePlate(success=True,
                            plate=plate,
                            thresh=thresh,
                            candidates=charCandidates)
    io.imsave(cy3_file + "fill_tubes.png",
              img_as_uint(fill_tubes),
              cmap=cm.gray)
    cy3_endpoint_mask = make_endpoints_mask(fill_tubes)

    edges_open_647 = canny(image_647, 2, 1, 25)
    selem = disk(2)
    edges_647 = closing(edges_open_647, selem)
    fill_tubes_647 = ndi.binary_fill_holes(edges_647)
    io.imsave(atto647_file + "fill_tubes.png",
              img_as_uint(fill_tubes_647),
              cmap=cm.gray)
    atto647_endpoint_mask = make_endpoints_mask(fill_tubes_647)

    #label image
    label_image = label(fill_tubes)
    label_image_647 = label(fill_tubes_647)

    regions_joined_647 = []
    regions_joined_cy3 = []
    print("detecting joining")
    print(len(regionprops(label_image_647)))
    for region_647 in regionprops(label_image_647):
        is_joined = 0
        if region_647.area / tube_width >= length_cutoff and region_647.eccentricity >= eccentricity_cutoff:
            for region in regionprops(label_image):
                if region.area / tube_width < length_cutoff or region.eccentricity < eccentricity_cutoff:
                    continue
                region_647_coords = region_647.coords.tolist()
                region_coords = region.coords.tolist()
def detect(filepath, file):

    font = cv2.FONT_HERSHEY_SIMPLEX   # display font: normal-size sans-serif
    img = cv2.imread(filepath+file)
    cimg = img
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # convert BGR to HSV

    # color range
    lower_red1 = np.array([0,100,100])        # red range in HSV space (low hues)
    upper_red1 = np.array([10,255,255])
    lower_red2 = np.array([160,100,100])      # red range in HSV space (high hues)
    upper_red2 = np.array([180,255,255])
    lower_green = np.array([40,50,50])        # green range in HSV space
    upper_green = np.array([90,255,255])
    lower_yellow = np.array([15,150,150])     # yellow range in HSV space
    upper_yellow = np.array([35,255,255])
    # build one mask per traffic-light color from the ranges above
    mask1 = cv2.inRange(hsv, lower_red1,   upper_red1)
    mask2 = cv2.inRange(hsv, lower_red2,   upper_red2)
    mask_g = cv2.inRange(hsv, lower_green,  upper_green)
    mask_y = cv2.inRange(hsv, lower_yellow, upper_yellow)
    mask_r = cv2.add(mask1, mask2)

    # structuring element for morphological opening
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (1,1))  # MORPH_CROSS works better than MORPH_RECT here
    # opening
    opened_r  = cv2.morphologyEx(mask_r, cv2.MORPH_OPEN, element)
    opened_g  = cv2.morphologyEx(mask_g, cv2.MORPH_OPEN, element)
    opened_y  = cv2.morphologyEx(mask_y, cv2.MORPH_OPEN, element)

    ################ detect red traffic light ################
    segmentation.clear_border(opened_r)          # remove objects touching the border
    label_image_r = measure.label(opened_r)      # label connected regions
    borders_r = np.logical_xor(mask_r, opened_r) # XOR: pixels removed by opening
    label_image_r[borders_r] = -1
    for region_r in measure.regionprops(label_image_r): # iterate over connected-region properties
        # skip regions that are too small or too large
        if region_r.convex_area < 120 or region_r.area > 2000:
            continue
        # bounding-rectangle and shape statistics
        area = region_r.area                   # region area (pixel count)
        eccentricity = region_r.eccentricity   # region eccentricity
        convex_area  = region_r.convex_area    # pixel count of the convex hull
        minr, minc, maxr, maxc = region_r.bbox # bounding box
        radius = max(maxr-minr,maxc-minc)/2    # half the longer side of the bounding box
        centroid = region_r.centroid           # centroid coordinates
        perimeter    = region_r.perimeter      # region perimeter
        x = int(centroid[0])
        y = int(centroid[1])
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)

        if perimeter == 0:
            circularity = 1
        else:
            circularity = 4*3.141592*area/(perimeter*perimeter)
            circum_circularity      = 4*3.141592*convex_area/(4*3.1592*3.1592*radius*radius)

        if eccentricity <= 0.4 or circularity >= 0.7 or circum_circularity >= 0.73:
            cv2.circle(cimg, (y,x), int(radius), (0,0,255),3)
            cv2.putText(cimg,'RED',(y,x), font, 1,(0,0,255),2)
            return "RED",cimg
        else:
            continue

    ################ detect green traffic light ################
    segmentation.clear_border(opened_g)          # remove objects touching the border
    label_image_g = measure.label(opened_g)      # label connected regions
    borders_g = np.logical_xor(mask_g, opened_g) # XOR: pixels removed by opening
    label_image_g[borders_g] = -1
    #image_label_overlay_g = color.label2rgb(label_image_g, image=opened_g) # show each label in a different color
    for region_g in measure.regionprops(label_image_g): # iterate over connected-region properties
        if region_g.convex_area < 130 or region_g.area > 2000:
            continue
        area = region_g.area                   # region area (pixel count)
        eccentricity = region_g.eccentricity   # region eccentricity
        convex_area  = region_g.convex_area    # pixel count of the convex hull
        minr, minc, maxr, maxc = region_g.bbox # bounding box
        radius       = max(maxr-minr,maxc-minc)/2    # half the longer side of the bounding box
        centroid     = region_g.centroid           # centroid coordinates
        perimeter    = region_g.perimeter      # region perimeter
        x = int(centroid[0])
        y = int(centroid[1])
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)

        if perimeter == 0:
            circularity = 1
        else:
            circularity = 4*3.141592*area/(perimeter*perimeter)
            circum_circularity      = 4*3.141592*convex_area/(4*3.1592*3.1592*radius*radius)

        if eccentricity <= 0.4 or circularity >= 0.7 or circum_circularity >= 0.8:
            cv2.circle(cimg, (y,x), int(radius), (0,255,0),3)
            cv2.putText(cimg,'GREEN',(y,x), font, 1,(0,255,0),2)
            return "GREEN",cimg
        else:
            continue
##
##    ################ detect yellow traffic light ################
##    segmentation.clear_border(opened_y)  # remove objects touching the border
##    label_image_y = measure.label(opened_y)  # label connected regions
##    borders_y = np.logical_xor(mask_y, opened_y) # XOR: pixels removed by opening
##    label_image_y[borders_y] = -1
##    #image_label_overlay_y = color.label2rgb(label_image_y, image=opened_y) # show each label in a different color
##    for region_y in measure.regionprops(label_image_y): # iterate over connected-region properties
##        if region_y.convex_area < 130 or region_y.area > 2000:
##            continue
##        area = region_y.area   # region area (pixel count)
##        eccentricity = region_y.eccentricity   # region eccentricity
##        convex_area  = region_y.convex_area    # pixel count of the convex hull
##        minr, minc, maxr, maxc = region_y.bbox # bounding box
##        radius       = max(maxr-minr,maxc-minc)/2    # half the longer side of the bounding box
##        centroid     = region_y.centroid           # centroid coordinates
##        perimeter    = region_y.perimeter      # region perimeter
##        x = int(centroid[0])
##        y = int(centroid[1])
##        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
##                                  fill=False, edgecolor='red', linewidth=2)
##
##        if perimeter == 0:
##            circularity = 1
##        else:
##            circularity = 4*3.141592*area/(perimeter*perimeter)
##            circum_circularity      = 4*3.141592*convex_area/(4*3.1592*3.1592*radius*radius)
##
##        if eccentricity <= 0.4 or circularity >= 0.7 or circum_circularity >= 0.8:
##            cv2.circle(cimg, (y,x), int(radius), (0,255,255),3)
##            cv2.putText(cimg,'YELLOW',(y,x), font, 1,(0,255,255),2)
##            return "YELLOW",cimg
##        else:
##            continue

    return "NONE",img
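
# A worked check of the circularity rule used above: an ideal disk of radius r
# has area pi*r^2 and perimeter 2*pi*r, so 4*pi*area/perimeter^2 = 1; the
# >= 0.7 cutoff therefore accepts roughly circular lamp regions.
import math

r = 10.0
area = math.pi * r ** 2
perimeter = 2 * math.pi * r
print(4 * math.pi * area / perimeter ** 2)  # 1.0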
Example #57
pix = np.reshape(temp.get_raw_pixels(),
                 newshape=[temp.get_height(),
                           temp.get_width()])

if analyze_organelle == 'nucleoli':
    # nuclear detection (currently only doable for nucleoli staining image)
    label_nuclear, _ = find_nuclear_nucleoli(pix)
    data_log['num_nuclei_detected'] = [np.amax(label_nuclear)]
    print("Found %d nuclei." % data_log['num_nuclei_detected'][0])

# organelle detection
organelle_before_filter, organelle = find_organelle(pix,
                                                    thresholding,
                                                    min_size=min_size,
                                                    max_size=max_size)
label_organelle = label(organelle, connectivity=1)
data_log['num_%s_detected' % analyze_organelle] = [obj.object_count(organelle)]
print("Found %d %s." %
      (data_log['num_%s_detected' % analyze_organelle][0], analyze_organelle))

# organelle pd dataset
organelle_pd = organelle_analysis(pix, organelle, '%s' % analyze_organelle,
                                  pos)

if analyze_organelle == 'nucleoli':
    # link nucleoli with their corresponding nuclei
    round_x = [round(num) for num in organelle_pd['x']]
    round_y = [round(num) for num in organelle_pd['y']]
    organelle_pd['nuclear'] = obj.points_in_objects(label_nuclear, round_y,
                                                    round_x)
image = data.coins()
equalized = exposure.equalize_adapthist(image)
edges = equalized > filters.threshold_otsu(equalized)
edges = segmentation.clear_border(edges)
edges = morphology.closing(edges, morphology.square(3))

f, (ax0, ax1) = plt.subplots(1, 2)
ax0.imshow(image, cmap='gray')
ax1.imshow(edges, cmap='gray')

labels = measure.label(edges)
for region in measure.regionprops(labels):
    if region.area < 200:
        rows, cols = region.coords.T
        labels[rows, cols] = 0

print("Number of coins:", len(np.unique(labels)) - 1)
        
out = color.label2rgb(labels, image, bg_label=0)
plt.imshow(out);   


Example #59
def get_segmented_lungs(im, plot=False):
    '''
    This funtion segments the lungs from the given 2D slice.
    '''
    if plot == True:
        f, plots = plt.subplots(8, 1, figsize=(5, 40))
    '''
    Step 1: Convert into a binary image. 
    '''
    binary = im < -600
    if plot == True:
        plots[0].axis('off')
        plots[0].imshow(binary, cmap=plt.cm.bone)
    '''
    Step 2: Remove the blobs connected to the border of the image.
    '''
    cleared = clear_border(binary)
    if plot == True:
        plots[1].axis('off')
        plots[1].imshow(cleared, cmap=plt.cm.bone)
    '''
    Step 3: Label the image.
    '''
    label_image = label(cleared)
    if plot == True:
        plots[2].axis('off')
        plots[2].imshow(label_image, cmap=plt.cm.bone)
    '''
    Step 4: Keep the labels with 2 largest areas.
    '''
    areas = [r.area for r in regionprops(label_image)]
    areas.sort()
    if len(areas) > 2:
        for region in regionprops(label_image):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    label_image[coordinates[0], coordinates[1]] = 0
    binary = label_image > 0
    if plot == True:
        plots[3].axis('off')
        plots[3].imshow(binary, cmap=plt.cm.bone)
    '''
    Step 5: Erosion operation with a disk of radius 2. This operation
    separates the lung nodules attached to the blood vessels.
    '''
    selem = disk(2)
    binary = binary_erosion(binary, selem)
    if plot == True:
        plots[4].axis('off')
        plots[4].imshow(binary, cmap=plt.cm.bone)
    '''
    Step 6: Closure operation with a disk of radius 10. This operation is 
    to keep nodules attached to the lung wall.
    '''
    selem = disk(10)
    binary = binary_closing(binary, selem)
    if plot == True:
        plots[5].axis('off')
        plots[5].imshow(binary, cmap=plt.cm.bone)
    '''
    Step 7: Fill in the small holes inside the binary mask of lungs.
    '''
    edges = roberts(binary)
    binary = ndi.binary_fill_holes(edges)
    if plot == True:
        plots[6].axis('off')
        plots[6].imshow(binary, cmap=plt.cm.bone)
    '''
    Step 8: Superimpose the binary mask on the input image.
    '''
    get_high_vals = binary == 0
    im[get_high_vals] = 0
    if plot == True:
        plots[7].axis('off')
        plots[7].imshow(im, cmap=plt.cm.bone)
        plt.show()

    return im
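
# A minimal usage sketch (ct_volume is a hypothetical 3-D array of HU values):
# apply the 2-D segmentation slice by slice.
import numpy as np

ct_volume = np.random.randint(-1000, 400, size=(4, 128, 128))  # hypothetical CT
segmented = np.stack([get_segmented_lungs(s.copy()) for s in ct_volume])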
Example #60
def two_lung_only(bw, spacing, max_iter=22, max_ratio=4.8):
    def extract_main(bw, cover=0.95):
        for i in range(bw.shape[0]):
            current_slice = bw[i]
            label = measure.label(current_slice)
            properties = measure.regionprops(label)
            properties.sort(key=lambda x: x.area, reverse=True)
            area = [prop.area for prop in properties]
            count = 0
            sum = 0
            while sum < np.sum(area) * cover:
                sum = sum + area[count]
                count = count + 1
            filter = np.zeros(current_slice.shape, dtype=bool)
            for j in range(count):
                bb = properties[j].bbox
                filter[bb[0]:bb[2], bb[1]:bb[3]] = filter[
                    bb[0]:bb[2], bb[1]:bb[3]] | properties[j].convex_image
            bw[i] = bw[i] & filter

        label = measure.label(bw)
        properties = measure.regionprops(label)
        properties.sort(key=lambda x: x.area, reverse=True)
        bw = label == properties[0].label

        return bw

    def fill_2d_hole(bw):
        for i in range(bw.shape[0]):
            current_slice = bw[i]
            label = measure.label(current_slice)
            properties = measure.regionprops(label)
            for prop in properties:
                bb = prop.bbox
                current_slice[bb[0]:bb[2], bb[1]:bb[3]] = current_slice[
                    bb[0]:bb[2], bb[1]:bb[3]] | prop.filled_image
            bw[i] = current_slice

        return bw

    found_flag = False
    iter_count = 0
    bw0 = np.copy(bw)
    while not found_flag and iter_count < max_iter:
        label = measure.label(bw, connectivity=2)
        properties = measure.regionprops(label)
        properties.sort(key=lambda x: x.area, reverse=True)
        if len(properties
               ) > 1 and properties[0].area / properties[1].area < max_ratio:
            found_flag = True
            bw1 = label == properties[0].label
            bw2 = label == properties[1].label
        else:
            bw = scipy.ndimage.binary_erosion(bw)
            iter_count = iter_count + 1

    if found_flag:
        d1 = scipy.ndimage.distance_transform_edt(~bw1, sampling=spacing)
        d2 = scipy.ndimage.distance_transform_edt(~bw2, sampling=spacing)
        bw1 = bw0 & (d1 < d2)
        bw2 = bw0 & (d1 > d2)

        bw1 = extract_main(bw1)
        bw2 = extract_main(bw2)

    else:
        bw1 = bw0
        bw2 = np.zeros(bw.shape).astype('bool')

    bw1 = fill_2d_hole(bw1)
    bw2 = fill_2d_hole(bw2)
    bw = bw1 | bw2

    return bw1, bw2, bw
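
# A minimal usage sketch for two_lung_only (the mask and voxel spacing are
# hypothetical): split a binary lung mask into left and right lungs.
import numpy as np

lung_mask = np.zeros((8, 64, 64), dtype=bool)
lung_mask[:, 10:30, 8:28] = True    # toy "left lung" blob
lung_mask[:, 10:30, 36:56] = True   # toy "right lung" blob
bw1, bw2, bw = two_lung_only(lung_mask, spacing=(2.5, 0.7, 0.7))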