Example #1
def segment(template, actual):

	ret3,th3 = cv2.threshold(template,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
	
	# noise removal
	kernel = np.ones((3,3),np.uint8)
	opening = cv2.morphologyEx(th3,cv2.MORPH_OPEN,kernel, iterations = 2)

	# sure background area
	sure_bg = cv2.dilate(opening,kernel,iterations=3)

	# Finding sure foreground area
	dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
	ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

	# Finding unknown region
	sure_fg = np.uint8(sure_fg)
	unknown = cv2.subtract(sure_bg,sure_fg)
		
	# Marker labelling
	ret, markers = cv2.connectedComponents(sure_fg)

	# Add one to all labels so that sure background is not 0, but 1
	markers = markers+1

	# Now, mark the region of unknown with zero
	markers[unknown==255] = 0

	# watershed needs a 3-channel image; build one from the grayscale template
	img = cv2.cvtColor(template, cv2.COLOR_GRAY2BGR)
	markers = cv2.watershed(img,markers)
	img[markers == -1] = [255,0,0]
	
	return img
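A minimal usage sketch for the routine above (not from the original source; the filename is a placeholder and the unused `actual` argument is passed as None):

# Hypothetical driver for segment(); 'coins.png' is a placeholder path.
import cv2

template = cv2.imread('coins.png', cv2.IMREAD_GRAYSCALE)  # single-channel input for Otsu
overlay = segment(template, None)                         # watershed boundaries painted blue
cv2.imwrite('segment_overlay.png', overlay)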
def _generate_training_set(img, image_file):
    save_location = "images/training/"
    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
    _, regions = cv2.connectedComponents(img)

    if not os.path.exists("../images/cc"):
        os.makedirs("../images/cc")

    cv2.imwrite("../images/cc/cc.png", regions)
    cc = cv2.imread("../images/cc/cc.png", 0)
    _, cc_vis = cv2.threshold(cc, 1, 255, cv2.THRESH_BINARY)

    # OpenCV 3.x findContours returns (image, contours, hierarchy)
    _, contours, hierarchy = cv2.findContours(cc_vis, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    idx = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area < 50 or area > 1000:
            continue
        if len(cnt) < 5:
            continue
        idx += 1
        x, y, w, h = cv2.boundingRect(cnt)
        roi = img[y: y + h, x: x + w]
        name = image_file.split('.')[0]
        inverted = (255 - roi)
        cv2.imwrite(save_location + name + str(idx) + '.jpg', inverted)
    cv2.waitKey(0)
Example #3
def watershed2(seem, img):

    if (img.dtype != np.uint8):
        img = img * 255
        img = np.uint8(img)
    ut.imgDes(img)
    img = cv.medianBlur(img, 3)

    rows, cols = img.shape[:2]

    if np.size(img.shape) < 3:
        img_a = cv.cvtColor(img, cv.COLOR_GRAY2RGB)
    else:
        img_a = img
    print ('ddd')
    ut.imgDes(img_a)
    ret, markers = cv.connectedComponents(seem)
    markers = markers + 1
    markers[seem == 0] = 0
    markers = cv.watershed(img_a, markers)
    markers[img==0] = 0



    return markers
def watershed(img, thresh):
    # noise removal
    kernel = np.ones((3,3), np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 4)

    # sure background area
    sure_bg = cv2.dilate(opening,kernel,iterations=3)

    #sure_bg = cv2.morphologyEx(sure_bg, cv2.MORPH_TOPHAT, kernel)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers+1

    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0
    '''
    imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    imgray = cv2.GaussianBlur(imgray, (5, 5), 0)
    img = cv2.Canny(imgray,200,500)
    '''
    markers = cv2.watershed(img,markers)
    img[markers == -1] = [255,0,0]

    return sure_bg, sure_fg
def id_tabs(img, avg_line_height=20, line_blur=20, tab_wiggle_room=5, disp=False):
    """
    Attempts to identify the indent level of each line of text, with the assumption that the first line is at level 0.

    :param img: the input image (should contain text)
    :param avg_line_height: the expected vertical height of a line of text
    :param line_blur: how far the image is blurred to extract features (img.width/line_blur)
    :param tab_wiggle_room: how far in pixels tabs are allowed to be from one another before they are considered distinct
    :param disp: whether to display intermediate results
    :return: An integer list representing the tab level for each line
    """
    # the input image is assumed grayscale
    # aggressively horizontally blur the image
    r, c = len(img), len(img[0])
    horizontal_size = c // line_blur  # integer size for getStructuringElement
    horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))
    img = cv2.filter2D(img, -1, horizontal_structure)
    if disp:
        vis_img, _ = auto_crop.reduce_image(img.copy())
        cv2.imshow('Horizontally Blur', vis_img)
        cv2.waitKey(0)
    # Identify connected components & generate bounding boxes
    n, regions = cv2.connectedComponents(img)
    img = np.uint8(regions)
    bbs = _generate_bounding_boxes(img, n, avg_line_height)

    return _analyze_bounding_boxes(bbs, tab_wiggle_room)
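A hedged usage sketch for id_tabs (an assumption, not shown in the original; _generate_bounding_boxes and _analyze_bounding_boxes are helpers from the same module):

# Hypothetical driver; 'code_page.png' is a placeholder path.
import cv2

page = cv2.imread('code_page.png', cv2.IMREAD_GRAYSCALE)
_, page = cv2.threshold(page, 127, 255, cv2.THRESH_BINARY_INV)  # text becomes white blobs
print(id_tabs(page, avg_line_height=20))  # e.g. [0, 1, 1, 2, ...]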
    def watershed(self,img):
        '''
            Perform region segmentation with watershed.
            args :      -> 
            dst  :      -> 
            param:      -> 
        '''
        gimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gimg,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

        # noise removal
        kernel = np.ones((3,3),np.uint8)
        opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
        
        # sure background area
        sure_bg = cv2.dilate(opening,kernel,iterations=3)
        
        # Finding sure foreground area
        dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
        ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
        
        # Finding unknown region
        sure_fg = np.uint8(sure_fg)
        unknown = cv2.subtract(sure_bg,sure_fg) 

        # Marker labelling
        ret, markers = cv2.connectedComponents(sure_fg)
        
        # Add one to all labels so that sure background is not 0, but 1
        markers = markers+1
        
        # Now, mark the region of unknown with zero
        markers[unknown==255] = 0
Example #7
def water(img, thresh):
    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)

    # sure background area
    sure_bg = cv2.dilate(opening,kernel,iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
    ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers += 1

    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0

    markers = cv2.watershed(img,markers)
    img[markers == -1] = [255,0,0]
    return sure_fg, sure_bg, markers, img
Example #8
def _level_sets(im_clean, nlevels, prep=_dilation_and_erosion):
    """
    Divide the image into level sets and count the number of objects in each of them.

    :param im_clean: 2d array with :code:`im_clean.max() == 1`
    :param int nlevels: number of levels to search for objects (positive integer)
    :param prep: callable that takes a 2d array as its only argument and returns a 2d array
    :return: sequence with the number of objects in each respective level
    """
    if nlevels <= 0:
        raise ValueError("nlevels must be positive")
    prep = prep or (lambda x: x)  # if no preprocessing should be done, use the identity function

    # TODO change the levels. Reason:
    #  - in the for loop, the > operator is used. The highest level is 1, therefore the highest level set will always
    #    be empty. The ndimage.label function then returns 1 as the number of objects in the empty image, although it
    #    should be zero.
    # Proposed solution:
    # levels = np.linspace(0, 1, nlevels + 2)[1:-1]
    # That is, create nlevels + 2 levels, then throw away the zero level and the one level
    # or:
    # levels = np.linspace(0, 1, nlevels)[1:-1]
    # That is, only use nlevels - 2 levels. This means that the output array will have a size of nlevels - 2
    levels = np.linspace(0, 1, nlevels+1)[:-1]  # np.amin(im), np.amax(im)
    # Go through levels and calculate number of objects
    num_objs = []
    count_func = (lambda im: cv2.connectedComponents(im.astype(np.uint8), connectivity=4)[0] - 1) if opencv_found else (lambda im: ndimage.label(im)[1])
    for lev in levels:
        # Threshold at level
        bw = (im_clean > lev)
        bw = prep(bw)
        # Record objects at this level
        num_objs.append(count_func(bw))
    return num_objs
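To make the TODO above concrete, these are the two level grids for nlevels = 4 (plain NumPy, no further assumptions):

import numpy as np

nlevels = 4
print(np.linspace(0, 1, nlevels + 1)[:-1])   # current:  [0.   0.25 0.5  0.75], still includes the 0 level
print(np.linspace(0, 1, nlevels + 2)[1:-1])  # proposed: [0.2  0.4  0.6  0.8 ], drops both endpoints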
Example #9
    def layerize(self, image):
        uniq_values = np.unique(image)
        print(uniq_values)
        img_layer = image == uniq_values[1]
        plt.imshow(img_layer); plt.show()
        # connectedComponents needs an 8-bit unsigned single-channel image
        img_layer_comp = cv2.connectedComponents(img_layer.astype(np.uint8))
        print(img_layer_comp[0])
        plt.imshow(img_layer_comp[1]); plt.show()
def segmentacion_01(img_ndvi):  # segmentation only

    veg_mask = s.segOtsu(img_ndvi)
    img_veg = cv.bitwise_and(img_ndvi,img_ndvi,mask = veg_mask)
    img_veg = img_veg * 255

    img_veg = np.uint8(img_veg)
    clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    cl1 = clahe.apply(img_veg)
    cv.imshow('vegetaciónCL', cl1)



    opening = cv.morphologyEx(cl1, cv.MORPH_OPEN,  cv.getStructuringElement(cv.MORPH_ELLIPSE,(7,7)))
    #opening = cv.GaussianBlur(opening,(7,7),0)
    #opening=cv.blur(opening,(5,5))
    opening=cv.medianBlur(opening,7)

    opening[opening > 135] = 0
    # junction edges: thicken them, then subtract them from the original image
    laplacian = cv.Laplacian(opening, cv.CV_64F, ksize=1, scale=.05, borderType=cv.BORDER_DEFAULT)
    laplacian = cv.dilate(laplacian, cv.getStructuringElement(cv.MORPH_ELLIPSE, (7, 7)))

    newimg = opening  # np.zeros((907,1209),np.uint8)
    newimg[laplacian > .9] = 0

    cv.imshow('laplacian', laplacian)
    cv.imshow('segmentacion_03', newimg)

    #erdoe =  cv.erode(newimg,cv.getStructuringElement(cv.MORPH_ELLIPSE,(5,5)))
    #cv.imshow('segmentacion_03', erdoe)

    opening = cv.morphologyEx(newimg, cv.MORPH_CLOSE,  cv.getStructuringElement(cv.MORPH_ELLIPSE,(7,7)))
    opening = cv.morphologyEx(opening, cv.MORPH_CLOSE,  cv.getStructuringElement(cv.MORPH_ELLIPSE,(7,7)))
    cv.imshow('segmentacion_4', opening)


    #opening = cv.threshold(opening,.8,1,cv.THRESH_BINARY)
    # Marker labelling
    ret, markers = cv.connectedComponents(opening)


    # Add one to all labels so that sure background is not 0, but 1
    markers = markers+1

    # Now, mark the region of unknown with zero
    markers[veg_mask==0] = 0

    vegMask = img_ndvi*255
    vegMask = np.uint8(vegMask)

    vegMask = cv.cvtColor(opening, cv.COLOR_GRAY2BGR)

    markers = cv.watershed(vegMask,markers)
    vegMask[markers == -1] = [255,0,0]

    cv.imshow('segmentacion', vegMask)
    cv.waitKey(0)
    return 0
    def process(self):
        thresholder = ThresholdSegmentation(~self.image, 220, thresh_type='GRAYSCALE')
        image, mask = thresholder.segment_image()

        mask *= 255

        kernel = np.ones((80,80),np.uint8)
        morphed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel=kernel)

        labelCount, labelledMask = cv2.connectedComponents(morphed)

        counts = np.bincount(labelledMask[labelledMask!=0])
        maxIndex = counts.argmax()

        labelledMask[labelledMask!=maxIndex] = 0
        labelledMask[labelledMask==maxIndex] = 255

        labelledMask = labelledMask.astype(np.uint8)

        cv2.imshow('max', cv2.resize(labelledMask.astype(np.uint8), (450, 600)))

        rows = []
        widths = []

        for row in labelledMask:
            res = np.where(row==255)
            leftPos = res[0][0] if len(res[0]) > 1 else res[0] if len(res[0]) != 0 else -1
            width = np.where(row[leftPos:] == 0)

            width = width[0][0] if len(width[0]) > 1 else width[0] if len(width[0]) != 0 else -1
            rightPos = leftPos + width
            widths.append(rightPos-leftPos)
            rows.append((leftPos, rightPos))

        hist, bins = np.histogram(widths)
        peaks = argrelextrema(hist, np.greater)

        min_peak = peaks[0][0] if len(peaks[0]) > 1 else peaks[0]
        width = int(round(bins[min_peak]))

        height = width*2

        print "First peak is at: {0}, with value {1}".format(min_peak, bins[min_peak])
        print "Estimated Domino Dimensions: {0}x{1}px".format(width, height)

        annotated = self.image.copy()

        cv2.rectangle(annotated, (0, 0), (width, height), (255, 0, 0), 5)
        cv2.imshow('annotated', cv2.resize(annotated, (450, 600)))

        plt.plot(hist)

        #plt.plot(rows)
        plt.show()

        cv2.waitKey(0)
def perform_watershed(foreground, nuclei):
    ret, markers = cv2.connectedComponents(nuclei)
    foreground = 1 - foreground
    markers = foreground + markers
    foreground_reshape = np.zeros((foreground.shape[0], foreground.shape[1], 3), dtype=np.uint8)
    foreground_reshape[:,:,0] = 1-foreground
    foreground_reshape[:,:,1] = 1-foreground
    foreground_reshape[:,:,2] = 1-foreground
    markers_watershed = cv2.watershed(foreground_reshape,markers)
    return markers_watershed
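A sketch of how the two binary inputs might be produced for the function above (assumptions: a grayscale image at a placeholder path, masks valued in {0, 1}, nuclei taken as eroded foreground):

# Hypothetical input construction; 'nuclei.png' is a placeholder path.
import cv2
import numpy as np

gray = cv2.imread('nuclei.png', cv2.IMREAD_GRAYSCALE)
_, fg = cv2.threshold(gray, 0, 1, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # 0/1 foreground mask
nuclei = cv2.erode(fg, np.ones((5, 5), np.uint8), iterations=2)         # shrink blobs to sure seeds
labels = perform_watershed(fg, nuclei)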
def segmentacion_02(img_ndvi):  # watershed

    veg_mask = s.segOtsu(img_ndvi)

    kernel = np.ones((5,5),np.uint8)
    veg_mask = cv.morphologyEx(veg_mask,cv.MORPH_OPEN,kernel, iterations = 3)


    img_veg = cv.bitwise_and(img_ndvi,img_ndvi,mask = veg_mask)
    img_veg = img_veg * 255
    vegMask = np.uint8(img_veg)
    vegMask = cv.cvtColor(vegMask, cv.COLOR_GRAY2BGR)

    #
    # kernel = np.ones((5,5),np.uint8)
    # opening = cv.morphologyEx(veg_mask,cv.MORPH_OPEN,kernel, iterations = 3)
    #
    #
    # # sure background area
    # sure_bg = cv.dilate(opening,kernel,iterations=3)
    #
    # # Finding sure foreground area
    # dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
    # ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)
    #
    # # Finding unknown region
    # sure_fg = np.uint8(sure_fg)
    # unknown = cv.subtract(sure_bg,sure_fg)


    # Marker labelling
    ret, markers = cv.connectedComponents(veg_mask)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers+1

    # Now, mark the region of unknown with zero
    markers[veg_mask==0] = 0



    markers = cv.watershed(vegMask,markers)
    vegMask[markers == -1] = [255,0,0]


    cv.imshow('maskara', veg_mask)
    # cv.imshow('openin', opening)
    # cv.imshow('sure_bg', sure_bg)
    # cv.imshow('dist_trans', dist_transform)
    # cv.imshow('sure_fg', sure_fg)
    # cv.imshow('unknow', unknown)
    # cv.imshow('markers', markers)
    cv.imshow('ffff', vegMask)
    cv.waitKey()
    return 0
Example #14
    def __mark_components(image: np.ndarray):
        """
        Marks the independent regions.

        :param np.ndarray image: The image.
        :return: Image with the disjoint regions highlighted.
        :rtype: np.ndarray
        """

        _, marked = cv2.connectedComponents(image)
        return marked
Example #15
    def get_target_position_fast(self, state, player_pos):
        state_cut = state[:player_pos[0], :, :]
        # target pixels are pure (245, 245, 245) white
        m1 = (state_cut[:, :, 0] == 245)
        m2 = (state_cut[:, :, 1] == 245)
        m3 = (state_cut[:, :, 2] == 245)
        m = np.uint8(np.float32(m1 * m2 * m3) * 255)
        b1, b2 = cv2.connectedComponents(m)
        # assumes exactly one component falls in the expected size range
        for i in range(1, np.max(b2) + 1):
            x, y = np.where(b2 == i)
            if len(x) > 280 and len(x) < 310:
                r_x, r_y = x, y
        h, w = int(r_x.mean()), int(r_y.mean())
        return np.array([h, w])
Example #16
    def frames(self):
        for frame in self.inputf.frames():
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
            # invert so that background=black, object=white
            thresh = (255-thresh)
            cv2.imwrite("is00_thresh.jpg", thresh)

            # noise removal
            kernel = np.ones((3, 3), np.uint8)
            opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
            cv2.imwrite("is01_opening.jpg", opening)
            # now white covers foreground but has foreground false positives
            # may be sufficient if we don't care about separating touching objects;
            # also I think there will be cases like the spider where the threshold level
            # will lose the joints of its legs

            # black cover background with background false positives
            sure_bg = cv2.dilate(opening, kernel, iterations=3)
            cv2.imwrite("is02a_sure_bg.jpg", sure_bg)

            # Finding sure foreground area
            dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
            ret, sure_fg = cv2.threshold(dist_transform,self.dt_param*dist_transform.max(),255,0)
            cv2.imwrite("is02b_sure_fg.jpg", sure_fg)

            # unknown region, i.e. the part of the image we are unsure about
            sure_fg = np.uint8(sure_fg)
            unknown = cv2.subtract(sure_bg,sure_fg)
            cv2.imwrite("is03_unknown.jpg", unknown)

            # Marker labelling
            ret, markers = cv2.connectedComponents(sure_fg)

            # Add one to all labels so that sure background is not 0, but 1
            markers = markers+1
            cv2.imwrite("is04_markers.jpg", markers)

            # Now, mark the region of unknown with zero
            markers[unknown==255] = 0
            cv2.imwrite("is05_markers_u0.jpg", markers)

            markers = cv2.watershed(frame, markers)
            frame[markers == -1] = [255,0,0]
            cv2.imwrite("is07_final.jpg", frame)

            break
        yield None
Example #17
def cell_watershed(img, dist_thresh = 0.7):
    gray = cv2.cvtColor(img ,cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

    # noise removal with a 3x3 kernel
    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
    #opening = thresh

    # sure background area, more dilation with more iterations
    sure_bg = cv2.dilate(opening,kernel,iterations = 2)

    # Finding sure foreground area, threshold might need changing: lower threshold-factor gives larger sure_fg
    dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
    ret, sure_fg = cv2.threshold(dist_transform,dist_thresh*dist_transform.max(),255,0) #0.7 default

    # Finding unknown region, borders of bg-fg
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers+1

    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0

    markers = cv2.watershed(img,markers)

    img[markers == -1] = [255,0,0]

    # Plots for the luls
    plt.figure()
    plt.imshow(markers)

    #plt.figure(123)
    #plt.imshow(sure_bg)

    #plt.figure(124)
    #plt.imshow(sure_fg)

    #plt.figure(126)
    #plt.imshow(thresh)

    return img, ret, markers
Example #18
def segmentasi(img):
	gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
	# noise removal
	kernel = np.ones((3,3),np.uint8)
	opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)

	# sure background area
	sure_bg = cv2.dilate(opening,kernel,iterations=3)

	# Finding sure foreground area
	dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
	ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

	# Finding unknown region
	sure_fg = np.uint8(sure_fg)
	unknown = cv2.subtract(sure_bg,sure_fg)
	# Marker labelling
	ret, markers = cv2.connectedComponents(sure_fg)

	# Add one to all labels so that sure background is not 0, but 1
	markers = markers+1

	# Now, mark the region of unknown with zero
	markers[unknown==255] = 0

	markers = cv2.watershed(img,markers)
	img[markers == -1] = [0,0,255]
	return img


# Digital Image Processing (PCD) final project
# 1. Sampling
# 2. Quantization
# 3. Zoom in
# 4. Zoom out
# 5. Vertical flip
# 6. Horizontal flip
# 7. Rotate (0-360)
# 8. Cut and paste
# 9. Histogram equalization per plane
# 10. Masking
# 11. Low-pass filters (mean, median, mode)
# 12. High-pass filters (prewitt, sobel, canny, laplace, prewitt2)
# 13. Pseudo color
# 14. Segmentation
# 15. Morphology
Example #19
	def execute(self):
		super(WatershedOp, self).execute()
		kernel = np.ones((3,3),np.uint8)
		opening = cv2.morphologyEx(self.parameters["img"],cv2.MORPH_OPEN,kernel, iterations = 2)
		sure_bg = cv2.dilate(opening, kernel, iterations=3)
		dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
		ret, sure_fg = cv2.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)
		sure_fg = np.uint8(sure_fg)
		unknown = cv2.subtract(sure_bg, sure_fg)
		ret, markers = cv2.connectedComponents(sure_fg)
		markers += 1
		markers[unknown==255] = 0
		img = self.parameters["img"]
		markers = cv2.watershed(img, markers)
		img[markers == -1] = [255,0,0]
		cv2.imshow("test", img)
		cv2.waitKey(0)
		cv2.destroyWindow("test")
def watershed(img):
    gray = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
    ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    mb = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel, iterations=2)
    sure_bg = cv.dilate(mb, kernel, iterations=3)
    dist = cv.distanceTransform(mb, cv.DIST_L2, 3)
    dist_output = cv.normalize(dist, None, 0, 1.0, cv.NORM_MINMAX)
    ret, surface = cv.threshold(dist, dist.max() * 0.6, 255, cv.THRESH_BINARY)
    surface_fg = np.uint8(surface)
    unknown = cv.subtract(sure_bg, surface_fg)
    ref, markers = cv.connectedComponents(surface_fg)  # label the sure-foreground seeds
    markers = markers + 1
    markers[unknown == 255] = 0
    markers = cv.watershed(img, markers=markers)
    img[markers == -1] = [0, 0, 255]
    cv.imshow("result", img)
Example #21
def water_component(img):
    ret, binary_img = cv2.threshold(img, 1, 255, cv2.THRESH_BINARY)
    #dist = cv2.distanceTransform(img,cv2.DIST_L2,5)
    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(binary_img,cv2.MORPH_OPEN,kernel, iterations = 2)
    sure_bg = cv2.dilate(opening,kernel,iterations=3)
    dist = cv2.distanceTransform(opening,cv2.DIST_L2,5)
    ret, sure_fg = cv2.threshold(dist,0.2*dist.max(),255,0)
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)
    ret, markers = cv2.connectedComponents(sure_fg)
    markers = markers+1
    markers[unknown==255] = 0
    rgb_img=cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
    markers = cv2.watershed(rgb_img,markers)
    extract_max(img,markers)
    return img  # markers*10
Example #22
def erosion_test(cellarray):
    hsv = convert_to_hsv(cellarray)
    lne = hsv[:, :, 1]
    threshold = 0.7
    lne[lne > threshold * np.amax(lne)] = 1
    lne[lne < threshold * np.amax(lne)] = 0

    kernel = np.ones((2, 2), np.uint8)
    erosion = cv2.erode(lne, kernel, iterations=3)

    ret, markers = cv2.connectedComponents(np.uint8(erosion))
    ret = ret - 1
    print(markers, ret)

    plt.figure(11)
    plt.subplot(221)
    plt.imshow(markers)
    plt.subplot(222)
    plt.imshow(lne)
Example #23
	def process(self, cv_image, do_separate=True):
		# Thresholding
		gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
		ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

		# Noise removal
		kernel = np.ones((3,3), np.uint8)
		opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

		# Sure background area
		sure_bg = cv2.dilate(opening, kernel, iterations=3)

		# Finding sure foreground area
		sure_fg = None
		if do_separate:
			dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
			ret, sure_fg = cv2.threshold(dist_transform, 0.7*dist_transform.max(), 255, 0)
		else:
			sure_fg = cv2.erode(opening, kernel, iterations=2)

		# Finding unknown region
		sure_fg = np.uint8(sure_fg)
		unknown = cv2.subtract(sure_bg, sure_fg)

		# Marker labelling
		ret, markers = cv2.connectedComponents(sure_fg)

		# Add one to all labels so that sure background is not 0 but 1
		markers = markers+1

		# Now mark the region of unknown with zero
		markers[unknown==255] = 0

		# Apply watershed
		result = cv_image.copy()
		markers = cv2.watershed(cv_image, markers)
		result[markers == -1] = [255, 0, 0]

		return (SpellBase.to_kivy_texture(cv2.cvtColor(opening,cv2.COLOR_GRAY2RGB)),
				SpellBase.to_kivy_texture(cv2.cvtColor(unknown,cv2.COLOR_GRAY2RGB)),
				SpellBase.to_kivy_texture(cv2.applyColorMap(cv2.convertScaleAbs(markers), cv2.COLORMAP_JET)),
				SpellBase.to_kivy_texture(result))
Example #24
    def __init__(self, fn):
        self.img = cv2.imread(fn)
        self.DisplayandSave('Input', self.img)
        h, w = self.img.shape[:2]
        self.markers = np.zeros((h, w), np.int32)
        self.colors = np.int32( list(np.ndindex(2, 2, 2)) ) * 255

        #Thresholding
        self.gray = cv2.cvtColor(self.img,cv2.COLOR_BGR2GRAY)
        self.ret, self.thresh = cv2.threshold(self.gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        self.DisplayandSave('Threshold', self.thresh)

        # noise removal
        self.kernel = np.ones((3,3),np.uint8)
        self.opening = cv2.morphologyEx(self.thresh,cv2.MORPH_OPEN,self.kernel, iterations = 2)
        self.closing = cv2.morphologyEx(self.opening,cv2.MORPH_CLOSE,self.kernel, iterations = 2)
        self.DisplayandSave('After noise removal', self.closing)

        # Marker labelling
        self.ret, self.markers = cv2.connectedComponents(self.closing)
        self.markers = self.markers+1
Example #25
def watershed(img):
    print (img.shape)
    rows, cols = img.shape[:2]
    img2 = cv.cvtColor(img, cv.COLOR_GRAY2RGB)

    ret, thresh = cv.threshold(img,1,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
    cv.imshow('thresh', thresh)
    cv.waitKey(0)

    # noise removal
    kernel = np.ones((3,3),np.uint8)
    opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)

    # sure background area
    sure_bg = cv.dilate(opening,kernel,iterations=3)

    # Finding sure foreground area
    dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
    ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv.subtract(sure_bg,sure_fg)

    cv.imshow('thresh2', unknown)
    cv.waitKey(0)

    ret, markers = cv.connectedComponents(sure_fg)
    markers = markers+1
    markers[unknown ==255] = 0
    markers = cv.watershed(img2,markers)
    img2[markers == -1] = [255,0,0]



    cv.imshow('thresh3', img2)
    cv.waitKey(0)

    return thresh
Example #26
def main(infile):
    img = cv2.imread(infile)
    assert img is not None

    gimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # THRESH_OTSU: use Otsu's algorithm for the optimal threshold value
    ret,th = cv2.threshold(gimg,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

    # noise removal
    kernel = np.ones((3,3),np.uint8)
    op = cv2.morphologyEx(th,cv2.MORPH_OPEN,kernel,iterations=2)
    # sure background area
    sure_bg = cv2.dilate(op,kernel,iterations=3)
    # sure foreground area
    dist_transform = cv2.distanceTransform(op,cv2.DIST_L2,5)
    ret,sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
    # unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    #show(sure_bg)
    #show(sure_fg)
    #show(dist_transform)
    #show(unknown)

    # marker labelling
    ret,markers = cv2.connectedComponents(sure_fg)
    markers = markers+1
    # mark unknown region 0
    markers[unknown==255] = 0
    # apply watershed
    markers = cv2.watershed(img,markers)
    # boundary region is -1
    img[markers==-1] = [255,0,0]

    show(img)

    save(img,infile,"watershed_seg")
Example #27
def mywatershed(image, imageT):

    # Use morphological opening and the closing to remove noise and show result
    # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
    # opening = cv2.morphologyEx(imageT, cv2.MORPH_OPEN, kernel, iterations=1)
    # cv2.namedWindow("After opening", cv2.WINDOW_AUTOSIZE)
    # cv2.imshow("After opening", opening)
    #
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # sure_bg = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel, iterations=10)
    sure_bg = cv2.dilate(imageT, kernel, iterations=5)
    cv2.namedWindow("Sure bg", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("Sure bg", sure_bg)

    dist_transform = cv2.distanceTransform(sure_bg, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.2*dist_transform.max(),
                                 255, 0)
    cv2.namedWindow("Dist trans", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("Dist trans", cv2.normalize(dist_transform, dist_transform, 0, 1., cv2.NORM_MINMAX))

    cv2.namedWindow("Sure fg", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("Sure fg", sure_fg)

    unknown = cv2.subtract(sure_bg, sure_fg.astype(np.uint8))
    cv2.namedWindow("unknown", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("unknown", unknown)

    ret, markers = cv2.connectedComponents(sure_fg.astype(np.uint8))
    markers = markers+1
    markers[unknown == 255] = 0
    markers = markers.astype(np.int32)
    markers = cv2.watershed(image, markers)
    image[markers == -1] = [0, 255, 255]

    cv2.namedWindow("Watershed", cv2.WINDOW_AUTOSIZE)
    cv2.imshow("Watershed", image)

    return
Example #28
    def symbol_bboxes(self, with_labels=False):
        """Extracts bounding boxes from symbols image."""
        cc, labels = cv2.connectedComponents(self.symbols)
        bboxes = {}
        for x, row in enumerate(labels):
            for y, l in enumerate(row):
                if l not in bboxes:
                    bboxes[l] = [x, y, x+1, y+1]
                else:
                    box = bboxes[l]
                    if x < box[0]:
                        box[0] = x
                    elif x + 1 > box[2]:
                        box[2] = x + 1
                    if y < box[1]:
                        box[1] = y
                    elif y + 1 > box[3]:
                        box[3] = y + 1

        if with_labels:
            return bboxes, labels
        else:
            return bboxes
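For comparison, a sketch of the same per-label boxes computed with NumPy instead of the explicit double loop (this helper is not part of the original class):

# Hypothetical vectorized equivalent of the loop above; labels as returned by cv2.connectedComponents.
import numpy as np

def bboxes_from_labels(labels):
    boxes = {}
    for l in np.unique(labels):
        xs, ys = np.where(labels == l)
        boxes[int(l)] = [int(xs.min()), int(ys.min()), int(xs.max()) + 1, int(ys.max()) + 1]
    return boxes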
def _bubble_properties_table(binary_image):
    """provide a label for each bubble in the image"""

    nbubbles, marker_image = cv.connectedComponents(1 - binary_image)
    props = regionprops(marker_image)
    bubble_properties = \
        pd.DataFrame([{"label": bubble.label,
                       "area": bubble.area,
                       "centroid": bubble.centroid,
                       "convex_area": bubble.convex_area,
                       "equivalent_diameter": bubble.equivalent_diameter,
                       "perimeter": bubble.perimeter} for bubble in props])

    bubble_properties["convexity"] = \
        calculate_convexity(bubble_properties["perimeter"],
                            bubble_properties["area"])
    bubble_properties["circularity_reciprocal"] = \
        calculate_circularity_reciprocal(bubble_properties["perimeter"],
                                         bubble_properties["area"])

    bubble_properties = bubble_properties.set_index("label")

    return nbubbles, marker_image, bubble_properties
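A hedged driver for the helper above (it expects a 0/1 array in which bubbles are the zero pixels, hence the 1 - binary_image inversion; the path is a placeholder and regionprops comes from skimage):

# Hypothetical usage; 'bubbles.png' is a placeholder path.
import cv2 as cv

raw = cv.imread('bubbles.png', cv.IMREAD_GRAYSCALE)
_, binary_image = cv.threshold(raw, 0, 1, cv.THRESH_BINARY + cv.THRESH_OTSU)
nbubbles, marker_image, table = _bubble_properties_table(binary_image)
print(nbubbles - 1, "bubbles")  # connectedComponents counts the background label too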
Example #30
def watershed_demo():
    img = cv2.imread('water_coins.jpg')
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

    # noise removal
    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)

    # sure background area
    sure_bg = cv2.dilate(opening,kernel,iterations=3)

    # Finding sure foreground area
    # cv2.distanceTransform(src, distanceType, maskSize[, dst]) → dst
    # distanceType: cv2.DIST_L1, cv2.DIST_L2, or cv2.DIST_C (legacy names CV_DIST_*)
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L1, 5)  # distance to the closest zero pixel for each pixel
    ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers+1

    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0

    markers = cv2.watershed(img,markers)
    img[markers == -1] = [255,0,0]

    cv2.imshow('watershed', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
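The legacy CV_DIST_* names in the comments above map onto small integer codes in current OpenCV, which is why a bare number works as distanceType; a quick sanity check:

import cv2
print(cv2.DIST_L1, cv2.DIST_L2, cv2.DIST_C)  # 1 2 3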
Example #31
def calculate_weights(clabels=None, instlabels=None, ignore=None,
                      n_dims = 2, bws=10, fds=10, bwf=10, fbr=.1):
    """
    Calculates the weights from the given mask (classlabels `clabels` or `instlabels`).
    """

    assert not (clabels is None and instlabels is None), "Provide either clabels or instlabels"

    # If no classlabels are given treat the problem as binary segmentation
    # ==> Create a new array assigning class 1 (foreground) to each instance
    if clabels is None:
        clabels = (instlabels[:] > 0).astype(int)
    else: clabels = np.array(clabels[:])

    # Initialize label and weights arrays with background
    labels = np.zeros_like(clabels)
    wghts = fbr * np.ones_like(clabels)
    frgrd_dist = np.zeros_like(clabels, dtype='float32')
    classes = np.unique(clabels)[1:]

    #assert len(classes)==clabels.max(), "Provide consecutive classes, e.g. pixel label 1 and 2 for two classes"

    # If no instance labels are given, generate them now
    if instlabels is None:
        # Creating instance labels from mask
        instlabels = np.zeros_like(clabels)
        nextInstance = 1
        for c in classes:
            #comps2, nInstances2 = ndimage.measurements.label(clabels == c)
            nInstances, comps = cv2.connectedComponents((clabels == c).astype('uint8'), connectivity=4)
            nInstances -=1
            instlabels[comps > 0] = comps[comps > 0] + nextInstance
            nextInstance += nInstances

    for c in classes:
        # Extract all instance labels of class c
        il = (instlabels * (clabels == c)).astype(np.int16)
        instances = np.unique(il)[1:]

        # Generate background ridges between touching instances
        # of that class, avoid overlapping instances
        dil = cv2.morphologyEx(il, cv2.MORPH_CLOSE, kernel=np.ones((3,) * n_dims))
        overlap_cand = np.unique(np.where(dil!=il, dil, 0))
        labels[np.isin(il, overlap_cand, invert=True)] = c

        for instance in overlap_cand[1:]:
            objectMaskDil = cv2.dilate((labels == c).astype('uint8'), kernel=np.ones((3,) * n_dims),iterations = 1)
            labels[(instlabels == instance) & (objectMaskDil == 0)] = c

        # Generate weights
        min1dist = 1e10 * np.ones(labels.shape)
        min2dist = 1e10 * np.ones(labels.shape)
        for instance in instances:
            #dt2 = ndimage.morphology.distance_transform_edt(instlabels != instance)
            dt = cv2.distanceTransform((instlabels != instance).astype('uint8'), cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
            frgrd_dist += np.exp(-dt ** 2 / (2*fds ** 2))
            min2dist = np.minimum(min2dist, dt)
            newMin1 = np.minimum(min1dist, min2dist)
            newMin2 = np.maximum(min1dist, min2dist)
            min1dist = newMin1
            min2dist = newMin2
        wghts += bwf * np.exp(-(min1dist + min2dist) ** 2 / (2*bws ** 2))

    # Set weight for distance to the closest foreground object
    wghts[labels == 0] += (1-fbr)*frgrd_dist[labels == 0]
    # Set foreground weights to 1
    wghts[labels > 0] = 1
    pdf = (labels > 0) + (labels == 0) * fbr

    # Set weight and sampling probability for ignored regions to 0
    if ignore is not None:
        wghts[ignore] = 0
        pdf[ignore] = 0

    return (labels.astype(np.int32),
            wghts.astype(np.float32),
            pdf.astype(np.float32))
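A small synthetic smoke test for calculate_weights (a sketch, not from the original source: two touching square instances of a single class):

# Hypothetical smoke test on a toy 2-D instance mask.
import numpy as np

instlabels = np.zeros((64, 64), dtype=int)
instlabels[10:30, 10:30] = 1   # instance 1
instlabels[10:30, 30:50] = 2   # instance 2, touching instance 1
labels, weights, pdf = calculate_weights(instlabels=instlabels)
print(labels.shape, float(weights.min()), float(weights.max()))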
Example #32
                                                     (3, 3)),
                           iterations=1)
        imgray = cv2.dilate(imgray,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (3, 3)),
                            iterations=3)

        th2 = cv2.adaptiveThreshold(imgray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
                cv2.THRESH_BINARY_INV,11,3)
        img = cv2.erode(th2,
                        cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                        iterations=1)
        th2 = cv2.dilate(img,
                         cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                         iterations=4)
        ret, labels = cv2.connectedComponents(th2)

        #        label_hue = np.uint8(179*labels/np.max(labels))
        #        blank_ch = 255*np.ones_like(label_hue)
        #        labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
        #
        #        labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
        #
        #    # set bg label to black
        #        labeled_img[label_hue==0] = 0
        #
        for index in range(1, np.max(labels) + 1):
            N = np.where(labels == index)

            if (N[0].shape[0] < 1200): continue
            else:
img3 = cv.morphologyEx(img2, 
                       cv.MORPH_CLOSE, 
                       kernel, 
                       iterations = 2)
img4 = cv.dilate(img3,
                 kernel,
                 iterations = 5)

img5 = cv.distanceTransform(img3, 
                            cv.DIST_L2, 
                            5)
ret,img6 = cv.threshold(img5,
                        0.65 * img5.max(), 
                        255, 
                        0)

img6 = np.uint8(img6)
img7 = cv.subtract(img4, img6)

ret, count = cv.connectedComponents(img6)
count = count + 1

count[img7 == 255] = 0
# cv.watershed also writes the boundary labels (-1) into 'count' in place
img8 = cv.watershed(img1, count)
img1[count == -1] = [255, 0, 0]


plt.figure(figsize=(30,30))
plt.subplot(121), plt.imshow(img1), plt.title("ORIGINAL"), plt.axis("off")
plt.subplot(122), plt.imshow(img8, cmap='jet'), plt.title("RESULT"), plt.axis("off")
plt.show()
Example #34
def parallel_landmark_and_conn_component(img_path, landmark_dict, AU_box_dict):

    orig_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    if landmark_dict is None or len(landmark_dict) == 0:
        try:
            landmark_dict, _, _ = FaceMaskCropper.landmark.landmark(
                image=orig_img, need_txt_img=False)  # slow
        except IndexError:
            if AU_box_dict is None:
                AU_box_dict = defaultdict(list)
                for AU in config.AU_ROI.keys():
                    if AU in config.SYMMETRIC_AU:
                        for _ in range(2):
                            AU_box_dict[AU].append(
                                (0.0, 0.0, config.IMG_SIZE[1],
                                 config.IMG_SIZE[0]))
                    else:
                        AU_box_dict[AU].append(
                            (0.0, 0.0, config.IMG_SIZE[1], config.IMG_SIZE[0]))
            return img_path, AU_box_dict, None, True

    cropped_face, rect = FaceMaskCropper.dlib_face_crop(
        orig_img, landmark_dict)
    cropped_face = cv2.resize(cropped_face, config.IMG_SIZE)
    del orig_img
    if AU_box_dict is None:
        AU_box_dict = defaultdict(list)
        for AU in config.AU_ROI.keys():
            mask = crop_face_mask_from_landmark(
                AU,
                landmark_dict,
                cropped_face,
                rect,
                landmarker=FaceMaskCropper.landmark)
            connect_arr = cv2.connectedComponents(
                mask, connectivity=8,
                ltype=cv2.CV_32S)  # mask shape = 1 x H x W
            component_num = connect_arr[0]
            label_matrix = connect_arr[1]
            # convert mask polygon to rectangle
            for component_label in range(1, component_num):

                row_col = list(zip(*np.where(label_matrix == component_label)))
                row_col = np.array(row_col)
                y_min_index = np.argmin(row_col[:, 0])
                y_min = row_col[y_min_index, 0]
                x_min_index = np.argmin(row_col[:, 1])
                x_min = row_col[x_min_index, 1]
                y_max_index = np.argmax(row_col[:, 0])
                y_max = row_col[y_max_index, 0]
                x_max_index = np.argmax(row_col[:, 1])
                x_max = row_col[x_max_index, 1]
                # same region may be shared by different AU, we must deal with it
                coordinates = (y_min, x_min, y_max, x_max)

                if y_min == y_max and x_min == x_max:  # sharp corners can produce isolated single points; could a mask contain only such a corner?
                    # print(("single point mask: img:{0} mask:{1}".format(self._images[i], mask_path)))
                    # then stitch them together with concat_example
                    continue
                AU_box_dict[AU].append(coordinates)
            del label_matrix
            del mask
    for AU, box_lst in AU_box_dict.items():
        AU_box_dict[AU] = sorted(box_lst, key=lambda e: int(e[3]))
    return img_path, AU_box_dict, landmark_dict, False
def main():

    # video filepaths
    csv_out = "filename,left,middle,right\n"  # string to store csv output
    root_dir = "/docs/Dropbox/Kleo sociability test data/3 MONTH SOCIABILITY VIDS/"
    out_dir = "/docs/Dropbox/Kleo sociability test data/3 MONTH SOCIABILITY VIDS/"
    filenames = get_filenames(root_dir, out_dir, csv_out=csv_out)

    # loop through each video
    count = 1
    for file_path in filenames:
        file_name = file_path[len(root_dir):]
        printnow(f"Processing Video ({count}/{len(filenames)}) {file_name}")
        count += 1

        # load the video file
        printnow("\tLoading and converting to greyscale... ", end='')
        video_in = cv2.VideoCapture(file_path)
        frame_count = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(video_in.get(cv2.CAP_PROP_FPS))
        print(f'width={frame_width} height={frame_height} fps={fps}', end='')

        if frame_height == 720:
            mask_filename = root_dir + 'mask_720.png'
            # load reference frame
            ref_frame = cv2.imread(root_dir + 'ref_720.png')[:, :, 0]
            mask_threshold = 50
            _, ref_frame = cv2.threshold(ref_frame, mask_threshold, 255, cv2.THRESH_BINARY)
        elif frame_height == 800:
            mask_filename = root_dir + 'mask_800.png'
            # load reference frame
            ref_frame = cv2.imread(root_dir + 'ref_800.png')[:, :, 0]
            mask_threshold = 50
            _, ref_frame = cv2.threshold(ref_frame, mask_threshold, 255, cv2.THRESH_BINARY)
        else:
            raise Exception('unexpected frame height encountered')

        # create the masks for the left, middle, and right portions
        mask = cv2.imread(mask_filename)
        _, mask = cv2.threshold(mask[:, :, 0], 125, 255, cv2.THRESH_BINARY)
        _, components = cv2.connectedComponents(mask)
        left_mask = np.full((frame_height, frame_width), False, dtype=bool)
        centre_mask = np.full((frame_height, frame_width), False, dtype=bool)
        right_mask = np.full((frame_height, frame_width), False, dtype=bool)
        left_mask[components == 1] = True
        centre_mask[components == 2] = True
        right_mask[components == 3] = True

        # create the file handle to write out the debug video
        video_out = cv2.VideoWriter(file_path[:-4] + '_debug' + file_path[-4:], cv2.VideoWriter_fourcc(*'PIM1'), fps, (frame_width, frame_height), True)

        fc = 0
        counter = {'left': 0, 'centre': 0, 'right': 0}
        while True:
            # read the frame
            ret, raw_frame = video_in.read()
            if not ret:  # if we have hit the end
                break  # break the loop

            # convert each frame to greyscale
            frame = np.dot(raw_frame[..., :3], [0.299, 0.587, 0.114])

            # convert to unsigned int
            frame = frame.astype(np.uint8)
            _, frame = cv2.threshold(frame, mask_threshold, 255, cv2.THRESH_BINARY)

            # compare to reference
            diff_frame = np.abs(frame.astype(np.int16) - ref_frame.astype(np.int16))

            # determine the frame with the highest difference
            left_diff = np.sum(diff_frame[left_mask])
            centre_diff = np.sum(diff_frame[centre_mask])
            right_diff = np.sum(diff_frame[right_mask])

            out_frame = raw_frame
            out_frame = cv2.resize(out_frame, ref_frame.shape[::-1])

            if left_diff > centre_diff and left_diff > right_diff:  # if left is the highest
                counter['left'] += 1
                out_frame[:, :, 0][left_mask] += 50
            elif centre_diff > right_diff:  # if middle is the highest
                counter['centre'] += 1
                out_frame[:, :, 0][centre_mask] += 50
            else:  # if right is the highest
                counter['right'] += 1
                out_frame[:, :, 0][right_mask] += 50
            out_frame[:, :, 0][out_frame[:, :, 0] > 255] = 255

            # write out coloured raw frame
            video_out.write(out_frame)

            fc += 1

        # add the time spent in each third to the list of results
        csv_out += f'{file_name},{counter["left"]},{counter["centre"]},{counter["right"]}\n'
        # close video_files
        video_in.release()
        video_out.release()

    # write out the results to a csv
    with open(out_dir + 'results.csv', 'w') as out_csv:
        out_csv.write(csv_out)
Example #36
    def select_area(self, event):
        img = self.imageCV.copy()

        self.image1 = Image.new("RGB", (self.col, self.lin), (0, 0, 0))
        self.draw = ImageDraw.Draw(self.image1)

        # already single-channel if the shape has only two dimensions
        try:
            _, _ = img.shape
            im2 = img
        except ValueError:
            im2 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

        x = event.x
        y = event.y
        y2, x2 = im2.shape
        areay = int((y2 * y) / (guiY * 1.0))
        areax = int((x2 * x) / (guiX * 1.0))

        if im2[areay][areax] < 10:
            self.c.delete(ALL)
            self.imgright = self.imgOriginalCV.copy()

            self.imgright = cv2.cvtColor(self.imgright, cv2.COLOR_BGR2RGB)

            #self.imgLeft = ImageTk.PhotoImage(Image.fromarray(self.imgright).resize((guiX, guiY), Image.ANTIALIAS))
            #self.c.create_image(0, 0, image=self.imgLeft, anchor=NW)

            self.imageInMermory = cv2.cvtColor(self.imageCV, cv2.COLOR_BGR2GRAY)
            self.imageInMermory[self.imageInMermory > 0] = 255

            self.show_image()
            print("nao tem nada")

            return

        im2 = cv2.threshold(im2, 10, 255, cv2.THRESH_BINARY)[1]  # ensure binary
        ret, labels = cv2.connectedComponents(im2)

        area = labels[areay, areax]

        labels[labels != area] = 0
        labels[labels == area] = 1

        # Color each area
        # Map component labels to hue val
        label_hue = np.uint8(179 * labels / np.max(labels))
        blank_ch = 255 * np.ones_like(label_hue)
        labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
        # cvt to BGR for display
        labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
        # set bg label to black
        labeled_img[label_hue == 0] = 0

        # Convert to grayscale
        labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_BGR2GRAY)

        # get the contour
        ret, thresh = cv2.threshold(labeled_img, 10, 255, 0)
        contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

        # separates only the region of interest
        lin, col = im2.shape
        im3 = np.zeros([lin, col], dtype=np.uint8)

        cv2.drawContours(im3, contours, -1, 255, -1)

        self.imageInMermory = im2 - im3

        im3 = np.zeros([lin, col], dtype=np.uint8)

        cv2.drawContours(im3, contours, -1, 255)

        self.image1 = Image.fromarray(cv2.cvtColor(im3, cv2.COLOR_GRAY2BGR))

        self.draw = ImageDraw.Draw(self.image1)

        self.show_image()

        self.timeline.enqueue(self.image1.copy())
Example #37
    def callback(self, msg):
        col = msg.info.width
        row = msg.info.height
        # data arrives as a flat list, so convert it to a matrix
        data = np.array(msg.data, dtype=np.int8)
        img = np.zeros((row, col), dtype=np.uint8)
        I = data.reshape((row, col))

        Unexp = np.zeros((row, col), dtype=np.uint8)
        Libre = np.zeros((row, col), dtype=np.uint8)
        Occ = np.zeros((row, col), dtype=np.uint8)
        Occ[I == 100] = 1
        Unexp[I == -1] = 1
        Libre[I == 0] = 1

        img[I == -1] = 128
        img[I == 0] = 255
        img[I == 100] = 0

        kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=np.uint8)

        Libre = cv2.erode(Libre, np.ones((3, 3), dtype=np.uint8))
        Libre = cv2.dilate(Libre, np.ones((3, 3), dtype=np.uint8))
        dilation = cv2.dilate(Unexp, kernel, iterations=1)
        good = np.logical_and(Libre == 1, dilation == Libre)
        idx = np.where(good)
        print(idx)
        rospy.loginfo("index dilatation")
        rospy.loginfo(len(idx[0]))

        # display the idx points
        """for i in range(0, len(idx[0])):
			img[idx[0][i], idx[1][i]]= 50"""

        # re-center the image
        xmin = np.min(idx[1])
        ymin = np.min(idx[0])
        xmax = np.max(idx[1])
        ymax = np.max(idx[0])

        #rospy.loginfo("img publiee")

        # robot position
        try:
            (trans,
             rot) = self.listener.lookupTransform("/map", "/base_link",
                                                  rospy.Time(0))
            rospy.loginfo("translation:")
            rospy.loginfo(trans)
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rospy.loginfo('tf fail')
        # position in pixels
        resol = msg.info.resolution
        rospy.loginfo("resolution")
        rospy.loginfo(resol)
        mx = msg.info.origin.position.x
        my = msg.info.origin.position.y
        rx = trans[0]
        ry = trans[1]
        pixel_pos_x = (rx - mx) / resol
        pixel_pos_y = (ry - my) / resol
        img[int(pixel_pos_y)][int(pixel_pos_x)] = 160

        # compute the best (shortest) distance
        x1 = pixel_pos_x
        y1 = pixel_pos_y
        dmin = 1000000000
        for i in range(0, len(idx[0])):
            x2 = idx[1][i]
            y2 = idx[0][i]
            d = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
            if d < dmin:
                dmin = d
                res = i

        # find the regions to go to
        binaire = np.zeros(img.shape, dtype=np.uint8)
        binaire[good] = 1
        ret, labels = cv2.connectedComponents(binaire)
        rospy.loginfo("ret")
        rospy.loginfo(ret)
        largest_region = 0
        areas = []
        for i in range(0, ret):
            # 'res' already holds the index of the closest frontier point, so do not reuse it here
            areas.append(np.sum(labels == i))
        sorted_idx = np.argsort(areas)
        i_largest = sorted_idx[len(sorted_idx) - 2]
        # color the largest unexplored zone
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        goal_idx = np.where(labels == i_largest)
        for i in range(len(goal_idx[0])):
            img[goal_idx[0][i], goal_idx[1][i], :] = [255, 0, 0]

        cv2.circle(img, (idx[1][res], idx[0][res]), 5, [0, 0, 255])
        cv2.circle(img, (int(pixel_pos_x), int(pixel_pos_y)), 5, [0, 255, 0])

        # determine the goal point (compute the centroid)
        sum_x = 0
        for i in range(0, len(goal_idx[1])):
            sum_x = sum_x + goal_idx[1][i]
        mean_x = int(sum_x / len(goal_idx[1]))
        sum_y = 0
        for i in range(0, len(goal_idx[0])):
            sum_y = sum_y + goal_idx[0][i]
        mean_y = int(sum_y / len(goal_idx[0]))
        cv2.circle(img, (mean_x, mean_y), 5, [255, 0, 0])
        img = img[ymin:ymax, xmin:xmax, :]
        self.pub_img.publish(self.bridge.cv2_to_imgmsg(img, "rgb8"))

        msg_pts = PoseStamped()
        msg_pts.pose.position.x = mean_x * resol + mx
        msg_pts.pose.position.y = mean_y * resol + my
        msg_pts.pose.orientation.w = 1
        msg_pts.header.frame_id = "map"
        self.pub_point.publish(msg_pts)

        rospy.loginfo("finito")
        """nb = []
		for i in range (0, row):	
			for j in range (0, col):
				if I[i][j] not in nb:
					nb.append(I[i][j])
		#rospy.loginfo("res=")
		#rospy.loginfo(nb)	
		#rospy.loginfo(I)"""

        # search for areas to explore
        """candidats= []
Z = np.float32(Z)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 10
ret, label, center = cv2.kmeans(Z, K, None, criteria, 10,
                                cv2.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((img.shape))
#end KMeans

hsv2bgr = cv2.cvtColor(res2, cv2.COLOR_HSV2BGR)
gray = cv2.cvtColor(hsv2bgr, cv2.COLOR_BGR2GRAY)

# noise removal
# kernel = np.ones((3,3),np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
dilasi = cv2.dilate(res2, kernel, iterations=10)
opening = cv2.morphologyEx(dilasi, cv2.MORPH_OPEN, kernel, iterations=2)
closing = cv2.morphologyEx(dilasi, cv2.MORPH_CLOSE, kernel, iterations=2)

opening = np.uint8(opening)
ret, markers = cv2.connectedComponents(opening)

#cv2.namedWindow("kmeans", cv2.WINDOW_NORMAL)
#cv2.namedWindow("opening", cv2.WINDOW_NORMAL)
#cv2.namedWindow("closing", cv2.WINDOW_NORMAL)
#cv2.imshow("kmeans", res2)
#cv2.imshow("opening", opening)
#cv2.imshow("closing", closing)
#cv2.waitKey(0)
Example #39
def main(argv):
    # Take a mosaic, a csv file containing predictions for its labels and the patch size used for the annotations
    # 1) Create tentative automatic mask images (all affected patches are black)
    # 2) Find a clear background and clear foreground part, find the unknown part, find the connected components of the foreground
    # 3) Accumulate labels for all categories, careful to keep the unknown region updated as it is where the segmentation can grow
    # 4) run watershed

    #hardcoded number of layers and names
    layerNames=["river","decidious","uncovered","evergreen","manmade"]
    layerDict={} #dictionary so that we know what number corresponds to each layer
    for i in range(len(layerNames)):layerDict[layerNames[i]]=i

    # Read parameters
    patch_size = int(argv[1])
    csvFile=argv[2]
    #imageDir, full path!
    imageDir=argv[3]

    #read also all the prefixes of all the images that we have
    imagePrefixes=[]
    for x in range(4,len(argv)):imagePrefixes.append(argv[x])
    imageDict={}
    for i in range(len(imagePrefixes)):imageDict[imagePrefixes[i]]=i

    #hardcoded output dir
    outputDir="./outputIm/"

    #print("AnnotationMask creator main, parameters: csv files: "+str(csvFile)+" image directory"+str(imageDir)+" image prefixes "+str(imagePrefixes))

    f = open(csvFile, "r")
    shapeX={}
    shapeY={}
    image={}
    for pref in imagePrefixes:
        image[pref] = cv2.imread(imageDir+pref+".jpg",cv2.IMREAD_COLOR)
        print("Image "+imageDir+pref+".jpg")
        shapeX[pref]=image[pref].shape[0]
        shapeY[pref]=image[pref].shape[1]

    #create a blank image for each layers
    layerList=[]
    i=0
    for pref in imagePrefixes:
        layerList.append([])
        for x in range(len(layerNames)):
            layerList[i].append(np.zeros((shapeX[pref],shapeY[pref]),dtype=np.uint8))
        i+=1

    # go over the csv file, for every line
        # extract the image prefixes
        # extract the labels
        # for every label found, paint a black patch in the corresponding image layer
    for line in f:
        #process every line
        #print(line)
        pref=line.split("p")[0]
        patchNumber=int(line.split("h")[1].split(" ")[0])
        labelList=line.split(" ")[1].strip().split(";")
        numStepsX=int(shapeX[pref]/patch_size)
        numStepsY=int(shapeY[pref]/patch_size)

        for x in labelList:
            if x=="":break
            #now, paint the information of each patch in the layer where it belongs
            xJump=patchNumber//numStepsY
            yJump=patchNumber%numStepsY

            #now find the proper layer (within this image)
            currentLayerIm=layerList[imageDict[pref]][layerDict[x]]
            impa.paintImagePatch(currentLayerIm,xJump*patch_size,yJump*patch_size,patch_size,255)

    i=0
    for pref in imagePrefixes:
        for x in range(len(layerNames)):
            #print("shape of current layer image "+repr(layerList[i][x].shape))
            cv2.imwrite(outputDir+pref+"layer"+str(x)+".jpg",layerList[i][x])
        i+=1

    i=0
    kernel = np.zeros((3,3),np.uint8)
    kernel[:]=255
    for pref in imagePrefixes:
        print("starting with prefix "+pref)
        #mask accumulator image
        maskImage=np.ones((shapeX[pref],shapeY[pref]),dtype=np.uint8)
        firstLabel=0 #counter so that labels from different masks stay distinct
        firstLabelList=[0]
        for x in range(len(layerNames)):
            if layerNames[x] in ["river","decidious","uncovered","evergreen"]:
                #print("starting "+layerNames[x])

                #in the case of decidious trees, filter out the snow
                #if layerNames[x]=="decidious":
                if False:
                    snowMask=ut.makeSnowMask(cv2.cvtColor(image[pref], cv2.COLOR_BGR2GRAY))
                    coarseMask=ut.getSnowOutOfGeneratedMask(snowMask,layerList[i][x])
                else:
                    coarseMask=layerList[i][x]

                # Try to refine the segmentation
                opening = cv2.morphologyEx(coarseMask,cv2.MORPH_OPEN,kernel, iterations = 2)

                # sure background area (iterations was previously 10)
                sure_bg = cv2.dilate(opening,kernel,iterations=1)

                # Finding sure foreground area (dist_transform multiplier was previously 0.17)
                dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
                ret, sure_fg = cv2.threshold(dist_transform,0.15*dist_transform.max(),255,0)
                cv2.imwrite(outputDir+pref+"SUREFG"+str(layerNames[x])+".jpg",sure_fg)

                # Finding unknown region
                sure_fg = np.uint8(sure_fg)
                unknown = cv2.subtract(sure_bg,sure_fg)

                # Marker labelling
                ret, markers = cv2.connectedComponents(sure_fg)

                # Add one to all labels so that sure background is not 0, but 1, also add firstLabel so label numbers are different
                markers = markers+firstLabel+1

                #remark sure background as 1
                markers[markers==(firstLabel+1)]=1

                firstLabel+=ret
                firstLabelList.append(firstLabel)
                # Now, mark the region of unknown with zero
                markers[unknown==255] = 0

                important=layerNames[x] in ["decidious","evergreen"]
                maskImage=addNewMaskLayer(markers,maskImage,important)

                cv2.imwrite(outputDir+pref+"CoarseMaskLayer"+str(layerNames[x])+".jpg",layerList[i][x])
                #cv2.imwrite(outputDir+str(x)+"layerMask.jpg",cv2.applyColorMap(np.uint8(markers*50),cv2.COLORMAP_JET))
                #cv2.imwrite(outputDir+pref+str(x)+"AccumMask.jpg",cv2.applyColorMap(np.uint8(maskImage*50),cv2.COLORMAP_JET))
            else:
                pass
                #print("skypping layer "+layerNames[x])

        cv2.imwrite(outputDir+"finalMask.jpg",cv2.applyColorMap(np.uint8(maskImage*50),cv2.COLORMAP_JET))

        #print("starting watershed ")
        markers = cv2.watershed(image[pref],maskImage)
        #markers = seg.random_walker(image[pref],maskImage)

        image[pref][markers == -1] = [0,0,255]

        cv2.imwrite(outputDir+pref+str(i)+"watershed.jpg",image[pref])
        cv2.imwrite(outputDir+pref+str(i)+"markers.jpg",cv2.applyColorMap(np.uint8(markers*50),cv2.COLORMAP_JET))

        # now we should reconstruct the individual mask segmentations from the final markers
        #print(" list Of first labels"+str(firstLabelList))

        #now, make layer images, for every interval of layers, only include markers inside of it
        #while we are doing it, we can also compute the DICE coefficient
        for j in range(1,len(firstLabelList)):
            refinedLayer=buildBinaryMask(markers,firstLabelList[j-1],firstLabelList[j])
            refinedLayer=np.uint8(refinedLayer)
            coarseLayer=layerList[i][j-1]
            manualLayer=np.invert(cv2.imread(imageDir+pref+"layer"+str(j-1)+".jpg",cv2.IMREAD_GRAYSCALE))
            #cv2.imwrite(outputDir+pref+str(i)+str(j-1)+"manual.jpg",manualLayer)
            #cv2.imwrite(outputDir+pref+str(i)+str(j-1)+"coarse.jpg",coarseLayer)
            cv2.imwrite(outputDir+pref+str(layerNames[j-1])+"refined.jpg",refinedLayer)
            print(" LAYER "+layerNames[j-1])

            currentDice=dice.dice(coarseLayer,manualLayer )
            print("*******************************************dice coarse mask "+str(currentDice))
            currentDice=dice.dice(refinedLayer,manualLayer )
            print("*******************************************dice refined mask "+str(currentDice))

            #experiments with taking out the snow mask, not used at the moment.
            #if layerNames[j-1]=="decidious":
            #    snowMask=ut.makeSnowMask(cv2.cvtColor(image[pref], cv2.COLOR_BGR2GRAY))
            #    newRefinedLayer=ut.getSnowOutOfGeneratedMask(snowMask,refinedLayer)
            #    newManualLayer=ut.getSnowOutOfMask(snowMask,manualLayer)
            #    currentDice=dice.dice(refinedLayer,newManualLayer )
                #print("*******************************************dice refined mask no snow "+str(currentDice))


        i+=1
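# Editor's note: buildBinaryMask and dice.dice are imported from elsewhere and are
# not shown in this snippet. Minimal sketches consistent with how they are called
# above (assumptions, not the original implementations):
def buildBinaryMask_sketch(markers, lo, hi):
    # keep only the watershed labels that belong to one layer's interval (lo, hi]
    return np.where((markers > lo) & (markers <= hi), 255, 0)

def dice_sketch(a, b):
    # Dice = 2*|A intersect B| / (|A| + |B|) over binary masks (nonzero == foreground)
    a = a > 0
    b = b > 0
    denom = a.sum() + b.sum()
    return 2.0 * np.logical_and(a, b).sum() / denom if denom else 1.0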
Example #40
0
    def load_carla(self, dataset_dir, subset, original_directory):
        '''
        Load a subset of the CARLA dataset.
        :param dataset_dir: where dataset images are
        :param subset: 'train' or 'val'
        :param original_directory: where original dataset images are, we unzip the original
         dataset images to dataset_dir
        :return: None
        '''

        # Add classes. We have only one class 'dynamic' to add.
        self.add_class("carla", 1, "Dynamic")

        # load train or validation dataset
        assert subset in ["train", "val"]
        # the structure of directory is: dataset_dir-->{train, val}, train-->{RGB, Mask}, val-->{RGB, Mask}
        dataset_dir = os.path.join(dataset_dir, subset)
        mask_path = os.path.join(dataset_dir, "Mask")

        ######################################################
        # unzip data in the original directory to dataset_dir
        ######################################################
        # create the directory 'dataset_dir' if it does not exist yet
        directory = dataset_dir
        if not os.path.exists(directory):
            os.makedirs(directory)


        with tarfile.open(os.path.join(original_directory, subset, "RGB.tar"), 'r' ) as tar:
            tar.extractall(path=directory)
            tar.close()


        with tarfile.open(os.path.join(original_directory, subset, "Mask.tar"), 'r' ) as tar:
            tar.extractall(path=directory)
            tar.close()
        ##########################################################


        mask_list = [f for f in listdir(mask_path) if isfile(join(mask_path,f))]
        image_counter = 0
        for i, filename in enumerate(mask_list):
            image_path = os.path.join(dataset_dir,"RGB",filename)
            try:
                image = skimage.io.imread(image_path)
            except:
                print("Not a valid image: ",image_path)
                continue
            height, width = image.shape[:2]
            mask_temp = skimage.io.imread(os.path.join(mask_path, filename), as_grey=True)
            # mask has to be bool type
            mask_temp = mask_temp > 0
            mask_temp = np.asarray(mask_temp, np.uint8)
            masks = []
            # extract instances masks from one single mask of the image
            connectivity = 8
            # pass connectivity/ltype by keyword; positionally they would fill the optional labels slot
            output = cv2.connectedComponents(mask_temp, connectivity=connectivity, ltype=cv2.CV_32S)
            # Get the results. The first element is the number of labels.
            num_labels = output[0]
            labels = output[1]
            # number of mask instances: count
            count = 0
            # zero represents the background, for loop starts from 1
            for i in range(1, num_labels):
                # robust to noise: an instance region must have at least 20 pixels
                temp = labels == i
                temp = scipy.ndimage.morphology.binary_fill_holes(temp).astype(bool)
                if np.sum( temp ) >= 20:
                    masks.append( temp )
                    count = count + 1
            masks = np.asarray(masks)
            # if an image doesn't have instance masks, skip it.
            if not masks.size>0:
                continue

            self.add_image(
                "carla",
                image_id=filename,  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=masks)
            image_counter = image_counter+1

        string = "trainging" if subset=="train" else "validation"
        print("The number of {0} samples is {1} at CARLA Dataset".format(string, image_counter))
zoom = 19  # Make sure it is the correct value as defined in the image grab func
meters_ppx = 156543.03392 * math.cos(
    data.latitude[0] * math.pi / 180) / math.pow(2, zoom)
actual_lot_area = data.land_sqm[0]
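# Worked example (editor note): at the equator (latitude 0) and zoom 19 this gives
# meters_ppx = 156543.03392 * cos(0) / 2**19 = 156543.03392 / 524288, roughly
# 0.299 m/px, which is the standard Web Mercator ground-resolution formula.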
'''
 Display the mask image - for testing/debugging purposes, uncomment the
 following lines. The program won't run further until a key is pressed
 and the image is closed.
'''
#cv2.imshow('Mask image',mask)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
''' 
Label and show the connected components in the mask
'''
ret, labels = cv2.connectedComponents(mask)
'''
 Display the connected image - for testing/debugging purposes, uncomment the
 following lines. The program won't run further until a key is pressed
 and the image is closed. This should be avoided in a normal run.
'''

# Map component labels to hue val
#label_hue = np.uint8(179*labels/np.max(labels))
#blank_ch = 255*np.ones_like(label_hue)
#labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
#
## cvt to BGR for display
#labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
#
## set bg label to black
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

# dilate
sure_bg = cv2.dilate(opening, kernel, iterations=3)

# distance transform
dist_transform = cv2.distanceTransform(opening, 1, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255,
                             0)

# obtain the unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)

# marker labelling
ret, markers1 = cv2.connectedComponents(sure_fg)

# make sure the background is 1, not 0
markers = markers1 + 1

# mark the unknown region as 0
markers[unknown == 255] = 0

markers3 = cv2.watershed(img, markers)
img[markers3 == -1] = [0, 0, 255]

plt.subplot(241), plt.imshow(cv2.cvtColor(src, cv2.COLOR_BGR2RGB)),
plt.title('Original'), plt.axis('off')
plt.subplot(242), plt.imshow(thresh, cmap='gray'),
plt.title('Threshold'), plt.axis('off')
plt.subplot(243), plt.imshow(sure_bg, cmap='gray'),
Example #43
0
ocontours,opara = cv.findContours(oflaw, cv.RETR_TREE, cv.CHAIN_APPROX_TC89_KCOS)
icontours,ipara = cv.findContours(iflaw, cv.RETR_TREE, cv.CHAIN_APPROX_TC89_KCOS)
#img_result3 =cv.drawContours(img_gray, ocontours, -1, (0,0,255),3)

for i in range(len(ocontours)):
    th3_ori = copy.copy(th3)
    x, y, w, h = cv.boundingRect(ocontours[i])
    loc = [x, y, w, h]
    cv.drawContours(th3_ori, ocontours, i, (0, 0, 0), -1)   # remove the defect: paint it black, then check the connected components
    t_minus = th3_ori[y - 20:y + h + 20, x - 20:x + w + 20]
    kernel_erode = np.ones((3,3), np.uint8)
    t_minus = cv.erode(t_minus, kernel_erode, iterations=1)
    t_o = th3[y - 20:y + h + 20, x - 20:x + w + 20]
    timg = oflaw[y - int(h/3):y + h + int(h/3), x - int(w/3):x + w + int(w/3)]
    oimg=cv.resize(timg, (32,32))
    n_minus = cv.connectedComponents(t_minus)
    n_o = cv.connectedComponents(t_o)
    res = n_minus[0] - n_o[0]
    if res>0:     # short circuit
        flawlist.append([loc,"short"])
    elif res == 0:     # bump on the trace
        flawlist.append([loc,"spur"])
    else:        # extra trace, blob of copper outside the circuit
        flawlist.append([loc,"spurious copper"])

for i in range(len(icontours)):
    th3_ori = copy.copy(th3)
    x,y,w,h = cv.boundingRect(icontours[i])
    loc=[x,y,w,h]
    cv.drawContours(th3_ori, icontours, i, (255, 255, 255), -1)     # remove the defect: paint it white, then check the connected components
    t_minus = th3_ori[y - 20:y + h + 20, x - 20:x + w + 20]
Example #44
0
def instance_segmentation(prob_field, debug=False):
    """
    prob_field: shape:[h,w], dtype: float32, range[0..1]
    """
    """
    logging.info('Instance segmentation...')
    if prob_field is None or prob_field.size == 0:
        logging.error('Source array is empty')
        return None
    """
    prob_field = (prob_field * 255).astype(np.uint8).squeeze()

    image = prob_field.copy()

    # Detect zero-crossing
    # https://stackoverflow.com/a/48440931/5630599
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    kernel = np.ones((3, 3))
    LoG = cv2.Laplacian(image, cv2.CV_16S)
    minLoG = cv2.morphologyEx(LoG, cv2.MORPH_ERODE, kernel)
    maxLoG = cv2.morphologyEx(LoG, cv2.MORPH_DILATE, kernel)
    zeroCross = np.logical_or(np.logical_and(minLoG < 0, LoG > 0),
                              np.logical_and(maxLoG > 0, LoG < 0))
    # zeroCross = np.logical_and(minLoG < 0, LoG > 0)  # left only one condition
    if debug:
        cv2.imwrite('zeroCross.png', zeroCross.astype(np.uint8) * 255)
    del LoG
    del minLoG
    del maxLoG
    gc.collect()

    # Find first derive
    scale = 1
    delta = 0
    ddepth = cv2.CV_16S
    # The OpenCV docs (see the Notes section) say cv2.Scharr is more accurate than cv2.Sobel for 3x3 kernels:
    # https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html#formulation
    grad_x = cv2.Scharr(image,
                        ddepth,
                        1,
                        0,
                        scale=scale,
                        delta=delta,
                        borderType=cv2.BORDER_DEFAULT)
    abs_grad_x = cv2.convertScaleAbs(grad_x)
    del grad_x
    grad_y = cv2.Scharr(image,
                        ddepth,
                        0,
                        1,
                        scale=scale,
                        delta=delta,
                        borderType=cv2.BORDER_DEFAULT)
    abs_grad_y = cv2.convertScaleAbs(grad_y)
    del grad_y
    grad_magn = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
    del abs_grad_x
    del abs_grad_y
    gc.collect()

    # Create mask where zeros are crossed EXCEPT flat area (top rocks regions)
    mask = np.logical_and(zeroCross, grad_magn > 127)  # 32 or 127
    del grad_magn
    del zeroCross
    gc.collect()

    # Filter zero-crossed areas except flat area (top rocks regions)
    image[mask] = 0
    del mask

    # Filter low-probability
    (_, prob_field_th) = cv2.threshold(prob_field, 0, 255,
                                       cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    image[prob_field_th == 0] = 0
    del prob_field_th

    # Filter tiny/noise points
    # image = cv2.morphologyEx(image, cv2.MORPH_ERODE, np.ones((3,3)))
    # image = cv2.morphologyEx(image, cv2.MORPH_DILATE, np.ones((3,3)))
    # Binarize image
    image[image > 0] = 255
    image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
    # image = filter_pix_noise(image)
    # image = cv2.bitwise_not(filter_pix_noise(cv2.bitwise_not(image)))
    if debug:
        cv2.imwrite('image.png', image)

    # Marker labelling
    ret, markers = cv2.connectedComponents(image)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    # To quickly mark contours by negative instance index, find zero-cross by binarized image
    LoG = cv2.Laplacian(image, cv2.CV_16S)
    minLoG = cv2.morphologyEx(LoG, cv2.MORPH_ERODE, np.ones((3, 3)))
    zeroCross = np.logical_and(minLoG < 0, LoG > 0)
    markers[zeroCross] = 0
    del LoG
    del minLoG
    del zeroCross
    gc.collect()

    t = np.asarray(np.dstack((prob_field, prob_field, prob_field)),
                   dtype=np.uint8)  # img
    instances = cv2.watershed(t, markers)
    if debug:
        t[instances == -1] = [255, 0, 0]
        cv2.imwrite('t.png', t)
    del markers
    del t
    gc.collect()

    (t, prob_field_th) = cv2.threshold(prob_field, 255 * 0.3, 255,
                                       cv2.THRESH_BINARY)
    if debug:
        cv2.imwrite('prob_field_th.png', prob_field_th)

    # instances shape: [h, w], dtype: int32
    return instances, prob_field_th
Example #45
0
#Let us threshold the dist transform by starting at 1/2 its max value.
#print(dist_transform.max()) gives about 21.9
    ret2, sure_fg = cv2.threshold(dist_transform,0.2*dist_transform.max(),255,0)

#0.2 * max value seems to separate the cells well.
#A high value like 0.5 will not recognize some grain boundaries.
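#Quick arithmetic (editor note): with dist_transform.max() at about 21.9, the
#threshold is 0.2 * 21.9, roughly 4.4, i.e. only pixels at least ~4.4 px away from
#the nearest background pixel survive as sure foreground.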

# The unknown, ambiguous region is nothing but background - foreground
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)

#Now we create a marker and label the regions inside. 
# For sure regions, both foreground and background will be labeled with positive numbers.
# Unknown regions will be labeled 0. 
#For markers let us use ConnectedComponents. 
    ret3, markers = cv2.connectedComponents(sure_fg)

#One problem right now is that the entire background is given the value 0.
#This means watershed considers this region as unknown.
#So let us add 10 to all labels, so that the sure background is not 0 but 10
#(watershed only needs the unknown region to be 0; any positive label works).
    markers = markers+10

# Now, mark the region of unknown with zero
    markers[unknown==255] = 0
#plt.imshow(markers)   #Look at the 3 distinct regions.

#Now we are ready for watershed filling. 
    markers = cv2.watershed(img1,markers)
#The boundary region will be marked -1
#https://docs.opencv.org/3.3.1/d7/d1b/group__imgproc__misc.html#ga3267243e4d3f95165d55a618c65ac6e1
Example #46
0
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

img = cv.imread('../Images & Videos/coins.jpg')
img_grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
blur = cv.medianBlur(img_grey, ksize=11)
ret, thresh = cv.threshold(blur, 70, 255, cv.THRESH_BINARY)
sure_back = cv.dilate(thresh, np.ones((5, 5), np.uint8), iterations=2)  # kernel must be an array, not a tuple
sure_fore = cv.distanceTransform(thresh, cv.DIST_L2, 5)
ret, sure_fore = cv.threshold(sure_fore, 20, 255, cv.THRESH_BINARY)
sure_fore = np.uint8(sure_fore)
ret, matchers = cv.connectedComponents(sure_fore)
print(matchers.min(), matchers.max())
unknown = cv.subtract(sure_back, sure_fore)
matchers = matchers + 1  # shift labels so the sure background becomes 1 and foreground components stay distinct
matchers[unknown == 255] = 0
ws = cv.watershed(img, markers=matchers)
image, contours, hierarchy = cv.findContours(ws, cv.RETR_CCOMP,
                                             cv.CHAIN_APPROX_SIMPLE)
for i in range(len(contours)):
    if hierarchy[0][i][3] == -1:
        cv.drawContours(img, contours, i, (0, 255, 0), 2)
plt.imshow(img, cmap='gray')
plt.show()
Example #47
0
    imageBoundaries = cv2.putText(imgColor, str(counter), (x, y + 80),
                                  cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 5)
    print('--- Region', counter, '---')
    print('x=', x, 'y=', y, 'h=', h, 'w=', w)

    # finds white pixels in an image
    # in this case, letters (the binary image is: black background, white letters)
    # numpy indexing is [rows, cols] == [y:y+h, x:x+w] for a boundingRect
    lettersPixels = cv2.countNonZero(imgThresh[y:y + h, x:x + w])
    print('the number of pixels of letters: ', lettersPixels)
    print('Bounding Box Area(px):', h * w)

    # print every step of boundary drawn
    imageBoundariesPrint = cv2.resize(imageBoundaries, (1920, 900))
    #cv2.imshow('rectangles', imageBoundariesPrint)
    #cv2.waitKey(0)

    # find words in subregion currently drawn
    numWords, _ = cv2.connectedComponents(imgDilateWords[y:y + h, x:x + w])
    print('Number of words in subregion:', numWords)

    # find the gray level mean
    grayLevel = img[y:y + h, x:x + w].sum() / (h * w)
    print('Mean gray-level value in bounding box:', grayLevel)

    counter = counter + 1

imageBoundariesPrint = cv2.resize(imageBoundaries, (1000, 900))
cv2.imshow('rectangles', imageBoundariesPrint)
cv2.waitKey(0)
cv2.imwrite('C:/Users/haris/Desktop/doc_db/pythonProject/result.png',
            imageBoundaries)
Example #48
0
def find_clusters(img):
    return cv2.connectedComponents((img > 0.5).astype(np.uint8))[1]
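# Usage sketch (editor addition, inputs assumed): find_clusters expects a float map
# in [0, 1]; thresholding at 0.5 binarizes it and connectedComponents assigns each
# blob its own integer label, with the background staying 0.
import numpy as np

prob = np.zeros((6, 6), np.float32)
prob[0:2, 0:2] = 0.9   # first blob
prob[4:6, 4:6] = 0.8   # second blob
labels = find_clusters(prob)   # contains 0 (background), 1 and 2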
Example #49
0
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

kernel = np.ones((2,2),np.uint8)

#sure background
closing = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel, iterations = 2)
sure_bg = cv2.dilate(closing,kernel,iterations=3)
dist_transform = cv2.distanceTransform(sure_bg,cv2.DIST_L2,3)

#sure foreground
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
ret, sure_fg = cv2.threshold(dist_transform,0.17*dist_transform.max(),255,0)
sure_fg = np.uint8(sure_fg) 
unknown = cv2.subtract(sure_bg,sure_fg)

ret, segmented_image = cv2.connectedComponents(sure_fg) #marker labelling

segmented_image = segmented_image+1 #background is 1
segmented_image[unknown==255] = 0 #unknown is zero

segmented_image = cv2.watershed(image,segmented_image)

image[segmented_image == -1] = [255,0,0]

plt.imshow(thresh)


filename2 = "coins_watershed_kmeans.jpg"
cv2.imwrite(filename2,image)

"""
def textDetectWatershed(thresh, original):
    """ Text detection using watershed algorithm """
    # According to: http://docs.opencv.org/trunk/d3/db4/tutorial_py_watershed.html
    img = original
    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.01 * dist_transform.max(),
                                 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers += 1

    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv2.watershed(img, markers)
    #implt(markers, t='Markers')
    image = img.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Creating result array
    boxes = []
    for mark in np.unique(markers):
        # mark == 0 --> background
        if mark == 0:
            continue

        # Draw it on mask and detect biggest contour
        mask = np.zeros(gray.shape, dtype="uint8")
        mask[markers == mark] = 255

        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        c = max(cnts, key=cv2.contourArea)

        # Draw a bounding rectangle if it contains text
        x, y, w, h = cv2.boundingRect(c)
        cv2.drawContours(mask, [c], 0, (255, 255, 255), cv2.FILLED)
        maskROI = mask[y:y + h, x:x + w]
        # Ratio of white pixels to area of bounding rectangle
        r = cv2.countNonZero(maskROI) / (w * h)

        # Limits for text
        if r > 0.1 and 2000 > w > 15 and 1500 > h > 15:
            boxes += [[x, y, w, h]]

    # Group intersecting rectangles
    boxes = group_rectangles(boxes)
    bounding_boxes = np.array([0, 0, 0, 0])
    for (x, y, w, h) in boxes:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 8)
        bounding_boxes = np.vstack(
            (bounding_boxes, np.array([x, y, x + w, y + h])))

    implt(image)

    # Recalculate coordinates to original size
    #boxes = bounding_boxes.dot(ratio(original, img.shape[0])).astype(np.int64)
    return boxes
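# Editor's note: group_rectangles is defined elsewhere in this project. A minimal
# sketch of the idea (assumption: repeatedly merge any two boxes that intersect
# until no intersecting pair remains):
def group_rectangles_sketch(boxes):
    def intersects(a, b):
        ax, ay, aw, ah = a
        bx, by, bw, bh = b
        return ax < bx + bw and bx < ax + aw and ay < by + bh and by < ay + ah

    boxes = [list(b) for b in boxes]
    merged = True
    while merged:
        merged = False
        out = []
        while boxes:
            x, y, w, h = boxes.pop()
            i = 0
            while i < len(boxes):
                if intersects((x, y, w, h), boxes[i]):
                    # grow the current box to the union and drop the absorbed one
                    bx, by, bw, bh = boxes.pop(i)
                    nx, ny = min(x, bx), min(y, by)
                    w, h = max(x + w, bx + bw) - nx, max(y + h, by + bh) - ny
                    x, y = nx, ny
                    merged = True
                else:
                    i += 1
            out.append([x, y, w, h])
        boxes = out
    return boxes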
Example #51
0
def textDetectWatershed(thresh):
    """NOT IN USE - Text detection using watershed algorithm.
    Based on: http://docs.opencv.org/trunk/d3/db4/tutorial_py_watershed.html
    """
    img = cv2.cvtColor(cv2.imread("test/n.jpg"), cv2.COLOR_BGR2RGB)
    print(img)
    img = resize(img, 3000)
    thresh = resize(thresh, 3000)
    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.01 * dist_transform.max(),
                                 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers += 1

    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv2.watershed(img, markers)
    implt(markers, t='Markers')
    image = img.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    for mark in np.unique(markers):
        # mark == 0 --> background
        if mark == 0:
            continue

        # Draw it on mask and detect biggest contour
        mask = np.zeros(gray.shape, dtype="uint8")
        mask[markers == mark] = 255

        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        c = max(cnts, key=cv2.contourArea)

        # Draw a bounding rectangle if it contains text
        x, y, w, h = cv2.boundingRect(c)
        cv2.drawContours(mask, [c], 0, (255, 255, 255), cv2.FILLED)
        maskROI = mask[y:y + h, x:x + w]
        # Ratio of white pixels to area of bounding rectangle
        r = cv2.countNonZero(maskROI) / (w * h)

        # Limits for text
        if r > 0.2 and 2000 > w > 15 and 1500 > h > 15:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    implt(image)
Example #52
0
def main():
    count = 0
    alpha = 0.3
    fgbg = cv2.createBackgroundSubtractorMOG2()
    kernel = np.ones((3, 3), np.uint8)

    while True:
        img_name = 'res/breakdance/' + '{:05}'.format(count) + '.jpg'
        frame = cv2.imread(img_name, 1)

        overlay = frame.copy()
        output = frame.copy()

        if count == 0:
            frame[:, 0:196] = 0
            frame[:, 560:854] = 0
            frame[402:480, :] = 0

        fgmask = fgbg.apply(frame)
        fgmask = cv2.erode(fgmask, None, iterations=2)
        fgmask = cv2.dilate(fgmask, None, iterations=2)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

        sure_bg = cv2.dilate(fgmask, kernel, iterations=1)
        dist_transform = cv2.distanceTransform(fgmask, cv2.DIST_L2, 5)
        _, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(),
                                   255, 0)

        sure_fg = np.uint8(sure_fg)
        unknown = cv2.subtract(sure_bg, sure_fg)

        ret, markers = cv2.connectedComponents(sure_fg)
        markers = markers + 1
        markers[unknown == 255] = 0
        markers = cv2.watershed(frame, markers)
        # frame[markers == -1] = [255,0,0]

        unknown = cv2.medianBlur(unknown, 11)
        unknown = cv2.morphologyEx(unknown, cv2.MORPH_OPEN, kernel)
        unknown = cv2.morphologyEx(unknown, cv2.MORPH_CLOSE, kernel)

        _, contours, _ = cv2.findContours(unknown, cv2.RETR_TREE,
                                          cv2.CHAIN_APPROX_SIMPLE)

        for contour in contours:
            area = cv2.contourArea(contour)
            if area > 500:
                cv2.fillPoly(overlay, np.int32([contour]), (0, 255, 0))
                cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)

        if count != 0:
            cv2.imshow('frame', output)

        time.sleep(1)
        count += 1

        k = cv2.waitKey(5) & 0xFF
        if k == 27 or count == 70:
            break

    cv2.destroyAllWindows()
Example #53
0
ret, thresh = cv2.threshold(gray, 0, 255,
                            cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
cv2.imshow('thresh', thresh)
#remove noise
kernel = np.ones((3, 3), np.uint8)
#opening=cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel,iterations=1)
#cv2.imshow('open',opening)
#determine the sure background region
sure_bg = cv2.dilate(thresh, kernel, iterations=2)
cv2.imshow('sure_bg', sure_bg)
#distance transform
dist_transform = cv2.distanceTransform(thresh, 1, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255,
                             0)
cv2.imshow('d_trans', np.uint8(dist_transform * 10))
cv2.imshow('sure_fg', sure_fg)
#find the unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
cv2.imshow('uno', unknown)

ret, markersl = cv2.connectedComponents(
    sure_fg)  # label the obtained foreground: the background is marked 0 and the other objects get positive integer labels starting from 1.

markers = markersl + 1
markers[unknown == 255] = 0

markers3 = cv2.watershed(img, markers)
img[markers3 == -1] = [0, 0, 255]
cv2.imshow('1', img)
Example #54
0
    def getResults(self):
        if self.imagePath == "":
            return
        tumorStatus = ""
        tumorLocation = ""
        image = cv2.imread(self.imagePath)
        resizedImage = cv2.resize(image, (400, 400))
        gray = cv2.cvtColor(resizedImage, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)
        ret, markers = cv2.connectedComponents(thresh)
        marker_area = [
            np.sum(markers == m) for m in range(1, np.max(markers) + 1)
        ]
        largest_component = np.argmax(marker_area) + 1
        brain_mask = markers == largest_component
        brain_out = resizedImage.copy()
        brain_out[brain_mask == False] = (0, 0, 0)
        gray = cv2.cvtColor(brain_out, cv2.COLOR_BGR2GRAY)
        median = cv2.medianBlur(brain_out, 3)
        data = median.reshape(median.shape[0] * median.shape[1],
                              median.shape[2])
        data = np.float32(data)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10,
                    1.0)
        attempts = 10
        ret, label, center = cv2.kmeans(data, 3, None, criteria, attempts,
                                        cv2.KMEANS_PP_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        segmented = res.reshape(median.shape[0], median.shape[1],
                                median.shape[2])
        segmentedImage = self.getQImage(segmented)
        imageToPixmap = QPixmap(QPixmap.fromImage(segmentedImage))
        self.segmentedImage.setPixmap(QPixmap(imageToPixmap))

        area = 0
        rows, cols, dims = segmented.shape
        pixels = list()
        reduced_pixels = list()
        for j in range(rows):
            for k in range(cols):
                pixels.append(segmented[j, k][0])
                pixels.append(segmented[j, k][1])
                pixels.append(segmented[j, k][2])

        pix_array = np.array(pixels)
        reduced_pixels = np.unique(pix_array)
        pixel = int(sum(reduced_pixels) / (len(reduced_pixels) - 1))
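        # Editor's note: reduced_pixels holds the distinct intensity values left by
        # the 3-cluster k-means; dividing by len(reduced_pixels) - 1 apparently
        # leaves the black background value out of the denominator when picking a
        # threshold.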

        if (pixel >= 120):
            ret, thresh = cv2.threshold(segmented, pixel, 255,
                                        cv2.THRESH_BINARY)
        else:
            ret, thresh = cv2.threshold(segmented, 120, 255, cv2.THRESH_BINARY)

        kernel = np.ones((5, 5), np.uint8)
        erode = cv2.erode(thresh, kernel)
        erode = cv2.cvtColor(erode, cv2.COLOR_BGR2GRAY)
        contours, hierarchy = cv2.findContours(erode, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        x, y, w, h = 0, 0, erode.shape[1] // 2, erode.shape[0]
        left = erode[y:y + h, x:x + w]
        right = erode[y:y + h, x + w:x + w + w]
        left_pixels = cv2.countNonZero(left)
        right_pixels = cv2.countNonZero(right)
        ratio = -1
        gray = cv2.cvtColor(resizedImage, cv2.COLOR_BGR2GRAY)
        if len(contours) > 0:
            contour = max(contours, key=cv2.contourArea)
            area = cv2.contourArea(contour)
            cv2.drawContours(segmented, contours, -1, (0, 0, 255), 3)
            contourImage = self.getQImage(segmented)
            imageToPixmap = QPixmap(QPixmap.fromImage(contourImage))
            if area > 0:
                if left_pixels == 0 and right_pixels > 0:
                    tumorStatus = "Yes"
                    tumorLocation = "Right"
                elif right_pixels == 0 and left_pixels > 0:
                    tumorStatus = "Yes"
                    tumorLocation = "Left"
                elif right_pixels > left_pixels:
                    ratio = float(right_pixels / left_pixels)
                    if ratio >= 1.5:
                        tumorStatus = "Yes"
                        tumorLocation = "Right"
                    else:
                        tumorStatus = "No"
                else:
                    ratio = float(left_pixels / right_pixels)
                    if ratio >= 1.5:
                        tumorStatus = "Yes"
                        tumorLocation = "Left"
                    else:
                        tumorStatus = "No"
            else:
                tumorStatus = "No"
        else:
            tumorStatus = "No"

        fontStyle = QFont("Arial", 10, QFont.Bold)
        self.tumorInfo.setFont(fontStyle)
        if tumorStatus == "No":
            tumorInfoStr = "Tumor Status: " + tumorStatus
            self.tumorInfo.setStyleSheet("QLabel { color : #39ff14; }")
            self.tumorInfo.setText(tumorInfoStr)
        else:
            self.contourImage.setPixmap(QPixmap(imageToPixmap))
            tumorInfoStr = "Tumor Status: " + tumorStatus + "\nTumor Location: " + tumorLocation
            self.tumorInfo.setStyleSheet("QLabel { color : #ff073a; }")
            self.tumorInfo.setText(tumorInfoStr)
show(cr_crop, "cr_crop_opening" + str(ch_i))

#-----------------------------------------------------------------
flag = 1
while flag == 1:

    flag = 0
    plot_list = []
    # how many convex regions are there
    cr_crop, cr_contours, cr_hierarchy = cv2.findContours(
        cr_crop, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    print("How many convex regions: ", len(cr_contours))

    # current label
    cr_crop_marker = cr_crop.copy()
    ret, cr_crop_marker = cv2.connectedComponents(cr_crop_marker)
    ######################## for loop for each convex region ##################

    # create cr object list TODO: create
    convex_region_obj = []
    for_case1_connection = []
    previous = 0
    for i, current in enumerate(cr_contours):
        cur_num = cr_crop_marker[current[0][0][1], current[0][0][0]]
        print(cur_num)
        if cur_num == previous:
            print("repeat", cur_num)
            continue
        # highlight one region
        cr_crop_current = cr_crop.copy()
        cr_crop_current[cr_crop_marker != cur_num] = 0
Example #56
0
def find_cell_ellipses(img):
    threshold = 20
    ret, mask = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)

    # it is hard to see what the median filter does because the noise has such low amplitude.
    # you can see the dark noise speckles in the bright nuclei get reduced.
    median = cv2.medianBlur(mask, 5)

    # Let's try to get rid of dark spots in nuclei with a closing filter.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                       (11, 11)).astype(np.uint8)
    closing = cv2.morphologyEx(median, cv2.MORPH_CLOSE, kernel, iterations=2)
    opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel, iterations=2)

    # Dilate to get background
    kernel = np.ones((3, 3))
    sure_background = cv2.morphologyEx(opening,
                                       cv2.MORPH_DILATE,
                                       kernel,
                                       iterations=20)

    tmp = mask_on_image(closing, sure_background)

    #plt.imshow(tmp)
    #plt.show()
    # Distance threshold to separate cells

    tmp = closing[..., 0]

    #print(tmp.shape)
    #print(tmp.dtype)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(tmp, cv2.DIST_L2, 5)
    ret, sure_cells = cv2.threshold(dist_transform, 0.4 * dist_transform.max(),
                                    255, 0)

    # Finding unknown region
    sure_cells = np.uint8(sure_cells)

    #print(sure_cells.shape)
    #plt.imshow(sure_cells)
    #plt.show()
    #print(sure_cells.shape)
    cell_mask = sure_cells.copy()
    background_mask = cv2.cvtColor(sure_background, cv2.COLOR_RGB2GRAY)

    unknown = cv2.subtract(background_mask, cell_mask)

    ret, markers = cv2.connectedComponents(cell_mask)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    #print(markers.shape)
    #print(markers.dtype)

    #plt.imshow(markers)
    #plt.show()
    tmp = closing.copy()
    ws = cv2.watershed(tmp, markers)

    tmp = ws.copy()
    #print((np.min(tmp),np.max(tmp)))
    tmp = np.clip(tmp, 1, 255) - 1
    #print((np.min(tmp),np.max(tmp)))
    tmp = tmp.astype(np.uint8)

    kernel = np.ones((3, 3))
    tmp = cv2.morphologyEx(tmp, cv2.MORPH_ERODE, kernel, iterations=1)

    _, contours, _ = cv2.findContours(tmp, cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_SIMPLE)

    ellipse_list = []
    for cont in contours:
        if len(cont) > 4:
            area = cv2.contourArea(cont)
            if area > 100:
                e = cv2.fitEllipse(cont)
                ellipse_list.append(e)

    return ellipse_list
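# Editor's note: mask_on_image is not defined in this snippet. A minimal sketch of
# what the call above seems to expect (assumption: blend a mask over an image for
# visualization; img is taken to be 3-channel):
def mask_on_image_sketch(img, mask):
    m = mask if mask.ndim == 2 else mask[..., 0]   # accept 1- or 3-channel masks
    overlay = img.copy()
    overlay[m > 0] = (0, 255, 0)                   # paint masked pixels green
    return cv2.addWeighted(img, 0.5, overlay, 0.5, 0)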
Example #57
0
                    #img = cv2.blur(img,(5,5))

                    # output contours, pass 1
                    img_dst, contours, hierarchy = cv2.findContours(
                        mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

                    # output contours, pass 2
                    # dilate the first contours to get the position of the shrimp.
                    kernel = np.ones((3, 3), np.uint8)
                    #img_dst = cv2.erode(img_dst,kernel,iterations = 1)
                    img_dst = cv2.dilate(img_dst, kernel, iterations=9)
                    img_dst, contours, hierarchy = cv2.findContours(
                        img_dst, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

                    # labelling
                    ret, markers = cv2.connectedComponents(img_dst)

                    # prepare to write out the labelling result
                    color_src = cv2.cvtColor(img_dst, cv2.COLOR_GRAY2BGR)
                    height, width = img_dst.shape[:2]
                    colors = []

                    for i in range(1, ret + 1):
                        colors.append(
                            np.array([
                                random.randint(0, 255),
                                random.randint(0, 255),
                                random.randint(0, 255)
                            ]))

                    # labelling
def processing(data):
    # loading image
    # Getting 3 images to work with
    img = [cv2.imread(i, cv2.IMREAD_UNCHANGED) for i in data]
    print('Original size', img[0].shape)  # img is a list, so inspect one element
    # --------------------------------
    # setting dim of the resize
    height = 220
    width = 220
    dim = (width, height)
    res_img = []
    for i in range(len(img)):
        res = cv2.resize(img[i], dim, interpolation=cv2.INTER_LINEAR)
        res_img.append(res)

    # Checking the size
    print("RESIZED", res_img[1].shape)

    # Visualizing one of the images in the array
    original = res_img[1]
    display_one(original)
    # Remove noise using Gaussian blur
    no_noise = []
    for i in range(len(res_img)):
        blur = cv2.GaussianBlur(res_img[i], (5, 5), 0)
        no_noise.append(blur)
        image = no_noise[1]
        display(original, image, 'Original', 'Blurred')
    # Segmentation
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    # Displaying segmented images
    display(original, thresh, 'Original', 'Segmented')
    # Further noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(),
                                 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    #Displaying segmented background
    display(original, sure_bg, 'Original', 'Segmented Background')
    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv2.watershed(image, markers)
    image[markers == -1] = [255, 0, 0]

    # Displaying markers on the image
    display(image, markers, 'Original', 'Marked')
def segmentation(img, sequence, origimg=None, wordNo=None, filename=None):
    if (sequence == "word"):  # resize to find the words
        width = 940
        height = int(img.shape[0] * (width / img.shape[1]))
        sigma = 18
    elif (sequence == "character"):  # resize to find the characters
        width = img.shape[1]  # 1280
        height = img.shape[0]  # int(img.shape[0] * (width / img.shape[1]))
        sigma = 0

    img = cv2.resize(img, (width, height))
    blurred = cv2.GaussianBlur(img, (5, 5), sigma)  # apply gaussian blur

    if (sequence == "word"):
        blurred = _edge_detect(blurred)  # edge detect in blurred image (words)
        # Otsu's thresholding with Binary
        ret, img = cv2.threshold(blurred, 0, 255,
                                 cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # Morphological processing - Black&White
        img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, np.ones((15, 15),
                                                             np.uint8))

    elif (sequence == "character"):
        # Otsu's thresholding with Binary Inverted
        ret, img = cv2.threshold(blurred, 0, 255,
                                 cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    num_labels, labels_im = cv2.connectedComponents(
        img)  # find the connected components

    if (sequence == "word"):
        boxes = []  # for storing the coordinates of the bounding boxes
        for i in range(1, num_labels):
            # select the images with label
            new, nr_objects = ndimage.label(labels_im == i)
            # clipping the image to the edges
            new, new_coord = clipping_image(new)
            if (not (new.shape[0] < 10 or new.shape[1] < 10)):
                boxes.append(new_coord)

    if (sequence == "character"):
        boxes = []
        label_box = []
        for i in range(1, num_labels):
            # select the images with label
            new, nr_objects = ndimage.label(labels_im == i)
            # clipping the image to the edges
            new, new_coord = clipping_image(new)
            if (not (new.shape[0] < 10 or new.shape[1] < 10)):
                label_box.append([i, new_coord])
        label_box = sort_labels(label_box)  # sort the words
        chNo = 0
        for box in label_box:
            ch_img, nr_objects = ndimage.label(labels_im == box[0])
            ch_img, new_coord = clipping_image(ch_img)
            cropped_image = padding_resizing_image(ch_img, 32)
            cropped_image_x64 = padding_resizing_image(ch_img, 64)
            try:
                dst = os.path.join(OUTPUT_FOLDER_PNG, filename,
                                   str(wordNo) + "_" + str(chNo) + ".png")
                dst_x64 = os.path.join(OUTPUT_FOLDER_PNG_x64, filename,
                                       str(wordNo) + "_" + str(chNo) + ".png")
                plt.imsave(dst, cropped_image, cmap=cm.gray)
                plt.imsave(dst_x64, cropped_image_x64, cmap=cm.gray)
            except:
                pass
            finally:
                pass
            chNo += 1
    return img, boxes
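# Editor's note: clipping_image and padding_resizing_image are project helpers not
# shown here. Minimal sketches consistent with the calls above (assumptions):
def clipping_image_sketch(img):
    # crop a labelled/binary image to the bounding box of its nonzero pixels
    ys, xs = np.nonzero(img)
    y0, y1, x0, x1 = ys.min(), ys.max() + 1, xs.min(), xs.max() + 1
    return img[y0:y1, x0:x1], (y0, y1, x0, x1)

def padding_resizing_image_sketch(img, size):
    # pad to a square canvas, then resize to size x size for the classifier
    h, w = img.shape[:2]
    side = max(h, w)
    padded = np.zeros((side, side), np.uint8)
    padded[(side - h) // 2:(side - h) // 2 + h,
           (side - w) // 2:(side - w) // 2 + w] = (img > 0) * 255
    return cv2.resize(padded, (size, size), interpolation=cv2.INTER_AREA)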
Example #60
0
    structuring_element = np.ones((15, 30), np.uint8)
    opening = cv2.morphologyEx(img_grayscale, cv2.MORPH_OPEN,
                               structuring_element)  # Morphological opening
    subtract = cv2.subtract(img_grayscale, opening)

    (thresh, im_bw) = cv2.threshold(subtract, 128, 255, cv2.THRESH_BINARY
                                    | cv2.THRESH_OTSU)  # Binarization

    structuring_element_2 = np.ones((3, 3), np.uint8)
    opening_2 = cv2.morphologyEx(
        im_bw, cv2.MORPH_OPEN, structuring_element_2)  # Morphological opening
    structuring_element_close = np.ones((5, 15), np.uint8)
    plate = cv2.morphologyEx(opening_2, cv2.MORPH_CLOSE,
                             structuring_element_close)

    n_regions, plate2 = cv2.connectedComponents(plate, connectivity=4)  # connectivity must be passed by keyword here
    im, contours, hierarchy = cv2.findContours(plate, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
    max_area = 1
    j = 0
    blob_num = 0
    for i in range(0, n_regions - 1):
        x, y, w, h = cv2.boundingRect(contours[i])
        area = cv2.contourArea(contours[i])
        ratio = w / h
        if (((ratio >= 2.7) and (ratio <= 4)) and
            ((w >= 100 and h >= 30))) and (
                (w <= 200 and h <= 60)):  # Brazilian plate values
            if max_area <= (area):
                max_area = (area)
                blob_num = i