Example #1
    def update(self,frame,recent_pupil_positions,events):

        falloff = self.falloff.value

        img = frame.img
        img_shape = img.shape[:-1][::-1]#width,height
        norm_gaze = [ng['norm_gaze'] for ng in recent_pupil_positions if ng['norm_gaze'] is not None]
        screen_gaze = [denormalize(ng,img_shape,flip_y=True) for ng in norm_gaze]


        overlay = np.ones(img.shape[:-1],dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in screen_gaze:
            try:
                overlay[int(gaze_point[1]),int(gaze_point[0])] = 0
            except:
                pass

        out = cv2.distanceTransform(overlay,cv2.cv.CV_DIST_L2, 5)

        # fix for opencv binding inconsistency
        if type(out)==tuple:
            out = out[0]

        overlay =  1/(out/falloff+1)

        img *= cv2.cvtColor(overlay,cv2.COLOR_GRAY2RGB)
Example #2
File: main3.py  Project: Daiver/jff
def discretPoints2ContourGrad(points, contourImg):
    distField = cv2.distanceTransform(contourImg, cv2.DIST_L2, 0)
    dxKernel = np.array([
        [ 0, 0, 0],
        [-1, 0, 1],
        [ 0, 0, 0]
    ], dtype=np.float32) / 2.0
    dyKernel = np.array([
        [0, -1,  0],
        [0,  0,  0],
        [0,  1,  0]
    ], dtype=np.float32) / 2.0

    #dxKernel = np.array([[]], dtype=np.float32)

    dx = cv2.filter2D(distField, -1, dxKernel)
    dy = cv2.filter2D(distField, -1, dyKernel)
    
    res = np.zeros_like(points)
    for i, p in enumerate(points):
        res[i, 0] = takePixelInterpolated(dx, p)
        res[i, 1] = takePixelInterpolated(dy, p)
    print('p>', points)
    print('g>', res)
    return res
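A note on these kernels: cv2.filter2D performs correlation, so dxKernel computes the central difference (f[i, j+1] - f[i, j-1]) / 2 along x, i.e. the x-component of the gradient of the distance field. A minimal self-contained sanity check (the array values here are purely illustrative):

import cv2
import numpy as np

# Central-difference kernel along x; filter2D correlates, so at interior
# pixels this yields (f[i, j+1] - f[i, j-1]) / 2.
f = np.arange(25, dtype=np.float32).reshape(5, 5)
dxKernel = np.array([[0, 0, 0], [-1, 0, 1], [0, 0, 0]], np.float32) / 2.0
dx = cv2.filter2D(f, -1, dxKernel)
# Interior columns match np.gradient along axis 1:
assert np.allclose(dx[:, 1:-1], np.gradient(f, axis=1)[:, 1:-1])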
Example #3
def segment_on_dt(img):
    #http://stackoverflow.com/questions/11294859/how-to-define-the-markers-for-watershed-in-opencv
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)    
    _, img_bin = cv2.threshold(img_gray, 0, 255,cv2.THRESH_OTSU)
    img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN,numpy.ones((3, 3), dtype=int))
    border = cv2.dilate(img_bin, None, iterations=5)
    border = border - cv2.erode(border, None)

    dt = cv2.distanceTransform(img_bin, 2, 3)
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(numpy.uint8)
    _, dt = cv2.threshold(dt, 180, 255, cv2.THRESH_BINARY)
    lbl, ncc = label(dt)
    lbl = lbl * (255/ncc)
    # Completing the markers now. 
    lbl[border == 255] = 255

    lbl = lbl.astype(numpy.int32)
    cv2.watershed(img, lbl)

    lbl[lbl == -1] = 0
    lbl = lbl.astype(numpy.uint8)
    result = 255 - lbl
    result[result != 255] = 0
    result = cv2.dilate(result, None)
    img[result == 255] = (0, 0, 255)
    return img
Example #4
def distance_transform(bin_img, distance_type, mask_size):
    """Creates an image where for each object pixel, a number is assigned that corresponds to the distance to the
    nearest background pixel.

    Inputs:
    bin_img         = Binary image data
    distance_type   = Type of distance. It can be CV_DIST_L1, CV_DIST_L2, or CV_DIST_C, which are 1, 2, and 3,
                      respectively.
    mask_size       = Size of the distance transform mask. It can be 3, 5, or CV_DIST_MASK_PRECISE (the latter option
                      is only supported by the first function). In case of the CV_DIST_L1 or CV_DIST_C distance type,
                      the parameter is forced to 3 because a 3 by 3 mask gives the same result as 5 by 5 or any larger
                      aperture.

    Returns:
    norm_image      = grayscale distance-transformed image normalized between [0, 1]

    :param bin_img: numpy.ndarray
    :param distance_type: int
    :param mask_size: int
    :return norm_image: numpy.ndarray
    """

    params.device += 1
    dist = cv2.distanceTransform(src=bin_img, distanceType=distance_type, maskSize=mask_size)
    norm_image = cv2.normalize(src=dist, dst=dist, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)

    if params.debug == 'print':
        print_image(norm_image, os.path.join(params.debug, str(params.device) + '_distance_transform.png'))
    elif params.debug == 'plot':
        plot_image(norm_image, cmap='gray')

    return norm_image
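A minimal standalone sketch of the same two-step recipe (distance transform, then min-max normalization to [0, 1]), without the PlantCV-style params/debug plumbing used above; the synthetic circle mask is purely illustrative:

import cv2
import numpy as np

# Synthetic binary mask: a filled circle (illustrative input)
bin_img = np.zeros((200, 200), np.uint8)
cv2.circle(bin_img, (100, 100), 60, 255, -1)

# Distance to the nearest background pixel, then scale to [0, 1]
dist = cv2.distanceTransform(bin_img, cv2.DIST_L2, maskSize=3)
norm_image = cv2.normalize(dist, None, alpha=0, beta=1,
                           norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)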
Example #5
def proc_border(img):
    # Compute horizontal Sobel gradient
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0)
    sobelx = np.absolute(sobelx)

    # Compute vertical Sobel gradient
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1)
    sobely = np.absolute(sobely)

    # Compute gradient magnitude image
    mag = np.sqrt(sobelx ** 2 + sobely ** 2)  
    mag = np.uint8(mag)
    del(img, sobelx, sobely)

    # Threshold the gradient magnitude image
    mag = cv2.threshold(mag, 2, 1, cv2.THRESH_BINARY)[1]

    # Fill holes of the thresholded image
    contour, hier = cv2.findContours(mag, cv2.RETR_CCOMP,
        cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contour:
        cv2.drawContours(mag, [cnt], 0, 255, -1) 

    # Distance transform
    mag = cv2.distanceTransform(mag, cv2.cv.CV_DIST_L1,
        cv2.cv.CV_DIST_MASK_PRECISE)
    return mag
Example #6
def swtChenAltered(img):
	'Takes a distance transform and gives the stroke width transform (SWT).'
	dT = cv2.distanceTransform(img,cv2.DIST_L2,5)
	dT = np.uint8(np.absolute(dT))
	_,dist_threshold = cv2.threshold(dT,1,255,cv2.THRESH_BINARY)
	diff = img - dist_threshold
	dist = dT.copy()
	dist[diff==255] = 1
	lookUp = {}
	height,width = dist.shape
	for i in range(height):
		for j in range(width):
			if dist.item(i,j)>0:
				xlist = [j-1,j,j+1]
				ylist = [i-1,i,i+1]
				point = (i,j)
				for y in ylist:
					if (0 <= y < height):
						for x in xlist:
							if (0 <= x < width):
								if dist.item(y,x)>dist.item(point):
									point = (y,x)
				lookUp[(i,j)] = point
	swt = dist.copy()
	for i in range(height):
		for j in range(width):
			if dist.item(i,j)>0:
				point = (i,j)
				while point != lookUp[point]:
					point = lookUp[point]
				val = swt.item(point)
				swt.itemset(i,j,val)
	return swt
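A hedged usage sketch: the function expects a binary uint8 image with the text strokes as white (255) foreground. The file name here is hypothetical:

import cv2

# Hypothetical input file; strokes must be white (255) on black
img = cv2.imread('strokes.png', cv2.IMREAD_GRAYSCALE)
_, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
swt = swtChenAltered(img)  # per-pixel approximate stroke width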
Example #7
def water(img, thresh):
    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)

    # sure background area
    sure_bg = cv2.dilate(opening,kernel,iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening,2,5)
    ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers += 1

    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0

    markers = cv2.watershed(img,markers)
    img[markers == -1] = [255,0,0]
    return sure_fg, sure_bg, markers, img
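For context, water() follows the standard OpenCV watershed tutorial recipe (sure background by dilation, sure foreground by thresholding the distance transform, unknown as their difference). A minimal driver might look like this; the file name and the inverse-Otsu thresholding are assumptions:

import cv2
import numpy as np

img = cv2.imread('coins.png')  # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 0, 255,
                          cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
sure_fg, sure_bg, markers, marked = water(img, thresh)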
Example #8
    def watershed(self,img):
        '''
            Perform region segmentation with watershed
            args :      -> 
            dst  :      -> 
            param:      -> 
        '''
        gimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gimg,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)

        # noise removal
        kernel = np.ones((3,3),np.uint8)
        opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
        
        # sure background area
        sure_bg = cv2.dilate(opening,kernel,iterations=3)
        
        # Finding sure foreground area
        dist_transform = cv2.distanceTransform(opening,cv.CV_DIST_L2,5)
        ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
        
        # Finding unknown region
        sure_fg = np.uint8(sure_fg)
        unknown = cv2.subtract(sure_bg,sure_fg) 

        # Marker labelling
        ret, markers = cv2.connectedComponents(sure_fg)
        
        # Add one to all labels so that sure background is not 0, but 1
        markers = markers+1
        
        # Now, mark the region of unknown with zero
        markers[unknown==255] = 0
Example #9
 def UpdateModelFromMask(self, mask, img, hsv):
     self.avgRGB = cv.mean(img, mask)[0:3]
     self.avgHSV = cv.mean(hsv, mask)[0:3]
     distMap = cv.distanceTransform(1-mask, cv.cv.CV_DIST_L2, 5)[0]
     self.lineProbabilityMap = (1.0/(1.0+0.1*distMap))
     print self.avgRGB 
     print self.avgHSV 
Example #10
def segment(template, actual):

	ret3,th3 = cv2.threshold(template,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
	
	# noise removal
	kernel = np.ones((3,3),np.uint8)
	opening = cv2.morphologyEx(th3,cv2.MORPH_OPEN,kernel, iterations = 2)

	# sure background area
	sure_bg = cv2.dilate(opening,kernel,iterations=3)

	# Finding sure foreground area
	dist_transform = cv2.distanceTransform(opening,cv2.cv.CV_DIST_L2,5)
	ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

	# Finding unknown region
	sure_fg = np.uint8(sure_fg)
	unknown = cv2.subtract(sure_bg,sure_fg)
		
	# Marker labelling
	ret, markers = cv2.connectedComponents(sure_fg)

	# Add one to all labels so that sure background is not 0, but 1
	markers = markers+1

	# Now, mark the region of unknown with zero
	markers[unknown==255] = 0

	markers = cv2.watershed(actual,markers)  # the original referenced an undefined `img`; `actual` is the image being segmented
	actual[markers == -1] = [255,0,0]
	
	return actual
Example #11
def segment_on_dt(a, img):
    '''
    find foreground
    '''
    fg=cv2.dilate(img,None, iterations=5)
    fg=fg-cv2.erode(fg, None)
    dt=cv2.distanceTransform(img, 2, 3)
    
    dt=((dt-dt.min())/(dt.max()-dt.min())*255).astype(np.uint8)
   
    _, dt=cv2.threshold(dt, 0, 255, cv2.THRESH_BINARY)
    lbl, ncc=label(dt)
    lbl=lbl*(255/ncc)
    '''
    Completing the markers now
    '''
    lbl[fg==255]=255
    
    lbl=lbl.astype(np.int32)
    cv2.watershed(a, lbl)
    
    lbl[lbl==-1]=0
    lbl=lbl.astype(np.uint8)
    
    return 255-lbl
Example #12
    def check_thickness(self, stones: np.ndarray, rs=0, re=gsize, cs=0, ce=gsize, **kwargs):
        """ Check that the provided stones array doesn't contain "big chunks" that wouldn't make sense in a game of Go.

        Args:
            stones: ndarray
                The newly found stones, in other words the result to check.
            rs: int - inclusive
            re: int - exclusive
                Row start and end indexes. Can be used to restrict the check to a subregion.
            cs: int - inclusive
            ce: int - exclusive
                Column start and end indexes. Can be used to restrict the check to a subregion.
            kwargs:
                Allowing for keyword args enables multiple check methods to be called indifferently. See self.verify().

        Return status: int
            -1, 0, or 1 if the check is respectively refused, undetermined, or passed.
        """
        for color in (B, W):
            avatar = np.vectorize(lambda x: 1 if x is color else 0)(stones[rs:re, cs:ce].flatten())
            # diagonal moves cost as much as side moves
            dist = cv2.distanceTransform(avatar.reshape((re-rs, ce-cs)).astype(np.uint8), cv2.DIST_C, 3)
            if 2 < np.max(dist):
                # a stone surrounded by a 2-stones-thick wall of its own color is most likely not Go
                return -1
        return 0
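The Chebyshev metric (cv2.DIST_C) makes diagonal steps cost 1, so the transform measures wall thickness directly. A small self-contained illustration of the 2-thick threshold used above:

import cv2
import numpy as np

# A 5x5 block of same-colored "stones": its center is 3 steps (Chebyshev)
# from the nearest empty point, which would fail the `2 < max` check above.
grid = np.zeros((9, 9), np.uint8)
grid[2:7, 2:7] = 1
dist = cv2.distanceTransform(grid, cv2.DIST_C, 3)
print(dist.max())  # 3.0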
Example #13
    def get_centerline_optimized(self, alpha=1e3, beta=1e6, gamma=0.01,
                                 spacing=20, max_iterations=1000,
                                 endpoints=None):
        """ determines the center line of the polygon using an active contour
        algorithm """
        # use an active contour algorithm to find centerline
        ac = ActiveContour(blur_radius=1, alpha=alpha, beta=beta,
                           gamma=gamma, closed_loop=False)
        ac.max_iterations = max_iterations

        # set the potential from the distance map
        mask, offset = self.get_mask(1, ret_offset=True)
        potential = cv2.distanceTransform(mask, cv2.DIST_L2, 5)
        ac.set_potential(potential)
        
        # initialize the centerline from the estimate
        points = self.get_centerline_estimate(endpoints)
        points = curves.make_curve_equidistant(points, spacing=spacing)        
        points = curves.translate_points(points, -offset[0], -offset[1])
        # anchor the end points
        anchor = np.zeros(len(points), bool)  # np.bool is removed in modern NumPy
        anchor[0] = anchor[-1] = True
        
        # find the best contour
        points = ac.find_contour(points, anchor, anchor)
        
        points = curves.make_curve_equidistant(points, spacing=spacing)        
        return curves.translate_points(points, *offset)
Example #14
 def centerline(self):
     """ determine the center line of the tail """
     mask, offset = self.mask
     dist_map = cv2.distanceTransform(mask, cv2.DIST_L2, 5)
     
     # setup active contour algorithm
     ac = ActiveContour(blur_radius=self.centerline_blur_radius,
                        closed_loop=False,
                        alpha=0, #< line length is constraint by beta
                        beta=self.centerline_bending_stiffness,
                        gamma=self.centerline_adaptation_rate)
     ac.max_iterations = self.centerline_max_iterations
     ac.set_potential(dist_map)
     
     # find centerline starting from the ventral_side
     points = curves.translate_points(self.ventral_side,
                                      -offset[0], -offset[1])
     spacing = self.centerline_spacing
     points = curves.make_curve_equidistant(points, spacing=spacing)
     # use the active contour algorithm
     points = ac.find_contour(points)
     points = curves.make_curve_equidistant(points, spacing=spacing)
     # translate points back into global coordinate system
     points = curves.translate_points(points, offset[0], offset[1])
     
     # orient the centerline such that it starts at the posterior end
     dist1 = curves.point_distance(points[0], self.endpoints[0])
     dist2 = curves.point_distance(points[-1], self.endpoints[0])
     if dist1 > dist2:
         points = points[::-1]
     
     return points
Example #15
File: Metodos.py  Project: Foued70/yeztli
def transDistancia(img):
    #apply a Gaussian filter
    #imgBlur = cv2.GaussianBlur(img,(5,5),0) 
    if len(img.shape) == 3:            
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        print "converted the image to grayscale"

    #Otsu binarization
    ret, thresh = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)   
    
    
    # removing noise
    kernel = np.ones((3,3),np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)    
    
    #sure background area 
    sure_bg = cv2.dilate(opening,kernel,iterations=3)

    
    #distance transform
    dist_transform = cv2.distanceTransform(opening,cv2.cv.CV_DIST_L2,3)
    imshow(dist_transform)
    show()
    
    img = np.uint8(dist_transform)   
    print "Distance transform"    
    return img,sure_bg
Example #16
def segment_on_dt(a, img, gray):
    border = cv2.dilate(img, None, iterations=5)
    border = border - cv2.erode(border, None)


    dt = cv2.distanceTransform(img,cv2.DIST_L2,5)
    plt.subplot(3,3,4)
    plt.imshow(dt),plt.title('dt'),plt.xticks([]), plt.yticks([])

    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)
    _, dt2 = cv2.threshold(dt, 0, 255, cv2.THRESH_BINARY)
    dt2 = cv2.erode(dt2, None, iterations=2)
    # dt2 = cv2.adaptiveThreshold(dt, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
    # dt1 = peak_local_max(gray, indices=False, min_distance=10, labels=img, threshold_abs=5)
    # dt2 = peak_local_max(dt, indices=False, min_distance=5, labels=img, threshold_abs=0)
    lbl, ncc = label(dt2)

    plt.subplot(3,3,5)
    plt.imshow(dt2),plt.title('localMax'),plt.xticks([]), plt.yticks([])
    # plt.subplot(3,3,6)
    # plt.imshow(ncc),plt.title('ncc'),plt.xticks([]), plt.yticks([])

    lbl = lbl * (255/ncc)
    # Completing the markers now.
    lbl[border == 255] = 255

    lbl = lbl.astype(np.int32)
    cv2.watershed(a, lbl)

    lbl[lbl == -1] = 0
    lbl = lbl.astype(np.uint8)

    plt.subplot(3,3,6)
    plt.imshow(lbl),plt.title('lbl_out'),plt.xticks([]), plt.yticks([])
    return 255 - lbl
Example #17
def watershed(img, thresh):
    # noise removal
    kernel = np.ones((3,3), np.uint8)
    opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 4)

    # sure background area
    sure_bg = cv2.dilate(opening,kernel,iterations=3)

    #sure_bg = cv2.morphologyEx(sure_bg, cv2.MORPH_TOPHAT, kernel)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers+1

    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0
    '''
    imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    imgray = cv2.GaussianBlur(imgray, (5, 5), 0)
    img = cv2.Canny(imgray,200,500)
    '''
    markers = cv2.watershed(img,markers)
    img[markers == -1] = [255,0,0]

    return sure_bg, sure_fg
Example #18
    def recent_events(self, events):
        frame = events.get("frame")
        if not frame:
            return
        falloff = self.falloff

        img = frame.img
        pts = [
            denormalize(pt["norm_pos"], frame.img.shape[:-1][::-1], flip_y=True)
            for pt in events.get("gaze", [])
            if pt["confidence"] >= self.g_pool.min_data_confidence
        ]

        overlay = np.ones(img.shape[:-1], dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in pts:
            try:
                overlay[int(gaze_point[1]), int(gaze_point[0])] = 0
            except:
                pass

        out = cv2.distanceTransform(overlay, cv2.DIST_L2, 5)

        # fix for opencv binding inconsistency
        if type(out) == tuple:
            out = out[0]

        overlay = 1 / (out / falloff + 1)

        img[:] = np.multiply(
            img, cv2.cvtColor(overlay, cv2.COLOR_GRAY2RGB), casting="unsafe"
        )
Example #19
def mask2dtAndEdgelsCount(mask):
    maskCopy = mask.copy()
    contours, hierarchy = cv2.findContours(maskCopy, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contoursImage = np.zeros(mask.shape, np.uint8)
    cv2.drawContours(contoursImage, contours, -1, (255))
    edgelsCount = len(np.nonzero(contoursImage == 255)[0])
    dt = cv2.distanceTransform(~contoursImage, cv2.cv.CV_DIST_L1, cv2.cv.CV_DIST_MASK_PRECISE)
    return [dt, edgelsCount]
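The returned distance field is the usual ingredient for chamfer matching: sampling it at another shape's edge pixels and averaging gives an edge-to-edge dissimilarity. A hedged usage sketch (the function expects a binary uint8 mask; the file names and Canny thresholds here are hypothetical, and the function itself relies on the legacy cv2.cv constants it uses above):

import cv2
import numpy as np

mask = cv2.imread('shape_mask.png', cv2.IMREAD_GRAYSCALE)  # hypothetical
dt, edgelsCount = mask2dtAndEdgelsCount(mask)

# Chamfer-style score of a second contour image against the field:
other_edges = cv2.Canny(cv2.imread('other.png', cv2.IMREAD_GRAYSCALE), 50, 150)
ys, xs = np.nonzero(other_edges)
score = dt[ys, xs].mean()  # lower = better alignment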
Example #20
	def __segment(self, gray):
		# Subtract BG from the current frame
		mostChanges = np.abs(sum(self.W-self.BG))
		
		# Normalize max-min values
		mostChanges = (mostChanges.clip(0, max=1)*255).astype('uint8')
		self.__recognition = mostChanges.copy()

		# Apply a threshold
		_,mostChanges = cv2.threshold(mostChanges, 150, 255, cv2.THRESH_BINARY)
		mostChanges = cv2.medianBlur(mostChanges, 9)
		self.__mostChanges = mostChanges.copy()

		# Apply median filter and multiply with the current scene
		mask = cv2.medianBlur(mostChanges, 3)
		mask = np.multiply(gray, mask)
		mask = (mask*255).astype('uint8')

		# Apply distance filter to minimize possible collisions
		mask = cv2.distanceTransform(mask, cv2.cv.CV_DIST_L1, 3)
		# Normalize between 0 and 255
		mask = cv2.normalize(mask, mask, 0, 255, cv2.NORM_MINMAX).astype('uint8')
		# Reduce noise
		mask = cv2.GaussianBlur(mask, (3,3), 0.5)  # kernel size must be positive and odd; (-3,-3) is rejected by OpenCV
		# Apply threshold again to mask out real low values 
		# (Almost black zones, thus possible unions between objects)
		_,mask = cv2.threshold(mask, 10, 255, cv2.THRESH_BINARY)
		self.__mask = mask.copy()

		contours,_ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

		objs = []
		for c in contours:
			leftmost = c[:,:,0].min()
			rightmost = c[:,:,0].max()
			topmost = c[:,:,1].min()
			bottommost = c[:,:,1].max()

			area = ((rightmost - leftmost)*(bottommost-topmost))
			if area < self.min_area:
				continue

			objs.append((leftmost, rightmost, topmost, bottommost))

		found = 1
		while found > 0:
			found = 0

			for a in objs:
				for b in objs:
					if a != b:
						if self.__contains(a, b):
							objs.remove(b)
							found = 1
							break

		return objs
Example #21
def findSeeds(img, s_kernel):
    dist = cv.distanceTransform(img, cv.DIST_L2, 3)  # distanceTransform(bw, dist, CV_DIST_L2, 3);
    cv.normalize(dist, dist, 0, 1., cv.NORM_MINMAX);
    dist = dist * 255
    dist = np.uint8(dist)
    # cv.imshow('seem0', dist)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (s_kernel * 2, s_kernel * 2))
    seed = cv.morphologyEx(dist, cv.MORPH_ERODE, kernel, iterations=4)
    seed = cv.morphologyEx(seed, cv.MORPH_CLOSE, kernel, iterations=4)
    return seed
Example #22
def calCenterFromContour(pointset, image):
    mask = ac_mask(pointset[:, :2].transpose(), image.transpose().shape).transpose()
    dist = cv2.distanceTransform(mask, cv.CV_DIST_L2, 3)
    value = npy.max(dist)

    indx, indy = npy.where(dist == npy.max(dist))
    indx = npy.cast['float32'](indx)
    indy = npy.cast['float32'](indy)

    center = npy.array([[npy.mean(indy), npy.mean(indx)]])
    return center
Example #23
def segment_on_dt(img):
    dt = cv2.distanceTransform(img, 2, 3) # L2 norm, 3x3 mask
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(numpy.uint8)
    dt = cv2.threshold(dt, 100, 255, cv2.THRESH_BINARY)[1]
    lbl, ncc = label(dt)

    lbl[img == 0] = lbl.max() + 1
    lbl = lbl.astype(numpy.int32)
    cv2.watershed(cv2.cvtColor(img, cv2.COLOR_GRAY2BGR), lbl)
    lbl[lbl == -1] = 0
    return lbl
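Note that this segment_on_dt (and the similar variants above) calls a label() helper that is not shown; it is presumably scipy.ndimage.label, which returns a labeled array and the component count in the same (lbl, ncc) order. A tiny self-contained illustration:

import numpy as np
from scipy.ndimage import label  # presumed source of the label() helper

dt = np.array([[0, 255, 0],
               [0,   0, 0],
               [255, 0, 0]], np.uint8)
lbl, ncc = label(dt)  # lbl: labels 1..ncc, 0 = background; ncc == 2 here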
Example #24
 def update(dummy=None):
     global need_update
     need_update = False
     thrs = cv2.getTrackbarPos('threshold', 'distrans')
     mark = cv2.Canny(img, thrs, 3*thrs)
     dist, labels = cv2.distanceTransform(~mark, cv.CV_DIST_L2, 5)
     if voronoi:
         vis = cm[np.uint8(labels)]
     else:
         vis = cm[np.uint8(dist*2)]
     vis[mark != 0] = 255
     cv2.imshow('distrans', vis)
Example #25
def fg_extract(img):
    # Distance transform. Output 32-bit float image.
    dist_transform = cv2.distanceTransform(img, cv2.cv.CV_DIST_L1, maskSize=5)
    print("max of dist_transform is " + str(dist_transform.max()))
    non_zero_d = dist_transform[dist_transform != 0];
    mean_dist = np.mean(non_zero_d)
    print("mean of non_zero_d is " + str(mean_dist))
    ostuval, fg_img = cv2.threshold(dist_transform, mean_dist, 255, cv2.THRESH_BINARY)
    #struct = np.ones((5, 5), dtype=np.uint8)
    #struct[1:3, 1:3] = 0
    #fg_img = cv2.erode(img, struct)
    return fg_img, mean_dist
Example #26
 def distance_transform_diameter(edge_trace, segmented):
     """
     (dev comments from nefi1)
     my cv2 lacks cv2.DIST_L2, it seems to have the value 2 though, so I use
     that, same for MASK_PRECISE
     <python3 cv2.DIST_L2 equals to 2>
     """
     dt = cv2.distanceTransform(segmented, 2, 0)
     edge_pixels = np.nonzero(edge_trace)
     diameters = defaultdict(list)
     for label, diam in zip(edge_trace[edge_pixels], 2.0 * dt[edge_pixels]):
         diameters[label].append(diam)
     return diameters
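The integer constants mentioned in the docstring are stable across OpenCV versions (DIST_L1 = 1, DIST_L2 = 2, DIST_C = 3, DIST_MASK_PRECISE = 0), so a small compatibility shim avoids hard-coding magic numbers; a sketch:

import cv2

# Fall back to the documented integer values on very old bindings
DIST_L2 = getattr(cv2, 'DIST_L2', 2)
DIST_MASK_PRECISE = getattr(cv2, 'DIST_MASK_PRECISE', 0)

def distance_transform_l2(segmented):
    return cv2.distanceTransform(segmented, DIST_L2, DIST_MASK_PRECISE)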
Example #27
def detect_black_areas(filename):
  m1 = cv2.imread(filename)  # was hard-coded to 'images/brick.jpg', ignoring the parameter
  m2 = cv2.cvtColor(m1, cv2.cv.CV_BGR2GRAY)
  m3 = 255 - cv2.threshold(m2, 0, 255, cv2.THRESH_OTSU)[1]
  m4 = cv2.distanceTransform(m3, cv2.cv.CV_DIST_L2, 3)
  m5 = cv2.normalize(m4, alpha=0., beta=1., norm_type=cv2.NORM_MINMAX)
  m6 = cv2.threshold(m5, .8, 1., cv2.THRESH_BINARY)[1]
  m7 = cv2.dilate(m6, np.ones((3,3), np.uint8), iterations=7)
  cnt = cv2.findContours(m7.astype(np.uint8), 
                           cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]
  cv2.drawContours(m1, cnt, -1, (0,0,255), 2)
  cv2.imshow("image", m1)
  cv2.waitKey()
Example #28
    def _divide(self):
        cv2.distanceTransform(self.biofilm, cv.CV_DIST_L2, 5, 
                              dst=self.distances)
        rows, columns = map(list, self.dividing.nonzero())
        displacements = [0 for _ in rows]

        while rows:
            cellIndex = np.array((rows.pop(), columns.pop()))
            displacement = displacements.pop()

            # this also takes care of the situation where updating the biofilm
            # in-place breaks the distance map implementation.
            if displacement > 10: continue

            foundEmpty = False
            for neighbor in _randomNeighbors():
                index = tuple(cellIndex + neighbor)
                if not self._validIndex(index): continue

                if self.biofilm[index] == EMPTY:
                    foundEmpty = True
                    self.biofilm[index] = ALIVE

            if not foundEmpty:
                minDistance = 999999
                minNeighbor = None
                for neighbor in _neighbors:
                    index = tuple(cellIndex + neighbor)
                    if not self._validIndex(index): continue

                    if self.distances[index] <= minDistance:
                        minDistance = self.distances[index]
                        minNeighbor = index
                rows.append(minNeighbor[0])
                columns.append(minNeighbor[1])
                displacements.append(displacement + 1)
Example #29
    def frames(self):
        for frame in self.inputf.frames():
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
            # invert so that background=black, object=white
            thresh = (255-thresh)
            cv2.imwrite("is00_thresh.jpg", thresh)

            # noise removal
            kernel = np.ones((3, 3), np.uint8)
            opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
            cv2.imwrite("is01_opening.jpg", opening)
            # now white covers foreground but has foreground false positives
            # may be sufficient if we don't care about separating touching objects;
            # also I think there will be cases like the spider where the threshold level
            # will lose the joints of its legs

            # black cover background with background false positives
            sure_bg = cv2.dilate(opening, kernel, iterations=3)
            cv2.imwrite("is02a_sure_bg.jpg", sure_bg)

            # Finding sure foreground area
            dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
            ret, sure_fg = cv2.threshold(dist_transform,self.dt_param*dist_transform.max(),255,0)
            cv2.imwrite("is02b_sure_fg.jpg", sure_fg)

            # unknown region, i.e. part of image we are unsure about
            sure_fg = np.uint8(sure_fg)
            unknown = cv2.subtract(sure_bg,sure_fg)
            cv2.imwrite("is03_unknown.jpg", unknown)

            # Marker labelling
            ret, markers = cv2.connectedComponents(sure_fg)

            # Add one to all labels so that sure background is not 0, but 1
            markers = markers+1
            cv2.imwrite("is04_markers.jpg", markers)

            # Now, mark the region of unknown with zero
            markers[unknown==255] = 0
            cv2.imwrite("is05_markers_u0.jpg", markers)

            markers = cv2.watershed(frame, markers)
            frame[markers == -1] = [255,0,0]
            cv2.imwrite("is07_final.jpg", frame)

            break
        yield None
Example #30
def get_mask_patch(img_orig, mask):
    dist = cv2.distanceTransform(mask, cv.CV_DIST_L1, 3)
    max_val = np.amax(dist)
    x_mask, y_mask =  mask.shape
    x_orig, y_orig = img_orig.shape
    x_coeff = np.divide(x_orig, x_mask)
    y_coeff = np.divide(y_orig, y_mask)
    # force the patch to be max 50 x 50
    # (in the downsampled image)
    if max_val >= 100:
        delta = 25
    else:
        delta = max_val / 4
    x, y = np.unravel_index(np.argmax(dist), mask.shape)
    return img_orig[(x-delta) * x_coeff:(x+delta) * x_coeff,
                    (y-delta) * y_coeff:(y+delta) * y_coeff]
Example #31
def detect(a):

    # convert data into image format
    img = cv2.imdecode(a, cv2.IMREAD_COLOR)  # like cv2.imread()

    # function to snip the image of fly glue trap
    #img = scanProcess(img)
    img = cropped(img)

    # virtually save the image of new cut-out image
    # convert the image format into streaming data
    ret, imgCrop = cv2.imencode('.jpg', img)  # like cv2.imwrite()
    # convert into image format for later use
    imgCrop = cv2.imdecode(imgCrop, cv2.IMREAD_COLOR)

    #perform grayscale conversion and thresholding
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    ret, thresh = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    thresh = gridline(gray, thresh)

    #Setting up size of objects (number of pixels) to segment
    ret, markers, stats, centroids = cv2.connectedComponentsWithStats(thresh)
    size = stats[1:, -1]
    ret = ret - 1

    #declaration for answer image
    imgB = np.zeros((markers.shape))

    #to keep only the component in the image that is above min size
    for i in range(0, ret):
        if (size[i] > 350):
            imgB[markers == i + 1] = 255

    #convert image data type to 8 bit unsigned integer
    imgB = np.uint8(imgB)

    img = cover350(imgB, img)

    # noise removal
    kernel = np.ones((2, 3), np.uint8)
    kernel2 = np.ones((1, 2), np.uint8)

    #opening - erosion then dilate
    opening = cv2.morphologyEx(imgB, cv2.MORPH_OPEN, kernel, iterations=2)

    #closing - dilate then erosion
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel2, iterations=1)

    #sure background area
    sureBack = cv2.dilate(imgB, kernel, iterations=1)

    #finding sure foreground area
    dist_transform = cv2.distanceTransform(closing, cv2.DIST_L2, 3)

    # Threshold
    ret, sureFore = cv2.threshold(dist_transform, 0.1 * dist_transform.max(),
                                  255, 0)

    #finding unknown region
    sureFore = np.uint8(sureFore)
    unknown = cv2.subtract(sureBack, sureFore)

    #marker labelling
    ret, markers, stats, centroids = cv2.connectedComponentsWithStats(sureFore)

    #add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    #mark the region of unknown with zero
    markers[unknown == 255] = 0

    # apply watershed
    markers = cv2.watershed(img, markers)
    img[markers == -1] = [0, 0, 255]  #label in red
    img[markers > 1] = [255, 0, 0]  #label in blue

    # to count detected objects with area greater than 350
    am, pm = ((np.unique(markers, return_counts=True)))
    countB = 0

    for i in range(len(am)):
        if am[i] > 1 and pm[i] > 350:
            countB += 1

    # virtually save the result image
    ret, imgMark = cv2.imencode('.jpg', img)
    imgDiff = cv2.imdecode(imgMark, cv2.IMREAD_COLOR)

    # to calculate difference of initial and after detection image
    # use to find coverage percentage
    difference = cv2.absdiff(imgDiff, imgCrop)

    result = difference.astype(
        np.uint8)  #if difference is all zeros it will return False

    percentage = (np.count_nonzero(result) * 100 / result.size)

    if (percentage < 66):
        percentage = percentage * 1.5

    coverage = ("Coverage of the flies is  {0:.2f}%".format(percentage))

    result = ("[INFO] {} unique segments found".format(countB))

    cv2.waitKey(0)
    cv2.destroyAllWindows()

    return imgMark, result, coverage
Example #32
def utils_preprocess_img(img,
                         resize=256,
                         denoise=False,
                         remove_color=False,
                         morphology=False,
                         segmentation=False,
                         plot=False,
                         figsize=(20, 13)):
    ## original
    img_processed = img
    lst_imgs = [img_processed]
    lst_titles = ["original:  " + str(img_processed.shape)]

    ## scale
    #img_processed = img_processed/255

    ## resize
    if resize is not False:
        img_processed = cv2.resize(img_processed, (resize, resize),
                                   interpolation=cv2.INTER_LINEAR)
        lst_imgs.append(img_processed)
        lst_titles.append("resized:  " + str(img_processed.shape))

    ## denoise (blur)
    if denoise is True:
        img_processed = cv2.GaussianBlur(img_processed, (5, 5), 0)
        lst_imgs.append(img_processed)
        lst_titles.append("blurred:  " + str(img_processed.shape))

    ## remove color
    if remove_color is True:
        img_processed = cv2.cvtColor(img_processed, cv2.COLOR_RGB2GRAY)
        lst_imgs.append(img_processed)
        lst_titles.append("removed color:  " + str(img_processed.shape))

    ## morphology
    if morphology is True:
        if len(img_processed.shape) > 2:
            ret, mask = cv2.threshold(img_processed, 255 / 2, 255,
                                      cv2.THRESH_BINARY)
        else:
            mask = cv2.adaptiveThreshold(img_processed, 255,
                                         cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                         cv2.THRESH_BINARY, 11, 2)
        img_processed = cv2.morphologyEx(mask,
                                         cv2.MORPH_OPEN,
                                         np.ones((3, 3), np.uint8),
                                         iterations=2)
        lst_imgs.append(img_processed)
        lst_titles.append("morphology:  " + str(img_processed.shape))

        ## segmentation (after morphology)
        if segmentation is True:
            background = cv2.dilate(img_processed,
                                    np.ones((3, 3), np.uint8),
                                    iterations=3)
            if len(img_processed.shape) > 2:
                print("--- need to remove color to segment ---")
            else:
                dist = cv2.distanceTransform(img_processed, cv2.DIST_L2, 5)
                ret, foreground = cv2.threshold(dist, 0.7 * dist.max(),
                                                255, 0)
                foreground = np.uint8(foreground)
                ret, markers = cv2.connectedComponents(foreground)
                markers = markers + 1
                unknown = cv2.subtract(background, foreground)
                markers[unknown == 255] = 0
                img_processed = cv2.watershed(
                    cv2.resize(img,
                               img_processed.shape,
                               interpolation=cv2.INTER_LINEAR), markers)
                lst_imgs.append(img_processed)
                lst_titles.append("segmented:  " + str(img_processed.shape))
    if (segmentation is True) and (morphology is False):
        print("--- need to do morphology to segment ---")

    ## plot
    if plot is True:
        plot_imgs(lst_imgs, lst_titles, figsize)
    return img_processed
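A hedged usage sketch for the pipeline above (the input file is hypothetical, and plot_imgs is a helper from the surrounding project that is only needed when plot=True):

import cv2

img = cv2.imread('leaf.jpg')                 # hypothetical input
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
out = utils_preprocess_img(img, resize=256, denoise=True, remove_color=True,
                           morphology=True, segmentation=True, plot=False)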
Example #33
 ycoord=[]
 radius=[]
 silver=[]
 
 for cnt in contours:
     cntarea=cv2.contourArea(cnt)
     if cntarea<=700:
         continue
     #see if it's a circle
     
     #find inscribing circle{
     mask=np.zeros((480,640),np.uint8)
     cv2.drawContours(mask, [cnt],-1, (255), -1)
     #cv2.drawContours(frame, [cnt],-1, (0,0,255), 3)
     #_,mask=cv2.threshold(frame,127,255,cv2.THRESH_BINARY)
     dist_transform=cv2.distanceTransform(mask,cv2.DIST_L2,5)
     argmax=dist_transform.argmax()
     #print(argmax)
     x=argmax%640
     y=int(argmax/640)
     r=dist_transform[y][x]
     #maxDT=np.unravel_index(argmax, dist_transform.shape)
     '''
     circlearea=r*r*3.14159
     if((cntarea-circlearea)>5000):
         continue
     '''
     #}
     
     _,R=cv2.minEnclosingCircle(cnt)
     if R-r>60:
Example #34
def main():
    """
    """
    if len(sys.argv) > 1:
        image = cv2.imread(sys.argv[1], 0)

    else:
        print "load default image"
        image = cv2.imread('NucleiDAPIconfocal.png', 0)


    #Array of images
    imagenes = [
        #title, image, method
        ['original', image, 'gray'],
    ]

    #grayscale image
    #if an error message appears on screen, this is why
    try:
        print "converting to grayscale"
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    except:
        #the example was already in grayscale :S
        #no conversion
        image_gray = copy.copy(image)
    imagenes.append(['image_gray', image_gray, 'gray'])

    #using Gaussian blur was not very practical
    gnucleo = (3, 3)
    gblur_image = cv2.GaussianBlur(image, gnucleo, 0)
    imagenes.append(['gblur_image', gblur_image, 'gray'])

    ret, thresh = cv2.threshold(gblur_image, 80, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #ret, thresh = cv2.threshold(image_gray, 80, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    imagenes.append(['thresh', thresh, 'gray'])

    kernel = numpy.ones((2,2), numpy.uint8) #structuring element
    cl_image = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=7)
    imagenes.append(['cl_image', cl_image, 'gray'])

    ## remove internal points
    #erode the image
    fg_image = cv2.erode(cl_image, None, iterations=2)
    imagenes.append(['fg_image', fg_image, 'gray'])
    fg_image8 = numpy.uint8(fg_image)

    #dilation
    bgt_image = cv2.dilate(cl_image, None, iterations=5)
    ret, bg_image = cv2.threshold(bgt_image, 1, 255, cv2.THRESH_BINARY)

    imagenes.append(['bgt_image', bgt_image, 'gray'])
    imagenes.append(['bg_image', bg_image, 'gray'])

    dist_transform = cv2.distanceTransform(cl_image, cv2.DIST_L2, 5)
    imagenes.append(['dist_transform', dist_transform, 'gray'])

    ret, sure_fg = cv2.threshold(dist_transform, 0.65*dist_transform.max(), 255, 0)
    imagenes.append(['sure_fg', sure_fg, 'gray'])

    sure_fg8 = numpy.uint8(sure_fg)
    ot, contornos, hier = cv2.findContours(sure_fg8, mode=cv2.RETR_CCOMP, method=cv2.CHAIN_APPROX_SIMPLE)
    imagenes.append(['sure_fg8', sure_fg8, 'gray'])

    dist_transform_b = cv2.distanceTransform(bg_image, cv2.DIST_L2, 5)
    imagenes.append(['dist_transform_b', dist_transform_b, 'gray'])

    ret, sure_fg_b = cv2.threshold(dist_transform_b, 0.65*dist_transform.max(), 255, 0)
    imagenes.append(['sure_fg_b', sure_fg_b, 'gray'])

    sure_fg_b8 = numpy.uint8(sure_fg_b)
    ot, contornos_b, hier = cv2.findContours(sure_fg_b8, mode=cv2.RETR_CCOMP, method=cv2.CHAIN_APPROX_SIMPLE)
    imagenes.append(['sure_fg_b8', sure_fg_b8, 'gray'])

    #compare which image gave the best results
    ca = len(contornos or [])
    cb = len(contornos_b or [])
    if ca >= cb:
        print "Number of objects: {}".format(ca)
    else:
        print "Number of objects: {}".format(cb)
        contornos = contornos_b

    circ_img = copy.copy(image)
    num_img = copy.copy(image)
    i = 1
    for cnt in contornos:
        (x, y), rad = cv2.minEnclosingCircle(cnt)
        center = (int(x), int(y))
        center2 = (int(x)-10, int(y)-10)
        rad = int(rad)
        if rad < 3: #some detected contours are very small
            rad += 6
        cv2.circle(circ_img, center, rad, (0,0,0), -1)
        cv2.putText(num_img, str(i), center2, cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,0), 2, cv2.LINE_AA)
        i += 1

    imagenes.append(['num_img', num_img, 'gray'])
    imagenes.append(['circ_img', circ_img, 'gray'])


    save_images(imagenes)
    #show all images
    plot(imagenes)
    pyplot.show()
Example #35
def run_svm_OTSU(modelFile, test_dir):

    global total_Discocyte
    global total_Echinocyte
    global total_Stomatocyte
    global total_Others
    global prediction

    # load model
    pick = open(modelFile, 'rb')  # change model depending on SVC parameter
    model = pickle.load(pick)
    pick.close()

    # image pipeline
    # img1 = glob.glob("test_image/*.jpg")
    # for images in img1:
    #     # load image
    #     bld_img = cv2.imread(images)
    #     bld_img = cv2.resize(bld_img, dsize)
    #     scan_image(bld_img, winW, winH, model)

    # load single image
    blood_img = cv2.imread(test_dir)

    # sharpen
    # kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]], dtype = np.float32)
    # laplacian = cv2.filter2D(blood_img, cv2.CV_32F, kernel)
    # sharp = np.float32(blood_img)
    # blood_img = sharp - laplacian
    # blood_img = np.clip(blood_img, 0, 255)
    # blood_img = blood_img.astype('uint8')
    # laplacian = np.clip(laplacian, 0, 255)
    # laplacian = np.uint8(laplacian)

    #cv2.imshow("sharpened", blood_img)

    cv2.waitKey(0)

    # just making a copy of the image on this line
    #blood_img2 = blood_img

    # load image as grayscale
    blood_img_gray = cv2.cvtColor(blood_img, cv2.COLOR_BGR2GRAY)

    # perform OTSU's binarization method
    ret, blood_img_otsu = cv2.threshold(blood_img_gray, 0, 255, cv2.THRESH_OTSU)
    cv2.imshow("otsu", blood_img_otsu)

    # inverse otsu (black bg, white foreground)
    invblood_otsu = cv2.bitwise_not(blood_img_otsu)
    # cv2.imshow("inverse otsu", invblood_otsu)

    # noise removal
    kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7)) #7,7
    opening = cv2.morphologyEx(invblood_otsu, cv2.MORPH_OPEN, kernel1, iterations=1)
    cv2.imshow("erode-dilate", opening)

    cv2.waitKey(0)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel1, iterations=10)
    cv2.imshow("bg", sure_bg)

    cv2.waitKey(0)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 3)
    cv2.normalize(dist_transform, dist_transform, 0, 1.0, cv2.NORM_MINMAX)
    cv2.imshow("normalized", dist_transform)

    cv2.waitKey(0)

    ret, sure_fg = cv2.threshold(dist_transform, 0.157 * dist_transform.max(), 255, 0) #0.157
    cv2.imshow("fg", sure_fg)

    # sure_fg = cv2.dilate(sure_fg, kernel1)
    # cv2.imshow("peaks", sure_fg)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    cv2.waitKey(0)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv2.watershed(blood_img, markers)
    blood_img[markers == -1] = [0, 255, 255]

    ws = color.label2rgb(markers, bg_label=0)
    cv2.imshow("overlay on original image", blood_img)
    cv2.imshow("watershed", ws)

    cv2.waitKey(0)
    #
    # # # inverse otsu (black bg, white foreground)
    # # invblood_otsu = cv2.bitwise_not(blood_img_otsu)
    # #
    # # cv2.imshow("inverse otsu", invblood_otsu)
    #
    # sure_fg_8u = sure_fg.astype('uint8')
    #
    # contours, hierarchy = cv2.findContours(sure_fg_8u, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    #
    # markers = np.zeros(sure_fg.shape, dtype=np.int32)
    #
    # for i in range(len(contours)):
    #     cv2.drawContours((markers, contours, i, (i+1), -1))
    #
    # cv2.circle(markers, (5,5), 3, (255,255,255), -1)
    # cv2.imshow('Markers', markers * 10000)
    #
    # cv2.waitKey(0)

    contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        cv2.drawContours(opening, [cnt], 0, 255, -1)

    cv2.imshow("filled", opening)

    img = opening

    # # fill holes found on Inverse Otsu
    # contours, hierarchy = cv2.findContours(invblood_otsu, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # for cnt in contours:
    #     cv2.drawContours(invblood_otsu, [cnt], 0, 255, -1)
    #
    # cv2.imshow("filled", invblood_otsu)
    #
    # # Clean cell edges
    # kernel = np.ones((2, 2), np.uint8)
    # img = cv2.morphologyEx(invblood_otsu, cv2.MORPH_OPEN, kernel, iterations=3)
    # #cv2.imshow('Morphological', img)

    # BlobDetector Parameters
    params = cv2.SimpleBlobDetector_Params()
    params.minThreshold = 0
    params.maxThreshold = 256
    params.filterByArea = True
    params.maxArea = 13000
    params.minArea = 1000 #2000
    params.filterByColor = True
    params.blobColor = 255
    params.filterByCircularity = False
    params.maxCircularity = 1.00
    params.minCircularity = 0.75
    params.filterByConvexity = True
    params.maxConvexity = 1.00
    params.minConvexity = 0.00
    params.filterByInertia = True
    params.maxInertiaRatio = 1.00
    params.minInertiaRatio = 0.00

    # BlobDetector
    detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(img)

    draw = cv2.drawKeypoints(img, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # for keypoints in keypoints:
    #     print(keypoints.size)

    cv2.imshow("Keypoints", draw)

    cv2.waitKey(0)

    #General Test Param
    # # Position offset variables
    # ul = 50
    # jl = 100
    #
    # #patch size
    # PTCH_SZ = 55 #52


    #Stomatest_Param
    # Position offset variables
    ul = 35
    jl = 70

    # patch size
    PTCH_SZ = 30  # 52

    # #Echinotest Param
    # # Position offset variables
    # ul = 40
    # jl = 70
    #
    # # patch size
    # PTCH_SZ = 40  # 52

    # # find center of each blob
    # contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # # contours, hierarchy = cv2.findContours(blood_img_otsu, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #
    # cv2.imshow("pre-moment", img)
    #
    # for c in contours:
    #     M = cv2.moments(c)
    #
    #     if M["m00"] != 0:
    #         cX = int(M["m10"] / M["m00"])
    #         cY = int(M["m01"] / M["m00"])
    #     else:
    #         cX, cY = 0, 0
    #
    #     cv2.circle(blood_img, (cX, cY), 5, (255, 0, 0), -1)
        # cv2.imshow("Centroids", blood_img)
        #cv2.putText(blood_img, "centroid", (cX - 25, cY - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)

    for kp in keypoints:
        cX = int(kp.pt[0])
        cY = int(kp.pt[1])

        cv2.circle(blood_img, (cX, cY), 3, (255, 0, 0), -1)

        # cv2.waitKey(0)

        # Position variables
        # initial location variables (x1.y1)
        # final location variables (x2,y2)

        # variables used to create ROI on the parts of the image
        x1 = cX - ul
        y1 = cY + ul

        x2 = x1 + jl
        y2 = y1 - jl

        # x1 = cX - w
        # y1 = cY + h
        #
        # x2 = x1 + w
        # y2 = y1 - h

        # create rectangle on each identified blob forming around the center of the blob
        cv2.rectangle(blood_img, (x1, y1), (x2, y2), (0, 255, 200), 1)
        cv2.imshow("Moments", blood_img)

        cv2.waitKey(0)
        # cv2.imshow("Image", blood)

        # this can be commented out for now, since the currently trained dataset is based on grayscale only.


        # If patch of image to be extracted exceeds the image size (640x640),
        # ignore patch, then move on to next patch
        if x1 < 0 or x2 > 640 or y1 > 640 or y2 < 0:
            continue

        try:
            clone = blood_img_otsu.copy()

            patch1 = clone[cY-PTCH_SZ:cY+PTCH_SZ, cX-PTCH_SZ:cX+PTCH_SZ]
            # patch1 = clone[y1:y1 + jl, x1:x1 + jl]
            # patch1 = clone[x1:x2, y1:y2]
            # patch1 = clone[y1:y2, x1,x2]

            # patch1 = clone[x1:x1 + jl, y1:y1 + jl]
            # patch2 = cv2.cvtColor(patch1, cv2.COLOR_BGR2GRAY)
            patch3 = cv2.resize(patch1, (48, 48))

            cv2.namedWindow("cropped", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("cropped", 48 * 6, 48 * 6)
            cv2.imshow("cropped", patch3)

            # cv2.waitKey(0)

            patch4 = np.array(patch3).flatten()
            # patch_final = patch4[0]
            patch_final = patch4.reshape(-1, 2304)

            # prediction = None

            prediction = model.predict(patch_final)

            # prediction[0] = 1

            if prediction[0] == 0:
                total_Others = total_Others + 1
                cv2.rectangle(blood_img, (x1, y1), (x2, y2), (255, 200, 100), 2)
                cv2.putText(blood_img, "others", (cX - 25, cY - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 200, 100), 2)

            elif prediction[0] == 1:
                total_Discocyte = total_Discocyte + 1
                cv2.rectangle(blood_img, (x1, y1), (x2, y2), (0, 0, 255), 2)
                cv2.putText(blood_img, "discocyte", (cX - 25, cY - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

            elif prediction[0] == 2:
                total_Echinocyte = total_Echinocyte + 1
                cv2.rectangle(blood_img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(blood_img, "echinocyte", (cX - 25, cY - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            elif prediction[0] == 3:
                total_Stomatocyte = total_Stomatocyte + 1
                cv2.rectangle(blood_img, (x1, y1), (x2, y2), (255, 80, 0), 2)
                cv2.putText(blood_img, "stomatocyte", (cX - 25, cY - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 80, 0),2)

            cv2.imshow("Image", blood_img)

        except BaseException as ex:
            #print(ex.message, ex.args)
            continue
Example #36
img = cv2.imread('../img/coins_connected.jpg')
rows, cols = img.shape[:2]
cv2.imshow('original', img)


mean = cv2.pyrMeanShiftFiltering(img, 20, 50)
cv2.imshow('mean', mean)

gray = cv2.cvtColor(mean, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3,3), 0)

_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
cv2.imshow('thresh', thresh)

dst = cv2.distanceTransform(thresh, cv2.DIST_L2, 3)
dst = ( dst / (dst.max() - dst.min()) * 255 ).astype(np.uint8)
cv2.imshow('dst', dst)


localMx = cv2.dilate(dst, np.ones((50,50), np.uint8))
lm = np.zeros((rows, cols), np.uint8)
lm[(localMx==dst) & (dst != 0)] = 255
cv2.imshow('localMx', lm)


seeds = np.where(lm ==255)
seed = np.stack( (seeds[1], seeds[0]), axis=-1)
fill_mask = np.zeros((rows+2, cols+2), np.uint8)
for x,y in seed:
    ret = cv2.floodFill(mean, fill_mask, (x,y), (255,255,255), (10,10,10), (10,10,10))
Example #37
        C_flag[min_prv_mask_idy, min_prv_mask_idx - 2] = 1
        C_flag[min_prv_mask_idy + 2, min_prv_mask_idx] = 1
        C_flag[min_prv_mask_idy - 2, min_prv_mask_idx] = 1
        print("Previous mask min at value " + str(min_prv_mask_val) + " K")
    C_Core = C_flag[:]

    #%% Start spreading
    #% First round, separate the connected blobs relating to the cyclone
    C_binary = np.where(C_BTemp > 280, 0, C_BTemp)
    C_binary = np.where(C_binary > 1, 1, C_binary)
    C_binary8 = C_binary.astype(np.uint8)

    I_idx = get_coord_to_idx(I_lat[C_i], I_lon[C_i])
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(C_binary8, cv2.MORPH_OPEN, kernel, iterations=2)
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 0)

    #    dist_matrix = dist_transform[I_idx[0]-100:I_idx[0]+100,I_idx[1]-100:I_idx[1]+100]
    #    dist_max = np.unravel_index(np.argmax(dist_matrix, axis=None), dist_matrix.shape)
    #    dist_max_big = [I_idx[0]-100 + dist_max[0],I_idx[1] - 100 + dist_max[1]]
    #    markers_one = np.zeros([DIM_LAT,DIM_LON])
    #    markers_one[dist_max_big[0],dist_max_big[1]] = 1

    #    dist_transform_mod = np.where(dist_transform<20,0,dist_transform)
    labels_ws = watershed(-dist_transform, C_flag, mask=C_binary8)

    C_binary8_second = np.where(labels_ws > 0, C_binary8, 0)

    #%% Second round - divide the previous mask into blobs
    seed = np.copy(C_binary8_second)
    seed[1:-1, 1:-1] = C_binary8_second.max()
Example #38
async def setup_learner():
    await download_file(export_file_url, path / export_file_name)
    try:
        img_new = cv2.imread(str(path), cv2.IMREAD_COLOR)
        img_new = cv2.fastNlMeansDenoisingColored(img_new, None, 10, 10, 7, 21)
        img_sv = img_new
        img_test = img_sv
        img = img_new
        r = img.copy()
        r[:, :, 0] = 0
        r[:, :, 1] = 0
        img = r
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        test_r = img
        im_gray = cv2.cvtColor(r, cv2.COLOR_BGR2GRAY)
        im_blur = cv2.GaussianBlur(im_gray, (5, 5), 0)
        kernel_sharpening = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
        im_sharp = cv2.filter2D(im_blur, -1, kernel_sharpening)
        img = im_sharp
        th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                    cv2.THRESH_BINARY, 11, 2)
        img = im_sharp
        ret, thresh3 = cv2.threshold(img, 127, 255, cv2.THRESH_TRUNC)
        kernel = np.ones((3, 3), np.uint8)
        opening = cv2.morphologyEx(thresh3,
                                   cv2.MORPH_OPEN,
                                   kernel,
                                   iterations=2)
        ret, th = cv2.threshold(thresh3, 0, 255,
                                cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        kernel = np.ones((3, 3), np.uint8)
        opening = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel, iterations=2)
        sure_bg = cv2.dilate(th, kernel, iterations=3)
        dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
        ret, sure_fg = cv2.threshold(dist_transform,
                                     0.005 * dist_transform.max(), 255, 0)
        sure_fg = np.uint8(sure_fg)
        unknown = cv2.subtract(sure_bg, sure_fg)
        ret, markers = cv2.connectedComponents(sure_fg)
        markers = markers + 1
        markers[unknown == 255] = 0
        markers = cv2.watershed(img_sv, markers)
        img_sv[markers == -1] = [0, 255, 0]
        img_sv[markers == 1] = [0, 0, 0]
        from skimage.filters import threshold_otsu
        thresh_val = threshold_otsu(opening)
        mask = np.where(opening > thresh_val, 1, 0)
        if np.sum(mask == 0) < np.sum(mask == 1):
            mask = np.where(mask, 0, 1)
        from scipy import ndimage
        labels, nlabels = ndimage.label(mask)
        all_cords = {}
        for label_ind, label_coords in enumerate(ndimage.find_objects(labels)):
            cell = im_gray[label_coords]
            all_cords[label_ind] = label_coords
            if np.product(cell.shape) < 10:
                mask = np.where(labels == label_ind + 1, 0, mask)
        labels, nlabels = ndimage.label(mask)
        i = 0
        res = {}
        for ii, obj_indices in enumerate(
                ndimage.find_objects(labels)[0:nlabels]):
            cell = img_sv[obj_indices]
            res[i] = cell
            i = i + 1
        i = 0
        features = {}
        for i in res:
            gcolor = res[i]
            h, w, _ = gcolor.shape
            ggray = np.array([[gcolor[i, j, 2] for j in range(w)]
                              for i in range(h)],
                             dtype=np.uint8)
            area = np.sum(
                np.sum([[1.0 for j in range(w) if ggray[i, j]]
                        for i in range(h)]))
            mean_area = area / (h * w)
            r, b, g = np.sum(
                [gcolor[i, j] for j in range(w)
                 for i in range(h)], axis=0) / (area * 256)
            _, _, eigen_value = pca(ggray)
            eccentricity = eigen_value[0] / eigen_value[1]
            l = [
                mean_area, r, b, g, eigen_value[0], eigen_value[1],
                eccentricity
            ]
            features[i] = np.array(l)
            i = i + 1
        out = {}
        #learn = load_learner(path, export_file_name)
        #Change Working directory of pkl file
        model = keras.models.load_model('../input/weight/weights.pkl')
        out = {}
        for i in features:
            out[i] = model.predict(np.array([features[i]]))
        good = not_good = 0
        for i in out:
            s = res[i]
            if np.argmax(out[i][0]) == 0:
                good += 1
                x1 = all_cords[i][0].start
                y1 = all_cords[i][1].start
                x2 = all_cords[i][1].stop
                y2 = all_cords[i][0].stop
                cv2.rectangle(img_test, (x2, x1), (y1, y2), (255, 0, 0), 8)
            else:
                x1 = all_cords[i][0].start
                y1 = all_cords[i][1].start
                x2 = all_cords[i][1].stop
                y2 = all_cords[i][0].stop
                not_good += 1
                cv2.rectangle(img_test, (x2, x1), (y1, y2), (0, 0, 255), 3)
        p = (good / (good + not_good) * 100)
        print("Number of good grain :", good)
        print("Number of impure grains or impurity:", not_good)
        print("Percentage Purity is:", p)
        return p
    except RuntimeError as e:
        if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:
            print(e)
            message = "\n\nThis model was trained with an old version of fastai and will not work in a CPU environment.\n\nPlease update the fastai library in your training environment and export your model again.\n\nSee instructions for 'Returning to work' at https://course.fast.ai."
            raise RuntimeError(message)
        else:
            raise
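The snippet above calls a `pca` helper that is never defined. A minimal sketch of what it could look like, assuming it performs PCA over the coordinates of the non-zero pixels and returns the mean, the eigenvectors, and the eigenvalues in descending order (only the eigenvalues are consumed, for the eccentricity feature):

import numpy as np

def pca(gray):
    # coordinates of non-zero pixels, as (x, y) points
    ys, xs = np.nonzero(gray)
    pts = np.column_stack([xs, ys]).astype(np.float64)
    mean = pts.mean(axis=0)
    # 2x2 covariance of the centred points
    cov = np.cov((pts - mean).T)
    eigvals, eigvecs = np.linalg.eigh(cov)
    # eigh returns ascending eigenvalues; flip to descending
    order = np.argsort(eigvals)[::-1]
    return mean, eigvecs[:, order], eigvals[order]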
Example #39
def build_dataset(base="COMP9517 20T2 Group Project Image Sequences/DIC-C2DH-HeLa",
                  kernel_sz=5,
                  scale=1):
    # Initialize the kernel used for morphological operations
    ker = cv.getStructuringElement(cv.MORPH_CROSS,(kernel_sz, kernel_sz))
    # ker = np.ones((kernel_sz, kernel_sz), np.uint8)
    samples = list()
    
    # Build a list of samples
    for d in os.listdir(base):
        if "Mask" in d:
            d_n = base + '/' + d
            for f in os.listdir(d_n):
                f_n = d_n + '/' + f
                samples.append((f_n, f_n.replace(" Masks", "").replace("mask", "")))

    # Iterate through each sample and its mask
    for i, (mask_path, img_path) in enumerate(samples):
        # Load the image and its mask
        img = cv.imread(img_path, cv.IMREAD_GRAYSCALE)
        mask = cv.imread(mask_path, cv.IMREAD_ANYDEPTH)

        seg = np.zeros(mask.shape, np.uint8)
        bor = np.zeros(mask.shape, np.uint8)
        dst_exp = np.zeros(mask.shape, np.uint8)

        # Iterate through unique values in the mask image
        for j in np.unique(mask):
            if j == 0:
                continue

            # Create an image with just that object, erode it to emphasize the 
            # border and add it to the current segmentation mask
            tmp = (mask == j).astype(np.uint8)

            # Distance transform of the object, re-encoded exponentially:
            # 256 ** (d / d.max()) - 1 maps the object centre to 255
            dst = cv.distanceTransform(tmp, cv.DIST_L2, 3)
            dst = ((np.exp((np.log(256) / dst.max())) ** dst) - 1).astype(np.uint8)
            dst_exp += dst

            tmp = cv.erode(tmp, ker)
            seg += tmp

            # Dilate the same object mask, and add it to the border image
            tmp = cv.dilate((mask == j).astype(np.uint8), ker, iterations=3)
            bor += tmp
            
        # If any pixel has a value greater than 1, then at least 2 dilated 
        # objects occupy that space. Filter to get the border, then subtract the
        # object segmentation mask to get only the borders
        bor = (bor > 1).astype(np.uint8)
        bor *= (1 - seg)

        img = cv.resize(img, None, fx=scale, fy=scale) 
        # img = img - img.min()
        # img = img * (255 // img.max())
        seg = cv.resize(seg, None, fx=scale, fy=scale, 
                        interpolation=cv.INTER_NEAREST)
        bor = cv.resize(bor, None, fx=scale, fy=scale, 
                        interpolation=cv.INTER_NEAREST)
        dst_exp = cv.resize(dst_exp, None, fx=scale, fy=scale)

        # Write images to their respective folders
        cv.imwrite(f"data/img/{i}.png", img)
        cv.imwrite(f"data/seg/{i}.png", seg)
        cv.imwrite(f"data/bnd/{i}.png", bor)
        cv.imwrite(f"data/dst/{i}.png", dst_exp)
Example #40
def main(img_path):
    src = cv.imread(img_path)
    # Show source image
    cv.imshow('Source Image', src)
    ## [load_image]
    gray = cv.cvtColor(src, code=cv.COLOR_BGR2GRAY)  # imread loads BGR
    thr_b = getTresholdByGrad(gray)
    # thr_b = 40
    print('\n\n\n thr_b: {}'.format(thr_b))
    ## [black_bg]
    # Change the background from white to black, since that will help later to extract
    # better results during the use of Distance Transform
    # src[np.all(src == 255, axis=2)] = 0
    src[np.all(src < thr_b, axis=2)] = 0   # turn the background black
    # src[np.all(src == 0, axis=2)] = 255
    # Show output image
    # cv.imshow('Black Background Image', src)
    ## [black_bg]

    ## [sharp]
    # Create a kernel that we will use to sharpen the image:
    # an approximation of the second derivative, a quite strong kernel
    kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]], dtype=np.float32)

    # Do the Laplacian filtering as-is.
    # We need to convert everything to something deeper than CV_8U,
    # because the kernel has some negative values and we can expect,
    # in general, a Laplacian image with negative values. BUT an 8-bit
    # unsigned int (the type we are working with) can only hold values
    # from 0 to 255, so the negative numbers would be truncated.
    imgLaplacian = cv.filter2D(src, cv.CV_32F, kernel)
    sharp = np.float32(src)
    imgResult = sharp - imgLaplacian

    # convert back to 8bits gray scale
    imgResult = np.clip(imgResult, 0, 255)
    imgResult = imgResult.astype('uint8')
    imgLaplacian = np.clip(imgLaplacian, 0, 255)
    imgLaplacian = np.uint8(imgLaplacian)

    cv.imshow('Laplace Filtered Image', imgLaplacian)
    cv.imshow('New Sharped Image', imgResult)
    ## [sharp]

    ## [bin]
    # Create binary image from source image
    bw = cv.cvtColor(imgResult, cv.COLOR_BGR2GRAY)
    _, bw = cv.threshold(bw, thr_b, 255, cv.THRESH_BINARY)
    cv.imshow('Binary Image', bw)
    ## [bin]

    ## [dist]
    # Perform the distance transform algorithm
    dist = cv.distanceTransform(bw, cv.DIST_L2, 3)

    # Normalize the distance image for range = {0.0, 1.0}
    # so we can visualize and threshold it
    cv.normalize(dist, dist, 0, 1.0, cv.NORM_MINMAX)
    cv.imshow('Distance Transform Image', dist)
    ## [dist]

    ## [peaks]
    # Threshold to obtain the peaks
    # This will be the markers for the foreground objects
    _, dist = cv.threshold(dist, 0.4, 1.0, cv.THRESH_BINARY)

    # Dilate a bit the dist image
    kernel1 = np.ones((3, 3), dtype=np.uint8)
    dist = cv.dilate(dist, kernel1)
    cv.imshow('Peaks', dist)
    ## [peaks]

    ## [seeds]
    # Create the CV_8U version of the distance image
    # It is needed for findContours()
    dist_8u = dist.astype('uint8')

    # Find total markers (OpenCV 4 returns (contours, hierarchy))
    contours, _ = cv.findContours(dist_8u, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

    # Create the marker image for the watershed algorithm
    markers = np.zeros(dist.shape, dtype=np.int32)

    # Draw the foreground markers
    print('len(contours): {}'.format(len(contours)))
    for i in range(len(contours)):
        # drawContours fills the area bounded by the contour when
        # thickness < 0; give every marker its own label (i + 1)
        cv.drawContours(markers, contours, i, (i + 1,), thickness=-1)

    # Draw the background marker
    # cv.circle(markers, (5,5), 122, (255,255,255), -1)
    cv.imshow('Markers', markers*10000)
    ## [seeds]

    ## [watershed]
    # Perform the watershed algorithm
    cv.watershed(imgResult, markers)

    #mark = np.zeros(markers.shape, dtype=np.uint8)
    mark = markers.astype('uint8')
    mark = cv.bitwise_not(mark)
    # uncomment this if you want to see what the mark
    # image looks like at that point
    # cv.imshow('Markers_v2', mark)

    # Generate random colors
    colors = []
    for contour in contours:
        colors.append((rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256)))

    # Create the result image
    dst = np.zeros((markers.shape[0], markers.shape[1], 3), dtype=np.uint8)

    # Fill labeled objects with random colors
    for i in range(markers.shape[0]):
        for j in range(markers.shape[1]):
            index = markers[i,j]
            if index > 0 and index <= len(contours):
                dst[i,j,:] = colors[index-1]

    # Visualize the final image
    cv.imshow('Final Result', dst)
    ## [watershed]

    cv.waitKey()
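`getTresholdByGrad` is not defined in this snippet. One plausible, purely hypothetical implementation picks the gradient-weighted mean gray level, i.e. the intensity around which the strong edges sit:

import cv2 as cv
import numpy as np

def getTresholdByGrad(gray):
    # Sobel gradient magnitude
    gx = cv.Sobel(gray, cv.CV_32F, 1, 0, ksize=3)
    gy = cv.Sobel(gray, cv.CV_32F, 0, 1, ksize=3)
    mag = cv.magnitude(gx, gy)
    # mean intensity weighted by edge strength
    return float((gray.astype(np.float32) * mag).sum() / (mag.sum() + 1e-9))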
Example #41
def main():

    img = cv2.imread(filename)
    imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    ## Binary (OTSU's method)
    ret, thresh = cv2.threshold(imgray, 0, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    #ret, thresh = cv2.threshold(imgray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    print('Threshold (pixel):', ret)

    ## Remove noise
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    ## Distance transformation
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    #	dist_transform = cv2.distanceTransform(opening,cv2.DIST_L1,3)
    #	dist_transform = cv2.distanceTransform(opening,cv2.DIST_C,3)
    dist_transform_8u = np.uint8(dist_transform)

    ## Find particles
    particles = find_peak(dist_transform_8u, k_size)
    print(particles)
    print('Number of particles:', len(particles))

    ## Shape description (snake algorithm)
    init_list = []
    init_list2 = []
    snake_list = []
    particle_list = []
    k = 0
    for p in particles:
        k += 1
        print(k)
        init = set_init(p[0], p[1], p[2], p[3])
        init_list.append(init)
        init_list2.extend(init)
        contour = active_contour(gaussian(thresh, 3),
                                 init,
                                 alpha=0.015,
                                 beta=10,
                                 gamma=0.001)
        snake_list.append(contour)
        particle_list.extend(contour)
    print(np.array(snake_list))
    # contourArea expects a single contour, so report each snake's area
    for s in snake_list:
        print(cv2.contourArea(np.float32(s)))
    with open("snake_list_%s.csv" % filename.split('.')[0], "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(snake_list)

    ## Plot figures
    plt.figure(), plt.imshow(img), plt.title('Original'), plt.xticks(
        []), plt.yticks([])
    plt.savefig("figures_%s_%s.png" % (filename.split('.')[0], 'Original'))

    plt.figure(), plt.imshow(imgray), plt.title('Gray'), plt.xticks(
        []), plt.yticks([])
    plt.savefig("figures_%s_%s.png" % (filename.split('.')[0], 'Gray'))

    plt.figure(), plt.imshow(thresh), plt.title('Binary'), plt.xticks(
        []), plt.yticks([])
    plt.savefig("figures_%s_%s.png" % (filename.split('.')[0], 'Binary'))

    plt.figure(), plt.imshow(
        dist_transform_8u,
        cmap=plt.cm.gray), plt.title('Distance transform'), plt.xticks(
            []), plt.yticks([])
    plt.savefig("figures_%s_%s.png" % (filename.split('.')[0], 'Distance'))

    plt.figure(), plt.hist(imgray.ravel(), 256,
                           color='black'), plt.title('Histogram'), plt.xticks(
                               []), plt.yticks([])
    plt.savefig("figures_%s_%s.png" % (filename.split('.')[0], 'Histogram'))

    plt.close()

    plt.figure(), plt.imshow(thresh), plt.title('Binary+Snake'), plt.xticks(
        []), plt.yticks([])
    for p in range(len(particles)):
        #plt.plot(particles[p][0], particles[p][2], 'og')#, lw=3)
        plt.plot(init_list[p][:, 0], init_list[p][:, 1], '--r')  #, lw=3)
        plt.plot(snake_list[p][:, 0], snake_list[p][:, 1], '-b')  #, lw=3)
    plt.savefig("figures_%s_Binary_Snake.png" % filename.split('.')[0])
    plt.close()

    plt.figure(), plt.imshow(
        dist_transform_8u,
        cmap=plt.cm.gray), plt.title('Distance+Snake'), plt.xticks(
            []), plt.yticks([])
    for p in range(len(particles)):
        #plt.plot(particles[p][0], particles[p][2], 'og')#, lw=3)
        plt.plot(init_list[p][:, 0], init_list[p][:, 1], '--r')  #, lw=3)
        plt.plot(snake_list[p][:, 0], snake_list[p][:, 1], '-b')  #, lw=3)
    plt.savefig("figures_%s_Distance_Snake.png" % filename.split('.')[0])
    plt.close()

    plt.figure(), plt.imshow(img), plt.title('Original+Snake'), plt.xticks(
        []), plt.yticks([])
    for p in range(len(particles)):
        #plt.plot(particles[p][0], particles[p][2], 'og')#, lw=3)
        #plt.plot(init_list[p][:, 0], init_list[p][:, 1], '--r')#, lw=3)
        plt.plot(snake_list[p][:, 0], snake_list[p][:, 1], '-b')  #, lw=3)
    plt.savefig("figures_%s_Original_Snake.png" % filename.split('.')[0])
    plt.close()
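`find_peak` and `set_init` are external helpers. A hypothetical `set_init`, consistent with how the result is plotted above (column 0 as x, column 1 as y) and assuming each particle carries a centre (x, y) and extents (w, h), would seed an ellipse of points for the snake:

import numpy as np

def set_init(x, y, w, h):
    # ellipse of 100 sample points around the particle, in (x, y) order
    s = np.linspace(0, 2 * np.pi, 100)
    cols = x + w / 2.0 * np.cos(s)
    rows = y + h / 2.0 * np.sin(s)
    return np.array([cols, rows]).T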
Example #42
def estimate_navigation_channels(floor_mask,
                                 idealized_height_image,
                                 m_per_pix,
                                 display_on=False,
                                 verbose=False):
    # min_robot_width_m : The best case minimum width of the robot in meters when moving forward and backward.

    # threshold based on minimum radius of the robot
    min_robot_radius_m = 0.4
    # subtract pixels to provide an optimistic estimate and account for possible quantization
    min_robot_radius_pix = (min_robot_radius_m / m_per_pix) - 2.0

    # create optimistic traversable region mask
    traversable_image = np.zeros_like(floor_mask, dtype=np.uint8)
    # if the region is floor or unobserved, consider it to be traversable
    traversable_selector = (floor_mask > 0) | (idealized_height_image == 0)
    traversable_image[traversable_selector] = 255

    if display_on:
        cv2.imshow('traversable image', traversable_image)

    # compute distance map: distance from untraversable regions
    original_dist_transform = cv2.distanceTransform(traversable_image,
                                                    cv2.DIST_L2, 5)
    dist_transform = original_dist_transform.copy()
    norm_dist_transform = cv2.normalize(dist_transform, None, 0, 255,
                                        cv2.NORM_MINMAX, cv2.CV_8U)
    if display_on:
        cv2.imshow('distance transform of the traversable image',
                   norm_dist_transform)

    ########
    # remove unobserved and untraversable pixels from the distance map

    # set parts of the distance transform that are off of the observed
    # floor to zero
    floor_selector = (floor_mask < 1)
    dist_transform[floor_selector] = 0

    # set parts of the distance transform that represent free space
    # less than the robot would require to zero
    robot_radius_selector = (dist_transform < min_robot_radius_pix)
    dist_transform[robot_radius_selector] = 0
    if display_on:
        norm_dist_transform = cv2.normalize(dist_transform, None, 0, 255,
                                            cv2.NORM_MINMAX, cv2.CV_8U)
        cv2.imshow('cropped distance transform', norm_dist_transform)
    ########

    # create kernel for morphological operations
    kernel_width_pix = 11
    kernel_radius_pix = (kernel_width_pix - 1) // 2  # integer, for cv2.circle
    kernel = np.zeros((kernel_width_pix, kernel_width_pix), np.uint8)
    cv2.circle(kernel, (kernel_radius_pix, kernel_radius_pix),
               kernel_radius_pix, 255, -1)

    # fill in floor mask holes
    closed_floor_mask = cv2.morphologyEx(floor_mask, cv2.MORPH_CLOSE, kernel)

    # find the boundary of the floor
    dilated_floor = cv2.dilate(closed_floor_mask, kernel, iterations=1)
    floor_boundary = dilated_floor - closed_floor_mask

    # Estimate the locations of exits. Creates a binary image with exits marked with 255.
    min_connectivity_dist_m = min_robot_radius_m
    min_connectivity_dist_pix = min_robot_radius_pix
    connectivity_image = original_dist_transform.copy()
    connectivity_image[connectivity_image < min_connectivity_dist_pix] = 0
    connectivity_image[floor_boundary == 0] = 0
    map_exits = np.zeros_like(connectivity_image, dtype=np.uint8)
    map_exits[connectivity_image > 0] = 255
    exit_dilation = True
    if exit_dilation:
        # attempt to increase the chance of a vertex being labeled as an exit
        kernel = np.ones((21, 21), np.uint8)
        map_exits = cv2.dilate(map_exits, kernel, iterations=1)

    # find exit regions
    number_of_exits, exit_label_image = simple_connected_components(map_exits)
    print('number_of_exits =', number_of_exits)

    distance_map = original_dist_transform.copy()
    distance_map[closed_floor_mask < 1] = 0

    exit_locations = []
    for i in range(number_of_exits)[1:]:
        distance = distance_map.copy()
        not_component_selector = (exit_label_image != i)
        distance[not_component_selector] = 0
        y, x = np.unravel_index(np.argmax(distance), distance.shape)
        exit_locations.append([x, y])

    if display_on:
        norm_connectivity_image = cv2.normalize(connectivity_image, None, 0,
                                                255, cv2.NORM_MINMAX,
                                                cv2.CV_8U)
        cv2.imshow('connectivity image', norm_connectivity_image)
        cv2.imshow('estimated map exits', map_exits)
        cv2.imshow('floor boundary', floor_boundary)
        norm_distance_map = cv2.normalize(distance_map, None, 0, 255,
                                          cv2.NORM_MINMAX, cv2.CV_8U)
        cv2.imshow('raw distance map', norm_distance_map)

    return dist_transform, map_exits, distance_map, exit_locations
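`simple_connected_components` is not shown; given how its return values are used (a label count that includes the background label 0, plus a label image), a thin wrapper around OpenCV's connected components would fit:

import cv2

def simple_connected_components(binary_image, connectivity=8):
    # returns (number of labels incl. background, int32 label image)
    number_of_components, label_image = cv2.connectedComponents(
        binary_image, connectivity=connectivity)
    return number_of_components, label_image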
Example #43
import cv2  # Import relevant libraries
import numpy as np

img = cv2.imread('cameraman.png', 0)  # Read in image

height = img.shape[0]  # Get the dimensions
width = img.shape[1]

# Define mask
mask = 255 * np.ones(img.shape, dtype='uint8')

# Draw circle at x = 100, y = 70 of radius 25 and fill this in with 0
cv2.circle(mask, (100, 70), 25, 0, -1)

# Apply distance transform to mask
out = cv2.distanceTransform(mask, cv2.DIST_L2, 3)

# Define scale factor
scale_factor = 10

# Create output image that is the same as the original
filtered = img.copy()

# Create floating point copy for precision
img_float = img.copy().astype('float')

# Number of channels
if len(img_float.shape) == 3:
    num_chan = img_float.shape[2]
else:
    # If there is a single channel, make the image 3D with a singleton
    # channel dimension and treat it like the multi-channel case
    num_chan = 1
    img_float = img_float[:, :, None]
Example #44
    opening = cv2.morphologyEx(gray_mask, cv2.MORPH_CLOSE, kernel)

    erosion1 = cv2.erode(opening, kernel, iterations=1)

    opening2 = cv2.morphologyEx(erosion1,
                                cv2.MORPH_CLOSE,
                                kernel,
                                iterations=2)

    #####################################################################
    sure_img = cv2.dilate(opening2, kernel, iterations=3)
    ######################################################################
    # contours, hierarchy = cv2.findContours(sure_img, cv2.RETR_TREE+cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    #######################################################################
    dist_transform = cv2.distanceTransform(opening2, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.5 * dist_transform.max(),
                                 255, 0)

    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_img, sure_fg)

    ret, markers = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv2.watershed(bilateralFilter, markers)

    # print(markers.shape, markers.dtype)
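    # A typical follow-up for this fragment, since watershed marks
    # boundary pixels with -1 (painted here onto the color image that
    # was passed in as `bilateralFilter`):
    bilateralFilter[markers == -1] = [255, 0, 0]  # boundaries in blue (BGR)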
Example #45
def textDetectWatershed(thresh):
    """ Text detection using watershed algorithm - NOT IN USE """
    # According to: http://docs.opencv.org/trunk/d3/db4/tutorial_py_watershed.html
    img = cv2.cvtColor(cv2.imread("data/textdet/%s.jpg" % IMG),
                       cv2.COLOR_BGR2RGB)
    img = resize(img, 3000)
    thresh = resize(thresh, 3000)
    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.01 * dist_transform.max(),
                                 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers += 1

    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv2.watershed(img, markers)
    implt(markers, t='Markers')
    image = img.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    for mark in np.unique(markers):
        # mark == 0 --> background
        if mark == 0:
            continue

        # Draw it on mask and detect biggest contour
        mask = np.zeros(gray.shape, dtype="uint8")
        mask[markers == mark] = 255

        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        c = max(cnts, key=cv2.contourArea)

        # Draw a bounding rectangle if it contains text
        x, y, w, h = cv2.boundingRect(c)
        cv2.drawContours(mask, c, 0, (255, 255, 255), cv2.FILLED)
        maskROI = mask[y:y + h, x:x + w]
        # Ratio of white pixels to area of bounding rectangle
        r = cv2.countNonZero(maskROI) / (w * h)

        # Limits for text
        if r > 0.2 and 2000 > w > 15 and 1500 > h > 15:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    implt(image)
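`resize` is an undefined helper here; a plausible version (an assumption, not the author's code) scales the image so its height matches the requested size while preserving the aspect ratio:

import cv2

def resize(img, height):
    scale = height / float(img.shape[0])
    return cv2.resize(img, None, fx=scale, fy=scale)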
Example #46
def segment_grabcut(image, window=None, seeds=[]):
    """Segments an image using grabcut technique. Initialised with edges.

    Parameters
    ----------
    image : (M, N, 3) array
        Image to process.
    window : tuple, (x, y, w, h)
        Optional subwindow in image.

    Returns
    -------
    (rects, display) : list, (M, N, 3) array
        Region results and visualization image.
    """
    if window:
        subimage = np.array(image)
        x, y, w, h = window
        image = subimage[y:y + h, x:x + w]
    rects, display = segment_edges(image,
                                   variance_threshold=100,
                                   line_filter=0,
                                   size_filter=0)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    (k, mag0) = cv2.threshold(gray, 128, 255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

    h, w = image.shape[:2]
    initial = np.zeros((h, w), np.uint8)
    initial[:] = cv2.GC_BGD
    initial[mag0 == 255] = cv2.GC_FGD
    for i, rect in enumerate(rects):
        cv2.drawContours(initial, [rect[4]], -1, int(cv2.GC_FGD), -1)

    initial[display[:, :, 0] > 0] = cv2.GC_PR_FGD
    bgmodel = np.zeros((1, 65), np.float64)
    fgmodel = np.zeros((1, 65), np.float64)
    mask = initial
    rect = None
    cv2.grabCut(image, mask, rect, bgmodel, fgmodel, 1, cv2.GC_INIT_WITH_MASK)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')

    contours, hierarchy = cv2.findContours(mask2.copy(), cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    rects = [cv2.boundingRect(c) for c in contours]

    display = np.dstack(3 * [255 * mask2.astype(np.uint8)])
    if seeds:
        distance = cv2.distanceTransform(mask2, cv2.DIST_L2, 3)
        markers = np.zeros(mask2.shape, dtype=np.int32)
        markers[mask2 == 0] = 255
        for i, seed in enumerate(seeds):
            sx, sy = seed
            markers[sy, sx] = i + 1
        if USE_OPENCV_WATERSHED:
            cv2.watershed(display, markers)
        else:
            markers = watershed(mask2, markers, mask=mask2)
        new_rects = []
        for i, seed in enumerate(seeds):
            mask = np.array(markers == i + 1, dtype=np.uint8)
            contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
            if contours:
                contours = sorted(contours, key=cv2.contourArea, reverse=True)
                new_rects.append(cv2.boundingRect(contours[0]))
                colour = [randint(100, 255), randint(100, 255), 0]
                cv2.drawContours(display, [contours[0]], -1, colour, -1)
        rects = new_rects
    if window:
        new_rects = []
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        for rect in rects:
            dx, dy = 0, 0
            new_rect = (rect[0] + x - dx, rect[1] + y - dy, rect[2] + 2 * dx,
                        rect[3] + 2 * dy)

            im = gray[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]]
            if np.var(im) > 200 and rect[2] * rect[3] > w * h / 4E3:
                new_rects.append(new_rect)
        rects = new_rects
    return rects, display
Example #47
def get_distances(map):
    """
        Computes distance
        transform.
    """
    return cv.distanceTransform(map, cv.DIST_L2, 5)
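Example use, assuming `import cv2 as cv` and a uint8 occupancy map where free space is non-zero; every free pixel gets its Euclidean distance, in pixels, to the nearest zero pixel:

import numpy as np

occupancy = np.full((5, 5), 255, np.uint8)
occupancy[2, 2] = 0  # one occupied cell in the middle
print(get_distances(occupancy))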
Example #48
def main():
    ## load data
    #img = cv.imread("water_coins.jpg")
    img, singleJJ = my_data(show=False, return_stamp=True)

    # let's look at a small part
    '''
    ## try 3D plot
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    xs = np.linspace(0,1,200)
    ys = np.linspace(0,1,80)
    xy = np.array( np.meshgrid(xs,ys)[0] )
    ax.plot_surface(xy,xy,singleJJ)
    plt.title("Cut")
    plt.show()
    return 0;
    #'''

    ## convert to openCV's favorite version
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    plt.figure()
    plt.title("Gray")
    plt.imshow(gray, cmap='gray')

    ## threshold the devices to find only exposed region:
    ## short story: Gaussian blur then filter gives clean-looking segmentation
    ##              the true magic, is the OTSU method
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    ret, thresh = cv.threshold(blur,
                               0,
                               255,
                               type=cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
    plt.figure()
    plt.title("thresh")
    plt.imshow(thresh, cmap='gray')
    '''
    #ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
    ret, thresh = cv.threshold(gray,0,255,type=cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
    plt.figure()
    plt.title("thresh")
    plt.imshow(thresh, cmap='gray') 

    ## try a Gaussian blur first
    blur = cv.GaussianBlur(gray,(5,5),0)
    ret, thresh_blur = cv.threshold(blur,0,255,type=cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
    plt.figure()
    plt.title("blur --> thresh")
    plt.imshow(thresh_blur, cmap='gray')

    ## or perhaps an adaptive thresholding
    th3 = cv.adaptiveThreshold(gray,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv.THRESH_BINARY,11,2)
    plt.figure()
    plt.title("Adaptive")
    plt.imshow(th3, cmap='gray')
    '''

    ## noise removal (needed by the distance transform below)
    kernel = np.ones((3, 3), np.uint8)
    opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)
    plt.figure()
    plt.imshow(opening, cmap='gray')
    plt.title("Opening")
    # sure background area
    sure_bg = cv.dilate(opening, kernel, iterations=3)
    plt.figure()
    plt.imshow(sure_bg, cmap='gray')
    plt.title("sure_bg")
    # Finding sure foreground area
    dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)
    ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(),
                                255, 0)
    plt.figure()
    plt.imshow(sure_fg, cmap='gray')
    plt.title("sure_fg")
    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0

    markers = cv.watershed(img, markers)
    img[markers == -1] = [255, 0, 0]

    plt.figure()
    plt.imshow(markers, cmap='gray')
    plt.title("Markers")
    plt.show()
Example #49
def spatter(x, severity=1):
    c = [(0.65, 0.3, 4, 0.69, 0.6, 0), (0.65, 0.3, 3, 0.68, 0.6, 0),
         (0.65, 0.3, 2, 0.68, 0.5, 0), (0.65, 0.3, 1, 0.65, 1.5, 1),
         (0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]
    x_PIL = x
    x = np.array(x, dtype=np.float32) / 255.

    liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])

    liquid_layer = gaussian(liquid_layer, sigma=c[2])
    liquid_layer[liquid_layer < c[3]] = 0
    if c[5] == 0:
        liquid_layer = (liquid_layer * 255).astype(np.uint8)
        dist = 255 - cv2.Canny(liquid_layer, 50, 150)
        dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
        _, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
        dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
        dist = cv2.equalizeHist(dist)
        ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
        dist = cv2.filter2D(dist, cv2.CV_8U, ker)
        dist = cv2.blur(dist, (3, 3)).astype(np.float32)

        m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
        m /= np.max(m, axis=(0, 1))
        m *= c[4]
        # water is pale turquoise
        color = np.concatenate(
            (175 / 255. * np.ones_like(m[..., :1]), 238 / 255. *
             np.ones_like(m[..., :1]), 238 / 255. * np.ones_like(m[..., :1])),
            axis=2)

        color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)

        if len(x.shape) < 3 or x.shape[2] < 3:
            add_spatter_color = cv2.cvtColor(np.clip(m * color, 0, 1),
                                             cv2.COLOR_BGRA2BGR)
            add_spatter_gray = rgb2gray(add_spatter_color)

            return np.clip(x + add_spatter_gray, 0, 1) * 255

        else:

            x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)

            return cv2.cvtColor(np.clip(x + m * color, 0, 1),
                                cv2.COLOR_BGRA2BGR) * 255
    else:
        m = np.where(liquid_layer > c[3], 1, 0)
        m = gaussian(m.astype(np.float32), sigma=c[4])
        m[m < 0.8] = 0

        x_rgb = np.array(x_PIL.convert('RGB'))

        # mud brown
        color = np.concatenate((63 / 255. * np.ones_like(x_rgb[..., :1]),
                                42 / 255. * np.ones_like(x_rgb[..., :1]),
                                20 / 255. * np.ones_like(x_rgb[..., :1])),
                               axis=2)
        color *= m[..., np.newaxis]
        if len(x.shape) < 3 or x.shape[2] < 3:
            x *= (1 - m)
            return np.clip(x + rgb2gray(color), 0, 1) * 255

        else:
            x *= (1 - m[..., np.newaxis])
            return np.clip(x + color, 0, 1) * 255
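Example use; the input is assumed to be a PIL image (the mud branch calls x_PIL.convert('RGB')), and 'frame.png' is a placeholder path:

from PIL import Image

img = Image.open('frame.png')
corrupted = spatter(img, severity=4)  # severities 4-5 take the mud branch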
Example #50
def find_exits(floor_mask,
               max_height_image,
               m_per_pix,
               min_robot_width_m,
               robot_x_pix,
               robot_y_pix,
               display_on=False):
    # A mask with optimistic traversable pixels consisting of floor
    # and unobserved pixels. This should result in traversable paths
    # that move from observed floor to unobserved floor, which can be
    # used to find exits.
    traversable_selector = (floor_mask > 0) | (max_height_image == 0)
    traversable_mask = 255 * np.uint8(traversable_selector)

    # Fill in small non-floor regions (likely noise)

    # TODO: improve this. For example, fill in isolated pixels that
    # are too small to trust as obstacles. Options include a hit or
    # miss filter, matched filter, or speckle filter.
    fill_in = True
    if fill_in:
        kernel = np.ones((3, 3), np.uint8)
        traversable_mask = cv2.morphologyEx(traversable_mask, cv2.MORPH_CLOSE,
                                            kernel)

    # Optimistic estimate of robot width. Use ceil to account for
    # possible quantization. Consider adding a pixel, also.
    min_robot_radius_pix = np.ceil((min_robot_width_m / 2.0) / m_per_pix)

    # compute distance map: distance from untraversable regions
    #
    # cv2.DIST_L2 : Euclidean distance
    #
    # 5 is the mask size : "finds the shortest path to the nearest
    # zero pixel consisting of basic shifts: horizontal, vertical,
    # diagonal, or knight's move (the latest is available for a 5x5
    # mask)" - OpenCV documentation
    distance_map = cv2.distanceTransform(traversable_mask, cv2.DIST_L2, 5)

    # fill in floor mask holes
    #kernel = np.ones((11,11), np.uint8)
    kernel_width_pix = 11
    kernel_radius_pix = (kernel_width_pix - 1) // 2  # integer, for cv2.circle
    kernel = np.zeros((kernel_width_pix, kernel_width_pix), np.uint8)
    cv2.circle(kernel, (kernel_radius_pix, kernel_radius_pix),
               kernel_radius_pix, 255, -1)
    closed_floor_mask = cv2.morphologyEx(floor_mask, cv2.MORPH_CLOSE, kernel)

    # find the boundary of the floor
    dilated_floor = cv2.dilate(closed_floor_mask, kernel, iterations=1)
    floor_boundary = dilated_floor - closed_floor_mask

    # Estimate the locations of exits. Creates a binary image with exits marked with 255.
    min_connectivity_dist_pix = min_robot_radius_pix  #(min_connectivity_dist_mm / mm_per_pix) - 2.0
    connectivity_image = distance_map.copy()
    # remove regions that are too narrow for the robot to drive through
    connectivity_image[connectivity_image < min_connectivity_dist_pix] = 0
    # Only keep pixels that are at the boundary of the observed
    # floor. This should cut between the observed and unobserved
    # traversible regions.
    connectivity_image[floor_boundary == 0] = 0
    # convert float image to uint8 image
    map_exits = np.zeros_like(connectivity_image, dtype=np.uint8)
    # exit pixels have a value of 255
    map_exits[connectivity_image > 0] = 255
    # enlarge the map exit pixels so that they will intersect with the floor
    exit_dilation = True
    if exit_dilation:
        # attempt to increase the chance of a vertex being labeled as an exit
        # create kernel for morphological operations
        kernel_width_pix = 11
        kernel_radius_pix = (kernel_width_pix - 1) // 2
        kernel = np.zeros((kernel_width_pix, kernel_width_pix), np.uint8)
        cv2.circle(kernel, (kernel_radius_pix, kernel_radius_pix),
                   kernel_radius_pix, 255, -1)
        map_exits = cv2.dilate(map_exits, kernel, iterations=1)

    # Select the connected component of the floor on which the robot
    # is located.
    h, w = closed_floor_mask.shape
    accessible_floor_mask = np.zeros((h + 2, w + 2), np.uint8)
    #possibly add to floodFill in the future: flags = cv2.FLOODFILL_FIXED_RANGE
    cv2.floodFill(floor_mask, accessible_floor_mask,
                  (robot_x_pix, robot_y_pix), 255)
    accessible_floor_mask = 255 * accessible_floor_mask[1:-1, 1:-1]

    # only consider map exit candidates that are connected to parts of
    # the floor that can be reached
    map_exits[accessible_floor_mask == 0] = 0

    # Ignore exits that are very close to the robot. These can be due
    # to the robot's own body occluding the floor. Otherwise, they can
    # represent real exits that the robot is already next to and does
    # not need to navigate to.
    ignore_radius_pix = int(4.0 * min_robot_radius_pix)
    cv2.circle(map_exits, (robot_x_pix, robot_y_pix), ignore_radius_pix, 0, -1)

    # Dilate exits in order to merge exits that are very close to one
    # another.
    kernel_width_pix = 21  #11 #15
    kernel_radius_pix = (kernel_width_pix - 1) // 2
    kernel = np.zeros((kernel_width_pix, kernel_width_pix), np.uint8)
    cv2.circle(kernel, (kernel_radius_pix, kernel_radius_pix),
               kernel_radius_pix, 255, -1)
    map_exits = cv2.dilate(map_exits, kernel, iterations=1)

    # only consider map exit candidates that are connected to parts of
    # the floor that can be reached
    map_exits[accessible_floor_mask == 0] = 0

    # find exit regions
    number_of_exits, exit_label_image = simple_connected_components(
        map_exits, connectivity=8)

    # find geometric centers of the exits
    label_indices = range(number_of_exits)[1:]
    label_image = exit_label_image
    centers_of_mass = nd.measurements.center_of_mass(label_image, label_image,
                                                     label_indices)
    ones = np.ones_like(label_image)
    sums = nd.measurements.sum(ones, label_image, label_indices)
    print('centers_of_mass =', centers_of_mass)

    if display_on:
        print('find_exits: number_of_exits =', number_of_exits)
        h, w = max_height_image.shape
        color_im = np.zeros((h, w, 3), np.uint8)
        color_im[:, :, 0] = max_height_image
        color_im[:, :, 1] = max_height_image
        color_im[:, :, 2] = map_exits
        for s, (c_y, c_x) in zip(sums, centers_of_mass):
            if s > 50:
                c_x = int(round(c_x))
                c_y = int(round(c_y))
                radius = 5  #3
                cv2.circle(color_im, (c_x, c_y), radius, [255, 255, 255], -1)
        scale_divisor = 2
        nh = h // scale_divisor
        nw = w // scale_divisor
        color_im = cv2.resize(color_im, (nw, nh))
        cv2.imshow('find_exits: exits on the map', color_im)

    map_exits_mask = map_exits

    min_area = 50
    exit_points = [[int(round(c_x)), int(round(c_y))]
                   for s, (c_y, c_x) in zip(sums, centers_of_mass)
                   if s > min_area]

    return exit_points, map_exits_mask, number_of_exits, exit_label_image
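A toy invocation sketch, with every value made up for illustration and assuming the helpers `find_exits` depends on are importable:

import numpy as np

floor = np.zeros((200, 200), np.uint8)
floor[20:180, 20:180] = 255               # observed floor
heights = np.ones((200, 200), np.uint8)   # everything observed...
heights[90:110, 170:200] = 0              # ...except a gap at the floor's edge
exit_points, exits_mask, n_exits, exit_labels = find_exits(
    floor, heights, m_per_pix=0.01, min_robot_width_m=0.3,
    robot_x_pix=100, robot_y_pix=100)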
Example #51
# BUT a 8bits unsigned int (the one we are working with) can contain values from 0 to 255
# so the possible negative number will be truncated
imgLaplacian = cv.filter2D(src, cv.CV_32F, kernel)
sharp = np.float32(src)
imgResult = sharp - imgLaplacian
# convert back to 8bits gray scale
imgResult = np.clip(imgResult, 0, 255)
imgResult = imgResult.astype('uint8')
imgLaplacian = np.clip(imgLaplacian, 0, 255)
imgLaplacian = np.uint8(imgLaplacian)
#cv.imshow('Laplace Filtered Image', imgLaplacian)
cv.imshow('New Sharped Image', imgResult)
bw = cv.cvtColor(imgResult, cv.COLOR_BGR2GRAY)
_, bw = cv.threshold(bw, 40, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
cv.imshow('Binary Image', bw)
dist = cv.distanceTransform(bw, cv.DIST_L2, 3)
# Normalize the distance image for range = {0.0, 1.0}
# so we can visualize and threshold it
cv.normalize(dist, dist, 0, 1.0, cv.NORM_MINMAX)
cv.imshow('Distance Transform Image', dist)
_, dist = cv.threshold(dist, 0.4, 1.0, cv.THRESH_BINARY)
# Dilate a bit the dist image
kernel1 = np.ones((3, 3), dtype=np.uint8)
dist = cv.dilate(dist, kernel1)
cv.imshow('Peaks', dist)
dist_8u = dist.astype('uint8')
# Find total markers (OpenCV 4 returns (contours, hierarchy))
contours, _ = cv.findContours(dist_8u, cv.RETR_EXTERNAL,
                              cv.CHAIN_APPROX_SIMPLE)
# Create the marker image for the watershed algorithm
markers = np.zeros(dist.shape, dtype=np.int32)
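A minimal continuation in the spirit of the OpenCV watershed tutorial this fragment follows (a sketch, not part of the original snippet): give each peak contour its own label, add a background seed, then run the watershed on the sharpened image.

for i in range(len(contours)):
    cv.drawContours(markers, contours, i, (i + 1), -1)
cv.circle(markers, (5, 5), 3, (255, 255, 255), -1)  # background seed
markers = cv.watershed(imgResult, markers)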
Example #52
    def incomingDepthData(self, data):
        self.started()

        t0 = rospy.Time.now()
        img = self.ros2cv(data, dt="passthrough")
        img = np.asarray(img, dtype=np.float32)
        img /= self.maxDepth * 1000  # convert to 0-1 units, 1=maxDepth
        img[np.bitwise_not(np.isfinite(img))] = 0
        img[img < 0.01] = 1
        img = np.clip(img, 0.0, 1.0)
        img = img.reshape((480, 640))

        t1 = rospy.Time.now()

        # Only draw images if we actually display them
        showing = self.pub_depth.get_num_connections(
        ) > 0 or self.pub_camera.get_num_connections() > 0

        # Dont do anything until we first create a background

        if self.counterSteps > 0:
            # gather background
            if self.counter < self.counterSteps:
                self.sig_backgroundStep.emit(self.counter, self.counterSteps)
                img[img < 0.001] = 1
                self.acc[:, :, self.counter] = img

                if showing:
                    show = np.asarray(cv2.cvtColor(img * 255,
                                                   cv2.COLOR_GRAY2BGR),
                                      dtype=np.uint8)
                    cv2.putText(show, str(self.counterSteps - self.counter),
                                (5, 30), cv2.FONT_HERSHEY_PLAIN, 2,
                                (255, 0, 0))
                self.counter += 1

            # create background
            if self.counter == self.counterSteps:

                if self.method == 0:  # Median
                    med = np.median(self.acc, 2)
                    max = np.max(self.acc, 2)
                    std = np.std(self.acc, 2)

                    self.depth = max
                    for i in range(self.counterSteps):
                        local = self.acc[:, :, i]
                        mask = cv2.absdiff(med, local) < std
                        new_min = cv2.min(self.depth, local)
                        self.depth[mask] = new_min[mask]
                else:
                    min = np.min(self.acc, 2)
                    self.depth = min

                rospy.loginfo("Built background")
                self.sig_backgroundStep.emit(self.counter, self.counterSteps)
                self.sig_background.emit(True)
                self.counter += 1

            # extract foreground
            if self.counter > self.counterSteps:

                t2 = rospy.Time.now()
                img = np.clip(img, 0.0, 1.0)

                # Our BW image
                img = cv2.min(self.depth, img)

                # Binary image of close objects
                d = np.subtract(self.depth, img)
                d = cv2.morphologyEx(d,
                                     cv2.MORPH_OPEN,
                                     self.kernel,
                                     iterations=self.morphIterations)
                bm = cv2.threshold(d, self.bg_thresh / self.maxDepth, 255,
                                   cv2.THRESH_BINARY)[1]
                bm = np.asarray(bm, dtype=np.uint8)
                dsts = cv2.distanceTransform(bm, cv2.DIST_L2, 3)

                t3 = rospy.Time.now()

                if showing:
                    show = np.asarray(cv2.cvtColor(img * 255,
                                                   cv2.COLOR_GRAY2BGR),
                                      dtype=np.uint8)
                    show[:, :, 2] = cv2.max(bm, show[:, :, 2])
                t4 = rospy.Time.now()

                # Detect possible flies
                flies = []
                while (True):
                    # Look for biggest blob
                    mn, dist, mnl, mxl = cv2.minMaxLoc(dsts, mask=bm)

                    if dist < 3:
                        break
                    # Extract blob info

                    h, w = img.shape[:2]
                    mask = np.zeros((h + 2, w + 2), np.uint8)

                    retval, rect = cv2.floodFill(bm,
                                                 mask,
                                                 mxl,
                                                 0,
                                                 flags=8
                                                 | cv2.FLOODFILL_FIXED_RANGE
                                                 | cv2.FLOODFILL_MASK_ONLY)

                    # Get pose %TODO should compute average over whole blob
                    if self.depthEstMode == 0:
                        # Center Pixel Depth
                        x, y, z = self.projectRay(
                            mxl[0], mxl[1],
                            self.maxDepth * img[mxl[1], mxl[0]])
                    else:
                        x, y, z = self.projectRay(
                            mxl[0], mxl[1],
                            self.maxDepth * img[mxl[1], mxl[0]])
                        rospy.logerr(
                            "Not implemented depth estimation mode [%d], defaulting to center depth",
                            self.depthEstMode)

                    #elif self.depthEstMode == 1:
                    #    # Median depth of blob
                    #elif self.depthEstMode == 2:
                    #    # Closest point of blob

                    # Estimate width
                    xD, yD, zD = self.projectRay(
                        mxl[0] + dist, mxl[1] + dist,
                        self.maxDepth * img[mxl[1], mxl[0]])
                    w = math.sqrt((x - xD)**2 + (y - yD)**2) * 2

                    # Estimatea distance from background
                    b_diff = cv2.mean(d, mask=mask[1:-1,
                                                   1:-1])[0] * self.maxDepth

                    if abs(w - self.estSize) > self.estSizeTol:  # if w<0.02 or w>0.10:
                        if showing:
                            cv2.circle(show, mxl, 1, (0, 200, 0))
                            cv2.circle(show, mxl, int(dist), (120, 0, 0), 1)
                        #cv2.putText(show, str(round(w*100, 1))+"cm", mxl, cv2.FONT_HERSHEY_PLAIN, 1.2, (255,0,0))
                    else:
                        flies.append([dist, mxl, x, y, z, w, b_diff])
                    bm -= mask[1:-1, 1:-1] * 255

                t5 = rospy.Time.now()

                if len(flies) > 0:
                    # Sort by distance from background#
                    #TODO

                    if self.priority == 0:
                        # Distance from background
                        flies = sorted(flies, key=itemgetter(6), reverse=True)
                    elif self.priority == 1:
                        # Closest to goal
                        self.getCameraGoal()
                        flies = sorted([
                            f + [(f[2] - self.goal[0])**2 +
                                 (f[3] - self.goal[1])**2 +
                                 (f[4] - self.goal[2])**2] for f in flies
                        ],
                                       key=itemgetter(7),
                                       reverse=False)
                    elif self.priority == 2:
                        # Depth
                        flies = sorted(flies, key=itemgetter(4), reverse=False)
                        # Closest to GT
                        #gt = self.getGTDist()
                        #flies = sorted([f+[(f[2]-gt[0])**2+(f[3]-gt[1])**2+(f[4]-gt[2])**2] for f in flies], key=itemgetter(7),reverse=False)

                    elif self.priority == 3:
                        # Estimated object size
                        flies = sorted(
                            [f + [abs(self.estSize - f[5])] for f in flies],
                            key=itemgetter(7),
                            reverse=False)
                    elif self.priority == 4:
                        # Blob Size
                        flies = sorted(flies, key=itemgetter(0), reverse=True)
                    else:
                        rospy.logerr("Unknown priority type: %d",
                                     self.priority)

                    if len(flies) > 0:
                        dist, mxl, x, y, z, w, b_diff = flies[0][0:7]

                        # publish with level rotation
                        t, r = self.getKinectPoint()
                        #self.pub_tf.sendTransform([z,-x-0.02,-y], (0,0,0,1), rospy.Time.now(), "cf_xyz", "camera_depth_frame")#TODO maybe we need to rotate 90" so x is aligned with optical axis
                        #self.pub_tf.sendTransform([x,y,z], (-r[0],-r[1],-r[2],r[3]), rospy.Time.now(), "cf_xyz", "camera_depth_optical_frame")#TODO maybe we need to rotate 90" so x is aligned with optical axis

                        # Point in world frame with R from crayzflie

                        self.pub_tf.sendTransform(
                            [x, y, z], quaternion_inverse(r), rospy.Time.now(),
                            "cf_xyz", "camera_depth_optical_frame"
                        )  #TODO maybe we need to rotate 90" so x is aligned with optical axis

                    t6 = rospy.Time.now()

                    if showing:
                        for i, flie in enumerate(flies):
                            dist, mxl, x, y, z, w, b_diff = flies[i][0:7]
                            if i == 0:
                                cv2.circle(show, mxl, int(dist), (0, 255, 0),
                                           4)
                            else:
                                cv2.circle(show, mxl, int(dist), (12, 50, 12),
                                           1)
                            #cv2.circle(show, mxl, 2, (0, 255, 0))
                            #cv2.circle(show, mxl, int(b_diff*10), (255, 0, 0))
                            #cv2.putText(show, str(i),                    (mxl[0]-30, mxl[1]),    cv2.FONT_HERSHEY_PLAIN, 1.4, (0, 255, 255))
                            #cv2.putText(show, str(round(z, 2))+"m",      (mxl[0]+14, mxl[1]+00), cv2.FONT_HERSHEY_PLAIN, 1.4, (255, 255, 0))
                            #cv2.putText(show, str(round(w*100, 1))+"cm", (mxl[0]+14, mxl[1]+20), cv2.FONT_HERSHEY_PLAIN, 1.4, (255,0,255))
                            #cv2.putText(show, str(round(b_diff,2))+"m",  (mxl[0]+14, mxl[1]+40), cv2.FONT_HERSHEY_PLAIN, 1.4, (255, 0, 0))
                    t7 = rospy.Time.now()

                    rospy.loginfo(
                        "%7.1f %7.1f %7.1f %7.1f %7.1f %7.1f  = %10.1f ",
                        1000. * (t1 - t0).to_sec(), 1000. * (t2 - t1).to_sec(),
                        1000. * (t3 - t2).to_sec(), 1000. * (t4 - t3).to_sec(),
                        1000. * (t5 - t4).to_sec(), 1000. * (t7 - t6).to_sec(),
                        1000 * (t7 - t0).to_sec())
        else:
            show = np.asarray(cv2.cvtColor(img * 255, cv2.COLOR_GRAY2BGR),
                              dtype=np.uint8)

        if showing:
            try:
                img = self.bridge.cv2_to_imgmsg(show, "bgr8")
                self.cameraInfo.header.stamp = rospy.Time.now()
                img.header = self.cameraInfo.header
                self.pub_camera.publish(self.cameraInfo)
                self.pub_depth.publish(img)
            except CvBridgeError as e:
                rospy.logerr("Image sending problem: %s", e)
Example #53
        k = 1
    if k % 2 == 0:
        k += 1
    b1 = b1 / 100
    okern = cv2.getStructuringElement(cv2.MORPH_RECT, (k, k))
    ekern = cv2.getStructuringElement(cv2.MORPH_RECT, (ek, ek))
    dkern = cv2.getStructuringElement(cv2.MORPH_RECT, (dk, dk))
    blurimg = cv2.GaussianBlur(img, (blur, blur), 0)
    noise = cv2.morphologyEx(blurimg, cv2.MORPH_OPEN, okern, iterations=oi)
    imggray = cv2.cvtColor(noise, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(imggray, 0, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    back = cv2.dilate(thresh, dkern, iterations=di)
    #fore=cv2.erode(thresh,ekern,iterations=ei)
    dist_transform = cv2.distanceTransform(thresh, cv2.DIST_L2, a1)
    ret, fore = cv2.threshold(dist_transform, b1 * dist_transform.max(), 255,
                              0)
    dist_transform = cv2.normalize(dist_transform, dist_transform, 0, 255,
                                   cv2.NORM_MINMAX)
    dist_transform = np.uint8(dist_transform)
    cv2.imshow('dist_transform', dist_transform)
    fore = np.uint8(fore)
    unknown = cv2.subtract(back, fore)
    ret, markers = cv2.connectedComponents(fore)
    markers += 1
    markers[unknown == 255] = 0
    markers = cv2.watershed(img, markers)
    img[markers == -1] = [255, 0, 0]
    cv2.imshow('fore', fore)
    cv2.imshow('image', img)
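The free variables above (k, ek, dk, blur, oi, di, a1, b1) are not defined in the fragment; one plausible guess is that they come from OpenCV trackbars, with b1 read as a percentage (hence the b1 / 100):

import cv2

def read_params(win='image'):
    names = ['k', 'ek', 'dk', 'blur', 'oi', 'di', 'a1', 'b1']
    return {n: cv2.getTrackbarPos(n, win) for n in names}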
Example #54
def threshold_callback(params):
    global img_gray, thresh, gamma, img_gamma
    thresh = params
    #blur grey 3*3
    img_gray = cv2.medianBlur(img_gray, 5)
    #        img_gray = cv2.GaussianBlur(img_gray,(5,5), 0)

    #    ret, thresh_out = cv2.threshold(img_gray, thresh, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #    img_gray = adjust_gamma(img_gray, gamma)
    #    cv2.imshow('gamma', img_gray)
    thresh_out = cv2.adaptiveThreshold(img_gray, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 11, 2)
    #    thresh_out = cv2.adaptiveThreshold(img_gray, 255, \
    #		cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,11,2)
    #    cv2.imshow('thresh out', thresh_out)

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh_out,
                               cv2.MORPH_GRADIENT,
                               kernel,
                               iterations=5)
    #opening = cv2.morphologyEx(thresh_out, cv2.MORPH_OPEN, kernel, iterations = 2)
    #opening = cv2.morphologyEx(thresh_out, cv2.MORPH_CLOSE, kernel, iterations = 2)
    #opening = cv2.morphologyEx(thresh_out, cv2.MORPH_CLOSE + cv2.MORPH_OPEN, kernel, iterations = 2)
    #    cv2.imshow('opening', opening)
    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=5)
    #    cv2.imshow('sure_bg', sure_bg)

    dist = cv2.distanceTransform(opening, cv2.DIST_L2, 3)
    cv2.normalize(dist, dist, 0, 255, cv2.NORM_MINMAX)
    dist = np.uint8(dist)

    #    cv2.imshow('dist', dist)
    #    ret, sure_fg = cv2.threshold(dist, dist.max()*0.7, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    sure_fg = cv2.adaptiveThreshold(dist, 255, \
      cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    #    cv2.imshow('sure_fg', sure_fg)

    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)
    #    cv2.imshow('unknown', unknown)

    #unknown = cv2.subtract(unknown, thresh_out)
    #cv2.imshow('unknown', unknown)
    #    laplacian = cv2.Laplacian(unknown,cv2.CV_64F)
    #    cv2.imshow('laplacian', laplacian)
    # circles = cv2.HoughCircles(unknown ,cv2.HOUGH_GRADIENT,1,20,
    #                         param1=50,param2=30,minRadius=0,maxRadius=0)

    #    im2, contours, hierarchy = cv2.findContours(dist, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #    im2, contours, hierarchy = cv2.findContours(unknown, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = cv2.findContours(unknown, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    #    hull = []
    minRect = []
    #    minEllipse = []
    drawing = np.zeros(thresh_out.shape, np.uint8)
    #    drawing = np.zeros(dist.size, np.uint8)

    #    for i in range(len(contours)):
    #	hull.append(cv2.convexHull(contours[i]))
    hsv_drawing = hsv.copy()
    ret = []
    for i in range(len(contours)):
        minRect.append(cv2.minAreaRect(contours[i]))
#        if len(contours[i]) < 5:
#       	    minRect.append(cv2.minAreaRect(contours[i]))
#            minEllipse.append(cv2.fitEllipse(contours[i]))
#            cv2.ellipse(drawing, minEllipse[i], (255,0,0), 2)
#            cv2.drawContours(drawing, contours, int(i), (0,255,0), 0)

#    cv2.drawContours(drawing, contours, 0, (0,0,255), 1)
    for i in range(len(contours)):
        #	if contours[i].size > 100:
        #        cv2.drawContours(drawing, contours, i, (0,255,0), 1)
        #        cv2.drawContours(drawing, hull, i, (0,255,0), 0)

        box = cv2.boxPoints(minRect[i])
        box = np.intp(box)  # np.int0 was removed in NumPy 2.0
        if box[1][1] < box[3][1] and box[0][0] < box[2][0]:
            img_roi = hsv[int(box[1][1]):int(box[3][1]),
                          int(box[0][0]):int(box[2][0])]

            img_rgb_roi = cv2.cvtColor(img_roi, cv2.COLOR_HSV2BGR)

            img_roi = cv2.cvtColor(img_roi, cv2.COLOR_BGR2GRAY)
            #cv2.imshow('ROI', img_roi)
            # create a CLAHE object (Arguments are optional).
            clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(3, 3))
            cl1 = clahe.apply(img_roi)
            #            res = np.hstack((img_roi, cl1))

            #            equ = cv2.equalizeHist(img_roi)
            #            res = np.hstack((img_roi, img_roi))
            #            cv2.imshow('equ', res)
            #            lower_blue = np.array([110,50,50])
            #            upper_blue = np.array([130,255,255])
            #            mask = cv2.inRange(img_roi, lower_blue, upper_blue)
            #            res = cv2.bitwise_and(img_roi,img_roi, mask= mask)
            circles = cv2.HoughCircles(cl1,
                                       cv2.HOUGH_GRADIENT,
                                       1,
                                       1,
                                       param1=80,
                                       param2=20,
                                       minRadius=0,
                                       maxRadius=0)
            #	    circles = cv2.HoughCircles(img_roi, cv2.HOUGH_GRADIENT, 1, 1, param1=80, param2=20, minRadius=0, maxRadius=0)
            if circles is not None:
                for c in circles[0, :]:
                    # draw the outer circle

                    #                    cv2.imshow('ROI', img_roi)
                    cv2.circle(hsv_drawing,
                               (int(box[1][0] + c[0]), int(box[1][1] + c[1])),
                               int(c[2]), (0, 255, 0), 2)
                    # draw the center of the circle
                    #                    cv2.circle(hsv,(c[0],c[1]),2,(0,0,255),3)
                    cv2.circle(hsv_drawing,
                               (int(box[1][0] + c[0]), int(box[1][1] + c[1])),
                               2, (0, 0, 255), 3)
                    #                    cv2.imshow('circle', hsv_drawing)
                    #                    cv2.imshow('img_rgb_roi', img_rgb_roi)

                    # create a water-index pixel mask from the top-left
                    # (assumed water) pixel of the ROI
                    w = img_rgb_roi[0, 0]
                    b, g, r = cv2.split(img_rgb_roi)
                    mask = np.zeros(img_rgb_roi.shape[:2], np.uint8)

                    mask[:, :] = w[0]
                    b_masked_img = cv2.subtract(b, mask)
                    b_histr, bins = np.histogram(b_masked_img.ravel(), 256,
                                                 [0, 256])
                    b_cdf = b_histr.cumsum()

                    mask[:, :] = w[1]
                    g_masked_img = cv2.subtract(g, mask)
                    g_histr, bins = np.histogram(g_masked_img.ravel(), 256,
                                                 [0, 256])
                    g_cdf = g_histr.cumsum()

                    mask[:, :] = w[2]
                    r_masked_img = cv2.subtract(r, mask)
                    r_histr, bins = np.histogram(r_masked_img.ravel(), 256,
                                                 [0, 256])
                    r_cdf = r_histr.cumsum()

                    total = (b_cdf.max() - b_histr[0]) + (
                        g_cdf.max() - g_histr[0]) + (r_cdf.max() - r_histr[0])
                    # guard against division by zero before computing the
                    # per-channel probabilities
                    if total == 0:
                        Pb = 1.0
                        Pg = 1.0
                        Pr = 1.0
                    else:
                        Pb = float(b_cdf.max() - b_histr[0]) / float(total)
                        Pg = float(g_cdf.max() - g_histr[0]) / float(total)
                        Pr = float(r_cdf.max() - r_histr[0]) / float(total)

                    if abs(Pg - Pr) < 0.2:
                        color = 'y'
                    elif Pg > Pb and Pg > Pr:
                        color = 'g'
                    elif Pr > Pb and Pr > Pg:
                        color = 'r'
                    else:
                        color = 'b'  # fallback so color is always defined
                    #print "Pb{}Pg{}Pr{}".format(Pb, Pg, Pr)
#                    hist,bins = np.histogram(img_roi.flatten(), 256,[0,256])
#                    cdf = hist.cumsum()
#                    cdf_normalized = cdf * hist.max()/ cdf.max()
#                    plt.plot(cdf_normalized, color = 'b')
#                    plt.hist(img.flatqten(),256,[0,256], color = 'r')
#                    plt.xlim([0,256])
#                    plt.legend(('cdf','histogram'), loc = 'upper left')
#                    plt.show()

#                    return (int(box[1][0]+i[0]), int(box[1][1]+i[1]), i[2], b_histr.max(), #g_histr.max(), r_histr.max())
#return (int(box[1][0]+i[0]), int(box[1][1]+i[1]), int(i[2]), Pb, Pg, Pr, color)
                    ret.append({"width":img_gray.shape[1], "height":img_gray.shape[0], "origin_x":int(box[1][0]+i[0]), \
                        "origin_y":int(box[1][1]+i[1]), "radius":int(i[2]), "prob_blue":Pb, "prob_green":Pg, "prob_red":Pr,  "color":color})
                    #buoy = "window_x={7},window_y={8},origin_x={0},origin_y={1},radius={2},prob_blue={3},prob_green={4},prob_red={5},color={6}" \
                    #             .format(ret[i][0], ret[i][1], ret[i][2], ret[i][3], ret[i][4], ret[i][5], ret[i][6],  img_gray.shape[1], img_gray.shape[0])
    print("Count all: {}".format(len(ret)))
    rospy.loginfo(ret)
    return ret
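
The channel-dominance test above can be exercised on its own. A minimal sketch, assuming a BGR ROI and a sampled water pixel; the helper name classify_color and its structure are ours, mirroring the logic above:

import numpy as np

def classify_color(roi_bgr, water_pixel):
    # subtract the assumed water color, then compare how much signal
    # remains in each channel
    diff = np.maximum(roi_bgr.astype(int) - water_pixel.astype(int), 0)
    pb, pg, pr = (float(diff[:, :, ch].sum()) for ch in range(3))
    total = pb + pg + pr
    if total == 0:
        return 'b'
    pb, pg, pr = pb / total, pg / total, pr / total
    if abs(pg - pr) < 0.2:
        return 'y'
    if pg > pb and pg > pr:
        return 'g'
    if pr > pb and pr > pg:
        return 'r'
    return 'b'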
Example #55
total = len(img_files)
for count, i in enumerate(img_files):
    image_name = i.split("/")[-1]
    print("Progress : ", count, "/", total)
    img = cv2.imread(i)

    # Split the 3 channels into Blue,Green and Red
    b, g, r = cv2.split(img)

    # Apply a basic transformation to each channel (basicTransform is a
    # helper defined elsewhere in this project)
    b = basicTransform(b)
    r = basicTransform(r)
    g = basicTransform(g)

    # Perform the distance transform algorithm
    b = cv2.distanceTransform(b, cv2.DIST_L2, 5)  # Euclidean
    g = cv2.distanceTransform(g, cv2.DIST_L1, 5)  # city block (Manhattan)
    r = cv2.distanceTransform(r, cv2.DIST_C, 5)  # Chebyshev (max)

    # Normalize
    r = cv2.normalize(r, r, 0, 1.0, cv2.NORM_MINMAX)
    g = cv2.normalize(g, g, 0, 1.0, cv2.NORM_MINMAX)
    b = cv2.normalize(b, b, 0, 1.0, cv2.NORM_MINMAX)

    # Merge the channels
    dist = cv2.merge((b, g, r))
    dist = cv2.normalize(dist, dist, 0, 4.0, cv2.NORM_MINMAX)
    dist = cv2.cvtColor(dist, cv2.COLOR_BGR2GRAY)

    # To save as JPEG or PNG, the float image must be converted back to
    # an 8-bit format.
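    # A plausible completion, not in the original snippet: rescale the
    # [0, 4] float range produced above to [0, 255] and save it.
    out = cv2.convertScaleAbs(dist, alpha=255.0 / 4.0)
    cv2.imwrite("dist_" + image_name, out)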
Example #56
def image_detect(filename):
    import numpy as np
    import cv2
    from matplotlib import pyplot as plt
    print(filename)

    #canny starts
    def auto_canny(image, sigma=0.33):
        # compute the median of the single channel pixel intensities
        v = np.median(image)

        # apply automatic Canny edge detection using the computed median
        lower = int(max(0, (1.0 - sigma) * v))
        upper = int(min(255, (1.0 + sigma) * v))
        edged = cv2.Canny(image, lower, upper)

        # return the edged image
        return edged

    def ShowImage(title, img, ctype):
        plt.figure(figsize=(10, 10))
        if ctype == 'bgr':
            b, g, r = cv2.split(img)  # get b,g,r
            rgb_img = cv2.merge([r, g, b])  # switch it to rgb
            plt.imshow(rgb_img)
        elif ctype == 'hsv':
            rgb = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
            plt.imshow(rgb)
        elif ctype == 'gray':
            plt.imshow(img, cmap='gray')
        elif ctype == 'rgb':
            plt.imshow(img)
        else:
            raise Exception("Unknown colour type")
        plt.axis('off')
        plt.title(title)
        plt.show()

    img = cv2.imread(filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #ShowImage('Brain MRI',gray,'gray')

    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU)
    #ShowImage('Thresholding image',thresh,'gray')

    ret, markers = cv2.connectedComponents(thresh)

    # Get the area taken by each component. Skip label 0, the background.
    marker_area = [
        np.sum(markers == m) for m in range(1, np.max(markers) + 1)
    ]
    # Get the label of the largest component by area (labels start at 1)
    largest_component = np.argmax(marker_area) + 1
    # Get the pixels which correspond to the brain
    brain_mask = markers == largest_component

    brain_out = img.copy()
    #In a copy of the original image, clear those pixels that don't correspond to the brain
    brain_out[~brain_mask] = (0, 0, 0)

    img = cv2.imread(filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # Canny edge detection
    auto = auto_canny(thresh)
    resu = auto + thresh
    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)

    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)

    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(),
                                 255, 0)

    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)

    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1

    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0
    markers = cv2.watershed(img, markers)
    img[markers == -1] = [255, 0, 0]

    im1 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # img is BGR, not HSV
    ShowImage('Watershed segmented image', im1, 'rgb')
    cv2.imwrite('./static/watershed.jpg', im1,
                [int(cv2.IMWRITE_JPEG_QUALITY), 100])

    brain_mask = np.uint8(brain_mask)
    kernel = np.ones((8, 8), np.uint8)
    closing = cv2.morphologyEx(brain_mask, cv2.MORPH_CLOSE, kernel)
    #ShowImage('Closing', closing, 'gray')

    brain_out = img.copy()
    #In a copy of the original image, clear those pixels that don't correspond to the brain
    brain_out[closing == 0] = (0, 0, 0)


#img_path="./static/WhatsApp_Image_2020-10-14_at_19.35.58.jpeg"
#image_detect(img_path)
Example #57
            dx, dy = 0, 0
            new_rect = (rect[0] + x - dx, rect[1] + y - dy, rect[2] + 2 * dx,
                        rect[3] + 2 * dy)
            new_rects.append(new_rect)
        segment_rects = new_rects
    return segment_rects, np.dstack(3 * [display])


if __name__ == "__main__":
    image = cv2.imread("../../data/drawer.jpg")
    scaled = 1.0
    image = cv2.resize(
        image, (int(image.shape[1] * scaled), int(image.shape[0] * scaled)))
    test_grabcut = False
    if test_grabcut:
        seeds = [[800, 400], [820, 740], [830, 840], [630, 240], [560, 270]]
        rects, display = segment_grabcut(image, seeds=seeds)
        display = np.array(display[:, :, 0])
        display = cv2.distanceTransform(display, cv2.DIST_L2, 5)
        # normalize before display: raw distances can exceed 255
        display = cv2.normalize(display, None, 0, 255, cv2.NORM_MINMAX)
        cv2.imshow("disp", display.astype(np.uint8))
    else:
        rects, display = segment_edges(image,
                                       window=None,
                                       resize=(5000, 5000),
                                       variance_threshold=100,
                                       size_filter=1)
        cv2.imshow("disp", (display).astype(np.uint8))

    while cv2.waitKey(0) != 27:
        pass
Example #58
    ret, thresh = cv2.threshold(gray, 0, 255,
                                cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    # noise removal
    kernel = np.ones((2, 2), np.uint8)
    closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)

    # sure background area of the frame
    sure_bg = cv2.dilate(closing, kernel, iterations=3)

    # remove the background already found, leaving only the corridor
    sure_bg = cv2.absdiff(avg, sure_bg)
    sure_bg = cv2.erode(sure_bg, kernel, iterations=5)

    # find the foreground of the video
    dist_transform = cv2.distanceTransform(sure_bg, cv2.DIST_L2, 3)

    # Threshold
    ret, sure_fg = cv2.threshold(dist_transform, 0.1 * dist_transform.max(),
                                 255, 0)

    # find the unknown region of the image, to connect and run the watershed
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)

    # marker creation
    ret, markers = cv2.connectedComponents(sure_fg)
    markers = markers + 1
    markers[unknown == 255] = 0

    # apply the watershed and paint the original frame
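    # A plausible completion, not in the original snippet (`frame` is
    # assumed to be the current color frame of the video loop):
    markers = cv2.watershed(frame, markers)
    frame[markers == -1] = [0, 0, 255]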
Example #59
def calculatePlantMask(image, toolsize, bilf=[11, 5, 17], morpho_it=[5, 5], debugdir=None):

   # ExG is the excess-green index; calculateExcessGreen is defined
   # elsewhere in this project
   ExG = calculateExcessGreen(image)
   M = ExG.max()
   m = ExG.min()
        
   # Scale all values to the range (0, 255)
   colorIndex = (255 * (ExG - m) / (M - m)).astype(np.uint8)
        
   # Smooth the image using a bilateral filter
   colorIndex = cv2.bilateralFilter(colorIndex, bilf[0], bilf[1], bilf[2])

   if debugdir:
      cv2.imwrite("%s/03-exgnorm.jpg" % debugdir, colorIndex)
        
   # Calculate the mask using Otsu's method (see
   # https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html)
   th, mask = cv2.threshold(colorIndex, 0, 255, cv2.THRESH_OTSU)

   if debugdir:
      cv2.imwrite("%s/04-mask1.jpg" % debugdir, mask)

   if debugdir:
      plt.subplot(1, 5, 1), plt.imshow(image)
      plt.title("image"), plt.xticks([]), plt.yticks([])
        
      plt.subplot(1, 5, 2), plt.imshow(ExG, 'gray')
      plt.title("ExG"), plt.xticks([]), plt.yticks([])
        
      plt.subplot(1, 5, 3), plt.imshow(colorIndex, 'gray')
      plt.title("filtered"), plt.xticks([]), plt.yticks([])
        
      plt.subplot(1, 5, 4), plt.hist(colorIndex.ravel(), 256), plt.axvline(x=th, color="red", linewidth=0.1)
      plt.title("histo"), plt.xticks([]), plt.yticks([])
      
      plt.subplot(1, 5, 5), plt.imshow(mask, 'gray')
      plt.title("mask"), plt.xticks([]), plt.yticks([])
      
      plt.savefig("%s/05-plot.jpg" % debugdir, dpi=300)

   # The kernel is a cross:
   #   0 1 0
   #   1 1 1
   #   0 1 0
   kernel = np.ones((3, 3)).astype(np.uint8)
   kernel[[0, 0, 2, 2], [0, 2, 2, 0]] = 0

   # See https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
   mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=morpho_it[0])
   if debugdir:
      cv2.imwrite("%s/06-mask2.jpg" % debugdir, mask)

   mask = cv2.dilate(mask, kernel=kernel, iterations=morpho_it[1])
   if debugdir:
      cv2.imwrite("%s/07-mask3.jpg" % debugdir, mask)

   # Invert the mask and calculate the distance to the closest black pixel.  
   # See https://docs.opencv.org/2.4.8/modules/imgproc/doc/miscellaneous_transformations.html#distancetransform
   dist = cv2.distanceTransform(255 - (mask.astype(np.uint8)),
                                cv2.DIST_L2,
                                cv2.DIST_MASK_PRECISE)
   # Turn white all the black pixels that are less than half the
   # toolsize away from a white (= plant) pixel
   mask = 255 * (dist <= toolsize / 2).astype(np.uint8)

   if debugdir:
      cv2.imwrite("%s/08-mask.jpg" % debugdir, mask)
         
   return mask
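
A minimal sketch, ours and not part of the project, of the standard excess-green index that calculateExcessGreen presumably computes (ExG = 2g - r - b on channels scaled to [0, 1]):

import cv2
import numpy as np

def calculate_excess_green_sketch(image):
    # cv2.imread gives BGR order; use float so negative values survive
    b, g, r = cv2.split(image.astype(np.float32) / 255.0)
    return 2.0 * g - r - b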
Example #60
def distance_map_simple(floor_mask,
                        m_per_pix,
                        min_robot_width_m,
                        robot_x_pix,
                        robot_y_pix,
                        robot_ang_rad,
                        disallow_too_narrow=True,
                        display_on=False,
                        verbose=False):

    # min_robot_width_m : The best case minimum width of the robot in meters when moving forward and backward.
    # Note: this aliases floor_mask, so the footprint drawing below also
    # modifies the caller's array.
    traversable_mask = floor_mask

    # model the robot's footprint as being traversable
    draw_robot_footprint_rectangle(robot_x_pix, robot_y_pix, robot_ang_rad,
                                   m_per_pix, traversable_mask)
    footprint_test_image = np.zeros_like(traversable_mask)
    draw_robot_footprint_rectangle(robot_x_pix, robot_y_pix, robot_ang_rad,
                                   m_per_pix, footprint_test_image)
    if display_on:
        cv2.imshow('robot footprint drawing', footprint_test_image)
        cv2.imshow('floor mask after drawing robot footprint',
                   traversable_mask)

    # Optimistic estimate of robot width. Use ceil to account for
    # possible quantization. Consider adding a pixel, also.
    min_robot_radius_pix = np.ceil((min_robot_width_m / 2.0) / m_per_pix)

    # Fill in small non-floor regions (likely noise)
    #
    # TODO: improve this. For example, fill in isolated pixels that
    # are too small to trust as obstacles. Options include a hit or
    # miss filter, matched filter, or speckle filter.
    fill_in = True
    if fill_in:
        kernel = np.ones((3, 3), np.uint8)
        traversable_mask = cv2.morphologyEx(traversable_mask, cv2.MORPH_CLOSE,
                                            kernel)
        if display_on:
            cv2.imshow('traversable_mask after filling', traversable_mask)

    # ERROR? : The floodfill operation should occur after removing
    # filtering candidate robot poses due to the footprint
    # radius. Right? Was that too aggressive in the past, so it got
    # dropped?

    # Select the connected component of the floor on which the robot
    # is located.
    h, w = traversable_mask.shape
    new_traversable_mask = np.zeros((h + 2, w + 2), np.uint8)
    #possibly add to floodFill in the future: flags = cv2.FLOODFILL_FIXED_RANGE
    cv2.floodFill(traversable_mask, new_traversable_mask,
                  (robot_x_pix, robot_y_pix), 255)
    traversable_mask = 255 * new_traversable_mask[1:-1, 1:-1]

    # In previous versions, the traversability mask has treated
    # unobserved pixels and observed non-floor pixels
    # differently. Such as by treating unobserved pixels
    # optimistically as traversable.

    # compute distance map: distance from untraversable regions
    #
    # cv2.DIST_L2 : Euclidean distance
    #
    # 5 is the mask size : "finds the shortest path to the nearest
    # zero pixel consisting of basic shifts: horizontal, vertical,
    # diagonal, or knight's move (the latest is available for a 5x5
    # mask)" - OpenCV documentation
    distance_map = cv2.distanceTransform(traversable_mask, cv2.DIST_L2, 5)
    if display_on:
        norm_dist_transform = cv2.normalize(distance_map, None, 0, 255,
                                            cv2.NORM_MINMAX, cv2.CV_8U)
        cv2.imshow('distance map without threshold for the robot width',
                   norm_dist_transform)

    # Restricts the maximum distance of the distance_map. This will
    # favor shorter paths (i.e., straight line paths) when a corridor
    # is wide enough instead of moving to the middle of the
    # corridor. When the corridor is narrower than the threshold, the
    # robot will prefer paths that move it to the center of the
    # corridor. However, simple path planning via 4 connected grid and
    # Dijkstra's algorithm results in vertical and horizontal motions
    # in flat regions rather than point-to-point straight lines.
    clip_max_distance = False
    if clip_max_distance:
        max_distance = 3.0 * min_robot_radius_pix
        print('max_distance =', max_distance)
        print('np.max(distance_map) =', np.max(distance_map))
        # should perform in place clipping
        np.clip(distance_map, None, max_distance, distance_map)
        print('after clipping np.max(distance_map) =', np.max(distance_map))
        if display_on:
            norm_dist_transform = cv2.normalize(distance_map, None, 0, 255,
                                                cv2.NORM_MINMAX, cv2.CV_8U)
            cv2.imshow('distance map with clipped maximum distance',
                       norm_dist_transform)

    if disallow_too_narrow:
        # set parts of the distance transform that represent free space
        # less than the robot would require to zero
        distance_map[distance_map < min_robot_radius_pix] = 0
        if display_on:
            norm_dist_transform = cv2.normalize(distance_map, None, 0, 255,
                                                cv2.NORM_MINMAX, cv2.CV_8U)
            cv2.imshow('distance map with robot width threshold',
                       norm_dist_transform)

    # traversable_mask is a binary image that estimates where the
    # robot can navigate given the robot's current pose and the map,
    # but ignoring the robot's radius.

    # distance_map is a scalar image that estimates the distance to
    # the boundaries of the traversable mask.
    return distance_map, traversable_mask
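
A hedged usage sketch; the file name and parameter values below are ours, not from the original project:

if __name__ == '__main__':
    floor_mask = cv2.imread('floor_mask.png', cv2.IMREAD_GRAYSCALE)
    distance_map, traversable_mask = distance_map_simple(
        floor_mask,
        m_per_pix=0.05,        # 5 cm per pixel
        min_robot_width_m=0.34,
        robot_x_pix=200,
        robot_y_pix=150,
        robot_ang_rad=0.0)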