Example #1
0
File: Final.py Project: AkaZuko/gPb
def edge_detection(img):
	print('*' * 50)
	print('edge_detection called')
	print('*' * 50)
	print()

	img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
	blur = cv2.GaussianBlur(img,(3,3),0)
	edges = cv2.Canny(blur, 300, 400)

	kernel = np.ones((5,5), np.uint8)
	dilation = cv2.dilate(edges, kernel, iterations = 1)

	im_floodfill = dilation.copy()

	h, w = dilation.shape[:2]
	mask = np.zeros((h+2, w+2), np.uint8)
	cv2.floodFill(im_floodfill, mask, (0,0), 255)

	im_floodfill_inv = cv2.bitwise_not(im_floodfill)
	im_out = dilation | im_floodfill_inv

	contours, hierarchy = cv2.findContours(im_out,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
	
	boundary_points = []
	for cnt in contours:
		# we can put some area threshold condition here
		boundary_points.append(np.reshape(cnt.ravel(), (-1, 2) ) )

	return boundary_points
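A minimal driver for the snippet above, with the imports it relies on; the file names are illustrative, not part of the original project.

import cv2
import numpy as np

img = cv2.imread('edge_input.png')          # hypothetical input image
for pts in edge_detection(img):
    for x, y in pts:                        # pts is an (N, 2) array of (x, y) points
        cv2.circle(img, (int(x), int(y)), 1, (0, 0, 255), -1)
cv2.imwrite('edges_out.png', img)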
Example #2
0
def get_mask(ucm,viz=False):
    ucm = ucm.copy()
    h,w = ucm.shape[:2]
    mask = np.zeros((h-2,w-2),'float32')

    i = 0
    sx,sy = np.where(mask==0)
    seed = get_seed(sx,sy,ucm)
    areas = []
    labels=[]
    while seed is not None and i<1000:
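        # note: the zero-filled array is passed as the floodFill *image* and the
        # UCM as the *mask*, so non-zero UCM boundary pixels block each fill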
        cv2.floodFill(mask,ucm,seed,i+1)
        # calculate the area (no. of pixels):
        areas.append(np.sum(mask==i+1))
        labels.append(i+1)

        # get the location of the next seed:
        sx,sy = np.where(mask==0)
        seed = get_seed(sx,sy,ucm)
        i += 1
    print "  > terminated in %d steps"%i

    if viz:
        plt.imshow(mask)
        plt.show()

    return mask,np.array(areas),np.array(labels)
Example #3
0
 def update(dummy=None):
     if self.seed_pt is None:
         #print 'seed_pt is None!!'
         cv2.imshow('floodfill', img)
         self.seed_pt = False
         return
     elif self.seed_pt is False:
         return
     #print 'seed_pt is: {}'.format(self.seed_pt)
     
     mask[:] = 0
     lo = cv2.getTrackbarPos('lo', 'floodfill')
     hi = cv2.getTrackbarPos('hi', 'floodfill')
     flags = connectivity
     if fixed_range:
         flags |= cv2.FLOODFILL_FIXED_RANGE
     cv2.floodFill(self.flooded, mask, self.seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags)
     #cv2.circle(self.flooded, self.seed_pt, 2, (0, 0, 255), -1)
     #cv2.destroyAllWindows()
     #cv2.waitKey(-1)
     #cv2.setMouseCallback('floodfill', onmouse)
     #cv2.createTrackbar('lo', 'floodfill', 20, 255, update)
     #cv2.createTrackbar('hi', 'floodfill', 20, 255, update)
     
     cv2.imshow('floodfill', self.flooded)
Example #4
0
def preprocess_image(image):
    # Copy the depth part of the image
    depth_pixels = image.pixels[..., 2].copy()
    depth_pixels = rescale_to_opencv_image(depth_pixels)
    filtered_depth_pixels = median_filter(depth_pixels, 5)

    # Build mask for floodfilling, this lets me ignore all the pixels
    # from the background and around the ears
    mask = np.zeros((depth_pixels.shape[0] + 2, depth_pixels.shape[1] + 2),
                    dtype=np.uint8)
    # Flood fill from top left
    cv2.floodFill(filtered_depth_pixels, mask, (0, 0),
                  (255, 255, 255), flags=cv2.FLOODFILL_MASK_ONLY)
    # Flood fill from top right
    cv2.floodFill(filtered_depth_pixels, mask, (depth_pixels.shape[1] - 1, 0),
                  (255, 255, 255), flags=cv2.FLOODFILL_MASK_ONLY)
    # Truncate and negate the flood filled areas to find the facial region
    floodfill_mask = (~mask.astype(bool))[1:-1, 1:-1]

    # Build a mask of the areas inside the face that need inpainting
    inpaint_mask = ~image.mask.mask & floodfill_mask
    # Inpaint the image and filter to smooth
    inpainted_pixels = cv2.inpaint(depth_pixels,
                                   inpaint_mask.astype(np.uint8),
                                   5, cv2.INPAINT_NS)
    inpainted_pixels = median_filter(inpainted_pixels, 5)

    # Back to depth pixels
    image.pixels[..., 2] = rescale_to_depth_image(image, inpainted_pixels)
    # Reset the mask!
    image.mask.pixels[..., 0] = ~np.isnan(image.pixels[..., 2])
Example #5
0
def _fill_image(img, connectivity):
    """Fills all holes in connected components in a binary image.

    Parameters
    ----------
    img : numpy array
        Binary image to fill.
    connectivity : int
        Pixel connectivity flags (e.g. 4 or 8) passed to cv2.floodFill.

    Returns
    -------
    filled : numpy array
        The filled image.
    """
    # Copy the image with an extra border
    h, w = img.shape[:2]
    img_border = np.zeros((h + 2, w + 2), np.uint8)
    img_border[1:-1, 1:-1] = img

    floodfilled = img_border.copy()
    mask = np.zeros((h + 4, w + 4), np.uint8)
    cv2.floodFill(floodfilled, mask, (0, 0), 255, flags=connectivity)
    floodfill_inv = cv2.bitwise_not(floodfilled)
    filled = img_border | floodfill_inv
    filled = filled[1:-1, 1:-1]
    return filled
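A quick self-check of the function above on a synthetic ring; a sketch, assuming only the usual cv2/numpy imports.

import cv2
import numpy as np

img = np.zeros((100, 100), np.uint8)
cv2.circle(img, (50, 50), 30, 255, 3)       # hollow ring
filled = _fill_image(img, connectivity=4)
assert filled[50, 50] == 255                # the interior is now solid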
Example #6
0
    def getPoly(self):

        self.image = self.image[0:][300:]
        imgray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        ret,thresh = cv2.threshold(imgray,0,255,0)
        
        #cv2.bitwise_not(thresh)
        height = thresh.shape[0]
        width = thresh.shape[1]
        tempImage = np.copy(thresh)
        fillPoint = None
        for x in range(height - 1, 0, -1):
            currPixel = thresh[x][width // 2]
            if currPixel != 0:
                fillPoint = ((width // 2), x)
                print(fillPoint)
                break
        dim = (height + 2, width + 2)
        mask = np.zeros(dim, dtype=np.uint8)
        
        #Produces nothing if the fill point is used
        #If (0, 0) is used it fills in noise
        cv2.floodFill(thresh, mask, (0, 0), 255)
        cv2.imshow("filledImage", thresh)

        #removes most noise from the thresholded image
        noiseRemoved = cv2.bitwise_xor(thresh, tempImage)
        
        #Dilates in order to remove more noise
        cv2.dilate(noiseRemoved, np.ones((4,4), dtype=np.uint8), noiseRemoved, (-1, -1), 1)
        
        cv2.imshow("f", noiseRemoved)
Example #7
0
def filter_words(img):
    orig = img.copy()
    cv2.Laplacian(img, 0, img, 1)

    kernel = np.ones((3, 5), np.uint16)
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    img_ret, contours, hierarchy = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    (h, w) = img.shape
    new_value = 100
    mask = np.zeros((h + 2, w + 2, 1), np.uint8)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        seed_point = (x + 3 * w // 4, y + h // 2)
        cv2.floodFill(img, mask, seed_point, new_value)

    cv2.threshold(img, 30, 255, cv2.THRESH_BINARY, img)

    img_ret, post_contours, hierarchy = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    img = draw_contours(img, post_contours, 100)
    orig = draw_contours(orig, post_contours, (0, 0, 255))

    return img, orig
Example #8
0
    def _segment_arm(self, frame):
        center_half = 10
        center = frame[self.h // 2 - center_half:self.h // 2 + center_half,
                       self.w // 2 - center_half:self.w // 2 + center_half]
        med_val = np.median(center)
        print("median in blue box: " + str(med_val))

        frame = np.where(abs(frame - med_val) <= self.abs_depth_dev, 128, 0).astype(np.uint8)

        kernel = np.ones((3, 3), np.uint8)
        frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernel)

        small_kernel = 3
        frame[self.h // 2 - small_kernel:self.h // 2 + small_kernel,
              self.w // 2 - small_kernel:self.w // 2 + small_kernel] = 128

        mask = np.zeros((self.h + 2, self.w + 2), np.uint8)
        flood = frame.copy()
        cv2.floodFill(flood, mask, (self.w // 2, self.h // 2), 255, flags=4 | (255 << 8))

        ret, flooded = cv2.threshold(flood, 129, 255, 0)

        return flooded
Example #9
0
	def _segment_arm(self, frame):

		""" segments the arm region based on depth """
		center_half = 10  # half-width of the 21 px center window: (21 - 1) / 2
		lowerHeight = self.height // 2 - center_half
		upperHeight = self.height // 2 + center_half
		lowerWidth = self.width // 2 - center_half
		upperWidth = self.width // 2 + center_half
		center = frame[lowerHeight:upperHeight,lowerWidth:upperWidth]

		med_val = np.median(center)

		frame = np.where(abs(frame - med_val) <= self.abs_depth_dev, 128, 0).astype(np.uint8)

		kernel = np.ones((3, 3), np.uint8)
		frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernel)

		small_kernel = 3
		frame[ self.height // 2 - small_kernel: self.height // 2 + small_kernel,
			   self.width // 2 - small_kernel: self.width // 2 + small_kernel ] = 128
		
		mask = np.zeros((self.height+2, self.width+2), np.uint8)

		flood = frame.copy()
		cv2.floodFill(flood, mask, (self.width // 2, self.height // 2), 255, flags=4 | (255 << 8))

		ret, flooded = cv2.threshold(flood, 129, 255, cv2.THRESH_BINARY)

		return flooded
Example #10
0
def significantSQS(img,bigCont,minArea,maxArea,direction):
	sigSquares = findFrame(bigCont,(img.shape[1],img.shape[0]),2,255,5)
	removeNotConnected(sigSquares)
	sigSquares = refine(sigSquares,img,bigCont,minArea,maxArea,direction,(5,5))
	
	aux = paintSQS(sigSquares,np.ones((img.shape),np.uint8)*255)

	rawContours = cv2.findContours(cv2.split(aux)[0].copy(),
		cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[0]
	convex = [cv2.convexHull(points) for points in rawContours]
	#cv2.drawContours(img,convex,-1, (0,0,255),3)
	
	#numerateConectComp(sigSquares)

	floodPts = getPoints(img.shape,sigSquares)
	

	# for pt in floodPts:
	# 	if not isContained(pt,rawContours):
	# 		floodPts.remove(pt)
	
	lo = 30
	hi = 30
	mask = np.zeros((img.shape[0]+2,img.shape[1]+2),np.uint8)
	
	for idx, square in enumerate(floodPts):
		for pt in square:
			color = img[pt[1], pt[0]]
			# print(color)
			cv2.floodFill(img, mask, pt, (255 - idx * 20,) * 3,
				(lo,)*3, (hi,)*3, cv2.FLOODFILL_FIXED_RANGE)
			#(color,color,color)
	
	return img
Example #11
0
    def get_mask(self, image):
        """
        Returns the mask of an image. We use the flood-fill algorithm,
        which compares 4 neighboring pixels and, based on a specified
        threshold, fills a mask image (two pixels larger in every
        direction) with white; leftover noise is then removed by
        dilation.

        Args:
            image(numpy.ndarray): Preprocessed image

        Returns:
            Mask of given image
        """
        h, w = image.shape[:2]
        mask = np.zeros((h+2, w+2), np.uint8)
        connectivity = 4
        mask[:] = 0
        if self.debug is True:
            self.lo = cv.getTrackbarPos('lo', 'result')
            self.hi = cv.getTrackbarPos('hi', 'result')
        flags = connectivity
        flags |= cv.FLOODFILL_MASK_ONLY
        flags |= 255 << 8
        self.seed = self.get_seed(image)
        cv.floodFill(image, mask, self.seed, (255, 255, 255), (self.lo,)*3,
                     (self.hi,)*3, flags)
        kernel = np.ones((1, 1), np.uint8)
        mask = cv.dilate(mask, kernel, iterations=4)
        return mask
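The flag arithmetic in this example is documented OpenCV behavior worth spelling out: the low byte of flags carries the connectivity, bits 8-15 carry the value written into the mask, and FLOODFILL_MASK_ONLY leaves the image itself untouched. A standalone sketch on synthetic data:

import cv2
import numpy as np

img = np.full((20, 20), 200, np.uint8)
mask = np.zeros((22, 22), np.uint8)
flags = 4 | (255 << 8) | cv2.FLOODFILL_MASK_ONLY
cv2.floodFill(img, mask, (10, 10), 0, 5, 5, flags)
print(mask.max(), img[10, 10])              # 255 200: the fill went to the mask, not the image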
Example #12
0
def floodImage(img, sample_spot):
    h, w = img.shape[:2]
    mask = np.zeros((h+2, w+2), np.uint8)
    mask[:] = 0
    flooded = img.copy()
    cv2.floodFill(flooded, mask, tuple(sample_spot), (255, 255, 255), (4,)*3, (10,)*3, 4)
    return flooded
Example #13
0
def __tutorial_hough_circle_detection_cv_old(img_path):
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)
    frame_gray = cv2.GaussianBlur(img, (5, 5), 2)

    edges = frame_gray - cv2.erode(frame_gray, None)
    _, bin_edge = cv2.threshold(edges, 0, 255, cv2.THRESH_OTSU)
    height, width = bin_edge.shape
    mask = numpy.zeros((height + 2, width + 2), dtype=numpy.uint8)
    cv2.floodFill(bin_edge, mask, (0, 0), 255)

    components = segment_on_dt(bin_edge)

    circles, obj_center = [], []
    contours, _ = cv2.findContours(components,
                                   cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    for c in contours:
        c = c.astype(numpy.int64)  # XXX OpenCV bug.
        area = cv2.contourArea(c)
        if 100 < area < 3000:
            arclen = cv2.arcLength(c, True)
            circularity = (pi_4 * area) / (arclen * arclen)
            if circularity > 0.5:  # XXX Yes, pretty low threshold.
                circles.append(c)
                box = cv2.boundingRect(c)
                obj_center.append((box[0] + (box[2] / 2), box[1] + (box[3] / 2)))

    return circles, obj_center
Example #14
0
    def get_patches(self):
        # TODO: The patch runs may include runs from other patches.
        # Possible fix: flag them using negative values
        all_patches = []
        for i, s in enumerate(self._sprites):
            mask_shape = (s.shape[0] + 2, s.shape[1] + 2)
            r, g, b, a = cv2.split(s)
            s_patches = []
            for x, y in zip(*np.nonzero(a)):
                if a[x][y] == 0:
                    continue
                mask = np.zeros(mask_shape, dtype='ubyte')
                temp = s[:, :, :3].astype('ubyte')
                cv2.floodFill(temp, mask, (y, x), (0, 255, 0), flags=cv2.FLOODFILL_MASK_ONLY)
                # Trim off the 1 px wide border that is required by cv2.floodFill
                trimmed_mask = mask[1:-1, 1:-1]

                # bounding box
                bb = np.nonzero(trimmed_mask)
                x1, y1 = min(bb[0]), min(bb[1])
                x2, y2 = max(bb[0]), max(bb[1])
                bb = self.get_bounding_box(trimmed_mask)

                a = np.logical_and(np.logical_not(trimmed_mask), a).astype('ubyte')
                s_patches.append({
                    'color': s[x][y][:3],
                    'height': x2 - x1,
                    'width': y2 - y1,
                    'bounding_box': bb,
                    'area': len(np.nonzero(trimmed_mask)[0]),
                    'runs': self.get_patch_runs(trimmed_mask),
                    'offset': bb[0],
                })
            all_patches.append(s_patches)
        return all_patches
Example #15
0
def extractCentralComponent(mask):
    sqrSize2 = 2
    mheight, mwidth = mask.shape
    markers = np.zeros(mask.shape, np.uint8)
    minSize = min(mheight, mwidth)
    minSize2 = minSize // 2
    centerX = mwidth // 2
    centerY = mheight // 2
    while sqrSize2 < minSize2 - 20:
        starty = centerY-sqrSize2
        endy = centerY+sqrSize2+1
        startx = centerX-sqrSize2
        endx = centerX+sqrSize2+1
        markers[starty:endy, startx:endx] = 255
        seeds = markers & mask
        seed_point = np.argmax(seeds)
        seed_point = np.unravel_index(seed_point, mask.shape)
        max_val = mask[seed_point]
        if max_val != 255:
            sqrSize2 = sqrSize2+3
        else:
            cv2.floodFill(mask, np.zeros((mheight+2, mwidth+2), np.uint8), (seed_point[1], seed_point[0]), 127)
            mask[mask != 127] = 0
            histo, bins = np.histogram(mask, bins=[0, 127, 255])
            if histo[1] < 100: #object too small, remove it
                mask[mask == 127] = 0
            else:
                mask[mask == 127] = 255
                break
Example #16
0
 def getCardName(img):
     #cardsize = 976,1364
     secx1 = int(cardsize[0]*0.06)
     secx2 = int(cardsize[0]*0.7)
     secy1 = int(cardsize[1]*0.04)
     secy2 = int(cardsize[1]*0.12)
     sech = secy2-secy1
     secw = secx2-secx1
     crop_img = img[secy1:secy2, secx1:secx2]
     gray = cv2.cvtColor(crop_img,cv2.COLOR_BGR2GRAY)
     gray = ~gray
     flag, gray = cv2.threshold(gray, 130, 255, cv2.THRESH_BINARY_INV)
     
     #@TODO Look at finding the largest contour here which should be the area behind the text
     h, w = gray.shape[:2]
     mask = np.zeros((h+2, w+2), np.uint8)
     mask[:] = 0
     lo = 20
     hi = 20
     flags = 4
     flags |= cv2.FLOODFILL_FIXED_RANGE
     
     cv2.line(gray, (0, 0), (0, sech-1), (0,0,0))
     cv2.line(gray, (0, 0), (secw-1, 0), (0,0,0))
     cv2.line(gray, (secw-1, 0), (secw-1, sech-1), (0,0,0))
     cv2.line(gray, (0, sech-1), (secw-1, sech-1), (0,0,0))
     cv2.floodFill(gray, mask, (0,0), (255, 255, 255), (lo,)*3, (hi,)*3, flags)
     
     #cv2.imshow('full', test)
     return gray
Example #17
0
def preserve_outermost(image, foreground_mask):
  """Only leaves foreground image components adjacent to the border.

  Args:
    image: 2D numpy array with white foreground and black background.
    foreground_mask: mask with white where the image foreground is.

  Returns:
    2D array like image but with only foreground components adjacent to
        "outside" preserved.
  """
  # Leave the values [0, 254] to the outside.
  outside_mask = cv2.threshold(foreground_mask, 254, 255,
                               cv2.THRESH_BINARY_INV)[1]
  with_outside = np.maximum(image, outside_mask)

  flood_fill_mask = 255 - dataset.pad_image(with_outside, 1)

  # Some point outside the foreground mask.
  seed_point = np.argwhere(outside_mask == 255)[0]

  # Unused flood fill value.
  new_val = 128

  # flood_fill_mask is set to 1, where the flood fill was applied.
  cv2.floodFill(with_outside, flood_fill_mask, tuple(seed_point[::-1]), new_val)

  return cv2.bitwise_and(
      image, image, mask=(flood_fill_mask == 1).astype(np.uint8)[1:-1, 1:-1])
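The inverted-mask trick above deserves a standalone illustration: floodFill never crosses non-zero mask pixels, so passing the negated image as the mask confines the fill to the white foreground. A minimal sketch, with np.pad standing in for dataset.pad_image:

import cv2
import numpy as np

image = np.zeros((50, 50), np.uint8)
cv2.line(image, (0, 25), (30, 25), 255, 1)  # stroke touching the border
cv2.circle(image, (40, 10), 4, 255, -1)     # isolated blob
mask = 255 - np.pad(image, 1)               # white pixels become 0, i.e. fillable
cv2.floodFill(image, mask, (0, 25), 128)
inner = mask[1:-1, 1:-1]
print(inner[25, 10] == 1, inner[10, 40] == 1)   # True False: only the border stroke was reached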
Example #18
0
def approach4(img):
    print("init approach 4")
    imgColor = img.copy()
    imgGray = cv2.cvtColor(imgColor, cv2.COLOR_BGR2GRAY)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    #image = cv2.pyrMeanShiftFiltering(img, 40, 30)
    minRange = (1.0, 94.0, 1.0)
    maxRange = (29.0, 255.0, 255.0)
    #cv2.imshow("pyrMean", image)
    image = cv2.inRange(img, minRange, maxRange)    
    #cv2.imshow("range", image)
    #cv2.waitKey(0)
    contours, _ = cv2.findContours(image, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

    biggestContourProps = regionProps.CalcContourProperties(contours[0], ["Area", "Centroid"])
    for contour in contours:
        props = tools.RegionProps.CalcContourProperties(contour, ["Area", "Centroid"])
        if(biggestContourProps["Area"] < props["Area"]):        
            biggestContourProps = props

    center = (int(biggestContourProps["Centroid"][0]), int(biggestContourProps["Centroid"][1]))
    #cv2.circle(imgColor, center, 5, (0, 0, 255), (int(biggestContourProps["Area"]*.00005) + 1) ) 
    #cv2.drawContours(imgColor, [biggestContour], -1, (0, 255, 0))
    #hull =  cv2.convexHull(biggestContour)
    #cv2.drawContours(imgColor, [hull], -1, (0, 0, 255))
    h, w = imgGray.shape[:2]
    mask = np.zeros((h+2, w+2), np.uint8)
    mask[:] = 0
    cv2.floodFill(imgGray, mask, center, (255,255,255), 10, 5)
    cv2.imshow("flooded", imgGray)
    cv2.waitKey(0)
    
    print "done approach 4"
    return imgColor
Example #19
0
def get_page(I):
    I = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
    #I = resize_to_size(I, max_height=800)
    H,W = I.shape[:2]

    # clear page from text
    I = impreproc(I)
    showImageDebug(I)    
    
    # find all approx-horiz/verti long lines
    I = detect_edges(I,50)  
    showImageDebug(I)
    I = find_lines(I,100,50,500) 
    showImageDebug(I)

    # fill between close lines/gaps before floodfill
    dilate_kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
    I = cv2.morphologyEx(I, cv2.MORPH_CLOSE, dilate_kernel)
#    showImageDebug(I)

    # fill page area (assuming center of photo is inside page)
    mask = np.zeros((H+2, W+2), np.uint8)
    cv2.floodFill(I,mask,(int(round(W/2)),int(round(H/2))),128)
    showImageDebug(I)
    
    # erase all lines
    I[np.where(I == 255)] = 0
    
    # close gaps in page filling (i.e. erased lines)
    I = cv2.medianBlur(I,11)
#    showImageDebug(I)
    
    return I
Example #20
0
def identify_colors(image, color):

	colorlist = create_colorlist(color)
	#loop over the colorlist
	for (lower, upper) in colorlist:
		# create NumPy arrays from the colorlist
		lower = np.array(lower, dtype = "uint8")
		upper = np.array(upper, dtype = "uint8")


	
		# converts image to b/w with white being anything in the BGR value range
		mask = cv2.inRange(image, lower, upper)
		# converts that specified range back to its original color
		output = cv2.bitwise_and(image, image, mask = mask)

		# Remove outer black area Source: http://stackoverflow.com/questions/36508001/determining-if-a-color-is-within-a-contour-opencv
		x = 5
		y = 5
		flooded = output.copy()
		h, w = output.shape[:2]
		mask = np.zeros((h+2, w+2), np.uint8)
		mask[:] = 0
		cv2.floodFill(flooded,mask,(x,y),(255,)*3, (40,)*3, (40,)*3, 4 )

		#show the photos side by side
		#cv2.imshow("images", np.hstack([image, flooded]))
		#cv2.waitKey(0)
		
	return flooded, colorlist
Example #21
0
def main():
    img = cv2.imread("text.bmp", cv2.CV_LOAD_IMAGE_COLOR)  # Read image file
    img = cv2.GaussianBlur(img, (3, 3), 0)
    img = cv2.Laplacian(img, 0)
    cv2.threshold(img, 70, 255, cv2.THRESH_BINARY, img)
    kernel = np.ones((4, 6), np.uint8)
    img = cv2.dilate(img, kernel, iterations = 1)
    img = cv2.erode(img, kernel, iterations = 1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

    color = (255, 255, 255)
    drawing = np.zeros(img.shape, np.uint8)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(drawing, (x, y), (x + w, y + h), color)

    drawing = cv2.cvtColor(drawing, cv2.COLOR_RGB2GRAY)
    h, w = img.shape[:2]
    outpImg = np.zeros((h-2, w-2), np.uint8)
    cv2.floodFill(outpImg, drawing, (0, 0), color)

    cv2.threshold(outpImg, 254, 255, cv2.THRESH_BINARY_INV, outpImg)

    cv2.imwrite("outputTask3.jpg", outpImg)
    cv2.namedWindow('Display Window', cv2.WINDOW_NORMAL)        # Create window for display
    cv2.imshow('Display Window', outpImg)         # Show image in the window
    cv2.moveWindow('Display Window', 1, 1)
    print ("size of image: ", img.shape)      # print size of image
    cv2.waitKey(0)                           # Wait for keystroke
    cv2.destroyAllWindows()                  # Destroy all windows
Example #22
0
	def _extract_arm(self, img):
		# find center region of image frame (assume center region is 21 x 21 px)
		center_half = 10 # (= (21 - 1) / 2)
		center = img[self.height // 2 - center_half : self.height // 2 + center_half, self.width // 2 - center_half : self.width // 2 + center_half]

		# determine median depth value
		median_val = np.median(center)

		'''mask the image such that all pixels whose depth values
		lie within a particular range are gray and the rest are black
		'''

		img = np.where(abs(img-median_val) <= self.abs_depth_dev, 128, 0).astype(np.uint8)

		# Apply morphology operation to fill small holes in the image
		kernel = np.ones((5,5), np.uint8)
		img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)

		# Find connected regions (to hand) to remove background objects
		# Use floodfill with a small image area (7 x 7 px) that is set gray color value
		kernel2 = 3
		img[self.height // 2 - kernel2:self.height // 2 + kernel2, self.width // 2 - kernel2:self.width // 2 + kernel2] = 128
		
		# a black mask to mask the 'non-connected' components black
		mask = np.zeros((self.height + 2, self.width + 2), np.uint8)
		floodImg = img.copy()

		# Use floodFill function to paint the connected regions white 
		cv2.floodFill(floodImg, mask, (self.width // 2, self.height // 2), 255, flags=(4 | 255 << 8))
		
		# apply a binary threshold to show only connected hand region
		ret, floodedImg = cv2.threshold(floodImg, 129, 255, cv2.THRESH_BINARY)

		return floodedImg
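Examples #8, #9 and #22 all share the same center-seeded idiom; here it is as a standalone sketch on a synthetic depth frame (all values are illustrative):

import cv2
import numpy as np

h, w, abs_depth_dev = 120, 160, 10
frame = np.random.randint(700, 720, (h, w)).astype(np.uint16)
frame[40:80, 50:110] = 500                   # the "arm" depth plane
med = np.median(frame[h//2 - 10:h//2 + 10, w//2 - 10:w//2 + 10])
gray = np.where(abs(frame - med) <= abs_depth_dev, 128, 0).astype(np.uint8)
mask = np.zeros((h + 2, w + 2), np.uint8)
flood = gray.copy()
cv2.floodFill(flood, mask, (w // 2, h // 2), 255, flags=4 | (255 << 8))
_, arm = cv2.threshold(flood, 129, 255, cv2.THRESH_BINARY)   # only the connected region survives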
Example #23
0
def find_areas(mask):
    areas = []

    h, w = mask.shape
    #mask = np.copy(mask)
    flood_fill_mask = np.zeros((h + 2, w + 2), np.uint8)

    area_id = 0
    for y in range(1, h):
        for x in range(1, w):
            if mask[y][x] == 255:
                area = Area(area_id, (x, y))
                areas.append(area)
                cv.floodFill(mask, flood_fill_mask, area.seed, area.id + 1)
                area_id += 1

    for y in range(1, h):
        for x in range(1, w):
            color = mask[y][x]
            if color != 0:
                area = areas[color - 1]
                area.points.append((x, y))

    areas = list(filter(lambda ar: len(ar.points) > AREA_MIN_SIZE, areas))
    areas.sort(key=lambda ar: len(ar.points), reverse=True)

    for i, area in enumerate(areas):
        area.id = i
        area.find_center()

    return areas
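A quick driver for find_areas, with minimal stand-ins for the Area class and AREA_MIN_SIZE constant the snippet assumes (the real ones presumably live elsewhere in the project):

import cv2 as cv
import numpy as np

AREA_MIN_SIZE = 10                           # assumed threshold

class Area:                                  # minimal stand-in
    def __init__(self, id_, seed):
        self.id, self.seed, self.points = id_, seed, []
    def find_center(self):
        xs, ys = zip(*self.points)
        self.center = (sum(xs) // len(xs), sum(ys) // len(ys))

mask = np.zeros((60, 60), np.uint8)
cv.rectangle(mask, (5, 5), (20, 20), 255, -1)
cv.rectangle(mask, (30, 30), (55, 55), 255, -1)
print([len(a.points) for a in find_areas(mask)])   # largest blob first, e.g. [676, 256]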
Example #24
0
  def process(self):
    #cv2.imwrite("Card.png", self.input_img)
    img_gray = cv2.cvtColor(self.input_img, cv2.COLOR_RGB2GRAY)
    img_gray = cv2.equalizeHist(img_gray)
    img_normalized = img_gray.copy()
    img_normalized = cv2.normalize(img_gray, img_normalized, 0, 255, cv2.NORM_MINMAX)
    blur = cv2.GaussianBlur(img_normalized, (CardNameDetector.blur_kernel_size, CardNameDetector.blur_kernel_size), 0)
    #blur = cv2.blur(img_normalized, (CardNameDetector.blur_kernel_size, CardNameDetector.blur_kernel_size))
    #cv2.imshow("Blur",blur)
    
    bin = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 15, 0)
    #bin = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 5, 2)
    #cv2.imshow("adaptiveThreshold mean", bin)
    #_, bin = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #cv2.imshow("Otsu", bin)
    #_, bin = cv2.threshold(blur, 200, 255, cv2.THRESH_BINARY)
    #cv2.imshow("Binary Threshold", bin)

    bin = (255 - bin)
    #bin = cv2.dilate(bin, (3,3))
    bin = cv2.morphologyEx(bin, cv2.MORPH_CLOSE, np.ones((20, 20), np.uint8), iterations = 1)

    img_h, img_w = bin.shape
    x_min = int(0.1 * img_w)
    x_max = int(0.9 * img_w)
    y = int(0.08 * img_h)

    last_pixel_was_white = False
    mask = np.zeros((img_h+2, img_w+2), np.uint8)
    box_list = []
    for x in range(x_min, x_max) :
      #if last_pixel_was_white :
        #continue
      #else :
        #last_pixel_was_white = True
      mask[:] = 0
      flooded = bin.copy()
      cv2.floodFill(flooded, mask, (x,y), 255)
      diff = flooded - bin
      #cv2.imshow("diff", diff)

      _, contours, hierarchy = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
      if len(contours) != 1 :
        continue
      x_box,y_box,w_box,h_box = cv2.boundingRect(contours[0])
      #cv2.rectangle(self.input_img, (x_box,y_box),(x_box+w_box, y_box+h_box), (255,0,0), 2)
      #cv2.imshow("res", self.input_img
      #cv2.drawContours(diff, contours, -1, 150 , 2)
      box_list.append((x_box + int(0.01*w_box), y_box, int(0.99*w_box), int(0.90*h_box)))
    
    id_ = str(random.random())
    
    box_list.sort(key=lambda elt : elt[2]*elt[3], reverse=True)

    x_box,y_box, w_box, h_box = box_list[0]
    cv2.rectangle(self.input_img, (x_box,y_box),(x_box+w_box, y_box+h_box), (0,0,255), 1)

    #cv2.line(self.input_img, (int(0.1 * img_w), int(0.08 * img_h)), (int(0.9 * img_w), int(0.08 * img_h)), (255,128,0), 2)
    cv2.imshow("Text box" + id_, self.input_img)
Example #25
0
def find_all_template(im_source, im_search, threshold=0.5, maxcnt=0, rgb=False, bgremove=False):
    '''
    Locate image position with cv2.templateFind

    Use pixel match to find pictures.

    Args:
        im_source(string): the source image (haystack)
        im_search(string): the image to search for (needle)
        threshold: matches whose similarity score falls below this value are ignored

    Returns:
        A tuple of found [(point, score), ...]

    Raises:
        IOError: when file read error
    '''
    # method = cv2.TM_CCORR_NORMED
    # method = cv2.TM_SQDIFF_NORMED
    method = cv2.TM_CCOEFF_NORMED

    if rgb:
        s_bgr = cv2.split(im_search) # Blue Green Red
        i_bgr = cv2.split(im_source)
        weight = (0.3, 0.3, 0.4)
        resbgr = [0, 0, 0]
        for i in range(3): # bgr
            resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], method)
        res = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    else:
        s_gray = cv2.cvtColor(im_search, cv2.COLOR_BGR2GRAY)
        i_gray = cv2.cvtColor(im_source, cv2.COLOR_BGR2GRAY)
        # edge extraction (used to implement background removal)
        if bgremove:
            s_gray = cv2.Canny(s_gray, 100, 200)
            i_gray = cv2.Canny(i_gray, 100, 200)

        res = cv2.matchTemplate(i_gray, s_gray, method)
    w, h = im_search.shape[1], im_search.shape[0]

    result = []
    while True:
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        if DEBUG:
            print('templmatch_value(thresh:%.1f) = %.3f' % (threshold, max_val))  # not show debug
        if max_val < threshold:
            break
        # calculator middle point
        middle_point = (top_left[0] + w // 2, top_left[1] + h // 2)
        result.append((middle_point, max_val))
        if maxcnt and len(result) >= maxcnt:
            break
        # floodfill the already found area
        cv2.floodFill(res, None, max_loc, (-1000,), max_val-threshold+0.1, 1, flags=cv2.FLOODFILL_FIXED_RANGE)
    return result
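Typical usage of find_all_template, assuming a screenshot/template pair on disk (the file names are illustrative) and that the module-level DEBUG flag is defined:

import cv2

screen = cv2.imread('screen.png')            # hypothetical haystack image
button = cv2.imread('button.png')            # hypothetical needle image
for (cx, cy), score in find_all_template(screen, button, threshold=0.8, maxcnt=5):
    print('match at (%d, %d) with score %.3f' % (cx, cy, score))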
Example #26
0
 def getFloodFilled(self,image=None,newVal=255,seedPoint=(0,0),**kwargs):
     if image is None:
         image=self.IMG_gray
     timage=deepcopy(image)
     h,w = timage.shape[:2]
     floodfilled = np.zeros((h+2, w+2), np.uint8)
     cv2.floodFill( timage,floodfilled,seedPoint,newVal,flags=cv2.FLOODFILL_MASK_ONLY)
     return floodfilled[1:-1,1:-1]
Example #27
0
 def process(self, img):
     h, w = img.shape[:2]
     mask = np.zeros((h + 2, w + 2), np.uint8)
     cv2.floodFill(
         img, mask, self.seed_pt, (255, 255, 255), (int(self.lo),) * 3, (int(self.hi),) * 3, int(self.connectivity)
     )
     cv2.circle(img, self.seed_pt, 3, (0, 0, 255), -1)
     return img
Example #28
0
	def floodFill(self,frame):
		im_floodfill = frame.copy()			 
		h, w = frame.shape[:2]
		mask = np.zeros((h+2, w+2), np.uint8)
		cv2.floodFill(im_floodfill, mask, (0,0), 255)
		im_floodfill_inv = cv2.bitwise_not(im_floodfill)
		frame = frame | im_floodfill_inv
		return frame
Example #29
0
 def _magicwand(self):
     self._flood_mask[:] = 0
     flags = self.connectivity | 255 << 8   # bit shift
     flags |= cv2.FLOODFILL_FIXED_RANGE | cv2.FLOODFILL_MASK_ONLY
     flood_image = self._image.copy()
     cv2.floodFill(flood_image, self._flood_mask, self._seed_point, 0,
                   self._tolerance, self._tolerance, flags)
     self._mask = self._flood_mask[1:-1, 1:-1].copy()
     self._update_window()
Example #30
0
def imfill(im_in, seed):
    im_th = im_in*255
    im_floodfill = im_th.copy()
    h, w = im_th.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(im_floodfill, mask, seed, 255)
    im_floodfill_inv = cv2.bitwise_not(im_floodfill)
    im_out = im_th | im_floodfill_inv
    return (im_out+256)/255
Example #31
0
nodes = np.argwhere(temp == 1)
kernel = np.ones((3, 3), np.uint8)
for filename in filenames:
    print(filename)
    # test
    a0 = cv2.imread('image/movie0_axon_result/' + filename,
                    0).astype('float32') / 255
    m, n = a0.shape
    mask = np.zeros((m - 2, n - 2))

    result = np.copy(mask)
    for i in nodes:
        row, cal = i
        if row >= m - 3 or row < 0 or cal > n - 3 or cal < 0:
            continue
        b1 = cv2.floodFill(mask.astype('uint8'), a0.astype('uint8'),
                           (cal - 1, row - 1), 255, cv2.FLOODFILL_MASK_ONLY)[1]
        result[b1 == 255] = 255

    cv2.imwrite(c + filename, result)

    skeleton = skeletonize(result / 255) * 255
    cv2.imwrite('image/movie0_sk2/' + filename, skeleton)
    '''
    img2 = cv2.imread('image/old/movie0_mark_2/'+filename, 0)
    img2 = img2[1:-1, 1:-1]
    blank = np.zeros(img2.shape)
    blank[img2 == 50] = 100
    blank[skeleton == 255] = 255
    cv2.imwrite('image/movie_ds/'+filename, blank)
    '''
Example #32
0
def predict_color_chip_whitevals(cropped_cc):
    def _remove_wrong_white_loc(cropped_cc):
        prop_diff_height = abs(h / 2 - maxLoc[1]) / h
        prop_diff_width = abs(w / 2 - maxLoc[0]) / w

        if prop_diff_height > prop_diff_width and maxLoc[1] > h / 2:
            cropped_cc = cropped_cc[0:maxLoc[1] - 2, 0:w]
        elif prop_diff_height > prop_diff_width and maxLoc[1] < h / 2:
            cropped_cc = cropped_cc[maxLoc[1] + 2:h, 0:w]
        elif prop_diff_height < prop_diff_width and maxLoc[0] > w / 2:
            cropped_cc = cropped_cc[0:h, 0:maxLoc[0] - 2]
        else:
            cropped_cc = cropped_cc[0:h, maxLoc[0] + 2:w]

        return cropped_cc

    for _ in range(10):
        grayImg = cv2.cvtColor(cropped_cc, cv2.COLOR_RGB2GRAY)
        minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(grayImg)
        var_threshold = int((maxVal - minVal) * .5)
        h, w, chn = cropped_cc.shape

        seed = maxLoc
        mask = np.zeros((h + 2, w + 2), np.uint8)
        floodflags = 8
        floodflags |= cv2.FLOODFILL_FIXED_RANGE
        floodflags |= cv2.FLOODFILL_MASK_ONLY
        floodflags |= (int(maxVal) << 8)
        num, cropped_cc, mask, rect = cv2.floodFill(cropped_cc, mask, seed, 0,
                                                    (var_threshold, ) * 3,
                                                    (var_threshold, ) * 3,
                                                    floodflags)
        mask = mask[1:-1, 1:-1, ...]
        area = h * w
        contour_area_floor = area // 50
        contour_area_ceiling = area // 1
        squares = find_squares(mask,
                               contour_area_floor=contour_area_floor,
                               contour_area_ceiling=contour_area_ceiling)

        if len(squares) == 0:
            cropped_cc = _remove_wrong_white_loc(cropped_cc)
            continue

        squares = sorted(squares, key=cv2.contourArea, reverse=True)
        for square in squares:
            x_arr = square[..., 0]
            y_arr = square[..., 1]
            x1, y1, x2, y2 = np.min(x_arr), np.min(y_arr), np.max(
                x_arr), np.max(y_arr)
            square_width, square_height = x2 - x1, y2 - y1
            longest_side = max(square_width, square_height)
            shortest_side = min(square_width, square_height)
            ratio = longest_side / shortest_side

            if 0.85 < ratio < 1.15 or 1.45 < ratio < 1.75:
                break
        else:
            cropped_cc = _remove_wrong_white_loc(cropped_cc)
            continue
        break
    else:
        raise ValueError("Could not find the proper white square!")

    extracted = cropped_cc[mask != 0]
    extracted = extracted.reshape(-1, extracted.shape[-1])
    mode_white = np.apply_along_axis(lambda x: np.bincount(x).argmax(),
                                     axis=0,
                                     arr=extracted)

    return list(mode_white), minVal
Example #33
0
def labelAreas(list_labels, list_regions, lastEle, image_number, user,
               dataset_typ, GET_REGIONS, level_num, tooltype):
    print('labelAreas')
    print(GET_REGIONS)
    k = 0
    j = 0
    wrong_label = False
    first_image = False

    SAVE_IMAGES = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'Seg_masks_' + user + '_' + dataset_typ + '_' + tooltype)
    #GET_REGIONS= os.path.join(os.path.dirname(os.path.abspath(__file__)), 'label_masks_temp')
    #GET_REGIONS = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Tutorial/mask/first_SingelCells')
    print('level number: ', level_num)
    if (level_num == 4 or level_num == 5 or level_num == 0):
        file = os.path.join(GET_REGIONS,
                            str(image_number) + '_labelregions.npy')
    else:
        file = os.path.join(
            GET_REGIONS,
            str(level_num) + 'level_' + str(image_number) +
            '_labelregions.npy')

    print('File: ', file)
    while not os.path.exists(file):
        time.sleep(1)
    if os.path.isfile(file):
        image_label = np.load(file)  #load LabelMask
    else:
        raise ValueError("%s isn't a file!" % file)

    # because 0-5 are reserved for cell labels and image_label runs from 0 to the number of areas, the raw values can conflict with the cell labels
    image_label = image_label * 100
    """for areas labeled whole region, more cells in one box but same label"""
    if list_regions != 'none':
        list_regions = list_regions.split(",")
        if len(list_regions) == 3:
            first_image = True

        while (j < len(list_regions)):
            x_value = round(float(list_regions[j]))
            y_value = round(float(list_regions[j + 1]))
            width = round(float(list_regions[j + 2]))
            height = round(float(list_regions[j + 3]))
            label_str = list_regions[j + 4]
            label_str = label_str.strip()
            print('data of regions: ', x_value, y_value, width, height,
                  label_str)
            if (label_str == 'Single Cell'):
                label = 1
            elif (label_str == 'Aggregate'):
                label = 2
            elif (label_str == 'Platelet'):
                label = 3
            elif (label_str == 'Background'):
                label = 0
            elif (label_str == 'Other'):
                label = 5
            else:
                print('no label: default label = 0')
                label = 0
            image_label[y_value:y_value + height, x_value:x_value +
                        width][image_label[y_value:y_value + height,
                                           x_value:x_value +
                                           width] != 0] = label

            box = image_label[y_value:y_value + height,
                              x_value:x_value + width]
            print('max value:', np.argmax(box))
            #image_label_new = image_label
            if lastEle == 'region':
                if np.argmax(box) != 0:
                    #if [image_label[y_value : y_value + height, x_value : x_value + width].any() == label]:
                    wrong_label = False
                else:
                    wrong_label = True

            j = j + 5
    """for areas labeled with onyl one cell, crosses labeled"""
    if list_labels != 'none':
        list_labels = list_labels.split(",")
        if len(list_labels) == 3:
            first_image = True

        #print('image label min max after *100: ', image_label.min(), image_label.max())
        while (k < len(list_labels)):
            x_value = list_labels[k]
            y_value = list_labels[k + 1]

            label_str = list_labels[k + 2]
            x_value = round(float(x_value))
            y_value = round(float(y_value))
            pixel_value = image_label[y_value, x_value]
            print(x_value, y_value, label_str)
            label_str = label_str.strip()
            #print(label_str)
            # Single Cell, Aggregate, Parasite, Background, Other
            if (label_str == 'Single Cell'):
                label = 1
            elif (label_str == 'Aggregate'):
                label = 2
            elif (label_str == 'Platelet'):
                label = 3
            elif (label_str == 'Background'):
                label = 0
            elif (label_str == 'Other'):
                label = 5
            else:
                print('no label: default label = 0')
                label = 0
                #if pixel_value != 0:
            # appending all pixel_values?
            print('pixelvalues: ', x_value, y_value, pixel_value)

            #only no background label to class
            if lastEle == 'cell':
                print('pixel value: ', pixel_value)
                if pixel_value != 0:
                    print('label area with: ', label)
                    retval, image_label, mask1, rect = cv2.floodFill(
                        image_label, None, (x_value, y_value), label)
                    wrong_label = False
                else:
                    wrong_label = True
            k = k + 3
        # deletes unlabeled areas
    image_label[image_label > 5] = 0  #set all to background
    #SegmentationMask = image_label
    print(image_label.shape)
    SAVE_MASK = os.path.join(
        SAVE_IMAGES,
        'level' + str(level_num) + '_' + str(image_number) + '_SegMask.npy')
    print(SAVE_MASK)
    np.save(SAVE_MASK, image_label)
    #status = cv2.imwrite(os.path.join(SAVE_IMAGES, str(image_number)+'_SegMask.png'), SegmentationMask)
    print('SegMask saved in: ', SAVE_IMAGES)
    print('WRONG LABEL: ', wrong_label)
    #plot(SegmentationMask, 'Segmentation Mask labeled by User', image_number, SAVE_IMAGES, 'SegMask')

    return image_label, wrong_label, first_image
Example #34
0
def component_detection(binary,
                        min_obj_area=C.THRESHOLD_OBJ_MIN_AREA,
                        line_thickness=C.THRESHOLD_LINE_THICKNESS,
                        min_rec_evenness=C.THRESHOLD_REC_MIN_EVENNESS,
                        max_dent_ratio=C.THRESHOLD_REC_MAX_DENT_RATIO,
                        step_h=5,
                        step_v=2,
                        rec_detect=False,
                        show=False,
                        test=False):
    """
    :param binary: Binary image from pre-processing
    :param min_obj_area: Objects smaller than this area are ignored
    :param line_thickness: Objects slimmer than this are ignored
    :param min_rec_evenness: Below this evenness the object cannot be rectangular
    :param max_dent_ratio: Above this dent ratio the object cannot be rectangular
    :return: boundary: [top, bottom, left, right]
                        -> top, bottom: list of (column_index, min/max row border)
                        -> left, right: list of (row_index, min/max column border), the detected range of each row
    """
    mask = np.zeros((binary.shape[0] + 2, binary.shape[1] + 2), dtype=np.uint8)
    compos_all = []
    compos_rec = []
    compos_nonrec = []
    row, column = binary.shape[0], binary.shape[1]
    for i in range(0, row, step_h):
        for j in range(i % 2, column, step_v):
            if binary[i, j] == 255 and mask[i, j] == 0:
                # get connected area
                # region = util.boundary_bfs_connected_area(binary, i, j, mask)

                mask_copy = mask.copy()
                cv2.floodFill(binary, mask, (j, i), None, 0, 0,
                              cv2.FLOODFILL_MASK_ONLY)
                mask_copy = mask - mask_copy
                region = np.nonzero(mask_copy[1:-1, 1:-1])
                region = list(zip(region[0], region[1]))

                # ignore small area
                if len(region) < min_obj_area:
                    continue
                component = Component(region, binary.shape)
                # calculate the boundary of the connected area
                # ignore small area
                if component.width <= 3 or component.height <= 3:
                    continue
                if test:
                    print('Area:%d' % (len(region)))
                    draw.draw_boundary([component], binary.shape, show=True)
                # check if it is line by checking the length of edges
                # if component.area > min_obj_area * 5 and component.compo_is_line(line_thickness):
                #     continue
                compos_all.append(component)

                if rec_detect:
                    # rectangle check
                    if component.compo_is_rectangle(min_rec_evenness,
                                                    max_dent_ratio):
                        component.rect_ = True
                        compos_rec.append(component)
                    else:
                        component.rect_ = False
                        compos_nonrec.append(component)

                if show:
                    print('Area:%d' % (len(region)))
                    draw.draw_boundary(compos_all, binary.shape, show=True)

    # draw.draw_boundary(compos_all, binary.shape, show=True)
    if rec_detect:
        return compos_rec, compos_nonrec
    else:
        return compos_all
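The mask-diff step inside the loop above, isolated: with FLOODFILL_MASK_ONLY the input stays untouched and the reached pixels are recorded in the (shape + 2) mask, so diffing snapshots of an accumulating mask yields exactly the component found by one call. A sketch on synthetic data:

import cv2
import numpy as np

binary = np.zeros((40, 40), np.uint8)
cv2.rectangle(binary, (5, 5), (15, 15), 255, -1)
mask = np.zeros((42, 42), np.uint8)
before = mask.copy()
cv2.floodFill(binary, mask, (10, 10), 255, 0, 0, cv2.FLOODFILL_MASK_ONLY)
region = np.nonzero((mask - before)[1:-1, 1:-1])
print(len(region[0]))                        # 121: the 11x11 filled rectangle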
Example #35
0
  def callback(self,data):
    try:
      ir = self.bridge.imgmsg_to_cv2(data)
    except CvBridgeError as e:
      print(e)
      
    global lines,coeff,dilation, outer, circles, edges, cls 
    global grip,img,metr,zeros,gray,cloud
    
    mtxloaded = np.load('mtx.npy')
    distloaded = np.load('dist.npy')
    display_min = 1
    display_max = 30000
    
    img = lut_display(ir,display_min,display_max)
    h,  w = img.shape[:2]
    newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtxloaded,distloaded,(w,h),1,(w,h))
    zeros = np.zeros((h,w),np.uint8)
    img = cv2.undistort(img, mtxloaded, distloaded, None, newcameramtx)
  
    x,y,w,h = roi
    img = img[y:y+h, x:x+w]
   
    ##########################
    metric = np.zeros((3,5),np.float16)
    metric[0] = [3.1,3.5,3.9,2.7,4.3]    # BOLT
    metric[1] = [100,0.7,0.55,-1,-1]     # NUT
    metric[2] = [3.6, 4.0, 3.2,4.4,-1]   # BALK
    metr = ['BOLT','NUT','BALK']

    met_count = np.zeros((3),np.uint16)
    met_count[0] = 1
    met_count[1] = 2
    met_count[2] = 1
    met = []
    for i in range(3):
        met.append([])
    pairs = []
    grip = []
    for i in range(15):
        grip.append([])
    met_circ = []
    for i in range(3):
        met_circ.append([])
    
    gray = img
    gray = cv2.medianBlur(gray, 3)
    h, w = gray.shape[:2]
    #self.dstPlane(0)
    x,y=[400,200]
    #cv2.circle(img,(x,y),7,255,-1)
    #print(self.cloud[y-30,x+20])
    lines, outer, circles, edges, dilation,cls = line(gray)
    if lines is not None and 1 == 0:  # '1 == 0' deliberately disables this block
        a = len(lines)

        mask = np.zeros((h+2,w+2), np.uint8)

        coeff = []
        for i in range(lines.shape[0]):
            x1,y1,x2,y2 = lines.item(i,0,0),lines.item(i,0,1),lines.item(i,0,2 \
                                ), lines.item(i,0,3)                  
            lenght = int(math.sqrt((x1-x2)**2+(y1-y2)**2))
            if x1 != x2:
                k = round((float(y2 - y1))/(float(x2 - x1)),5)
                b = y1 - k*x1
            else:
                k = -337
                b = y1 - k*x1
            coeff.append([k,b,lenght,(x1+x2)/2,(y1+y2)/2,y2-y1,x2-x1])
        coeff = np.array(coeff)
        
        best_I, best_J, maxDist = 0,0,0
        k=0
        ###   removin' close lines   ###
        for i in range(lines.shape[0]):
            if (lines[i,0,0] != 0):
                for j in range(lines.shape[0]):
                    if (lines[j,0,0] != 0) and (i != j):
                        cdst = math.sqrt(math.pow(coeff[j,3]-coeff[i,3],2) + math.pow(coeff[j,4]-coeff[i,4],2))
                        if (abs((math.atan2(coeff[i,5],coeff[i,6])-math.atan2(coeff[j,5],coeff[j,6]))) < 0.2) and       (cdst < 100):
                            dst = dist(lines[i,0,0],lines[i,0,1],coeff[i,0],coeff[j,0],coeff[j,1])
                            if (coeff[j,2] < coeff[i,2]) and (dst<6):
                                lines[j]=0
                                k += 1
        ################################

        ###  showin' remaining lines ###
        for i in range(lines.shape[0]):
            cv2.line(img, (lines[i,0,0], lines[i,0,1]), (lines[i,0,2], lines[i,0,3]), \
                     (0,255,0),1,cv2.LINE_AA)
            
        ################################

        ### findin' parallel line   ####
        m=1
        a = len(lines)

        for i in range(lines.shape[0]):
            best_J=-1
            maxDist = 0
            d = dilation.item(lines[i,0,1],lines[i,0,0])
            if  (lines[i,0,0]!=0):
                for j in range(lines.shape[0]):
                    if (lines[j,0,0]!=0) and (abs((math.atan2(coeff[i,5],coeff[i,6])- \
                                        math.atan2(coeff[j,5],coeff[j,6]))) < 0.25) and (i!=j):
                        if (dilation[lines[j,0,1],lines[j,0,0]] == 255):
                            cv2.floodFill(dilation,mask,(lines[j,0,0],lines[j,0,1]),m*10+100)
                            m=m+1
                        if (dilation[lines[j,0,1],lines[j,0,0]] == dilation[lines[i,0,1],lines[i,0,0]]):
                            dst = dist(lines[i,0,0],lines[i,0,1],coeff[i,0],coeff[j,0],coeff[j,1])
                            if (dst >= maxDist) and (float(min(coeff[i,2],coeff[j,2]))/float( \
                                max(coeff[i,2],coeff[j,2])) > 0.5):
                                maxDist = dst              
                                best_J = j
            
            if (best_J != -1):
                
                c = 2
                d = dilation[lines[best_J,0,1],lines[best_J,0,0]]
                best_I = parallel(best_J, a, d)
                
                length = (coeff[best_I,2]+coeff[best_J,2])/2
                dst = dist(lines[best_I,0,0],lines[best_I,0,1],coeff[best_I,0],coeff[best_J,0],coeff[best_J,1])
                
                cdst = math.sqrt(math.pow(coeff[best_J,3]-coeff[best_I,3],2) + math.pow(coeff[best_J,4]-coeff[best_I,4],2))
                if (dst != 0):
                    
                    if ((float(min(dst, cdst)))/(float(max(dst, cdst))) > 0.5):
                        
                        cv2.line(img, (lines[best_J,0,0], lines[best_J,0,1]), \
                                     (lines[best_J,0,2], lines[best_J,0,3]), \
                                     (0,0,255),1,cv2.LINE_AA)
                        cv2.line(img, (lines[best_I,0,0], lines[best_I,0,1]), \
                                     (lines[best_I,0,2], lines[best_I,0,3]), \
                                     (0,0,255),1,cv2.LINE_AA)
                        if best_I not in pairs:
                            dst = dist(lines[best_I,0,0],lines[best_I,0,1],coeff[best_I,0],coeff[best_J,0],coeff[best_J,1])
                            m2 = (d-100)/10
                            if dst==0:dst=0.001
                            coef = float(max(coeff[best_J,2],coeff[best_I,2]))/float(dst)
                           
                            k = np.where(abs(metric - coef) <= 0.2)
                            
                            for i in range(len(k[0])):
                                grip[m2].append([best_I,best_J])
                                met[k[0][i]].append(m2)
                                
                                cv2.line(img, (lines[best_J,0,0], lines[best_J,0,1]), \
                                     (lines[best_J,0,2], lines[best_J,0,3]), \
                                     (255),3,cv2.LINE_AA)
                                
                                cv2.line(img, (lines[best_I,0,0], lines[best_I,0,1]), \
                                     (lines[best_I,0,2], lines[best_I,0,3]), \
                                     (255),3,cv2.LINE_AA)
                                
                        if best_I not in pairs:
                            pairs.append(best_I)
                        if best_J not in pairs:
                            pairs.append(best_J)
                              
        
        if circles is not None:
            for (x,y,r) in circles[0,:]:
                d = dilation[int(y),int(x)]
                m2 = (d-100)/10
                k = np.where(metric == 100)
                if d > 0:
                    for i in range(len(k[0])):
                        met_circ[k[0][i]].append(m2)
                        
                        cv2.circle(edges, (x, y), r, 255 , 2)
               
        c = 0
        c0 = 0
        for i in range(1,m):
            bolt = ifBolt(i,img)
            for j in range(len(metric)):
                for z in range(len(met[j])):
                    if met[j][z] == i:
                        c += 1
                for l in range(len(met_circ[j])):   
                    if met_circ[j][l] == i:
                        c0 = 1
                if c >= met_count[j] and (metric[j][0]==100 and c0==1 or
                        metric[j][0]!=100 and c0==0):
                    if (metr[j] == 'BOLT'):
                        if (bolt==1):
                            print(str(i)+' - '+metr[j])
                            for o in range(0, len(grip[i])):
                                gripper(grip[i][o][0],grip[i][o][1],j)
                    elif (metr[j] == 'BALK'):
                        if (bolt==0):
                            print(str(i)+' - '+metr[j])
                            for o in range(0, len(grip[i])):
                                gripper(grip[i][o][0],grip[i][o][1],j)
                            
                    else:    
                        print(str(i)+' - '+metr[j])
                        for o in range(0, len(grip[i])):
                            gripper(grip[i][o][0],grip[i][o][1],j)
                        
                c = 0
                c0 = 0
    #print(np.nanmean(self.h))

    cv2.imshow('img',img)
    cv2.imshow('img2',self.grayDst)
    #cv2.imshow('edges',edges)
    #cv2.imshow('cls',cls)

    cv2.waitKey(3)

    #print(met)
    
    try:
      self.image_pub.publish(self.bridge.cv2_to_imgmsg(ir))
    except CvBridgeError as e:
      print(e)
Example #36
0
def hole_filling(grey):
    binary = cv2.adaptiveThreshold(~grey, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, 15, -10)
    # if isShowImage:
    #     showCV2Image('binary', binary)

    binary1 = cv2.adaptiveThreshold(~grey, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY_INV, 15, -10)
    # if isShowImage:
    #     showCV2Image('binary1', binary1)

    a = cv2.bitwise_xor(grey, binary1)
    # if isShowImage:
    #     showCV2Image('a', a)

    # invert a
    a1 = cv2.bitwise_not(a)
    # if isShowImage:
    #     showCV2Image('a1', a1)
    # cv2.imwrite('APT003_a1.jpg', a1)  # the region that needs inverting

    rows, cols = binary.shape
    mask = np.zeros((rows + 2, cols + 2), np.uint8)
    cv2.floodFill(a, mask, (0, 0), 255)
    # if isShowImage:
    #     showCV2Image('fill', a)

    scale = 20
    # detect horizontal lines
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (cols // scale, 1))
    eroded = cv2.erode(binary, kernel, iterations=1)
    dilatedcol = cv2.dilate(eroded, kernel, iterations=1)
    # if isShowImage:
    #     showCV2Image("Dilated Image", dilatedcol)

    # detect vertical lines
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, rows // scale))
    eroded = cv2.erode(binary, kernel, iterations=1)
    dilatedrow = cv2.dilate(eroded, kernel, iterations=1)
    # if isShowImage:
    #     showCV2Image("Dilated Image", dilatedrow)

    # detect the table lines
    table = cv2.bitwise_or(dilatedcol, dilatedrow)
    # if isShowImage:
    #     showCV2Image("table line", table)

    # remove the table lines
    no_tab_line = cv2.bitwise_xor(binary, table)
    # if isShowImage:
    #     showCV2Image("no table line", no_tab_line)

    # invert black and white
    n_tab_line = cv2.bitwise_not(no_tab_line)
    # if isShowImage:
    #     showCV2Image("n_table line", n_tab_line)

    # cv2.imwrite('APT003_a2.jpg', n_tab_line)  # the image after line removal

    #     src = a1
    #         cv2.imread('APT003_a1.jpg', cv2.IMREAD_GRAYSCALE)  # directly returns a grayscale image
    #     if isShowImage:
    #         showCV2Image('src', src)
    #
    #     src1 = \
    #         n_tab_line
    # cv2.imread('APT003_a2.jpg', cv2.IMREAD_GRAYSCALE)
    #     if isShowImage:
    #         showCV2Image('src1', src1)

    th, im_th = cv2.threshold(a1, 220, 255, cv2.THRESH_BINARY_INV)
    h, w = im_th.shape

    im_floodfill = im_th.copy()

    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(im_floodfill, mask, (0, 0), 255)

    im_floodfill_inv = cv2.bitwise_not(im_floodfill)

    im_out = im_th | im_floodfill_inv

    out = cv2.bitwise_xor(im_floodfill, n_tab_line)
    # if isShowImage:
    #     showCV2Image('out1', out)

    final_out = cv2.bitwise_not(out)
    # if isShowImage:
    #     showCV2Image('f_out', final_out)
    #
    # cv2.imwrite('APT003_out.jpg', final_out)
    return final_out
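Typical usage of hole_filling, assuming a scanned grayscale form with table ruling on disk (the file names are illustrative):

import cv2

grey = cv2.imread('form.png', cv2.IMREAD_GRAYSCALE)
cleaned = hole_filling(grey)
cv2.imwrite('form_no_lines.png', cleaned)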
Example #37
0
def cropImage(base, filename, filetype):
    openfile = base + "/" + filename + "." + filetype
    # Load an color image in grayscale
    og_image = cv2.imread(openfile, 0)
    #cv2.imshow('Original sudoku',og_image)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()

    blank_image = np.zeros(shape=og_image.shape, dtype=np.uint8)
    #cv2.imshow('Blank Image',blank_image)
    #cv2.waitKey(0)

    blank_image = cv2.GaussianBlur(og_image, (11, 11), 0)
    #cv2.imshow('Gaussian Blur',og_image)
    #cv2.waitKey(0)

    blank_image = cv2.adaptiveThreshold(blank_image, 255,
                                        cv2.ADAPTIVE_THRESH_MEAN_C,
                                        cv2.THRESH_BINARY, 5, 2)
    #cv2.imshow('Adaptive Threshold',blank_image)
    #cv2.waitKey(0)

    blank_image = cv2.bitwise_not(blank_image)
    #cv2.imshow('Inverted Image',blank_image)
    #cv2.waitKey(0)

    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    blank_image = cv2.dilate(blank_image, kernel, iterations=1)
    #cv2.imshow('Dilated Image',blank_image)
    #cv2.waitKey(0)

    count = 0
    maxarea = -1
    for y in range(blank_image.shape[0]):
        for x in range(blank_image.shape[1]):
            if (blank_image[y, x] >= 128):
                area = cv2.floodFill(blank_image, None, (x, y), 64)
                if (area[0] > maxarea):
                    maxPt = (x, y)
                    maxarea = area[0]
    cv2.floodFill(blank_image, None, maxPt, 255)
    for y in range(blank_image.shape[0]):
        for x in range(blank_image.shape[1]):
            if (blank_image[y, x] == 64 and (x, y) != maxPt):  # skip only the seed itself, not its whole row and column
                area = cv2.floodFill(blank_image, None, (x, y), 0)
    blank_image = cv2.erode(blank_image, kernel, iterations=1)
    #cv2.imshow('Lines',blank_image)
    #cv2.waitKey(0)
    """
    minx=1000
    miny=1000
    maxx=-1
    maxy=-1
    for y in range(blank_image.shape[0]):
        for x in range(blank_image.shape[1]):
            if(blank_image[y,x]==255 and x<minx):
                minx = x
            if(blank_image[y,x]==255 and y<miny):
                miny = y
            if(blank_image[y,x]==255 and x>maxx):
                maxx = x
            if(blank_image[y,x]==255 and y>maxy):
                maxy = y
    print(minx,miny,maxx,maxy)
    for y in range(blank_image.shape[0]):
        for x in range(blank_image.shape[1]):
            if(blank_image[y,x]==0 and x>minx and x< maxx and y>miny and y<maxy):
                print(x,y)
                cv2.floodFill(blank_image,None, (x,y), 255)
                cv2.imshow('Lines1',blank_image)
                cv2.waitKey(0)
    cv2.imshow('Lines1',blank_image)
    cv2.waitKey(0)


    """
    edges = cv2.Canny(blank_image, 50, 150, apertureSize=3)  # note: unused; HoughLines below runs on blank_image
    thresh = 101
    while (1):
        lines = cv2.HoughLines(blank_image, 1, np.pi / 180, thresh)
        if (lines is None):
            thresh -= 1
        else:
            break
    for current in lines:
        for rho, theta in current:
            if (rho == 0 and theta == -100):
                continue
            a = np.cos(theta)
            b = np.sin(theta)
            if (theta > np.pi * 45 / 180 and theta < np.pi * 135 / 180):
                x1 = 0
                y1 = rho / b
                x2 = blank_image.shape[1]
                y2 = -x2 * (a / b) + rho / b
            else:
                y1 = 0
                x1 = rho / a
                y2 = blank_image.shape[0]
                x2 = -y2 * (b / a) + rho / a
        for pos in lines:
            if ((current == pos).all()):
                continue
            for rho1, theta1 in pos:
                if (rho1 == 0 and theta1 == -100):
                    continue
                if (abs(rho - rho1) < 20
                        and abs(theta - theta1) < np.pi * 10 / 180):
                    a1 = np.cos(theta1)
                    b1 = np.sin(theta1)
                    if (theta1 > np.pi * 45 / 180
                            and theta1 < np.pi * 135 / 180):
                        x11 = 0
                        y11 = rho1 / b1
                        x21 = blank_image.shape[1]
                        y21 = -x21 * (a1 / b1) + rho1 / b1
                    else:
                        y11 = 0
                        x11 = rho1 / a1
                        y21 = blank_image.shape[0]
                        x21 = -y21 * (b1 / a1) + rho1 / a1
                    if (((x11 - x1) * (x11 - x1) + (y11 - y1) *
                         (y11 - y1)) < 64 * 64
                            and ((x21 - x2) * (x21 - x2) + (y21 - y2) *
                                 (y21 - y2)) < 64 * 64):
                        current[0][0] = (current[0][0] + pos[0][0]) / 2
                        current[0][1] = (current[0][1] + pos[0][1]) / 2
                        pos[0][0] = 0
                        pos[0][1] = -100
    for someline in lines:
        for rho, theta in someline:
            a = np.cos(theta)
            b = np.sin(theta)
            if (theta != 0):
                m = -1 * (a / b)
                c = rho / b
                blank_image = cv2.line(
                    blank_image, (0, int(c)),
                    (blank_image.shape[1], int(m * blank_image.shape[1] + c)),
                    255, 1)
            else:
                blank_image = cv2.line(blank_image, (rho, 0),
                                       (rho, blank_image.shape[0]), 255, 1)
    #cv2.imshow('Hough Lines',blank_image)
    #cv2.waitKey(0)

    topEdge = (1000, 1000)
    topYIntercept = 100000
    topXIntercept = 0

    bottomEdge = (-1000, -1000)
    bottomYIntercept = 0
    bottomXIntercept = 0

    leftEdge = (1000, 1000)
    leftXIntercept = 100000
    leftYIntercept = 0

    rightEdge = (-1000, -1000)
    rightXIntercept = 0
    rightYIntercept = 0

    for current in lines:
        for rho, theta in current:
            if (rho == 0 and theta == -100):
                continue
            a = np.cos(theta)
            b = np.sin(theta)
            xIntercept = rho / a
            yIntercept = rho / (a * b)
            if (theta > np.pi * 80 / 180 and theta < np.pi * 100 / 180):
                if (rho < topEdge[0]):
                    topEdge = (rho, theta)
                if (rho > bottomEdge[0]):
                    bottomEdge = (rho, theta)
            elif (theta < np.pi * 10 / 180 or theta > np.pi * 170 / 180):
                if (xIntercept > rightXIntercept):
                    rightEdge = (rho, theta)
                    rightXIntercept = xIntercept
                if (xIntercept <= leftXIntercept):
                    leftEdge = (rho, theta)
                    leftXIntercept = xIntercept
    flines = [topEdge, bottomEdge, rightEdge, leftEdge]
    for someline in flines:
        rho = someline[0]
        theta = someline[1]
        a = np.cos(theta)
        b = np.sin(theta)
        if (theta != 0):
            m = -1 * (a / b)
            c = rho / b
            og_image = cv2.line(
                og_image, (0, int(c)),
                (blank_image.shape[1], int(m * blank_image.shape[1] + c)), 0,
                1)
        else:
            og_image = cv2.line(og_image, (rho, 0),
                                (rho, blank_image.shape[0]), 0, 1)
    #cv2.imshow('Final Lines',og_image)
    #cv2.waitKey(0)

    left1, left2 = [0, 0], [0, 0]
    right1, right2 = [0, 0], [0, 0]
    bottom1, bottom2 = [0, 0], [0, 0]
    top1, top2 = [0, 0], [0, 0]

    height = og_image.shape[0]
    width = og_image.shape[1]

    leftcos = np.cos(leftEdge[1])
    leftsin = np.sin(leftEdge[1])
    lefttan = (leftsin / leftcos)

    rightcos = np.cos(rightEdge[1])
    rightsin = np.sin(rightEdge[1])
    righttan = (rightsin / rightcos)

    if (leftEdge[1] != 0):
        left1[0] = 0
        left1[1] = leftEdge[0] / leftsin
        left2[0] = width
        left2[1] = -left2[0] / lefttan + left1[1]
    else:
        left1[1] = 0
        left1[0] = leftEdge[0] / leftcos
        left2[1] = height
        left2[0] = left1[0] - height * lefttan

    if (rightEdge[1] != 0):
        right1[0] = 0
        right1[1] = rightEdge[0] / rightsin
        right2[0] = width
        right2[1] = -right2[0] / righttan + right1[1]
    else:
        right1[1] = 0
        right1[0] = rightEdge[0] / rightcos
        right2[1] = height
        right2[0] = right1[0] - height * righttan

    bottomcos = np.cos(bottomEdge[1])
    bottomsin = np.sin(bottomEdge[1])
    bottomtan = (bottomsin / bottomcos)

    topcos = np.cos(topEdge[1])
    topsin = np.sin(topEdge[1])
    toptan = (topsin / topcos)

    bottom1[0] = 0
    bottom1[1] = bottomEdge[0] / bottomsin
    bottom2[0] = width
    bottom2[1] = -bottom2[0] / bottomtan + bottom1[1]

    top1[0] = 0
    top1[1] = topEdge[0] / topsin
    top2[0] = width
    top2[1] = -top2[0] / toptan + top1[1]

    #Next, we find the intersection of  these four lines
    leftA = left2[1] - left1[1]
    leftB = left1[0] - left2[0]
    leftC = leftA * left1[0] + leftB * left1[1]

    rightA = right2[1] - right1[1]
    rightB = right1[0] - right2[0]
    rightC = rightA * right1[0] + rightB * right1[1]

    topA = top2[1] - top1[1]
    topB = top1[0] - top2[0]
    topC = topA * top1[0] + topB * top1[1]

    bottomA = bottom2[1] - bottom1[1]
    bottomB = bottom1[0] - bottom2[0]
    bottomC = bottomA * bottom1[0] + bottomB * bottom1[1]

    #Intersection of left and top
    detTopLeft = leftA * topB - leftB * topA
    ptTopLeft = ((topB * leftC - leftB * topC) / detTopLeft,
                 (leftA * topC - topA * leftC) / detTopLeft)

    #Intersection of top and right
    detTopRight = rightA * topB - rightB * topA
    ptTopRight = ((topB * rightC - rightB * topC) / detTopRight,
                  (rightA * topC - topA * rightC) / detTopRight)

    #Intersection of right and bottom
    detBottomRight = rightA * bottomB - rightB * bottomA
    ptBottomRight = ((bottomB * rightC - rightB * bottomC) / detBottomRight,
                     (rightA * bottomC - bottomA * rightC) / detBottomRight)

    #Intersection of bottom and left
    detBottomLeft = leftA * bottomB - leftB * bottomA
    ptBottomLeft = ((bottomB * leftC - leftB * bottomC) / detBottomLeft,
                    (leftA * bottomC - bottomA * leftC) / detBottomLeft)

    maxLength = (ptBottomLeft[0] -
                 ptBottomRight[0]) * (ptBottomLeft[0] - ptBottomRight[0]) + (
                     ptBottomLeft[1] - ptBottomRight[1]) * (ptBottomLeft[1] -
                                                            ptBottomRight[1])
    temp = (ptTopRight[0] - ptBottomRight[0]) * (
        ptTopRight[0] - ptBottomRight[0]
    ) + (ptTopRight[1] - ptBottomRight[1]) * (ptTopRight[1] - ptBottomRight[1])

    if (temp > maxLength):
        maxLength = temp
    temp = (ptTopRight[0] - ptTopLeft[0]) * (ptTopRight[0] - ptTopLeft[0]) + (
        ptTopRight[1] - ptTopLeft[1]) * (ptTopRight[1] - ptTopLeft[1])

    if (temp > maxLength):
        maxLength = temp
    temp = (ptBottomLeft[0] - ptTopLeft[0]) * (
        ptBottomLeft[0] - ptTopLeft[0]) + (ptBottomLeft[1] - ptTopLeft[1]) * (
            ptBottomLeft[1] - ptTopLeft[1])

    if (temp > maxLength):
        maxLength = temp

    maxLength = int(math.sqrt(maxLength))

    src = (ptTopLeft, ptTopRight, ptBottomRight, ptBottomLeft)
    src = np.array(src, np.float32)
    dst = ((0, 0), (maxLength - 1, 0), (maxLength - 1, maxLength - 1),
           (0, maxLength - 1))
    dst = np.array(dst, np.float32)
    #print(src,dst)
    undistort = np.zeros(shape=(maxLength, maxLength), dtype=np.uint8)
    undistort = cv2.warpPerspective(og_image,
                                    cv2.getPerspectiveTransform(src, dst),
                                    dsize=(maxLength, maxLength))
    #cv2.imshow('Final Image',undistort)
    #cv2.waitKey(0)

    closefile = base + "/" + filename + "-cropped." + filetype
    cv2.imwrite(closefile, undistort)


#cropImage("sudoku-giant","jpeg")
def distance_map_simple(floor_mask,
                        m_per_pix,
                        min_robot_width_m,
                        robot_x_pix,
                        robot_y_pix,
                        robot_ang_rad,
                        disallow_too_narrow=True,
                        display_on=False,
                        verbose=False):

    # min_robot_width_m : The best case minimum width of the robot in meters when moving forward and backward.
    traversable_mask = floor_mask

    # model the robot's footprint as being traversable
    draw_robot_footprint_rectangle(robot_x_pix, robot_y_pix, robot_ang_rad,
                                   m_per_pix, traversable_mask)
    footprint_test_image = np.zeros_like(traversable_mask)
    draw_robot_footprint_rectangle(robot_x_pix, robot_y_pix, robot_ang_rad,
                                   m_per_pix, footprint_test_image)
    if display_on:
        cv2.imshow('robot footprint drawing', footprint_test_image)
        cv2.imshow('floor mask after drawing robot footprint',
                   traversable_mask)

    # Optimistic estimate of robot width. Use ceil to account for
    # possible quantization. Consider adding a pixel, also.
    min_robot_radius_pix = np.ceil((min_robot_width_m / 2.0) / m_per_pix)

    # Fill in small non-floor regions (likely noise)
    #
    # TODO: improve this. For example, fill in isolated pixels that
    # are too small to trust as obstacles. Options include a hit or
    # miss filter, matched filter, or speckle filter.
    fill_in = True
    if fill_in:
        kernel = np.ones((3, 3), np.uint8)
        traversable_mask = cv2.morphologyEx(traversable_mask, cv2.MORPH_CLOSE,
                                            kernel)
        if display_on:
            cv2.imshow('traversable_mask after filling', traversable_mask)

    # ERROR? : Should the floodfill operation occur after filtering out
    # candidate robot poses based on the footprint radius? Was that too
    # aggressive in the past, so it got dropped?

    # Select the connected component of the floor on which the robot
    # is located.
    h, w = traversable_mask.shape
    new_traversable_mask = np.zeros((h + 2, w + 2), np.uint8)
    #possibly add to floodFill in the future: flags = cv2.FLOODFILL_FIXED_RANGE
    cv2.floodFill(traversable_mask, new_traversable_mask,
                  (robot_x_pix, robot_y_pix), 255)
    traversable_mask = 255 * new_traversable_mask[1:-1, 1:-1]

    # In previous versions, the traversability mask has treated
    # unobserved pixels and observed non-floor pixels
    # differently. Such as by treating unobserved pixels
    # optimistically as traversable.

    # compute distance map: distance from untraversable regions
    #
    # cv2.DIST_L2 : Euclidean distance
    #
    # 5 is the mask size : "finds the shortest path to the nearest
    # zero pixel consisting of basic shifts: horizontal, vertical,
    # diagonal, or knight's move (the latest is available for a 5x5
    # mask)" - OpenCV documentation
    distance_map = cv2.distanceTransform(traversable_mask, cv2.DIST_L2, 5)
    if display_on:
        norm_dist_transform = cv2.normalize(distance_map, None, 0, 255,
                                            cv2.NORM_MINMAX, cv2.CV_8U)
        cv2.imshow('distance map without threshold for the robot width',
                   norm_dist_transform)

    # Restricts the maximum distance of the distance_map. This will
    # favor shorter paths (i.e., straight line paths) when a corridor
    # is wide enough instead of moving to the middle of the
    # corridor. When the corridor is narrower than the threshold, the
    # robot will prefer paths that move it to the center of the
    # corridor. However, simple path planning via 4 connected grid and
    # Dijkstra's algorithm results in vertical and horizontal motions
    # in flat regions rather than point-to-point straight lines.
    clip_max_distance = False
    if clip_max_distance:
        max_distance = 3.0 * min_robot_radius_pix
        print('max_distance =', max_distance)
        print('np.max(distance_map) =', np.max(distance_map))
        # should perform in place clipping
        np.clip(distance_map, None, max_distance, distance_map)
        print('after clipping np.max(distance_map) =', np.max(distance_map))
        if display_on:
            norm_dist_transform = cv2.normalize(distance_map, None, 0, 255,
                                                cv2.NORM_MINMAX, cv2.CV_8U)
            cv2.imshow('distance map with clipped maximum distance',
                       norm_dist_transform)

    if disallow_too_narrow:
        # set parts of the distance transform that represent free space
        # less than the robot would require to zero
        distance_map[distance_map < min_robot_radius_pix] = 0
        if display_on:
            norm_dist_transform = cv2.normalize(distance_map, None, 0, 255,
                                                cv2.NORM_MINMAX, cv2.CV_8U)
            cv2.imshow('distance map with robot width threshold',
                       norm_dist_transform)

    # traversable_mask is a binary image that estimates where the
    # robot can navigate given the robot's current pose and the map,
    # but ignoring the robot's radius.

    # distance_map is a scalar image that estimates the distance to
    # the boundaries of the traversable mask.
    return distance_map, traversable_mask
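# A sketch of the component-selection idiom used above: cv2.floodFill writes
# 1s into the (h + 2, w + 2) mask, and trimming the one-pixel border recovers
# an image-sized mask of the connected component containing the seed. The
# helper below is illustrative, not part of the original code; note that the
# fill also paints the component white in the input image itself.
def component_containing(binary_image, seed_x, seed_y):
    h, w = binary_image.shape
    fill_mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(binary_image, fill_mask, (seed_x, seed_y), 255)
    return 255 * fill_mask[1:-1, 1:-1]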
def constructMap(lineList, width, height, numLines, robotPose, outputfile):
    
    # The image
    size = height, width, 3
    m = np.zeros(size, dtype=np.uint8)
    m[:] = (150, 150, 150)
    m = cv2.cvtColor(m, cv2.COLOR_BGR2GRAY)

    # Drawing the lines
    for n in lineList:
       p1 = (n[0][0], height-n[0][1])
       p2 = (n[1][0], height-n[1][1])
       cv2.line(m, p1, p2, 0, 2) # thickness is a line width, not a pixel count: e.g. 2 draws roughly 2+1 pixels

    dlimit=5

    #robotPose = (robotPose[0], height-robotPose[1])
    robotPose = (width/2, height/3)
    print robotPose

    # Flood-fill mask: 2 pixels larger than the image in each dimension
    msize = height+2, width+2, 3
    mask = np.zeros(msize, dtype=np.uint8)
    mask[:] = 0
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
  
    # Dilate walls (erode free-space)
    kernel = np.ones((3,3),np.uint8)
    m = cv2.erode(m,kernel,iterations = 1)

    # fill free space with white color
    diff = (6, 6, 6)
    cv2.floodFill(m, mask, robotPose, 255, diff, diff, cv2.FLOODFILL_FIXED_RANGE)
    
    #print m

    # store image in archive file a text
    f = open(outputfile, 'w')
    f.write(str(width)+' '+str(height)+'\n')
    counter = 0
    for i in range(height):
        s = ''
        for j in range(width):
            if m[i,j] == 150:
                s+="- "
            if m[i,j] == 0:
                s+="1 "
                counter+=1
            if m[i,j] == 255:
                s+="0 "
                counter+=1
                 
        s+='\n'
        f.write(s)
    f.close()
    print counter

    cv2.namedWindow("The map")
    rm = cv2.resize(m, (0,0), fx=0.4, fy=0.4)

    while True:
        cv2.imshow("The map", rm)    
        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break
    cv2.destroyAllWindows()
#hole filling
sketch = "/Users/sarthakdandriyal/Downloads/sketch.jpg"
im_in = cv2.imread(sketch, cv2.IMREAD_GRAYSCALE)
th, im_th = cv2.threshold(im_in, 220, 255, cv2.THRESH_BINARY_INV)
im_floodfill = im_th.copy()

# Mask used for flood filling.
# Note that the size needs to be 2 pixels larger than the image.
h, w = im_th.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)

# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0, 0), 255)

# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)

# Combine the two images to get the foreground.
im_out = im_th | im_floodfill_inv

im = im_floodfill  # note: im_out (the hole-filled foreground) is computed above, but im_floodfill is what gets saved
cv2.imwrite("/Users/sarthakdandriyal/Desktop/vb.jpg", im)
Example #41
0
def main():
    if len(argv) != 2:
        print("Usage: videomerge.py <video_name>")
        return
    vid_name = argv[1]


    cap = cv2.VideoCapture(vid_name)

    fps = cap.get(cv2.CAP_PROP_FPS)  * speed_mult
    msec_per_frame = int(1000/fps)

    windowName = 'Videomerge : ' + vid_name
    cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
    cv2.setMouseCallback(windowName, click_callback)

    global mouseX, mouseY
    mouseX, mouseY = -1, -1

    # Capture frame-by-frame
    ret, base_img = cap.read()


    running = False
    writefile = True
    while ret:
        

        # Display the resulting frame
        cv2.imshow(windowName,base_img)

        if running:
            #print("new frame")
            ret, base_img = cap.read()


        k = cv2.waitKey(msec_per_frame) & 0xFF

        if k  == 27: #esc
            writefile = False #quit
            running = False
            break

        elif k == 32: # space   
            running = not running
            #print("SPACE")
      

        elif mouseX >0 and mouseY >0: #click happened
            #print("CLICK HAPPENED!")
            break   #storing click in mouseX/Y, using current frame as start frame
    
    #end while
    print("Starting frame found, beginning merge")

    startcenter = (int(mouseX), int(mouseY))
    
    base_gray = cv2.cvtColor(base_img, cv2.COLOR_BGR2GRAY)
    composite_img = base_img.copy()
    h, w = base_img.shape[:2]

    ret, above_img = cap.read()
    if not ret: #video finished
        running = False
                

    while ret and writefile:
        global opening_size

        if running:
            ret, above_img = cap.read()
            if not ret: #video finished
                running = False
                break


        above_gray = cv2.cvtColor(above_img, cv2.COLOR_BGR2GRAY)
        # cv2.absdiff avoids uint8 wraparound; np.abs() on a raw uint8
        # difference is not a true absolute difference
        _, thresh = cv2.threshold(cv2.absdiff(above_gray, base_gray),20,255,cv2.THRESH_BINARY)
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, np.ones((opening_size,opening_size),np.uint8))
        cv2.floodFill(thresh, np.zeros((h + 2, w + 2), np.uint8), startcenter, 0)
        composite_img = cv2.bitwise_and(base_img, base_img, mask=cv2.bitwise_not(thresh))
        composite_img += cv2.bitwise_or(above_img, above_img, mask=thresh)

        # Display the resulting frame
        cv2.imshow(windowName,composite_img)

        k = cv2.waitKey(msec_per_frame) & 0xFF
        if k  == 27: #esc
            writefile = False
            running = False #quit

        elif k == 13: # enter
            # we want to save this composite as the new base   
            base_img = composite_img.copy()

        elif k == 32: # space   
            running = not running
            #print("SPACE")

        elif k == ord('+'): #increase opening size
            opening_size+=1

        elif k == ord('-'): #decrease opening size, min 1
            opening_size = 1 if opening_size==1 else opening_size-1

        elif k == ord('d'): #done! save
            running = False
            break

    #end while
    

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()


    if writefile: #if we finished correctly
        im_name = os.path.splitext(vid_name)[0]+'.png'
        cv2.imwrite(im_name, base_img)
Example #42
0
# Run a quick test
# Read the image
Image_Test = cv2.imread(UnProcessedImage_Dir + 'Chlorella/1.tif')
# Convert the source image to grayscale
GrayImage = cv2.cvtColor(Image_Test, cv2.COLOR_BGR2GRAY)
cv2.imwrite(TestImg_Save_Dir + 'GrayImage.jpg', GrayImage)

# Binarize the grayscale image
ret, BinaryImage = cv2.threshold(GrayImage, 200, 255, cv2.THRESH_BINARY_INV)
cv2.imwrite(TestImg_Save_Dir + 'BinaryImage.jpg', BinaryImage)

# Fill holes in the binary image
Binary_FloodFill = BinaryImage.copy()
h, w = BinaryImage.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(Binary_FloodFill, mask, (0, 0), 255)
Binary_FloodFill_Inv = cv2.bitwise_not(Binary_FloodFill)
Binary_FloodFill_Out = BinaryImage | Binary_FloodFill_Inv
cv2.imwrite(TestImg_Save_Dir + 'FloodFillImage.jpg', Binary_FloodFill_Out)

# Extract contours (OpenCV 3.x three-value findContours signature)
_, contours, hierarchy = cv2.findContours(Binary_FloodFill_Out, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(Image_Test, contours, -1, (0, 0, 255), 1)
cv2.imwrite(TestImg_Save_Dir + 'ContoursImage.jpg', Image_Test)


# Display for testing
# cv2.imshow('Test', GrayImage)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
Example #43
0
e = new.max()
new = new - w
new = new / e * 255
new = np.array(new, dtype='uint8')
# -----------------------------------------------------------------------

# Otsu thresholding and hole filling
# -----------------------------------------------------------------------
# Otsu's method is used for thresholding
ret2, th2 = cv2.threshold(new, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Check the threshold chosen by Otsu's method
print("threshold of Otsu filtering:", ret2)
# Copy the Otsu result into hole
hole = th2.copy()
# Find the holes and fill them
cv2.floodFill(hole, None, (0, 0), 255)
hole = cv2.bitwise_not(hole)
filledEdgesOut = cv2.bitwise_or(th2, hole)
# ---------------------------------------------------------------------

# Erosion
# -------------------------------------------------------------------------------
# A disk of diameter 5 is used as the structuring element
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
# erode the image
eroded = cv2.erode(filledEdgesOut, kernel)
# -------------------------------------------------------------------------------

# Eliminate connected regions
# -------------------------------------------------------------------------------
import cv2
import sys

path = sys.argv[1]

img = cv2.imread(cv2.samples.findFile(path), cv2.IMREAD_GRAYSCALE)

if img is None:
    sys.exit("Could not read the image")

cv2.imshow("Image - Before", img)
rows, columns = img.shape

# cleaning borders
for i in range(columns):
    cv2.floodFill(img, None, (i, rows-1), 0)
    cv2.floodFill(img, None, (i, 0), 0)

for j in range(rows):
    cv2.floodFill(img, None, (columns-1, j), 0)
    cv2.floodFill(img, None, (0, j), 0)

cv2.imshow('Image - Border cleaning', img)
cv2.imwrite("Image - Border cleaning.png", img)

img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

#counting all objects after removing those touching the borders
noObjects = 0
for i in range(columns):
    for j in range(rows):
Example #45
0
    def verify_color(self, rotate_rect,src_image):
        img_h,img_w = src_image.shape[:2]
        mask = np.zeros(shape=[img_h+2,img_w+2],dtype=np.uint8)
        connectivity = 4 # 4-connectivity: the seed's up/down/left/right neighbors whose values lie within [loDiff, upDiff] of the seed are painted with new_value; 8-connectivity can also be used
        loDiff,upDiff = 30,30
        new_value = 255
        flags = connectivity
        flags |= cv2.FLOODFILL_FIXED_RANGE  # compare each pixel against the seed pixel; without this flag, pixels are compared against their neighbors
        flags |= new_value << 8
        flags |= cv2.FLOODFILL_MASK_ONLY # with this flag the original image is left unchanged and only the mask image is filled

        rand_seed_num = 5000 # number of random seed candidates to generate
        valid_seed_num = 200 # number of valid seeds to pick out of rand_seed_num
        adjust_param = 0.1
        box_points = cv2.boxPoints(rotate_rect)
        box_points_x = [n[0] for n in box_points]
        box_points_x.sort(reverse=False)
        adjust_x = int((box_points_x[2]-box_points_x[1])*adjust_param)
        col_range = [box_points_x[1]+adjust_x,box_points_x[2]-adjust_x]
        box_points_y = [n[1] for n in box_points]
        box_points_y.sort(reverse=False)
        adjust_y = int((box_points_y[2]-box_points_y[1])*adjust_param)
        row_range = [box_points_y[1]+adjust_y, box_points_y[2]-adjust_y]
        # If the above method leaves the seeds very little room to move horizontally or vertically, place the random seeds along the diagonals of the rotated rectangle instead
        if (col_range[1]-col_range[0])/(box_points_x[3]-box_points_x[0])<0.4\
            or (row_range[1]-row_range[0])/(box_points_y[3]-box_points_y[0])<0.4:
            points_row = []
            points_col = []
            for i in range(2):
                pt1,pt2 = box_points[i],box_points[i+2]
                x_adjust,y_adjust = int(adjust_param*(abs(pt1[0]-pt2[0]))),int(adjust_param*(abs(pt1[1]-pt2[1])))
                if (pt1[0] <= pt2[0]):
                    pt1[0], pt2[0] = pt1[0] + x_adjust, pt2[0] - x_adjust
                else:
                    pt1[0], pt2[0] = pt1[0] - x_adjust, pt2[0] + x_adjust
                if (pt1[1] <= pt2[1]):
                    pt1[1], pt2[1] = pt1[1] + y_adjust, pt2[1] - y_adjust
                else:
                    pt1[1], pt2[1] = pt1[1] - y_adjust, pt2[1] + y_adjust
                temp_list_x = [int(x) for x in np.linspace(pt1[0],pt2[0],int(rand_seed_num /2))]
                temp_list_y = [int(y) for y in np.linspace(pt1[1],pt2[1],int(rand_seed_num /2))]
                points_col.extend(temp_list_x)
                points_row.extend(temp_list_y)
        else:
            points_row = np.random.randint(row_range[0],row_range[1],size=rand_seed_num)
            points_col = np.linspace(col_range[0],col_range[1],num=rand_seed_num).astype(int)  # np.int is deprecated (removed in NumPy 1.24)

        points_row = np.array(points_row)
        points_col = np.array(points_col)
        hsv_img = cv2.cvtColor(src_image, cv2.COLOR_BGR2HSV)
        h,s,v = hsv_img[:,:,0],hsv_img[:,:,1],hsv_img[:,:,2]
        # Flood fill from the randomly generated seeds one by one; ideally the whole license plate ends up filled
        flood_img = src_image.copy()
        seed_cnt = 0
        for i in range(rand_seed_num):
            rand_index = np.random.choice(rand_seed_num,1,replace=False)
            row,col = points_row[rand_index],points_col[rand_index]
            # require the random seed to fall on the plate's background color
            if (((h[row,col]>26)&(h[row,col]<34))|((h[row,col]>100)&(h[row,col]<124)))&(s[row,col]>70)&(v[row,col]>70):
                cv2.floodFill(src_image, mask, (col,row), (255, 255, 255), (loDiff,) * 3, (upDiff,) * 3, flags)
                cv2.circle(flood_img,center=(col,row),radius=2,color=(0,0,255),thickness=2)
                seed_cnt += 1
                if seed_cnt >= valid_seed_num:
                    break
        #====================== for debugging ======================#
        show_seed = np.random.uniform(1,100,1).astype(np.uint16)
        # cv2.imshow('floodfill'+str(show_seed),flood_img)
        # cv2.imshow('flood_mask'+str(show_seed),mask)
        #====================== for debugging ======================#
        # Collect the filled pixels from the mask and compute the minimum-area bounding rectangle of the point set
        mask_points = []
        for row in range(1,img_h+1):
            for col in range(1,img_w+1):
                if mask[row,col] != 0:
                    mask_points.append((col-1,row-1))
        mask_rotateRect = cv2.minAreaRect(np.array(mask_points))
        if self.verify_scale(mask_rotateRect):
            return True,mask_rotateRect
        else:
            return False,mask_rotateRect
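    # Vectorized alternative (an illustrative sketch, not part of the original
    # code) to the nested loops above that collect the filled mask pixels:
    # np.nonzero returns (rows, cols), cv2.minAreaRect expects (x, y) points,
    # and trimming the one-pixel mask border replaces the manual -1 offsets.
    def mask_to_rotated_rect(self, mask):
        rows, cols = np.nonzero(mask[1:-1, 1:-1])
        points = np.column_stack((cols, rows)).astype(np.int32)
        return cv2.minAreaRect(points)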
rgbIm = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

# Show the original image
fig = plt.figure()
subplot(121)
plt.gray()
imshow(rgbIm)
title(u'Original image', fontproperties=font)
axis('off')

# Get the image dimensions
h, w = im.shape[:2]
# Flood fill
diff = (6, 6, 6)
mask = zeros((h + 2, w + 2), numpy.uint8)
cv2.floodFill(im, mask, (10, 10), (255, 255, 0), diff, diff)

# Show the flood-filled result
subplot(122)
imshow(im)
title(u'Flood fill', fontproperties=font)
axis('off')

show()
fig.savefig("../images/ch10/floodFill.png")

# Show the flood-filled result in an OpenCV window
cv2.imshow('flood fill', im)
cv2.waitKey()
# Save the result
cv2.imwrite('../images/ch10/floodFill.jpg', im)
Example #47
0
def goal_detect(img,body_position):
    region_upper=int(img.shape[0]*0.3)
    region_lower=int(img.shape[0]*0.6)
    # determine whether the player is on the left or the right
    # player on the left means the target block is on the right
    if body_position[0]<(img.shape[1]/2.0):
        region_left=body_position[0]+30
        region_right=img.shape[1]-30
    else:
        region_left=30
        region_right=body_position[0]-30
    region = img[region_upper:region_lower,region_left:region_right]
    # cv2.imshow("test1",region)

    edge_list = [0, 0, 0, 0]
    for i in range(3):
        region_gray = cv2.cvtColor(region, cv2.COLOR_BGR2HSV)[:, :, i]
        # take each of the three HSV channels

        # region_gray=cv2.equalizeHist(region_gray)
        # run edge detection on each channel and store the results in the list
        edge_list[i] = cv2.Canny(region_gray, 100, 160)
        # cv2.imshow(str(i),edge_list[i])
    # convert the original block region to grayscale
    region_gray = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
    # region_gray = cv2.equalizeHist(region_gray)
    # egion_gray = cv2.GaussianBlur(region_gray, (5, 5), 0)
    # cv2.imshow("gray",region_gray)
    # use a 5x5 aperture for the operator
    edge_list[3] = cv2.Canny(region_gray, 100, 160, apertureSize=5)
    # pixels above the high threshold are edges, judged by gradient magnitude
    edge_list[1] = np.bitwise_or(edge_list[0], edge_list[1])
    edge_list[2] = np.bitwise_or(edge_list[2], edge_list[1])
    edge_final = np.bitwise_or(edge_list[3], edge_list[2])

    # cv2.imshow('edge', edge_final)

    contours = cv2.findContours(edge_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[1] if imutils.is_cv3() else contours[0]
    # find the contours
    # max_contour = max(contours, key=cv2.contourArea)
    # max_contour = cv2.convexHull(max_contour)

    y = 99999
    for contour in contours:
        # sort the contour points by their y coordinate
        # using a lambda as the sort key
        sorted_top = sorted(contour, key=lambda contour: contour[0][1])
        # print(sorted_top)
        if sorted_top[0][0][1] < y:
            raw_x = x = sorted_top[0][0][0]
            raw_y = y = sorted_top[0][0][1]

    # locate the position of the first (topmost) white point
    print((int(x + region_left), int(y + region_upper)))
    # Create an all-zero mask for the block region.
    # For use on a whole image the mask must be (rows + 2) x (cols + 2): the
    # algorithm sets the one-pixel border ring to 1, and only positions where
    # the mask is 0 can be flooded, so the mask is initialized to zeros.
    # seed: the flood-fill seed point; its pixel value determines which
    # similarly colored neighboring pixels get flooded
    mask = np.zeros((region_lower - region_upper + 2, region_right - region_left + 2), np.uint8)
    mask = cv2.floodFill(region, mask, (raw_x, raw_y + 16), [0, 100, 0])[2]
    # the region of interest is marked with 1 in the mask
    # print(mask)
    cv2.circle(img, (int(x + region_left), int(y + region_upper)), 5, (255, 0, 5), -1)
    # cv2.imshow("region",region)
    # draw the first white point
    #
    M = cv2.moments(mask)
    # compute the centroid from the image moments
    x = int(M["m10"] / M["m00"])
    y = int(M["m01"] / M["m00"])
    print("yuan",[x,y])
    if y < raw_y or abs(x - raw_x) > 40:
        x = raw_x
        y = raw_y
        y += region_upper
        x += region_left
        y = (-abs(x - body_position[0]) / pow(3, 0.5) + body_position[1])
        print("TESTTEST")

    # cv2.imshow("test",mask)
    # cv2.imshow("edge", edge_final)
    else:
        y += region_upper
        x += region_left

    # y = (-abs(x-body_position[0])/pow(3,0.5)+body_position[1])
    cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1)
    # cv2.imshow('dealed', img)
    plt_show0(img)
    return [x, y]
def find_exits(floor_mask,
               max_height_image,
               m_per_pix,
               min_robot_width_m,
               robot_x_pix,
               robot_y_pix,
               display_on=False):
    # A mask with optimistic traversable pixels consisting of floor
    # and unobserved pixels. This should result in traversable paths
    # that move from observed floor to unobserved floor, which can be
    # used to find exits.
    traversable_selector = (floor_mask > 0) | (max_height_image == 0)
    traversable_mask = 255 * np.uint8(traversable_selector)

    # Fill in small non-floor regions (likely noise)

    # TODO: improve this. For example, fill in isolated pixels that
    # are too small to trust as obstacles. Options include a hit or
    # miss filter, matched filter, or speckle filter.
    fill_in = True
    if fill_in:
        kernel = np.ones((3, 3), np.uint8)
        traversable_mask = cv2.morphologyEx(traversable_mask, cv2.MORPH_CLOSE,
                                            kernel)

    # Optimistic estimate of robot width. Use ceil to account for
    # possible quantization. Consider adding a pixel, also.
    min_robot_radius_pix = np.ceil((min_robot_width_m / 2.0) / m_per_pix)

    # compute distance map: distance from untraversable regions
    #
    # cv2.DIST_L2 : Euclidean distance
    #
    # 5 is the mask size : "finds the shortest path to the nearest
    # zero pixel consisting of basic shifts: horizontal, vertical,
    # diagonal, or knight's move (the latest is available for a 5x5
    # mask)" - OpenCV documentation
    distance_map = cv2.distanceTransform(traversable_mask, cv2.DIST_L2, 5)

    # fill in floor mask holes
    #kernel = np.ones((11,11), np.uint8)
    kernel_width_pix = 11
    kernel_radius_pix = (kernel_width_pix - 1) // 2  # integer division: cv2.circle needs int coordinates
    kernel = np.zeros((kernel_width_pix, kernel_width_pix), np.uint8)
    cv2.circle(kernel, (kernel_radius_pix, kernel_radius_pix),
               kernel_radius_pix, 255, -1)
    closed_floor_mask = cv2.morphologyEx(floor_mask, cv2.MORPH_CLOSE, kernel)

    # find the boundary of the floor
    dilated_floor = cv2.dilate(closed_floor_mask, kernel, iterations=1)
    floor_boundary = dilated_floor - closed_floor_mask

    # Estimate the locations of exits. Creates a binary image with exits marked with 255.
    min_connectivity_dist_pix = min_robot_radius_pix  #(min_connectivity_dist_mm / mm_per_pix) - 2.0
    connectivity_image = distance_map.copy()
    # remove regions that are too narrow for the robot to drive through
    connectivity_image[connectivity_image < min_connectivity_dist_pix] = 0
    # Only keep pixels that are at the boundary of the observed
    # floor. This should cut between the observed and unobserved
    # traversable regions.
    connectivity_image[floor_boundary == 0] = 0
    # convert float image to uint8 image
    map_exits = np.zeros_like(connectivity_image, dtype=np.uint8)
    # exit pixels have a value of 255
    map_exits[connectivity_image > 0] = 255
    # enlarge the map exit pixels so that they will intersect with the floor
    exit_dilation = True
    if exit_dilation:
        # attempt to increase the chance of a vertex being labeled as an exit
        # create kernel for morphological operations
        kernel_width_pix = 11
        kernel_radius_pix = (kernel_width_pix - 1) // 2  # integer division for cv2.circle
        kernel = np.zeros((kernel_width_pix, kernel_width_pix), np.uint8)
        cv2.circle(kernel, (kernel_radius_pix, kernel_radius_pix),
                   kernel_radius_pix, 255, -1)
        map_exits = cv2.dilate(map_exits, kernel, iterations=1)

    # Select the connected component of the floor on which the robot
    # is located.
    h, w = closed_floor_mask.shape
    accessible_floor_mask = np.zeros((h + 2, w + 2), np.uint8)
    #possibly add to floodFill in the future: flags = cv2.FLOODFILL_FIXED_RANGE
    cv2.floodFill(floor_mask, accessible_floor_mask,
                  (robot_x_pix, robot_y_pix), 255)
    accessible_floor_mask = 255 * accessible_floor_mask[1:-1, 1:-1]

    # only consider map exit candidates that are connected to parts of
    # the floor that can be reached
    map_exits[accessible_floor_mask == 0] = 0

    # Ignore exits that are very close to the robot. These can be due
    # to the robot's own body occluding the floor. Otherwise, they can
    # represent real exits that the robot is already next to and does
    # not need to navigate to.
    ignore_radius_pix = int(4.0 * min_robot_radius_pix)
    cv2.circle(map_exits, (robot_x_pix, robot_y_pix), ignore_radius_pix, 0, -1)

    # Dilate exits in order to merge exits that are very close to one
    # another.
    kernel_width_pix = 21  #11 #15
    kernel_radius_pix = (kernel_width_pix - 1) // 2  # integer division for cv2.circle
    kernel = np.zeros((kernel_width_pix, kernel_width_pix), np.uint8)
    cv2.circle(kernel, (kernel_radius_pix, kernel_radius_pix),
               kernel_radius_pix, 255, -1)
    map_exits = cv2.dilate(map_exits, kernel, iterations=1)

    # only consider map exit candidates that are connected to parts of
    # the floor that can be reached
    map_exits[accessible_floor_mask == 0] = 0

    # find exit regions
    number_of_exits, exit_label_image = simple_connected_components(
        map_exits, connectivity=8)

    # find geometric centers of the exits
    label_indices = range(number_of_exits)[1:]
    label_image = exit_label_image
    centers_of_mass = nd.measurements.center_of_mass(label_image, label_image,
                                                     label_indices)
    ones = np.ones_like(label_image)
    sums = nd.measurements.sum(ones, label_image, label_indices)
    print('centers_of_mass =', centers_of_mass)

    if display_on:
        print('find_exits: number_of_exits =', number_of_exits)
        h, w = max_height_image.shape
        color_im = np.zeros((h, w, 3), np.uint8)
        color_im[:, :, 0] = max_height_image
        color_im[:, :, 1] = max_height_image
        color_im[:, :, 2] = map_exits
        for s, (c_y, c_x) in zip(sums, centers_of_mass):
            if s > 50:
                c_x = int(round(c_x))
                c_y = int(round(c_y))
                radius = 5  #3
                cv2.circle(color_im, (c_x, c_y), radius, [255, 255, 255], -1)
        scale_divisor = 2
        nh = h // scale_divisor
        nw = w // scale_divisor
        color_im = cv2.resize(color_im, (nw, nh))
        cv2.imshow('find_exits: exits on the map', color_im)

    map_exits_mask = map_exits

    min_area = 50
    exit_points = [[int(round(c_x)), int(round(c_y))]
                   for s, (c_y, c_x) in zip(sums, centers_of_mass)
                   if s > min_area]

    return exit_points, map_exits_mask, number_of_exits, exit_label_image
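# The circular kernels above are built by drawing a filled cv2.circle into a
# zero array; OpenCV's built-in elliptical structuring element is a one-line
# alternative (a sketch: the two differ only slightly at boundary pixels, and
# for morphology any nonzero kernel value behaves the same as 255).
def make_circular_kernel(width_pix):
    return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (width_pix, width_pix))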
def get_cropped_img(input_img):
    # Make copy
    img = input_img.copy()

    # Perform median filtering
    median = cv.medianBlur(img, 3)

    # Perform adaptive histogram equalization
    clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img_cl = clahe.apply(median)

    # Apply threshold
    _, thresh = cv.threshold(
        img_cl, 0, 255, cv.THRESH_BINARY_INV+cv.THRESH_OTSU)

    # Get dimensions of image
    img_hgt, img_wdt = thresh.shape
    # Set background to 0
    f = 0.1
    mask = np.zeros((img_hgt+2, img_wdt+2), np.uint8)
    cv.floodFill(thresh, mask, (int(img_wdt*f), int(img_hgt*f)), 0)
    cv.floodFill(thresh, mask, (int(img_wdt*(1-f)), int(img_hgt*f)), 0)
    cv.floodFill(thresh, mask, (int(img_wdt*f), int(img_hgt*(1-f))), 0)
    cv.floodFill(thresh, mask, (int(img_wdt*(1-f)), int(img_hgt*(1-f))), 0)

    f = 0.01
    cv.floodFill(thresh, mask, (int(img_wdt*f), int(img_hgt*f)), 0)
    cv.floodFill(thresh, mask, (int(img_wdt*(1-f)), int(img_hgt*f)), 0)
    cv.floodFill(thresh, mask, (int(img_wdt*f), int(img_hgt*(1-f))), 0)
    cv.floodFill(thresh, mask, (int(img_wdt*(1-f)), int(img_hgt*(1-f))), 0)

    # Perform dilation
    imgBW = cv.dilate(thresh, np.ones((3, 3), np.uint8), iterations=2)

    contours, _ = cv.findContours(imgBW.copy(), cv.RETR_LIST,
                                  cv.CHAIN_APPROX_SIMPLE)
    radius = 2
    for cnt in contours:
        isEdge = False
        for pt in cnt:
            pt_y = pt[0][1]
            pt_x = pt[0][0]

            # Check if within radius of border
            check_y = (pt_y >= 0 and pt_y < radius) or (
                pt_y >= img_hgt-1-radius and pt_y < img_hgt)
            check_x = (pt_x >= 0 and pt_x < radius) or (
                pt_x >= img_wdt-1-radius and pt_x < img_wdt)

            if check_y or check_x:
                isEdge = True
                cv.fillPoly(imgBW, pts=[cnt], color=(0, 0, 0))
                break
        if not isEdge:
            cv.fillPoly(imgBW, pts=[cnt], color=(255, 255, 255))

    contours, _ = cv.findContours(imgBW.copy(), cv.RETR_LIST,
                                  cv.CHAIN_APPROX_SIMPLE)

    threshold = 1500
    contours_p = get_lung_cnts(contours, threshold)

    count = 0
    while(len(contours_p) != 2):
        if len(contours_p) > 2:
            threshold += 100
        if len(contours_p) < 2:
            threshold -= 100
            count += 1
            if count == 10:
                break

        contours_p = get_lung_cnts(contours, threshold)

    min_y, min_x = int(img.shape[0]/2), int(img.shape[1]/2)
    max_y, max_x = int(img.shape[0]/2), int(img.shape[1]/2)

    for cnt in contours_p:
        for pt in cnt:
            if pt[0][1] < min_y:
                min_y = pt[0][1]
            if pt[0][1] > max_y:
                max_y = pt[0][1]
            if pt[0][0] < min_x:
                min_x = pt[0][0]
            if pt[0][0] > max_x:
                max_x = pt[0][0]

    min_x -= 5
    min_y -= 5
    max_x += 5
    max_y += 5

    if min_x < 0:
        min_x = 0
    if min_y < 0:
        min_y = 0
    if max_x > img.shape[1]:
        max_x = img.shape[1]
    if max_y > img.shape[0]:
        max_y = img.shape[0]

    img_crop = img[min_y:max_y, min_x:max_x]

    clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    img_crop = clahe.apply(img_crop)

    img_pad = cv.copyMakeBorder(img_crop,
                                int((max(img_crop.shape)-img_crop.shape[0])/2),
                                int((max(img_crop.shape)-img_crop.shape[0])/2),
                                int((max(img_crop.shape)-img_crop.shape[1])/2),
                                int((max(img_crop.shape)-img_crop.shape[1])/2),
                                cv.BORDER_CONSTANT,
                                value=[0])

    output_img_size = 280

    if img_pad.shape[0] > output_img_size:
        # Zoom out
        img_rs = cv.resize(
            img_pad, (output_img_size, output_img_size), interpolation=cv.INTER_AREA)
    else:
        # Zoom in
        img_rs = cv.resize(
            img_pad, (output_img_size, output_img_size), interpolation=cv.INTER_CUBIC)
    # 224 by 280
    img_rs = img_rs[28:252, 0:280]

    return img_rs
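# The eight corner flood fills in get_cropped_img above follow one pattern; a
# compact equivalent sketch (illustrative, assuming the same thresh and mask
# arrays and the cv alias used in that function):
def clear_corners(thresh, mask, img_wdt, img_hgt):
    for f in (0.1, 0.01):
        for fx in (f, 1 - f):
            for fy in (f, 1 - f):
                cv.floodFill(thresh, mask, (int(img_wdt * fx), int(img_hgt * fy)), 0)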
def select_next_scan_location(floor_mask,
                              max_height_im,
                              min_robot_width_m,
                              robot_x_pix,
                              robot_y_pix,
                              robot_ang_rad,
                              camera_height_m,
                              max_scan_distance_m,
                              display_on=False):
    h, w = max_height_im.image.shape
    m_per_pix = max_height_im.m_per_pix
    min_robot_radius_pix = (min_robot_width_m / 2.0) / m_per_pix
    disallow_too_narrow = False
    easy_distance_map, traversable_mask = distance_map_simple(
        floor_mask,
        m_per_pix,
        min_robot_width_m,
        robot_x_pix,
        robot_y_pix,
        robot_ang_rad,
        disallow_too_narrow=disallow_too_narrow,
        display_on=display_on,
        verbose=True)

    ## MAX RIDGES
    # find ridges of the distance map by finding pixels that have
    # higher values than most of pixels surrounding them
    max_ridges = create_ridge_mask(easy_distance_map, min_robot_radius_pix)
    if display_on:
        cv2.imshow('max_ridges', halve_image(max_ridges))

    ## Sample the ridges of the floor distance map. What if an area of
    ## the floor is very large relative to the range of the sensor?
    window_width = 50
    norm_easy_distance_map = cv2.normalize(easy_distance_map, None, 0, 255,
                                           cv2.NORM_MINMAX, cv2.CV_8U)
    accessible_floor_mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(floor_mask, accessible_floor_mask,
                  (robot_x_pix, robot_y_pix), 255)
    accessible_floor_mask = 255 * accessible_floor_mask[1:-1, 1:-1]
    if False:
        max_filtered_map = nd.maximum_filter(norm_easy_distance_map,
                                             size=window_width,
                                             mode='constant')
        # Select the connected component of the floor on which the
        # robot is located.
        max_sample_selector = (max_filtered_map == norm_easy_distance_map) & (
            accessible_floor_mask > 0)
        max_samples = 255 * np.uint8(max_sample_selector)
    else:
        max_samples = numba_sample_ridge(window_width, max_ridges,
                                         norm_easy_distance_map,
                                         min_robot_radius_pix)
        max_samples_list = numba_sample_ridge_list(window_width, max_ridges,
                                                   norm_easy_distance_map,
                                                   min_robot_radius_pix)
    if display_on:
        cv2.imshow('max_samples', max_samples)

    good_scan_candidates = find_good_scan_candidates(
        max_samples_list, camera_height_m, max_scan_distance_m, max_height_im,
        floor_mask, easy_distance_map, robot_x_pix, robot_y_pix)
    best_scan_candidate = find_best_scan_candidate(good_scan_candidates)
    if best_scan_candidate is None:
        best_xy = None
    else:
        best_xy = best_scan_candidate[0][:]

    return best_xy
def imageToSegment(filename, outFilename):
    global a

    # Read image, convert to grayscale, do threshold
    im_in = cv2.imread(filename)
    gray = cv2.cvtColor(im_in, cv2.COLOR_BGR2GRAY)
    th, im_th = cv2.threshold(gray, 0, 255,
                              cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    # Copy the thresholded image.
    im_floodfill = im_th.copy()

    # Mask used for flood filling.
    # Note that the size needs to be 2 pixels larger than the image.
    h, w = im_th.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)

    # Floodfill from point (0, 0)
    cv2.floodFill(im_floodfill, mask, (0, 0), 255)

    # Invert floodfilled image
    im_floodfill_inv = cv2.bitwise_not(im_floodfill)

    # Combine the two images to get the foreground.
    im_out = im_th | im_floodfill_inv

    # Display images.
    if a.DEBUG:
        # cv2.imshow("Original Image", im_in)
        # cv2.imshow("Thresholded Image", im_th)
        # cv2.imshow("Floodfilled Image", im_floodfill)
        # cv2.imshow("Inverted Floodfilled Image", im_floodfill_inv)
        cv2.imshow("Foreground", im_out)
        cv2.waitKey(0)

    # now try to get the largest segment
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(
        im_out, connectivity=4)
    sizes = stats[:, -1]
    max_label = 1
    max_size = sizes[1]
    for i in range(1, nb_components):
        if sizes[i] > max_size:
            max_label = i
            max_size = sizes[i]

    maskWithLargestSegment = np.zeros(output.shape)
    maskWithLargestSegment[output == max_label] = 255

    if a.DEBUG:
        cv2.imshow("Biggest component", maskWithLargestSegment)
        cv2.waitKey(0)

    # get bounding box
    width = stats[max_label, cv2.CC_STAT_WIDTH]
    height = stats[max_label, cv2.CC_STAT_HEIGHT]
    x = stats[max_label, cv2.CC_STAT_LEFT]
    y = stats[max_label, cv2.CC_STAT_TOP]

    imageMaskWithLargestSegment = Image.fromarray(maskWithLargestSegment)
    imageMaskWithLargestSegment = imageMaskWithLargestSegment.convert("L")
    imageMaskWithLargestSegment = imageMaskWithLargestSegment.crop(
        (x, y, x + width, y + height))

    srcImage = Image.open(filename)
    srcImage = srcImage.convert("RGBA")
    srcImage = srcImage.crop((x, y, x + width, y + height))

    segmentOut = alphaMask(srcImage, imageMaskWithLargestSegment)
    segmentOut.save(outFilename)
    print("Saved %s" % outFilename)

    return (x, y, width, height)
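# The largest-component search in imageToSegment can be written without the
# explicit loop; an equivalent sketch (label 0 is the background component,
# hence the 1: slice and the +1 offset):
def largest_component_label(stats):
    sizes = stats[:, cv2.CC_STAT_AREA]
    return 1 + int(np.argmax(sizes[1:]))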
Example #52
0
def get_bbox_from_seed(inp_img, seed, bbox=True):
    """
    Using a seed point for a pixel connected fill, return a bounding box (or extreme point quadrilateral) for that
    connected pixel structure.
    Args:
        inp_img (np.array): Input image that the seed image should be extracted from
        seed (tuple): X, Y co-ordinate that has a pixel connected structure from which you want to extract the bounding
            box.
        bbox (bool): If True will mark the bounding box enclosing the whole seed feature. Otherwise will return a
            quadrilateral made of the extreme corners of that feature.
    Returns:
        tuple: First element is an `np.array` of the image with only the seed structure left in place.
        Second element is  an `np.array` that contains the top left, top right, bottom right and bottom left co-ordinates
        of a quadrilateral marking either the bounding box of the feature or the extreme point quadrilateral of that
        feature.
    """
    print('Get bbox from seed func')
    img = inp_img.copy()  # Copy the image, leaving the original untouched
    height, width = img.shape[:2]
    mask = np.zeros((height + 2, width + 2),
                    np.uint8)  # Mask that is 2 pixels bigger than the image

    # Colour everything grey
    for x in range(width):
        for y in range(height):
            if img.item(y, x) == 255 and x < width and y < height:
                cv2.floodFill(img, None, (x, y), 64)

    # Highlight the main feature
    if all([p is not None for p in seed]):
        cv2.floodFill(img, mask, seed, 255)

    top_left = [width, height]
    top_right = [0, height]
    bottom_left = [width, 0]
    bottom_right = [0, 0]

    top = height
    bottom = 0
    left = width
    right = 0

    # Loop again to fill in all the gray (temporary highlighted) areas, leaving just the grid
    for x in range(width):
        for y in range(height):
            if img.item(y,
                        x) == 64:  # Hide anything that isn't the main feature
                cv2.floodFill(img, mask, (x, y), 0)

            # If it is a highlighted point, use it to determine the bounding position
            if img.item(y, x) == 255:
                if bbox:
                    if y < top:
                        top = y

                    if y > bottom:
                        bottom = y

                    if x < left:
                        left = x

                    if x > right:
                        right = x
                else:
                    if x + y < sum(top_left):
                        top_left = [x, y]

                    if x + y > sum(bottom_right):
                        bottom_right = [x, y]

                    if x - y > top_right[0] - top_right[1]:
                        top_right = [x, y]

                    if x - y < bottom_left[0] - bottom_left[1]:
                        bottom_left = [x, y]

    if bbox:
        top_left = [left, top]
        bottom_right = [right, bottom]
        rect = [top_left,
                bottom_right]  # Only need the top left and bottom right points
    else:
        rect = [top_left, top_right, bottom_right, bottom_left]

    return img, np.array(rect, dtype='float32')
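# Hypothetical usage sketch for get_bbox_from_seed; the synthetic image and
# seed point below are illustrative assumptions, not from the original code:
demo = np.zeros((100, 100), np.uint8)
cv2.rectangle(demo, (20, 30), (70, 80), 255, 2)  # a hollow square feature
feature_img, corners = get_bbox_from_seed(demo, (20, 30))
# with bbox=True, corners holds [top_left, bottom_right] as float32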
# (tail of get_diff_mask(), whose definition is truncated in this snippet)
    denoised_mask = cv2.morphologyEx(diff_mask, cv2.MORPH_OPEN, kernel)
    return denoised_mask


for CAMERA_NUMBER in range(TOTAL_CAMERA):
    foreground_prob = get_diff_mask(CAMERA_NUMBER, 0)
    for i in range(1, 40):
        foreground_prob = cv2.bitwise_or(foreground_prob,
                                         get_diff_mask(CAMERA_NUMBER, i))
    image_flooded = (foreground_prob.copy() * 255.0).astype(np.uint8)
    image_height, image_width = image_flooded.shape[:2]
    flood_mask = np.zeros((image_height + 2, image_width + 2), dtype=np.uint8)
    # top bar (seedPoint is (x, y), so row 0 maps to seeds (i, 0))
    for i in range(image_flooded.shape[1]):
        if image_flooded[0, i] != 255:
            cv2.floodFill(image_flooded, flood_mask, (i, 0), 255)
    # left bar
    for i in range(image_flooded.shape[0]):
        if image_flooded[i, 0] != 255:
            cv2.floodFill(image_flooded, flood_mask, (0, i), 255)

    # right bar
    most_right = image_flooded.shape[1] - 1
    for i in range(image_flooded.shape[0]):
        if image_flooded[i, most_right] != 255:
            cv2.floodFill(image_flooded, flood_mask, (most_right, i), 255)

    # bottom bar
    most_bottom = image_flooded.shape[0] - 1
    for i in range(image_flooded.shape[1]):
        if image_flooded[most_bottom, i] != 255:
            cv2.floodFill(image_flooded, flood_mask, (i, most_bottom), 255)
Example #54
0
def getFrontground(img_path):
    # 1.1 Load the image
    img = cv2.imread(img_path)
    height, width = img.shape[:2]  # get the image height and width
    print(height, width)
    # cv2.imshow('Origin', img)
    # 1.2 Blur to reduce noise
    blured = cv2.blur(img, (5, 5))  # the second argument is the size of the low-pass filter
    # cv2.imshow('Blur', blured)
    # 1.3 mask is the mask image, used to decide which regions are background and which are foreground
    mask = np.zeros((height+2, width+2), np.uint8)  # the mask is two pixels taller and wider than the input, so the flood fill cannot spill past the mask's nonzero border
    # Why add 2: when the flood-fill scan starts at row 0, column 0, the extra border guarantees that every boundary pixel gets processed
    # 1.4 Flood fill
    cv2.floodFill(blured, mask, (width-1, height-1), (255, 255, 255), (1, 1, 1), (1, 1, 1), 8)
    # cv2.imshow('floodfill', blured)
    # 1.5 Convert to grayscale
    gray = cv2.cvtColor(blured, cv2.COLOR_BGR2GRAY)
    # cv2.imshow('gray', gray)
    # 1.6 Define the structuring element
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(50, 50))
    # 1.7 Opening then closing: opening removes background noise, closing fills holes inside the target
    opened = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel)
    closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)
    # cv2.imshow('closed', closed)
    # 1.8 Threshold to a binary image
    ret, binary = cv2.threshold(closed, 250, 255, cv2.THRESH_BINARY)
    # cv2.imshow('binary', binary)
    # 1.9 Find the foreground contours (OpenCV 3.x findContours signature)
    _, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # note: contours[0] is the outer contour, contours[1] the inner contour
    # 1.10 Draw the contours
    draw_img = cv2.drawContours(img.copy(), contours, -1, (0, 0, 255), 3)
    cv2.imshow('result', draw_img)
    # 1.11 Test whether each pixel lies inside the contour and recolor the pixels outside it
    color = (255, 255, 255)
    for h in range(height):
        for w in range(width):
            try:
                test = cv2.pointPolygonTest(contours[1], (w, h), False)
                if test == -1 or test == 0:
                    img[h, w] = color
            except:
                test = cv2.pointPolygonTest(contours[0], (w, h), False)
                if test == 1:
                    img[h, w] = color
    cv2.imshow('handle', img)
    # 1.12 Build the minimum-area rectangle from the contours
    for c in contours:
        # compute the minimum rectangle enclosing the target
        rect = cv2.minAreaRect(c)
        # compute the rectangle's 4 corner points; returned as floats
        points = cv2.boxPoints(rect)
        # convert to int
        box = np.int0(points)
    # 1.13 Crop the image according to box
    Xs = [i[0] for i in box]
    Ys = [i[1] for i in box]
    x1 = min(Xs)
    x2 = max(Xs)
    y1 = min(Ys)
    y2 = max(Ys)
    height = y2 - y1
    width = x2 - x1
    crop_img = img[y1:y1+height, x1:x1+width]
    cv2.imshow('crop_img', crop_img)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return crop_img
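# The per-pixel pointPolygonTest loop in getFrontground above is very slow in
# pure Python; a faster sketch (illustrative, matching the try-branch
# behavior) rasterizes the contour once and uses it as a mask:
def whiten_outside_contour(img, contours, idx=1):
    contour_mask = np.zeros(img.shape[:2], np.uint8)
    cv2.drawContours(contour_mask, contours, idx, 255, -1)  # filled contour
    img[contour_mask == 0] = (255, 255, 255)
    return img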
Example #55
0
    def run_in_series(self) -> Any:
        if self.agent.kwargs.get("point_cloud", None) is not None:
            try:
                points: np.ndarray = self.agent.kwargs.get(
                    "point_cloud").copy()
                depth_data: np.ndarray = self.agent.front_depth_camera.data.copy(
                )
                # print("received pointcloud", np.amin(points, axis=0), np.amax(points, axis=0), self.agent.vehicle.transform.location)
                # from points find normal vectors
                h = self.agent.front_depth_camera.image_size_y
                w = self.agent.front_depth_camera.image_size_x

                # start of efficiency bottleneck TODO: @christian
                x = points[self.f1, :] - points[self.f2, :]
                y = points[self.f3, :] - points[self.f4, :]
                xyz_norm = self.normalize_v3(np.cross(x, y))
                # end of efficiency bottleneck

                # reshape the normals and take absolute values, since direction doesn't matter for ground-plane detection
                xyz_norm = np.abs(xyz_norm)
                xyz_norm = xyz_norm.reshape((h, w, 3)).astype(np.float32)

                # we only need to consider a single axis of the normal
                Y_norm_array: np.ndarray = xyz_norm[self.min_x:self.max_x,
                                                    self.min_y:self.max_y, 1]
                x, y = np.unravel_index(np.argmax(Y_norm_array),
                                        np.shape(Y_norm_array))
                seed_h, seed_w = y + self.min_y, x + self.min_x

                # floodfill
                ground_mask = np.zeros(
                    (xyz_norm.shape[0] + 2, xyz_norm.shape[1] + 2), np.uint8)
                fillvalue = 255
                cv2.floodFill(
                    image=xyz_norm,
                    mask=ground_mask,
                    seedPoint=(seed_w, seed_h),
                    newVal=fillvalue,
                    loDiff=(self.threshold, self.threshold, self.threshold),
                    upDiff=(self.threshold, self.threshold, self.threshold),
                    flags=8 | (fillvalue << 8) | cv2.FLOODFILL_MASK_ONLY)
                ground_mask = ground_mask[1:-1, 1:-1]
                sky_mask = depth_data > 0.1
                ground_mask[sky_mask] = 0

                ret, thresh = cv2.threshold(
                    ground_mask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                # You need to choose 4 or 8 for connectivity type
                connectivity = 8
                # Perform the operation
                num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
                    image=thresh, connectivity=connectivity, ltype=cv2.CV_32S)
                # find the label with the biggest area
                nr = np.arange(num_labels)
                ground_area, ground_label = sorted(zip(stats[:, 4], nr),
                                                   reverse=True)[1]
                if ground_area < 10000:
                    return
                ground_mask = np.zeros(labels.shape)
                ground_mask[labels == ground_label] = 1
                ground_mask = cv2.morphologyEx(ground_mask, cv2.MORPH_CLOSE,
                                               np.ones((5, 5), np.uint8))

                obstacle_mask = np.ones(shape=depth_data.shape)
                obstacle_mask[sky_mask] = 0
                obstacle_mask[ground_mask == 1] = 0
                obstacle_mask[:depth_data.shape[1] // 8 * 3, :] = 0
                self.curr_mask = obstacle_mask
                # cv2.imshow("obstacle mask", obstacle_mask)

                xyz = np.reshape(a=points, newshape=(h, w, 3))
                ground_coords = xyz[ground_mask == 1]
                self.agent.kwargs["ground_coords"] = ground_coords

                obstacle_coords = xyz[obstacle_mask == 1]
                vehicle_location = self.agent.vehicle.transform.location.to_array(
                )
                dists = np.linalg.norm(obstacle_coords - vehicle_location,
                                       axis=1)
                obstacle_coords = obstacle_coords[
                    dists < 100]  # consider doing this filter early on

                self.agent.kwargs["obstacle_coords"] = obstacle_coords

            except Exception as e:
                self.logger.error(f"Failed to find ground plane: {e}")
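The flags argument in the snippet above packs three independent pieces of information into one integer, which is easy to miss; a short standalone restatement of the encoding it uses:

import cv2

connectivity = 8       # low byte: 4- or 8-connected fill
fillvalue = 255        # second byte: value written into the mask
flags = connectivity | (fillvalue << 8) | cv2.FLOODFILL_MASK_ONLY
# With FLOODFILL_MASK_ONLY set, floodFill leaves the input image untouched
# and writes fillvalue only into the mask pixels of the flooded region.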
Example #56
0
img = cv2.imread('form.png')

# BGR -> grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Edge extraction (Canny)
edges = cv2.Canny(gray, 1, 100, apertureSize=3)
cv2.imwrite('edges.png', edges)
# Closing operation
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)

h, w = edges.shape[:2]
mask = np.zeros((h + 2, w + 2), dtype=np.uint8)

retval, edges, mask, rect = cv2.floodFill(edges,
                                          mask,
                                          seedPoint=(0, 0),
                                          newVal=(255, 255, 255))
edges = cv2.bitwise_not(edges)

#edges = cv2.erode(edges, kernel, iterations = 0.5)

cv2.imwrite('edges2.png', edges)
# Contour extraction
contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
# Filter by area
rects = []

for cnt, hrchy in zip(contours, hierarchy[0]):
    if cv2.contourArea(cnt) < 200:
        continue  # skip contours whose area is too small
    rects.append(cv2.boundingRect(cnt))  # assumed continuation: the source snippet cuts off after the filter
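This is the only example here that keeps all four values cv2.floodFill returns in Python. A minimal standalone sketch (toy image) of what they contain:

import cv2
import numpy as np

img = np.zeros((5, 5), np.uint8)
mask = np.zeros((7, 7), np.uint8)
retval, img, mask, rect = cv2.floodFill(img, mask, (2, 2), 255)
# retval is the number of repainted pixels; rect is the (x, y, w, h)
# bounding box of the repainted region. The whole zero image floods here:
assert retval == 25 and rect == (0, 0, 5, 5)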
Example #57
0
def fill_color_demo(image):
    copyImage = image.copy()
    h, w = copyImage.shape[:2]
    mask = np.zeros([h + 2, w + 2], np.uint8)
    # loDiff, upDiff, and flags are separate positional parameters; in the
    # original call cv.FLOODFILL_FIXED_RANGE landed in the upDiff slot, and
    # newVal (300, 300, 300) exceeded the uint8 range
    cv.floodFill(copyImage, mask, (30, 30), (255, 255, 255), (100, 100, 100),
                 (100, 100, 100), cv.FLOODFILL_FIXED_RANGE)
    cv.imshow('flood', copyImage)
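FLOODFILL_FIXED_RANGE decides what loDiff/upDiff are measured against: with the flag, every candidate pixel is compared to the seed's value; without it, to its already-accepted neighbour, so the fill can creep across smooth gradients. A standalone sketch with a toy ramp (assumed values):

import cv2
import numpy as np

ramp = np.tile(np.arange(0, 100, 10, dtype=np.uint8), (5, 1))  # columns differ by 10

floating = ramp.copy()
cv2.floodFill(floating, np.zeros((7, 12), np.uint8), (0, 0), 255, 10, 10)
# neighbour-relative: each step of 10 passes, so the fill crosses the whole ramp

fixed = ramp.copy()
cv2.floodFill(fixed, np.zeros((7, 12), np.uint8), (0, 0), 255, 10, 10,
              4 | cv2.FLOODFILL_FIXED_RANGE)
# seed-relative: only columns within 10 of the seed value (0) are filled
print((floating == 255).sum(), (fixed == 255).sum())  # 50 vs. 10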
Example #58
0
def drawCircles(app, canvas):
    pass  # body missing from the source snippet; stub added so the file parses

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = False)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
while True:
    _, frame = cap.read()
    #face subtraction
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # for (x,y,w,h) in faces:
    #     faceMask = np.zeros((h, w, 3))
    #     cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
    #     roi_color = frame[y:y+h, x:x+w]
    #     frame[y:y+h, x:x+w] = 0

    #background subtraction
    fgmask = fgbg.apply(frame)
    foreground = cv2.bitwise_and(frame, frame, mask=fgmask)
    #cv2.imshow("Foreground", foreground)
    hsv_frame = cv2.cvtColor(foreground, cv2.COLOR_BGR2HSV)
    # Blue color
    low_blue = np.array([94, 80, 2])
    high_blue = np.array([126, 255, 255])
    blue_mask = cv2.inRange(hsv_frame, low_blue, high_blue)
    blue = cv2.bitwise_and(frame, frame, mask=blue_mask)
    # Green color
    low_green = np.array([25, 52, 72])
    high_green = np.array([102, 255, 255])
    green_mask = cv2.inRange(hsv_frame, low_green, high_green)
    green = cv2.bitwise_and(frame, frame, mask=green_mask)
    # Red color
    low_red = np.array([161, 155, 84])
    high_red = np.array([179, 255, 255])
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)
    red = cv2.bitwise_and(frame, frame, mask=red_mask)

    # Skin color
    low = np.array([0, 24, 145])
    high = np.array([179, 114, 255])
    mask = cv2.inRange(hsv_frame, low, high)
    result = cv2.bitwise_and(frame, frame, mask=mask)

    #cv2.imshow("SkinColor Mask", result)

    #imfill
    gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
    th, im_th = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV)
    im_floodfill = im_th.copy()
    h, w = im_th.shape[:2]
    mask = np.zeros((h+2, w+2), np.uint8)
    cv2.floodFill(im_floodfill, mask, (0,0), 255)
    im_floodfill_inv = cv2.bitwise_not(im_floodfill)
    im_floodfill_inv = cv2.GaussianBlur(im_floodfill_inv, (13,13), 7)
    #cv2.imshow("flood filled result", im_floodfill_inv)

    #cleaning up 
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    foreground = cv2.morphologyEx(im_floodfill_inv, cv2.MORPH_OPEN, kernel)
    foreground = cv2.morphologyEx(foreground, cv2.MORPH_CLOSE, kernel)
    #cv2.imshow("Cleaned up foreground", foreground)

    #find contours
    CONTOURS, _ = cv2.findContours(foreground, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    newContours = []
    for c in CONTOURS:
        area = cv2.contourArea(c)
        #print(area)
        if area > 28000:
            newContours.append(c)
    cv2.drawContours(frame, newContours, -1, (0,255,0), 3)
    #cv2.imshow('hand contour', frame)

    #Convex Hull
    hull = [cv2.convexHull(c) for c in newContours] 
    cv2.drawContours(frame, hull, -1, (0,0,255), 3)

    cv2.imshow('Convex Hull', frame)

    # Approximate contours to polygons + get bounding rects and circles
    contours_poly = [None]*len(newContours)
    boundRect = [None]*len(newContours)
    centers = [None]*len(newContours)
    radius = [None]*len(newContours)
    for i, c in enumerate(newContours):
        contours_poly[i] = cv2.approxPolyDP(c, 3, True)
        boundRect[i] = cv2.boundingRect(contours_poly[i])
        centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])

    center = None
    try:
        center = centers[0]
        r = int(radius[0])
        x = int(center[0])
        y = int(center[1])
        print(f'r now is {r} and center now is {x} {y}')
    except IndexError:
        pass  # no qualifying contour in this frame
    # Identify the palm center
    if center is not None:
        cv2.circle(frame, (x, y), 2, (255, 0, 0), 5)
    cv2.imshow('frame', frame)

    # convex hull defects
    max_cont = None
    defects = None
    try:
        max_cont = max(newContours, key=cv2.contourArea)
    except ValueError:
        pass  # newContours is empty this frame
    if max_cont is not None:
        hull = cv2.convexHull(max_cont, returnPoints=False)
        defects = cv2.convexityDefects(max_cont, hull)
        #cv2.circle(frame, cnt_centroid, 2, (0,255,0), 5)
        #print(f'defects is of type {type(defects)}, {defects}, the shape of the defects is {np.shape(defects)}')

    # plotting convex hull defects
    if defects is not None:
        for i in range(len(defects)):
            s, e, f, d = defects[i][0]
            x, y = max_cont[f][0]
            #print(type(max_cont[f]))
            #print(np.shape(max_cont[f]))
            #print(f'x and y is ({x},{y})')
            cv2.circle(frame, (x, y), 10, (255, 0, 0), 5)
    cv2.imshow('frame-convex hull defects pts', frame)
           
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
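The defect-plotting loop above only uses f, the farthest-point index; for reference, each row of the cv2.convexityDefects result packs four integers. A standalone reminder sketch with a toy L-shaped contour (assumed coordinates):

import cv2
import numpy as np

cnt = np.array([[0, 0], [40, 0], [40, 40], [20, 20], [0, 40]],
               np.int32).reshape((-1, 1, 2))
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
for s, e, f, d in defects[:, 0]:
    # s, e: indices of the hull edge's start/end points on the contour;
    # f: contour point farthest from that edge; d: depth in 1/256 pixel units
    print(tuple(cnt[s][0]), tuple(cnt[e][0]), tuple(cnt[f][0]), d / 256.0)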
Example #59
0
img = cv.imread(filename, 0)

img = cv.subtract(255, img)

# height, width, channels = img.shape
print(filename)
base_name = os.path.basename(filename)
save_name = base_name.split('_')[0]

# fill hole
# read image, ensure binary
img[img != 0] = 255

# flood fill background to find inner holes
holes = img.copy()
cv.floodFill(holes, None, (0, 0), 255)

# invert holes mask, bitwise or with img fill in holes
holes = cv.bitwise_not(holes)
filled_holes = cv.bitwise_or(img, holes)

# remove_small_objects

# find all your connected components (white blobs in your image)
nb_components, output, stats, centroids = cv.connectedComponentsWithStats(filled_holes, connectivity=8)
# connectedComponentsWithStats yields every separated component along with
# information about each, such as its size. The following part just removes
# the background, which is also counted as a component but is usually unwanted.
sizes = stats[1:, -1]
nb_components = nb_components - 1
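The example ends right before the removal it sets up; a hedged completion that continues the snippet's own variables (filled_holes, sizes, output) and assumes numpy is imported as np. The min_size threshold is an assumed value, not from the source:

min_size = 200  # assumed threshold; the source cuts off before choosing one
cleaned = np.zeros(filled_holes.shape, np.uint8)
for i in range(nb_components):
    # sizes[i] belongs to label i + 1, since label 0 (the background) was dropped
    if sizes[i] >= min_size:
        cleaned[output == i + 1] = 255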
Example #60
0
        for c in range(image.shape[2]):
            contrast[y,x,c] = np.clip(alpha*image[y,x,c] + beta, 0, 255)
cv2.imshow('contrast', contrast)
cv2.waitKey(0)            
'''
h, w, _ = contrast.shape 
contrast = cv2.resize(contrast,(w*5,h*5), interpolation = cv2.INTER_AREA)
contrast = cv2.resize(contrast,(w*4,h*4), interpolation = cv2.INTER_AREA)

#contrastBW = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
contrastBW = cv2.cvtColor(contrast, cv2.COLOR_BGR2GRAY)
contoursBW = contrastBW.copy()

cv2.imshow('contrastBW', contrastBW)
cv2.waitKey(0)
cv2.floodFill(contrastBW, None, (0,0), 255)
contrastBW = cv2.adaptiveThreshold(contrastBW, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,3,1)

cv2.imshow('contrastBW', contrastBW)
cv2.waitKey(0)  
text = pytesseract.image_to_string(contrastBW)#, lang="fita")
print(text)

contoursBW = cv2.Canny(contoursBW, 30, 200)
#contoursBW = cv2.medianBlur(contoursBW,3)
kernel2 = np.ones((2, 2), np.uint8)  
kernel1 = np.ones((1, 1), np.uint8)  
# Using cv2.erode() method  
#contoursBW = cv2.dilate(contoursBW, kernel)  
contoursBW = cv2.GaussianBlur(contoursBW,(1,1),0)
#contoursBW = cv2.erode(contoursBW, kernel, 1)