Example #1
    def find_pipe(self, img):
        rows, cols = img.shape[:2]

        blur = cv2.GaussianBlur(img, (5, 5), 0)

        hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)

        mask = cv2.inRange(hsv, ORANGE_MIN, ORANGE_MAX)

        bmask = cv2.GaussianBlur(mask, (5, 5), 0)

        contours, _ = cv2.findContours(bmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        blank_img = np.zeros((rows, cols), np.uint8)

        if contours:
            # sort contours by area (greatest --> least)
            contours = sorted(contours, key=cv2.contourArea, reverse=True)[:1]
            cnt = contours[0]  # contour with greatest area
            if cv2.contourArea(cnt) > 1000:  # this value will change based on our depth/the depth of the pool
                rect = cv2.minAreaRect(cnt)   # find bounding rectangle of min area (including rotation)
                box = cv2.boxPoints(rect)     # get corner coordinates of that rectangle (cv2.cv.BoxPoints in OpenCV 2.x)
                box = np.int0(box)            # convert coordinates to ints

                # draw minAreaRect around pipe
                cv2.drawContours(blank_img, [box], 0, (255, 255, 255), -1)

                # get all coordinates (y,x) of pipe
                why, whx = np.where(blank_img)
                # align coordinates --> (x,y)
                wh = np.array([whx, why])

                # estimate covariance matrix and get corresponding eigenvectors
                cov = np.cov(wh)
                eig_vals, eig_vects = np.linalg.eig(cov)

                # use index of max eigenvalue to find max eigenvector
                i = np.argmax(eig_vals)
                max_eigv = eig_vects[:, i] * np.sqrt(eig_vals[i])

                # flip indices to find min eigenvector
                min_eigv = eig_vects[:, 1 - i] * np.sqrt(eig_vals[1 - i])

                # define center of pipe
                center = np.average(wh, axis=1)

                # define vertical vector (sub's current direction)
                vert_vect = np.array([0, -1 * np.int0(center[1])])

                # calculate angle between vertical and max eigenvector
                num = np.dot(max_eigv, vert_vect)
                denom = np.linalg.norm(max_eigv) * np.linalg.norm(vert_vect)
                angle_rad = np.arccos(num / denom)

                quaternion = transformations.quaternion_from_euler(0.0, 0.0, angle_rad)

                return [center[0], center[1], None], [quaternion[0], quaternion[1], quaternion[2], quaternion[3]]

            else:
                return None
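The orientation step above is just PCA on pixel coordinates. A minimal, NumPy-only sketch of the same idea (an illustration, not the author's exact code; `mask` is any binary image with at least a few foreground pixels):

import numpy as np

def principal_axis_angle(mask):
    # gather (x, y) coordinates of the foreground pixels
    ys, xs = np.nonzero(mask)
    pts = np.array([xs, ys])
    # eigenvectors of the coordinate covariance give the blob's axes
    eig_vals, eig_vects = np.linalg.eig(np.cov(pts))
    major = eig_vects[:, np.argmax(eig_vals)]
    # angle between the major axis and the image's "up" direction (0, -1)
    cos_a = np.dot(major, np.array([0.0, -1.0])) / np.linalg.norm(major)
    return np.arccos(np.clip(cos_a, -1.0, 1.0))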
Example #2
def estimate_bbox(cnt, img):
	# calculate bounding box
	rect = cv2.minAreaRect(cnt)
	bbox = cv2.boxPoints(rect)
	bbox = np.int0(bbox)
	#cv2.drawContours(img, [bbox], 0, (0,255,0), 2)

	# rotate bounding box to get a vertical rectangle
	M = cv2.getRotationMatrix2D(rect[0], rect[2], 1)
	pts = np.ones((4, 3))
	pts[:,:-1] = bbox
	bbox_rot = np.int0(np.dot(pts, M.T))

	# resize bounding box to cover the whole document
	bbox_rot[0][0] -= 15
	bbox_rot[0][1] += 120
	bbox_rot[1][0] -= 15
	bbox_rot[2][0] += 5
	bbox_rot[3][0] += 5
	bbox_rot[3][1] += 120

	# rotate back bounding box to original orientation
	p = (bbox_rot[1][0], bbox_rot[1][1])
	M = cv2.getRotationMatrix2D(p, -rect[2], 1)
	pts = np.ones((4, 3))
	pts[:,:-1] = bbox_rot
	bbox = np.int0(np.dot(pts, M.T))
	return bbox
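The ones-column trick above is how a 2x3 affine matrix gets applied to raw points. A self-contained sketch of that step alone (illustrative values; assumes only OpenCV and NumPy):

import cv2
import numpy as np

def rotate_points(points, center, angle_deg):
    # 2x3 affine matrix rotating about `center`
    M = cv2.getRotationMatrix2D(center, angle_deg, 1.0)
    # append a ones column so the matrix can translate as well as rotate
    pts = np.hstack([points, np.ones((len(points), 1))])
    return pts @ M.T

corners = np.array([[0, 0], [100, 0], [100, 40], [0, 40]], dtype=np.float64)
print(rotate_points(corners, (50, 20), 30))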
Example #3
def find_first_transmitters(contours):
    rects = []
    boxs = []
    for contour in contours:
        rect = cv2.minAreaRect(contour)
        if cv2.contourArea(contour) < 100000:  # arbitrary
            continue
        else:
            # find center
            box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2.x
            box = numpy.int0(box)
            box = rot_box(box)
            box = numpy.int0(box)
            rects.append(rect)
            boxs.append(box)
    number_of_transmitters = len(rects)
    centers = []

    for i in range(number_of_transmitters):
        # approximate the centre of mass by averaging the four box corners (note the (y, x) ordering below)
        x = [p[0] for p in boxs[i]]
        y = [p[1] for p in boxs[i]]
        center = (sum(y) / 4, sum(x) / 4)
        centers.append(center)

    return rects, boxs, centers, number_of_transmitters
Example #4
 def __getCentroid(self, mask):
     """ Calculate the centroid of object"""
     x, y = mask.nonzero()
     x = np.int0(x.mean())
     y = np.int0(y.mean())
     centroid = (x, y)
     return centroid
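One caveat worth flagging: ndarray.nonzero() yields (row, column) indices, so the x above is really the row coordinate. A quick NumPy-only check:

import numpy as np

mask = np.zeros((10, 10), dtype=np.uint8)
mask[2:4, 6:9] = 1                 # blob spanning rows 2-3, columns 6-8
rows, cols = mask.nonzero()
print(int(rows.mean()), int(cols.mean()))   # -> 2 7 (row first, column second)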
Example #5
def colour_norm(img):
	sum_img = np.int0(img[:,:,0]) + \
			np.int0(img[:,:,1]) + \
			np.int0(img[:,:,2])
	sum_img = np.dstack([sum_img, sum_img, sum_img])
	img = ((255 * img.astype("int64")) / (sum_img + 1)).astype("uint8")
	return img
Example #6
    def measure_target_width_on_segment(self, pt1, pt2):
        """
        Given the line segment L defined by 2d points pt1 and pt2 from a camera 
        frame, find the points pt3 and pt4 the nearest points to pt1 and pt2 
        on L that are masked according to self.mask8. Then calculate the 
        distance D between 3d points pt5 and pt6 in self.xyz which 
        correspond to pt3 and pt4.
        return pt3, pt4, D, fx, fy,
            where 
                pt3 = (x, y)
                pt4 = (x, y)
                fx is the function f(distance from pt3 on L) = x
                fy is the function f(distance from pt3 on L) = y
        If anything goes wrong, return None
        """
        from scipy.interpolate import interp1d

        dist2d = distance(pt1, pt2)
        interpx = interp1d([0, dist2d], [pt1[0], pt2[0]])
        interpy = interp1d([0, dist2d], [pt1[1], pt2[1]])
        t = numpy.linspace(0, int(dist2d), int(dist2d)+1)
        xs = numpy.int0(interpx(t))
        ys = numpy.int0(interpy(t))
        ixs, = self.mask8[ys, xs].nonzero()
        if len(ixs) >= 2:
            x1 = xs[ixs[0]]
            y1 = ys[ixs[0]]
            x2 = xs[ixs[-1]]
            y2 = ys[ixs[-1]]
            xyz1 = self.xyz[:, y1, x1]
            xyz2 = self.xyz[:, y2, x2]
            dist3d = distance(xyz1, xyz2)
            interpx2 = lambda d: (x2-x1)*d/dist2d + x1
            interpy2 = lambda d: (y2-y1)*d/dist2d + y1
            return (x1, y1), (x2, y2), dist3d, interpx2, interpy2
Example #7
def get_centroids (contours, frame):
	centres = []
	if contours:
		for i in range(len(contours)):
			moments = cv2.moments(contours[i])
			centres.append((int(moments['m10']/moments['m00']), int(moments['m01']/moments['m00'])))
		
			if i>0:                
				dist = calculateDistance(centres[i-1][0],centres[i-1][1],centres[i][0],centres[i][1])
				area=cv2.contourArea(contours[i])
				prevarea=cv2.contourArea(contours[i-1])
				if dist < 120:                    
					if area > prevarea:
						rect = cv2.minAreaRect(contours[i])
						box = cv2.boxPoints(rect)
						box = np.int0(box)
						print(box)
						frame = cv2.drawContours(frame,[box],0,(0,0,255),2)
					else :
						rect = cv2.minAreaRect(contours[i-1])
						box = cv2.boxPoints(rect)
						box = np.int0(box)
						print(box)
						frame = cv2.drawContours(frame,[box],0,(0,0,255),2)
			else:
				rect = cv2.minAreaRect(contours[i])
				box = cv2.boxPoints(rect)
				box = np.int0(box)
				frame = cv2.drawContours(frame,[box],0,(0,0,255),2)
				print(box)
	return centres, frame
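calculateDistance is not shown in this snippet; a plausible definition consistent with the four scalar arguments used above (an assumption, not the original helper) is plain Euclidean distance:

import math

def calculateDistance(x1, y1, x2, y2):
    # Euclidean distance between (x1, y1) and (x2, y2); hypothetical stand-in
    return math.hypot(x2 - x1, y2 - y1)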
Example #8
    def draw_walls(self):
        left_wall_points = np.array([self.transform(point) for point in self.left_wall_points])
        right_wall_points = np.array([self.transform(point) for point in self.right_wall_points])

        rect = cv2.minAreaRect(left_wall_points[:,:2].astype(np.float32))
        box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2.x
        box = np.int0(box)
        cv2.drawContours(self.grid, [box], 0, 128, -1)

        rect = cv2.minAreaRect(right_wall_points[:,:2].astype(np.float32))
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(self.grid, [box], 0, 128, -1)

        # So I don't have to comment a bunch of stuff out for debugging
        dont_display = True
        if dont_display:
            return

        # Bob Ross it up (just for display)
        left_f, right_f = self.transform(self.left_f), self.transform(self.right_f)
        left_b, right_b = self.transform(self.left_b), self.transform(self.right_b)

        boat = self.transform(self.boat_pos)
        target = self.transform(self.target)

        cv2.circle(self.grid, tuple(boat[:2].astype(np.int32)), 8, 255)
        cv2.circle(self.grid, tuple(target[:2].astype(np.int32)), 15, 255)
        cv2.circle(self.grid, tuple(self.transform(self.mid_point)[:2].astype(np.int32)), 5, 255)
        cv2.circle(self.grid, tuple(left_f[:2].astype(np.int32)), 10, 255)
        cv2.circle(self.grid, tuple(right_f[:2].astype(np.int32)), 10, 255)
        cv2.circle(self.grid, tuple(left_b[:2].astype(np.int32)), 3, 125)
        cv2.circle(self.grid, tuple(right_b[:2].astype(np.int32)), 3, 128)
        cv2.imshow("test", self.grid)
        cv2.waitKey(0)
Example #9
  def track_by_camshif(self, frame, contour):
    area = cv2.contourArea(contour)
    rect = cv2.minAreaRect(contour)
    (vx, vy), (x, y), angle = rect
    vx, vy, x, y = np.int0((vx, vy, x, y))

    roi = frame[vy:(vy+y), vx:(x+vx)]
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    #roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)

    # compute a HSV histogram for the ROI and store the
    # bounding box
    roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
    roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
    roiBox = (vx, vy, x, y)
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while(1):
      ret, current_frame = self.camera.read()
      current_image = cv2.resize(current_frame,(self.frame_width, self.frame_height), interpolation=cv2.INTER_LINEAR)
      self.custom_wait_key('origin_frame', current_image, current_image)

      hsv = cv2.cvtColor(current_image, cv2.COLOR_BGR2HSV)
      backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)

      # apply cam shift to the back projection, convert the
      # points to a bounding box, and then draw them
      (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
      pts = np.int0(cv2.boxPoints(r))  # cv2.cv.BoxPoints in OpenCV 2.x
      cv2.polylines(current_image, [pts], True, (0, 255, 0), 2)
      self.custom_wait_key('frame', current_image, current_image)
Example #10
def ChannelValidity(fileName):
    """
    A function to examine the data from different channels of a tetrode stored in Neuralynx ntt file.
    """ 
    try:
        ntt = mmap_ntt_file(fileName)
        nttUp = True
    except Exception:
        nttUp = False
    if nttUp and ntt.size > 1:
        RndIdx = np.random.randint(ntt.size-1,size=100)
        sample = np.array(ntt['waveforms'][RndIdx])
        chV = np.array([])
        ChannelValidity = np.array([])
        for item in sample:
            chV = np.append(chV,np.array([item[:,ii].sum() for ii in range(4)]))
        chV = chV.reshape(chV.size // 4, 4)  # integer division: a float shape breaks reshape on Python 3
        ChannelValidity = np.append(ChannelValidity,[chV[:,jj].sum() for jj in range(4)])
        for ii in range(4):
            if np.abs(ChannelValidity)[ii] > 10:
                ChannelValidity[ii] = 1
            else:
                ChannelValidity[ii] = 0
        return np.int0(ChannelValidity)
    else:
        return np.int0([0,0,0,0])
Example #11
def sliceImg(img, slice_part=None, color=(255, 255, 255)):
    if slice_part is not None:
        h, w = img.shape[:2]
        if isinstance(slice_part[0], float):
            cv2.rectangle(img, (np.int0(slice_part[0] * w), 0), (np.int0(slice_part[1] * w), h), color, -1)
        else:
            for part in slice_part:
                cv2.rectangle(img, (np.int0(part[0] * w), 0), (np.int0(part[1] * w), h), color, -1)
Example #12
def draw_box(largest_contour, img):
        ## Find the box encompassing the largest red blob
        rect = cv2.minAreaRect(largest_contour)
        box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2.x
        box = np.int0(box)
        cv2.drawContours(img,[box], 0, (0, 0, 255), 2)
        return img
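A minimal way to exercise draw_box on a synthetic blob (assumes OpenCV 4's two-value findContours and the cv2.boxPoints fix above):

import cv2
import numpy as np

img = np.zeros((200, 200, 3), np.uint8)
cv2.ellipse(img, (100, 100), (60, 25), 30, 0, 360, (0, 0, 255), -1)   # rotated red blob
mask = cv2.inRange(img, (0, 0, 200), (0, 0, 255))
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
out = draw_box(max(contours, key=cv2.contourArea), img.copy())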
Example #13
def draw_markers(img,markers):
    for m in markers:
        centroid = [m['verts'].sum(axis=0)/4.]
        origin = m['verts'][0]
        hat = np.array([[[0,0],[0,1],[.5,1.25],[1,1],[1,0]]],dtype=np.float32)
        hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
        cv2.polylines(img,np.int0(hat),color = (0,0,255),isClosed=True)
        cv2.polylines(img,np.int0(centroid),color = (255,255,0),isClosed=True,thickness=2)
        cv2.putText(img,'id: '+str(m['id']),tuple(np.int0(origin)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(255,100,50))
Example #14
	def callback(self,data):
		try:
			img = self.bridge.imgmsg_to_cv2(data, "bgr8")
		except CvBridgeError as e:
			print(e)

		#imageHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
		contours = ThreshAndContour(img, self.upper, self.lower)
		contours = contours[1]
		#output = cv2.bitwise_and(img, img, mask=mask)

		if len(contours) == 0:
			return None

		rects = []
		#cv2.drawContours(img,contours,-1, (0,255,0), 3)
		for contour in contours: #adapted from https://github.com/opencv/opencv/blob/master/samples/python/squares.py
			epsilon = cv2.arcLength(contour, True)*0.05
			contour = cv2.approxPolyDP(contour, epsilon, True)
			if len(contour) == 4 and cv2.isContourConvex(contour):
				contour = contour.reshape(-1, 2)
				max_cos = np.max([angle_cos(contour[i], contour[(i+1) % 4], contour[(i+2) % 4]) for i in range(4)])
				if max_cos < 0.1:
					rects.append(contour)

		if len(rects) > 1:
			rects = sorted(rects, key=cv2.contourArea, reverse=True)  # sort the detected quads, not the raw contours
			rect1 = cv2.minAreaRect(rects[0])
			rect2 = cv2.minAreaRect(rects[1])

			if rect1[1][0] < rect1[1][1]: #Fix wonky angles from opencv (I think)
				rect1 = (rect1[0], rect1[1], (rect1[2] + 180) * 180/3.141)
			else:
				rect1 = (rect1[0], rect1[1], (rect1[2] + 90) * 180/3.141)

			if rect2[1][0] < rect2[1][1]:
				rect2 = (rect2[0], rect2[1], (rect2[2] + 180) * 180/3.141)
			else:
				rect2 = (rect2[0], rect2[1], (rect2[2] + 90) * 180/3.141)

			box = cv2.boxPoints(rect1)
			box = np.int0(box)
			#cv2.drawContours(img,[box],-1,(0,0,255),2)
			box = cv2.boxPoints(rect2)
			box = np.int0(box)
			#cv2.drawContours(img,[box],-1,(0,0,255),2)

			gateLocation = None
			gateAxis = None
			gateAngle = None
			gateCenter = (int((rect1[0][0] + rect2[0][0])/2), int((rect1[0][1] + rect2[0][1])/2))
			cv2.circle(img,gateCenter,5,(0,255,0),3)

		try:
			self.image_pub.publish(self.bridge.cv2_to_imgmsg(img,"bgr8"))
		except CvBridgeError as e:
			print(e)
Example #15
def motion_all(event):
    if event.inaxes == ax0:
        yprof.set_xdata(np.int0(yind.clip(event.xdata,event.xdata)))
        xprof.set_ydata(np.int0(xind.clip(event.ydata,event.ydata)))
        pvert.set_xdata(img[:,int(event.xdata)])   # np.int was removed in NumPy 1.24
        pvertc.set_ydata(np.int0(yind.clip(event.ydata,event.ydata)))
        phorz.set_ydata(img[int(event.ydata),:])
        phorzc.set_xdata(np.int0(xind.clip(event.xdata,event.xdata)))
        fig.canvas.draw_idle()
Example #16
def posterize(image, level):
	indices = np.arange(0,256)
	divider = np.linspace(0,255,level+1)[1]
	quantiz = np.int0(np.linspace(0,255,level))
	color_levels = np.clip(np.int0(indices/divider),0,level-1)
	palette = quantiz[color_levels]
	img2 = palette[image]
	img2 = cv2.convertScaleAbs(img2)
	return img2
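A quick, image-free demonstration of the lookup table posterize builds, using level = 4 (note np.int0 is an alias of np.intp and was removed in NumPy 2.0):

import numpy as np

level = 4
indices = np.arange(0, 256)
divider = np.linspace(0, 255, level + 1)[1]      # 63.75, the width of one bin
quantiz = np.int0(np.linspace(0, 255, level))    # [0, 85, 170, 255]
palette = quantiz[np.clip(np.int0(indices / divider), 0, level - 1)]
print(np.unique(palette))                        # -> [  0  85 170 255]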
Example #17
def posterization(n, img):
    indices = np.arange(0,256)   # List of all colors 
    divider = np.linspace(0,255,n+1)[1] # we get a divider
    quantiz = np.int0(np.linspace(0,255,n)) # we get quantization colors
    color_levels = np.clip(np.int0(indices/divider),0,n-1) # color levels 0,1,2..
    palette = quantiz[color_levels] # Creating the palette

    img = palette[img]  # Applying palette on image
    return cv2.convertScaleAbs(img) # Converting image back to uint8
Example #18
def ShowCornerAndFeaturePoints(image1, image2, corners1, corners2, histogram1, histogram2):

    # convert corner coordinates to ints
    corners1 = np.int0(corners1)
    corners2 = np.int0(corners2)

    # draw a rectangle around the corner
    for i in range(0, corners1.shape[0]):
        cv2.rectangle(
            image1, (corners1[i, 0] - 18, corners1[i, 1] - 18), (corners1[i, 0] + 18, corners1[i, 1] + 18), [0, 0, 0]
        )
    for j in range(0, corners2.shape[0]):
        cv2.rectangle(
            image2, (corners2[j, 0] - 18, corners2[j, 1] - 18), (corners2[j, 0] + 18, corners2[j, 1] + 18), [0, 0, 0]
        )

    cno = 1
    min_distance = 0
    min_index = 0

    for i in range(0, len(corners1)):
        for j in range(0, len(corners2)):
            # compare histogram for each corner,find the smallest distance between corner in image 1 and image 2.
            distance = np.sum((histogram1[i] - histogram2[j]) * (histogram1[i] - histogram2[j]), None)
            if j == 0:
                min_distance = distance
                min_index = j
            else:
                if distance < min_distance:
                    min_distance = distance
                    min_index = j

        cv2.putText(
            image1,
            str(cno),
            (corners1[i, 0], corners1[i, 1]),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 0, 255),
            2,
            cv2.LINE_AA,
        )
        cv2.putText(
            image2,
            str(cno),
            (corners2[min_index, 0], corners2[min_index, 1]),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.5,
            (0, 0, 255),
            2,
            cv2.LINE_AA,
        )
        cno = cno + 1

    cv2.imshow("image1", image1)
    cv2.imshow("image2", image2)
Example #19
    def __init__(self, contour, hue=None, tracking_id=None):
        self.contour = np.int0(contour.reshape((4, 2)))
        self.box = np.int0(cv2.boxPoints(cv2.minAreaRect(self.contour)))  # cv2.cv.BoxPoints in OpenCV 2.x

        self.moments = cv2.moments(np.float32([self.box]))
        self.center = np.array([
            self.moments['m10'] / self.moments['m00'], 
            self.moments['m01'] / self.moments['m00']])

        self.hue = hue
Example #20
    def _draw_squares(self, squares, img):
        for s in self._squares:
            colour_rgb = hls_to_rgb(s.hue / 180.0, 0.5, 1.0)  # hls_to_rgb returns RGB floats in [0, 1]
            colour_rgb = [x * 255 for x in colour_rgb]
            colour_bgr = (colour_rgb[2], colour_rgb[1], colour_rgb[0])

            cv2.drawContours(img, np.int0([s.box]), -1, colour_bgr, 3)
            cv2.circle(img, tuple(np.int0(s.center)), 4, colour_bgr, -1)

        return img
Example #21
def DrawMark(image,contours,mark,I=255,border=2):
	rect = cv2.minAreaRect(contours[mark])
	box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2.x
	box = np.int0(box)
	#Get Corners for box 2
	rect1 = cv2.minAreaRect(contours[mark-1])
	box1 = cv2.boxPoints(rect1)
	box1 = np.int0(box1)
	#Draw
	cv2.drawContours(image,[box],0,(I,I,I),border)
	cv2.drawContours(image,[box1],0,(I,I,I),border)
Example #22
def hausdroff_distance(a, b):
    a = np.int0(a)
    b = np.int0(b)
    maxDistAB = calc_distance(a, b)
    if maxDistAB == 10000000:
        return maxDistAB
    maxDistBA = calc_distance(b, a)
    if maxDistBA == 10000000:
        return maxDistBA  # propagate the B->A sentinel (the original returned maxDistAB here)
    maxDist = max(maxDistAB, maxDistBA)
    return math.sqrt(maxDist)
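calc_distance is not included here; a plausible directed-distance helper consistent with the squared distances and math.sqrt above (an assumption, including the 10000000 sentinel for empty input) would be:

import numpy as np

def calc_distance(a, b):
    # directed Hausdorff term: for each point of `a`, the squared distance to
    # its nearest point of `b`, then the maximum over `a` (hypothetical stand-in)
    if len(a) == 0 or len(b) == 0:
        return 10000000
    d2 = ((a[:, None, :] - b[None, :, :]) ** 2).sum(axis=2)
    return d2.min(axis=1).max()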
Example #23
 def motion_all(self, event):
     """Updates the profile plots as the mouse cursor moves over the figure"""
     if event.inaxes == self.ax0:
         self.yprof.set_xdata(np.int0(self.yind.clip(event.xdata,event.xdata)))
         self.xprof.set_ydata(np.int0(self.xind.clip(event.ydata,event.ydata)))
         self.pvert.set_xdata(self.z[:,int(event.xdata)])   # np.int was removed in NumPy 1.24
         self.pvertc.set_ydata(np.int0(self.yind.clip(event.ydata,event.ydata)))
         self.phorz.set_ydata(self.z[int(event.ydata),:])
         self.phorzc.set_xdata(np.int0(self.xind.clip(event.xdata,event.xdata)))
         self.fig.canvas.draw_idle()
     plt.draw()
Example #24
    def find_pipe_new(self, img):
        #img[:, -100:] = 0
        #img = cv2.GaussianBlur(img, (7, 7), 15)
        last_image_timestamp = self.last_image_timestamp
        hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)

        lower = np.array([self.calculate_threshold(hsv), 0, 0])
        upper = np.array([179, 255, 255])

        # Take the thresholded mask, remove noise, find the biggest contour, then draw the best-fit
        #   rectangle around that contour; finally run the same pose algorithm on that rectangle.
        mask = cv2.inRange(hsv, lower, upper)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, self.kernel)
        contours, _ = cv2.findContours(np.copy(mask), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) < 1:
            print "None found"
            return

        # Find biggest area contour
        self.last_draw_image = np.copy(hsv)
        areas = [cv2.contourArea(c) for c in contours]
        max_index = np.argmax(areas)
        cnt = contours[max_index]

        # Draw a minAreaRect around the contour and find its area.
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2.x
        box = np.int0(box)
        mask = np.zeros(shape=mask.shape)
        cv2.drawContours(mask, [box], 0, 255, -1)
        rect_area = cv2.contourArea(box)

        center, angle_rad, [max_eigv, min_eigv] = self.get_pose(mask)

        # Check if the box is too big or small.
        xy_position, height = self.occ_grid.get_tf(timestamp=last_image_timestamp)
        expected_area = self.occ_grid.calculate_marker_area(height)
        if expected_area * .3 < rect_area < expected_area * 2:
            cv2.drawContours(self.last_draw_image, [box], 0, (255, 255, 255), -1)
        else:
            angle_rad = 0
            max_eigv = np.array([0, -20])
            min_eigv = np.array([-20, 0])
            #cv2.drawContours(self.last_draw_image, [box], 0, (255, 0, 30), -1)
            print "Size out of bounds!"

        cv2.line(self.last_draw_image, tuple(np.int0(center)), tuple(np.int0(center + (2 * max_eigv))), (0, 255, 30), 2)
        cv2.line(self.last_draw_image, tuple(np.int0(center)), tuple(np.int0(center + (2 * min_eigv))), (0, 30, 255), 2)

        print(center, angle_rad)
        print(rect_area, expected_area)
        print()

        return center, angle_rad, rect_area
Example #25
def drawRectangles(bgrimg, rectangles):
    imgCopy = bgrimg.copy()
    for rect in rectangles:
        box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2.x
        box = np.int0(box)
        cv2.drawContours(imgCopy, [box], 0, (0,0,255), -1)
    
    nContours, nRectangles, nEllipses = getCRE(imgCopy)
    for rect in nRectangles:
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(bgrimg, [box], 0, (0,0,255), 2)
Example #26
 def on_stroke_process(self, event):
     try:
         if self.stroke_data.found_gesture and not self.write_mode:
             points = []
             for stroke in self.stroke_data.strokes:
                 if len(stroke) == 0:
                     continue
                 points.append([])
                 points[-1].append([[v[0],v[1]]
                                for v in stroke])
                 if self.stroke_data.gesture == 'Circle':
                     points = np.array(
                         [item for sublist in
                          points for item in sublist]).reshape(-1,2)
                     center = np.int0(np.mean(points,axis=0))
                     radius = np.int0(norm(np.std(points,axis=0)))
                     cv2.circle(self.drawing_im,(center[0],
                                             center[1]), radius,
                            [255,255,255], self.size)
                 elif self.stroke_data.gesture == 'Line':
                     points = np.array(
                         [item for sublist in points for item in sublist])
                     cv2.line(self.drawing_im, tuple(points[0][0])
                              , tuple(points[-1][-1]),
                              [255, 255, 255], self.size)
                 elif self.stroke_data.gesture == 'Rectangle':
                     points = np.array(
                         [item for sublist in points for item in sublist])
                     rect = cv2.minAreaRect(points)
                     box = np.int0(cv2.boxPoints(rect))
                     cv2.drawContours(self.drawing_im, [box], 0,
                                      [255, 255, 255], self.size)
                 elif self.stroke_data.gesture == 'Triangle':
                     points = np.array(
                         [item for sublist in points 
                          for item in sublist]).reshape(1,-1,2)
                     triangle = np.int0(cv2.minEnclosingTriangle(
                         points)[1].squeeze())
                     cv2.drawContours(self.drawing_im,
                                      [triangle], 0,
                                      [255, 255, 255], self.size)
                 self.temporary_im = np.zeros_like(self.drawing_im)
         else:
             if self.write_mode:
                 self.drawing_im += self.temporary_im
             else:
                 self.temporary_im = np.zeros_like(self.drawing_im)
     except Exception as e:
          exc_type, exc_value, exc_traceback = sys.exc_info()
          traceback.print_exception(exc_type,
                             exc_value,
                             exc_traceback, limit=2, file=sys.stdout)
Example #27
    def draw(self):
        """
        draw - Method
        @summary: draw the interior/exterior contours and their minimum-area boxes
        """
        #Internal shape colour - Blue
        colours = [(89, 73, 48)]
        
        for x in self.interior:
            cv2.drawContours(self.img, [x], 0, colours[random.randint(0, len(colours)-1)],2)
        
        cv2.drawContours(self.img, [self.exterior], 0, (43, 58, 255),2)      
        rect = cv2.minAreaRect(self.exterior)
        box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2.x
        box = numpy.int0(box)
        
        if config.DEBUG:
            #Draw the center point 
            cv2.circle(self.img, self.getCentrePoint(), 10, (0,0,255))    
            cv2.drawContours(self.img, [box],0,(0,0,255),2)
            cv2.imshow('Material', self.img)
            
        img1 = Image.fromarray(cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB))
        
        self._drawLine(img1, [(box[0][0], box[0][1]), (box[1][0], box[1][1])], (255, 58, 48))
        self._drawLine(img1, [(box[1][0], box[1][1]), (box[2][0], box[2][1])], (255, 58, 48))
        self._drawLine(img1, [(box[2][0], box[2][1]), (box[3][0], box[3][1])], (255, 58, 48))
        self._drawLine(img1, [(box[3][0], box[3][1]), (box[0][0], box[0][1])], (255, 58, 48))
        
        for x in self.interior:
            rect = cv2.minAreaRect(x)
            box = cv2.boxPoints(rect)
            box = numpy.int0(box)
            
            self._drawLine(img1, [(box[0][0], box[0][1]), (box[1][0], box[1][1])], (48, 73, 89))
            self._drawLine(img1, [(box[1][0], box[1][1]), (box[2][0], box[2][1])], (48, 73, 89))
            self._drawLine(img1, [(box[2][0], box[2][1]), (box[3][0], box[3][1])], (48, 73, 89))
            self._drawLine(img1, [(box[3][0], box[3][1]), (box[0][0], box[0][1])], (48, 73, 89))

        quad = self.tag.quad
        self._drawLine(img1, [(quad[0][0], quad[0][1]), (quad[1][0], quad[1][1])], (96, 96, 96), 2, False)
        self._drawLine(img1, [(quad[1][0], quad[1][1]), (quad[2][0], quad[2][1])], (96, 96, 96), 2, False)
        self._drawLine(img1, [(quad[2][0], quad[2][1]), (quad[3][0], quad[3][1])], (96, 96, 96), 2, False)
        self._drawLine(img1, [(quad[3][0], quad[3][1]), (quad[0][0], quad[0][1])], (96, 96, 96), 2, False)
        
            
        if config.DEBUG:
            img1.show()
            
        img1.save(os.path.abspath(config.OUTLINE_DIR+os.path.basename(self.filename)), "JPEG")
Example #28
def draw_markers(img,markers):
    for m in markers:
        centroid = [m['verts'].sum(axis=0)/4.]
        origin = m['verts'][0]
        hat = np.array([[[0,0],[0,1],[.5,1.25],[1,1],[1,0]]],dtype=np.float32)
        hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
        if m['id_confidence']>.9:
            cv2.polylines(img,np.int0(hat),color = (0,0,255),isClosed=True)
        else:
            cv2.polylines(img,np.int0(hat),color = (0,255,0),isClosed=True)
        cv2.polylines(img,np.int0(centroid),color = (255,255,int(255*m['id_confidence'])),isClosed=True,thickness=2)
        m_str = 'id: {}'.format(m['id'])  # '{:i}' is not a valid format spec
        org = origin.copy()
        # cv2.rectangle(img, tuple(np.int0(org+(-5,-13))[0,:]), tuple(np.int0(org+(100,30))[0,:]),color=(0,0,0),thickness=-1)
        cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'id_confidence' in m:
            m_str = 'idc: {:.3f}'.format(m['id_confidence'])
            org += (0, 12)
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'loc_confidence' in m:
            m_str = 'locc: {:.3f}'.format(m['loc_confidence'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'frames_since_true_detection' in m:
            m_str = 'otf: {}'.format(m['frames_since_true_detection'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
        if 'opf_vel' in m:
            m_str = 'otf: {}'.format(m['opf_vel'])
            org += (0, 12 )
            cv2.putText(img,m_str,tuple(np.int0(org)[0,:]),fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.4, color=(0,0,255))
Example #29
 def discretize_color_space(self):
     '''
     Generates self.palette, which maps the 8-bit color channels to bins in the discretized space.
     Also generates self.colors, which is a list of all possible colors (a,b components) in this space.
     '''
     inds = np.arange(0, 256)
     div = np.linspace(0, 255, self.levels+1)[1]
     quantiz = np.int0(np.linspace(0, 255, self.levels))
     color_levels = np.clip(np.int0(inds/div), 0, self.levels-1)
     self.palette = quantiz[color_levels]
     bins = np.unique(self.palette) #the actual color bins
     self.colors = list(itertools.product(bins, bins)) #find all permutations of a/b bins
     self.color_to_label_map = {c:i for i,c in enumerate(self.colors)} #this maps the color pair to the index of the color
     self.label_to_color_map = dict(zip(self.color_to_label_map.values(),self.color_to_label_map.keys())) #takes a label and returns a,b
Example #30
def showMainImage2(current_frame):
    #current_frame2 = cv2.getTrackbarPos("Silder1", "Video")
    cap2.set(1,current_frame)
    ret,frame2 = cap2.read()
#     skeleton = skeletonArr2[current_frame]
#     if skeleton[0]!="untracked":  
#         for i in range(0,10): #for i in range(0,14)
#             index_x = 5 + i*7
#             index_y = 6 + i*7
#             cv2.circle(frame2,(int(skeleton[index_x]),int(skeleton[index_y])),3,(0,0,255),-1)
    
    label = labelArr2[current_frame]
    prv_label = labelArr2[current_frame-1]
    if label[1]!="None":
        if label[1] == "Right" or label[1] == "Left" or label[1] == "Intersect":
            cnt = (float(label[2]),float(label[3])),(float(label[4]),float(label[5])),float(label[6])
            box = cv2.boxPoints(cnt)  # cv2.cv.BoxPoints in OpenCV 2.x
            box = np.int0(box)
            cv2.drawContours(frame2,[box],0,(0,0,255),2)
            cv2.circle(frame2,(int(float(label[2])),int(float(label[3]))),7,(0,0,255),-1)
            if prv_label[1]!="None":
                cv2.circle(frame2,(int(float(prv_label[2])),int(float(prv_label[3]))),7,(100,100,250),-1)
        
        if label[1] == "Both":
            cnt1 = (float(label[2]),float(label[3])),(float(label[4]),float(label[5])),float(label[6])
            box1 = cv2.boxPoints(cnt1)
            box1 = np.int0(box1)
            cv2.drawContours(frame2,[box1],0,(0,0,255),2)
            cv2.circle(frame2,(int(float(label[2])),int(float(label[3]))),7,(0,0,255),-1)
            if prv_label[1]!="None":
                cv2.circle(frame2,(int(float(prv_label[2])),int(float(prv_label[3]))),7,(100,100,250),-1)
            
            cnt2 = (float(label[8]),float(label[9])),(float(label[10]),float(label[11])),float(label[12])
            box2 = cv2.boxPoints(cnt2)
            box2 = np.int0(box2)
            cv2.drawContours(frame2,[box2],0,(0,0,255),2)  
            cv2.circle(frame2,(int(float(label[8])),int(float(label[9]))),7,(0,0,255),-1)
            if prv_label[1]!="None":
                cv2.circle(frame2,(int(float(prv_label[8])),int(float(prv_label[9]))),7,(100,100,250),-1)
             
    #tempImage = cv2.cv.fromarray(frame2)
    #cv2.cv.DrawContours(tempImage, [box], (0,0,0), (255,255,255), 0)
    #im = np.asarray(tempImage)
    #h2,w2 = frame2.shape[:2]
    rsFrame2 = cv2.resize(frame2,(320,240))
    rsFrame2 = cv2.copyMakeBorder(rsFrame2,2,2,2,2,cv2.BORDER_CONSTANT,value=(0,0,255))
    merged_frame[254:498, 10:334] = rsFrame2
    cv2.imshow('Video', merged_frame)
    return frame2
Example #31
def get_corners(img):
    """
    Get the corners of a rectangular object (the map) from an image of the map taken at an angle
    Parameters:
        image(img): source image in color
    Returns:
       approx(np.array): 4x2 array of [x,y] coordinates of 4 corners of the map
    """
    # convert img to grayscale and invert (an earlier duplicate conversion and an unused
    # cv2.threshold call have been dropped)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = 255 - gray

    # do adaptive threshold on gray image
    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, 17, 1)
    thresh = 255 - thresh

    # apply morphology
    kernel = np.ones((3, 3), np.uint8)
    morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)

    # separate horizontal and vertical lines to filter out spots outside the rectangle
    kernel = np.ones((7, 3), np.uint8)
    vert = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
    kernel = np.ones((3, 7), np.uint8)
    horiz = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)

    # combine
    rect = cv2.add(horiz, vert)

    # thin
    kernel = np.ones((3, 3), np.uint8)
    rect = cv2.morphologyEx(rect, cv2.MORPH_ERODE, kernel)

    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(
        rect, None, None, None, 8, cv2.CV_32S)
    sizes = stats[1:, -1]  #get CC_STAT_AREA component
    img2 = np.zeros((labels.shape), np.uint8)

    # remove spots
    for i in range(0, nlabels - 1):
        if sizes[i] >= 210:  #filter small dotted regions
            img2[labels == i + 1] = 255

    res = img2

    # get largest contour
    contours = cv2.findContours(res, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    area_thresh = 0
    for c in contours:
        area = cv2.contourArea(c)
        if area > area_thresh:
            area_thresh = area  # keep the running maximum (the original swapped this assignment)
            big_contour = c

    # define main island contour approx. and hull
    #perimeter = cv2.arcLength(big_contour,True)
    epsilon = 0.01 * cv2.arcLength(big_contour, True)
    approx = cv2.approxPolyDP(big_contour, epsilon, True)
    #print(approx)
    r, h, c = approx.shape

    if r != 4:
        print(
            'ERROR! Could not find vertices to warp image. Make sure the entire map is shown in the image frame...'
        )
        pass
    else:
        approx = np.reshape(approx,
                            (4, 2))  # getting verticies of map from image

        # get rotated rectangle from contour
        rot_rect = cv2.minAreaRect(big_contour)
        box = cv2.boxPoints(rot_rect)
        box = np.int0(box)
        #print(box)

        # draw rotated rectangle on copy of img
        rot_bbox = img.copy()
        cv2.drawContours(rot_bbox, [box], 0, (0, 0, 255), 2)

    save = True
    if save:
        # write img with red rotated bounding box to disk
        cv2.imwrite("rectangle_thresh.jpg", thresh)
        cv2.imwrite("rectangle_res.jpg", res)
        cv2.imwrite("rectangle_rect.jpg", rect)
        # cv2.imwrite("rectangle_bounds.png", rot_bbox)

    show = False
    if show:
        # display it
        cv2.imshow('remove spots', res)
        cv2.imshow("IMAGE", img)
        #cv2.imshow("THRESHOLD", thresh)
        cv2.imshow("RECT", rect)
        cv2.imshow("BBOX", rot_bbox)
        cv2.waitKey(0)

    return approx
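The four corners returned here are typically fed straight into a perspective warp. A hedged sketch of that follow-on step (output size and corner ordering are assumptions, not part of the original):

import cv2
import numpy as np

def warp_map(img, approx, out_w=800, out_h=600):
    # assumes `approx` is ordered roughly (tl, tr, br, bl); real code should
    # sort the corners explicitly before warping
    src = approx.astype(np.float32)
    dst = np.float32([[0, 0], [out_w, 0], [out_w, out_h], [0, out_h]])
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, M, (out_w, out_h))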
Example #32
def processing_keyboard(x1, y1, x2, y2, obj_id):
    global slope_deg, send_point, keyb_frame
    global keyb_top_left, keyb_bottom_right

    box_h = int(((y2 - y1) / unpad_h) * img.shape[0])
    box_w = int(((x2 - x1) / unpad_w) * img.shape[1])
    y1    = int(((y1 - pad_y // 2) / unpad_h) * img.shape[0])
    x1    = int(((x1 - pad_x // 2) / unpad_w) * img.shape[1])
    color = colors[int(obj_id) % len(colors)]
    cls   = 'object'

    keyb_top_left     = (x1, y1)
    keyb_bottom_right = (x1+box_w, y1+box_h)

    cv2.rectangle(frame, (x1, y1), (x1+box_w, y1+box_h), color, 2)
    label = cls + " ({:.2f} deg)".format(slope_deg)
    cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_TRIPLEX, 1, color, lineType=cv2.LINE_AA)
    keyb_frame  = src_frame[ y1:y1 + box_h, x1:x1 + box_w]

    if keyb_frame.size != 0:
        imgray      = cv2.cvtColor(keyb_frame,cv2.COLOR_BGR2GRAY)
        ret,thresh  = cv2.threshold(imgray,127,255,0)
        thresh      = cv2.bitwise_not(thresh)
        # cv2.imshow('thresh', thresh)

        _,contours,_ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        max_contours = max(contours, key=cv2.contourArea)
        # hull         = cv2.convexHull(max_contours)
        hull         = max_contours
        # cv2.drawContours(keyb_frame, [hull], 0, (255,255,0), 2, offset=(0,0))
        rect         = cv2.minAreaRect(hull)
        box          = cv2.boxPoints(rect)
        box_d        = np.int0(box)

        if show_keyb_frame:
            cv2.drawContours(keyb_frame, [box_d], 0, (0,255,0), 1)

        # Sort 2D numpy array by 1st column (X)
        sorted_boxd = box_d[box_d[:,0].argsort()]
        left        = sorted_boxd[0:2]
        right       = sorted_boxd[2:4]

        P1 = left [ np.argmin( left [:,1]) ] + np.array([x1, y1])
        P3 = left [ np.argmax( left [:,1]) ] + np.array([x1, y1])
        P2 = right[ np.argmin( right[:,1]) ] + np.array([x1, y1])
        P4 = right[ np.argmax( right[:,1]) ] + np.array([x1, y1])

        cv2.circle(frame, tuple(P1), 5, (255,0,0),   -1) # B
        cv2.circle(frame, tuple(P2), 5, (0,255,0),   -1) # G
        cv2.circle(frame, tuple(P3), 5, (0,0,255),   -1) # R
        cv2.circle(frame, tuple(P4), 5, (0,255,255), -1) # Y

        left_mid  = (P1 + np.array(P3)) // 2
        right_mid = (P2 + np.array(P4)) // 2
        # cv2.line(frame,   tuple(left_mid),  tuple(right_mid), (255,255,255), 2)

        h_keyb, w_keyb  = thresh.shape
        keyb_mid        = w_keyb // 2
        offset_mid      = int( keyb_mid * 0.10 )
        
        xl_vline = keyb_mid-offset_mid 
        l_vline  = thresh[:,xl_vline] 
        yl_upper = np.where(l_vline==255)[0][0]
        xr_vline = keyb_mid+offset_mid 
        r_vline  = thresh[:,xr_vline] 
        yr_upper = np.where(r_vline==255)[0][0]

        m_upper  = (yr_upper-yl_upper) / (xr_vline-xl_vline)
        b_upper  = yl_upper - (m_upper*xl_vline)
        l_upper  = ( 0, int(b_upper) )
        r_upper  = ( w_keyb, int(m_upper * w_keyb + b_upper)  )

        if show_keyb_frame:
            cv2.line(keyb_frame, l_upper, r_upper, (255,255,255), 2)
            cv2.circle(keyb_frame, (xl_vline, yl_upper), 5, (255,0,0),   -1)
            cv2.circle(keyb_frame, (xr_vline, yr_upper), 5, (0,0,255),   -1)
        slope_deg = np.degrees(m_upper)#.astype(int)

        if mode != "point_cloud":
            check_roi('left_arm',  left_mid,  left_ws)
            check_roi('right_arm', right_mid, right_ws)
    
    if l_point and r_point:
        cv2.circle(frame, l_point, 5, (255,0,0), -1) # Left arm point
        cv2.circle(frame, r_point, 5, (0,0,255), -1) # Right arm point

        keyboard.x     = (l_point[0]  + r_point[0]) // 2
        keyboard.y     = (l_point[1]  + r_point[1]) // 2
        keyboard.theta = slope_deg
        cv2.circle(frame, (keyboard.x, keyboard.y), 10, (255,255,255), -1) # Middle keyboard point
        
        if send_point:
            pub_point(left_arm_pos_pub,  l_point)
            pub_point(right_arm_pos_pub, r_point)
            keyboard_pos_pub.publish(keyboard)
            send_point = False
Example #33
flat_chess = cv2.imread('../DATA/flat_chessboard.png')
flat_chess = cv2.cvtColor(flat_chess, cv2.COLOR_BGR2RGB)
gray_flat_chess = cv2.cvtColor(flat_chess, cv2.COLOR_BGR2GRAY)

real_chess = cv2.imread('../DATA/real_chessboard.jpg')
real_chess = cv2.cvtColor(real_chess, cv2.COLOR_BGR2RGB)
gray_real_chess = cv2.cvtColor(real_chess, cv2.COLOR_BGR2GRAY)

# applying Shi-Tomasi
# src,
# number of desired corners (in OpenCV, maxCorners <= 0 means no limit)
corners = cv2.goodFeaturesToTrack(gray_flat_chess, 5, 0.01, 10)

# it doesn't mark the corners itself, so we need to flatten the returned array and draw the circles ourselves
corners = np.int0(corners)  # converting from float to int

# flattening and drawing
for i in corners:
    x, y = i.ravel()  # flattening
    cv2.circle(flat_chess, (x, y), 3, (255, 0, 0), -1)

plt.imshow(flat_chess)

corners = cv2.goodFeaturesToTrack(gray_flat_chess, 64, 0.01, 10)

for i in corners:
    x, y = i.ravel()  # flattening
    cv2.circle(flat_chess, (x, y), 3, (255, 0, 0), -1)

plt.imshow(flat_chess)
Example #34
def find_boxes(boxes_mask: np.ndarray,
               mode: str= 'min_rectangle',
               min_area: float=0.,
               p_arc_length: float=0.01,
               n_max_boxes=math.inf) -> list:
    """
    Finds the coordinates of the box in the binary image `boxes_mask`.

    :param boxes_mask: Binary image: the mask of the box to find. uint8, 2D array
    :param mode: 'min_rectangle' : minimum enclosing rectangle, can be rotated
                 'rectangle' : minimum enclosing rectangle, not rotated
                 'quadrilateral' : minimum polygon approximated by a quadrilateral
    :param min_area: minimum area of the box to be found. A value in percentage of the total area of the image.
    :param p_arc_length: used to compute the epsilon value to approximate the polygon with a quadrilateral.
                         Only used when 'quadrilateral' mode is chosen.
    :param n_max_boxes: maximum number of boxes that can be found (default inf).
                        This will select n_max_boxes with largest area.
    :return: list of length n_max_boxes containing boxes with 4 corners [[x1,y1], ..., [x4,y4]]
    """

    assert len(boxes_mask.shape) == 2, \
        'Input mask must be a 2D array ! Mask is now of shape {}'.format(boxes_mask.shape)

    contours, _ = cv2.findContours(boxes_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:  # findContours returns an empty list (never None) when nothing is found
        print('No contour found')
        return None
    found_boxes = list()

    h_img, w_img = boxes_mask.shape[:2]

    def validate_box(box: np.array) -> (np.array, float):
        """

        :param box: array of 4 coordinates with format [[x1,y1], ..., [x4,y4]]
        :return: (box, area)
        """
        polygon = geometry.Polygon([point for point in box])
        if polygon.area > min_area * boxes_mask.size:

            # Correct out of range corners
            box = np.maximum(box, 0)
            box = np.stack((np.minimum(box[:, 0], boxes_mask.shape[1]),
                            np.minimum(box[:, 1], boxes_mask.shape[0])), axis=1)

            # return box
            return box, polygon.area

    if mode not in ['quadrilateral', 'min_rectangle', 'rectangle']:
        raise NotImplementedError
    if mode == 'quadrilateral':
        for c in contours:
            epsilon = p_arc_length * cv2.arcLength(c, True)
            cnt = cv2.approxPolyDP(c, epsilon, True)
            # box = np.vstack(simplify_douglas_peucker(cnt[:, 0, :], 4))

            # Find extreme points in Convex Hull
            hull_points = cv2.convexHull(cnt, returnPoints=True)
            # points = cnt
            points = hull_points
            if len(points) > 4:
                # Find closest points to corners using nearest neighbours
                tree = KDTree(points[:, 0, :])
                _, ul = tree.query((0, 0))
                _, ur = tree.query((w_img, 0))
                _, dl = tree.query((0, h_img))
                _, dr = tree.query((w_img, h_img))
                box = np.vstack([points[ul, 0, :], points[ur, 0, :],
                                 points[dr, 0, :], points[dl, 0, :]])
            elif len(hull_points) == 4:
                box = hull_points[:, 0, :]
            else:
                continue
            # Todo : test if it looks like a rectangle (2 sides must be more or less parallel)
            # todo : (otherwise we may end with strange quadrilaterals)
            if len(box) != 4:
                mode = 'min_rectangle'
                print('Quadrilateral has {} points. Switching to minimal rectangle mode'.format(len(box)))
            else:
                # found_box = validate_box(box)
                found_boxes.append(validate_box(box))
    if mode == 'min_rectangle':
        for c in contours:
            rect = cv2.minAreaRect(c)
            box = np.int0(cv2.boxPoints(rect))
            found_boxes.append(validate_box(box))
    elif mode == 'rectangle':
        for c in contours:
            x, y, w, h = cv2.boundingRect(c)
            box = np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]], dtype=int)
            found_boxes.append(validate_box(box))
    # sort by area
    found_boxes = [fb for fb in found_boxes if fb is not None]
    found_boxes = sorted(found_boxes, key=lambda x: x[1], reverse=True)
    if n_max_boxes == 1:
        if found_boxes:
            return found_boxes[0][0]
        else:
            return None
    else:
        return [fb[0] for i, fb in enumerate(found_boxes) if i < n_max_boxes]
Example #35
def track_vot(model,
              video,
              hp=None,
              mask_enable=False,
              refine_enable=False,
              device='cpu'):
    regions = []  # result and states[1 init / 2 lost / 0 skip]
    image_files, gt = video['image_files'], video['gt']

    start_frame, end_frame, lost_times, toc = 0, len(image_files), 0, 0

    for f, image_file in enumerate(image_files):
        im = cv2.imread(image_file)
        tic = cv2.getTickCount()
        if f == start_frame:  # init
            cx, cy, w, h = get_axis_aligned_bbox(gt[f])
            target_pos = np.array([cx, cy])
            target_sz = np.array([w, h])
            state = siamese_init(im, target_pos, target_sz, model, hp,
                                 device)  # init tracker
            location = cxy_wh_2_rect(state['target_pos'], state['target_sz'])
            regions.append(1 if 'VOT' in args.dataset else gt[f])
        elif f > start_frame:  # tracking
            state = siamese_track(state, im, mask_enable, refine_enable,
                                  device, args.debug)  # track
            if mask_enable:
                location = state['ploygon'].flatten()  # sic: the upstream state dict uses the misspelled key 'ploygon'
                mask = state['mask']
            else:
                location = cxy_wh_2_rect(state['target_pos'],
                                         state['target_sz'])
                mask = []

            if 'VOT' in args.dataset:
                gt_polygon = ((gt[f][0], gt[f][1]), (gt[f][2], gt[f][3]),
                              (gt[f][4], gt[f][5]), (gt[f][6], gt[f][7]))
                if mask_enable:
                    pred_polygon = ((location[0], location[1]), (location[2],
                                                                 location[3]),
                                    (location[4], location[5]), (location[6],
                                                                 location[7]))
                else:
                    pred_polygon = ((location[0], location[1]),
                                    (location[0] + location[2],
                                     location[1]), (location[0] + location[2],
                                                    location[1] + location[3]),
                                    (location[0], location[1] + location[3]))
                b_overlap = vot_overlap(gt_polygon, pred_polygon,
                                        (im.shape[1], im.shape[0]))
            else:
                b_overlap = 1

            if b_overlap:
                regions.append(location)
            else:  # lost
                regions.append(2)
                lost_times += 1
                start_frame = f + 5  # skip 5 frames
        else:  # skip
            regions.append(0)
        toc += cv2.getTickCount() - tic

        if args.visualization and f >= start_frame:  # visualization (skip lost frame)
            im_show = im.copy()
            if f == 0: cv2.destroyAllWindows()
            if gt.shape[0] > f:
                if len(gt[f]) == 8:
                    cv2.polylines(
                        im_show, [np.array(gt[f], int).reshape(
                            (-1, 1, 2))], True, (0, 255, 0), 3)  # np.int was removed in NumPy 1.24
                else:
                    cv2.rectangle(im_show, (gt[f, 0], gt[f, 1]),
                                  (gt[f, 0] + gt[f, 2], gt[f, 1] + gt[f, 3]),
                                  (0, 255, 0), 3)
            if len(location) == 8:
                if mask_enable:
                    mask = mask > state['p'].seg_thr
                    im_show[:, :,
                            2] = mask * 255 + (1 - mask) * im_show[:, :, 2]
                location_int = np.int0(location)
                cv2.polylines(im_show, [location_int.reshape((-1, 1, 2))],
                              True, (0, 255, 255), 3)
            else:
                location = [int(l) for l in location]
                cv2.rectangle(
                    im_show, (location[0], location[1]),
                    (location[0] + location[2], location[1] + location[3]),
                    (0, 255, 255), 3)
            cv2.putText(im_show, str(f), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (0, 255, 255), 2)
            cv2.putText(im_show, str(lost_times), (40, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv2.putText(im_show,
                        str(state['score']) if 'score' in state else '',
                        (40, 120), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            cv2.imshow(video['name'], im_show)
            cv2.waitKey(1)
    toc /= cv2.getTickFrequency()

    # save result
    name = args.arch.split('.')[0] + '_' + ('mask_' if mask_enable else '') + ('refine_' if refine_enable else '') +\
           args.resume.split('/')[-1].split('.')[0]

    if 'VOT' in args.dataset:
        video_path = join('test', args.dataset, name, 'baseline',
                          video['name'])
        if not isdir(video_path): makedirs(video_path)
        result_path = join(video_path, '{:s}_001.txt'.format(video['name']))
        with open(result_path, "w") as fin:
            for x in regions:
                fin.write("{:d}\n".format(x)) if isinstance(x, int) else \
                        fin.write(','.join([vot_float2str("%.4f", i) for i in x]) + '\n')
    else:  # OTB
        video_path = join('test', args.dataset, name)
        if not isdir(video_path): makedirs(video_path)
        result_path = join(video_path, '{:s}.txt'.format(video['name']))
        with open(result_path, "w") as fin:
            for x in regions:
                fin.write(','.join([str(i) for i in x]) + '\n')

    logger.info(
        '({:d}) Video: {:12s} Time: {:02.1f}s Speed: {:3.1f}fps Lost: {:d}'.
        format(v_id, video['name'], toc, f / toc, lost_times))

    return lost_times, f / toc
Example #36
def extract_card(img, output_fn=None, min_focus=120, debug=False):
    """
    """

    imgwarp = None

    # Check the image is not too blurry
    focus = varianceOfLaplacian(img)
    if focus < min_focus:
        if debug: print("Focus too low :", focus)
        return False, None

    # Convert in gray color
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Noise-reducing and edge-preserving filter
    gray = cv2.bilateralFilter(gray, 11, 17, 17)

    # Edge extraction
    edge = cv2.Canny(gray, 30, 200)

    # Find the contours in the edged image
    _, cnts, _ = cv2.findContours(edge.copy(), cv2.RETR_EXTERNAL,
                                  cv2.CHAIN_APPROX_SIMPLE)

    # We suppose that the contour with largest area corresponds to the contour delimiting the card
    cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]

    # We want to check that 'cnt' is the contour of a rectangular shape
    # First, determine 'box', the minimum area bounding rectangle of 'cnt'
    # Then compare area of 'cnt' and area of 'box'
    # Both areas should be very close
    rect = cv2.minAreaRect(cnt)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    areaCnt = cv2.contourArea(cnt)
    areaBox = cv2.contourArea(box)
    valid = areaCnt / areaBox > 0.95

    if valid:
        # We want to transform the zone inside the contour into the reference rectangle of dimensions (cardW,cardH)
        ((xr, yr), (wr, hr), thetar) = rect
        # Determine 'Mp' the transformation that transforms 'box' into the reference rectangle
        if wr > hr:
            Mp = cv2.getPerspectiveTransform(np.float32(box), refCard)
        else:
            Mp = cv2.getPerspectiveTransform(np.float32(box), refCardRot)
        # Determine the warped image by applying the transformation to the image
        imgwarp = cv2.warpPerspective(img, Mp, (cardW, cardH))
        # Add alpha layer
        imgwarp = cv2.cvtColor(imgwarp, cv2.COLOR_BGR2BGRA)

        # Shape of 'cnt' is (n,1,2), type=int with n = number of points
        # We reshape into (1,n,2), type=float32, before feeding to perspectiveTransform
        cnta = cnt.reshape(1, -1, 2).astype(np.float32)
        # Apply the transformation 'Mp' to the contour
        cntwarp = cv2.perspectiveTransform(cnta, Mp)
        cntwarp = cntwarp.astype(int)  # np.int was removed in NumPy 1.24

        # We build the alpha channel so that we have transparency on the
        # external border of the card
        # First, initialize alpha channel fully transparent
        alphachannel = np.zeros(imgwarp.shape[:2], dtype=np.uint8)
        # Then fill in the contour to make opaque this zone of the card
        cv2.drawContours(alphachannel, cntwarp, 0, 255, -1)

        # Apply the alphamask onto the alpha channel to clean it
        alphachannel = cv2.bitwise_and(alphachannel, alphamask)

        # Add the alphachannel to the warped image
        imgwarp[:, :, 3] = alphachannel

        # Save the image to file
        if output_fn is not None:
            cv2.imwrite(output_fn, imgwarp)

    if debug:
        cv2.imshow("Gray", gray)
        cv2.imshow("Canny", edge)
        edge_bgr = cv2.cvtColor(edge, cv2.COLOR_GRAY2BGR)
        cv2.drawContours(edge_bgr, [box], 0, (0, 0, 255), 3)
        cv2.drawContours(edge_bgr, [cnt], 0, (0, 255, 0), -1)
        cv2.imshow("Contour with biggest area", edge_bgr)
        if valid:
            cv2.imshow("Alphachannel", alphachannel)
            cv2.imshow("Extracted card", imgwarp)

    return valid, imgwarp
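varianceOfLaplacian is assumed by extract_card but not shown; the usual definition of this focus measure (an assumption here, but the standard formulation) is:

import cv2

def varianceOfLaplacian(img):
    # variance of the Laplacian: a common single-number blur/focus score
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(gray, cv2.CV_64F).var()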
Example #37
def inference(model, model_name: str, data_folder: str, fold: int, 
              debug=False, img_size=IMG_SIZE,
              batch_size = 8, num_workers=4):
    """
    Model inference
    
    Input: 
        model : PyTorch model
        model_name : string name for model for checkpoints saving
        fold: evaluation fold number, 0-3
        debug: if True, runs the debugging on a few images
        img_size: size of images for training (for progressive learning)
        batch_size: number of images in batch
        num_workers: number of workers available
        resume_weights: directory with weights to resume (if available)
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)
        
    # We weight the loss for the 0 class lower to account for (some of) the big class imbalance
    class_weights = torch.from_numpy(np.array([0.2] + [1.0]*NUM_CLASSES, dtype=np.float32))
    class_weights = class_weights.to(device)

    # choose test samples
    input_filepaths = sorted(glob.glob(os.path.join(data_folder, "*_input.png")))
    sample_tokens = [x.split("/")[-1].replace("_input.png","") for x in input_filepaths]
    sample_tokens = [x.replace("bev_data\\","") for x in sample_tokens] 
    
    # create directories for test predictions and outputs
    predictions_dir  = f'{OUTPUT_ROOT}/test_preds/{model_name}_fold_{fold}'
    test_outputs_dir = f'{OUTPUT_ROOT}/test_outs/{model_name}_fold_{fold}'    
    os.makedirs(predictions_dir, exist_ok=True)
    os.makedirs(test_outputs_dir, exist_ok=True)
    
    test_dataset = BEVTestDataset(sample_tokens=sample_tokens, 
                                  debug=debug, img_size=img_size, 
                                  input_dir=data_folder,
                                  transforms = albu_test_tansforms) 

    # dataloader for test
    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=num_workers)
    print('{} test images'.format(len(test_dataset)))   
    
    # We perform an opening morphological operation to filter tiny detections
    # Note that this may be problematic for classes that are inherently small (e.g. pedestrians)..
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    gc.collect()

    progress_bar = tqdm(test_loader)
    sample_tokens, all_losses = [], []
    detection_boxes, detection_scores, detection_classes = [], [], []

    # Arbitrary threshold in our system to create a binary image to fit boxes around.
    background_threshold = 200
    with torch.no_grad():
        model.eval()
        for ii, (X, batch_sample_tokens) in enumerate(progress_bar):
            sample_tokens.extend(batch_sample_tokens)

            X = X.to(device)  # [N, 1, H, W]
            prediction = model(X)  # [N, 2, H, W]

            prediction = F.softmax(prediction, dim=1)

            prediction_cpu = prediction.cpu().numpy()
            # quantize to uint8 to conserve memory (keeping float maps would need >20GB)
            predictions = np.round(prediction_cpu * 255).astype(np.uint8)

            # Get probabilities for non-background
            predictions_non_class0 = 255 - predictions[:, 0]
            predictions_opened = np.zeros((predictions_non_class0.shape), dtype=np.uint8)

            for i, p in enumerate(predictions_non_class0):
                thresholded_p = (p > background_threshold).astype(np.uint8)
                predictions_opened[i] = cv2.morphologyEx(thresholded_p, cv2.MORPH_OPEN, kernel)

                sample_boxes, sample_detection_scores, sample_detection_classes = calc_detection_box(predictions_opened[i],
                                                                                                    predictions[i])

                detection_boxes.append(np.array(sample_boxes))
                detection_scores.append(sample_detection_scores)
                detection_classes.append(sample_detection_classes)
    
    print("Total amount of boxes:", np.sum([len(x) for x in detection_boxes]))        
    ind = 11
    # Visualize the boxes in the first sample
    t = np.zeros_like(predictions_opened[0])
    for sample_boxes in detection_boxes[ind]:
        box_pix = np.int0(sample_boxes)
        cv2.drawContours(t, [box_pix], 0, (255), 2)
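Note: the threshold-then-open filtering above can be exercised on a tiny synthetic probability map. A minimal, self-contained sketch:

import cv2
import numpy as np

# fake uint8 "probability" map: one solid blob plus two single-pixel speckles
prob = np.zeros((64, 64), dtype=np.uint8)
prob[20:40, 20:40] = 250
prob[5, 5] = 250
prob[60, 60] = 250

background_threshold = 200
binary = (prob > background_threshold).astype(np.uint8)

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
opened = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)

print('foreground pixels before opening:', int(binary.sum()))  # blob + speckles
print('foreground pixels after opening :', int(opened.sum()))  # speckles removed

The opening erases anything smaller than the 3x3 structuring element, which is exactly the tiny-detection filter the comment above describes.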
Example #38
0
def siamese_track(state,
                  im,
                  mask_enable=False,
                  refine_enable=False,
                  device='cpu',
                  debug=False):
    p = state['p']
    net = state['net']
    avg_chans = state['avg_chans']
    window = state['window']
    target_pos = state['target_pos']
    target_sz = state['target_sz']

    wc_x = target_sz[1] + p.context_amount * sum(target_sz)
    hc_x = target_sz[0] + p.context_amount * sum(target_sz)
    s_x = np.sqrt(wc_x * hc_x)
    scale_x = p.exemplar_size / s_x
    d_search = (p.instance_size - p.exemplar_size) / 2
    pad = d_search / scale_x
    s_x = s_x + 2 * pad
    crop_box = [
        target_pos[0] - round(s_x) / 2, target_pos[1] - round(s_x) / 2,
        round(s_x),
        round(s_x)
    ]

    if debug:
        im_debug = im.copy()
        crop_box_int = np.int0(crop_box)
        cv2.rectangle(im_debug, (crop_box_int[0], crop_box_int[1]),
                      (crop_box_int[0] + crop_box_int[2],
                       crop_box_int[1] + crop_box_int[3]), (255, 0, 0), 2)
        cv2.imshow('search area', im_debug)
        cv2.waitKey(0)

    # extract scaled crops for search region x at previous target position
    x_crop = Variable(
        get_subwindow_tracking(im, target_pos, p.instance_size, round(s_x),
                               avg_chans).unsqueeze(0))
    # x_no_crop = Variable(get_subwindow_tracking(im, target_pos, p.instance_size, round(s_x), avg_chans).unsqueeze(0))

    if mask_enable:
        score, delta, mask = net.track_mask(x_crop.to(device))
        # score, delta, mask = net.track_mask(x_no_crop.to(device))
    else:
        score, delta = net.track(x_crop.to(device))

    delta = delta.permute(1, 2, 3, 0).contiguous().view(4,
                                                        -1).data.cpu().numpy()
    score = F.softmax(score.permute(1, 2, 3,
                                    0).contiguous().view(2, -1).permute(1, 0),
                      dim=1).data[:, 1].cpu().numpy()

    delta[0, :] = delta[0, :] * p.anchor[:, 2] + p.anchor[:, 0]
    delta[1, :] = delta[1, :] * p.anchor[:, 3] + p.anchor[:, 1]
    delta[2, :] = np.exp(delta[2, :]) * p.anchor[:, 2]
    delta[3, :] = np.exp(delta[3, :]) * p.anchor[:, 3]

    def change(r):
        return np.maximum(r, 1. / r)

    def sz(w, h):
        pad = (w + h) * 0.5
        sz2 = (w + pad) * (h + pad)
        return np.sqrt(sz2)

    def sz_wh(wh):
        pad = (wh[0] + wh[1]) * 0.5
        sz2 = (wh[0] + pad) * (wh[1] + pad)
        return np.sqrt(sz2)

    # size penalty
    target_sz_in_crop = target_sz * scale_x
    s_c = change(sz(delta[2, :], delta[3, :]) /
                 (sz_wh(target_sz_in_crop)))  # scale penalty
    r_c = change((target_sz_in_crop[0] / target_sz_in_crop[1]) /
                 (delta[2, :] / delta[3, :]))  # ratio penalty

    penalty = np.exp(-(r_c * s_c - 1) * p.penalty_k)
    pscore = penalty * score

    # cos window (motion model)
    pscore = pscore * (1 - p.window_influence) + window * p.window_influence
    best_pscore_id = np.argmax(pscore)

    pred_in_crop = delta[:, best_pscore_id] / scale_x
    lr = penalty[best_pscore_id] * score[best_pscore_id] * p.lr  # lr for OTB

    res_x = pred_in_crop[0] + target_pos[0]
    res_y = pred_in_crop[1] + target_pos[1]

    res_w = target_sz[0] * (1 - lr) + pred_in_crop[2] * lr
    res_h = target_sz[1] * (1 - lr) + pred_in_crop[3] * lr

    target_pos = np.array([res_x, res_y])
    target_sz = np.array([res_w, res_h])

    # for Mask Branch
    if mask_enable:
        best_pscore_id_mask = np.unravel_index(best_pscore_id,
                                               (5, p.score_size, p.score_size))
        delta_x, delta_y = best_pscore_id_mask[2], best_pscore_id_mask[1]

        if refine_enable:
            mask = net.track_refine(
                (delta_y, delta_x)).to(device).sigmoid().squeeze().view(
                    p.out_size, p.out_size).cpu().data.numpy()
        else:
            mask = mask[0, :, delta_y, delta_x].sigmoid(). \
                squeeze().view(p.out_size, p.out_size).cpu().data.numpy()

        def crop_back(image, bbox, out_sz, padding=-1):
            a = (out_sz[0] - 1) / bbox[2]
            b = (out_sz[1] - 1) / bbox[3]
            c = -a * bbox[0]
            d = -b * bbox[1]
            mapping = np.array([[a, 0, c], [0, b, d]]).astype(np.float64)  # np.float is removed in recent NumPy
            crop = cv2.warpAffine(image,
                                  mapping, (out_sz[0], out_sz[1]),
                                  flags=cv2.INTER_LINEAR,
                                  borderMode=cv2.BORDER_CONSTANT,
                                  borderValue=padding)
            return crop

        s = crop_box[2] / p.instance_size
        sub_box = [
            crop_box[0] + (delta_x - p.base_size / 2) * p.total_stride * s,
            crop_box[1] + (delta_y - p.base_size / 2) * p.total_stride * s,
            s * p.exemplar_size, s * p.exemplar_size
        ]
        s = p.out_size / sub_box[2]
        back_box = [
            -sub_box[0] * s, -sub_box[1] * s, state['im_w'] * s,
            state['im_h'] * s
        ]
        mask_in_img = crop_back(mask, back_box, (state['im_w'], state['im_h']))

        target_mask = (mask_in_img > p.seg_thr).astype(np.uint8)
        if cv2.__version__[-5] == '4':
            contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
        else:
            _, contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_NONE)
        cnt_area = [cv2.contourArea(cnt) for cnt in contours]
        if len(contours) != 0 and np.max(cnt_area) > 100:
            contour = contours[np.argmax(cnt_area)]  # use max area polygon
            polygon = contour.reshape(-1, 2)
            # pbox = cv2.boundingRect(polygon)  # Min Max Rectangle
            prbox = cv2.boxPoints(
                cv2.minAreaRect(polygon))  # Rotated Rectangle

            # box_in_img = pbox
            rbox_in_img = prbox
        else:  # empty mask
            location = cxy_wh_2_rect(target_pos, target_sz)
            rbox_in_img = np.array(
                [[location[0], location[1]],
                 [location[0] + location[2], location[1]],
                 [location[0] + location[2], location[1] + location[3]],
                 [location[0], location[1] + location[3]]])

    target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
    target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
    target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
    target_sz[1] = max(10, min(state['im_h'], target_sz[1]))

    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    state['score'] = score[best_pscore_id]
    state['mask'] = mask_in_img if mask_enable else []
    state['ploygon'] = rbox_in_img if mask_enable else []
    return state
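Note: cxy_wh_2_rect is called in the empty-mask fallback above but is not shown in this snippet. A plausible sketch of such a helper (an assumption, not the original implementation):

import numpy as np

def cxy_wh_2_rect(pos, sz):
    # assumed behavior: (center x, center y) + (w, h) -> corner-based (x, y, w, h)
    return np.array([pos[0] - sz[0] / 2, pos[1] - sz[1] / 2, sz[0], sz[1]])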
Example #39
0
    for contour in contoursG:

        area = cv2.contourArea(contour)
        if area > 2000:
            foundG = True
            M = cv2.moments(contour)
            if (len(pointsG) > 30):
                pointsG.pop(0)
            pointsG.append((M['m10'] / M['m00'], M['m01'] / M['m00']))
            centxG = M['m10'] / M['m00']
            centyG = M['m01'] / M['m00']

            rect = cv2.minAreaRect(contour)
            boxG = cv2.boxPoints(rect)
            boxG = np.int0(boxG)
            cv2.drawContours(frame, [boxG], 0, (0, 0, 255), 2)

            break
    for contour in contoursR:

        area = cv2.contourArea(contour)
        if area > 2000:
            foundR = True
            M = cv2.moments(contour)
            if (len(pointsR) > 30):
                pointsR.pop(0)
            pointsR.append((M['m10'] / M['m00'], M['m01'] / M['m00']))
            centxR = M['m10'] / M['m00']
            centyR = M['m01'] / M['m00']
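Note: the centroid-from-moments computation above (m10/m00, m01/m00) can be checked standalone. A minimal sketch, assuming the OpenCV 4.x findContours signature:

import cv2
import numpy as np

img = np.zeros((100, 100), dtype=np.uint8)
img[30:70, 40:80] = 255  # 40x40 square: centroid near x=59.5, y=49.5

cnts, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x signature
M = cv2.moments(cnts[0])
cx = M['m10'] / M['m00']
cy = M['m01'] / M['m00']
print('centroid:', (cx, cy))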
def TrackTheTape(frame, sd):  # does the opencv image processing

    try:
        # HL = sd.getNumber('HL', 0)
        # HU = sd.getNumber('HU', 180)
        # SL = sd.getNumber('SL', 0)
        # SU = sd.getNumber('SU', 255)
        # VL = sd.getNumber('VL', 40)
        # VU = sd.getNumber('VU', 255)
        HL = sd.getNumber('HL', 66)
        HU = sd.getNumber('HU', 114)
        SL = sd.getNumber('SL', 64)
        SU = sd.getNumber('SU', 117)
        VL = sd.getNumber('VL', 127)
        VU = sd.getNumber('VU', 179)
        TapeLower = (HL, SL, VL)
        TapeUpper = (HU, SU, VU)
        print("HSV lower:%s HSV Upper:%s" % (TapeLower, TapeUpper))
    except Exception:
        print("Unable to grab network table values, going to default values")
        TapeLower = (66, 64, 127)
        TapeUpper = (114, 117, 179)

    if frame is None:  # if there is no frame received
        sd.putNumber('GettingFrameData', False)
    else:
        sd.putNumber('GettingFrameData', True)

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # convert to HSV color space

    mask = cv2.inRange(hsv, TapeLower,
                       TapeUpper)  # binary mask: white only where the pixel falls within the bounds
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    minArea = 1000  # minimum area of either of the tapes
    a, cnts, b = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                  cv2.CHAIN_APPROX_NONE)
    center = None
    neg = [-1, -1]  # sentinel used when no tape is detected
    centerN = list(neg)
    centerL = list(neg)
    centerR = list(neg)
    avgArea = 0
    cnts2 = []
    for cur in cnts:
        if cv2.contourArea(cur) >= minArea:
            cnts2.append(cur)
    cnts = cnts2

    if len(cnts) > 1:  # if there is more than 1 contour
        # sort the contours so those with the largest area come first
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        c = cnts[0]  # c is the largest contour
        d = cnts[1]  # d is the second largest contour
        rect = cv2.minAreaRect(c)
        boxL = cv2.boxPoints(rect)
        boxL = np.int0(boxL)
        # for these refer to https://docs.opencv.org/3.1.0/dd/d49/tutorial_py_contour_features.html
        rect2 = cv2.minAreaRect(d)
        boxR = cv2.boxPoints(rect2)
        boxR = np.int0(boxR)
        centerL = FindCenter(boxL)
        centerR = FindCenter(boxR)
        # decide which tape is on the left and which on the right by comparing slopes
        if findSlope(boxL) < findSlope(boxR):
            centerL, centerR = centerR, centerL
            boxL, boxR = boxR, boxL
        avgArea = (cv2.contourArea(c) + cv2.contourArea(d)) / 2
        tape1 = centerL
        tape2 = centerR
        centerN[0] = (centerR[0] + centerL[0]) / 2
        centerN[1] = (centerR[1] + centerL[1]) / 2
        cv2.drawContours(frame, [boxL], 0, (0, 0, 255), 2)
        cv2.drawContours(frame, [boxR], 0, (0, 255, 0), 2)
        # else:
        #     tape1 = neg
        #     tape2 = neg
        #     avgArea = -1
    elif len(cnts) == 1:  # if there is 1 contour
        # sort the contours so those with the largest area come first
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        c = cnts[0]  # c is the largest contour
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        boxL = None
        boxR = None
        # for these refer to https://docs.opencv.org/3.1.0/dd/d49/tutorial_py_contour_features.html
        # if len(cnts) >= 1:
        center = FindCenter(box)
        # with only one tape detected, the sign of its slope tells whether it is on the left or right
        if findSlope(box) <= 0:
            centerR = center
            centerN = centerR
            centerL = neg
            boxR = box
            cv2.drawContours(frame, [boxR], 0, (0, 255, 0), 2)
        else:
            centerL = center
            centerN = centerL
            centerR = neg
            boxL = box
            cv2.drawContours(frame, [boxL], 0, (0, 0, 255), 2)
        avgArea = cv2.contourArea(c)
        tape1 = centerL
        tape2 = centerR
        # else:
        #     tape1 = neg
        #     tape2 = neg
        #     avgArea = -1
    else:  # when no tape is detected put the neg array everywhere
        tape1 = neg
        tape2 = neg
        centerN = neg
        avgArea = -1

    sd.putNumberArray('tape1', tape1)
    sd.putNumberArray('tape2', tape2)
    sd.putNumberArray('centerN', centerN)
    sd.putNumber('avgArea', avgArea)
    return frame
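Note: FindCenter and findSlope are called above but not shown. Minimal sketches of plausible implementations, assuming 'box' is the 4x2 corner array returned by cv2.boxPoints (these are assumptions, not the original helpers):

import numpy as np

def FindCenter(box):
    # average of the four corners of the rotated rectangle
    return [float(np.mean(box[:, 0])), float(np.mean(box[:, 1]))]

def findSlope(box):
    # slope of the edge between the first two corners; guards against a vertical edge
    dx = float(box[1][0] - box[0][0])
    dy = float(box[1][1] - box[0][1])
    return dy / dx if dx != 0 else float('inf')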
Example #41
0
def visual():

    global begin
    begin = time.time()
    cap = cv2.VideoCapture(int(camera))
    ret, image = cap.read()
    cv2.waitKey(1)
    # res_y = int(len(image[0]))
    res_x = int(len(image))
    # starty = int((res_y - res_x) / 2)
    origin = int(res_x / 2)
    DEFINED_CENTER.append(origin)
    DEFINED_CENTER.append(origin)
    global morp_ker, thresh_ker, thresh_sub, epsilon

    while 1:
        ret, image = cap.read()
        cv2.waitKey(1)
        image = image[:, 0:480]
        image = imutils.rotate(image, 90)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY, thresh_ker, thresh_sub)
        edged = cv2.Canny(gray, 50, 100)
        kernel = np.ones((morp_ker, morp_ker), np.uint8)
        closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
        cnts = cv2.findContours(closed, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        mycnts = []
        image[origin, origin - 5:origin + 5] = (255, 0, 0)
        image[origin - 5:origin + 5, origin] = (255, 0, 0)
        cv2.imshow("Out", image)
        cv2.waitKey(5)
        # print(thresh_sub, thresh_ker, morp_ker)
        if len(cnts) == 0:
            print(
                "Warning!\nNo contour detected. Threshold level or morphological filter\nparameters must be changed."
            )
            print("Enter Thresholding kernel size (Previous value is %d): " %
                  thresh_ker)
            thresh_ker = input()
            print(
                "Enter Threshold subtraction value (Previous value is %d): " %
                thresh_sub)
            thresh_sub = input()
            print(
                "Enter Morphological Filter kernel size (Previous value is %d): "
                % morp_ker)
            morp_ker = input()

        for cnt in cnts:
            if 20000 < cv2.contourArea(cnt) < 45000:
                perimeter = cv2.arcLength(cnt, True)
                print(epsilon)
                approx = cv2.approxPolyDP(cnt, epsilon * perimeter, True)
                print(len(approx))
                if len(approx) == 4:
                    mycnts.append(cnt)

        cv2.circle(image, (origin, origin), 1, (255, 255, 255), -1)
        for cnt in mycnts:
            data = []
            if 15000 < cv2.contourArea(cnt) < 50000:
                rect = cv2.minAreaRect(cnt)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
                (cx_f, cy_f) = rect[0]
                angle = float(rect[2])
                cx = int(cx_f)
                cy = int(cy_f)
                cv2.circle(image, (cx, cy), 1, (0, 0, 255), -1)
                cv2.putText(
                    image,
                    "center: %f-%f  Angle: %f" % (cx_f, cy_f, angle),
                    (15, 15), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 1)

                data.append(cx_f)
                data.append(cy_f)
                data.append(angle)
                cv2.drawContours(image, mycnts, -1, (0, 255, 0), 1)
                image[origin, origin - 5:origin + 5] = (255, 0, 0)
                image[origin - 5:origin + 5, origin] = (255, 0, 0)
                cv2.imshow("Output", image)
                cv2.waitKey(5)
                # cv2.waitKey()
                return data
        end = time.time()
        elapsed = end - begin
        if elapsed > 5:
            print("Center detection failed.\n Check camera output.")
            check_position_status = input(
                "Is component in the camera limits?(y/n)")
            if check_position_status.lower() == "y":
                print("Enter new Epsilon value(old value is %d:" % epsilon)
                new_epsilon = 0
                epsilon = float(new_epsilon)
            else:
                print("Make manual adjustments.\n")
                x, y = input("Enter x-y values to make adjustment:").split()
                gcode_generate(x, y, 0, State.CAMERA_ADJUST)

            end, elapsed = 0, 0
            begin = time.time()
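Note: the quadrilateral filter above (approxPolyDP with a tolerance proportional to the perimeter, keeping contours that simplify to 4 vertices) can be tested standalone. A minimal sketch with an assumed epsilon of 0.02:

import cv2
import numpy as np

canvas = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(canvas, (40, 60), (160, 140), 255, -1)

cnts, _ = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x signature
for cnt in cnts:
    perimeter = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.02 * perimeter, True)
    print('vertices after simplification:', len(approx))  # 4 for a rectangle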
Example #42
0
import numpy as np
import cv2
from matplotlib import pyplot as plt

img1 = cv2.imread('C:/Users/Jared/Documents/python_scripts/test1_segmented_images_left/rectified_left_20.png')
img2 = cv2.imread('C:/Users/Jared/Documents/python_scripts/test1_segmented_images_right/rectified_right_20.png')
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray_left', gray1)
cv2.imshow('gray_right', gray2)
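# goodFeaturesToTrack (Shi-Tomasi): at most 15 corners per image, quality level 0.07,
# and a minimum distance of 40 px between accepted corners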
corners1 = cv2.goodFeaturesToTrack(gray1, 15, 0.07, 40)
corners2 = cv2.goodFeaturesToTrack(gray2, 15, 0.07, 40)
corners1 = np.int0(corners1)
print('corners1 = ', corners1)
corners2 = np.int0(corners2)
print('corners2 = ', corners2)

for i in corners1:
    # print('i= ', i)
    x, y = i.ravel()
    cv2.circle(img1, (x, y), 3, 255, -1)

for j in corners2:
    x2, y2 = j.ravel()
    cv2.circle(img2, (x2, y2), 3, 255, -1)

# plt.imshow(img1)
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
ax1.imshow(img2)
ax2.imshow(img1)
plt.show()
Example #43
0
cv2.imwrite('finalMask.jpg', finalMask)
#final_img = cv2.add(img, finalMask)
#cv2.imwrite('final_img.jpg', final_img)
#edged = cv2.Canny(finalMask, 100, 255)
#cv2.imwrite('edge.jpg', edged)
_, contours, hierarchy = cv2.findContours(finalMask, cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
#areas = [cv2.contourArea(c) for c in contours]
#max_index = np.argmax(areas)
#rect = cv2.minAreaRect(contours[max_index])
for cnt in contours:
    areas = cv2.contourArea(cnt)
    if areas < 100:
        continue
    rect = cv2.minAreaRect(cnt)
    box = np.int0(cv2.boxPoints(rect))
    #img = cv2.drawContours(img.copy(), [box], -1, (255, 0, 0), 3)
    xs = [i[0] for i in box]
    ys = [i[1] for i in box]
    x1 = min(xs)
    x2 = max(xs)
    y1 = min(ys)
    y2 = max(ys)
    h = y2 - y1
    w = x2 - x1
    for i in range(y1, y1 + h):
        for j in range(x1, x1 + w):
            #print(i, j)
            if red_img[i, j] > 160:
                img[i, j] = [255, 255, 255]
    #roi = img[y1:y1+h, x1:x1+w]
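Note: the per-pixel double loop above is slow in Python; the same repaint can be expressed as one vectorized mask assignment. A sketch under the assumption that red_img is a single-channel array aligned with img (the shapes below are placeholders):

import numpy as np

# stand-in arrays with hypothetical shapes matching the snippet above
img = np.zeros((240, 320, 3), dtype=np.uint8)
red_img = np.random.randint(0, 256, size=(240, 320), dtype=np.uint8)
y1, x1, h, w = 50, 60, 80, 100

region = red_img[y1:y1 + h, x1:x1 + w]
img[y1:y1 + h, x1:x1 + w][region > 160] = (255, 255, 255)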
Example #44
0
mask = cv.inRange(hsv_roi, np.array((0., 60., 32.)), np.array(
    (180., 255., 255)))
roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
cv.imshow('roi', roi)
while True:
    ret, frame = cap.read()
    if ret:

        hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
        dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # apply meanshift to get the new location
        ret, track_window = cv.CamShift(dst, track_window, term_crit)

        # Draw it on image
        pts = cv.boxPoints(ret)
        print(pts)
        pts = np.int0(pts)
        final_image = cv.polylines(frame, [pts], True, (0, 255, 0), 2)
        #x,y,w,h = track_window
        #final_image = cv.rectangle(frame, (x,y), (x+w, y+h), 255, 3)

        cv.imshow('dst', dst)
        cv.imshow('final_image', final_image)
        k = cv.waitKey(30) & 0xff
        if k == 27:
            break
    else:
        break
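Note: this snippet begins after the tracker has been initialized, so cap, roi, hsv_roi and track_window must already exist. A minimal sketch of the missing setup (the window coordinates are arbitrary assumptions):

import cv2 as cv

cap = cv.VideoCapture(0)
ret, frame = cap.read()

# hypothetical initial window (x, y, w, h) around the object to track
x, y, w, h = 300, 200, 100, 50
track_window = (x, y, w, h)

roi = frame[y:y + h, x:x + w]
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)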
Example #45
0
    def detect_and_rotate(self, image):
        """
        Detect the blue square surronding the world in order to turn and resize the picture
        :param image:       Picture to analyse

        :return:            The cropped and rotate image
        """

        # computing of the blue mask to isolate the contours of the map
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        mask_blue = cv2.inRange(hsv, self.colors.low_blue, self.colors.up_blue)
        # find the outside blue contours of the map on the whole world
        contours, _ = cv2.findContours(mask_blue, cv2.RETR_CCOMP,
                                       cv2.CHAIN_APPROX_NONE)

        # find the rectangle which includes the contours
        maxArea = 0
        best = None
        for contour in contours:
            area = cv2.contourArea(contour)

            if area > maxArea:
                maxArea = area
                best = contour
        if maxArea < 10:
            return None

        rect = cv2.minAreaRect(best)
        box = cv2.boxPoints(rect)
        box = np.int0(box)

        # crop image inside bounding box
        scale = 1
        W = rect[1][0]
        H = rect[1][1]

        # finding the box to rotate
        Xs = [i[0] for i in box]
        Ys = [i[1] for i in box]
        x1 = min(Xs)
        x2 = max(Xs)
        y1 = min(Ys)
        y2 = max(Ys)

        # Correct if needed the angle between vertical and longest size of rectangle
        angle = rect[2]
        rotated = False
        if angle < -45:
            angle += 90
            rotated = True

        # rotation center and rotation matrix
        center = (int((x1 + x2) / 2), int((y1 + y2) / 2))
        size = (int(scale * (x2 - x1)), int(scale * (y2 - y1)))
        M = cv2.getRotationMatrix2D((size[0] / 2, size[1] / 2), angle, 1.0)

        # cropping the image and rotating it
        cropped = cv2.getRectSubPix(image, size, center)
        cropped = cv2.warpAffine(cropped, M, size)
        croppedW = W if not rotated else H
        croppedH = H if not rotated else W
        corrected = cv2.getRectSubPix(
            cropped, (int(croppedW * scale), int(croppedH * scale)),
            (size[0] / 2, size[1] / 2))

        #  Return the corrected grid in an array
        final_grid = np.array(corrected)
        return final_grid
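Note: the angle handling above assumes the older cv2.minAreaRect convention, where the returned angle lies in (-90, 0]; OpenCV 4.5+ reports it in [0, 90) instead, so the branch may never fire there. A sketch of one common version-tolerant normalization:

import cv2
import numpy as np

pts = np.array([[10, 10], [110, 30], [100, 80], [0, 60]], dtype=np.float32)
(center, (w, h), angle) = cv2.minAreaRect(pts)
print('raw angle:', angle)

# normalize to (-45, 45] regardless of the convention of the installed build
if angle < -45:
    angle += 90
elif angle > 45:
    angle -= 90
print('normalized angle:', angle)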
    def calculateFrame(self, cap):
        cascPath = "/home/ubuntu/2016-Tegra-OpenCV/HVLib/vision/2017-classifier.xml"
        data = self.getDataPoints()
        targetCascade = cv2.CascadeClassifier(cascPath)
        frame = cap.read()
        frame = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # This section builds probabilities based upon the available machine-learning classifier
        ml_weight = 50
        targets = targetCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=7,
            minSize=(10, 10),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
        probability = gray.copy()
        probability[:, :] = 0
        for (x, y, w, h) in targets:
            probability[y:y + h, x:x + w] += ml_weight

        # This section builds probabilities based upon the available geometry of the target
        geometry_weight = 150
        lower_bound = np.array(
            [float(data['HMIN']),
             float(data["SMIN"]),
             float(data['VMIN'])])
        upper_bound = np.array(
            [float(data['HMAX']),
             float(data["SMAX"]),
             float(data['VMAX'])])
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_bound, upper_bound)
        ret, thresh = cv2.threshold(mask, 200, 255, 0)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)

        centers = []
        for c in contours:
            area = cv2.contourArea(c)
            if area > 50 and area < 10000:
                rect = cv2.minAreaRect(c)
                box = cv2.cv.BoxPoints(rect)

                xCenter = (box[0][0] + box[1][0] + box[2][0] + box[3][0]) / 4
                yCenter = (box[0][1] + box[1][1] + box[2][1] + box[3][1]) / 4
                centers.append((xCenter, yCenter, c))
        pairs = []
        for i in range(0, len(centers)):
            for j in range(i + 1, len(centers)):
                xdelta = abs(centers[i][0] - centers[j][0])
                ydelta = abs(centers[i][1] - centers[j][1])
                if xdelta < 20 and ydelta < 20:
                    if centers[i][1] < centers[j][1]:
                        pairs.append((i, j))
                    else:
                        pairs.append((j, i))
        for pair in pairs:
            top = (centers[pair[0]])[2]
            bottom = (centers[pair[1]])[2]
            xt, yt, wt, ht = cv2.boundingRect(top)
            xb, yb, wb, hb = cv2.boundingRect(bottom)
            w = max(wt, wb)
            h = ht + hb
            # center the probability window on the midpoint of the pair
            xCenter = (centers[pair[0]][0] + centers[pair[1]][0]) / 2
            yCenter = (centers[pair[0]][1] + centers[pair[1]][1]) / 2
            xCorner = int(xCenter - w / 2)
            yCorner = int(yCenter - h / 2)
            ratio = cv2.contourArea(top) / (2 * cv2.contourArea(bottom))
            weight = int(geometry_weight * min(ratio, 1 / ratio))
            probability[yCorner:yCorner + h, xCorner:xCorner + w] += weight

        # Factor in the mask as a part of the final probability
        probability += (mask * 0.15).astype(probability.dtype)

        #Use the new probability map to find the location of the new target
        ret, thresh = cv2.threshold(probability, 100, 255, 0)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        cnt = None
        if len(contours) > 0:
            areas = [cv2.contourArea(c) for c in contours]
            max_index = np.argmax(areas)
            cnt = contours[max_index]
        xCenter = -1
        yCenter = -1
        if cnt is not None:
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(frame, [box], 0, (0, 255, 0), 1)
            xCenter = (box[0][0] + box[1][0] + box[2][0] + box[3][0]) / 4
            yCenter = (box[0][1] + box[1][1] + box[2][1] + box[3][1]) / 4

        distance = 0.0025396523 * (yCenter**2) + (0.1000098497 *
                                                  yCenter) + 46.8824851568
        theta = math.atan2(xCenter - 160, distance)
        try:
            output_dict = {
                "xCenter": xCenter,
                "yCenter": yCenter,
                "theta": theta,
                "distance": distance
            }
            #print type(output_dict)
            output = json.dumps(output_dict)
            return frame, output, True, probability
        except Exception:
            pass

        return frame, "", True, probability
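Note: the distance and heading estimate at the end is a fixed calibration polynomial followed by atan2. It can be evaluated standalone (the pixel coordinates below are arbitrary):

import math

xCenter, yCenter = 200, 120  # arbitrary pixel coordinates
distance = 0.0025396523 * (yCenter ** 2) + 0.1000098497 * yCenter + 46.8824851568
theta = math.atan2(xCenter - 160, distance)
print('distance:', distance, 'theta (rad):', theta)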