def merge_intersected(contours):
    ret_val = []
    merged_index = [] # list of indices of contours that have already been merged with another
    
    for i,contour1 in enumerate(contours): # letters
        if i in merged_index:
            continue
        rect1 = cv2.minAreaRect(contour1)
        for j,contour2 in enumerate(contours): # diacritic hooks
            if j in merged_index or i == j:
                continue
            rect2 = cv2.minAreaRect(contour2)
            
            #TODO 2 - merge the diacritic hooks above the letters with the letters
            #two arrays can be joined with np.concatenate((contour1,contour2))
            indicator, vertices = cv2.rotatedRectangleIntersection(rect1, rect2)
            if indicator>0:
                #merge the contours
                ret_val.append(np.concatenate((contour1,contour2)))
                merged_index.append(i)
                merged_index.append(j)
    #all regions that were not merged with anything go into the contour list unchanged
    for idx,contour in enumerate(contours):
        if idx not in merged_index:
            ret_val.append(contour)
        
    return ret_val
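# --- Usage sketch (not part of the original snippet) ---
# merge_intersected expects contours such as those returned by cv2.findContours
# on a binarized text image; "letters.png" and the threshold value are
# placeholder assumptions. Under OpenCV 4.x, findContours returns two values.
import cv2
import numpy as np

img = cv2.imread("letters.png", cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
merged = merge_intersected(contours)  # hooks whose rotated rects intersect a letter are joined to it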
Example #2
def drawRects(img, ctrs):
	i = 1
	rectList = []
	for ct in ctrs[0]:
		x, y, w, h = cv2.boundingRect(ct)

		#process only vertical rectangles (i.e. w < h) with w and h > 10
		if w < h and w > 10 and h > 10:
			#print i, ". ", len(ct), " -- ", cv2.boundingRect(ct), (x+w/2), cv2.minAreaRect(ct)
			rectList.append([cv2.boundingRect(ct), cv2.minAreaRect(ct)])
			clr=(random.randrange(0,255),random.randrange(0,255),random.randrange(0,255))
			#cv2.drawContours(image=img, contours=ct, contourIdx=-1, color=clr , thickness=-1)
			cv2.rectangle(img, (x,y), (x+w,y+h), clr, 5)
			cv2.fillConvexPoly(img, ct, clr)
			cv2.rectangle(img, (x+w/2-3,y), (x+w/2+3,y+h), (255,255,255), -1)
			cv2.rectangle(img, (x,y+h/2-3), (x+w,y+h/2+3), (255,255,255), -1)
			
			rotRect = cv2.minAreaRect(ct)
			box = cv2.cv.BoxPoints(rotRect)
			box = np.int0(box)
			print box
			cv2.drawContours(img, [box], 0, (0,0,255),2)
			#cv2.imshow("asdsdasdadasdasd",img)
			#key = cv2.waitKey(1000)
			i = i + 1
	cv2.rectangle(img, (318,0), (322,640), (255,255,255), -1)
	cv2.imshow("Output",img)
	print "done"
	return rectList
Example #3
File: main.py Project: asgarJ/Professional
def detect_objects(imageFile, thresh_area=0.002, top=3):
    print ''
    print "==================="

    image = cv2.imread(imageFile)

    print 'Processing image:', imageFile, 'of size:', image.shape

    grayscale_img = cv2.imread(imageFile, 0)
    ret, thresh = cv2.threshold(grayscale_img, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, 1, 2)

    # calculate the threshold size
    height, width = grayscale_img.shape[:2]
    image_area = height * width
    object_threshold = thresh_area * image_area

    # filter out valid contours
    valid_contours = filter(lambda cont: cv2.minAreaRect(cont)[1][0] * \
                                         cv2.minAreaRect(cont)[1][1]>object_threshold, contours)
    objects = []
    for cnt in valid_contours:
        rect = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(image, [box], 0, (0,0,0), 1)
        objects.append(box)

    print
    top_colors = process(image, objects, top)
    for index, color in enumerate(top_colors):
        print '%i. %s' % (index+1, color)
Example #4
def get_motions(f, fMask, thickness=1, color=(170, 170, 170)):
    '''
    Iterates over the contours in a mask and draws a bounding box
    around the ones that encompass an area greater than a threshold.
    Returns an image containing just the drawn boxes (black background),
    and also an array of the box points.
    '''
    rects_mot = []
    f_rects = np.zeros(f.shape, np.uint8)
    # get contours
    if imutils.is_cv3():
        _, cnts, hierarchy = cv2.findContours(
            fMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    elif imutils.is_cv2():
        cnts, hierarchy = cv2.findContours(
            fMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < contourThresh:
            continue

        if imutils.is_cv3():
            box = cv2.boxPoints(cv2.minAreaRect(c))
        elif imutils.is_cv2():
            box = cv2.cv.BoxPoints(cv2.minAreaRect(c))

        box = np.int0(box)
        cv2.drawContours(f_rects, [box], 0, color, thickness)
        rects_mot.append(cv2.boundingRect(c))
    return f_rects, rects_mot
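# --- Usage sketch (assumptions, not from the source) ---
# get_motions relies on a module-level contourThresh that is not shown above;
# the value below is a placeholder. The mask would typically come from a
# background subtractor (the snippet handles OpenCV 2 and 3 via imutils).
import cv2
import numpy as np
import imutils

contourThresh = 500  # hypothetical minimum contour area, in pixels

cap = cv2.VideoCapture(0)
backsub = cv2.createBackgroundSubtractorMOG2()
ok, frame = cap.read()
if ok:
    fMask = backsub.apply(frame)
    f_rects, rects_mot = get_motions(frame, fMask)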
Example #5
  def findDirection(self,circle1,circle2):
    #Divide the bound into two halves.
    rect1 = cv2.minAreaRect(getPointList(self.point1,self.point3,self.pointm1,self.pointm2))
    rect2 = cv2.minAreaRect(getPointList(self.point2,self.point4,self.pointm1,self.pointm2))
    r1Count = 0
    r2Count = 0
    #Check the count of each "corner" in each half.
    valid = False
    corners = getCornerList(self.point1.x,self.point1.y,self.point4.x,self.point4.y)
    #print "corners", corners
    for corner in corners:
      x,y = corner.ravel()
      #if(circle1.contains(x,y) || circle2.contains(x,y))
      if (checkBounds(x,y,self.point1,self.point3,self.pointm1,self.pointm2)):
        r1Count +=1
      elif(checkBounds(x,y,self.point2,self.point4,self.pointm1,self.pointm2)):
        r2Count +=1

    print "r1/rr2 count:", r1Count,r2Count
      #if(cv2.pointPolygonTest(rect1,(x,y),False)== 1):
      #  r1Count += 1
      #if(cv2.pointPolygonTest(rect2,(x,y),False) == 1):
      #  r2Count += 1
    if(r1Count > r2Count):
      circle2.setNext(circle1)
    if(r2Count > r1Count):
      circle1.setNext(circle2)
Example #6
File: d5_1.py Project: EyeTechPae/EyeTech
def get_centroids (contours, frame):
	centres = []
	if contours:
		for i in range(len(contours)):
			moments = cv2.moments(contours[i])
			centres.append((int(moments['m10']/moments['m00']), int(moments['m01']/moments['m00'])))
		
			if i>0:                
				dist = calculateDistance(centres[i-1][0],centres[i-1][1],centres[i][0],centres[i][1])
				area=cv2.contourArea(contours[i])
				prevarea=cv2.contourArea(contours[i-1])
				if dist < 120:                    
					if area > prevarea:
						rect = cv2.minAreaRect(contours[i])
						box = cv2.boxPoints(rect)
						box = np.int0(box)
						print(box)
						frame = cv2.drawContours(frame,[box],0,(0,0,255),2)
					else :
						rect = cv2.minAreaRect(contours[i-1])
						box = cv2.boxPoints(rect)
						box = np.int0(box)
						print(box)
						frame = cv2.drawContours(frame,[box],0,(0,0,255),2)
			else:
				rect = cv2.minAreaRect(contours[i])
				box = cv2.boxPoints(rect)
				box = np.int0(box)
				frame = cv2.drawContours(frame,[box],0,(0,0,255),2)
				print(box)
	return centres, frame
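# --- Helper sketch (assumption) ---
# get_centroids calls a calculateDistance helper that is not shown above; its
# name and signature come from the call site, and it presumably computes the
# plain Euclidean distance between two centroids:
import math

def calculateDistance(x1, y1, x2, y2):
	# straight-line distance between points (x1, y1) and (x2, y2)
	return math.hypot(x2 - x1, y2 - y1)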
Example #7
File: expt.py Project: Kitware/VIAME
def _read_kresimir_results():
    # Load downloaded matlab csv results
    mat = scipy.io.loadmat(expanduser('~/data/camtrawl_stereo_sample_data/Haul_83/Haul_083_qcresult.mat'))
    header = ub.readfrom(expanduser('~/data/camtrawl_stereo_sample_data/Haul_83/mat_file_header.csv')).strip().split(',')
    data = mat['lengthsqc']

    mat_df = pd.DataFrame(data, columns=header)
    mat_df['current_frame'] = mat_df['current_frame'].astype(np.int)
    mat_df['Species'] = mat_df['Species'].astype(np.int)
    mat_df['QC'] = mat_df['QC'].astype(np.int)

    # Transform so each row corresponds to one set of (x, y) points per detection
    bbox_cols1 = ['LX1', 'LX2', 'LX3', 'LX4', 'LY1', 'LY2', 'LY3', 'LY4', 'Lar', 'LboxL', 'WboxL', 'aveL']
    bbox_pts1 = mat_df[bbox_cols1[0:8]]  # NOQA
    bbox_pts1_ = bbox_pts1.values
    bbox_pts1_ = bbox_pts1_.reshape(len(bbox_pts1_), 2, 4).transpose((0, 2, 1))

    bbox_cols2 = ['RX1', 'RX2', 'RX3', 'RX4', 'RY1', 'RY2', 'RY3', 'RY4', 'Rar', 'LboxR', 'WboxR', 'aveW']
    bbox_pts2 = mat_df[bbox_cols2[0:8]]  # NOQA
    bbox_pts2_ = bbox_pts2.values
    bbox_pts2_ = bbox_pts2_.reshape(len(bbox_pts2_), 2, 4).transpose((0, 2, 1))

    # Convert matlab bboxes into python-style bboxes
    mat_df['obox1'] = [ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(np.int)))
                       for pts in bbox_pts1_]
    mat_df['obox2'] = [ctalgo.OrientedBBox(*cv2.minAreaRect(pts[:, None, :].astype(np.int)))
                       for pts in bbox_pts2_]

    mat_df.drop(bbox_cols2, axis=1, inplace=True)
    mat_df.drop(bbox_cols1, axis=1, inplace=True)
    return mat_df
Example #8
    def draw_walls(self):
        left_wall_points = np.array([self.transform(point) for point in self.left_wall_points])
        right_wall_points = np.array([self.transform(point) for point in self.right_wall_points])

        rect = cv2.minAreaRect(left_wall_points[:,:2].astype(np.float32))
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(self.grid, [box], 0, 128, -1)

        rect = cv2.minAreaRect(right_wall_points[:,:2].astype(np.float32))
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(self.grid, [box], 0, 128, -1)

        # So I don't have to comment a bunch of stuff out for debugging
        dont_display = True
        if dont_display:
            return

        # Bob Ross it up (just for display)
        left_f, right_f = self.transform(self.left_f), self.transform(self.right_f)
        left_b, right_b = self.transform(self.left_b), self.transform(self.right_b)

        boat = self.transform(self.boat_pos)
        target = self.transform(self.target)

        cv2.circle(self.grid, tuple(boat[:2].astype(np.int32)), 8, 255)
        cv2.circle(self.grid, tuple(target[:2].astype(np.int32)), 15, 255)
        cv2.circle(self.grid, tuple(self.transform(self.mid_point)[:2].astype(np.int32)), 5, 255)
        cv2.circle(self.grid, tuple(left_f[:2].astype(np.int32)), 10, 255)
        cv2.circle(self.grid, tuple(right_f[:2].astype(np.int32)), 10, 255)
        cv2.circle(self.grid, tuple(left_b[:2].astype(np.int32)), 3, 125)
        cv2.circle(self.grid, tuple(right_b[:2].astype(np.int32)), 3, 128)
        cv2.imshow("test", self.grid)
        cv2.waitKey(0)
Example #9
def getRects(ctrs, imageOut=None):
  i = 1
  rectList = []
  #print "getRects(): {0} contours".format(len(ctrs[0]))
  for ct in ctrs[0]:
    #ct = ct.astype(np.int32)
    bbox = cv2.boundingRect(ct)
    x, y, w, h = bbox

    length = ""
    #process only vertical rectangles (i.e. w < h) with w > 30 and h > 70
    if w < h and w > 30 and h > 70:
      #print i, ". ", len(ct), " -- ", cv2.boundingRect(ct), (x+w/2), cv2.minAreaRect(ct)
      
      #dist = 320-(x+w/2)
      #direction = 1
      #if dist < 0:
      #  direction = -1
      #print "Distance to center: ", dist, "pixels -- ", dist*0.0192, "inches --", dist*0.0192*1622/9.89,"revolutions"
      
      #if (x < 320) and ((x+w) > 320):
      if h > 173:
        length = "large"
      elif h > 140:
        length = "medium"
      elif h > 100:
        length = "small"
      #print i, " : ", cv2.boundingRect(ct), " -- ", length, "---", x, x+w, y, h
      
      #color detection code here... 
      color = "red"
      
      rectList.append([cv2.boundingRect(ct), cv2.minAreaRect(ct),length, color])
      
      if imageOut is not None:
        clr=(random.randrange(0,255),random.randrange(0,255),random.randrange(0,255))
        #cv2.drawContours(image=imageOut, contours=ct, contourIdx=-1, color=clr , thickness=-1)
        cv2.rectangle(imageOut, (x,y), (x+w,y+h), clr, 5)
        #cv2.fillConvexPoly(imageOut, ct, clr)
        cv2.rectangle(imageOut, (x+w/2-3,y), (x+w/2+3,y+h), (255,255,255), -1)
        cv2.rectangle(imageOut, (x,y+h/2-3), (x+w,y+h/2+3), (255,255,255), -1)
        rotRect = cv2.minAreaRect(ct)
        box = cv2.cv.BoxPoints(rotRect)
        box = np.int0(box)
        #print box
        #cv2.drawContours(imageOut, [box], 0, (0,0,255),2)
      
      i = i + 1
  
  if imageOut is not None:
    cv2.rectangle(imageOut, (318,0), (322,640), (255,255,255), -1)
    #cv2.imshow("Rects", imageOut)
  #print "done"
  
  ## sort rectList by the first tuple - so that they are from left to right in image.
  rectList.sort(key=lambda tup: tup[0])
  
  return rectList
Example #10
    def process(self, imageLeftRect, imageRightRect, imageDisparityRect, cameraModel, stereoCameraModel, upper, lower):
        assert(imageLeftRect is not None)
        feedback = TrackObjectFeedback()
        feedback.found = False
        imageHLS = cv2.cvtColor(imageLeftRect, cv2.COLOR_BGR2HLS)
        lower = np.array([0,70,50], dtype = 'uint8')
        upper = np.array([200,255,255], dtype='uint8')
        mask=cv2.inRange(imageHLS, lower,upper) #HLS thresholds
        output = cv2.bitwise_and(imageLeftRect, imageLeftRect, mask=mask)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(imageLeftRect, "bgr8"))
        #mask=cv2.inRange(imageHSV, np.array([20,30,80],dtype='uint8'),np.array([40,52,120],dtype='uint8'))
        cnts = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)    
        contours = cnts[1]

        if len(contours) == 0:
            print("No contours")
            return feedback

        rects = []
        for contour in contours: #adapted from https://github.com/opencv/opencv/blob/master/samples/python/squares.py
            epsilon = cv2.arcLength(contour, True)*0.05
            contour = cv2.approxPolyDP(contour, epsilon, True)
            if len(contour) == 4 and cv2.isContourConvex(contour):
                contour = contour.reshape(-1, 2)
                max_cos = np.max([angle_cos( contour[i], contour[(i+1) % 4], contour[(i+2) % 4] ) for i in xrange(4)])
                if max_cos < 0.1:
                    rects.append(contour)

        if len(rects) > 1:
            rects = greatestNAreaContours(rects, 2)
            rect1 = list(cv2.minAreaRect(rects[0]))
            rect2 = list(cv2.minAreaRect(rects[1]))

            if(rect1[1][0] < rect1[1][1]): #Fix wonky angles from opencv (I think)
                rect1[2] = (rect1[2] + 180) * 180/3.141
            else:
                rect1[2] = (rect1[2] + 90) * 180/3.141

            if(rect2[1][0] < rect2[1][1]):
                rect2[2] = (rect2[2] + 180) * 180/3.141
            else:
                rect2[2] = (rect2[2] + 90) * 180/3.141

            gateCenter = (int((rect1[0][0] + rect2[0][0])/2), int((rect1[0][1] + rect2[0][1])/2))
            self.feedback_msg.center = gateCenter
            self.feedback_msg.size = imageRightRect.shape
            #feedback.center = gateCenter
            #feedback.size = imageRightRect.shape

            # rect1/rect2/gateCenter exist only when two rects were found,
            # so the width/height computation must stay inside this branch
            if gateCenter[0] - rect1[0][0] > 0:
                feedback.width = (rect2[0][0]+(rect2[1][0]/2)) - (rect1[0][0] - (rect1[1][0]/2))
            else:
                feedback.width = (rect1[0][0] -(rect1[1][0]/2)) - (rect2[0][0]+(rect2[1][0]/2))
            feedback.height = rect1[1][1]
            feedback.found = True

        self.feedback_pub.publish(self.feedback_msg)
        return feedback
Example #11
def get_cube_upright():
    # Uses the depth image to only take the part of the image corresponding to the closest point and a bit further
    global depth_img_avg
    global img_bgr8_clean
    closest_pnt = np.amin(depth_img_avg)
    # resize the depth image so it matches the color one
    depth_img_avg = cv2.resize(depth_img_avg, (1280, 960))
    # generate a mask with the closest points
    img_detection = np.where(depth_img_avg < closest_pnt + val_depth_capture, depth_img_avg, 0)
    # put all the pixels greater than 0 to 255
    ret, mask = cv2.threshold(img_detection, 0.0, 255, cv2.THRESH_BINARY)
    # convert to 8-bit
    mask = np.array(mask, dtype=np.uint8)
    im2, contours, hierarchy = cv2.findContours(mask, 1, 2, offset=(0, -6))
    useful_cnts = list()
    uprightrects = list()
    img_bgr8_clean_copy = img_bgr8_clean.copy()
    for cnt in contours:
        if 9000 < cv2.contourArea(cnt) < 15000:
            if 420 < cv2.arcLength(cnt, 1) < 560:
                useful_cnts.append(cnt)
            else:
                print("Wrong Lenght 450 < " + str(cv2.arcLength(cnt, 1)) + str(" < 570"))
        else:
            print ("Wrong Area: 9000 < " + str(cv2.contourArea(cnt)) + " < 15000")
    for index, cnts in enumerate(useful_cnts):
        min_area_rect = cv2.minAreaRect(cnts)  # minimum area rectangle that encloses the contour cnt
        (center, size, angle) = cv2.minAreaRect(cnts)
        width, height = size[0], size[1]
        if not (0.7*height < width < 1.3*height):
            print("Wrong Height/Width: " + str(0.7*height) + " < " + str(width) + " < " + str(1.3*height))
            continue
        points = cv2.boxPoints(min_area_rect)  # Find four vertices of rectangle from above rect
        points = np.int32(np.around(points))  # Round the values and make it integers
        cv2.drawContours(img_bgr8_clean_copy, [points], 0, (0, 0, 255), 2)
        cv2.drawContours(img_bgr8_clean_copy, cnts, -1, (255, 0, 255), 2)
        cv2.waitKey(1)
        # if we rotate more than 90 degrees, the width becomes height and vice-versa
        if angle < -45.0:
            angle += 90.0
            width, height = size[0], size[1]
            size = (height, width)
        rot_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
        # rotate the entire image around the center of the parking cell by the
        # angle of the rotated rect
        imgwidth, imgheight = (img_bgr8_clean.shape[0], img_bgr8_clean.shape[1])
        rotated = cv2.warpAffine(img_bgr8_clean, rot_matrix, (imgheight, imgwidth), flags=cv2.INTER_CUBIC)
        # extract the rect after rotation has been done
        sizeint = (np.int32(size[0]), np.int32(size[1]))
        uprightrect = cv2.getRectSubPix(rotated, sizeint, center)
        uprightrects.append(uprightrect)
        uprightrect_copy = uprightrect.copy()
        cv2.drawContours(uprightrect_copy, [points], 0, (0, 0, 255), 2)
        cv2.imshow('uprightRect ' + str(index), uprightrect_copy)

    cv2.imshow('RBG', img_bgr8_clean_copy)
    cv2.waitKey(1)
    objects_detector(uprightrects)
Example #12
def processCam(cap):
    bx = -1
    by = -1
    # Capture frame-by-frame
    ret, frame = cap.read()
    
    # Our operations on the frame come here
    #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)#HSV
    #white led ring: 45,2,240 - 130,40,255
    lower_lim = np.array([37,10,180])#80,23,235
    upper_lim = np.array([106,63,255])#102,167,255
    mask = cv2.inRange(hsv, lower_lim, upper_lim)
    img, contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    img = cv2.inRange(hsv, lower_lim, upper_lim)
    img = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)

    contours = findBigContours(contours)
    # find big contours
    #biggestContourIndex = findBiggestContour(contours)
    biggestContourIndex, secondBiggestIndex = findSecondBiggestContour(contours)
    #bigContours = findBigContours(contours)
    #biggestContourIndex = findBestAR(bigContours)
    if(len(contours) != 0):
        # find box around contour and its center
        recta = cv2.minAreaRect(contours[biggestContourIndex])
        rectb = cv2.minAreaRect(contours[secondBiggestIndex])
        boxa = cv2.boxPoints(recta)
        boxb = cv2.boxPoints(rectb)
        rect = cv2.minAreaRect(np.concatenate([boxa,boxb]))
        box = cv2.boxPoints(rect)
        bx = int((box[0][0] + box[2][0])/2)
        by = int((box[0][1] + box[2][1])/2)
        #x,y,w,h = cv2.boundingRect(contours[biggestContourIndex])
        #if(h != 0):
        #    print("aspect ratio: " + str(h/float(w)))
        #print("center: " + str(bx) + ', ' + str(by))
        box = np.int0(box)
        img = cv2.drawContours(img,[box],0,(0,0,255),1)
        img = cv2.circle(img,(bx,by),4,(0,255,255),-1)

        # find centroid from moments
        #M = cv2.moments(contours[biggestContourIndex])
        #if(M['m00'] != 0):
        #    cx = int(M['m10']/M['m00'])
        #    cy = int(M['m01']/M['m00'])
        #    img = cv2.circle(img,(cx,cy),4,(255,255,0),-1)
    
    #img = cv2.drawContours(img, contours, biggestContourIndex, (255,255,0), 3)
    #img = cv2.drawContours(img, contours, secondBiggestIndex, (255,0,0), 3)
    for i in range(len(contours)):
        col = cv2.contourArea(contours[i]) / 20
        img = cv2.drawContours(img, contours, i, (0,255-col,col), 1)
    return img, bx, by
Example #13
	def callback(self,data):
		try:
			img = self.bridge.imgmsg_to_cv2(data, "bgr8")
		except CvBridgeError as e:
			print(e)

		#imageHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
		contours = ThreshAndContour(img, self.upper, self.lower)
		contours = contours[1]
		#output = cv2.bitwise_and(img, img, mask=mask)

		if len(contours) == 0:
			return None

		rects = []
		#cv2.drawContours(img,contours,-1, (0,255,0), 3)
		for contour in contours: #adapted from https://github.com/opencv/opencv/blob/master/samples/python/squares.py
			epsilon = cv2.arcLength(contour, True)*0.05
			contour = cv2.approxPolyDP(contour, epsilon, True)
			if len(contour) == 4 and cv2.isContourConvex(contour):
				contour = contour.reshape(-1, 2)
				max_cos = np.max([angle_cos( contour[i], contour[(i+1) % 4], contour[(i+2) % 4] ) for i in range(4)])
				if max_cos < 0.1:
					rects.append(contour)

		if len(rects) > 1:
			rects = sorted(contours, key=cv2.contourArea, reverse=True)
			rect1 = cv2.minAreaRect(rects[0])
			rect2 = cv2.minAreaRect(rects[1])

			if(rect1[1][0] < rect1[1][1]): #Fix wonky angles from opencv (I think)
				rect1 = (rect1[0], rect1[1], (rect1[2] + 180) * 180/3.141)
			else:
				rect1 = (rect1[0], rect1[1], (rect1[2] + 90) * 180/3.141)

			if(rect2[1][0] < rect2[1][1]):
				rect2 = (rect2[0], rect2[1], (rect2[2] + 180) * 180/3.141)
			else:
				rect2 = (rect2[0], rect2[1], (rect2[2] + 90) * 180/3.141)

			box = cv2.boxPoints(rect1)
			box = np.int0(box)
			#cv2.drawContours(img,[box],-1,(0,0,255),2)
			box = cv2.boxPoints(rect2)
			box = np.int0(box)
			#cv2.drawContours(img,[box],-1,(0,0,255),2)

			gateLocation = None
			gateAxis = None
			gateAngle = None
			gateCenter = (int((rect1[0][0] + rect2[0][0])/2), int((rect1[0][1] + rect2[0][1])/2))
			cv2.circle(img,gateCenter,5,(0,255,0),3)

		try:
			self.image_pub.publish(self.bridge.cv2_to_imgmsg(img,"bgr8"))
		except CvBridgeError as e:
			print(e)
Example #14
def DrawMark(image,contours,mark,I=255,border=2):
	rect = cv2.minAreaRect(contours[mark])
	box = cv2.cv.BoxPoints(rect)
	box = np.int0(box)
	#Get Corners for box 2
	rect1 = cv2.minAreaRect(contours[mark-1])
	box1 = cv2.cv.BoxPoints(rect1)
	box1 = np.int0(box1)
	#Draw
	cv2.drawContours(image,[box],0,(I,I,I),border)
	cv2.drawContours(image,[box1],0,(I,I,I),border)
Example #15
def find(img, hue_min=20, hue_max=175, sat_min=0, sat_max=255, val_min=0, val_max=255):
    """
    Detect the qualification gate.
    :param img: HSV image from the bottom camera
    :return: tuple of location of the center of the gate in a "targeting" coordinate system: origin is at center of image, axes range [-1, 1]
    """

    img = np.copy(img)

    bin = vision_util.hsv_threshold(img, hue_min, hue_max, sat_min, sat_max, val_min, val_max)

    canny = vision_util.canny(bin, 50)

    # find contours after first processing it with Canny edge detection
    contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    hulls = vision_util.convex_hulls(contours)
    cv2.drawContours(bin, hulls, -1, 255)

    cv2.imshow('bin', bin)

    hulls.sort(key=hull_score)

    if len(hulls) < 2:
        return ()

    # get the two highest scoring candidates
    left = cv2.minAreaRect(hulls[0])
    right = cv2.minAreaRect(hulls[1])

    # if we got left and right mixed up, switch them
    if right[0][0] < left[0][0]:
        left, right = right, left

    confidence = score_pair(left, right)
    if confidence < 80:
        return 0, 0

    # draw hulls in Blaze Orange
    cv2.drawContours(img, hulls, -1, (0, 102, 255), -1)
    # draw green outlines so we know it actually detected it
    cv2.drawContours(img, hulls, -1, (0, 255, 0), 2)

    cv2.imshow('img', img)

    center_actual = (np.mean([left[0][0], right[0][0]]), np.mean([left[0][1], right[0][1]]))
    # shape[0] is the number of rows because matrices are dumb
    center = (center_actual[0] / img.shape[1], center_actual[1] / img.shape[0])
    # convert to the targeting system of [-1, 1]
    center = ((center[0] * 2) - 1, (center[1] * 2) - 1)

    return center
Example #16
File: feature.py Project: jinified/visionx
def detectSmallSquare(gray, info,blank,sm=50):
    chosen_cnt = []
    chosen_cntx = []
    cent = (-1,-1)
    gray = cv2.GaussianBlur(gray, (3,3),0)
    #gray = cv2.GaussianBlur(gray, (9,9),2)
    area = gray.shape[0]*gray.shape[1]
    min = np.amin(gray)
    max = np.amax(gray)
    thresh = min + (max-min)/1.5
    mask = np.uint8(cv2.Canny(gray, thresh/2, thresh))
    kern = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
    mask = cv2.dilate(mask, kern, iterations=1)
    #mask = cv2.erode(mask, kern, iterations=1)
    outImg = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    contours, hierr = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contours.sort(key=cv2.contourArea, reverse=True) 

    if len(contours) >= 1:
        for currCnt in contours:
            rect = cv2.minAreaRect(currCnt)
            ellipse = cv2.fitEllipse(currCnt)
            #if cv2.contourArea(currCnt) > 5000 and (ellipse[1][1]/ellipse[1][0]) >= 3:
            if cv2.contourArea(currCnt) > 1000 and VUtil.checkRectangle(currCnt):
                info['detected'] = True
                cent = VUtil.getCentroid(currCnt)
                cent = (cent[0],cent[1]-70)
                chosen_cnt.append(cent)
                chosen_cntx.append(currCnt)
                info['centroid'] =   cent
                VUtil.drawInfo(outImg,info)
                VUtil.getRailDOA(currCnt,outImg,info,blank)

        if len(chosen_cnt) > 1:
            info['detected'] = True
            info['centroid'] = VUtil.averageCentroids(chosen_cnt)
            VUtil.groupContoursAlign(chosen_cntx,outImg,info,blank)
            chosen_cnt.sort(key=lambda x:x[0], reverse=True)
            chosen_cntx.sort(key=cv2.contourArea, reverse=True)
            chosen_cntx.sort(key=lambda x:VUtil.getCentroid(x)[0],reverse=True)
            rect_r = cv2.minAreaRect(chosen_cntx[0])
            rect_l = cv2.minAreaRect(chosen_cntx[-1])
            cv2.drawContours(outImg, [np.int0(cv2.cv.BoxPoints(rect_l))], -1, PURPLE,3)
            cv2.drawContours(outImg, [np.int0(cv2.cv.BoxPoints(rect_r))], -1, YELLOW,3)
            cv2.drawContours(blank, [np.int0(cv2.cv.BoxPoints(rect_l))], -1, PURPLE,2)
            cv2.drawContours(blank, [np.int0(cv2.cv.BoxPoints(rect_r))], -1, YELLOW,2)
            if sm < 0:
                info['centroid'] = chosen_cnt[-1]
            else:
                info['centroid'] = chosen_cnt[0]
            VUtil.drawInfo(outImg,info)
    return outImg
Example #17
def areSimilar(cont1, cont2):
	(x1, y1), (w1, h1), angle1 = cv2.minAreaRect(cont1)
	(x2, y2), (w2, h2), angle2 = cv2.minAreaRect(cont2)
	(area1, area2) = (w1*h1, w2*h2)
	distance = ((x1-x2)**2 + (y1-y2)**2)**0.5
	areaDifference = area1 / area2 - 1
	angleDifference = angle1 - angle2
	if (distance < MIN_BOX_DISTANCE and
		abs(areaDifference) < 0.2 and
		abs(angleDifference) < 20):
		return True
	else:
		return False
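# --- Usage sketch (assumptions) ---
# MIN_BOX_DISTANCE is a module-level constant not shown above; the value below
# is a placeholder, as are the image path and threshold. Contours are grouped
# greedily with the first existing group whose representative is similar.
import cv2

MIN_BOX_DISTANCE = 50  # hypothetical maximum centre distance, in pixels

img = cv2.imread("parts.png", cv2.IMREAD_GRAYSCALE)
_, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

groups = []
for cont in contours:
	for group in groups:
		if areSimilar(group[0], cont):
			group.append(cont)
			break
	else:
		groups.append([cont])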
Example #18
File: functions.py Project: ghattab/VICAR
def pull_anchors(flist, n, fltr_contours):
    """ Returns 3 anchor points for frames having equal n contours
    """
    # amount of contours found in frame 0
    if n == 1:
        ctrl_pts = []
        # bounding rectangle to get 4 coords.
        for i in range(len(flist)):
            rect = cv2.minAreaRect(fltr_contours[i])
            box = cv2.cv.BoxPoints(rect)
            # rect = ( center (x,y), (width, height), angle of rotation )
            # x1, y1 ; x1 + width, y1 + height
            x1, y1 = box[0]
            x2, y2 = box[1]
            x3, y3 = box[2]
            pts = np.array([(x1, y1), (x2, y2), (x3, y3)])
            # -- get data array sorted by n-th column (n= 1)
            pts = pts[np.argsort(pts[:, 1])]
            ctrl_pts.append(pts)
        return ctrl_pts
    # 2 cnts
    elif n == 2:
        # initial random indexes to extract 4 corners
        # 2 from each bounding rectangle
        ctrl_pts = []
        for i in range(len(flist)):
            rect1 = cv2.minAreaRect(fltr_contours[i][0])
            box1 = cv2.cv.BoxPoints(rect1)
            rect2 = cv2.minAreaRect(fltr_contours[i][1])
            box2 = cv2.cv.BoxPoints(rect2)
            # coordinates 2 from each
            x1, y1 = box1[0]
            x2, y2 = box1[1]
            x3, y3 = box2[0]
            pts = np.array([(x1, y1), (x2, y2), (x3, y3)])
            # -- get data array sorted by n-th column (n= 1)
            pts = pts[np.argsort(pts[:, 1])]
            ctrl_pts.append(pts)
        return ctrl_pts
    else:
        # get all centers
        center_contours = find_centers(flist, fltr_contours)
        print center_contours
        pts = []
        for i in range(len(flist)):
            coords = copy(center_contours[i])
            # sort by argsort col n=1 and grab first 3 pts
            tmp = coords[np.argsort(coords[:, 1])]
            tmp = concatenate((tmp[:2], tmp[-1:]), axis=0)
            pts.append(tmp)
        return pts
Example #19
    def draw(self):
        """
        draw - Method
        @summary: 
        """
        #Internal shape colour - Blue
        colours = [(89, 73, 48)]
        
        for x in self.interior:
            cv2.drawContours(self.img, [x], 0, colours[random.randint(0, len(colours)-1)],2)
        
        cv2.drawContours(self.img, [self.exterior], 0, (43, 58, 255),2)      
        rect = cv2.minAreaRect(self.exterior)
        box = cv2.cv.BoxPoints(rect)
        box = numpy.int0(box)
        
        if config.DEBUG:
            #Draw the center point 
            cv2.circle(self.img, self.getCentrePoint(), 10, (0,0,255))    
            cv2.drawContours(self.img, [box],0,(0,0,255),2)
            cv2.imshow('Material', self.img)
            
        img1 = Image.fromarray(cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB))
        
        self._drawLine(img1, [(box[0][0], box[0][1]), (box[1][0], box[1][1])], (255, 58, 48))
        self._drawLine(img1, [(box[1][0], box[1][1]), (box[2][0], box[2][1])], (255, 58, 48))
        self._drawLine(img1, [(box[2][0], box[2][1]), (box[3][0], box[3][1])], (255, 58, 48))
        self._drawLine(img1, [(box[3][0], box[3][1]), (box[0][0], box[0][1])], (255, 58, 48))
        
        for x in self.interior:
            rect = cv2.minAreaRect(x)
            box = cv2.cv.BoxPoints(rect)
            box = numpy.int0(box)
            
            self._drawLine(img1, [(box[0][0], box[0][1]), (box[1][0], box[1][1])], (48, 73, 89))
            self._drawLine(img1, [(box[1][0], box[1][1]), (box[2][0], box[2][1])], (48, 73, 89))
            self._drawLine(img1, [(box[2][0], box[2][1]), (box[3][0], box[3][1])], (48, 73, 89))
            self._drawLine(img1, [(box[3][0], box[3][1]), (box[0][0], box[0][1])], (48, 73, 89))

        quad = self.tag.quad
        self._drawLine(img1, [(quad[0][0], quad[0][1]), (quad[1][0], quad[1][1])], (96, 96, 96), 2, False)
        self._drawLine(img1, [(quad[1][0], quad[1][1]), (quad[2][0], quad[2][1])], (96, 96, 96), 2, False)
        self._drawLine(img1, [(quad[2][0], quad[2][1]), (quad[3][0], quad[3][1])], (96, 96, 96), 2, False)
        self._drawLine(img1, [(quad[3][0], quad[3][1]), (quad[0][0], quad[0][1])], (96, 96, 96), 2, False)
        
            
        if config.DEBUG:
            img1.show()
            
        img1.save(os.path.abspath(config.OUTLINE_DIR+os.path.basename(self.filename)), "JPEG")
Example #20
 def run(self):
   while True:
     f, orig_img = self.capture.read()
     orig_img = cv2.flip(orig_img, 1)
     img = cv2.GaussianBlur(orig_img, (5,5), 0)
     img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # use the blurred frame; passing orig_img here discarded the blur
     img = cv2.resize(img, (len(orig_img[0]) / self.scale_down, len(orig_img) / self.scale_down))
     red_lower = np.array([0, 150, 0],np.uint8)
     red_upper = np.array([5, 255, 255],np.uint8)
     red_binary = cv2.inRange(img, red_lower, red_upper)
     dilation = np.ones((15, 15), "uint8")
     red_binary = cv2.dilate(red_binary, dilation)
     contours, hierarchy = cv2.findContours(red_binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
     max_area = 0
     largest_contour = None
     for idx, contour in enumerate(contours):
       area = cv2.contourArea(contour)
       if area > max_area:
         max_area = area
         largest_contour = contour
     if largest_contour is not None:
       moment = cv2.moments(largest_contour)
       if moment["m00"] > 1000 / self.scale_down:
         rect = cv2.minAreaRect(largest_contour)
         rect = ((rect[0][0] * self.scale_down, rect[0][1] * self.scale_down), (rect[1][0] * self.scale_down, rect[1][1] * self.scale_down), rect[2])
         box = cv2.cv.BoxPoints(rect)
         box = np.int0(box)
         cv2.drawContours(orig_img,[box], 0, (0, 0, 255), 2)
         cv2.imshow("ColourTrackerWindow", orig_img)
         if cv2.waitKey(20) == 27:
           cv2.destroyWindow("ColourTrackerWindow")
           self.capture.release()
           break
Example #21
    def locate(self, geo_image, image, marked_image):
        '''Find sticks in image and return list of FieldItem instances.''' 
        # Extract out just blue channel from BGR image.
        #blue_channel, _, _ = cv2.split(image)
        #_, mask = cv2.threshold(blue_channel, 160, 255, 0)
        
        # Convert Blue-Green-Red color space to HSV
        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        
        lower_blue = np.array([90, 90, 50], np.uint8)
        upper_blue = np.array([130, 255, 255], np.uint8)
        mask = cv2.inRange(hsv_image, lower_blue, upper_blue)
        
        # Night time testing
        lower_blue = np.array([90, 10, 5], np.uint8)
        upper_blue = np.array([142, 255, 255], np.uint8)
        mask = cv2.inRange(hsv_image, lower_blue, upper_blue)

        filtered_rectangles = []
        
        # Open mask (to remove noise) and then dilate it to connect contours.
        kernel = np.ones((5,5), np.uint8)
        mask_open = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        mask = cv2.dilate(mask_open, kernel, iterations = 1)
        
        # Find outer contours (edges) and 'approximate' them to reduce the number of points along nearly straight segments.
        contours, hierarchy = cv2.findContours(mask.copy(), cv2.cv.CV_RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        #contours = [cv2.approxPolyDP(contour, .1, True) for contour in contours]
        
        # Create bounding box for each contour.
        bounding_rectangles = [cv2.minAreaRect(contour) for contour in contours]
        
        if marked_image is not None:
            for rectangle in bounding_rectangles:
                # Show rectangles using bounding box.
                drawRect(marked_image, rectangle, (0,0,0), thickness=2)
        
        # Remove any rectangles that couldn't be a plant based off specified size.
        min_stick_size = self.stick_diameter * 0.75 # looking straight down on it
        max_stick_size = self.stick_length * 1.25 # laying flat on the ground
        filtered_rectangles.extend(filter_by_size(bounding_rectangles, geo_image.resolution, min_stick_size, max_stick_size, enforce_min_on_w_and_h=True))
        
        if ImageWriter.level <= ImageWriter.DEBUG:
            # Debug save intermediate images
            mask_filename = postfix_filename(geo_image.file_name, 'blue_thresh')
            ImageWriter.save_debug(mask_filename, mask)
        
        if marked_image is not None:
            for rectangle in filtered_rectangles:
                # Show rectangles using colored bounding box.
                purple = (255, 0, 255)
                drawRect(marked_image, rectangle, purple, thickness=2)

        sticks = []
        for i, rectangle in enumerate(filtered_rectangles):
            # Just give default name for saving image until we later go through and assign to plant group.
            stick = FieldItem(name = 'stick' + str(i), bounding_rect = rectangle)
            sticks.append(stick)
                
        return sticks
Example #22
def mask(img):
  biggest = None
  max_area = 0
  grey = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
  #blk = cv2.bitwise_not(grey)
  kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
  res = cv2.morphologyEx(grey,cv2.MORPH_OPEN,kernel)
  ret,thresh = cv2.threshold(grey,127,255,0)
  contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
  dest = np.zeros(thresh.shape, np.uint8)
  print contours[::1]
  print len(contours)
  print hierarchy
  for cnt in contours[::1]:
    rect  = cv2.minAreaRect(cnt)
    points = cv2.cv.BoxPoints(rect)
    points  = np.int0(np.around(points))
    #cv2.drawContours(dest, [cnt],0,(0,255,0),2)
    #cv2.polylines(dest, [points], True,( 255,255,255), 2 )
    cv2.fillPoly(orig, [cnt], (100,20,90), 4) # 'orig' is presumably the original colour image, defined at module level
    cv2.fillPoly(dest, [cnt], (255,255,255), 4)

    x = cv2.cvtColor(dest,cv2.COLOR_GRAY2RGB)
    cv2.imshow('contour-highlighted image.jpg', x)
    cv2.imwrite("../../images/bound.jpg", x)

    cv2.imshow('masked image', orig)
Example #23
    def filter(seg,area,label):
        """
        Apply the filter.
        The final list is ranked by area.
        """
        good = label[area > TextRegions.minArea]
        area = area[area > TextRegions.minArea]
        filt,R = [],[]
        for idx,i in enumerate(good):
            mask = seg==i
            xs,ys = np.where(mask)

            coords = np.c_[xs,ys].astype('float32')
            rect = cv2.minAreaRect(coords)          
            box = np.array(cv2.boxPoints(rect))
            h,w,rot = TextRegions.get_hw(box,return_rot=True)

            f = (h > TextRegions.minHeight 
                and w > TextRegions.minWidth
                and TextRegions.minAspect < w/h < TextRegions.maxAspect
                and area[idx]/w*h > TextRegions.pArea)
            filt.append(f)
            R.append(rot)

        # filter bad regions:
        filt = np.array(filt)
        area = area[filt]
        R = [R[i] for i in range(len(R)) if filt[i]]

        # sort the regions based on areas:
        aidx = np.argsort(-area)
        good = good[filt][aidx]
        R = [R[i] for i in aidx]
        filter_info = {'label':good, 'rot':R, 'area': area[aidx]}
        return filter_info
Example #24
def blackbodysegment(img1, lowerlim, upperlim):
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (4, 4))
    img_np = np.asarray(img1)
    blue, green, red = img_np.T
    res = green > 100  # |(red>50)|(blue>50)
    res = res.astype(np.uint8) * 255
    res = np.transpose(res)
    offset = 50
    img_erode = cv2.erode(res, element)
    ret, thresh = cv2.threshold(img_erode, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    img_crop = img1  # fall back to the full image so img_crop is defined even if no contour matches

    for i in range(len(contours)):
        rect = cv2.minAreaRect(contours[i])
        (x, y), (w, h), theta = rect
        if w * h > lowerlim and w * h < upperlim:
            rect = (x + 5, y + 10), (w + offset, h + offset), theta
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)
            img_crop = img1[box[2][1] : box[0][1], box[1][0] : box[3][0]]
            break
            # cv2.drawContours(img1, [box], 0, (0,0,255), 2)

    return img_crop
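# --- Usage sketch (placeholder values) ---
# Crop the region around the first contour whose min-area rect falls inside the
# given area limits; the image path and the area limits are assumptions.
import cv2

img = cv2.imread("thermal_frame.png")
crop = blackbodysegment(img, 1000, 50000)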
Example #25
def detectBlobs(d1):
  contours, hierarchy = cv2.findContours(d1,
                                         cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)

  raw_bins = []

  if len(contours) >= 1:
      cnt = contours[0]
      cv2.drawContours(d1, contours, -1, (255, 255, 255), 3)
      max_w = 0
      max_h = 0
      max_x = 0
      max_y = 0
      avg_x = 0
      avg_y = 0
      count = 0
      for h, cnt in enumerate(contours):
          #hull = cv2.convexHull(cnt)
          rect = cv2.minAreaRect(cnt)
          box = cv2.cv.BoxPoints(rect)
          box = np.int0(box)

          x,y,w,h = cv2.boundingRect(cnt)
          avg_x += x+w/2
          avg_y += y+h/2
          count += 1
          if (w*h >max_w*max_h):
              max_w, max_h, max_x, max_y = w, h, x, y
      d1 = cv2.cvtColor(d1, cv2.COLOR_GRAY2BGR,d1, 3)
      cv2.rectangle(d1,(max_x,max_y),(max_x+max_w,max_y+max_h),(0,255,0),2)
      cv2.circle(d1, (int(avg_x/count), int(avg_y/count)), 15, (255,0,0), -1)
      cv2.circle(d1, (int(max_x+max_w/2), int(max_y+max_h/2)), 15, (0,255,0), -1)
      vis_to_ard(int(max_x+max_w/2),int(max_y+max_h/2))
  return d1
Example #26
def get_hp_bars(binary):
    _, contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        center, (height, width), angle = cv2.minAreaRect(cnt)
        if 2 < height < 4 and 5 < width < 80 and 89 < -angle < 91:
            box = cv2.boxPoints((center, (height, width), angle))
            yield np.int0(box)
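# --- Usage sketch (assumed inputs) ---
# get_hp_bars is a generator, so boxes can be drawn as they are found. The
# screenshot path and the HSV range for the bar colour are placeholders; the
# snippet's three-value findContours call assumes OpenCV 3.x.
import cv2
import numpy as np

frame = cv2.imread("screenshot.png")
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
binary = cv2.inRange(hsv, (50, 100, 100), (70, 255, 255))  # hypothetical green range
for box in get_hp_bars(binary):
    cv2.drawContours(frame, [box], 0, (0, 0, 255), 1)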
Example #27
def barcode_detect(img):
    image = img
    # load the image and convert it to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    sobelgx = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=3)
    sobelgy = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=3)

    # subtract the y-gradient from the x-gradient
    gradient = sobelgx - sobelgy
    gradient = cv2.convertScaleAbs(gradient)

    # blur and threshold the image
    blurred = cv2.blur(gradient, (3, 3))
    _, thresh = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)

    # construct a closing kernel and apply it to the thresholded image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

    # perform a series of erosions and dilations
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)

    # find the contours in the thresholded image, then sort the contours
    # by their area, keeping only the largest one
    (_, contours, _) = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(contours, key=cv2.contourArea, reverse=True)[0]

    # compute the rotated bounding box of the largest contour
    rect = cv2.minAreaRect(c)
    box = np.int0(cv2.boxPoints(rect))

    return box
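# --- Usage sketch ---
# Draw the detected barcode box back onto the input image; "barcode.jpg" is a
# placeholder path.
import cv2
import numpy as np

image = cv2.imread("barcode.jpg")
box = barcode_detect(image)
cv2.drawContours(image, [box], -1, (0, 255, 0), 3)
cv2.imshow("barcode", image)
cv2.waitKey(0)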
Example #28
def MomentDescriptor(name, thres):
    img1=cv2.imread(name)   
#     img=img1
    img=cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
#   edges=cv2.Canny(img, thres, thres*2) 
    #Image to draw the contours
    drawing=np.zeros(img.shape[:2], np.uint8)
    ret,thresh = cv2.threshold(img,thres,255,0)
   
    contours, hierarchy=cv2.findContours(thresh,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    MomentVector=[]
    
    for cnt in contours:
        M=cv2.moments(cnt) #Calculate moments
        if M['m00']!=0:
            Cx=int(M['m10']/M['m00'])
            Cy=int(M['m01']/M['m00'])
            
            Moments_Area=M['m00'] # Contour area moment
            Contours_Area=cv2.contourArea(cnt) # Contour area using in_built function
           #Draw moment
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)
#           cv2.drawContours(img1,contours, 0, (0,255,0),3) #draw contours in green color
            cv2.drawContours(img1,[box],0,(0,0,255),1)
            cv2.circle(img1, (Cx,Cy), 3,(0,255,0), -1)#draw centroids in green color
            MomentVector.append([M['m00'],Cx,Cy])
            cv2.imshow('winname',img1)
            cv2.waitKey(5000)
    print MomentVector
Example #29
def getShape(contour):
    p = cv2.arcLength(contour, True)
    aV = cv2.approxPolyDP(contour, 0.04 * p, True)

    vertices = len(aV)
    if vertices == 3:
        return 'Triangle'
    elif vertices == 4:
        rect = cv2.minAreaRect(contour)
        contourArea = cv2.contourArea(contour)
        fittedArea = rect[1][0] * rect[1][1]
        #print "Countor Area:", contourArea , " Fiited A:", fittedArea
        (x, y, w, h) = cv2.boundingRect(aV)
        ar = w / float(h)
        if .95 * fittedArea <= contourArea and ar >= 0.95 and ar <= 1.05:
            return 'Square'
        else:
            return 'Rectangle'
    elif vertices == 5:
        return 'Pentagon'
    elif vertices == 6:
        return 'Hexagon'
    elif vertices == 7:
        return 'Heptagon'
    else:
        (xC, yC), radius  = cv2.minEnclosingCircle(contour)
        contourArea = cv2.contourArea(contour)
        fittedArea = radius*radius*3.14
        # print "Countor Area:", contourArea , " Circle A:", fittedArea
        if abs(contourArea-fittedArea) / max(contourArea, fittedArea) < 0.10:
            return 'Circle'
        else:
            return str(str(len(aV))+'-Polygon')
    return 'Unknown'
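# --- Usage sketch (assumed input) ---
# Classify every external contour of a binarized image; "shapes.png" and the
# threshold are placeholders. Under OpenCV 4.x, findContours returns two values.
import cv2

img = cv2.imread("shapes.png", cv2.IMREAD_GRAYSCALE)
_, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    print(getShape(c))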
Example #30
def calMoments(name, thres):
    img1=cv2.imread(name)   
    img=cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
#   edges=cv2.Canny(img, thres, thres*2) 
    #Image to draw the contours
    drawing=np.zeros(img.shape[:2], np.uint8)
    ret,thresh = cv2.threshold(img,127,255,0)
   
    contours, hierarchy=cv2.findContours(thresh,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        M=cv2.moments(cnt) #Calculate moments
        if M['m00']!=0:
            Cx=int(M['m10']/M['m00'])
            Cy=int(M['m01']/M['m00'])
            C_x=M['m10']/M['m00']
            print C_x
            Moments_Area=M['m00'] # Contour area moment
            Contours_Area=cv2.contourArea(cnt) # Contour area using in_built function
#         #Draw moment
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)
#           cv2.drawContours(img1,contours, 0, (0,255,0),3) #draw contours in green color
            cv2.drawContours(img1,[box],0,(0,0,255),5)
            cv2.circle(img1, (Cx,Cy), 5,(0,255,0), -1)#draw centroids in green color
            
    cv2.imshow("Original", img1)
    cv2.waitKey(0)
Example #31
    def detect(self):
        res10 = np.zeros_like(self.img)
        boxRes = self.img.copy()

        regions, bboxes = self.getMSERegions(self.grayImg)

        n1 = len(regions)
        # print('num bboxes:',n1)
        n2, n3, n4, n5, n6, n7, n8, n9, n10 = [0] * 9
        bar = progressbar.ProgressBar(maxval=n1,
                                      widgets=[
                                          progressbar.Bar(marker='=',
                                                          left='[',
                                                          right=']'), ' ',
                                          progressbar.SimpleProgress()
                                      ])

        bar.start()
        ## Coloring the regions
        for i, region in enumerate(regions):
            bar.update(i + 1)

            if self.getRegionArea(
                    region
            ) > self.grayImg.shape[0] * self.grayImg.shape[1] * AREA_LIM:
                n2 += 1

                if self.getRegionPerimeter(
                        region) > 2 * (self.grayImg.shape[0] +
                                       self.grayImg.shape[1]) * PERIMETER_LIM:
                    n3 += 1

                    if self.getAspectRatio(region) < ASPECT_RATIO_LIM:
                        n4 += 1

                        if (self.getOccupyRate(region) > OCCUPATION_LIM[0]
                            ) and (self.getOccupyRate(region) <
                                   OCCUPATION_LIM[1]):
                            n5 += 1

                            if (self.getCompactness(region) >
                                    COMPACTNESS_LIM[0]) and (
                                        self.getCompactness(region) <
                                        COMPACTNESS_LIM[1]):
                                n6 += 1

                                # x, y, w, h = cv2.boundingRect(region)
                                x, y, w, h = bboxes[i]

                                # strokeWidths, strokeWidths_opp, strokes = self.getStrokes((x, y, w, h))
                                strokeWidths, strokeWidths_opp = self.getStrokes(
                                    (x, y, w, h))
                                if DIRECTION != "both+":
                                    strokeWidths = np.append(strokeWidths,
                                                             strokeWidths_opp,
                                                             axis=0)
                                    strokeWidth, strokeWidthCount, mean, std, xMin, xMax = self.getStrokeProperties(
                                        strokeWidths)
                                else:
                                    strokeWidth, strokeWidthCount, mean, std, xMin, xMax = self.getStrokeProperties(
                                        strokeWidths)
                                    strokeWidth_opp, strokeWidthCount_opp, mean_opp, std_opp, xMin_opp, xMax_opp = self.getStrokeProperties(
                                        strokeWidths_opp)
                                    if strokeWidthCount_opp > strokeWidthCount:  ## Take the strokeWidths with max of counts strokeWidth (most probable one)
                                        strokeWidths = strokeWidths_opp
                                        strokeWidth = strokeWidth_opp
                                        strokeWidthCount = strokeWidthCount_opp
                                        mean = mean_opp
                                        std = std_opp
                                        xMin = xMin_opp
                                        xMax = xMax_opp

                                if len(strokeWidths) > SWT_TOTAL_COUNT:
                                    n7 += 1

                                    if std < SWT_STD_LIM:
                                        n8 += 1

                                        strokeWidthSizeRatio = strokeWidth / (
                                            1.0 *
                                            max(self.getRegionShape(region)))
                                        if strokeWidthSizeRatio > STROKE_WIDTH_SIZE_RATIO_LIM:
                                            n9 += 1

                                            strokeWidthVarianceRatio = (
                                                1.0 * strokeWidth) / (std**std)
                                            if strokeWidthVarianceRatio > STROKE_WIDTH_VARIANCE_RATIO_LIM:
                                                n10 += 1
                                                res10 = self.colorRegion(
                                                    res10, region)

        bar.finish()
        # print("{} regions left.".format(n10))

        ## Binarize regions
        binarized = np.zeros_like(self.grayImg)
        rows, cols, color = np.where(res10 != [0, 0, 0])
        binarized[rows, cols] = 255

        ## Dilate regions and find contours
        kernel = np.zeros((KSIZE, KSIZE), dtype=np.uint8)
        kernel[(KSIZE // 2)] = 1

        if TESS:
            print("Tesseract eliminates..")

        res = np.zeros_like(self.grayImg)
        dilated = cv2.dilate(binarized.copy(), kernel, iterations=ITERATION)
        image, contours, hierarchies = cv2.findContours(
            dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        print("num contours", len(contours))
        boxes = []
        for i, (contour, hierarchy) in enumerate(zip(contours,
                                                     hierarchies[0])):
            if hierarchy[-1] == -1:

                # x, y, w, h = cv2.boundingRect(contour)
                # if (y - MARGIN > 0) and (y + h + MARGIN < self.height) and (x - MARGIN > 0) and (x + w + MARGIN < self.width):
                # 	boxes.append([x-MARGIN, y-MARGIN, x+w+MARGIN, y+h+MARGIN])
                # else:
                # 	boxes.append([x, y, x+w, y+h])

                if TESS:
                    x, y, w, h = cv2.boundingRect(contour)
                    if (y - MARGIN > 0) and (y + h + MARGIN < self.height
                                             ) and (x - MARGIN > 0) and (
                                                 x + w + MARGIN < self.width):
                        cv2.imwrite(
                            "text.jpg", self.final[y - MARGIN:y + h + MARGIN,
                                                   x - MARGIN:x + w + MARGIN])
                    else:
                        cv2.imwrite("text.jpg", self.final[y:y + h, x:x + w])

                    ###################
                    ## Run tesseract ##
                    ###################
                    string = pytesseract.image_to_string(
                        Image.open("text.jpg"))
                    if string != u'':
                        rect = cv2.minAreaRect(contour)
                        box = cv2.boxPoints(rect)
                        box = np.int0(box)
                        boxes.append(box)
                        cv2.drawContours(self.final, [box], 0, (0, 255, 0), 2)
                        cv2.drawContours(res, [box], 0, 255, -1)
                        # print(string)
                    os.remove("text.jpg")

                else:
                    rect = cv2.minAreaRect(contour)
                    box = cv2.boxPoints(rect)
                    box = np.int0(box)
                    boxes.append(box)
                    # cv2.drawContours(self.final, [box], 0, (0, 255, 0), 2)
                    # cv2.drawContours(res, [box], 0, 255, -1)

        return res, boxes
Example #32
def siamese_track(state,
                  im,
                  mask_enable=False,
                  refine_enable=False,
                  use_cuda=True,
                  preprocessed=False):
    p = state['p']
    net = state['net']
    avg_chans = state['avg_chans']
    window = state['window']
    target_pos = state['target_pos']
    target_sz = state['target_sz']

    wc_x = target_sz[1] + p.context_amount * sum(target_sz)
    hc_x = target_sz[0] + p.context_amount * sum(target_sz)
    s_x = np.sqrt(wc_x * hc_x)
    scale_x = p.exemplar_size / s_x
    d_search = (p.instance_size - p.exemplar_size) / 2
    pad = d_search / scale_x
    s_x = s_x + 2 * pad
    crop_box = [
        target_pos[0] - round(s_x) / 2, target_pos[1] - round(s_x) / 2,
        round(s_x),
        round(s_x)
    ]

    if preprocessed:
        x_crop = Variable(im_to_torch(im).unsqueeze(0))
    else:
        # extract scaled crops for search region x at previous target position
        x_crop = Variable(
            get_subwindow_tracking(im, target_pos, p.instance_size, round(s_x),
                                   avg_chans).unsqueeze(0))

    if mask_enable:
        score, delta, mask = net.track_mask(
            x_crop.cuda() if use_cuda else x_crop)
    else:
        score, delta = net.track(x_crop.cuda() if use_cuda else x_crop)

    delta = delta.permute(1, 2, 3, 0).contiguous().view(4,
                                                        -1).data.cpu().numpy()
    score = F.softmax(score.permute(1, 2, 3,
                                    0).contiguous().view(2, -1).permute(1, 0),
                      dim=1).data[:, 1].cpu().numpy()

    delta[0, :] = delta[0, :] * p.anchor[:, 2] + p.anchor[:, 0]
    delta[1, :] = delta[1, :] * p.anchor[:, 3] + p.anchor[:, 1]
    delta[2, :] = np.exp(delta[2, :]) * p.anchor[:, 2]
    delta[3, :] = np.exp(delta[3, :]) * p.anchor[:, 3]

    def change(r):
        return np.maximum(r, 1. / r)

    def sz(w, h):
        pad = (w + h) * 0.5
        sz2 = (w + pad) * (h + pad)
        return np.sqrt(sz2)

    def sz_wh(wh):
        pad = (wh[0] + wh[1]) * 0.5
        sz2 = (wh[0] + pad) * (wh[1] + pad)
        return np.sqrt(sz2)

    # size penalty
    target_sz_in_crop = target_sz * scale_x
    s_c = change(sz(delta[2, :], delta[3, :]) /
                 (sz_wh(target_sz_in_crop)))  # scale penalty
    r_c = change((target_sz_in_crop[0] / target_sz_in_crop[1]) /
                 (delta[2, :] / delta[3, :]))  # ratio penalty

    penalty = np.exp(-(r_c * s_c - 1) * p.penalty_k)
    pscore = penalty * score

    # cos window (motion model)
    pscore = pscore * (1 - p.window_influence) + window * p.window_influence
    best_pscore_id = np.argmax(pscore)

    pred_in_crop = delta[:, best_pscore_id] / scale_x
    lr = penalty[best_pscore_id] * score[best_pscore_id] * p.lr  # lr for OTB

    res_x = pred_in_crop[0] + target_pos[0]
    res_y = pred_in_crop[1] + target_pos[1]

    res_w = target_sz[0] * (1 - lr) + pred_in_crop[2] * lr
    res_h = target_sz[1] * (1 - lr) + pred_in_crop[3] * lr

    target_pos = np.array([res_x, res_y])
    target_sz = np.array([res_w, res_h])

    # for Mask Branch
    if mask_enable:
        best_pscore_id_mask = np.unravel_index(best_pscore_id,
                                               (5, p.score_size, p.score_size))
        delta_x, delta_y = best_pscore_id_mask[2], best_pscore_id_mask[1]

        if refine_enable:
            if use_cuda:
                mask = net.track_refine(
                    (delta_y, delta_x)).cuda().sigmoid().squeeze().view(
                        p.out_size, p.out_size).cpu().data.numpy()
            else:
                mask = net.track_refine(
                    (delta_y, delta_x)).sigmoid().squeeze().view(
                        p.out_size, p.out_size).cpu().data.numpy()
        else:
            mask = mask[0, :, delta_y, delta_x].sigmoid(). \
                squeeze().view(p.out_size, p.out_size).cpu().data.numpy()

        def crop_back(image, bbox, out_sz, padding=-1):
            a = (out_sz[0] - 1) / bbox[2]
            b = (out_sz[1] - 1) / bbox[3]
            c = -a * bbox[0]
            d = -b * bbox[1]
            mapping = np.array([[a, 0, c], [0, b, d]]).astype(float)  # np.float was removed in NumPy 1.24
            crop = cv2.warpAffine(image,
                                  mapping, (out_sz[0], out_sz[1]),
                                  flags=cv2.INTER_LINEAR,
                                  borderMode=cv2.BORDER_CONSTANT,
                                  borderValue=padding)
            return crop

        s = crop_box[2] / p.instance_size
        sub_box = [
            crop_box[0] + (delta_x - p.base_size / 2) * p.total_stride * s,
            crop_box[1] + (delta_y - p.base_size / 2) * p.total_stride * s,
            s * p.exemplar_size, s * p.exemplar_size
        ]
        s = p.out_size / sub_box[2]
        back_box = [
            -sub_box[0] * s, -sub_box[1] * s, state['im_w'] * s,
            state['im_h'] * s
        ]
        mask_in_img = crop_back(mask, back_box, (state['im_w'], state['im_h']))

        target_mask = (mask_in_img > p.seg_thr).astype(np.uint8)
        if cv2.__version__[-5] == '4':
            contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
        else:
            _, contours, _ = cv2.findContours(target_mask, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_NONE)
        cnt_area = [cv2.contourArea(cnt) for cnt in contours]
        if len(contours) != 0 and np.max(cnt_area) > 100:
            contour = contours[np.argmax(cnt_area)]  # use max area polygon
            polygon = contour.reshape(-1, 2)
            # pbox = cv2.boundingRect(polygon)  # Min Max Rectangle
            prbox = cv2.boxPoints(
                cv2.minAreaRect(polygon))  # Rotated Rectangle

            # box_in_img = pbox
            rbox_in_img = prbox
        else:  # empty mask
            location = cxy_wh_2_rect(target_pos, target_sz)
            rbox_in_img = np.array(
                [[location[0], location[1]],
                 [location[0] + location[2], location[1]],
                 [location[0] + location[2], location[1] + location[3]],
                 [location[0], location[1] + location[3]]])

    target_pos[0] = max(0, min(state['im_w'], target_pos[0]))
    target_pos[1] = max(0, min(state['im_h'], target_pos[1]))
    target_sz[0] = max(10, min(state['im_w'], target_sz[0]))
    target_sz[1] = max(10, min(state['im_h'], target_sz[1]))

    state['target_pos'] = target_pos
    state['target_sz'] = target_sz
    state['score'] = score
    state['mask'] = mask_in_img if mask_enable else []
    state['ploygon'] = rbox_in_img if mask_enable else []
    return state
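
# A toy sketch (not part of the tracker) showing how the size/ratio penalty
# above behaves; penalty_k = 0.04 is an assumed value for illustration only.
import numpy as np

def _change(r):
    return np.maximum(r, 1. / r)

def _sz(w, h):
    pad = (w + h) * 0.5
    return np.sqrt((w + pad) * (h + pad))

prev_w, prev_h = 40., 20.   # previous target size (already in crop scale)
cand_w, cand_h = 60., 20.   # a candidate proposal that grew wider
s_c = _change(_sz(cand_w, cand_h) / _sz(prev_w, prev_h))  # scale change >= 1
r_c = _change((prev_w / prev_h) / (cand_w / cand_h))      # ratio change >= 1
penalty = np.exp(-(r_c * s_c - 1) * 0.04)
print(s_c, r_c, penalty)  # larger size/ratio jumps yield a smaller penalty factor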
Example #33
0
    def detect(self, msg):
        img = self.bridge.imgmsg_to_cv2(msg, "rgb8")

        # blur, filter colors, dilate and erode to define edges, and find contours
        blur = cv2.GaussianBlur(img, (self.gauss_k, self.gauss_k), 0)
        mask = self.filterColors(blur)

        mask = cv2.dilate(mask, None, iterations=2)
        mask = cv2.erode(mask, None, iterations=1)

        inf, cnts, hrch = cv2.findContours(mask.copy(), cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
        try:
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]
        except Exception:  # avoid a bare except, which would also swallow KeyboardInterrupt
            pass

        epsilon = 0.1
        squares = []
        corners2D = None
        R = None
        t = None
        cx = 0
        cy = 0

        for cnt in cnts:

            # approximate the contour
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, epsilon * peri, True)

            # if our approximated contour has four points, then we
            # can assume that we have found our square
            if len(approx) == 4:
                squares.append(approx)

        for square in squares:

            rect = cv2.minAreaRect(square)
            w, h = rect[1]
            # x,y,w,h = cv2.boundingRect(square)

            # set a lower threshold
            if ((w < 40) or (h < 40)):
                continue
            else:
                pass

            # verify width and height are similar
            aspect_ratio = w / h
            if (not ((aspect_ratio > 0.9) and (aspect_ratio < 1.1))):
                continue
            else:
                pass

            # verify area
            valid_area = self.verifyArea(w, h, square)
            if (not valid_area):
                continue
            else:
                pass

            box = cv2.boxPoints(rect)
            box = np.array(box, dtype="int")
            rect = perspective.order_points(
                box
            )  # order points: top-left, top-right, down-right, down-left
            # https://www.pyimagesearch.com/2016/03/21/ordering-coordinates-clockwise-with-python-and-opencv/
            square = square.reshape(
                (-1, 2))  # square_couples[0][0].reshape((-1,2))
            square = perspective.order_points(square)

            #screenCnt = square.reshape((4, 2))

            # get centroid
            m = cv2.moments(square)
            try:
                cx = int(m["m10"] / m["m00"])
                cy = int(m["m01"] / m["m00"])

                corners2D = square.astype('float32')
                # corners2D = np.concatenate((screenCnt2, centroid), axis = 0)
                R, t, R_exp = self.getPose(self.corners3D, corners2D)
                img = self.draw_frame(img, (cx, cy), R_exp, t)

            except ZeroDivisionError:
                pass

            break

        self.pub_center(img, cx, cy, corners2D, R, t)

        maskmsg = self.bridge.cv2_to_imgmsg(mask, "mono8")
        self.mask.publish(maskmsg)
Example #34
0
area = cv.contourArea(cnt)
# contourArea() : Calculates a contour area
perimeter = cv.arcLength(cnt,True)
# arcLength() : Calculates a contour perimeter or a curve length
epsilon = 0.1*perimeter
approx = cv.approxPolyDP(cnt,epsilon,True)
# approxPolyDP : Approximates a polygonal curve with specified precision
# Convex Hull
hull = cv.convexHull(cnt)
# Checking Convexity
k = cv.isContourConvex(cnt)
# Bounding Rectangle
# straight bounding rectangle
x,y,w,h = cv.boundingRect(cnt)
cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
# rotated rectangle
rect = cv.minAreaRect(cnt)
box = cv.boxPoints(rect)
box = np.int0(box)
cv.drawContours(img,[box],0,(0,0,255),2)
# Minimum Enclosing circle
(x,y),radius = cv.minEnclosingCircle(cnt)
center = (int(x),int(y))
radius = int(radius)
cv.circle(img,center,radius,(0,255,0),2)
# Fitting an Ellipse
ellipse = cv.fitEllipse(cnt)
cv.ellipse(img,ellipse,(0,255,0),2)
# Fitting a Line
rows,cols = img.shape[:2]
[vx,vy,x,y] = cv.fitLine(cnt, cv.DIST_L2,0,0.01,0.01)
lefty = int((-x*vy/vx) + y)
righty = int(((cols-x)*vy/vx)+y)
cv.line(img,(cols-1,righty),(0,lefty),(0,255,0),2)
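
# Minimal setup assumed by the snippet above (it uses the names img and cnt);
# 'shape.png' is a placeholder path, and the two-value findContours return
# assumes OpenCV 4.x.
import cv2 as cv
import numpy as np

img = cv.imread('shape.png')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
ret, binary = cv.threshold(gray, 127, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
cnt = max(contours, key=cv.contourArea)  # work with the largest contour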
Example #35
0
File: test_0802.py  Project: zzu0203/DIMA
def func_thr(img, window_name, file_name):
    ############ rotated
    height, width = img.shape

    img = cv2.bitwise_not(img)
    ret, thresh = cv2.threshold(img, 0, 255,
                                cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    coords = np.column_stack(np.where(thresh > 0))
    angle = cv2.minAreaRect(coords)[-1]
    if angle < -45:
        angle = -(90 + angle)
    else:
        angle = -angle

    # rotate the image to deskew it
    #(h, w) = save_img.shape[:2]
    center = (width // 2, height // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(img,
                             M, (width, height),
                             flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_REPLICATE)

    print("[INFO] angle: {:.3f}".format(angle))

    rotated = cv2.bitwise_not(rotated)
    #cv2.imshow('rotated', rotated)
    #cv2.waitKey(0)

    cv2.imwrite('r_{}_{}'.format(int(angle), file_name), rotated)

    threshold_name = 'Ths'
    cv2.createTrackbar(threshold_name, window_name, 0, 255, nothing)
    cv2.setTrackbarPos(threshold_name, window_name, 140)

    while (1):
        height, width = img.shape
        ths = cv2.getTrackbarPos(threshold_name, window_name)
        ret, gray = cv2.threshold(rotated, ths, 255, cv2.THRESH_BINARY)
        save_img = gray.copy()

        height_ratio = 1200

        if height >= height_ratio and height_ratio != height:
            resize_width = (width * height_ratio) // height
            height, width = height_ratio, resize_width

            img_resize = cv2.resize(gray, (width, height))
        else:
            img_resize = gray

        img_resize = cv2.fastNlMeansDenoising(img_resize, None, 15, 7, 21)
        cv2.imshow(window_name, img_resize)

        if cv2.waitKey(30) & 0xFF == 27:
            cv2.imwrite('t_{}_{}'.format(ths, file_name), save_img)
            break
    # save_img = the thresholded image

    ### rotated is the final, deskewed image
    dataframe = pytesseract.image_to_data(
        save_img,
        lang='kor3+eng',
        output_type=Output.DATAFRAME,
        config="--psm 4 --oem 1 -c tessedit_char_whitelist=-01234567890XYZ:@")

    list_dataframe = dataframe_to_list(data_frame=dataframe)
    removed = df_list_removeNan(list_dataframe)
    topNheight_list = dflist_roi(removed)
    print(removed)

    cut_roi(img=save_img, axis_list=topNheight_list, file_name=file_name)
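
# A hypothetical driver for func_thr: the snippet assumes a pre-created window
# (for its trackbar), a no-op trackbar callback named `nothing`, and the
# pytesseract helper functions it calls; the file and window names below are
# placeholders.
def nothing(x):
    pass

img = cv2.imread('scan.png', cv2.IMREAD_GRAYSCALE)
window_name = 'threshold-window'
cv2.namedWindow(window_name)
func_thr(img, window_name, 'scan.png')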
Example #36
0
File: Gui.py  Project: abhishek210/hack_v15
    def script(self):

        # construct the argument parse and parse the arguments
        # ap = argparse.ArgumentParser()
        # ap.add_argument("-i", "--image", required=True,
        # 	help="path to the input image")
        # ap.add_argument("-w", "--width", type=float, required=True,
        # 	help="width of the left-most object in the image (in inches)")
        # args = vars(ap.parse_args())

        # load the image, convert it to grayscale, and blur it slightly
        image = cv2.imread(self.fileName)
        image = cv2.resize(image,
                           None,
                           fx=0.5,
                           fy=0.5,
                           interpolation=cv2.INTER_CUBIC)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)

        # perform edge detection, then perform a dilation + erosion to
        # close gaps in between object edges
        edged = cv2.Canny(gray, 10, 10)
        edged = cv2.dilate(edged, None, iterations=1)
        edged = cv2.erode(edged, None, iterations=1)

        # find contours in the edge map
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # sort the contours from left-to-right and initialize the
        # 'pixels per metric' calibration variable
        (cnts, _) = contours.sort_contours(cnts)
        pixelsPerMetric = None
        orig = None

        for c in cnts:
            # if the contour is not sufficiently large, ignore it
            if cv2.contourArea(c) < 100:
                continue

            # compute the rotated bounding box of the contour
            orig = image.copy()
            box = cv2.minAreaRect(c)
            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(
                box)
            box = np.array(box, dtype="int")

            # order the points in the contour such that they appear
            # in top-left, top-right, bottom-right, and bottom-left
            # order, then draw the outline of the rotated bounding
            # box
            box = perspective.order_points(box)
            cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)

            # loop over the original points and draw them
            for (x, y) in box:
                cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)

            # unpack the ordered bounding box, then compute the midpoint
            # between the top-left and top-right coordinates, followed by
            # the midpoint between bottom-left and bottom-right coordinates
            (tl, tr, br, bl) = box
            (tltrX, tltrY) = (tl[0] + tr[0]) * 0.5, (tl[1] + tr[1]) * 0.5
            (blbrX, blbrY) = (bl[0] + br[0]) * 0.5, (bl[1] + br[1]) * 0.5

            # compute the midpoint between the top-left and bottom-left points,
            # followed by the midpoint between the top-right and bottom-right
            (tlblX, tlblY) = (tl[0] + bl[0]) * 0.5, (tl[1] + bl[1]) * 0.5
            (trbrX, trbrY) = (tr[0] + br[0]) * 0.5, (tr[1] + br[1]) * 0.5

            # draw the midpoints on the image
            cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)

            # draw lines between the midpoints
            cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                     (255, 0, 255), 2)
            cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                     (255, 0, 255), 2)

            # compute the Euclidean distance between the midpoints
            dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
            dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

            # if the pixels per metric has not been initialized, then
            # compute it as the ratio of pixels to supplied metric
            # (in this case, inches)
            if pixelsPerMetric is None:
                pixelsPerMetric = dB / 7.87
            # compute the size of the object
            dimA = dA / pixelsPerMetric
            dimB = dB / pixelsPerMetric

            # draw the object sizes on the image
            cv2.putText(orig, "{:.1f}in".format(dimA),
                        (int(tltrX - 15), int(tltrY - 10)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
            cv2.putText(orig, "{:.1f}in".format(dimB),
                        (int(trbrX + 10), int(trbrY)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)

            # show the output image
            cv2.imshow("Image", orig)
            break

        # Specify the paths for the 2 files
        protoFile = "pose/coco/pose_deploy_linevec.prototxt"
        weightsFile = "pose/coco/pose_iter_440000.caffemodel"
        nPoints = 18
        POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7],
                      [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13],
                      [0, 14], [0, 15], [14, 16], [15, 17]]

        # Read the network into Memory
        net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)

        frameWidth = orig.shape[1]
        frameHeight = orig.shape[0]
        threshold = 0.1

        net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)

        # Specify the input image dimensions
        inWidth = 368
        inHeight = 368

        # Prepare the frame to be fed to the network
        inpBlob = cv2.dnn.blobFromImage(orig,
                                        1.0 / 255, (inWidth, inHeight),
                                        (0, 0, 0),
                                        swapRB=False,
                                        crop=False)

        # Set the prepared object as the input blob of the network
        net.setInput(inpBlob)

        output = net.forward()

        H = output.shape[2]
        W = output.shape[3]
        # Empty list to store the detected keypoints
        points = []
        for i in range(1, nPoints):
            if (i == 9 or i == 10):
                continue

            # confidence map of corresponding body's part.
            probMap = output[0, i, :, :]

            # Find global maxima of the probMap.
            minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

            # Scale the point to fit on the original image
            x = (frameWidth * point[0]) / W
            y = (frameHeight * point[1]) / H
            if prob > threshold:
                cv2.circle(orig, (int(x), int(y)),
                           3, (0, 255, 255),
                           thickness=-1,
                           lineType=cv2.FILLED)
                cv2.putText(orig,
                            "{}".format(i), (int(x), int(y)),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 255, 0),
                            1,
                            lineType=cv2.LINE_AA)

                # Add the point to the list if the probability is greater than the threshold
                points.append((int(x), int(y)))
            else:
                points.append(None)
            if (i == 11):
                break

        Shoulder = (points[4][0] - points[1][0]) / pixelsPerMetric
        Length = (points[7][1] - points[1][1]) / pixelsPerMetric
        Arm = (points[3][1] - points[1][1]) / pixelsPerMetric
        self.info = "shoulder " + str(Shoulder) + "\nlength " + str(
            Length) + "\nArm-length " + str(Arm) + "\n"

        if (Shoulder < 12):
            self.info += "Your T-shirt size is Small"
        elif (Shoulder < 13):
            self.info += "Your T-shirt size is Medium"
        else:
            self.info += "Your T-shirt size is Large"

        self.label1.configure(text=self.info)
        print(self.info)

        cv2.imshow("Image", orig)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
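
# Worked example of the 'pixels per metric' calibration used above: if the
# reference object measures dB = 157.4 px across and its true width is
# 7.87 in, then every inch spans 20 px, so an object 130 px long is 6.5 in.
pixelsPerMetric = 157.4 / 7.87   # 20.0 px per inch
dimA = 130 / pixelsPerMetric     # 6.5 inches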
Example #37
0
ret4, thresh = cv2.threshold(imgray, 0, 255,
                             cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

cv2.imshow('thresh', thresh)
print('all good')
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_NONE)
cnt = contours[-1]
#image = cv2.drawContours(res2,contours,1, (0,255,0), 3)
cv2.drawContours(res2, [cnt], 0, (0, 255, 0), 3)

img = cv2.drawContours(frame, [cnt], 0, (0, 255, 0), 3)
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(res2, (x, y), (x + w, y + h), (0, 255, 0), 2)

rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(res2, [box], 0, (0, 0, 255), 2)

cv2.imshow('frame1', res2)
cv2.imshow('frame', frame)

x, y, w, h = cv2.boundingRect(cnt)
aspect_ratio = float(w) / h
print(aspect_ratio)
area = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
rect_area = w * h
extent = float(area) / rect_area
print(extent)
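
# Worked example of the extent measure computed above: a contour of area
# 600 px^2 inside a 40x20 bounding rectangle fills 600/800 of it.
extent = 600 / (40 * 20)   # 0.75; near 1.0 for solid blobs, low for thin strokes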
Example #38
0
def split_text_line3(line, step, img, step_align=True):
    """
    Split a text line into anchors according to its minAreaRect
    :param line:
        (x1,y1,x2,y2,x3,y3,x4,y4)
        corner order of the rectangle: left-top, right-top, right-bottom, left-bottom
    :return: [(anchor_xmin, anchor_ymin, anchor_xmax, anchor_ymax)]
    """
    if isinstance(line, torch.Tensor):
        line = line.cpu().data.numpy()
    global global_k_is_none_count
    if DEBUG:
        img = draw_four_vectors(img, line)

    # first, split img into step-wide columns
    step_max = int(np.ceil(img.shape[1] / step))
    max_start_col = step_max - 1
    xmin, ymin, xmax, ymax = get_ltrb(line)
    width = xmax - xmin
    height = ymax - ymin

    if height > MAX_HEIGHT_WIDTH_SCALE * width:
        return []
    if xmax - xmin < step:
        # filter out boxes that are too small

        return []
    anchor_count = int(math.ceil(width / step))

    if DEBUG:
        img = draw_bounding_box(img, (xmin, ymin, xmax, ymax))

    rect = cv2.minAreaRect(
        np.asarray([[line[0], line[1]], [line[2], line[3]], [line[4], line[5]],
                    [line[6], line[7]]]))
    # get the four corner points of the minimum rotated rect
    box = cv2.boxPoints(rect)
    box = order_points(box)

    if DEBUG:
        img = draw_four_vectors(img,
                                (box[0][0], box[0][1], box[1][0], box[1][1],
                                 box[2][0], box[2][1], box[3][0], box[3][1]),
                                color=(255, 55, 55))
    # gather the anchor-related information
    p1 = Point(box[0][0], box[0][1])
    p2 = Point(box[1][0], box[1][1])
    p3 = Point(box[2][0], box[2][1])
    p4 = Point(box[3][0], box[3][1])

    mid_p12 = Point((box[0][0] + box[1][0]) / 2, (box[0][1] + box[1][1]) / 2)
    mid_p34 = Point((box[2][0] + box[3][0]) / 2, (box[2][1] + box[3][1]) / 2)

    if mid_p12.y >= mid_p34.y:
        print('bugs happen , this line is not useful')
        return []

    l1 = Line(p1, p2)
    l2 = Line(p2, p3)
    l3 = Line(p3, p4)
    l4 = Line(p4, p1)
    lines = [l1, l2, l3, l4]

    if l1.k is None:
        global_k_is_none_count += 1
        print("l1 K is None")
        print(p1)
        print(p2)
        print(p3)
        print(p4)
        return []

    quad = []
    splited_lines = []
    side_refinement = []
    shift = ((np.ceil((xmax - xmin) / step)) * step - (xmax - xmin)) / 2
    if step_align:
        anchor_start = int(np.floor(xmin / step) * step)
        anchor_end = int((np.ceil(xmax / step)) * step)
        if abs(anchor_start - xmin) > (step // 2):
            anchor_start = anchor_start + step
        if abs(anchor_end - xmax) > (step // 2):
            anchor_end = anchor_end - step
    else:
        anchor_start = int(np.floor(xmin - shift))
        anchor_end = int(np.floor(xmax + shift))

    interval = int((anchor_end - anchor_start) / step)
    for start in range(interval):
        # "down"/"up" here are defined along the y axis: the side closer to y=0 (the upper side of the image) is "down"
        if anchor_start + start * step > max_start_col * step:
            continue
        grid_start = anchor_start + start * step
        grid_end = anchor_start + (start + 1) * step
        line_left_down = Point(grid_start, 0)
        line_left_up = Point(grid_start, height)
        line_right_down = Point(grid_end, 0)
        line_right_up = Point(grid_end, height)
        line_left = Line(line_left_down, line_left_up)
        line_right = Line(line_right_down, line_right_up)
        # compute the intersections with the top and bottom edges of the box
        left_down = line_left.cross(l1)
        left_up = line_left.cross(l3)
        right_down = line_right.cross(l1)
        right_up = line_right.cross(l3)

        center_y = (left_down.y + right_down.y) / 2 + (
            (left_up.y + right_up.y) / 2 -
            (left_down.y + right_down.y) / 2) / 2
        center_x = (grid_start + grid_end) / 2

        h = (left_up.y - left_down.y + right_up.y - right_down.y) / 2
        dh = (left_up.y - right_up.y + left_down.y -
              right_down.y) / 2  # dh is defined as left minus right
        splited_lines.append((center_x, center_y, h, dh, step))
        quad.append((left_down.x, left_down.y, right_down.x, right_down.y,
                     right_up.x, right_up.y, left_up.x, left_up.y))

        if DEBUG:
            img = draw_four_vectors(img, (
                left_down.x,
                left_down.y,
                right_down.x,
                right_down.y,
                right_up.x,
                right_up.y,
                left_up.x,
                left_up.y,
            ),
                                    color=(0, 255, 55))
            cv2.imshow('test', img)
            cv2.waitKey()
        # handle side refinement
        # if abs(center_x - xmin) < 8:
        #     side_refinement.append(center_x - xmin)
        # elif abs(center_x - xmax) < 8:
        #     side_refinement.append(center_x - xmax)
        # else:
        #     side_refinement.append(-999)

        if start == 0:
            side_refinement.append(center_x - xmin)
        elif start == interval - 1:
            side_refinement.append(center_x - xmax)
        else:
            side_refinement.append(-999)

    if DEBUG:
        cv2.imshow('test', img)
        cv2.waitKey()
    splited_lines = np.array(splited_lines)
    quad = np.array(quad)
    return quad, splited_lines, side_refinement
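
# The snippet above assumes small Point/Line helpers with a slope attribute
# `k` and a `cross` intersection method; a minimal sketch consistent with how
# they are used (vertical grid lines intersected with slanted box edges):
class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

class Line(object):
    def __init__(self, p1, p2):
        self.p1, self.p2 = p1, p2
        self.k = None if p2.x == p1.x else (p2.y - p1.y) / (p2.x - p1.x)

    def cross(self, other):
        # intersection point; assumes the two lines are not parallel
        if self.k is None:                      # self is vertical
            x = self.p1.x
            b = other.p1.y - other.k * other.p1.x
            return Point(x, other.k * x + b)
        if other.k is None:                     # other is vertical
            return other.cross(self)
        b1 = self.p1.y - self.k * self.p1.x
        b2 = other.p1.y - other.k * other.p1.x
        x = (b2 - b1) / (self.k - other.k)
        return Point(x, self.k * x + b1)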
Example #39
0
def getTargetPixelCoords(img):
    # returns the pixel coordinates of the target site in an image, as well as number of vision tapes detected
    # may need to change algorithm depending on what your current build season's game is (may or may not include numberOfVisionTapesDetected)
    # newcameramtx, roi = cv2.getOptimalNewCameraMatrix(CAMERA_MATRIX, DISTORTION_CONSTANTS, (IMAGE_WIDTH, IMAGE_HEIGHT), 1, (IMAGE_WIDTH, IMAGE_HEIGHT))
    # img = cv2.undistort(image, CAMERA_MATRIX, DISTORTION_CONSTANTS, None, newcameramtx)

    g = img.copy()  # separate green image channel
    g[:, :, 0] = 0
    g[:, :, 2] = 0

    r = img.copy()  # separate red image channel
    r[:, :, 0] = 0
    r[:, :, 1] = 0

    img = g - r  # create new image subtracting red channel from green channel

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    mask = cv2.inRange(gray, LOWER_GRAY, UPPER_GRAY)  # create grayscale mask
    res = cv2.bitwise_and(gray, gray,
                          mask=mask)  # apply grayscale mask to image
    _, contours, __ = cv2.findContours(
        mask, cv2.RETR_TREE,
        cv2.CHAIN_APPROX_SIMPLE)  # find contours in the image

    targetCoords = []  # array to hold target coordinates
    points = []  # list of highest corners of rectangles
    numberOfVisionTapesDetected = 0

    for contour in contours:

        contour_area = cv2.contourArea(contour)
        rect = cv2.minAreaRect(
            contour)  # minimum area rectangle around contour
        rect_width = rect[1][0]  # width of the rotated rectangle around the contour
        rect_height = rect[1][1]  # height of the rotated rectangle around the contour
        rect_angle = rect[2]  # angle at which the rectangle around the contour is tilted

        if (isValidTarget(contour_area, rect_width, rect_height, rect_angle)):
            # if condition to check if detected rectangle is a piece of vision tape

            numberOfVisionTapesDetected = numberOfVisionTapesDetected + 1  # add one to number of vision tapes detected
            box = cv2.boxPoints(rect)  # find corners of vision tape
            box = np.int0(box)  # convert to int0 array
            points.append(
                getMinYPoint(box)
            )  # add the highest corner of the detected vision tape to the list of points
            cv2.drawContours(
                res, [box], 0, (255),
                4)  # draw box around detected vision tape in output image

    if (len(points) > 0):

        targetCoords = list(
            sum(points) / len(points)
        )  # centroid of the highest corners of the detected vision tapes
        targetCoords.append(
            numberOfVisionTapesDetected)  # number of vision tapes detected
        cv2.circle(res, (int(targetCoords[0]), int(targetCoords[1])), 4, (255),
                   -1)  # draw dot on target area in output image

    else:

        targetCoords.append(numberOfVisionTapesDetected
                            )  # add number of vision tapes detected to result

    cv2.imwrite(
        '/Users/yiyi/Desktop/cv/contours/' +
        datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + '.jpg', res
    )  # write output image with white dot on target area and white boxes around detected vision tapes
    return targetCoords  # result is the centroid of the highest corners of the detected vision tapes
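
# getMinYPoint is assumed by the snippet above; a minimal sketch: return the
# box corner with the smallest y value (the highest point in image coordinates).
def getMinYPoint(box):
    return min(box, key=lambda point: point[1])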
Example #40
0
def draw_contour(contour, img):
    min_square_size = 987
    images = []
    locs = []  # initialized so the final return works even when no valid square is found
    area = cv2.contourArea(contour)
    rect = cv2.minAreaRect(contour)
    # If the contour is not really small, or really big
    h, w = img.shape[0], img.shape[1]
    print('area == ' + str(area))
    if area > min_square_size and area < h * w - (2 * (h + w)):
        # Get the four corners of the contour
        epsilon = .1 * cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, epsilon, True)

        print("========================================================")

        # now that we have our screen contour, we need to determine
        # the top-left, top-right, bottom-right, and bottom-left
        # points so that we can later warp the image -- we'll start
        # by reshaping our contour to be our finals and initializing
        # our output rectangle in top-left, top-right, bottom-right,
        # and bottom-left order
        pts = approx.reshape(4, 2)
        rect = np.zeros((4, 2), dtype="float32")

        # the top-left point has the smallest sum whereas the
        # bottom-right has the largest sum
        s = pts.sum(axis=1)
        rect[0] = pts[np.argmin(s)]
        rect[2] = pts[np.argmax(s)]

        # compute the difference between the points -- the top-right
        # will have the minimum difference and the bottom-left will
        # have the maximum difference
        diff = np.diff(pts, axis=1)
        rect[1] = pts[np.argmin(diff)]
        rect[3] = pts[np.argmax(diff)]

        # now that we have our rectangle of points, let's compute
        # the width of our new image
        (tl, tr, br, bl) = rect
        widthA = np.sqrt(((br[0] - bl[0])**2) + ((br[1] - bl[1])**2))
        widthB = np.sqrt(((tr[0] - tl[0])**2) + ((tr[1] - tl[1])**2))

        # ...and now for the height of our new image
        heightA = np.sqrt(((tr[0] - br[0])**2) + ((tr[1] - br[1])**2))
        heightB = np.sqrt(((tl[0] - bl[0])**2) + ((tl[1] - bl[1])**2))

        # take the maximum of the width and height values to reach
        # our final dimensions
        maxWidth = max(int(widthA), int(widthB))
        maxHeight = max(int(heightA), int(heightB))

        # construct our destination points which will be used to
        # map the screen to a top-down, "birds eye" view
        dst = np.array([[0, 0], [maxWidth - 1, 0],
                        [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]],
                       dtype="float32")

        # calculate the perspective transform matrix and warp
        # the perspective to grab the screen
        M = cv2.getPerspectiveTransform(rect, dst)
        warp = cv2.warpPerspective(img, M, (maxWidth, maxHeight))

        #cv2.imshow('warp', warp)
        #cv2.waitKey()
        locs = text_area_detect(warp)

        print("========================================================")

        images.append(warp.copy())
    return images, locs
Example #41
0
def digitize_sudoku(sudoku_image, test=False):
    gray = cv2.cvtColor(sudoku_image, cv2.COLOR_BGR2GRAY)
    black_box = np.zeros((28,28,3),np.uint8)
    black_box = cv2.cvtColor(black_box, cv2.COLOR_BGR2GRAY)
    h, w = sudoku_image.shape[0], sudoku_image.shape[1]

    # Sudoku object that will contain all the information
    sudoku = Sudoku.instance()
    
    # Border margins in pixels: one for the outer grid edge, one per cell
    sudoku_border = 4
    border = 4
    x = w/9
    y = h/9

    for i in range(9):
        for j in range(9):
            # We get the position of each case (simply dividing the image in 9)
            top     = int(round(y*i+border)) 
            left    = int(round(x*j+border)) 
            right   = int(round(x*(j+1)-border))
            bottom  = int(round(y*(i+1)-border)) 
            if i == 0:
                top+=sudoku_border
            if i == 8:
                bottom-=sudoku_border
            if j == 0:
                left+=sudoku_border
            if j == 8:
                right-=sudoku_border

            point = [[[left,  top]],[[right, top]],[[left,  bottom]],[[right, bottom]]]
            square, _ = crop(gray, point)

            grid_square = square.copy()
            contours, _ = cv2.findContours(grid_square, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(grid_square, contours, -1, (255, 255, 255), 2)
            contours, _ = cv2.findContours(grid_square, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            num_grid_img_position = [top, right, bottom, left]

            if contours:
                conts = sorted(contours, key=cv2.contourArea, reverse=True)
                # selecting the biggest contour
                cnt = conts[0]
                minarea = x*y*0.04
                if cv2.contourArea(cnt) > minarea:
                    # Cropping out the number from grid
                    rect = cv2.minAreaRect(cnt)
                    box = cv2.boxPoints(rect)
                    box = np.int0(box)
                    minx , miny = max(min(box, key=lambda g: g[0])[0], 0),max(min(box, key=lambda g: g[1])[1], 0)
                    maxx ,maxy = min(max(box, key=lambda g: g[0])[0], int(x)),min(max(box, key=lambda g: g[1])[1], int(y))

                    number_image = square[miny:maxy, minx:maxx]

                    if number_image is None or number_image.shape[0] < 2 or number_image.shape[1] < 2:
                        # If no number in there
                        sudoku.update_grid(black_box, (i, j), num_grid_img_position)
                    else:
  
                        final = number_image.copy()
                        final = cv2.adaptiveThreshold(final, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 7, 7)
                        final = make_square(final)                
                        final = final/255
                        #final = optimize_digit(final)
                        sudoku.update_grid(final, (i, j), num_grid_img_position)
                else:
                    sudoku.update_grid(black_box, (i, j), num_grid_img_position)
            else:
                sudoku.update_grid(black_box, (i, j), num_grid_img_position)
    return sudoku
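
# make_square is assumed above; a sketch that pads the digit crop into a
# square canvas and resizes it to 28x28, matching the black_box placeholder.
def make_square(img, size=28):
    h, w = img.shape[:2]
    side = max(h, w)
    canvas = np.zeros((side, side), np.uint8)
    top, left = (side - h) // 2, (side - w) // 2
    canvas[top:top + h, left:left + w] = img
    return cv2.resize(canvas, (size, size))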
Example #42
0
 def draw_rotated_rect(self, image, contour):
     rect = cv2.minAreaRect(contour)
     box = cv2.boxPoints(rect)
     box = np.int0(box)
     cv2.drawContours(image, [box], -1, (255, 0, 0), 3)
     return rect
Example #43
0
File: privacy.py  Project: vivienney/deda
 def restoreSkewByMarkers(self):
     _,_, angle = cv2.minAreaRect(self._getMagentaMarkers())
     angle = angle%90 if angle%90<45 else angle%90-90
     self._print("Skew correction: rotating by %+f°"%angle)
     self.im = rotateImage(self.im, angle, cv2.INTER_NEAREST)
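
# Worked examples of the angle folding above, which maps any minAreaRect
# angle into [-45, 45) so the applied rotation is always the smallest one:
def fold(angle):
    return angle % 90 if angle % 90 < 45 else angle % 90 - 90

assert fold(-88) == 2     # nearly upright, rotate slightly
assert fold(-30) == -30   # already within range
assert fold(50) == -40    # fold past 45 degrees the other way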
Example #44
0
img = cv2.imread('detect_blob.png')

# avoid modifying the original image
img = img.copy()
binary = threshold_demo(img)

# contour detection
out, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(contours)):
    cv2.drawContours(img, contours, i, (0, 0, 255), 2, 8)
    # area
    area = cv2.contourArea(contours[i])
    # perimeter (arc length)
    perimeter = cv2.arcLength(contours[i], True)
    print('contour %d: area %d, perimeter %d' % (i, area, perimeter))
    # compute the minimum-area rectangle enclosing the target
    rect = cv2.minAreaRect(contours[i])
    cx, cy = rect[0]
    # get the four corner coordinates of the bounding rectangle
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
    cv2.circle(img, (np.int32(cx), np.int32(cy)), 2, (255, 0, 0), 2, 8, 0)

cv2.imshow('contours-Matrix', img)

cv2.waitKey(0)
cv2.destroyAllWindows()
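
# threshold_demo is assumed above; a minimal sketch using Otsu binarisation,
# consistent with the thresholding used elsewhere in these examples.
def threshold_demo(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    return binary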
Example #45
0
                  [10, 20, 70, 30]]
    print 'RotatedRoi:', RotatedRoi1
    Point_2x1 = np.array([10, 20]).reshape(2, 1)
    drawPoints(Img1, Point_2x1, 255)
    print Point_2x1.ravel(), 'in RotatedRoi?', inRoi(Point_2x1, RotatedRoi1, ROI_TYPE_ROTATED)
    Point_2x1 = np.array([10, 10]).reshape(2, 1)
    drawPoints(Img1, Point_2x1, 255)
    print Point_2x1.ravel(), 'in RotatedRoi?', inRoi(Point_2x1, RotatedRoi1, ROI_TYPE_ROTATED)
    Point_2x1 = np.array([70, 70]).reshape(2, 1)
    drawPoints(Img1, Point_2x1, 255)
    drawPoints(Img1, Point_2x1, 255, offset=(-10, 10))
    print Point_2x1.ravel(), 'in RotatedRoi?', inRoi(Point_2x1, RotatedRoi1, ROI_TYPE_ROTATED)
    drawRoi(Img1, RotatedRoi1, ROI_TYPE_ROTATED, color=255, thickness=-1)
    drawRoi(Img2, RotatedRoi2, ROI_TYPE_ROTATED, color=255, offset=(-50, -50))
    Contours, _ = cv2.findContours(image=Img2.copy(), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
    Rect = cv2.minAreaRect(Contours[0])
    Box = cv2.cv.BoxPoints(Rect)
    BoxImg = np.zeros((200, 200), np.uint8)
    drawRoi(Img2, Box, ROI_TYPE_ROTATED, color=255)
    drawRoi(Img1, RotatedRoi1, ROI_TYPE_ROTATED, color=255, offset=(20, 20))
    # drawRoi(Img2, RotatedRoi2, ROI_TYPE_ROTATED, color=255, offset=(20, 20))
    cv2.imshow('Roi2', Img2)
    cv2.imshow('Roi1', Img1)
    cv2.waitKey()

    # Img = (np.random.random((100, 100)) * 255).astype(np.uint8)
    # roi_xywh = [10, 10, 20, 20]
    # roi_xyxy = [10, 10, 30, 30]
    # print 'Roi_xywh:      ', roi_xywh
    # roi_xywh2xyxy = cvtRoi(roi=roi_xywh, flag=ROI_CVT_XYWH2XYXY)
    # roi_xyxy2xywh = cvtRoi(roi=roi_xyxy, flag=ROI_CVT_XYXY2XYWH)
Example #46
0
def recognition():
    """
    """
    img0 = cv2.imread(pic_path)
    gray = cv2.cvtColor(img0, cv2.COLOR_BGR2GRAY)
    # the larger the Gaussian kernel (odd sizes only) and its standard deviation,
    # the more blurred the processed image becomes
    #     gray = cv2.GaussianBlur(gray, (5, 5), 0)

    # edge detection
    gray = cv2.Canny(gray, 50, 100)
    img = deal_block(gray)
    show_pic(img)

    # returns the processed image, the contour point sets, and the contour hierarchy
    _, cnts, _ = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL,
                                  cv2.CHAIN_APPROX_SIMPLE)

    # sort the contours so the first one is the leftmost reference object
    cnts = sorted(cnts, key=lambda k: np.min(k[:, :, 0]), reverse=False)

    scale = None
    blue = (255, 0, 0)
    green = (0, 255, 0)
    red = (0, 0, 255)
    pink = (255, 0, 255)
    orig = img0.copy()
    count = 0
    for c in cnts:
        if cv2.contourArea(c) < 100:
            # discard small contours
            continue
        count += 1
        orig1 = img0.copy()
        cv2.drawContours(orig1, [c], -1, (255, 255, 0), 2)
        show_pic(orig1)
        # get the minimum-area bounding rectangle
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        cv2.drawContours(orig, [box.astype("int")], -1, green, 2)
        show_pic(orig)

        for point in box:
            cv2.circle(orig, (int(point[0]), int(point[1])), 5, red, -1)
            show_pic(orig)

        (p1, p2, p3, p4) = box
        midpoint1 = getMidPoint(p1, p2)
        midpoint2 = getMidPoint(p2, p3)
        midpoint3 = getMidPoint(p3, p4)
        midpoint4 = getMidPoint(p4, p1)

        for midpoint in [midpoint1, midpoint2, midpoint3, midpoint4]:
            cv2.circle(orig, midpoint, 5, blue, -1)
        show_pic(orig)

        cv2.line(orig, midpoint1, midpoint3, pink, 2)
        cv2.line(orig, midpoint2, midpoint4, pink, 2)
        show_pic(orig)

        # initialize the scale variable from the leftmost reference object
        dis13 = getDistanceByPosition(midpoint1, midpoint3)
        dis24 = getDistanceByPosition(midpoint2, midpoint4)
        if scale is None:
            if dis24 > dis13:
                scale = dis24 / standard
            else:
                scale = dis13 / standard

        reald1 = dis13 / scale
        reald2 = dis24 / scale

        if reald1 > reald2:
            rad = reald1
        else:
            rad = reald2

        value = "unknown"
        if rad > 235:
            value = "1 yuan"
        else:
            Ymin = np.min(c[:, :, 0])
            Ymax = np.max(c[:, :, 0])
            Xmin = np.min(c[:, :, 1])
            Xmax = np.max(c[:, :, 1])
            orig1 = cv2.imread(pic_path, 0)
            cropImg = orig1[Xmin:Xmax, Ymin:Ymax]
            value = orb_deal(cropImg)
            value = str(value) + "jiao"


#         break

# putText arguments: image / text / top-left coordinate / font / font scale / color / thickness
        cv2.putText(orig, '%s' % (value),
                    (midpoint1[0] - 10, midpoint1[1] + 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
        cv2.putText(orig, '%.1fmm' % (rad / 10.0),
                    (midpoint2[0] + 10, midpoint2[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)

        show_pic(orig)

    cv2.putText(orig, '%s' % (str(count)), (20, 20), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 0, 0), 4)
    show_pic(orig)
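
# getMidPoint and getDistanceByPosition are assumed above; minimal sketches:
import math

def getMidPoint(p1, p2):
    return (int((p1[0] + p2[0]) / 2), int((p1[1] + p2[1]) / 2))

def getDistanceByPosition(p1, p2):
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])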
Example #47
0
def main_func(image_path):
	#start
	PATH_ORIGINAL = image_path
	originalPath = PATH_ORIGINAL
	imgSkewed = Image.open(originalPath)
	imgSwt = pillowfight.swt(imgSkewed, output_type=pillowfight.SWT_OUTPUT_BW_TEXT)
	imgSwt.save(PATH_SWT)
	# saved skewed Image
	# imageCopied = convertPilImageToCv2Image(imgSwt)
	image = convertPilImageToCv2Image(imgSwt)

	results, image = east(image)

	# TODO
	rect = results[-4][0]
	# import pdb
	# pdb.set_trace()

	roi = image[rect[1]-40:rect[3]+40, rect[0]-40:rect[2]+40]
	if roi.size == 0:  # comparing a numpy array to [] never matches; check emptiness instead
		roi = image[rect[1]-4:rect[3], rect[0]-4:rect[2]]
	# cv2.imwrite(PATH_CROP_AFTER_EAST, roi)
	# roi = image[rect[1]-4:rect[3], rect[0]-4:rect[2]]
	image = roi

	# for rect in results[-2]:
		# roi = image[rect[1]-40:rect[3]+40, rect[0]-40:rect[2]+40]
		# cv2.imwrite('crop.jpg',roi)
		# break

	#################################################################################################

	# convert the image to grayscale and flip the foreground
	# and background to ensure foreground is now "white" and
	# the background is "black"
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	gray = cv2.bitwise_not(gray)
	 
	# threshold the image, setting all foreground pixels to
	# 255 and all background pixels to 0
	thresh = cv2.threshold(gray, 0, 255,
		cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

	# grab the (x, y) coordinates of all pixel values that
	# are greater than zero, then use these coordinates to
	# compute a rotated bounding box that contains all
	# coordinates
	coords = np.column_stack(np.where(thresh > 0))
	angle = cv2.minAreaRect(coords)[-1]
	 
	# the `cv2.minAreaRect` function returns values in the
	# range [-90, 0); as the rectangle rotates clockwise the
	# returned angle trends to 0 -- in this special case we
	# need to add 90 degrees to the angle
	if angle < -45:
		angle = -(90 + angle)
	 
	# otherwise, just take the inverse of the angle to make
	# it positive
	else:
		angle = -angle

	# print("angle " + str(angle))

	# rotate the image to deskew it
	(h, w) = image.shape[:2]
	center = (w // 2, h // 2)
	M = cv2.getRotationMatrix2D(center, angle, 1.0)
	img = cv2.imread(PATH_ORIGINAL)
	(h, w) = img.shape[:2]
	rotated = cv2.warpAffine(img, M, (w, h),
		flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)

	# # draw the correction angle on the image so we can validate it
	# cv2.putText(rotated, "Angle: {:.2f} degrees".format(angle),
	# 	(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
	 
	# show the output image
	# print("[INFO] angle: {:.3f}".format(angle))
	# cv2.imshow("Input", image)
	# cv2.imshow("Rotated", rotated)
	# cv2.imwrite(PATH_ORIGINAL_ROTATED,rotated)
	# cv2.waitKey(0)
	#################################################################
	image = convertCv2ImageToPilImage(rotated)
	image.save(PATH_300_DPI_IMAGE,dpi=(300,300))

	results, image = east(convertPilImageToCv2Image(image))

	# print(results)
	######################################################################################################33\

	imgSkewed = Image.open(PATH_300_DPI_IMAGE)
	# imgSwt = pillowfight.swt(imgSkewed, output_type=pillowfight.SWT_OUTPUT_BW_TEXT)
	imgSkewed.save(PATH_SWT)

	cmd = "tesseract " + PATH_SWT + " test -l eng --psm 11 --oem 1" 

	returned_value = os.system(cmd)
	# os.system returns the exit code on unix
	text = open("test.txt", "r")
	text1 = str(text.read())
	print(text1)  # the file is already consumed; a second text.read() would return ''
	return (text1)
Example #48
0
def extract_rows_columns(gray_image):
    inverted = cv2.bitwise_not(gray_image)
    blurred = cv2.GaussianBlur(inverted, (5, 5), 0)

    height, width = gray_image.shape

    thresholded = cv2.threshold(blurred, 128, 255,
                                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # show_wait_destroy("extract_rows_columns", thresholded)
    # A vertical kernel of (1 X kernel_length), which will detect all the vertical lines in the image.
    vertical_kernel_height = math.ceil(height * 0.1)
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                                (1, vertical_kernel_height))

    # A horizontal kernel of (kernel_length X 1), which will help to detect all the horizontal lines in the image.
    horizontal_kernel_width = math.ceil(width * 0.3)
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                            (horizontal_kernel_width, 1))

    # Morphological operation to detect vertical lines from an image
    img_temp1 = cv2.erode(thresholded, verticle_kernel, iterations=3)
    # show_wait_destroy("extract_rows_columns", img_temp1)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)
    # show_wait_destroy("extract_rows_columns", verticle_lines_img)
    _, vertical_contours, _ = cv2.findContours(verticle_lines_img.copy(),
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)
    # Sort all the contours from left to right.
    (vertical_contours,
     vertical_bounding_boxes) = sort_contours(vertical_contours,
                                              method="left-to-right")

    filtered_vertical_bounding_boxes = list(
        filter(lambda x: vertical_boxes_filter(x, height),
               vertical_bounding_boxes))

    # Morphological operation to detect horizontal lines from an image
    img_temp2 = cv2.erode(thresholded, hori_kernel, iterations=3)

    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
    # show_wait_destroy("extract_rows_columns", horizontal_lines_img)
    _, horizontal_contours, _ = cv2.findContours(horizontal_lines_img.copy(),
                                                 cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_SIMPLE)

    horizontal_contours, horizontal_bounding_boxes = sort_contours(
        horizontal_contours, method="top-to-bottom")

    filtered_horizontal_bounding_boxes = list(
        filter(lambda x: horizontal_boxes_filter(x, width),
               horizontal_bounding_boxes))

    # if DEBUG:
    color_image = cv2.cvtColor(gray_image.copy(), cv2.COLOR_GRAY2BGR)
    cv2.drawContours(color_image, vertical_contours, -1, (0, 255, 0), 2)
    cv2.drawContours(color_image, horizontal_contours, -1, (255, 0, 0), 2)

    # for filtered_horizontal_bounding_box in filtered_horizontal_bounding_boxes:
    #     x,y,w,h = filtered_horizontal_bounding_box
    #     cv2.rectangle(color_image,(x,y),(x+w,y+h),(0,255,255),2)
    #
    # for filtered_vertical_bounding_box in filtered_vertical_bounding_boxes:
    #     x,y,w,h = filtered_vertical_bounding_box
    #     cv2.rectangle(color_image,(x,y),(x+w,y+h),(0,255,255),2)

    # show_wait_destroy("horizontal_vertical_contours", color_image)

    extracted_rows_columns = []

    for idx_h, horizontal_bounding_box in enumerate(
            filtered_horizontal_bounding_boxes):
        if idx_h == 0:
            continue
        # previous horizontal box
        hx_p, hy_p, hw_p, hh_p = filtered_horizontal_bounding_boxes[idx_h - 1]
        hx_c, hy_c, hw_c, hh_c = horizontal_bounding_box

        extracted_columns = []
        for idx_v, vertical_bounding_box in enumerate(
                filtered_vertical_bounding_boxes):
            if idx_v == 0:
                continue
            # previous horizontal box
            vx_p, vy_p, vw_p, vh_p = filtered_vertical_bounding_boxes[idx_v -
                                                                      1]
            vx_c, vy_c, vw_c, vh_c = vertical_bounding_box
            table_cell = gray_image[hy_p:hy_c + hh_c, vx_p:vx_c + vw_c]

            blurred = cv2.GaussianBlur(table_cell, (5, 5), 0)
            # cv2.rectangle(color_image,(vx_p,hy_p),(vx_c+vw_c,hy_c+hh_c),(255,0,0),2)

            thresholded = cv2.threshold(blurred, 128, 255,
                                        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

            im2, contours, hierarchy = cv2.findContours(
                thresholded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            contours = sorted(contours, key=cv2.contourArea, reverse=True)

            rect = cv2.minAreaRect(contours[0])
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            extracted = four_point_transform(
                table_cell.copy(),
                box.reshape(4, 2))[1:-1, 1:-1]  # remove 1 px from each side
            ret, extracted = cv2.threshold(extracted, 165, 255,
                                           cv2.THRESH_BINARY)
            extracted_columns.append(extracted)

            # cv2.drawContours(color_image, [contours[0]], -1, (0,255,0), 3)

        extracted_rows_columns.append(extracted_columns)

    # show_wait_destroy("horizontal_lines_img",color_image)
    return extracted_rows_columns
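
# sort_contours above matches the common imutils helper; a self-contained
# sketch returning contours and their bounding boxes in the requested order:
def sort_contours(cnts, method="left-to-right"):
    reverse = method in ("right-to-left", "bottom-to-top")
    axis = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    boxes = [cv2.boundingRect(c) for c in cnts]
    order = sorted(range(len(cnts)), key=lambda i: boxes[i][axis], reverse=reverse)
    return [cnts[i] for i in order], [boxes[i] for i in order]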
Example #49
0
image = cv2.imread('images/tankwa/8.jpg')
# convert the image to grayscale and flip the foreground
# and background to ensure foreground is now "white" and
# the background is "black"
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.bitwise_not(gray)
# threshold the image, setting all foreground pixels to
# 255 and all background pixels to 0
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# grab the (x, y) coordinates of all pixel values that
# are greater than zero, then use these coordinates to
# compute a rotated bounding box that contains all
# coordinates
coords = np.column_stack(np.where(thresh > 0))
angle = cv2.minAreaRect(coords)[-1]
# the `cv2.minAreaRect` function returns values in the
# range [-90, 0); as the rectangle rotates clockwise the
# returned angle trends to 0 -- in this special case we
# need to add 90 degrees to the angle
if angle < -45:
    angle = -(90 + angle)
# otherwise, just take the inverse of the angle to make
# it positive
else:
    angle = -angle

# rotate the image to deskew it
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h),
                         flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
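
# Worked examples of the [-90, 0) angle convention handled above:
def deskew_angle(a):
    return -(90 + a) if a < -45 else -a

assert deskew_angle(-3) == 3    # small clockwise skew, rotate back by +3
assert deskew_angle(-87) == -3  # nearly vertical rect, rotate by -3 instead of +87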
Example #50
0
def hi2():
    #import numpy as np
    
    img =  cv2.imread('image4.jpg')
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    im2 = cv2.GaussianBlur(gray, (3,3), 0)
    dimen=im2.shape

    #edges = cv2.Canny(im2, 75, 150)

    ret,thresh = cv2.threshold(im2,45 ,255,0)
    
    try:
        
       
        contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        for i in range(0,len(contours)):
            cnt = contours[i]
            
            M = cv2.moments(cnt)
            
            if(M["m00"] != 0 and cv2.contourArea(cnt)<18000 and cv2.contourArea(cnt)>150):
                
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                perimeter = cv2.arcLength(cnt,True)
                #epsilon = 0.1*perimeter
                rect = cv2.minAreaRect(cnt)
                box = cv2.boxPoints(rect)
                box = np.int0(box)  # integer corners so putText/line below accept them
                #print(list(a)[0],list(b)[0],list(c)[0],list(d)[0])
                
                listy = []
                for row in range(4):
                    inner_list = []
                    for col in range(2):
                        inner_list.append(box[row][col])

                    #print(inner_list[col][row])
                    print(inner_list[0])
                    print(inner_list[1])

                    cv2.putText(im2,str(int(inner_list[0]))+'.'+str(int(inner_list[1])),(inner_list[0], inner_list[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,255), 2, cv2.LINE_AA)

                    listy.append(inner_list)
                print("tjos/n",listy)    
                #box = np.int0([a][b][c][d])
                #box=[[k for row in list(a)] for column in range(4)]
                #print([box])
                
                #im = cv2.drawContours(im2,list_of_lists,0,(0,255,0),2)#[[a][b][c][d]]
                
                for i in range(4):
                    im=cv2.line(im2, tuple(listy[i]), tuple(listy[(i + 1) % 4]), (0,255,0), 2, cv2.LINE_AA, 0);

                #im =cv2.line(im2,(listy),(x2,y2),(0,0,255),2)


                

                print( (cX  , cY),"area: ",cv2.contourArea(cnt),"perimeter:",perimeter)
                cv2.circle(im, (cX, cY), 4, (0, 0, 255), -1)
                '''
                cv2.putText(im,str(cv2.contourArea(cnt)),(cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,255), 2, cv2.LINE_AA)
                '''
                fromCenter = False
                #ROIs = cv2.selectROIs('Select ROIs', im, showCrosshair=False,fromCenter)
                #im=im[int(dimen[0])-125:int(dimen[0])-25, int(dimen[1])-450:int(dimen[1])-50]

                #approx = cv2.approxPolyDP(cnt,epsilon,True)
                #print(approx,i)
                #cv2.drawContours(im, [cnt],-2,(0,255,0))

        im_ROI1=im[int(dimen[0])-125:int(dimen[0])-25, int(dimen[1])-680:int(dimen[1])-50]
        im_ROI2=im[int(dimen[0])-300:int(dimen[0])-100, int(dimen[1])-640:int(dimen[1])-80]

        print("1",int(dimen[0])-125,int(dimen[0])-25, int(dimen[1])-680,int(dimen[1])-50)
        print("2",int(dimen[0])-300,int(dimen[0])-100, int(dimen[1])-640,int(dimen[1])-80)



        while True:
            cv2.imshow('Features',im )
            cv2.imshow('Features0',im_ROI1 )
            cv2.imshow('Features1',im_ROI2 )
            if cv2.waitKey(1)& 0xFF == 27:
                break
        cv2.destroyAllWindows()
    except (KeyboardInterrupt, SystemExit):
        return 'exiting'
Example #51
0
def area_of_contour(contour):
    # return the area of the minimum-area (rotated) bounding rectangle
    rect = cv2.minAreaRect(contour)
    return rect[1][0] * rect[1][1]
Example #52
0
def get_bounding_rect(contour):
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    return np.int0(box)
Example #53
0
	def callback(self,data):
		#The below two functions convert a compressed image to an OpenCV image
		#'''
		# np_arr = np.fromstring(data.data, np.uint8)
		# cv_image = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
		#'''
		cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
		#Create copy of captured image
		img_cpy = cv_image.copy()
		#Color to HSV and Gray Scale conversion
		hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
		#gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

		#Red_Thresholds
		lower_red1 = np.array([0, 100, 100])
		upper_red1 = np.array([10, 255,255])
		lower_red2 = np.array([160,100,100])
		upper_red2 = np.array([179,255,255])
		#Blue Thresholds
		lower_blue = np.array([104,110,110])
		upper_blue = np.array([143,255,255])
		#Green Thresholds
		lower_green = np.array([60,60,46])
		upper_green = np.array([97,255,255])

#GREEN STUFF
		# Threshold the HSV image to get only single color portions
		mask2 = cv2.inRange(hsv, lower_green, upper_green)

		#Find contours(borders) for the shapes in the image
		#NOTE: OpenCV 3.x findContours returns three values; if you get a
		# "values to unpack" ValueError, change the following line to:
		# _, contours, hierarchy = cv2.findContours(mask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
		contours, hierarchy = cv2.findContours(mask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

		#Pass through each contour and check if it has required properties to classify into required object
		for x in range (len(contours)):
			contourarea = cv2.contourArea(contours[x]) #get area of contour
			if contourarea > 600: #Discard contours with a small area as this may just be noise
				#The below 2 functions help you to approximate the contour to a nearest polygon
				arclength = cv2.arcLength(contours[x], True)
				approxcontour = cv2.approxPolyDP(contours[x], 0.02 * arclength, True)
				#Find the coordinates of the polygon with respect to the camera frame in pixels
				rect_cordi = cv2.minAreaRect(contours[x])
				obj_x = int(rect_cordi[0][0])
				obj_y = int(rect_cordi[0][1])

				#Check for Square
				if len(approxcontour) == 4:
					#print ('Length ', len(approxcontour))
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((4,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*square_side_lenth)/LongestSide #focal length x Actual Border width / size of Border in pixels
					Green_Id = 122
				# v-----------------------v added by team 7 v-----------------------v
				#Check for Triangle
				elif len(approxcontour) == 3:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((3,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*triangle_side_length)/LongestSide #focal length x Actual Border width / size of Border in pixels
					Green_Id = 123
				#Check for Star
				elif len(approxcontour) == 8:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((8,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*star_side_length)/LongestSide #focal length x Actual Border width / size of Border in pixels
					Green_Id = 121
				# ^-----------------------^ added by team 7 ^-----------------------^
				#Move to next Contour (no matching shape)
				else:
					continue

				#Calculate coordinates w.r.t. the camera, convert to map
				#coordinates and publish the message for storing
				#(319.5, 239.5) = image centre
				obj_cam_x = ((obj_x - 319.5)*Distance)/focal_leng
				obj_cam_y = ((obj_y - 239.5)*Distance)/focal_leng

				#convert the x,y in camera frame to a geometric stamped point
				P = PointStamped()
				P.header.stamp = rospy.Time(0) # was .now() - rospy.Time(23); changed by team 7
				#print ('time: ', data.header.stamp)
				P.header.frame_id = 'camera_rgb_optical_frame'
				P.point.x = obj_cam_x
				P.point.y = obj_cam_y
				P.point.z = Distance

				#Transform Point into map coordinates
				# trans_pt = self.tl.transformPoint('/map', P)

				#fill in the publisher object to publish
				obj_info_pub = object_loc()
				obj_info_pub.ID = Green_Id
				obj_info_pub.point.x = 0
				obj_info_pub.point.y = 0
				obj_info_pub.point.z = 0

				#publish the message
				self.object_location_pub.publish(obj_info_pub)

# RED STUFF 1
		# Threshold the HSV image to get only single color portions
		mask2 = cv2.inRange(hsv, lower_red1, upper_red1)

		#Find contours (borders) for the shapes in the image
		#NOTE: on OpenCV 3.x findContours returns three values; if the line
		# below raises "ValueError: too many values to unpack", change it to:
		# _, contours, hierarchy = cv2.findContours(mask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
		contours, hierarchy = cv2.findContours(mask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

		#Pass through each contour and check whether it has the required properties to classify it
		for x in range(len(contours)):
			contourarea = cv2.contourArea(contours[x]) #get area of contour
			if contourarea > 600: #Discard small contours that are likely just noise
				#Approximate the contour with the nearest polygon
				arclength = cv2.arcLength(contours[x], True)
				approxcontour = cv2.approxPolyDP(contours[x], 0.02 * arclength, True)
				#Find the centre of the polygon in pixels, with respect to the camera frame
				rect_cordi = cv2.minAreaRect(contours[x])
				obj_x = int(rect_cordi[0][0])
				obj_y = int(rect_cordi[0][1])

				#Check for Square
				if len(approxcontour) == 4:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((4,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*square_side_lenth)/LongestSide #focal length x actual side length / side length in pixels
					Red_Id = 112
				# v----- added by team 7 -----v
				#Check for Triangle
				elif len(approxcontour) == 3:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((3,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*triangle_side_length)/LongestSide
					Red_Id = 113
				#Check for Star
				elif len(approxcontour) == 8:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((8,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*star_side_length)/LongestSide
					Red_Id = 111
				# ^----- added by team 7 -----^
				else:
					#Shape not recognised; move to the next contour
					continue

				#Calculate coordinates w.r.t. the camera, convert to map
				#coordinates and publish the message for storing
				#(319.5, 239.5) = image centre
				obj_cam_x = ((obj_x - 319.5)*Distance)/focal_leng
				obj_cam_y = ((obj_y - 239.5)*Distance)/focal_leng

				#convert the x,y in camera frame to a geometric stamped point
				P = PointStamped()
				P.header.stamp = rospy.Time(0) # was .now() - rospy.Time(23); changed by team 7
				#print ('time: ', data.header.stamp)
				P.header.frame_id = 'camera_rgb_optical_frame'
				P.point.x = obj_cam_x
				P.point.y = obj_cam_y
				P.point.z = Distance

				#Transform Point into map coordinates
				# trans_pt = self.tl.transformPoint('/map', P)

				#fill in the publisher object to publish
				obj_info_pub = object_loc()
				obj_info_pub.ID = Red_Id
				obj_info_pub.point.x = 0
				obj_info_pub.point.y = 0
				obj_info_pub.point.z = 0

				#publish the message
				self.object_location_pub.publish(obj_info_pub)

# RED STUFF 2
		# Threshold the HSV image to get only single color portions
		mask2 = cv2.inRange(hsv, lower_red2, upper_red2)

		#Find contours (borders) for the shapes in the image
		#NOTE: on OpenCV 3.x findContours returns three values; if the line
		# below raises "ValueError: too many values to unpack", change it to:
		# _, contours, hierarchy = cv2.findContours(mask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
		contours, hierarchy = cv2.findContours(mask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

		#Pass through each contour and check whether it has the required properties to classify it
		for x in range(len(contours)):
			contourarea = cv2.contourArea(contours[x]) #get area of contour
			if contourarea > 600: #Discard small contours that are likely just noise
				#Approximate the contour with the nearest polygon
				arclength = cv2.arcLength(contours[x], True)
				approxcontour = cv2.approxPolyDP(contours[x], 0.02 * arclength, True)
				#Find the centre of the polygon in pixels, with respect to the camera frame
				rect_cordi = cv2.minAreaRect(contours[x])
				obj_x = int(rect_cordi[0][0])
				obj_y = int(rect_cordi[0][1])

				#Check for Square
				if len(approxcontour) == 4:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((4,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*square_side_lenth)/LongestSide #focal length x actual side length / side length in pixels
					Red_Id = 112
				# v----- added by team 7 -----v
				#Check for Triangle
				elif len(approxcontour) == 3:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((3,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*triangle_side_length)/LongestSide
					Red_Id = 113
				#Check for Star
				elif len(approxcontour) == 8:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((8,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*star_side_length)/LongestSide
					Red_Id = 111
				# ^----- added by team 7 -----^
				else:
					#Shape not recognised; move to the next contour
					continue

				#Calculate coordinates w.r.t. the camera, convert to map
				#coordinates and publish the message for storing
				#(319.5, 239.5) = image centre
				obj_cam_x = ((obj_x - 319.5)*Distance)/focal_leng
				obj_cam_y = ((obj_y - 239.5)*Distance)/focal_leng

				#convert the x,y in camera frame to a geometric stamped point
				P = PointStamped()
				P.header.stamp = rospy.Time(0) # was .now() - rospy.Time(23); changed by team 7
				#print ('time: ', data.header.stamp)
				P.header.frame_id = 'camera_rgb_optical_frame'
				P.point.x = obj_cam_x
				P.point.y = obj_cam_y
				P.point.z = Distance

				#Transform Point into map coordinates
				# trans_pt = self.tl.transformPoint('/map', P)

				#fill in the publisher object to publish
				obj_info_pub = object_loc()
				obj_info_pub.ID = Red_Id
				obj_info_pub.point.x = 0
				obj_info_pub.point.y = 0
				obj_info_pub.point.z = 0

				#publish the message
				self.object_location_pub.publish(obj_info_pub)

#BLUE STUFF
		# Threshold the HSV image to get only single color portions
		mask2 = cv2.inRange(hsv, lower_blue, upper_blue)

		#Find contours (borders) for the shapes in the image
		#NOTE: on OpenCV 3.x findContours returns three values; if the line
		# below raises "ValueError: too many values to unpack", change it to:
		# _, contours, hierarchy = cv2.findContours(mask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
		contours, hierarchy = cv2.findContours(mask2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

		#Pass through each contour and check whether it has the required properties to classify it
		for x in range(len(contours)):
			contourarea = cv2.contourArea(contours[x]) #get area of contour
			if contourarea > 600: #Discard small contours that are likely just noise
				#Approximate the contour with the nearest polygon
				arclength = cv2.arcLength(contours[x], True)
				approxcontour = cv2.approxPolyDP(contours[x], 0.02 * arclength, True)
				#Find the centre of the polygon in pixels, with respect to the camera frame
				rect_cordi = cv2.minAreaRect(contours[x])
				obj_x = int(rect_cordi[0][0])
				obj_y = int(rect_cordi[0][1])

				#Check for Square
				if len(approxcontour) == 4:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((4,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*square_side_lenth)/LongestSide #focal length x actual side length / side length in pixels
					Blue_Id = 132
				# v----- added by team 7 -----v
				#Check for Triangle
				elif len(approxcontour) == 3:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((3,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*triangle_side_length)/LongestSide
					Blue_Id = 133
				#Check for Star
				elif len(approxcontour) == 8:
					cv2.drawContours(cv_image,[approxcontour],0,(0,255,255),2)
					approxcontour = approxcontour.reshape((8,2))
					LongestSide = Longest_Length(approxcontour)
					Distance = (focal_leng*star_side_length)/LongestSide
					Blue_Id = 131
				# ^----- added by team 7 -----^
				else:
					#Shape not recognised; move to the next contour
					continue

				#Calculate coordinates w.r.t. the camera, convert to map
				#coordinates and publish the message for storing
				#(319.5, 239.5) = image centre
				obj_cam_x = ((obj_x - 319.5)*Distance)/focal_leng
				obj_cam_y = ((obj_y - 239.5)*Distance)/focal_leng

				#convert the x,y in camera frame to a geometric stamped point
				P = PointStamped()
				P.header.stamp = rospy.Time(0) # was .now() - rospy.Time(23); changed by team 7
				#print ('time: ', data.header.stamp)
				P.header.frame_id = 'camera_rgb_optical_frame'
				P.point.x = obj_cam_x
				P.point.y = obj_cam_y
				P.point.z = Distance

				#Transform Point into map coordinates
				# trans_pt = self.tl.transformPoint('/map', P)

				#fill in the publisher object to publish
				obj_info_pub = object_loc()
				obj_info_pub.ID = Blue_Id
				obj_info_pub.point.x = 0
				obj_info_pub.point.y = 0
				obj_info_pub.point.z = 0

				#publish the message
				self.object_location_pub.publish(obj_info_pub)



		#Display the captured image
		cv2.imshow("Image",cv_image)
		#cv2.imshow("HSV", hsv)
		cv2.waitKey(1)
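
The four color blocks above repeat the same detect/classify/publish logic. As a hedged refactor sketch (Longest_Length, focal_leng, object_loc and the side-length constants are assumed from this node; unlike the blocks above, it publishes the computed camera-frame coordinates rather than zero placeholders):

	def detect_and_publish(self, cv_image, hsv, lower, upper, id_table):
		#id_table maps polygon vertex count -> (object ID, real side length)
		mask = cv2.inRange(hsv, lower, upper)
		contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		for cnt in contours:
			if cv2.contourArea(cnt) <= 600:
				continue  #discard small, likely-noise contours
			approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
			if len(approx) not in id_table:
				continue  #shape not recognised
			obj_id, side_len = id_table[len(approx)]
			cv2.drawContours(cv_image, [approx], 0, (0, 255, 255), 2)
			longest = Longest_Length(approx.reshape((len(approx), 2)))
			distance = (focal_leng * side_len) / longest
			(cx, cy), _, _ = cv2.minAreaRect(cnt)  #polygon centre in pixels
			msg = object_loc()
			msg.ID = obj_id
			msg.point.x = ((cx - 319.5) * distance) / focal_leng
			msg.point.y = ((cy - 239.5) * distance) / focal_leng
			msg.point.z = distance
			self.object_location_pub.publish(msg)

Each color then becomes a single call, e.g. self.detect_and_publish(cv_image, hsv, lower_green, upper_green, {4: (122, square_side_lenth), 3: (123, triangle_side_length), 8: (121, star_side_length)}).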
예제 #54
0
def fcenet_decode(preds,
                  fourier_degree,
                  num_reconstr_points,
                  scale,
                  alpha=1.0,
                  beta=2.0,
                  text_repr_type='poly',
                  score_thr=0.3,
                  nms_thr=0.1):
    """Decoding predictions of FCENet to instances.

    Args:
        preds (list(Tensor)): The head output tensors.
        fourier_degree (int): The maximum Fourier transform degree k.
        num_reconstr_points (int): The number of points on the polygon
            reconstructed from the predicted Fourier coefficients.
        scale (int): The down-sample scale of the prediction.
        alpha (float): Exponent used to calculate the final score:
            Score_final = (Score_text_region ** alpha)
            * (Score_text_center_region ** beta).
        beta (float): The other exponent of the final score.
        text_repr_type (str):  Boundary encoding type 'poly' or 'quad'.
        score_thr (float) : The threshold used to filter out the final
            candidates.
        nms_thr (float) :  The threshold of nms.

    Returns:
        boundaries (list[list[float]]): The instance boundary and confidence
            list.
    """
    assert isinstance(preds, list)
    assert len(preds) == 2
    assert text_repr_type in ['poly', 'quad']

    cls_pred = preds[0][0]
    tr_pred = cls_pred[0:2].softmax(dim=0).data.cpu().numpy()
    tcl_pred = cls_pred[2:].softmax(dim=0).data.cpu().numpy()

    reg_pred = preds[1][0].permute(1, 2, 0).data.cpu().numpy()
    x_pred = reg_pred[:, :, :2 * fourier_degree + 1]
    y_pred = reg_pred[:, :, 2 * fourier_degree + 1:]

    score_pred = (tr_pred[1]**alpha) * (tcl_pred[1]**beta)
    tr_pred_mask = (score_pred) > score_thr
    tr_mask = fill_hole(tr_pred_mask)

    tr_contours, _ = cv2.findContours(
        tr_mask.astype(np.uint8), cv2.RETR_TREE,
        cv2.CHAIN_APPROX_SIMPLE)  # opencv4

    mask = np.zeros_like(tr_mask)
    boundaries = []
    for cont in tr_contours:
        deal_map = mask.copy().astype(np.int8)
        cv2.drawContours(deal_map, [cont], -1, 1, -1)

        score_map = score_pred * deal_map
        score_mask = score_map > 0
        xy_text = np.argwhere(score_mask)
        dxy = xy_text[:, 1] + xy_text[:, 0] * 1j

        x, y = x_pred[score_mask], y_pred[score_mask]
        c = x + y * 1j
        c[:, fourier_degree] = c[:, fourier_degree] + dxy
        c *= scale

        polygons = fourier2poly(c, num_reconstr_points)
        score = score_map[score_mask].reshape(-1, 1)
        polygons = poly_nms(np.hstack((polygons, score)).tolist(), nms_thr)

        boundaries = boundaries + polygons

    boundaries = poly_nms(boundaries, nms_thr)

    if text_repr_type == 'quad':
        new_boundaries = []
        for boundary in boundaries:
            poly = np.array(boundary[:-1]).reshape(-1, 2).astype(np.float32)
            score = boundary[-1]
            points = cv2.boxPoints(cv2.minAreaRect(poly))
            points = np.int0(points)
            new_boundaries.append(points.reshape(-1).tolist() + [score])
        boundaries = new_boundaries  # return the quads; without this they were silently discarded

    return boundaries
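
A minimal usage sketch, assuming fill_hole, fourier2poly and poly_nms are importable from this module and torch is installed; the dummy shapes follow from how the function slices preds ((1, 4, H, W) for the classification head, (1, 4*k + 2, H, W) for the regression head):

import torch

k = 5  # fourier_degree
cls_out = torch.randn(1, 4, 80, 80)          # 2 text-region + 2 text-center channels
reg_out = torch.randn(1, 4 * k + 2, 80, 80)  # (2k+1) x- and (2k+1) y-coefficients
boundaries = fcenet_decode([cls_out, reg_out], fourier_degree=k,
                           num_reconstr_points=50, scale=8)
# each entry is [x1, y1, ..., xn, yn, score]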
예제 #55
0
def split_text_line2(line, step, img=None):
    """
    按照 minAreaRect 对文本进行划分
    :param line:
        (x1,y1,x2,y2,x3,y3,x4,y4)
        矩形四点坐标的顺序: left-top, right-top, right-bottom, left-bottom
    :return: [(anchor_xmin, anchor_ymin, anchor_xmax, anchor_ymax)]
    """
    global global_k_is_none_count
    if DEBUG:
        img = draw_four_vectors(img, line)

    xmin, ymin, xmax, ymax = get_ltrb(line)
    width = xmax - xmin
    height = ymax - ymin

    if height > MAX_HEIGHT_WIDTH_SCALE * width:
        return []

    anchor_count = int(math.ceil(width / step))

    if DEBUG:
        img = draw_bounding_box(img, (xmin, ymin, xmax, ymax))

    rect = cv2.minAreaRect(
        np.asarray([[line[0], line[1]], [line[2], line[3]], [line[4], line[5]],
                    [line[6], line[7]]]))
    # get the four corner points of the minimum rotated rect
    box = cv2.boxPoints(rect)
    box = get_clockwise(box)

    if DEBUG:
        img = draw_four_vectors(img,
                                (box[0][0], box[0][1], box[1][0], box[1][1],
                                 box[2][0], box[2][1], box[3][0], box[3][1]),
                                color=(255, 55, 55))

    p1 = Point(box[0][0], box[0][1])
    p2 = Point(box[1][0], box[1][1])
    p3 = Point(box[2][0], box[2][1])
    p4 = Point(box[3][0], box[3][1])

    l1 = Line(p1, p2)
    l2 = Line(p2, p3)
    l3 = Line(p3, p4)
    l4 = Line(p4, p1)
    lines = [l1, l2, l3, l4]

    if l1.k is None:
        global_k_is_none_count += 1
        print("l1.k is None:", p1, p2, p3, p4)
        return []

    splited_lines = []
    for i in range(anchor_count):
        anchor_xmin = i * step + xmin
        anchor_xmax = anchor_xmin + step - 1
        anchor_ymin = ymin
        anchor_ymax = ymax

        # vertical lines (perpendicular to the X axis) bounding the anchor
        left_line = Line(Point(anchor_xmin, 0), Point(anchor_xmin, height))
        right_line = Line(Point(anchor_xmax, 0), Point(anchor_xmax, height))

        left_cross_pnts = [left_line.cross(l) for l in lines]
        right_cross_pnts = [right_line.cross(l) for l in lines]

        if l1.k < 0:
            if l1.contain(right_cross_pnts[0]):
                anchor_ymin = right_cross_pnts[0].y

            if l4.contain(right_cross_pnts[3]):
                anchor_ymax = right_cross_pnts[3].y

            if l3.contain(left_cross_pnts[2]):
                anchor_ymax = left_cross_pnts[2].y

            if l2.contain(left_cross_pnts[1]):
                anchor_ymin = left_cross_pnts[1].y

        if l1.k > 0:
            if l4.contain(right_cross_pnts[3]):
                anchor_ymin = right_cross_pnts[3].y

            if l3.contain(right_cross_pnts[2]):
                anchor_ymax = right_cross_pnts[2].y

            if l1.contain(left_cross_pnts[0]):
                anchor_ymin = left_cross_pnts[0].y

            if l2.contain(left_cross_pnts[1]):
                anchor_ymax = left_cross_pnts[1].y

        if anchor_ymax - anchor_ymin <= MIN_TEXT_HEIGHT:
            continue

        if DEBUG:
            img = draw_bounding_box(
                img, (anchor_xmin, anchor_ymin, anchor_xmax, anchor_ymax),
                (0, 0, 255))
            cv2.imshow('test', img)
            cv2.waitKey()

        splited_lines.append(
            (anchor_xmin, anchor_ymin, anchor_xmax, anchor_ymax))

    if DEBUG:
        cv2.imshow('test', img)
        cv2.waitKey()

    return splited_lines
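
A usage sketch under this module's own assumptions (get_ltrb, Point, Line and the MIN_TEXT_HEIGHT / MAX_HEIGHT_WIDTH_SCALE constants are defined elsewhere in the file); the quad is a slightly slanted text line and step is the anchor width:

line = (10, 20, 210, 30, 212, 62, 12, 52)  # lt, rt, rb, lb corners as x,y pairs
for xmin, ymin, xmax, ymax in split_text_line2(line, step=16):
    print(xmin, ymin, xmax, ymax)  # one fixed-width anchor per vertical slice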
예제 #56
0
def draw_circle(xy,r,color):
	cv2.circle(resize, (int(xy[0]), int(xy[1])) , r, color, -1)

def mid_point(a,b):
	return ((a[0] + b[0]) / 2 , (a[1] + b[1]) / 2)
 

def draw_line(a,b):
	cv2.line(resize, (int(a[0]),int(a[1])), (int(b[0]), int(b[1])), (255,0,0), thickness=2)

def distance(a,b):
	return int(dist.euclidean(a,b))


for c in cnts:
    box = cv2.minAreaRect(c)
    box = cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    box = Helpers.orders(box)
    (tl, tr, br, bl) = box

    cv2.drawContours(resize, [box.astype("int")], -1, (0, 0, 255), 2)

    for xy in box:
        draw_circle(xy, 4, (0, 255, 0))

    tltr = mid_point(tl, tr)
    tlbl = mid_point(tl, bl)
    trbr = mid_point(tr, br)
    blbr = mid_point(bl, br)
예제 #57
0
파일: debug.py 프로젝트: minhnd3796/fti_id
def getMinRectangles(input_binary_img):
    # OpenCV 3.x findContours returns (image, contours, hierarchy)
    _, contours, _ = cv2.findContours(input_binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE, offset=(0, 0))
    min_rects = [cv2.minAreaRect(cnt) for cnt in contours]
    return contours, min_rects
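
A hedged usage sketch (binary_img stands for any 8-bit single-channel mask you already have): each rotated rect can be drawn by converting it to corner points with cv2.boxPoints.

contours, min_rects = getMinRectangles(binary_img)
vis = cv2.cvtColor(binary_img, cv2.COLOR_GRAY2BGR)
for rect in min_rects:
    corners = np.int0(cv2.boxPoints(rect))  # ((cx, cy), (w, h), angle) -> 4 corners
    cv2.drawContours(vis, [corners], 0, (0, 0, 255), 2)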
예제 #58
0
    def PoseEstimator(self, frame):
        # convert the frame to grayscale, blur it, and detect edges
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (7, 7), 0)
        edged = cv2.Canny(blurred, 50, 150)
        # find contours in the edge map
        # print cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
        Area = 0
        # loop over the contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.01 * peri, True)

            # ensure that the approximated contour is "roughly" rectangular
            if len(approx) >= 4 and len(approx) <= 6:
                # compute the bounding box of the approximated contour and
                # use the bounding box to compute the aspect ratio
                (x1, y1, w, h) = cv2.boundingRect(approx)
                # print x1
                # print y1

                aspectRatio = w / float(h)

                # compute the solidity of the original contour
                area = cv2.contourArea(c)
                Area = max(area, Area)  # track the largest contour area seen so far

                hullArea = cv2.contourArea(cv2.convexHull(c))
                solidity = area / float(hullArea)

                # compute whether or not the width and height, solidity, and
                # aspect ratio of the contour falls within appropriate bounds
                keepDims = w > 25 and h > 25
                keepSolidity = solidity > 0.9
                keepAspectRatio = aspectRatio >= 0.8 and aspectRatio <= 1.2

                # ensure that the contour passes all our tests
                if keepDims and keepSolidity and keepAspectRatio:
                    # draw an outline around the target and update the status
                    # text
                    cv2.drawContours(frame, [approx], -1, (0, 0, 255), 4)
                    status = "Target(s) Acquired"
                    # This will give you the Pixel location of the rectangular box
                    rc = cv2.minAreaRect(approx[:])
                    # print rc,'rc'
                    box = cv2.boxPoints(rc)
                    pt = []
                    for p in box:
                        val = (p[0], p[1])
                        pt.append(val)
                        # print pt,'pt'
                        cv2.circle(frame, val, 5, (200, 0, 0), 2)
                    #print pt
                    M = cv2.moments(approx)
                    (cX, cY) = (int(M["m10"] / M["m00"]),
                                int(M["m01"] / M["m00"]))
                    (startX, endX) = (int(cX - (w * 0.15)),
                                      int(cX + (w * 0.15)))
                    (startY, endY) = (int(cY - (h * 0.15)),
                                      int(cY + (h * 0.15)))
                    cv2.line(frame, (startX, cY), (endX, cY), (0, 0, 255), 3)
                    cv2.line(frame, (cX, startY), (cX, endY), (0, 0, 255), 3)
                    #print "cX", cX
                    cv2.putText(frame, str(cX), (cX, cY),
                                cv2.FONT_HERSHEY_SIMPLEX, 2, 100)

                    # 2D image points; if you change the image, change this
                    # vector as well
                    image_points = np.array([pt], dtype="double")
                    # print image_points
                    size = frame.shape
                    # 3D model points: the four corners of a 13.6-unit square,
                    # with the model origin at the first corner
                    model_points = np.array([
                        (0.0, 0.0, 0.0),    # first corner (origin)
                        (0.0, 13.6, 0.0),   # second corner
                        (13.6, 13.6, 0.0),  # third corner
                        (13.6, 0.0, 0.0),   # fourth corner
                    ])

                    # Camera intrinsic parameters

                    focal_length = size[1]
                    center = (size[1] / 2, size[0] / 2)
                    camera_matrix = np.array(
                        [[focal_length, 0, center[0]],
                         [0, focal_length, center[1]], [0, 0, 1]],
                        dtype="double")

                    # print "Camera Matrix :\n {0}".format(camera_matrix)
                    criteria = (cv2.TERM_CRITERIA_EPS +
                                cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

                    dist_coeffs = np.zeros(
                        (4, 1))  # Assuming no lens distortion
                    (success, rotation_vector,
                     translation_vector) = cv2.solvePnP(
                         model_points, image_points, camera_matrix,
                         dist_coeffs, criteria)

                    rotationMatrix = np.zeros((3, 3), np.float32)

                    # print "Rotation Vector:\n {0}".format(rotation_vector)
                    print "Translation Vector:\n {0}".format(
                        translation_vector)
                    print translation_vector[2]
                    cv2.putText(frame, str(round(translation_vector[2],
                                                 2)), (cX, cY + 50),
                                cv2.FONT_HERSHEY_SIMPLEX, 2, 100)
                    cv2.imshow("Image window", frame)

                    # Rodrigues converts the rotation vector into a rotation matrix
                    #print rotation_vector
                    dst, jacobian = cv2.Rodrigues(rotation_vector)
                    #print "Rotation matrix:\n {0}".format(dst)
                    # ZYX Euler extraction: roll = atan2(R[2,1], R[2,2]),
                    # yaw = atan2(R[1,0], R[0,0])
                    A = math.atan2(dst[2, 1], dst[2, 2])
                    B = math.atan2(dst[1, 0], dst[0, 0])

                    if success:
                        #print cX
                        self.twist.linear.x = self.translate(
                            translation_vector[2], 45, 200, -.1, 1)
                        self.twist.linear.y = 0
                        self.twist.linear.z = 0
                        self.twist.angular.x = 0
                        self.twist.angular.y = 0
                        self.twist.angular.z = self.translate(
                            cX, 50, 600, -1, 1)

                        #print "Twist values are " , self.twist
                        try:
                            self.twist_pub.publish(self.twist)
                            print self.twist
                        except:
                            print "failed to publish"

                        # Euler angles (degrees)
                        Theta_x = math.degrees(A)  # roll
                        Theta_z = math.degrees(B)  # yaw
                        #print "Roll axis:\n {0}".format(Theta_x)
                        #print "Yaw axis:\n {0}".format(Theta_z)
                    cv2.imshow("Pose estimator", frame)
                    cv2.waitKey(1)
예제 #59
0
    def solidity(self, contour):
        # ratio of the contour area to the area of its minimum rotated rect
        w, h = cv2.minAreaRect(contour)[1]
        area = w * h
        contourArea = cv2.contourArea(contour)
        return contourArea / area
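
Strictly, this measures rectangularity; the textbook solidity divides by the convex-hull area instead. A hedged sketch of that variant (the helper name hull_solidity is hypothetical):

    def hull_solidity(self, contour):
        # classic solidity: contour area relative to its convex hull area
        hull_area = cv2.contourArea(cv2.convexHull(contour))
        return cv2.contourArea(contour) / hull_area if hull_area else 0.0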
예제 #60
0
# a capture loop is assumed around this fragment; h_low..v_hi are HSV
# threshold values (e.g. from trackbars) defined earlier
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    frame = np.array(np.flip(frame, 1))  # mirror the frame horizontally
    frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
    blur = cv2.GaussianBlur(frame, (5, 5), 0)
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    ### Color Thresholds
    mask = cv2.inRange(hsv, np.array([h_low, s_low, v_low]),
                       np.array([h_hi, s_hi, v_hi]))
    ### Change Here
    res = cv2.bitwise_and(frame, frame, mask=mask)

    im, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
    if contours is not None and len(contours) > 0:
        # pick the contour whose minimum-area rect covers the largest w * h
        cnt = max(
            contours,
            key=lambda x: cv2.minAreaRect(x)[1][0] * cv2.minAreaRect(x)[1][1])
        rect = cv2.minAreaRect(cnt)
        boxpts = cv2.boxPoints(rect)
        box = np.int0(boxpts)
        cv2.drawContours(frame, [box], 0, (0, 0, 255), 5)
        for corner in boxpts:
            # boxPoints returns float coordinates; cv2.circle needs ints
            cv2.circle(frame, (int(corner[0]), int(corner[1])), 10, (0, 0, 255), -1)
    cv2.imshow('contours', np.hstack((frame, res)))
    #else:
    #   cv2.imshow('contours', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()