# NOTE(review): this looks like a stray module-level duplicate of
# ImageProcessing.__init__ defined further down in this file (paste
# residue) — confirm against version control and remove.
def __init__(self):
    # Collaborators used by the displacement pipeline.
    self.scaleDetector = ScaleDetector()
    self.cornerDetector = CornerDetector()
    self.leastSquaresSolver = LeastSquaresSolver()
    self.overlayProcessor = OverlayProcessor()
    # Empirical percentiles of the residual-error distribution; used by
    # _convertToQualityScore to map a raw error onto a 0..1 score.
    self.percentile50 = 0.4321768046926515
    self.percentile80 = 0.9881273553137421
# NOTE(review): this looks like a stray module-level duplicate of
# Camera.__init__ defined further down in this file (paste residue) —
# confirm against version control and remove.
def __init__(self):
    # Hardware access plus the detection/positioning collaborators.
    self.camera = CameraAccessor()
    self.drawingExtractor = DrawingExtractor()
    self.contourExtractor = ContourExtractor()
    self.cornerDetector = CornerDetector()
    self.sideDetector = SideDetector()
    self.positionner = Positionner()
class ImageProcessing:
    """Estimates the displacement between 'before' and 'after' image pairs.

    Combines scale detection, corner detection and a least-squares fit to
    compute a transform per view (left/right), an overlay visualization,
    and a quality score in [0, 1] derived from the translation residuals.
    """

    def __init__(self):
        self.scaleDetector = ScaleDetector()
        self.cornerDetector = CornerDetector()
        self.leastSquaresSolver = LeastSquaresSolver()
        self.overlayProcessor = OverlayProcessor()
        # Empirical percentiles of the residual-error distribution; errors at
        # or below the 50th percentile score 1.0, at or above the 80th score 0.0.
        self.percentile50 = 0.4321768046926515
        self.percentile80 = 0.9881273553137421

    def determineDisplacement(self, beforeLeft, beforeRight, afterLeft, afterRight):
        """Return displacement results for a before/after image pair.

        Arguments:
            beforeLeft, beforeRight -- images captured before the displacement.
            afterLeft, afterRight -- images captured after the displacement.

        Returns:
            (left, right, overlayImageLeft, overlayImageRight, qualityScore)
            where left/right are the least-squares solutions per view and
            qualityScore is the mean of both views' quality scores.
        """
        # (scale, leftMostPoint) for each of the four input images.
        scaleBL = self.scaleDetector.detectScaleAndLeftMostPoint(beforeLeft)
        scaleAL = self.scaleDetector.detectScaleAndLeftMostPoint(afterLeft)
        scaleBR = self.scaleDetector.detectScaleAndLeftMostPoint(beforeRight)
        scaleAR = self.scaleDetector.detectScaleAndLeftMostPoint(afterRight)

        # Corner pairs; the boolean selects left-view (True) vs right-view
        # (False) corner handling.
        pointsBL = self.cornerDetector.detectCorners(beforeLeft, True)
        pointsAL = self.cornerDetector.detectCorners(afterLeft, True)
        pointsBR = self.cornerDetector.detectCorners(beforeRight, False)
        pointsAR = self.cornerDetector.detectCorners(afterRight, False)

        # Convert pixel corners to millimetre coordinates relative to each
        # image's left-most point so the two poses share a common frame.
        realBL = self._convertToReal(pointsBL, scaleBL)
        realAL = self._convertToReal(pointsAL, scaleAL)
        realBR = self._convertToReal(pointsBR, scaleBR)
        realAR = self._convertToReal(pointsAR, scaleAR)

        left = self.leastSquaresSolver.solve(np.array(realBL), np.array(realAL))
        right = self.leastSquaresSolver.solve(np.array(realBR), np.array(realAR))

        # Overlay visualisations aligned on each view's left-most point.
        overlayImageLeft = self.overlayProcessor.overlay(
            beforeLeft, afterLeft, scaleBL[1], scaleAL[1])
        overlayImageRight = self.overlayProcessor.overlay(
            beforeRight, afterRight, scaleBR[1], scaleAR[1])

        # L1 norm of the translation residuals as the per-view error.
        errorLeft = np.abs(left[1][0]) + np.abs(left[1][1])
        errorRight = np.abs(right[1][0]) + np.abs(right[1][1])
        print("Error is: " + str(errorLeft) + ", " + str(errorRight))

        qualityScoreLeft = self._convertToQualityScore(errorLeft)
        qualityScoreRight = self._convertToQualityScore(errorRight)
        print("Quality Score: " + str((qualityScoreLeft + qualityScoreRight) / 2))

        return (left, right, overlayImageLeft, overlayImageRight,
                (qualityScoreLeft + qualityScoreRight) / 2)

    def _convertToQualityScore(self, value):
        """Map an error value onto [0, 1] by linear interpolation between
        the 50th percentile (-> 1.0) and the 80th percentile (-> 0.0)."""
        qualityScore = (value - self.percentile50) / (
            np.abs(self.percentile50 - self.percentile80))
        return 1 - min(1, max(0, qualityScore))

    def _convertToReal(self, corners, scaleAndLeftMost):
        """Convert pixel corners to mm coordinates relative to the left-most point.

        scaleAndLeftMost is the (scale, leftMostPoint) pair produced by
        detectScaleAndLeftMostPoint.
        """
        corners = list(corners)
        print("Corners: ", corners)
        print("Scale and left most: " + str(scaleAndLeftMost))
        scale, leftMostPoint = scaleAndLeftMost
        print("Left most: " + str(leftMostPoint))
        print("Scale: " + str(scale))
        # Comprehension replaces the original map(lambda ...) for readability.
        return [
            self._convertToMM(
                self._convertCornerToCordsRelativeToLeftMostPoint(
                    corner, leftMostPoint),
                scale)
            for corner in corners]

    def _convertCornerToCordsRelativeToLeftMostPoint(self, corner, leftMostPoint):
        """Translate a corner so the left-most point becomes the origin."""
        return (corner[0] - leftMostPoint[0], corner[1] - leftMostPoint[1])

    def _convertToMM(self, coordinates, scale):
        """Convert pixel coordinates to millimetres (scale is pixels per mm)."""
        return (coordinates[0] / scale, coordinates[1] / scale)
class Camera:
    """High-level camera interface: extracts drawing contours and computes
    the robot pose from detected field corners (legacy OpenCV `cv` API).

    NOTE(review): converted Python-2 print statements to print() calls for
    consistency with ImageProcessing in this file; if the project still runs
    on Python 2, the `/` averages in getCurrentPose2 were floor division for
    int inputs — confirm pose values are floats.
    """

    def __init__(self):
        self.camera = CameraAccessor()
        self.drawingExtractor = DrawingExtractor()
        self.contourExtractor = ContourExtractor()
        self.cornerDetector = CornerDetector()
        self.sideDetector = SideDetector()
        self.positionner = Positionner()

    def getDrawingContour(self):
        """Return (contourPoints, newSquareSize) for the drawing in view.

        Raises:
            ValueError: when no drawing can be found in the grabbed frame.
        """
        try:
            image = self.camera.getFrame(False)
            drawingImage = self.drawingExtractor.ExtractShape(image)
            contourPoints = self.contourExtractor.findContours(drawingImage)
            size = cv.GetSize(drawingImage)
            squareSize = size[0]
            # Shrink the square by a 19-pixel margin on each side.
            newSquareSize = size[0] - (2 * 19)
            for point in contourPoints:
                # Flip the y axis so the origin is at the bottom-left.
                pointCopy = (point[0], squareSize - point[1])
                cv.Circle(drawingImage, pointCopy, 5, (0, 0, 0), 2)
            return contourPoints, newSquareSize
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt etc. escape.
            raise ValueError("Couldn't find drawing in image")

    def _resolveCorner(self, pointBlue, pointOrange, side):
        """Return (points, cornerId, label) for the visible corner, or None.

        Blue corners take priority over orange; the side detector selects
        between the east and west variants. Extracted from the duplicated
        if/elif chains in getCurrentPose and getCurrentPose2.
        """
        if len(pointBlue) > 0 and side == SideDetector.EAST_SIDE:
            return pointBlue, CornerDetector.EAST_BLUE_CORNER, "Blue East"
        if len(pointBlue) > 0 and side == SideDetector.WEST_SIDE:
            return pointBlue, CornerDetector.WEST_BLUE_CORNER, "Blue West"
        if len(pointOrange) > 0 and side == SideDetector.EAST_SIDE:
            return pointOrange, CornerDetector.EAST_ORANGE_CORNER, "Orange East"
        if len(pointOrange) > 0 and side == SideDetector.WEST_SIDE:
            return pointOrange, CornerDetector.WEST_ORANGE_CORNER, "Orange West"
        return None

    def getCurrentPose(self):
        """Return the robot pose (x, y, theta), theta normalized to [0, 360).

        Raises:
            ValueError: when no corner is visible or pose computation fails.
        """
        try:
            image = self.camera.getFrame(False)
            pointBlue, pointOrange, side = self.getVisibleCorners(image)
            print("blue: ", len(pointBlue))
            print("orange: ", len(pointOrange))
            self.drawPointsOnImage(image, pointBlue)
            self.drawPointsOnImage(image, pointOrange)
            resolved = self._resolveCorner(pointBlue, pointOrange, side)
            if resolved is None:
                print("No corners detected")
                raise ValueError("No corners detected for positionning")
            points, cornerId, label = resolved
            print(label + " Corner")
            x, y, theta = self.positionner.getCurrentPose(
                points[0], points[1], cornerId)
            if theta < 0:
                theta = 360 + theta
            return x, y, theta
        except Exception:
            raise ValueError("Problem while getting robot pose")

    def getCurrentPose2(self):
        """Average the robot pose over several camera frames.

        Grabs up to 10 frames, keeps poses whose x > 130 (filters out
        degenerate solutions), and returns the averaged (x, y, theta).

        Raises:
            ValueError: when no valid pose can be computed.
        """
        xTotal = 0
        yTotal = 0
        thetaTotal = 0
        nbSuccess = 0
        attempts = 0
        try:
            # Stop after 10 attempts or once more than 3 samples succeeded.
            while attempts < 10 and nbSuccess <= 3:
                attempts += 1
                print("Camera: Getting current pose. Attempt ", attempts)
                x = 0
                y = 0
                theta = 0
                image = self.camera.getFrame(False)
                pointBlue, pointOrange, side = self.getVisibleCorners(image)
                resolved = self._resolveCorner(pointBlue, pointOrange, side)
                if resolved is not None:
                    points, cornerId, label = resolved
                    x, y, theta = self.positionner.getCurrentPose(
                        points[0], points[1], cornerId)
                    print("Camera Getting current pose. Success! "
                          + label + " corner found.")
                # x > 130 guards against clearly invalid solutions.
                if x > 130:
                    nbSuccess += 1
                    xTotal += x
                    yTotal += y
                    thetaTotal += theta
            if nbSuccess > 0:
                xAverage = xTotal / nbSuccess
                yAverage = yTotal / nbSuccess
                thetaAverage = thetaTotal / nbSuccess
                print("Camera: Robot pose found. X = ", xAverage,
                      ", Y = ", yAverage, ", Theta = ", thetaAverage)
                return xAverage, yAverage, thetaAverage
            else:
                raise ValueError("Couldn't find corner")
        except Exception:
            raise ValueError("Problem while getting robot pose")

    def getCurrentPoseBackup(self):
        """Fallback distance estimate from the blue corner width (east side).

        Returns:
            (True, distance) when a blue east corner is visible, else (False, 0).
        """
        image = self.camera.getFrame(True)
        imageSize = cv.GetSize(image)
        pointBlue, pointOrange, side = self.getVisibleCorners(image)
        if side == SideDetector.EAST_SIDE and len(pointBlue) > 0:
            # BUG FIX: original read `size[0]`, an undefined name (NameError);
            # the frame size computed above is bound to `imageSize`.
            length = (pointBlue[1][0] - pointBlue[0][0]) / imageSize[0]
            # Empirical quadratic fit: normalized corner width -> distance.
            distance = 1699.1 * length * length - 1051.6 * length + 229.33
            return True, distance
        else:
            return False, 0

    def drawPointsOnImage(self, image, points):
        """Draw a small circle on the image at each of the given points."""
        if len(points) > 0:
            for point in points:
                cv.Circle(image, point, 5, (0, 0, 0), 2)

    def getVisibleCorners(self, image):
        """Return (blueCornerPoints, orangeCornerPoints, visibleSide)."""
        contourBlue, contourOrange = self.cornerDetector.detectCorners(image)
        side = self.sideDetector.detectVisibleSide(image)
        return contourBlue, contourOrange, side

    def __doubleImage__(self, contourPoints, size):
        """Draw the closed contour at 1x and 2x scale onto a scratch image.

        Debug helper: the image is local and currently discarded (the
        SaveImage call was commented out in the original).
        """
        doubleImage = cv.CreateImage((1000, 1000), 8, 1)
        for scale in range(1, 3):
            for i in range(len(contourPoints)):
                # Wrap the last segment back to the first point to close
                # the polygon (replaces the original if/else on i).
                nextIndex = (i + 1) % len(contourPoints)
                point1 = (contourPoints[i][0] * scale,
                          contourPoints[i][1] * scale)
                point2 = (contourPoints[nextIndex][0] * scale,
                          contourPoints[nextIndex][1] * scale)
                cv.Line(doubleImage, point1, point2, 255, 2)