class VisionModule:
    """Extracts, filters, scales and labels contours from an input image.

    Pipeline: resize -> colour-filter -> contour extraction -> top-N
    selection -> rescale to original coordinates -> masked patches ->
    colour-based labelling.
    """

    # Class-level annotations (evaluated at class creation).
    _contour_predictor: ContourPredictor_COLOUR
    _contour_extractor: ContourExtractor
    # NOTE(review): F.NULL is a value, not a type — annotation looks wrong,
    # but it is kept unchanged for compatibility; confirm intent.
    _contour_filter: F.NULL

    def __init__(self):
        self._contour_predictor = ContourPredictor_COLOUR()
        self._contour_extractor = ContourExtractor()

    def process_img(self, img, fn=F.NULL):
        """Run the full contour pipeline on *img*.

        Parameters
        ----------
        img : image array to process; a copy is taken, *img* is not modified.
        fn : callable applied to the resized image (colour filter);
             defaults to F.NULL.

        Returns
        -------
        (contour_img_data, scale_ratio) — labelled per-contour data and the
        resized->original scale factor.
        """
        # Scale down to a more manageable size and pass through the colour filter.
        processed_img, scale_ratio = self.pre_process_img(img.copy(), fn)
        # Retrieve contours from the processed (small) image.
        contour_data = self._contour_extractor.return_contours(processed_img)
        # Keep only the top-N contours.
        contour_data = self._contour_extractor.return_top_n(contour_data)
        # Scale contours back up to the original image size.
        contour_data = self._contour_extractor.scale_contours(contour_data, scale_ratio)
        # Extract the masked image patch for each contour.
        contour_img_data = self._contour_extractor.return_img_data(img, contour_data)
        # Label each contour patch with the colour predictor.
        contour_img_data = self._contour_predictor.return_contour_labels(contour_img_data)
        return contour_img_data, scale_ratio

    def pre_process_img(self, img, fn, _w=300):
        """Resize *img* to width *_w*, apply filter *fn*, and return
        (processed_image, scale_ratio) where scale_ratio restores the
        original size."""
        resized = imutils.resize(img, _w)
        # Fixed typo: local was previously named 'scale_ration'.
        scale_ratio = img.shape[0] / float(resized.shape[0])
        processed = fn(resized)
        return processed, scale_ratio

    @staticmethod
    def _centroid(moments, scale_ratio):
        """Return the (x, y) centroid of *moments* scaled by *scale_ratio*,
        or (0, 0) when the contour area (m00) is zero."""
        m00 = moments['m00']
        if m00 <= 0:
            return 0, 0
        return (int(moments['m10'] / m00 * scale_ratio),
                int(moments['m01'] / m00 * scale_ratio))

    def plot_contours_on_img(self, img, contour_img_data, scale_ratio):
        """Draw each contour and its label onto a copy of *img* and display
        it in a new matplotlib figure."""
        img_a = img.copy()
        for data in contour_img_data:
            # Was an unreadable duplicated-conditional one-liner; the
            # centroid logic now lives in _centroid (same behaviour).
            c_x, c_y = self._centroid(data['data']['moments'], scale_ratio)
            cv2.putText(img_a, data['label'], (c_x, c_y),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 5)
            cv2.drawContours(img_a, [data['data']['contour']], -1, (255, 255, 255), 3)
        plt.figure()
        plt.imshow(img_a)

    def compare_boundingRect_contours(self, img, contour_img_data):
        """Plot, side by side, each contour's bounding-rect crop of *img*
        and its masked patch ('img' entry). Assumes at most 5 contours
        (fixed 5x2 subplot grid)."""
        img_b = img.copy()
        plt.figure()
        i = 1
        for data in contour_img_data:
            plt.subplot(5, 2, i)
            # Clamp the bounding rect to the image extents.
            x_l = data['data']['x']
            x_h = min(data['data']['x'] + data['data']['w'], img.shape[1] - 1)
            y_l = data['data']['y']
            y_h = min(data['data']['y'] + data['data']['h'], img.shape[0] - 1)
            plt.imshow(img_b[y_l:y_h, x_l:x_h])
            plt.subplot(5, 2, i + 1)
            plt.imshow(data['img'])
            i += 2
Example #2
0
File: Camera.py Project: spg/JDV
 def __init__(self):
     # NOTE(review): orphaned scrape fragment — the enclosing class header
     # is not visible here; this duplicates Camera.__init__ further below.
     # Wires up the camera accessor and the vision-processing collaborators.
     self.camera = CameraAccessor()
     self.drawingExtractor = DrawingExtractor()
     self.contourExtractor = ContourExtractor()
     self.cornerDetector = CornerDetector()
     self.sideDetector = SideDetector()
     # NOTE(review): stray space after 'self.' — valid Python, left as-is.
     self. positionner = Positionner()
 def __init__(self):
     # NOTE(review): orphaned scrape fragment — enclosing class header not
     # visible; duplicates VisionModule.__init__ at the top of this file.
     self._contour_predictor = ContourPredictor_COLOUR()
     self._contour_extractor = ContourExtractor()
Example #4
0
File: Camera.py Project: spg/JDV
class Camera:
    def __init__(self):
        self.camera = CameraAccessor()
        self.drawingExtractor = DrawingExtractor()
        self.contourExtractor = ContourExtractor()
        self.cornerDetector = CornerDetector()
        self.sideDetector = SideDetector()
        self. positionner = Positionner()

    def getDrawingContour(self):
        try:
            image = self.camera.getFrame(False)
            drawingImage = self.drawingExtractor.ExtractShape(image)
            contourPoints = self.contourExtractor.findContours(drawingImage)
            size = cv.GetSize(drawingImage)
            squareSize = size[0]
            newSquareSize = size[0]-(2*19)

            for point in contourPoints:
                pointCopy = (point[0], squareSize - point[1])
                cv.Circle(drawingImage, pointCopy, 5, (0,0,0), 2)

            #cv.SaveImage("segmentationResult.jpg", drawingImage)

            return contourPoints, newSquareSize
        except:
            raise ValueError("Couldn't find drawing in image")

    def getCurrentPose(self):
        try:
            image = self.camera.getFrame(False)
            pointBlue, pointOrange, side = self.getVisibleCorners(image)
            print "blue: ", len(pointBlue)
            print "orange: ", len(pointOrange)
            self.drawPointsOnImage(image, pointBlue)
            self.drawPointsOnImage(image, pointOrange)
            if len(pointBlue) > 0 and side == SideDetector.EAST_SIDE:
                print "Blue East Corner"
                x, y, theta = self.positionner.getCurrentPose(pointBlue[0], pointBlue[1], CornerDetector.EAST_BLUE_CORNER)
            elif len(pointBlue) > 0 and side == SideDetector.WEST_SIDE:
                print "Blue West Corner"
                x, y, theta = self.positionner.getCurrentPose(pointBlue[0], pointBlue[1], CornerDetector.WEST_BLUE_CORNER)
            elif len(pointOrange) > 0 and side == SideDetector.EAST_SIDE:
                print "Orange East Corner"
                x, y, theta = self.positionner.getCurrentPose(pointOrange[0], pointOrange[1], CornerDetector.EAST_ORANGE_CORNER)
            elif len(pointOrange) > 0 and side == SideDetector.WEST_SIDE:
                print "Orange West Corner"
                x, y, theta = self.positionner.getCurrentPose(pointOrange[0], pointOrange[1], CornerDetector.WEST_ORANGE_CORNER)
            else:
                print "No corners detected"
                raise ValueError("No corners detected for positionning")
            if theta < 0:
                theta = 360 + theta
            return x, y, theta
        except:
            raise ValueError("Problem while getting robot pose")

    def getCurrentPose2(self):
        xTotal = 0
        yTotal = 0
        thetaTotal = 0
        nbSuccess = 0
        attemps = 0
        try:
            while attemps < 10  and nbSuccess <= 3:
                attemps += 1
                print "Camera: Getting current pose. Attemp ", attemps
                x = 0
                y  = 0
                theta = 0
                image = self.camera.getFrame(False)
                pointBlue, pointOrange, side = self.getVisibleCorners(image)
                #self.drawPointsOnImage(image, pointBlue)
                #self.drawPointsOnImage(image, pointOrange)
                if len(pointBlue) > 0 and side == SideDetector.EAST_SIDE:
                    x, y, theta = self.positionner.getCurrentPose(pointBlue[0], pointBlue[1], CornerDetector.EAST_BLUE_CORNER)
                    print "Camera Getting current pose. Success! Blue East corner found."
                elif len(pointBlue) > 0 and side == SideDetector.WEST_SIDE:
                    x, y, theta = self.positionner.getCurrentPose(pointBlue[0], pointBlue[1], CornerDetector.WEST_BLUE_CORNER)
                    print "Camera Getting current pose. Success! Blue West corner found."
                elif len(pointOrange) > 0 and side == SideDetector.EAST_SIDE:
                    x, y, theta = self.positionner.getCurrentPose(pointOrange[0], pointOrange[1], CornerDetector.EAST_ORANGE_CORNER)
                    print "Camera Getting current pose. Success! Orange East corner found."
                elif len(pointOrange) > 0 and side == SideDetector.WEST_SIDE:
                    x, y, theta = self.positionner.getCurrentPose(pointOrange[0], pointOrange[1], CornerDetector.WEST_ORANGE_CORNER)
                    print "Camera Getting current pose. Success! Orange West corner found."
                if x > 130:
                    nbSuccess += 1
                    xTotal += x
                    yTotal += y
                    thetaTotal += theta
            if nbSuccess > 0:
                xAverage = xTotal/nbSuccess
                yAverage = yTotal/nbSuccess
                thetaAverage = thetaTotal/nbSuccess
                print "Camera: Robot pose found. X = ", xAverage, ", Y = ", yAverage, ", Theta = ", thetaAverage
                return xAverage, yAverage, thetaAverage
            else:
                raise ValueError("Couldn't find corner")
        except:
            raise ValueError("Problem while getting robot pose")

    def getCurrentPoseBackup(self):
        image = self.camera.getFrame(True)
        imageSize = cv.GetSize(image)
        pointBlue, pointOrange, side = self.getVisibleCorners(image)
        if side == SideDetector.EAST_SIDE and len(pointBlue) > 0:
            lenght = (pointBlue[1][0] - pointBlue[0][0])/size[0]
            distance = 1699.1*lenght*lenght - 1051.6*lenght + 229.33
            return True, distance
        else:
            return False, 0

    def drawPointsOnImage(self, image, points):
        #cv.SaveImage("cornerDetectionResult.jpg", image)
        if len(points) > 0:
            for point in points:
                #pointCopy = (point[0], squareSize - point[1])
                cv.Circle(image, point, 5, (0,0,0), 2)
            #cv.SaveImage("cornerDetectionResult.jpg", image)

    def getVisibleCorners(self, image):
        contourBlue, contourOrange = self.cornerDetector.detectCorners(image)
        side = self.sideDetector.detectVisibleSide(image)
        return contourBlue, contourOrange, side


    def __doubleImage__(self, contourPoints, size):
        doubleImage = cv.CreateImage((1000,1000),8,1)
        for scale in range(1,3):
            x = size[0]*scale
            y = size[1]*scale
            for i in range(len(contourPoints)):
                if i == len(contourPoints)-1:
                    x1 = contourPoints[i][0]*scale
                    y1 = contourPoints[i][1]*scale
                    x2 = contourPoints[0][0]*scale
                    y2 = contourPoints[0][1]*scale
                else:
                    x1 = contourPoints[i][0]*scale
                    y1 = contourPoints[i][1]*scale
                    x2 = contourPoints[i+1][0]*scale
                    y2 = contourPoints[i+1][1]*scale
                point1 = (x1, y1)
                point2 = (x2, y2)
                cv.Line(doubleImage,point1, point2,255, 2)

        #cv.SaveImage("scale.jpg", doubleImage)