Example #1
    def drawDetections(self, outimg, hlist, rvecs=None, tvecs=None):
        # Show trihedron and parallelepiped centered on object:
        hw = self.winw * 0.5
        hh = self.winh * 0.5
        dd = -max(hw, hh)
        i = 0
        empty = np.array([0.0, 0.0, 0.0])

        # NOTE: this code is similar to FirstVision, but in the present module we have at most one object in the list
        # (the window, if detected):
        for obj in hlist:
            # skip those for which solvePnP failed:
            if np.array_equal(rvecs[i], empty):
                i += 1
                continue
            # Converting the coordinates to int below may throw overflow errors if the projection becomes singular
            # because of a noisy detection:
            try:
                # Project axis points:
                axisPoints = np.array([(0.0, 0.0, 0.0), (hw, 0.0, 0.0),
                                       (0.0, hh, 0.0), (0.0, 0.0, dd)])
                imagePoints, jac = cv2.projectPoints(axisPoints, rvecs[i],
                                                     tvecs[i], self.camMatrix,
                                                     self.distCoeffs)

                # Draw axis lines:
                jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5),
                                int(imagePoints[0][0, 1] + 0.5),
                                int(imagePoints[1][0, 0] + 0.5),
                                int(imagePoints[1][0, 1] + 0.5), 2,
                                jevois.YUYV.MedPurple)
                jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5),
                                int(imagePoints[0][0, 1] + 0.5),
                                int(imagePoints[2][0, 0] + 0.5),
                                int(imagePoints[2][0, 1] + 0.5), 2,
                                jevois.YUYV.MedGreen)
                jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5),
                                int(imagePoints[0][0, 1] + 0.5),
                                int(imagePoints[3][0, 0] + 0.5),
                                int(imagePoints[3][0, 1] + 0.5), 2,
                                jevois.YUYV.MedGrey)

                # Also draw a parallelepiped: NOTE: contrary to FirstVision, here we draw it going into the object, as
                # opposed to sticking out of it (we just negate Z for that):
                cubePoints = np.array([(-hw, -hh, 0.0), (hw, -hh, 0.0),
                                       (hw, hh, 0.0), (-hw, hh, 0.0),
                                       (-hw, -hh, -dd), (hw, -hh, -dd),
                                       (hw, hh, -dd), (-hw, hh, -dd)])
                cu, jac2 = cv2.projectPoints(cubePoints, rvecs[i], tvecs[i],
                                             self.camMatrix, self.distCoeffs)

                # Round all the coordinates and cast to int for drawing:
                cu = np.rint(cu)

                # Draw parallelepiped lines:
                jevois.drawLine(outimg, int(cu[0][0, 0]), int(cu[0][0, 1]),
                                int(cu[1][0, 0]), int(cu[1][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[1][0, 0]), int(cu[1][0, 1]),
                                int(cu[2][0, 0]), int(cu[2][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[2][0, 0]), int(cu[2][0, 1]),
                                int(cu[3][0, 0]), int(cu[3][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[3][0, 0]), int(cu[3][0, 1]),
                                int(cu[0][0, 0]), int(cu[0][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[4][0, 0]), int(cu[4][0, 1]),
                                int(cu[5][0, 0]), int(cu[5][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[5][0, 0]), int(cu[5][0, 1]),
                                int(cu[6][0, 0]), int(cu[6][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[6][0, 0]), int(cu[6][0, 1]),
                                int(cu[7][0, 0]), int(cu[7][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[7][0, 0]), int(cu[7][0, 1]),
                                int(cu[4][0, 0]), int(cu[4][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[0][0, 0]), int(cu[0][0, 1]),
                                int(cu[4][0, 0]), int(cu[4][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[1][0, 0]), int(cu[1][0, 1]),
                                int(cu[5][0, 0]), int(cu[5][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[2][0, 0]), int(cu[2][0, 1]),
                                int(cu[6][0, 0]), int(cu[6][0, 1]), 1,
                                jevois.YUYV.LightGreen)
                jevois.drawLine(outimg, int(cu[3][0, 0]), int(cu[3][0, 1]),
                                int(cu[7][0, 0]), int(cu[7][0, 1]), 1,
                                jevois.YUYV.LightGreen)
            except Exception:
                pass

            i += 1
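
The pose overlay above is driven by cv2.projectPoints, which maps 3D points expressed in the object's coordinate frame into pixel coordinates using the rvec/tvec pose from solvePnP plus the camera calibration. Below is a minimal stand-alone sketch of that call for the four axis endpoints; the camera matrix, distortion coefficients, pose and object size are made-up placeholder values, not ones taken from the module.

import numpy as np
import cv2

# Hypothetical calibration and pose (placeholders, not real JeVois calibration data):
camMatrix = np.array([[600.0, 0.0, 320.0],
                      [0.0, 600.0, 240.0],
                      [0.0, 0.0, 1.0]])
distCoeffs = np.zeros(5)               # assume no lens distortion
rvec = np.array([0.1, -0.2, 0.05])     # Rodrigues rotation vector
tvec = np.array([0.0, 0.0, 0.5])       # object half a meter in front of the camera

hw, hh = 0.1, 0.075                    # half width / half height of the object, in meters
dd = -max(hw, hh)
axisPoints = np.array([(0.0, 0.0, 0.0), (hw, 0.0, 0.0),
                       (0.0, hh, 0.0), (0.0, 0.0, dd)])

# Same call as in drawDetections; imagePoints has shape (4, 1, 2), which is why the
# code above indexes it as imagePoints[k][0, 0] / imagePoints[k][0, 1]:
imagePoints, jac = cv2.projectPoints(axisPoints, rvec, tvec, camMatrix, distCoeffs)
for name, p in zip(("origin", "x-axis", "y-axis", "z-axis"), imagePoints):
    print(name, p[0])
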
Example #2
    def drawDetections(self, outimg, hlist, rvecs=None, tvecs=None):
        # Show trihedron and parallelepiped centered on object:
        hw = self.owm * 0.5
        hh = self.ohm * 0.5
        dd = -max(hw, hh)
        i = 0
        empty = np.array([0.0, 0.0, 0.0])

        for obj in hlist:
            # skip those for which solvePnP failed:
            if np.array_equal(rvecs[i], empty):
                i += 1
                continue

            # Project axis points:
            axisPoints = np.array([(0.0, 0.0, 0.0), (hw, 0.0, 0.0),
                                   (0.0, hh, 0.0), (0.0, 0.0, dd)])
            imagePoints, jac = cv2.projectPoints(axisPoints, rvecs[i],
                                                 tvecs[i], self.camMatrix,
                                                 self.distCoeffs)

            # Draw axis lines:
            jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5),
                            int(imagePoints[0][0, 1] + 0.5),
                            int(imagePoints[1][0, 0] + 0.5),
                            int(imagePoints[1][0, 1] + 0.5), 2,
                            jevois.YUYV.MedPurple)
            jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5),
                            int(imagePoints[0][0, 1] + 0.5),
                            int(imagePoints[2][0, 0] + 0.5),
                            int(imagePoints[2][0, 1] + 0.5), 2,
                            jevois.YUYV.MedGreen)
            jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5),
                            int(imagePoints[0][0, 1] + 0.5),
                            int(imagePoints[3][0, 0] + 0.5),
                            int(imagePoints[3][0, 1] + 0.5), 2,
                            jevois.YUYV.MedGrey)

            # Also draw a parallelepiped:
            cubePoints = np.array([(-hw, -hh, 0.0), (hw, -hh, 0.0),
                                   (hw, hh, 0.0), (-hw, hh, 0.0),
                                   (-hw, -hh, dd), (hw, -hh, dd), (hw, hh, dd),
                                   (-hw, hh, dd)])
            cu, jac2 = cv2.projectPoints(cubePoints, rvecs[i], tvecs[i],
                                         self.camMatrix, self.distCoeffs)

            # Round all the coordinates and cast to int for drawing:
            cu = np.rint(cu)

            # Draw parallelepiped lines:
            jevois.drawLine(outimg, int(cu[0][0, 0]), int(cu[0][0, 1]),
                            int(cu[1][0, 0]), int(cu[1][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[1][0, 0]), int(cu[1][0, 1]),
                            int(cu[2][0, 0]), int(cu[2][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[2][0, 0]), int(cu[2][0, 1]),
                            int(cu[3][0, 0]), int(cu[3][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[3][0, 0]), int(cu[3][0, 1]),
                            int(cu[0][0, 0]), int(cu[0][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[4][0, 0]), int(cu[4][0, 1]),
                            int(cu[5][0, 0]), int(cu[5][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[5][0, 0]), int(cu[5][0, 1]),
                            int(cu[6][0, 0]), int(cu[6][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[6][0, 0]), int(cu[6][0, 1]),
                            int(cu[7][0, 0]), int(cu[7][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[7][0, 0]), int(cu[7][0, 1]),
                            int(cu[4][0, 0]), int(cu[4][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[0][0, 0]), int(cu[0][0, 1]),
                            int(cu[4][0, 0]), int(cu[4][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[1][0, 0]), int(cu[1][0, 1]),
                            int(cu[5][0, 0]), int(cu[5][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[2][0, 0]), int(cu[2][0, 1]),
                            int(cu[6][0, 0]), int(cu[6][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[3][0, 0]), int(cu[3][0, 1]),
                            int(cu[7][0, 0]), int(cu[7][0, 1]), 1,
                            jevois.YUYV.LightGreen)

            i += 1
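
The twelve jevois.drawLine calls trace the edges of the projected box corner by corner. If one wanted to factor that out, the connectivity can be expressed as an index table, as in the sketch below. The helper name parallelepiped_segments and the EDGES table are illustrative only; since jevois.drawLine is only available on the camera, the sketch just collects the rounded integer endpoints that would be drawn.

import numpy as np

# Corner indices of the 12 edges, matching the order of the drawLine calls above:
EDGES = [(0, 1), (1, 2), (2, 3), (3, 0),   # face at Z = 0
         (4, 5), (5, 6), (6, 7), (7, 4),   # face at Z = dd
         (0, 4), (1, 5), (2, 6), (3, 7)]   # edges connecting the two faces

def parallelepiped_segments(cu):
    # cu is shaped like the output of cv2.projectPoints for 8 corners: (8, 1, 2).
    cu = np.rint(cu)
    return [(int(cu[a][0, 0]), int(cu[a][0, 1]),
             int(cu[b][0, 0]), int(cu[b][0, 1])) for a, b in EDGES]

# Dummy projected corners, just to exercise the helper:
cu = np.arange(16, dtype=float).reshape(8, 1, 2)
for x1, y1, x2, y2 in parallelepiped_segments(cu):
    print(x1, y1, x2, y2)
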
Example #3
    def detect(self, imggray, outimg=None):
        h, w = imggray.shape
        hlist = []

        # Create a keypoint detector if needed:
        if not hasattr(self, 'detector'):
            self.detector = cv2.ORB_create()

        # Load training image and detect keypoints on it if needed:
        if not hasattr(self, 'refkp'):
            refimg = cv2.imread(self.fname, 0)
            self.refkp, self.refdes = self.detector.detectAndCompute(
                refimg, None)

            # Also store corners of reference image and of window for homography mapping:
            refh, refw = refimg.shape
            self.refcorners = np.float32([[0.0, 0.0], [0.0, refh],
                                          [refw, refh],
                                          [refw, 0.0]]).reshape(-1, 1, 2)
            self.wincorners = np.float32(
                [[self.winleft * refw / self.owm,
                  self.wintop * refh / self.ohm],
                 [self.winleft * refw / self.owm,
                  (self.wintop + self.winh) * refh / self.ohm],
                 [(self.winleft + self.winw) * refw / self.owm,
                  (self.wintop + self.winh) * refh / self.ohm],
                 [(self.winleft + self.winw) * refw / self.owm,
                  self.wintop * refh / self.ohm]]).reshape(-1, 1, 2)
            jevois.LINFO(
                "Extracted {} keypoints and descriptors from {}".format(
                    len(self.refkp), self.fname))

        # Compute keypoints and descriptors:
        kp, des = self.detector.detectAndCompute(imggray, None)
        msg = "{} keypoints".format(len(kp))

        # Create a matcher if needed:
        if not hasattr(self, 'matcher'):
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # Compute matches between reference image and camera image, then sort them by distance:
        matches = self.matcher.match(des, self.refdes)
        matches = sorted(matches, key=lambda x: x.distance)
        msg += ", {} matches".format(len(matches))

        # Keep only good matches:
        lastidx = 0
        for m in matches:
            if m.distance < self.distth: lastidx += 1
            else: break
        matches = matches[0:lastidx]
        msg += ", {} good".format(len(matches))

        # If we have enough matches, compute homography:
        corners = []
        wincorners = []
        if len(matches) >= 10:
            obj = []
            scene = []

            # Localize the object (see JeVois C++ class ObjectMatcher for details):
            for m in matches:
                obj.append(self.refkp[m.trainIdx].pt)
                scene.append(kp[m.queryIdx].pt)

            # Compute the homography:
            hmg, mask = cv2.findHomography(np.array(obj), np.array(scene),
                                           cv2.RANSAC, 5.0)

            # Check homography conditioning using SVD:
            u, s, v = np.linalg.svd(hmg, full_matrices=False)

            # We need the smallest singular value to not be too small, and the ratio of largest to smallest singular
            # value to be quite large for our homography to be declared good here. Note that np.linalg.svd returns the
            # singular values in descending order already:
            if s[-1] > 0.001 and s[0] / s[-1] > 100:
                # Project the reference image corners to the camera image:
                corners = cv2.perspectiveTransform(self.refcorners, hmg)
                wincorners = cv2.perspectiveTransform(self.wincorners, hmg)

        # Display any results requested by the user:
        if outimg is not None and outimg.valid():
            if len(corners) == 4:
                jevois.drawLine(outimg, int(corners[0][0, 0] + 0.5),
                                int(corners[0][0, 1] + 0.5),
                                int(corners[1][0, 0] + 0.5),
                                int(corners[1][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
                jevois.drawLine(outimg, int(corners[1][0, 0] + 0.5),
                                int(corners[1][0, 1] + 0.5),
                                int(corners[2][0, 0] + 0.5),
                                int(corners[2][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
                jevois.drawLine(outimg, int(corners[2][0, 0] + 0.5),
                                int(corners[2][0, 1] + 0.5),
                                int(corners[3][0, 0] + 0.5),
                                int(corners[3][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
                jevois.drawLine(outimg, int(corners[3][0, 0] + 0.5),
                                int(corners[3][0, 1] + 0.5),
                                int(corners[0][0, 0] + 0.5),
                                int(corners[0][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
            jevois.writeText(outimg, msg, 3, h + 4, jevois.YUYV.White,
                             jevois.Font.Font6x10)

        # Return window corners if we did indeed detect the object:
        hlist = []
        if len(wincorners) == 4: hlist.append(wincorners)

        return hlist
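
The homography acceptance test in detect() reduces to two thresholds on the singular values returned by np.linalg.svd: the smallest singular value must not be too close to zero, and the ratio of largest to smallest must exceed 100. The helper below is a stand-alone sketch of that check with the same thresholds; homography_ok and the example 3x3 matrix are illustrative, not part of the module.

import numpy as np

def homography_ok(hmg, min_sv=0.001, min_ratio=100.0):
    # np.linalg.svd returns singular values in descending order, so s[0] is the
    # largest and s[-1] the smallest.
    u, s, v = np.linalg.svd(hmg, full_matrices=False)
    return s[-1] > min_sv and s[0] / s[-1] > min_ratio

# Made-up homography: roughly a mild rotation/scale plus a large pixel translation:
hmg = np.array([[1.1, 0.05, 120.0],
                [-0.04, 1.2, 40.0],
                [1e-4, 2e-4, 1.0]])
print(homography_ok(hmg))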