    def process(self, inframe, outframe):
        jevois.LINFO("process with usb")

        # Get the next camera image (may block until it is captured):
        inimg = inframe.get()
        jevois.LINFO("Input image is {} {}x{}".format(jevois.fccstr(inimg.fmt), inimg.width, inimg.height))

        # Get the next available USB output image:
        outimg = outframe.get()
        jevois.LINFO("Output image is {} {}x{}".format(jevois.fccstr(outimg.fmt), outimg.width, outimg.height))

        # Example of getting pixel data from the input and copying to the output:
        jevois.paste(inimg, outimg, 0, 0)

        # We are done with the input image:
        inframe.done()

        # Example of in-place processing:
        jevois.hFlipYUYV(outimg)

        # Example of simple drawings:
        jevois.drawCircle(outimg, int(outimg.width/2), int(outimg.height/2), int(outimg.height/4),
                          2, jevois.YUYV.White)
        jevois.writeText(outimg, "Hi from Python - @MODULE@", 20, 20, jevois.YUYV.White, jevois.Font.Font10x20)
        
        # We are done with the output, ready to send it to host over USB:
        outframe.send()

        # Send a string over serial (e.g., to an Arduino). Remember to tell the JeVois Engine to display those messages,
        # as they are turned off by default. For example: 'setpar serout All' in the JeVois console:
        jevois.sendSerial("DONE frame {}".format(self.frame))
        self.frame += 1
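
These process() snippets rely on state set up in a constructor that the page does not show; a minimal sketch of what it might look like (the class name is illustrative; jevois.Timer is the standard JeVois frames/s helper used by the later examples):

import libjevois as jevois

class PythonSandbox:
    def __init__(self):
        self.frame = 0  # frame counter used in the serial message above
        # Report average frames/s to the log every 100 frames:
        self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)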
Example #2
    def process(self, inframe, outframe):
        inimg = inframe.get()
        self.timer.start()

        imgbgr = jevois.convertToCvBGR(inimg)
        h, w, chans = imgbgr.shape
        outimg = outframe.get()
        outimg.require("output", w, h + 12, jevois.V4L2_PIX_FMT_YUYV)
        jevois.paste(inimg, outimg, 0, 0)
        jevois.drawFilledRect(outimg, 0, h, outimg.width, outimg.height - h,
                              jevois.YUYV.Black)
        inframe.done()

        cube = self.detect(imgbgr, outimg)
        # Load camera calibration if needed:
        # if not hasattr(self, 'camMatrix'): self.loadCameraCalibration(w, h)

        if cube is not None:
            jevois.sendSerial(cube.toJson())

        # Write frames/s info from our timer (NOTE: does not account for output conversion time):
        fps = self.timer.stop()
        jevois.writeText(outimg, fps, 3, h - 10, jevois.YUYV.White,
                         jevois.Font.Font6x10)
        outframe.send()
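
self.detect() and cube.toJson() are defined elsewhere in this module; a minimal sketch of the detection object they imply (field names inferred from printOnImage() in the next example; the class itself is hypothetical):

import json

class Cube:
    def __init__(self, x, y, w, h, angle, distance):
        self.x, self.y, self.w, self.h = x, y, w, h
        self.angle = angle        # radians
        self.distance = distance

    def toJson(self):
        # One-line JSON string suitable for jevois.sendSerial():
        return json.dumps({"x": self.x, "y": self.y, "w": self.w,
                           "h": self.h, "angle": self.angle,
                           "distance": self.distance})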
Example #3
    def printOnImage(self, outimg, cube, width, height):
        if outimg is not None and outimg.valid() and cube is not None:
            jevois.drawRect(outimg, cube.x, cube.y, cube.w, cube.h, 2,
                            jevois.YUYV.MedPurple)
            jevois.writeText(
                outimg, "Angle: " + str(int(numpy.degrees(cube.angle))) +
                " Distance: " + str(int(cube.distance)) + " Pixel width: " +
                str(cube.w), 3, height + 1, jevois.YUYV.White,
                jevois.Font.Font6x10)
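
For reference, a likely call site from process() (hypothetical wiring; this snippet also assumes `import numpy` at module scope):

        # Inside process(), after detect() has produced a cube (or None):
        self.printOnImage(outimg, cube, w, h)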
Example #4
    def process(self, inframe, outframe):
        # Get the next camera image (may block until it is captured). To avoid wasting much time assembling a composite
        # output image with multiple panels by concatenating numpy arrays, in this module we use raw YUYV images and
        # fast paste and draw operations provided by JeVois on those images:
        inimg = inframe.get()

        # Start measuring image processing time:
        self.timer.start()

        # Convert input image to BGR24:
        imgbgr = jevois.convertToCvBGR(inimg)
        h, w, chans = imgbgr.shape

        # Get pre-allocated but blank output image which we will send over USB:
        outimg = outframe.get()
        outimg.require("output", w * 2, h + 12, jevois.V4L2_PIX_FMT_YUYV)
        jevois.paste(inimg, outimg, 0, 0)
        jevois.drawFilledRect(outimg, 0, h, outimg.width, outimg.height - h,
                              jevois.YUYV.Black)

        # Let camera know we are done using the input image:
        inframe.done()

        # Get a list of quadrilateral convex hulls for all good objects:
        hlist = self.detect(imgbgr, outimg)

        # Load camera calibration if needed:
        if not hasattr(self, 'camMatrix'): self.loadCameraCalibration(w, h)

        # Map to 6D (inverse perspective):
        (rvecs, tvecs) = self.estimatePose(hlist)

        # Send all serial messages:
        self.sendAllSerial(w, h, hlist, rvecs, tvecs)

        # Draw all detections in 3D:
        self.drawDetections(outimg, hlist, rvecs, tvecs)

        # Write frames/s info from our timer (NOTE: does not account for output conversion time):
        fps = self.timer.stop()
        jevois.writeText(outimg, fps, 3, h - 10, jevois.YUYV.White,
                         jevois.Font.Font6x10)

        # We are done with the output, ready to send it to host over USB:
        outframe.send()
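
loadCameraCalibration() is not shown on this page; a minimal sketch that reads an OpenCV YAML calibration file (the file path and YAML node names are assumptions; it assumes `import numpy as np` and `import cv2` at module scope):

    def loadCameraCalibration(self, w, h):
        # Hypothetical per-resolution calibration file path:
        cpf = "/jevois/share/camera/calibration{}x{}.yaml".format(w, h)
        fs = cv2.FileStorage(cpf, cv2.FILE_STORAGE_READ)
        if fs.isOpened():
            self.camMatrix = fs.getNode("camera_matrix").mat()
            self.distCoeffs = fs.getNode("distortion_coefficients").mat()
            fs.release()
        else:
            # Fall back to an ideal pinhole model if no file is found:
            self.camMatrix = np.array([[w, 0.0, w * 0.5],
                                       [0.0, w, h * 0.5],
                                       [0.0, 0.0, 1.0]])
            self.distCoeffs = np.zeros(5)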
Example #5
    def detect(self, imgbgr, outimg=None):
        maxn = 5  # max number of objects we will consider
        h, w, chans = imgbgr.shape

        # Convert input image to HSV:
        imghsv = cv2.cvtColor(imgbgr, cv2.COLOR_BGR2HSV)

        # Isolate pixels inside our desired HSV range:
        imgth = cv2.inRange(imghsv, self.HSVmin, self.HSVmax)
        str = "H={}-{} S={}-{} V={}-{} ".format(self.HSVmin[0], self.HSVmax[0],
                                                self.HSVmin[1], self.HSVmax[1],
                                                self.HSVmin[2], self.HSVmax[2])

        # Create structuring elements for morpho maths:
        if not hasattr(self, 'erodeElement'):
            self.erodeElement = cv2.getStructuringElement(
                cv2.MORPH_RECT, (2, 2))
            self.dilateElement = cv2.getStructuringElement(
                cv2.MORPH_RECT, (2, 2))

        # Apply morphological operations to cleanup the image noise:
        imgth = cv2.erode(imgth, self.erodeElement)
        imgth = cv2.dilate(imgth, self.dilateElement)

        # Detect objects by finding contours (findContours returns 3 values in
        # OpenCV 3.x but 2 in OpenCV 4.x; taking the last two supports both):
        contours, hierarchy = cv2.findContours(imgth, cv2.RETR_CCOMP,
                                               cv2.CHAIN_APPROX_SIMPLE)[-2:]
        str += "N={} ".format(len(contours))

        # Only consider the 5 biggest objects by area:
        contours = sorted(contours, key=cv2.contourArea, reverse=True)[:maxn]
        hlist = []  # list of hulls of good objects, which we will return
        str2 = ""
        beststr2 = ""

        # Identify the "good" objects:
        for c in contours:
            # Keep track of our best detection so far:
            if len(str2) > len(beststr2): beststr2 = str2
            str2 = ""

            # Compute contour area:
            area = cv2.contourArea(c, oriented=False)

            # Compute convex hull:
            rawhull = cv2.convexHull(c, clockwise=True)
            rawhullperi = cv2.arcLength(rawhull, closed=True)
            hull = cv2.approxPolyDP(rawhull,
                                    epsilon=self.epsilon * rawhullperi * 3.0,
                                    closed=True)

            # Is it the right shape?
            if (hull.shape != (4, 1, 2)):
                continue  # 4 vertices for the rectangular convex outline (shows as a trapezoid)
            str2 += "H"  # Hull is quadrilateral

            huarea = cv2.contourArea(hull, oriented=False)
            if huarea < self.hullarea[0] or huarea > self.hullarea[1]: continue
            str2 += "A"  # Hull area ok

            hufill = area / huarea * 100.0
            if hufill > self.hullfill: continue
            str2 += "F"  # Fill is ok

            # Check object shape:
            peri = cv2.arcLength(c, closed=True)
            approx = cv2.approxPolyDP(c,
                                      epsilon=self.epsilon * peri,
                                      closed=True)
            if len(approx) < 7 or len(approx) > 9:
                continue  # 8 vertices for a U shape
            str2 += "S"  # Shape is ok

            # Compute shape error (serr) between the contour and its polygon approximation:
            serr = 100.0 * cv2.matchShapes(c, approx, cv2.CONTOURS_MATCH_I1,
                                           0.0)
            if serr > self.ethresh: continue
            str2 += "E"  # Shape error is ok

            # Reject the shape if any of its vertices gets within the margin of the image bounds. This is to avoid
            # getting grossly incorrect 6D pose estimates as the shape starts getting truncated as it partially exits
            # the camera field of view:
            reject = 0
            for v in c:
                if v[0, 0] < self.margin or v[0, 0] >= w - self.margin or v[
                        0, 1] < self.margin or v[0, 1] >= h - self.margin:
                    reject = 1
                    break

            if reject == 1: continue
            str2 += "M"  # Margin ok

            # Re-order the 4 points in the hull if needed: In the pose estimation code, we will assume vertices ordered
            # as follows:
            #
            #    0|        |3
            #     |        |
            #     |        |
            #    1----------2

            # v10+v23 should point outward from the U more than v03+v12 does:
            v10p23 = complex(
                hull[0][0, 0] - hull[1][0, 0] + hull[3][0, 0] - hull[2][0, 0],
                hull[0][0, 1] - hull[1][0, 1] + hull[3][0, 1] - hull[2][0, 1])
            len10p23 = abs(v10p23)
            v03p12 = complex(
                hull[3][0, 0] - hull[0][0, 0] + hull[2][0, 0] - hull[1][0, 0],
                hull[3][0, 1] - hull[0][0, 1] + hull[2][0, 1] - hull[1][0, 1])
            len03p12 = abs(v03p12)

            # Vector from the centroid of the U shape to the centroid of its hull should also point outward from the U:
            momC = cv2.moments(c)
            momH = cv2.moments(hull)
            vCH = complex(
                momH['m10'] / momH['m00'] - momC['m10'] / momC['m00'],
                momH['m01'] / momH['m00'] - momC['m01'] / momC['m00'])
            lenCH = abs(vCH)

            if len10p23 < 0.1 or len03p12 < 0.1 or lenCH < 0.1: continue
            str2 += "V"  # Shape vectors ok

            good = (v10p23.real * vCH.real +
                    v10p23.imag * vCH.imag) / (len10p23 * lenCH)
            bad = (v03p12.real * vCH.real +
                   v03p12.imag * vCH.imag) / (len03p12 * lenCH)

            # We reject upside-down detections as those are likely to be spurious:
            if vCH.imag >= -2.0: continue
            str2 += "U"  # U shape is upright

            # Fixup the ordering of the vertices if needed:
            if bad > good: hull = np.roll(hull, shift=1, axis=0)

            # This detection is a keeper:
            str2 += " OK"
            hlist.append(hull)

        if len(str2) > len(beststr2): beststr2 = str2

        # Display any results requested by the users:
        if outimg is not None and outimg.valid():
            if (outimg.width == w * 2):
                jevois.pasteGreyToYUYV(imgth, outimg, w, 0)
            jevois.writeText(outimg, outstr + beststr2, 3, h + 1,
                             jevois.YUYV.White, jevois.Font.Font6x10)

        return hlist
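
estimatePose() is also not shown; a minimal sketch using cv2.solvePnP, assuming self.owm and self.ohm hold the physical object width and height in meters (as used by drawDetections() below) and the 0-1-2-3 vertex ordering established above:

    def estimatePose(self, hlist):
        rvecs = []
        tvecs = []
        # 3D model points of the four corners, in the 0-1-2-3 order above,
        # centered on the object with Z = 0 in the object plane:
        hw = self.owm * 0.5
        hh = self.ohm * 0.5
        objPoints = np.array([(-hw, -hh, 0.0), (-hw, hh, 0.0),
                              (hw, hh, 0.0), (hw, -hh, 0.0)])
        for hull in hlist:
            imagePoints = np.array(hull, dtype=np.float64).reshape(4, 2)
            ok, rv, tv = cv2.solvePnP(objPoints, imagePoints,
                                      self.camMatrix, self.distCoeffs)
            # Mark failures with all-zero vectors, which drawDetections() skips:
            rvecs.append(rv if ok else np.zeros(3))
            tvecs.append(tv if ok else np.zeros(3))
        return (rvecs, tvecs)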
Example #6
    def process(self, inframe, outframe):
        # Get the next camera image (may block until it is captured). To avoid wasting much time assembling a composite
        # output image with multiple panels by concatenating numpy arrays, in this module we use raw YUYV images and
        # fast paste and draw operations provided by JeVois on those images:
        inimg = inframe.get()

        # Start measuring image processing time:
        self.timer.start()

        # Convert input image to BGR24:
        imgbgr = jevois.convertToCvBGR(inimg)
        h, w, chans = imgbgr.shape

        # Get pre-allocated but blank output image which we will send over USB:
        outimg = outframe.get()
        outimg.require("output", w * 2, h + 12, jevois.V4L2_PIX_FMT_YUYV)
        #outimg.require("output", w, h + 12, jevois.V4L2_PIX_FMT_YUYV)
        jevois.paste(inimg, outimg, 0, 0)
        jevois.drawFilledRect(outimg, 0, h, outimg.width, outimg.height - h,
                              jevois.YUYV.Black)

        # Let camera know we are done using the input image:
        inframe.done()

        # Get a list of quadrilateral convex hulls for all good objects:
        hlist = self.detect(imgbgr, outimg)

        # Load camera calibration if needed:
        if not hasattr(self, 'camMatrix'): self.loadCameraCalibration(w, h)

        # Map to 6D (inverse perspective):
        (rvecs, tvecs) = self.estimatePose(hlist)

        # Average pose values over a sliding window of the last 10 samples:
        for i in range(len(tvecs)):
            for k in range(len(tvecs[i])):
                self.tsum[k].append(tvecs[i][k])
                while (len(self.tsum[k]) > 10):
                    self.tsum[k].pop(0)

        for i in range(len(rvecs)):
            for k in range(len(rvecs[i])):
                self.rsum[k].append(rvecs[i][k])
                while (len(self.rsum[k]) > 10):
                    self.rsum[k].pop(0)

        # Find distance along ground to robot (Y)
        try:
            X = sum(self.tsum[0]) / len(self.tsum[0]) * self.mToFt
            Y = sum(self.tsum[2]) / len(self.tsum[2]) * self.mToFt
            Z = sum(self.tsum[1]) / len(self.tsum[1]) * self.mToFt

            groundDis = -0.2509 + 1.2073 * math.cos(
                self.cameraAngle -
                math.atan(Z / Y)) * math.sqrt(math.pow(Z, 2) + math.pow(Y, 2))
        except ZeroDivisionError:
            # No samples yet (or Y == 0): report zeros instead of crashing.
            groundDis = 0
            X = 0
            Y = 0
            Z = 0

        # Output the averaged target position (feet) and yaw angle (degrees):
        if len(self.rsum[1]) > 0:
            avgAngle = (180 / math.pi) * sum(self.rsum[1]) / len(self.rsum[1])
        else:
            avgAngle = 0
        jevois.writeText(
            outimg, "X: {} Y: {} Angle: {}".format(X, groundDis, avgAngle),
            3, 0, jevois.YUYV.White, jevois.Font.Font6x10)

        # Send all serial messages:
        self.sendAllSerial(w, h, hlist, rvecs, tvecs)

        # Draw all detections in 3D:
        self.drawDetections(outimg, hlist, rvecs, tvecs)

        # Write frames/s info from our timer (NOTE: does not account for output conversion time):
        fps = self.timer.stop()
        jevois.writeText(outimg, fps, 3, h - 10, jevois.YUYV.White,
                         jevois.Font.Font6x10)

        # Test Serial Output
        #jevois.sendSerial("{} {}".
        #        format(-1 * self.mToFt, -1 * self.mToFt))

        # We are done with the output, ready to send it to host over USB:
        outframe.send()
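
The append/pop windowing over self.tsum and self.rsum above can be written more directly with collections.deque, which drops the oldest sample automatically once full; a sketch of the equivalent setup (attribute names kept from the code above):

from collections import deque

# In the constructor: one 10-sample window per translation/rotation component:
self.tsum = [deque(maxlen=10) for _ in range(3)]
self.rsum = [deque(maxlen=10) for _ in range(3)]

# The while-pop loops in process() then reduce to plain appends:
#     self.tsum[k].append(tvecs[i][k])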
Example #7
    def drawDetections(self, outimg, hlist, rvecs=None, tvecs=None):
        # Show trihedron and parallelepiped centered on object:
        hw = self.owm * 0.5
        hh = self.ohm * 0.5
        dd = -max(hw, hh)
        i = 0
        empty = np.zeros(3)  # rvec value that marks a failed solvePnP

        for obj in hlist:
            # skip those for which solvePnP failed:
            if np.array_equal(rvecs[i], empty):
                i += 1
                continue

            jevois.writeText(outimg, str(i), 3, 100 + 10 * i,
                             jevois.YUYV.White, jevois.Font.Font6x10)

            # Project axis points:
            axisPoints = np.array([(0.0, 0.0, 0.0), (hw, 0.0, 0.0),
                                   (0.0, hh, 0.0), (0.0, 0.0, dd)])
            imagePoints, jac = cv2.projectPoints(axisPoints, rvecs[i],
                                                 tvecs[i], self.camMatrix,
                                                 self.distCoeffs)

            # Draw axis lines:
            jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5),
                            int(imagePoints[0][0, 1] + 0.5),
                            int(imagePoints[1][0, 0] + 0.5),
                            int(imagePoints[1][0, 1] + 0.5), 2,
                            jevois.YUYV.MedPurple)
            jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5),
                            int(imagePoints[0][0, 1] + 0.5),
                            int(imagePoints[2][0, 0] + 0.5),
                            int(imagePoints[2][0, 1] + 0.5), 2,
                            jevois.YUYV.MedGreen)
            # Normal to plane
            jevois.drawLine(outimg, int(imagePoints[0][0, 0] + 0.5),
                            int(imagePoints[0][0, 1] + 0.5),
                            int(imagePoints[3][0, 0] + 0.5),
                            int(imagePoints[3][0, 1] + 0.5), 2,
                            jevois.YUYV.MedGrey)

            # Also draw a parallelepiped:
            cubePoints = np.array([(-hw, -hh, 0.0), (hw, -hh, 0.0),
                                   (hw, hh, 0.0), (-hw, hh, 0.0),
                                   (-hw, -hh, dd), (hw, -hh, dd), (hw, hh, dd),
                                   (-hw, hh, dd)])
            cu, jac2 = cv2.projectPoints(cubePoints, rvecs[i], tvecs[i],
                                         self.camMatrix, self.distCoeffs)

            # Round all the coordinates and cast to int for drawing:
            cu = np.rint(cu)

            # Draw parallelepiped lines:
            jevois.drawLine(outimg, int(cu[0][0, 0]), int(cu[0][0, 1]),
                            int(cu[1][0, 0]), int(cu[1][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[1][0, 0]), int(cu[1][0, 1]),
                            int(cu[2][0, 0]), int(cu[2][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[2][0, 0]), int(cu[2][0, 1]),
                            int(cu[3][0, 0]), int(cu[3][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[3][0, 0]), int(cu[3][0, 1]),
                            int(cu[0][0, 0]), int(cu[0][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[4][0, 0]), int(cu[4][0, 1]),
                            int(cu[5][0, 0]), int(cu[5][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[5][0, 0]), int(cu[5][0, 1]),
                            int(cu[6][0, 0]), int(cu[6][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[6][0, 0]), int(cu[6][0, 1]),
                            int(cu[7][0, 0]), int(cu[7][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[7][0, 0]), int(cu[7][0, 1]),
                            int(cu[4][0, 0]), int(cu[4][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[0][0, 0]), int(cu[0][0, 1]),
                            int(cu[4][0, 0]), int(cu[4][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[1][0, 0]), int(cu[1][0, 1]),
                            int(cu[5][0, 0]), int(cu[5][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[2][0, 0]), int(cu[2][0, 1]),
                            int(cu[6][0, 0]), int(cu[6][0, 1]), 1,
                            jevois.YUYV.LightGreen)
            jevois.drawLine(outimg, int(cu[3][0, 0]), int(cu[3][0, 1]),
                            int(cu[7][0, 0]), int(cu[7][0, 1]), 1,
                            jevois.YUYV.LightGreen)

            i += 1
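
The twelve drawLine() calls enumerate the parallelepiped edges one at a time; an equivalent, more compact loop over an edge table (a sketch with identical drawing behavior):

            # Edges as (start, end) indices into cu: front face, back face, connectors:
            edges = [(0, 1), (1, 2), (2, 3), (3, 0),
                     (4, 5), (5, 6), (6, 7), (7, 4),
                     (0, 4), (1, 5), (2, 6), (3, 7)]
            for a, b in edges:
                jevois.drawLine(outimg, int(cu[a][0, 0]), int(cu[a][0, 1]),
                                int(cu[b][0, 0]), int(cu[b][0, 1]), 1,
                                jevois.YUYV.LightGreen)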
Example #8
    def detect(self, imgbgr, outimg=None):
        maxn = 9  # max number of objects we will consider
        h, w, chans = imgbgr.shape

        # Convert input image to HSV:
        imghsv = cv2.cvtColor(imgbgr, cv2.COLOR_BGR2HSV)

        # Isolate pixels inside our desired HSV range:
        imgth = cv2.inRange(imghsv, self.HSVmin, self.HSVmax)
        outstr = "H={}-{} S={}-{} V={}-{} ".format(
            self.HSVmin[0], self.HSVmax[0], self.HSVmin[1], self.HSVmax[1],
            self.HSVmin[2], self.HSVmax[2])

        # Create structuring elements for morpho maths:
        if not hasattr(self, 'erodeElement'):
            self.erodeElement = cv2.getStructuringElement(
                cv2.MORPH_RECT, (2, 2))
            self.dilateElement = cv2.getStructuringElement(
                cv2.MORPH_RECT, (2, 2))

        # Apply morphological operations to cleanup the image noise:
        imgth = cv2.erode(imgth, self.erodeElement)
        imgth = cv2.dilate(imgth, self.dilateElement)

        contours, hierarchy = cv2.findContours(imgth, cv2.RETR_CCOMP,
                                               cv2.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=cv2.contourArea, reverse=True)[:maxn]

        # Find and create the raw hulls
        hulls = ()
        centers = ()
        for i in range(len(contours)):
            rawhull = cv2.convexHull(contours[i], clockwise=True)
            rawhullperi = cv2.arcLength(rawhull, closed=True)
            hull = cv2.approxPolyDP(rawhull,
                                    epsilon=self.epsilon * rawhullperi * 3.0,
                                    closed=True)

            # Outline hull and find center
            huarea = cv2.contourArea(hull, oriented=False)
            if len(hull) == 4 and self.hullarea[0] < huarea < self.hullarea[1]:
                npHull = np.array(hull, dtype=int).reshape(len(hull), 2, 1)

                centers += (((npHull[0, 0, 0] + npHull[1, 0, 0] +
                              npHull[2, 0, 0] + npHull[3, 0, 0]) / 4,
                             (npHull[0, 1, 0] + npHull[1, 1, 0] +
                              npHull[2, 1, 0] + npHull[3, 1, 0]) / 4), )
                hulls += (npHull, )

        # Reset imgth to an (almost) blank mask; the U-shape map is drawn into it below:
        imgth = cv2.inRange(imghsv, np.array([122, 122, 122], dtype=np.uint8),
                            np.array([122, 122, 122], dtype=np.uint8))

        # Find the two lowest points of each hull (typically the bottom-left and bottom-right corners):
        bottomPoints = ()
        for i in range(len(hulls)):
            bottomPoint = (-1, 0)
            secondPoint = (-1, 0)
            for k in range(len(hulls[i])):
                if (bottomPoint[0] == -1):
                    bottomPoint = (k, hulls[i][k, 1, 0])
                elif (bottomPoint[1] < hulls[i][k, 1, 0]):
                    bottomPoint = (k, hulls[i][k, 1, 0])

            for k in range(len(hulls[i])):
                if (k != bottomPoint[0]):
                    if (secondPoint[0] == -1):
                        secondPoint = (k, hulls[i][k, 1, 0])
                    elif (secondPoint[1] < hulls[i][k, 1, 0]):
                        secondPoint = (k, hulls[i][k, 1, 0])

            if (abs(centers[i][0] - hulls[i][bottomPoint[0], 0, 0]) >
                    abs(centers[i][0] - hulls[i][secondPoint[0], 0, 0])):
                bottomPoints += ((i, bottomPoint[0]), )
            else:
                bottomPoints += ((i, secondPoint[0]), )

        # Find closest other hull to each hull
        nearHull = ()
        for i in range(len(hulls)):
            closest = (-1, 0.0)
            for k in range(len(hulls)):
                if (i != k):
                    if ((hulls[bottomPoints[i][0]][bottomPoints[i][1], 0, 0] -
                         centers[i][0]) *
                        (hulls[bottomPoints[k][0]][bottomPoints[k][1], 0, 0] -
                         centers[k][0])) < 0:
                        if (centers[i][0] < centers[k][0] and
                                hulls[bottomPoints[i][0]][bottomPoints[i][1],
                                                          0, 0] < centers[i][0]
                            ) or (centers[i][0] > centers[k][0]
                                  and hulls[bottomPoints[i][0]]
                                  [bottomPoints[i][1], 0, 0] > centers[i][0]):
                            if (closest[0] == -1):
                                closest = (
                                    k,
                                    math.pow(centers[i][0] - centers[k][0],
                                             2) +
                                    math.pow(centers[i][1] - centers[k][1], 2))
                            elif (
                                    closest[1] >
                                (math.pow(centers[i][0] - centers[k][0], 2) +
                                 math.pow(centers[i][1] - centers[k][1], 2))):
                                closest = (
                                    k,
                                    math.pow(centers[i][0] - centers[k][0],
                                             2) +
                                    math.pow(centers[i][1] - centers[k][1], 2))

            nearHull += (closest[0], )

        # Find the two closest points between a hull and its nearest hull
        closePoint = ()
        for i in range(len(hulls)):
            closest = (-1, -1, 0.0)
            for k in range(len(hulls[i])):
                for j in range(len(hulls[nearHull[i]])):
                    if (closest[0] == -1 and closest[1] == -1):
                        closest = (k, j,
                                   math.pow(
                                       hulls[i][k, 0, 0] -
                                       hulls[nearHull[i]][j, 0, 0], 2) +
                                   math.pow(
                                       hulls[i][k, 1, 0] -
                                       hulls[nearHull[i]][j, 1, 0], 2))
                    elif (closest[2] > math.pow(
                            hulls[i][k, 0, 0] - hulls[nearHull[i]][j, 0, 0], 2)
                          + math.pow(
                              hulls[i][k, 1, 0] - hulls[nearHull[i]][j, 1, 0],
                              2)):
                        closest = (k, j,
                                   math.pow(
                                       hulls[i][k, 0, 0] -
                                       hulls[nearHull[i]][j, 0, 0], 2) +
                                   math.pow(
                                       hulls[i][k, 1, 0] -
                                       hulls[nearHull[i]][j, 1, 0], 2))
            closePoint += ((closest[0], closest[1]), )

        # Find the target center
        hullCenter = []
        for i in range(len(hulls)):
            if (centers[i][0] > centers[nearHull[i]][0]
                    and nearHull[nearHull[i]] == i):
                hullCenter += [(centers[i][0] + centers[nearHull[i]][0]) / 2,
                               (centers[i][1] + centers[nearHull[i]][1]) / 2,
                               i],

        # Choose target closest to center of screen
        targetHull = (-1, 0)
        for i in range(len(hullCenter)):
            if (targetHull[0] == -1):
                targetHull = (hullCenter[i][2],
                              abs(hullCenter[i][0] - self.width / 4))
            elif (targetHull[1] > abs(hullCenter[i][0] - self.width / 4)):
                targetHull = (hullCenter[i][2],
                              abs(hullCenter[i][0] - self.width / 4))

        # Generate the U-Shape map of the target for pose estimation if a target exists
        hlist = []
        if (targetHull[0] != -1):
            # Calculate the displacement from the edge of the physical target to the corner of the drawn target:
            xChange = hulls[nearHull[targetHull[0]]][
                (closePoint[targetHull[0]][1] + 3) % 4, 0,
                0] - hulls[targetHull[0]][(closePoint[targetHull[0]][0] + 1) %
                                          4, 0, 0]
            yChange = hulls[nearHull[targetHull[0]]][
                (closePoint[targetHull[0]][1] + 3) % 4, 1,
                0] - hulls[targetHull[0]][(closePoint[targetHull[0]][0] + 1) %
                                          4, 1, 0]

            # Map points to desired U-Shape
            #
            #   0               3
            #   |               |
            #   |               |
            #   |               |
            #   1_______________2
            #
            corners = (
                (hulls[nearHull[targetHull[0]]][(closePoint[targetHull[0]][1] +
                                                 1) % 4, 0, 0],
                 hulls[nearHull[targetHull[0]]][(closePoint[targetHull[0]][1] +
                                                 1) % 4, 1, 0]),
                (hulls[targetHull[0]][(closePoint[targetHull[0]][0] + 1) % 4,
                                      0, 0] + xChange * self.targetRatio,
                 hulls[targetHull[0]][(closePoint[targetHull[0]][0] + 1) % 4,
                                      1, 0] + yChange * self.targetRatio),
                (hulls[nearHull[targetHull[0]]][
                    (closePoint[targetHull[0]][1] + 3) % 4, 0, 0] -
                 xChange * self.targetRatio, hulls[nearHull[targetHull[0]]][
                     (closePoint[targetHull[0]][1] + 3) % 4, 1, 0] -
                 yChange * self.targetRatio),
                (hulls[targetHull[0]][(closePoint[targetHull[0]][0] + 3) % 4,
                                      0, 0],
                 hulls[targetHull[0]][(closePoint[targetHull[0]][0] + 3) % 4,
                                      1, 0]),
            )

            # Map the U-shape's corners, weighted by the rectangular corners:
            poly = np.array([
                [int(corners[0][0]), int(corners[0][1])],
                [int(corners[1][0]), int(corners[1][1])],
                [int(corners[2][0]), int(corners[2][1])],
                [int(corners[3][0]), int(corners[3][1])],
                [
                    int((corners[3][0] *
                         (1 - self.percentFill) + corners[0][0] *
                         (self.percentFill))),
                    int((corners[3][1] *
                         (1 - self.percentFill) + corners[0][1] *
                         (self.percentFill)))
                ],
                [
                    int((corners[2][0] *
                         (1 - self.percentFill) + corners[0][0] *
                         (self.percentFill))),
                    int((corners[2][1] *
                         (1 - self.percentFill) + corners[0][1] *
                         (self.percentFill)))
                ],
                [
                    int((corners[1][0] *
                         (1 - self.percentFill) + corners[3][0] *
                         (self.percentFill))),
                    int((corners[1][1] *
                         (1 - self.percentFill) + corners[3][1] *
                         (self.percentFill)))
                ],
                [
                    int((corners[0][0] *
                         (1 - self.percentFill) + corners[3][0] *
                         (self.percentFill))),
                    int((corners[0][1] *
                         (1 - self.percentFill) + corners[3][1] *
                         (self.percentFill)))
                ],
            ], np.int32)

            # Draw U-Shaped Map
            poly = poly.reshape((-1, 1, 2))
            cv2.fillPoly(imgth, [poly], (255, 255, 255))

            # Reject the target if any corner is within pxThreshold pixels of the image borders:
            isInRange = True
            for point in corners:
                if (point[0] > self.pxThreshold
                        and point[0] < self.width - self.pxThreshold
                        and point[1] > self.pxThreshold
                        and point[1] < self.height - self.pxThreshold):
                    continue
                isInRange = False

            if (isInRange): hlist.append(corners)

        # Display any results requested by the users:
        if outimg is not None and outimg.valid():
            if (outimg.width == w * 2):
                jevois.pasteGreyToYUYV(imgth, outimg, w, 0)
            jevois.writeText(outimg, "yeet 2.0", 3, h + 1, jevois.YUYV.White,
                             jevois.Font.Font6x10)

        # Return the target
        return hlist
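
The nearest-hull and closest-point searches above recompute the same squared distance in several places; a small helper (hypothetical, not part of the original module) would shorten them considerably:

    @staticmethod
    def distSq(p, q):
        # Squared Euclidean distance between two (x, y) points:
        return (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2

    # e.g. the nearest-hull comparison then reads:
    #     d = self.distSq(centers[i], centers[k])
    #     if closest[0] == -1 or closest[1] > d: closest = (k, d)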
Example #9
    def detect(self, imggray, outimg=None):
        h, w = imggray.shape

        # Create a keypoint detector if needed:
        if not hasattr(self, 'detector'):
            self.detector = cv2.ORB_create()

        # Load training image and detect keypoints on it if needed:
        if not hasattr(self, 'refkp'):
            refimg = cv2.imread(self.fname, 0)
            self.refkp, self.refdes = self.detector.detectAndCompute(
                refimg, None)

            # Also store corners of reference image and of window for homography mapping:
            refh, refw = refimg.shape
            self.refcorners = np.float32([[0.0, 0.0], [0.0,
                                                       refh], [refw, refh],
                                          [refw, 0.0]]).reshape(-1, 1, 2)
            self.wincorners = np.float32(
                [[
                    self.winleft * refw / self.owm,
                    self.wintop * refh / self.ohm
                ],
                 [
                     self.winleft * refw / self.owm,
                     (self.wintop + self.winh) * refh / self.ohm
                 ],
                 [(self.winleft + self.winw) * refw / self.owm,
                  (self.wintop + self.winh) * refh / self.ohm],
                 [(self.winleft + self.winw) * refw / self.owm,
                  self.wintop * refh / self.ohm]]).reshape(-1, 1, 2)
            jevois.LINFO(
                "Extracted {} keypoints and descriptors from {}".format(
                    len(self.refkp), self.fname))

        # Compute keypoints and descriptors:
        kp, des = self.detector.detectAndCompute(imggray, None)
        str = "{} keypoints".format(len(kp))

        # Create a matcher if needed:
        if not hasattr(self, 'matcher'):
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # Compute matches between reference image and camera image, then sort them by distance:
        matches = self.matcher.match(des, self.refdes)
        matches = sorted(matches, key=lambda x: x.distance)
        str += ", {} matches".format(len(matches))

        # Keep only good matches:
        lastidx = 0
        for m in matches:
            if m.distance < self.distth: lastidx += 1
            else: break
        matches = matches[0:lastidx]
        str += ", {} good".format(len(matches))

        # If we have enough matches, compute homography:
        corners = []
        wincorners = []
        if len(matches) >= 10:
            obj = []
            scene = []

            # Localize the object (see JeVois C++ class ObjectMatcher for details):
            for m in matches:
                obj.append(self.refkp[m.trainIdx].pt)
                scene.append(kp[m.queryIdx].pt)

            # Compute the homography:
            hmg, mask = cv2.findHomography(np.array(obj), np.array(scene),
                                           cv2.RANSAC, 5.0)

            # Check homography conditioning using SVD:
            u, s, v = np.linalg.svd(hmg, full_matrices=False)

            # We need the smallest singular value to not be too small, and the ratio of largest to smallest singular
            # value to be quite large, for our homography to be declared good here. Note that linalg.svd returns the
            # singular values in descending order already:
            if s[-1] > 0.001 and s[0] / s[-1] > 100:
                # Project the reference image corners to the camera image:
                corners = cv2.perspectiveTransform(self.refcorners, hmg)
                wincorners = cv2.perspectiveTransform(self.wincorners, hmg)

        # Display any results requested by the users:
        if outimg is not None and outimg.valid():
            if len(corners) == 4:
                jevois.drawLine(outimg, int(corners[0][0, 0] + 0.5),
                                int(corners[0][0, 1] + 0.5),
                                int(corners[1][0, 0] + 0.5),
                                int(corners[1][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
                jevois.drawLine(outimg, int(corners[1][0, 0] + 0.5),
                                int(corners[1][0, 1] + 0.5),
                                int(corners[2][0, 0] + 0.5),
                                int(corners[2][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
                jevois.drawLine(outimg, int(corners[2][0, 0] + 0.5),
                                int(corners[2][0, 1] + 0.5),
                                int(corners[3][0, 0] + 0.5),
                                int(corners[3][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
                jevois.drawLine(outimg, int(corners[3][0, 0] + 0.5),
                                int(corners[3][0, 1] + 0.5),
                                int(corners[0][0, 0] + 0.5),
                                int(corners[0][0, 1] + 0.5), 2,
                                jevois.YUYV.LightPink)
            jevois.writeText(outimg, outstr, 3, h + 4, jevois.YUYV.White,
                             jevois.Font.Font6x10)

        # Return window corners if we did indeed detect the object:
        hlist = []
        if len(wincorners) == 4: hlist.append(wincorners)

        return hlist
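
The same ORB + BFMatcher + homography pipeline can be exercised outside JeVois; a minimal standalone sketch (file names are placeholders):

import cv2
import numpy as np

detector = cv2.ORB_create()
ref = cv2.imread("reference.png", 0)  # placeholder file names
scene = cv2.imread("scene.png", 0)
refkp, refdes = detector.detectAndCompute(ref, None)
kp, des = detector.detectAndCompute(scene, None)

matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(matcher.match(des, refdes), key=lambda m: m.distance)[:50]

# Localize the reference object in the scene (needs at least 4 matches):
obj = np.array([refkp[m.trainIdx].pt for m in matches])
pts = np.array([kp[m.queryIdx].pt for m in matches])
hmg, mask = cv2.findHomography(obj, pts, cv2.RANSAC, 5.0)
print("Homography:\n", hmg)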