Example #1
0
    def __init__(self):
        """!
        @brief      Constructs a new instance.

                    Initializes empty frame buffers, puts the Kinect into
                    high-resolution video mode with auto exposure / white
                    balance disabled, probes the depth stream to detect
                    whether a Kinect is connected, and sets up click /
                    calibration / block-detection state.
        """
        self.VideoFrame = np.array([])
        self.DepthFrameRaw = np.array([]).astype(np.uint16)
        # Extra arrays for colormapping the depth image.
        self.DepthFrameHSV = np.zeros((480, 640, 3)).astype(np.uint8)
        self.DepthFrameRGB = np.array([])
        # Initialize kinect & turn off auto gain and white balance so the
        # colors stay consistent between frames.
        freenect.sync_get_video_with_res(resolution=freenect.RESOLUTION_HIGH)
        freenect.sync_set_autoexposure(False)
        freenect.sync_set_whitebalance(False)
        # Check that the depth stream returns a frame and flag
        # kinectConnected. Use `is None` rather than `== None` (PEP 8
        # singleton comparison; also avoids element-wise broadcasting if the
        # call ever returns a numpy array).
        depth = freenect.sync_get_depth_with_res(format=freenect.DEPTH_11BIT)
        self.kinectConnected = depth is not None

        # Mouse clicks & calibration variables.
        self.depth2rgb_affine = np.float32([[1, 0, 0], [0, 1, 0]])
        self.kinectCalibrated = False
        self.last_click = np.array([0, 0])
        self.new_click = False
        self.rgb_click_points = np.zeros((5, 2), int)
        self.depth_click_points = np.zeros((5, 2), int)
        # Block info, filled in by the block detector.
        self.block_contours = np.array([])
        self.block_detections = np.array([])
Example #2
0
    def __init__(self):
        """!
        @brief      Constructs a new instance.

                    Initializes empty frame buffers, puts the Kinect into
                    medium-resolution video mode with auto exposure / white
                    balance disabled, probes the depth stream to detect
                    whether a Kinect is connected, loads the camera
                    calibration, and sets up click / block-detection state.
        """
        self.VideoFrame = np.array([])
        self.VideoFrameHSV = np.array([])
        self.DepthFrameRaw = np.array([]).astype(np.uint16)
        # Extra arrays for colormapping the depth image.
        self.DepthFrameHSV = np.zeros((480, 640, 3)).astype(np.uint8)
        self.DepthFrameRGB = np.array([])
        self.DepthFrameFiltered = np.array([])
        # Initialize kinect & turn off auto gain and white balance so the
        # colors stay consistent between frames.
        freenect.sync_get_video_with_res(resolution=freenect.RESOLUTION_MEDIUM)
        freenect.sync_set_autoexposure(False)
        freenect.sync_set_whitebalance(False)
        # Check that the depth stream returns a frame and flag
        # kinectConnected. Use `is None` rather than `== None` (PEP 8
        # singleton comparison; also avoids element-wise broadcasting if the
        # call ever returns a numpy array).
        depth = freenect.sync_get_depth_with_res(format=freenect.DEPTH_11BIT)
        self.kinectConnected = depth is not None

        self.kinectCalibrated = False
        # Mouse clicks & calibration variables.
        # Affine map from depth-image pixels to RGB-image pixels,
        # determined empirically from test_kinect.
        self.depth2rgb_affine = np.float32(
            [[9.21074557E-1, -9.91213238E-3, -2.15895387E-1],
             [3.72252283E-3, 9.19210560E-1,
              4.14502181E+1]])
        # Load the inverse extrinsic matrix / camera calibration, then the
        # workspace boundary.
        self.loadCameraCalibration(
            "/home/student/armlab-w20/util/calibration.cfg")
        self.getWorkspaceBoundary()

        self.last_click = np.array([0, 0])
        self.last_rclick = np.array([0, 0])
        self.new_click = False
        self.new_rclick = False
        self.rgb_click_points = np.zeros((5, 2), np.float32)
        self.depth_click_points = np.zeros((5, 2), int)
        # Block info, filled in by the block detector.
        self.block_contours = np.array([])
        self.block_detections = np.array([])
        self.blocks = []
        color.initColors()
Example #3
0
 def rgbedges(self):
     """Capture one RGB frame from the Kinect and return its edge map.

     The frame is median-blurred, converted to grayscale, thresholded at
     220 into a binary image, then run through Canny. The intermediate
     binary image and the final edge map are also written to disk as
     gray_rgb.jpg and gray_rgb_edge.jpg for inspection.
     """
     frame = freenect.sync_get_video_with_res()[0]
     # Smooth out sensor noise before thresholding.
     blurred = cv2.medianBlur(frame, 5)
     gray = cv2.cvtColor(blurred, cv2.COLOR_RGB2GRAY)
     _, binary = cv2.threshold(gray, 220, 255, cv2.THRESH_BINARY)
     cv2.imwrite("gray_rgb.jpg", binary)
     edges = cv2.Canny(binary, 200, 250)
     cv2.imwrite("gray_rgb_edge.jpg", edges)
     return edges
Example #4
0
 def captureVideoFrame(self):
     """!
     @brief Capture frame from Kinect, format is 24bit RGB
     """
     if not self.kinectConnected:
         # No hardware present: fall back to a frame loaded from disk.
         self.loadVideoFrame()
     else:
         frame, _timestamp = freenect.sync_get_video_with_res(
             resolution=freenect.RESOLUTION_MEDIUM)
         self.VideoFrame = frame
     # Keep an HSV copy alongside the RGB frame for color-based processing.
     self.VideoFrameHSV = cv2.cvtColor(self.VideoFrame, cv2.COLOR_RGB2HSV)
Example #5
0
 def captureVideoFrame(self):
     """!
     @brief Capture frame from Kinect, format is 24bit RGB
     """
     if not self.kinectConnected:
         # No hardware present: fall back to a frame loaded from disk.
         self.loadVideoFrame()
     else:
         frame, _timestamp = freenect.sync_get_video_with_res(
             resolution=freenect.RESOLUTION_HIGH)
         self.VideoFrame = frame
     self.processVideoFrame()
Example #6
0
    def toggleExposure(self, state):
        """!
        @brief      Toggle auto exposure and auto white balance together.

        @param      state  False turns off auto exposure, True turns it on
        """
        # The two original branches were identical except for the boolean
        # passed to the sync_set_* calls; collapse them and pass the flag
        # through. A video grab comes first so the sync runloop is active
        # before the settings calls are issued (preserves original order).
        freenect.sync_get_video_with_res(
            resolution=freenect.RESOLUTION_MEDIUM)
        enable = bool(state)  # normalize truthy input to a plain bool
        freenect.sync_set_autoexposure(enable)
        freenect.sync_set_whitebalance(enable)
def get_video():
    """Grab one high-resolution RGB frame and convert it for OpenCV display."""
    frame = freenect.sync_get_video_with_res(
        resolution=freenect.RESOLUTION_HIGH)[0]
    return frame_convert2.video_cv(frame)
import frame_convert2
import numpy as np
from apriltag import apriltag

# Create the OpenCV preview window up front so frames can be shown into it.
cv2.namedWindow('Video')
print('Press ESC in window to stop')


def get_video():
    """Grab one high-resolution RGB frame and convert it for OpenCV display."""
    rgb_frame = freenect.sync_get_video_with_res(
        resolution=freenect.RESOLUTION_HIGH)[0]
    return frame_convert2.video_cv(rgb_frame)


# One-time Kinect setup: open the device, prime the high-resolution video
# stream, and lock exposure / white balance so colors stay consistent.
freenect.init()
freenect.sync_get_video_with_res(resolution=freenect.RESOLUTION_HIGH)
freenect.sync_set_autoexposure(False)
freenect.sync_set_whitebalance(False)

# AprilTag detector for the 36h11 family; decimate=2.0 downsamples the
# image during detection (faster, less precise corner localization).
detector = apriltag("tag36h11", threads=4, decimate=2.0)

# Tag corner coordinates in the tag's own frame (z = 0 plane), corners at
# +/-0.1 units -- presumably meters for a 0.2 m tag; confirm against the
# physical tag size. Ordering must match the detector's corner output.
object_points = np.array(
    [[-0.1, -0.1, 0.0], [0.1, -0.1, 0.0], [0.1, 0.1, 0.0], [-0.1, 0.1, 0.0]],
    dtype="double")
# Pinhole intrinsics [[fx, 0, cx], [0, fy, cy], [0, 0, 1]] -- presumably
# from a prior calibration of this Kinect at high resolution; verify source.
camera_matrix = np.array([[1.09194704e+03, 0.00000000e+00, 6.79986322e+02],
                          [0.00000000e+00, 1.09300427e+03, 5.09471427e+02],
                          [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]],
                         dtype="double")

# Lens distortion coefficients in OpenCV order (k1, k2, p1, p2, k3).
dist_coeffs = np.array(
    [0.17099743, -0.24604911, 0.00678919, 0.01108217, 0.02124964])
Example #9
0
    def blockDetector(self):
        """!
        @brief      Detect blocks from rgb

                    TODO: Implement your block detector here. You will need to locate blocks in 3D space and put their XYZ
                    locations in self.block_detections

                    Pipeline: grab a high-res RGB frame, mask out everything
                    outside the workspace rectangle (and a central region),
                    threshold per color in HSV, clean each mask with
                    morphological close/open, filter contours by area and
                    shape similarity to a reference contour, then convert
                    each surviving contour center to world coordinates.
        """
        # Reference block contour used below to reject non-block shapes via
        # cv2.matchShapes; saved offline by a dev script (path is relative
        # to the working directory).
        # Load ref contour
        contour_ref = np.load("blockdetector_dev/contour_ref.npy")

        # Grab one high-resolution RGB frame, convert to HSV for color
        # thresholding, and dump a BGR copy to disk for debugging.
        # Smoothing Kernel:
        rgb = freenect.sync_get_video_with_res(
            resolution=freenect.RESOLUTION_HIGH)[0]
        bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        hsvImg = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
        cv2.imwrite("blockdetector_dev/testImage.png", bgr)

        # 5x5 structuring element for the morphological open/close below.
        kernel = np.ones((5, 5), np.uint8)

        # Crop the image to ignore background: everything outside the
        # workspace rectangle is overwritten with a uniform HSV value
        # (0, 0, 100) so it never matches a color range. The borderPoints
        # rows appear to mix row/column roles (ctp1 is used as a row bound,
        # ctp2/ctp6 as column bounds) -- presumably hand-tuned for this
        # camera mounting; verify before changing.
        borderPoints = np.array([[880, 260], [190, 260], [183, 951],
                                 [875, 961]])
        m, n, _ = hsvImg.shape
        ctp1 = borderPoints[0, 0]
        ctp2 = borderPoints[0, 1]
        ctp3 = borderPoints[1, 0]
        ctp4 = borderPoints[1, 1]
        # ctp5 = borderPoints[2, 0]
        ctp6 = borderPoints[2, 1]
        hsvImg[:, 0:ctp2] = np.array([0, 0, 100])
        hsvImg[:, ctp6:n] = np.array([0, 0, 100])
        hsvImg[0:ctp3, ctp4:ctp6] = np.array([0, 0, 100])
        hsvImg[ctp1:m, ctp2:ctp6] = np.array([0, 0, 100])
        # whiteBoard is built but not used further in this method.
        whiteBoard = np.zeros([m, n, 3], dtype=np.uint8)
        whiteBoard[:, :] = np.array([0, 0, 100], dtype=np.uint8)

        # Mask the center region with the same background value --
        # presumably to hide the robot arm base; confirm.
        centerPoints = np.array([[660, 560], [560, 560], [560, 650],
                                 [660, 650]])
        hsvImg[centerPoints[1, 0]:centerPoints[0, 0],
               centerPoints[0, 1]:centerPoints[2, 1]] = np.array([0, 0, 100])

        # Per-color HSV ranges (OpenCV hue is 0-180). Index order here must
        # match colorRangesLo/Hi below; `colors` is used only for printing.
        colors = [
            "yellow", "orange", "pink", "black", "red", "purple", "green",
            "blue"
        ]
        yellow_lo = np.array([23, 180, 150])
        yellow_hi = np.array([35, 255, 255])
        orange_lo = np.array([3, 190, 110])
        orange_hi = np.array([9, 255, 170])
        pink_lo = np.array([165, 120, 130])  #np.array([165, 120, 130])
        pink_hi = np.array([178, 255, 200])  #np.array([178, 255, 180])
        black_lo = np.array([0, 0, 0])
        black_hi = np.array([180, 180, 40])
        # Red hue wraps around 0/180, so it needs two ranges (red and red2).
        red_lo = np.array([0, 190, 80])  # Red is special
        red_hi = np.array([10, 255, 120])
        red2_lo = np.array([160, 140, 80])
        red2_hi = np.array([180, 255, 120])
        purple_lo = np.array([130, 120, 40])
        purple_hi = np.array([160, 255, 120])
        green_lo = np.array([40, 0, 50])
        green_hi = np.array([70, 255, 120])
        blue_lo = np.array([110, 60, 60])
        blue_hi = np.array([140, 255, 150])

        # colorRangesLo = [yellow_lo, orange_lo, pink_lo, black_lo, red_lo, purple_lo, green_lo, blue_lo]
        # colorRangesHi = [yellow_hi, orange_hi, pink_hi, black_hi, red_hi, purple_hi, green_hi, blue_hi]
        colorRangesLo = [
            yellow_lo, orange_lo, pink_lo, black_lo, red_lo, purple_lo,
            green_lo, blue_lo
        ]
        colorRangesHi = [
            yellow_hi, orange_hi, pink_hi, black_hi, red_hi, purple_hi,
            green_hi, blue_hi
        ]

        # Results accumulated across all colors.
        block_detections = []
        allContours = []
        # Threshold, clean, and contour-match once per color.
        for k in range(len(colorRangesLo)):
            colorRangeLo = colorRangesLo[k]
            colorRangeHi = colorRangesHi[k]

            inRangeMask = cv2.inRange(hsvImg, colorRangeLo, colorRangeHi)
            # Close fills small holes, open removes small speckles.
            inRangeMask = cv2.morphologyEx(inRangeMask, cv2.MORPH_CLOSE,
                                           kernel)
            inRangeMask = cv2.morphologyEx(inRangeMask, cv2.MORPH_OPEN, kernel)
            hsvImg_singleColor = cv2.bitwise_and(hsvImg,
                                                 hsvImg,
                                                 mask=inRangeMask)

            # Only for red (k == 4): merge in the second, wrapped hue band.
            if (k == 4):
                inRangeMask2 = cv2.inRange(hsvImg, red2_lo, red2_hi)
                inRangeMask2 = cv2.morphologyEx(inRangeMask2, cv2.MORPH_CLOSE,
                                                kernel)
                inRangeMask2 = cv2.morphologyEx(inRangeMask2, cv2.MORPH_OPEN,
                                                kernel)
                hsvImg_singleColor2 = cv2.bitwise_and(hsvImg,
                                                      hsvImg,
                                                      mask=inRangeMask2)
                hsvImg_singleColor = cv2.bitwise_or(hsvImg_singleColor,
                                                    hsvImg_singleColor2)
                inRangeMask = cv2.bitwise_or(inRangeMask, inRangeMask2)

            # contours, hierarchy = cv2.findContours(inRangeMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            contours, _ = cv2.findContours(inRangeMask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

            for i in range(len(contours)):
                contour = contours[i]
                area = cv2.contourArea(contour)
                # Area gate: reject blobs too small or too large to be a
                # single block face (thresholds hand-tuned in pixels).
                if (area < 1400 or area > 2600):  # Filter too small ones
                    continue
                # print(cv2.matchShapes(contour, contour_ref, 1, 0.0))
                # Shape gate: reject contours too dissimilar from the
                # reference block contour.
                if cv2.matchShapes(contour, contour_ref, 1,
                                   0.0) > 0.3:  # Filter absurd shapes
                    continue
                # minAreaRect returns ((x, y), (w, h), angle); note the
                # center is unpacked here as (center_y, center_x) -- i.e.
                # rect x is treated as column / y as row downstream.
                rect = cv2.minAreaRect(contour)
                (center_y, center_x) = rect[0]
                (width, height) = rect[1]
                coutour_orientation = rect[2]
                block_detections.append([
                    int(center_x),
                    int(center_y), width, height, area, coutour_orientation, k
                ])  # TODO Format
                allContours.append(contour)
                # The /2.1333 and /2 factors appear to rescale the high-res
                # pixel coordinates (1024x1280) down to the calibrated
                # 480x640 frame expected by getWorldCoord -- TODO confirm.
                worldCoord3 = self.getWorldCoord(
                    np.array([int(center_x / 2.1333),
                              int(center_y / 2)]))
                # 0.02 offset on z -- presumably compensates for block
                # height / depth bias; verify against calibration.
                worldCoord3[2] -= 0.02
                # NOTE(review): the visible __init__ sets
                # self.block_detections to an ndarray, which has no
                # .append; this assumes it was rebound to a list elsewhere
                # before this method runs -- confirm.
                self.block_detections.append([
                    worldCoord3[0], worldCoord3[1], worldCoord3[2],
                    coutour_orientation
                ])

                print(colors[k] + " @ " +
                      str(np.array([int(center_x / 2.1333),
                                    int(center_y / 2)])))
                print(colors[k] + " @ " + str(
                    self.getWorldCoord(
                        np.array([int(center_x / 2.1333),
                                  int(center_y / 2)]))))

        self.block_contours = allContours
        print(self.block_detections)