Example 1
class Kinect2:
    def __init__(self, calibration_folder=None):
        self.freenect = Freenect2()
        setGlobalLogger(createConsoleLogger(LoggerLevel.NONE))
        num_devices = self.freenect.enumerateDevices()
        if num_devices == 0:
            raise ValueError("No kinect2 device connected!")

        serial = self.freenect.getDeviceSerialNumber(0)
        self.device = self.freenect.openDevice(serial, pipeline=pipeline)
        self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                               | FrameType.Depth)
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)
        self.device.start()

        self.calibration = {}
        if calibration_folder is not None:
            self.calibration = utils.load_calibration(calibration_folder)

    def snapshot(self):
        frames = self.listener.waitForNewFrame()
        color = frames["color"]
        depth = frames["depth"]
        ir = frames["ir"]
        color = color.asarray(np.uint8).copy()
        depth = depth.asarray(np.float32).copy()
        ir = ir.asarray(np.float32).copy()

        # flip images
        color = color[:, ::-1]
        ir = ir[:, ::-1]
        depth = depth[:, ::-1]

        color = color[..., :3]
        # color = color[..., ::-1]
        ir = self._convert_ir(ir)

        self.listener.release(frames)
        return color, depth, ir

    def _convert_ir(self, ir):
        """
        convertIr function
        https://github.com/code-iai/iai_kinect2/blob/master/kinect2_calibration/src/kinect2_calibration.cpp
        """
        ir_min, ir_max = ir.min(), ir.max()
        ir_convert = (ir - ir_min) / (ir_max - ir_min)
        ir_convert *= 255
        ir_convert = np.tile(ir_convert[..., None], (1, 1, 3))
        return ir_convert.astype(np.uint8)
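# --- Module-level context the snippet above assumes (a sketch, not part of the
# original): `pipeline`, `np`, and `utils` must exist at import time;
# `utils.load_calibration` is project-local and not reproduced here.
import numpy as np
from pylibfreenect2 import (Freenect2, SyncMultiFrameListener, FrameType,
                            createConsoleLogger, setGlobalLogger, LoggerLevel)
try:
    from pylibfreenect2 import OpenGLPacketPipeline
    pipeline = OpenGLPacketPipeline()
except ImportError:
    from pylibfreenect2 import CpuPacketPipeline
    pipeline = CpuPacketPipeline()

# Typical use:
#   cam = Kinect2()
#   color, depth, ir = cam.snapshot()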
Example 2
    def run(self):
        """
        main loop: start the Kinect, read depth frames from it, show the video and save it
        """

        Fn = Freenect2()
        num_devices = Fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = Fn.getDeviceSerialNumber(0)
        device = Fn.openDevice(serial, pipeline=pipeline)
        listener = SyncMultiFrameListener(FrameType.Depth)
        device.setIrAndDepthFrameListener(listener)
        device.start()

        fourcc = cv2.VideoWriter_fourcc('m', 'j', 'p', 'g')
        depth_video = cv2.VideoWriter(
            'Depth.avi', fourcc, 30.0, (512, 424), 0)

        i = 0
        while not self._done:

            frames = listener.waitForNewFrame()
            depth = frames["depth"]
            depth = depth.asarray().clip(0, 4096)

            hand_contour = self._find_contour(depth)
            darks = np.zeros((424, 512), dtype=np.uint8)
            if cv2.contourArea(hand_contour[0]) < 1000 or cv2.contourArea(hand_contour[0]) > 5000:
                self.cover = np.uint8(depth/16.)
            else:
                seg_depth = self._segment(depth, hand_contour, darks)
                np.save('./offline/data/seg_depth%d.npy' % i, seg_depth)
                i += 1
            cv2.imshow("depth", self.cover)
            depth_video.write(self.cover)

            listener.release(frames)

            key = cv2.waitKey(delay=1)

            if key == ord('q'):
                self._done = True

        depth_video.release()
        device.stop()
        device.close()

        sys.exit()
Example 3
    def getcameraimgbykinect(self, basedir):
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except ImportError:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()
            except ImportError:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        fn = Freenect2()
        num_devices = fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            return
        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline=pipeline)

        types = 0
        types |= FrameType.Color
            
        listener = SyncMultiFrameListener(types)

        # Register listeners
        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        device.startStreams(rgb=True, depth=False)

        while True:
            frames = listener.waitForNewFrame()
            color = frames["color"]
            colorimg = cv2.resize(color.asarray(), (200, 150))
            (shortDate, longDate)=self.getDateStr()
            file_dir = basedir+"/"+shortDate
            if not os.path.exists(file_dir):
                os.makedirs(file_dir)
            new_filename = Pic_str().create_uuid() + '.jpg' 
            fpath = os.path.join(file_dir,  new_filename) 
            # print(fpath )
            cv2.imwrite(fpath, colorimg, [cv2.IMWRITE_JPEG_QUALITY,70])
            listener.release(frames)
            isfire = self.fireDetect.detect_image(fpath)
            # args: "园区厂区仓库" = "factory/warehouse area", "网络摄像头" = "network camera"
            self.sendUtils.sendReqtoServer("园区厂区仓库", "网络摄像头", longDate, isfire, shortDate + "/" + new_filename)
            time.sleep(3)
        # NOTE: unreachable; the while True loop above never breaks
        device.stop()
        device.close()
        return
Example 4
def service_func():
	try:
	    from pylibfreenect2 import OpenGLPacketPipeline
	    pipeline = OpenGLPacketPipeline()
	except ImportError:
	    from pylibfreenect2 import CpuPacketPipeline
	    pipeline = CpuPacketPipeline()

	fn = Freenect2()
	num_devices = fn.enumerateDevices()
	if num_devices == 0:
	    print("No device connected!")
	    sys.exit(1)

	serial = fn.getDeviceSerialNumber(0)
	device = fn.openDevice(serial, pipeline=pipeline)

	listener = SyncMultiFrameListener(FrameType.Ir)

	device.setIrAndDepthFrameListener(listener)

	directory = os.path.dirname(os.path.realpath(__file__))

	try:
		while 1:
			device.start()
			count=0
			for i in range(1,5):
				frames = listener.waitForNewFrame()
				ir = frames["ir"]
				tmpimg = ir.asarray() / 256.
				tmp = tmpimg.astype(int)
				path = directory +"/kinect.png"
				print(path)
				cv2.imwrite(path, tmp)
				listener.release(frames)
				if i < 4 :
					time.sleep(30-time.localtime().tm_sec%30)
			device.stop()
			time.sleep(5)
			time.sleep((90-time.localtime().tm_sec)%30)
	except KeyboardInterrupt:
		device.close()
		sys.exit()
Example 5
class Kinect:
    def __init__(self):
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(
            FrameType.Color)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        # device.setIrAndDepthFrameListener(listener)

        self.device.start()

        # NOTE: must be called after device.start()
        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())

    def __del__(self):
        self.device.stop()
        self.device.close()

    def get_image(self):
        frames = self.listener.waitForNewFrame()

        color = frames["color"].asarray()[:,::-1,:3]
        self.listener.release(frames)

        # if np.sum(color[-1,:,:]) == 0:
            # return None

        return color
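# Minimal driver for the class above (a sketch; it relies on the same
# module-level `pipeline` convention as the other examples):
if __name__ == "__main__":
    cam = Kinect()
    img = cam.get_image()  # copied, horizontally flipped color image
    print(img.shape)       # (1080, 1920, 3)
    del cam                # __del__ stops and closes the device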
Example 6
class KinectCamera(Camera):

  def __init__(self):
    self.pipeline = OpenCLPacketPipeline()
    fn = Freenect2()
    self.device = fn.openDefaultDevice(pipeline=self.pipeline)
    self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth)
    self.device.setColorFrameListener(self.listener)
    self.device.setIrAndDepthFrameListener(self.listener)
    self.device.start()
    self.registration = Registration(self.device.getIrCameraParams(), self.device.getColorCameraParams())
    self.undistorted = Frame(512, 424, 4)
    self.registered = Frame(512, 424, 4)

  def getFrames(self):
    frames = self.listener.waitForNewFrame()
    self.registration.apply(frames['color'], frames['depth'], self.undistorted, self.registered)
    frames['registered'] = self.registered  # the registered Frame object
    return frames  # caller must release via self.listener.release(frames)

  def stop(self):
    self.device.stop()
    self.device.close()
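# Usage sketch for KinectCamera (assumes OpenCLPacketPipeline is imported at
# module level, per the constructor above). getFrames() does not release the
# FrameMap, so the caller must release it through the listener:
if __name__ == "__main__":
    import numpy as np
    cam = KinectCamera()
    frames = cam.getFrames()
    depth = frames['depth'].asarray().copy()  # copy before release
    registered = frames['registered'].asarray(np.uint8).copy()
    cam.listener.release(frames)
    cam.stop()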
Example 7
    def __test(enable_rgb, enable_depth):
        fn = Freenect2()
        num_devices = fn.enumerateDevices()
        assert num_devices > 0
        device = fn.openDefaultDevice()

        types = 0
        if enable_rgb:
            types |= FrameType.Color
        if enable_depth:
            types |= (FrameType.Ir | FrameType.Depth)
        listener = SyncMultiFrameListener(types)

        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        device.startStreams(rgb=enable_rgb, depth=enable_depth)
        # test if we can get one frame at least
        frames = listener.waitForNewFrame()
        listener.release(frames)

        device.stop()
        device.close()
Example 9
    def publishRGBD(self):
        fn = Freenect2()
        num_devices = fn.enumerateDevices()

        #Exit if device is not found
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline=self.pipeline)

        types = 0
        if self.enable_rgb:
            types |= FrameType.Color
        if self.enable_depth:
            types |= (FrameType.Ir | FrameType.Depth)
        listener = SyncMultiFrameListener(types)

        # Register listeners
        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        if self.enable_rgb and self.enable_depth:
            device.start()
        else:
            device.startStreams(rgb=self.enable_rgb, depth=self.enable_depth)

        # Should be transformed to manually calibrated values.
        if self.enable_depth:
            registration = Registration(device.getIrCameraParams(),
                                        device.getColorCameraParams())

        undistorted = Frame(512, 424, 4)
        registered = Frame(512, 424, 4)

        while not rospy.is_shutdown():
            frames = listener.waitForNewFrame()

            if self.enable_rgb:
                color = frames["color"]
            if self.enable_depth:
                ir = frames["ir"]
                depth = frames["depth"]

            if self.enable_rgb and self.enable_depth:
                registration.apply(color, depth, undistorted, registered)
            elif self.enable_depth:
                registration.undistortDepth(depth, undistorted)

            if self.enable_depth:
                #cv2.imshow("undistorted", undistorted.asarray(np.float32) / 4500.)
                self.publishDepth(undistorted)

            if self.enable_rgb and self.enable_depth:
                #cv2.imshow("registered", registered.asarray(np.uint8))
                self.publishColor(registered)

            listener.release(frames)

        device.stop()
        device.close()

        sys.exit(0)
Example 10
def test_sync_multi_frame():
    fn = Freenect2()

    num_devices = fn.enumerateDevices()
    assert num_devices > 0

    serial = fn.getDefaultDeviceSerialNumber()
    assert serial == fn.getDeviceSerialNumber(0)

    # TODO: tests for openDefaultDevice
    # device = fn.openDefaultDevice()
    device = fn.openDevice(serial)

    assert fn.getDefaultDeviceSerialNumber() == device.getSerialNumber()
    device.getFirmwareVersion()

    listener = SyncMultiFrameListener(
        FrameType.Color | FrameType.Ir | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # Registration
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # test if we can get two frames at least
    frames = listener.waitForNewFrame()
    listener.release(frames)
    frames = listener.waitForNewFrame()

    color = frames[FrameType.Color]
    ir = frames[FrameType.Ir]
    depth = frames[FrameType.Depth]

    for frame in [ir, depth]:
        assert frame.exposure == 0
        assert frame.gain == 0
        assert frame.gamma == 0

    for frame in [color]:
        assert frame.exposure > 0
        assert frame.gain > 0
        assert frame.gamma > 0

    registration.apply(color, depth, undistorted, registered)

    ### Color ###
    assert color.width == 1920
    assert color.height == 1080
    assert color.bytes_per_pixel == 4

    assert ir.width == 512
    assert ir.height == 424
    assert ir.bytes_per_pixel == 4

    assert depth.width == 512
    assert depth.height == 424
    assert depth.bytes_per_pixel == 4

    assert color.asarray().shape == (color.height, color.width, 4)
    assert ir.asarray().shape == (ir.height, ir.width)
    assert depth.asarray(np.float32).shape == (depth.height, depth.width)

    listener.release(frames)

    def __test_cannot_determine_type_of_frame(frame):
        frame.asarray()

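    # nose-style generator test: each yielded (callable, arg) pair is collected
    # and run as a separate case; nose.tools.raises(ValueError)(f) wraps f so
    # the case passes only if calling it raises ValueError.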
    for frame in [registered, undistorted]:
        yield raises(ValueError)(__test_cannot_determine_type_of_frame), frame

    device.stop()
    device.close()
Example 11
def find_and_track_kinect(name, tracker = "CSRT",
        min_range = 0, max_range = 1700,
        face_target_box = DEFAULT_FACE_TARGET_BOX,
        res = (RGB_W, RGB_H),
        video_out = True, debug = True):

    known_faces = {}

    for person in faces:
        image = face_recognition.load_image_file(faces[person])
        print(person)
        face_encoding_list = face_recognition.face_encodings(image)
        if len(face_encoding_list) > 0:
            known_faces[person] = face_encoding_list[0]
        else:
            print("\t Could not find face for person...")

    fn = Freenect2()
    num_devices = fn.enumerateDevices()

    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline = pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)

    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    registration = Registration(device.getIrCameraParams(),
            device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)
    bigdepth = Frame(1920, 1082, 4)


    trackerObj = None
    face_count = 5
    face_process_frame = True

    bbox = None
    face_bbox = None
    track_bbox = None

    while True:
        timer = cv2.getTickCount()

        person_found = False

        frames = listener.waitForNewFrame()

        color = frames["color"]
        depth = frames["depth"]

        registration.apply(color, depth, undistorted, registered, bigdepth=bigdepth)

        # bigdepth has 1082 rows (one padding row top and bottom); trim them
        bd = bigdepth.asarray(np.float32)[1:-1, :]
        c = cv2.cvtColor(color.asarray(), cv2.COLOR_RGB2BGR)

        __clean_color(c, bd, min_range, max_range)

        if face_process_frame:
            small_c = __crop_frame(c, face_target_box)
            face_locations = face_recognition.face_locations(small_c, model="cnn")
            face_encodings = face_recognition.face_encodings(small_c, face_locations)
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(
                        [known_faces[name]], face_encoding, 0.6)
                if len(matches) > 0 and matches[0]:
                    person_found = True
                    face_count += 1
                    (top, right, bottom, left) = face_locations[0]

                    left += face_target_box[0]
                    top += face_target_box[1]
                    right += face_target_box[0]
                    bottom += face_target_box[1]

                    face_bbox = (left, top, right, bottom)
                    break
        face_process_frame = not face_process_frame

        overlap_pct = 0
        track_area = __bbox_area(track_bbox)
        if track_area > 0 and face_bbox:
            overlap_area = __bbox_overlap(face_bbox, track_bbox)
            overlap_pct = min(overlap_area / __bbox_area(face_bbox),
                    overlap_area / __bbox_area(track_bbox))

        if person_found and face_count >= FACE_COUNT and overlap_pct < CORRECTION_THRESHOLD:
            bbox = (face_bbox[0],
                    face_bbox[1],
                    face_bbox[2] - face_bbox[0],
                    face_bbox[3] - face_bbox[1])
            trackerObj = __init_tracker(c, bbox, tracker)
            face_count = 0

        status = False

        if trackerObj is not None:
            status, trackerBBox = trackerObj.update(c)
            bbox = (int(trackerBBox[0]),
                int(trackerBBox[1]),
                int(trackerBBox[0] + trackerBBox[2]),
                int(trackerBBox[1] + trackerBBox[3]))

        if bbox is not None:
            track_bbox = bbox

        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        w = 0
        h = 0

        if status:
            w = track_bbox[0] + int((track_bbox[2] - track_bbox[0])/2)
            h = track_bbox[1] + int((track_bbox[3] - track_bbox[1])/2)
            if (w < res[0] and w >= 0 and h < res[1] and h >= 0):
                distanceAtCenter = bd[h][w]
                __update_individual_position("NONE", track_bbox, distanceAtCenter, res)

        if video_out:
            cv2.line(c, (w, 0),
                    (w, int(res[1])), (0,255,0), 1)
            cv2.line(c, (0, h),
                    (int(res[0]), h), (0,255,0), 1)

            __draw_bbox(True, c, face_target_box, (255, 0, 0), "FACE_TARGET")
            __draw_bbox(status, c, track_bbox, (0, 255, 0), tracker)
            __draw_bbox(person_found, c, face_bbox, (0, 0, 255), name)

            c = __scale_frame(c, scale_factor = 0.5)

            cv2.putText(c, "FPS : " + str(int(fps)), (100,50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 1)
            if not status:
                failedTrackers = "FAILED: "
                failedTrackers += tracker + " "
                cv2.putText(c, failedTrackers, (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,142), 1)

            cv2.imshow("color", c)

        listener.release(frames)

        key = cv2.waitKey(1) & 0xff
        if key == ord('q'):
            break
    device.stop()
    device.close()
Example 12
if enable_rgb and enable_depth:
    device.start()
else:
    device.startStreams(rgb=enable_rgb, depth=enable_depth)

# NOTE: must be called after device.start()
if enable_depth:
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

undistorted = Frame(512, 424, 4)
registered = Frame(512, 424, 4)

while True:
    frames = listener.waitForNewFrame()

    if enable_rgb:
        color = frames["color"]
    if enable_depth:
        ir = frames["ir"]
        depth = frames["depth"]

    if enable_rgb and enable_depth:
        registration.apply(color, depth, undistorted, registered)
    elif enable_depth:
        registration.undistortDepth(depth, undistorted)

    if enable_depth:
        cv2.imshow("ir", ir.asarray() / 65535.)
        cv2.imshow("depth", depth.asarray() / 4500.)
Example 13
class Kinect:
    def __init__(self, kinect_num=0):
        self.fn = Freenect2()
        self.serial = None
        self.device = None
        self.listener = None
        self.registration = None

        self._frames = None  # frames cache so that the user can use them before we free them
        self._bigdepth = Frame(1920, 1082, 4)  # malloc'd
        self._undistorted = Frame(512, 424, 4)
        self._registered = Frame(512, 424, 4)
        self._color_dump = Frame(1920, 1080, 4)

        num_devices = self.fn.enumerateDevices()
        if num_devices <= kinect_num:
            raise ConnectionError("No Kinect device at index %d" % kinect_num)

        self.serial = self.fn.getDeviceSerialNumber(kinect_num)
        self.device = self.fn.openDevice(self.serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(FrameType.Color
                                               | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

    def close(self):
        if self.device:
            self.device.close()

    def start(self):
        if self.device:
            self.device.start()
            self.registration = Registration(
                self.device.getIrCameraParams(),
                self.device.getColorCameraParams())
            return self  # for convenience
        else:
            raise ConnectionError("Connection to Kinect wasn't established")

    def stop(self):
        if self.device:
            self.device.stop()

    ### If copy is False for these functions, make sure to call release_frames
    ### when you're done with the returned frame ###

    def get_current_color_frame(self, copy=True):
        self._frames = self.listener.waitForNewFrame()
        ret = self._convert_color_frame(self._frames["color"], copy=copy)
        if copy:
            self.release_frames()
        return ret

    def get_current_depth_frame(self, copy=True):
        self._frames = self.listener.waitForNewFrame()
        if copy:
            ret = self._frames["depth"].asarray().copy()
            self.release_frames()
        else:
            ret = self._frames["depth"].asarray()
        return ret

    def get_current_rgbd_frame(self, copy=True):
        self._frames = self.listener.waitForNewFrame()

        self.registration.apply(self._frames["color"],
                                self._frames["depth"],
                                self._undistorted,
                                self._registered,
                                bigdepth=self._bigdepth)

        color = self._convert_color_frame(self._frames["color"], copy=copy)
        depth = self._convert_bigdepth_frame(self._bigdepth, copy=copy)
        if copy:
            self.release_frames()
        return color, depth

    def get_current_bigdepth_frame(self, copy=True):
        self._frames = self.listener.waitForNewFrame()

        self.registration.apply(self._frames["color"],
                                self._frames["depth"],
                                self._undistorted,
                                self._registered,
                                bigdepth=self._bigdepth)

        depth = self._convert_bigdepth_frame(self._bigdepth, copy=copy)
        if copy:
            self.release_frames()
        return depth

    def release_frames(self):
        self.listener.release(self._frames)

    # single frame calls

    def get_single_color_frame(self):
        self.start()
        ret = self.get_current_color_frame()
        self.stop()
        return ret

    def get_single_depth_frame(self):
        self.start()
        ret = self.get_current_depth_frame()
        self.stop()
        return ret

    def _convert_bigdepth_frame(self, frame, copy=True):
        if copy:
            d = frame.asarray(np.float32).copy()
        else:
            d = frame.asarray(np.float32)
        return d[1:-1, ::-1]  # drop the padding rows, flip horizontally

    def _convert_color_frame(self, frame, copy=True):
        if copy:
            img = frame.asarray().copy()
        else:
            img = frame.asarray()
        img = img[:, ::-1]
        img[..., :3] = img[..., 2::-1]  # bgrx -> rgbx
        return img
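# Usage sketch for the Kinect wrapper above (assumes the module-level
# `pipeline` name used in __init__):
if __name__ == "__main__":
    k = Kinect().start()                 # start() returns self for chaining
    color = k.get_current_color_frame()  # copy=True releases frames internally
    depth = k.get_current_depth_frame()
    k.stop()
    k.close()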
Example 14
def test_sync_multi_frame():
    fn = Freenect2()

    num_devices = fn.enumerateDevices()
    assert num_devices > 0

    serial = fn.getDefaultDeviceSerialNumber()
    assert serial == fn.getDeviceSerialNumber(0)

    device = fn.openDevice(serial)

    assert fn.getDefaultDeviceSerialNumber() == device.getSerialNumber()
    device.getFirmwareVersion()

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # Registration
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # optional parameters for registration
    bigdepth = Frame(1920, 1082, 4)
    color_depth_map = np.zeros((424, 512), np.int32)

    # test if we can get two frames at least
    frames = listener.waitForNewFrame()
    listener.release(frames)

    # frames as a first argument should also work
    frames = FrameMap()
    listener.waitForNewFrame(frames)

    color = frames[FrameType.Color]
    ir = frames[FrameType.Ir]
    depth = frames[FrameType.Depth]

    for frame in [ir, depth]:
        assert frame.exposure == 0
        assert frame.gain == 0
        assert frame.gamma == 0

    for frame in [color]:
        assert frame.exposure > 0
        assert frame.gain > 0
        assert frame.gamma > 0

    registration.apply(color, depth, undistorted, registered)

    # with optional parameters
    registration.apply(color,
                       depth,
                       undistorted,
                       registered,
                       bigdepth=bigdepth,
                       color_depth_map=color_depth_map.ravel())

    assert color.width == 1920
    assert color.height == 1080
    assert color.bytes_per_pixel == 4

    assert ir.width == 512
    assert ir.height == 424
    assert ir.bytes_per_pixel == 4

    assert depth.width == 512
    assert depth.height == 424
    assert depth.bytes_per_pixel == 4

    assert color.asarray().shape == (color.height, color.width, 4)
    assert ir.asarray().shape == (ir.height, ir.width)
    assert depth.asarray(np.float32).shape == (depth.height, depth.width)

    listener.release(frames)

    def __test_cannot_determine_type_of_frame(frame):
        frame.asarray()

    for frame in [registered, undistorted]:
        yield raises(ValueError)(__test_cannot_determine_type_of_frame), frame

    device.stop()
    device.close()
Example 15
class KinectStreamer:
    def __init__(self):
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except ImportError:
            try:
                from pylibfreenect2 import OpenGLPacketPipeline
                pipeline = OpenGLPacketPipeline()
            except ImportError:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(pipeline).__name__)

        # Create and set logger
        logger = createConsoleLogger(LoggerLevel.Debug)
        setGlobalLogger(logger)

        self.freenect = Freenect2()
        num_devices = self.freenect.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = self.freenect.getDeviceSerialNumber(0)
        self.device = self.freenect.openDevice(serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                               | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.device.start()

        self.registration = Registration(self.device.getIrCameraParams(),
                                         self.device.getColorCameraParams())

        self.run_loop = True
        self.lock = threading.Lock()
        self.img_buffer = np.zeros(
            (RES_COLOR[1], RES_COLOR[0], 3, SIZE_AVG_FILTER))
        self.idx_buffer = 0

        # Close on Ctrl-C
        def signal_handler(signal, frame):
            self.run_loop = False

        signal.signal(signal.SIGINT, signal_handler)

    def stream_thread(self):
        undistorted = Frame(RES_DEPTH[0], RES_DEPTH[1], 4)
        registered = Frame(RES_DEPTH[0], RES_DEPTH[1], 4)

        while self.run_loop:
            frames = self.listener.waitForNewFrame()

            color = frames["color"]
            ir = frames["ir"]
            depth = frames["depth"]

            self.registration.apply(color, depth, undistorted, registered)

            img_ir = ir.asarray() / 65535.
            self.lock.acquire()
            self.img_depth = (depth.asarray() / 4500. * 255).astype(np.uint8)
            self.img_color = np.copy(color.asarray()[:, ::-1, :3])
            self.img_buffer[:, :, :, self.idx_buffer] = self.img_color
            self.img_registered_color = np.copy(registered.asarray(np.uint8))
            self.img_registered_depth = np.copy(undistorted.asarray(np.uint8))
            self.lock.release()
            self.idx_buffer += 1
            if self.idx_buffer >= SIZE_AVG_FILTER:
                self.idx_buffer = 0

            cv2.imshow("Kinect Depth", self.img_depth)
            cv2.imshow(
                "Kinect Color",
                cv2.resize(self.img_color, (int(round(
                    0.3 * RES_COLOR[0])), int(round(0.3 * RES_COLOR[1])))))
            cv2.imshow("Kinect R Depth", self.img_registered_depth)
            cv2.imshow("Kinect R Color", self.img_registered_color)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q') or key == ord('x'):
                self.run_loop = False
                break
            elif key == ord(' '):
                cv2.imwrite("frame.png", self.img_color)  # save current color image

            self.listener.release(frames)

        self.device.stop()
        self.device.close()

    def write_thread(self, video_name="output"):
        fourcc = cv2.VideoWriter_fourcc(*'X264')
        vid_color = cv2.VideoWriter(video_name + ".avi", fourcc, FPS,
                                    RES_COLOR)
        vid_depth = cv2.VideoWriter(video_name + "_depth.avi",
                                    fourcc,
                                    FPS,
                                    RES_DEPTH,
                                    isColor=False)

        t_curr = time.time()
        while self.run_loop:
            t_start = t_curr
            self.lock.acquire()
            try:
                vid_color.write(self.img_color)
                vid_depth.write(self.img_depth)
            except AttributeError:
                pass  # the first images may not have been captured yet
            self.lock.release()
            t_curr = time.time()
            time.sleep(max(1. / FPS - (t_curr - t_start), 0))

        vid_color.release()
        vid_depth.release()
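# How the streamer and writer appear intended to run together (a sketch; the
# thread wiring is an assumption inferred from run_loop, the lock, and the two
# *_thread methods above):
if __name__ == "__main__":
    import threading
    streamer = KinectStreamer()
    writer = threading.Thread(target=streamer.write_thread, args=("output",))
    writer.start()
    streamer.stream_thread()  # blocks until 'q'/'x' or Ctrl-C
    writer.join()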
Example 16
class kinectDevice:
    def __init__(self, device_no=0):
        # Package Pipeline
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            self.pipeline = OpenGLPacketPipeline()
        except ImportError:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                self.pipeline = OpenCLPacketPipeline()
            except ImportError:
                from pylibfreenect2 import CpuPacketPipeline
                self.pipeline = CpuPacketPipeline()
        print "Packet pipeline: {}".format(type(self.pipeline).__name__)

        # Create and set logger
        logger = createConsoleLogger(LoggerLevel.Error)
        setGlobalLogger(logger)

        # Detect Kinect Devices
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            self.device = None
            sys.exit(1)

        # Create Device and Frame Listeners
        self.serial = self.fn.getDeviceSerialNumber(device_no)
        self.device = self.fn.openDevice(self.serial, pipeline=self.pipeline)
        self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                               | FrameType.Depth)

        # Register Listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        # Start Device
        self.device.start()

        # Set Camera Parameters
        self._ir_params = self.device.getIrCameraParams()
        self._color_params = self.device.getColorCameraParams()
        '''
        self._ir_params.fx = cameraParams.params['ir_fx']
        self._ir_params.fy = cameraParams.params['ir_fy']
        self._ir_params.cx = cameraParams.params['ir_cx']
        self._ir_params.cy = cameraParams.params['ir_cy']

        self._color_params.fx = cameraParams.params['col_fx']
        self._color_params.fy = cameraParams.params['col_fy']
        self._color_params.cx = cameraParams.params['col_cx']
        self._color_params.cy = cameraParams.params['col_cy']
        '''
        # Registration
        self.registration = Registration(self._ir_params, self._color_params)

        print('[Info] Initialization Finished!')

    # Get New Frame (in "Frame" type)
    def getNewFrame(self):
        # Get Raw Data from Listener
        self.frames = self.listener.waitForNewFrame()

        # Get Depth Frame and Color Frame
        depth = self.frames['depth']
        color = self.frames['color']
        ir = self.frames['ir']

        return depth, color, ir

    # Release Frame
    def releaseFrame(self):
        self.listener.release(self.frames)

    def __del__(self):
        # Stop and Close Device
        if self.device is not None:
            self.device.stop()
            self.device.close()

    #
    # Get Parameters of KinectDevice
    #

    # Infrared Camera
    @property
    def ir_params(self):
        return self._ir_params

    # Color Camera
    @property
    def color_params(self):
        return self._color_params
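# Usage sketch for kinectDevice: each getNewFrame() must be paired with a
# releaseFrame(), since the returned Frames reference native buffers owned by
# the listener:
if __name__ == "__main__":
    kd = kinectDevice()
    depth, color, ir = kd.getNewFrame()
    d = depth.asarray().copy()  # copy anything kept past releaseFrame()
    kd.releaseFrame()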
Example 17
class Kinect():
    def __init__(self,
                 params=None,
                 device_index=0,
                 headless=False,
                 pipeline=None):
        '''
            Kinect:  Kinect interface using the libfreenect library. Will query
                libfreenect for available devices, if none are found will throw
                a RuntimeError. Otherwise will open the device for collecting
                color, depth, and ir data. 

                Use kinect.get_frame([<frame type>]) within a typical opencv 
                capture loop to collect data from the kinect.

                When instantiating, optionally pass a parameters file or dict 
                with the kinect's position and orientation in the worldspace 
                and/or the kinect's intrinsic parameters. An example dictionary 
                is shown below:

                    kinect_params = {
                        "position": {
                            "x": 0,
                            "y": 0,
                            "z": 0.810,
                            "roll": 0,
                            "azimuth": 0,
                            "elevation": -34
                        }
                        "intrinsic_parameters": {
                            "cx": 256.684,
                            "cy": 207.085,
                            "fx": 366.193,
                            "fy": 366.193
                        }
                    }
                
                These can also be adjusted using the intrinsic_parameters and 
                position properties. 

            ARGUMENTS:
                params: str or dict
                    Path to a kinect parameters file (.json) or a python dict
                    with position or intrinsic_parameters.
                device_index: int
                    Use to interface with a specific device if more than one is
                    connected. 
                headless: bool
                    If true, will allow the kinect to collect and process data
                    without a display. Useful if the kinect is plugged into a 
                    server. 
                pipeline: PacketPipeline
                    Optionally pass a pylibfreenect2 pipeline that will be used
                    with the kinect. Possible types are as follows:
                        OpenGLPacketPipeline
                        CpuPacketPipeline
                        OpenCLPacketPipeline
                        CudaPacketPipeline
                        OpenCLKdePacketPipeline
                    Note that this overrides the headless argument, since
                    headless mode requires a CUDA or OpenCL pipeline. 
        '''

        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if (num_devices == 0):
            raise RuntimeError('No device connected!')
        if (device_index >= num_devices):
            raise RuntimeError('Device {} not available!'.format(device_index))
        self._device_index = device_index

        # Import pipeline depending on headless state
        self._headless = headless
        if (pipeline is None):
            pipeline = self._import_pipeline(self._headless)
        print("Packet pipeline:", type(pipeline).__name__)
        self.device = self.fn.openDevice(self.fn.getDeviceSerialNumber(
            self._device_index),
                                         pipeline=pipeline)

        # We want all the types
        types = (FrameType.Color | FrameType.Depth | FrameType.Ir)
        self.listener = SyncMultiFrameListener(types)

        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)
        self.device.start()

        # We'll use this to generate registered depth images
        self.registration = Registration(self.device.getIrCameraParams(),
                                         self.device.getColorCameraParams())

        self._camera_position = dotdict({
            "x": 0.,
            "y": 0.,
            "z": 0.,
            "roll": 0.,
            "azimuth": 0.,
            "elevation": 0.
        })
        self._camera_params = dotdict({
            "cx": self.device.getIrCameraParams().cx,
            "cy": self.device.getIrCameraParams().cy,
            "fx": self.device.getIrCameraParams().fx,
            "fy": self.device.getIrCameraParams().fy
        })

        # Try and load parameters
        pos, param = self._load_camera_params(params)
        if (pos is not None):
            self._camera_position.update(pos)
        if (param is not None):
            self._camera_params.update(param)

    def __del__(self):
        self.device.stop()

    def __repr__(self):
        params = {
            "position": self._camera_position,
            "intrinsic_parameters": self._camera_params
        }
        # Can't really return the pipeline
        return 'Kinect(' + str(params) + ',' + str(self._device_index) + \
                ',' + str(self._headless) + ')'

    @property
    def position(self):
        ''' 
            Return the kinect's stored position. Elements can be referenced 
                using setattr or setitem dunder methods. Can also be used to 
                update the stored dictionary:
                
                    k = ktb.Kinect()
                    k.position.x 
                    >>> 2.0
                    k.position.x = 1.0
                    k.position.x 
                    >>> 1.0
        '''
        return self._camera_position

    @property
    def intrinsic_parameters(self):
        ''' 
            Return the kinect's stored intrinsic parameters. Elements can be referenced 
                using setattr or setitem dunder methods. Can also be used to 
                update the stored dictionary:
                
                    k = ktb.Kinect()
                    k.intrinsic_parameters.cx 
                    >>> 2.0
                    k.intrinsic_parameters.cx = 1.0
                    k.intrinsic_parameters.cx 
                    >>> 1.0
        '''
        return self._camera_params

    @staticmethod
    def _import_pipeline(headless=False):
        '''
            _import_pipeline: Imports the pylibfreenect2 pipeline based on
                whether or not headless mode is enabled. Unfortunately 
                more intelligent importing cannot be implemented (contrary
                to the example scripts) because the import raises a C
                exception, not a python one. As a result the only pipelines
                this function will load are OpenGL or OpenCL.
            ARGUMENTS:
                headless: bool
                    whether or not to run kinect in headless mode.
        '''
        if headless:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        else:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        return pipeline

    @staticmethod
    def _load_camera_params(params_file=None):
        '''
            _load_camera_params: Optionally give the kinect's position and 
                orientation in the worldspace and/or the kinect's intrinsic 
                parameters by passing a json file. An example dictionary is 
                shown below:

                    kinect_params = {
                        "position": {
                            "x": 0,
                            "y": 0,
                            "z": 0.810,
                            "roll": 0,
                            "azimuth": 0,
                            "elevation": -34
                        }
                        "intrinsic_parameters": {
                            "cx": 256.684,
                            "cy": 207.085,
                            "fx": 366.193,
                            "fy": 366.193
                        }
                    }

                Specifically, the dict may contain the "position" or 
                "intrinsic_parameters" keys, with the above fields.
            ARGUMENTS:
                params_file: str or dict
                    Path to a kinect parameters file (.json) or a python dict
                    with position or intrinsic_parameters.
        '''
        if (params_file is None):
            return None, None
        elif isinstance(params_file, str):
            try:
                with open(params_file, 'r') as infile:
                    params_dict = json.load(infile)
            except FileNotFoundError:
                print('Kinect configuration file {} not found'.format(
                    params_file))
                raise
        else:
            params_dict = params_file

        # If the keys do not exist, return None
        return params_dict.get('position',
                               None), params_dict.get('intrinsic_parameters',
                                                      None)

    def get_frame(self, frame_type=COLOR):
        '''
            get_frame: Returns singleton or list of frames corresponding to 
                input. Frames can be any of the following types:
                    COLOR     - returns a 512 x 424 color image, 
                        registered to depth image
                    DEPTH     - returns a 512 x 424 undistorted depth 
                        image (units are mm)
                    IR        - returns a 512 x 424 ir image
                    RAW_COLOR - returns a 1920 x 1080 color image
                    RAW_DEPTH - returns a 512 x 424 depth image 
                        (units are mm)
            ARGUMENTS:
                frame_type: [frame type] or frame type
                    A list of frame types. Output corresponds directly to this 
                    list. The default argument will return a single registered 
                    color image. 
        '''
        def get_single_frame(ft):
            if (ft == COLOR):
                f, _ = self._get_registered_frame(frames)
                return f
            elif (ft == DEPTH):
                return self._get_depth_frame(frames)
            elif (ft == RAW_COLOR):
                return self._get_raw_color_frame(frames)
            elif (ft == RAW_DEPTH):
                return self._get_raw_depth_frame(frames)
            elif (ft == IR):
                return self._get_ir_frame(frames)
            else:
                raise RuntimeError('Unknown frame type {}'.format(ft))

        frames = self.listener.waitForNewFrame()
        # A single frame type (int) or a list of frame types
        if isinstance(frame_type, int):
            return_frames = get_single_frame(frame_type)
        else:
            return_frames = [None] * len(frame_type)
            for i, ft in enumerate(frame_type):
                return_frames[i] = get_single_frame(ft)
        self.listener.release(frames)

        return return_frames

    def get_ptcld(self, roi=None, scale=1000, colorized=False):
        '''
            get_ptcld: Returns a point cloud, generated from depth image. Units 
                are mm by default.
            ARGUMENTS:
                roi: [x, y, w, h]
                    If specified, will crop the point cloud according to the 
                    input roi. Does not accelerate runtime. 
                scale: int
                    Scales the point cloud such that ptcl = ptcl (m) / scale. 
                    ie scale = 1000 returns point cloud in mm. 
                colorized: bool
                    If True, returns color matrix along with point cloud such 
                    that if pt = ptcld[x,y,:], the color of that point is color 
                    = color[x,y,:]
        '''
        # Get undistorted (and registered) frames
        frames = self.listener.waitForNewFrame()
        if (colorized):
            registered, undistorted = self._get_registered_frame(frames)
        else:
            undistorted = self._get_depth_frame(frames)
        self.listener.release(frames)

        # Get point cloud
        ptcld = self._depthMatrixToPointCloudPos(undistorted,
                                                 self._camera_params,
                                                 scale=scale)

        # Adjust point cloud based on real world coordinates
        if (self._camera_position is not None):
            ptcld = self._applyCameraMatrixOrientation(ptcld,
                                                       self._camera_position)

        # Reshape to correct size
        ptcld = ptcld.reshape(DEPTH_SHAPE[1], DEPTH_SHAPE[0], 3)

        # If roi, extract
        if (roi is not None):
            if (isinstance(roi, tuple) or isinstance(roi, list)):
                [y, x, h, w] = roi
                xmin, xmax = int(x), int(x + w)
                ymin, ymax = int(y), int(y + h)
                ptcld = ptcld[xmin:xmax, ymin:ymax, :]
                if (colorized):
                    registered = registered[xmin:xmax, ymin:ymax, :]
            else:
                roi = np.clip(roi, 0, 1)
                for c in range(3):
                    ptcld[:, :, c] = np.multiply(ptcld[:, :, c], roi)

        if (colorized):
            # Format the color registration map - To become the "color" input for the scatterplot's setData() function.
            colors = np.divide(registered,
                               255)  # values must be between 0.0 - 1.0
            colors = colors.reshape(
                colors.shape[0] * colors.shape[1],
                4)  # From: Rows X Cols X RGB -to- [[r,g,b],[r,g,b]...]
            colors = colors[:, :
                            3:]  # remove alpha (fourth index) from BGRA to BGR
            colors = colors[..., ::-1]  #BGR to RGB
            return ptcld, colors

        return ptcld

    @staticmethod
    def _depthMatrixToPointCloudPos(z, camera_params, scale=1):
        '''
            Credit to: Logic1
            https://stackoverflow.com/questions/41241236/vectorizing-the-kinect-real-world-coordinate-processing-algorithm-for-speed
        '''
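        # Standard pinhole back-projection, which this method vectorizes: for a
        # pixel at (row v, col u) with depth z,
        #   X = (u - cx) * z / fx,   Y = (v - cy) * z / fy,   Z = z
        # np.indices(z.shape) below yields C = row indices, R = column indices.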
        # basically this is a vectorized version of depthToPointCloudPos()
        # calculate the real-world xyz vertex coordinates from the raw depth data matrix.
        C, R = np.indices(z.shape)

        R = np.subtract(R, camera_params['cx'])
        R = np.multiply(R, z)
        R = np.divide(R, camera_params['fx'] * scale)

        C = np.subtract(C, camera_params['cy'])
        C = np.multiply(C, z)
        C = np.divide(C, camera_params['fy'] * scale)

        return np.column_stack((z.ravel() / scale, R.ravel(), -C.ravel()))

    @staticmethod
    def _applyCameraMatrixOrientation(pt, camera_position=None):
        '''
            Credit to: Logic1
            https://stackoverflow.com/questions/41241236/vectorizing-the-kinect-real-world-coordinate-processing-algorithm-for-speed
        '''

        # Kinect Sensor Orientation Compensation
        # basically this is a vectorized version of applyCameraOrientation()
        # uses same trig to rotate a vertex around a gimbal.
        def rotatePoints(ax1, ax2, deg):
            # math to rotate vertexes around a center point on a plane.
            hyp = np.sqrt(
                pt[:, ax1]**2 + pt[:, ax2]**2
            )  # Get the length of the hypotenuse of the real-world coordinate from center of rotation, this is the radius!
            d_tan = np.arctan2(
                pt[:, ax2], pt[:, ax1]
            )  # Calculate the vertexes current angle (returns radians that go from -180 to 180)

            cur_angle = np.degrees(
                d_tan
            ) % 360  # Convert radians to degrees and use modulo to adjust range from 0 to 360.
            new_angle = np.radians(
                (cur_angle + deg) % 360
            )  # The new angle (in radians) of the vertexes after being rotated by the value of deg.

            pt[:, ax1] = hyp * np.cos(
                new_angle)  # Calculate the rotated coordinate for this axis.
            pt[:, ax2] = hyp * np.sin(
                new_angle)  # Calculate the rotated coordinate for this axis.

        if (camera_position is not None):
            rotatePoints(
                1, 2, camera_position['roll']
            )  #rotate on the Y&Z plane # Usually disabled because most tripods don't roll. If an Inertial Nav Unit is available this could be used)
            rotatePoints(
                0, 2, camera_position['elevation'])  #rotate on the X&Z plane
            rotatePoints(0, 1, camera_position['azimuth'])  #rotate on the X&Y

            # Apply offsets for height and linear position of the sensor (from viewport's center)
            pt[:] += np.float_([
                camera_position['x'], camera_position['y'],
                camera_position['z']
            ])

        return pt

    def record(self, filename, frame_type=COLOR, video_codec='XVID'):
        '''
            record: Records a video of the type specified. If no filename is 
                given, records as a .avi. Do not call this in conjunction with
                a typical cv2 display loop. 
            ARGUMENTS:
                filename: (str)
                    Name to save the video with. 
                frame_type: frame type
                    What channel to record (only one type).
                video_codec: (str)
                    cv2 video codec.
        '''

        # Create the video writer
        if (frame_type == RAW_COLOR):
            shape = COLOR_SHAPE
        else:
            shape = DEPTH_SHAPE[:2]
        fourcc = cv2.VideoWriter_fourcc(*video_codec)
        out = cv2.VideoWriter(filename, fourcc, 25, shape)

        # Record. On keyboard interrupt close and save.
        try:
            while True:
                frame = self.get_frame(frame_type=frame_type)
                if (frame_type == RAW_COLOR):
                    frame = frame[:, :, :3]
                out.write(frame)
                if (not self._headless):
                    cv2.imshow("KinectVideo", frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
        except KeyboardInterrupt:
            pass
        finally:
            out.release()
            cv2.destroyAllWindows()

    @staticmethod
    def _new_frame(shape):
        '''
            _new_frame: Return a pylibfreenect2 frame with the dimensions
                specified. Note that Frames are pointers, and as such returning 
                using numpy images created from them directly results in some
                strange behavior. After using the frame, it is highly 
                recommended to copy the resulting image using np.copy() rather 
                than returning the Frame referencing array directly. 
        '''
        return Frame(shape[0], shape[1], shape[2])

    @staticmethod
    def _get_raw_color_frame(frames):
        ''' _get_raw_color_frame: Return the current rgb image as a cv2 image. '''
        return cv2.resize(frames["color"].asarray(), COLOR_SHAPE)

    @staticmethod
    def _get_raw_depth_frame(frames):
        ''' _get_raw_depth_frame: Return the current depth image as a cv2 image. '''
        return cv2.resize(frames["depth"].asarray(), DEPTH_SHAPE[:2])

    @staticmethod
    def _get_ir_frame(frames):
        ''' get_ir_depth_frame: Return the current ir image as a cv2 image. '''
        return cv2.resize(frames["ir"].asarray(), DEPTH_SHAPE[:2])

    def _get_depth_frame(self, frames):
        ''' _get_depth_frame: Return the current undistorted depth image. '''
        undistorted = self._new_frame(DEPTH_SHAPE)
        self.registration.undistortDepth(frames["depth"], undistorted)
        undistorted = np.copy(undistorted.asarray(np.float32))
        return undistorted

    def _get_registered_frame(self, frames):
        ''' get_registered: Return registered color and undistorted depth image. '''
        registered = self._new_frame(DEPTH_SHAPE)
        undistorted = self._new_frame(DEPTH_SHAPE)
        self.registration.undistortDepth(frames["depth"], undistorted)
        self.registration.apply(frames["color"], frames["depth"], undistorted,
                                registered)
        undistorted = np.copy(undistorted.asarray(np.float32))
        registered = np.copy(registered.asarray(np.uint8))[..., :3]
        return registered, undistorted
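# A capture loop in the style the class docstring describes (COLOR is the
# module-level frame-type constant used as get_frame's default):
if __name__ == "__main__":
    import cv2
    k = Kinect()
    while True:
        frame = k.get_frame(COLOR)  # registered 512 x 424 color image
        cv2.imshow('kinect', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()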
Example 18
def KinectStream(frame_count, BreakKinect, enable_rgb=True, enable_depth=True):
    # create folders for the color and depth images, respectively
    last_path, _ = os.path.split(os.getcwd())
    path_color = os.path.join(last_path, "color")
    path_depth = os.path.join(last_path, "depth")
    CreateRefrashFolder(path_color)
    CreateRefrashFolder(path_depth)
    '''
	# if the depth images are needed, you must use OpenGLPacketPipeline to enable the GPU
	# to render the depth map so that real-time capture can be achieved.
	try:
		from pylibfreenect2 import OpenGLPacketPipeline
		pipeline = OpenGLPacketPipeline()
	except:
		try:
			from pylibfreenect2 import OpenCLPacketPipeline
			pipeline = OpenCLPacketPipeline()
		except:
			from pylibfreenect2 import CpuPacketPipeline
			pipeline = CpuPacketPipeline()
	'''

    if enable_depth:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    else:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    types = 0
    if enable_rgb:
        types |= FrameType.Color
    if enable_depth:
        types |= (FrameType.Ir | FrameType.Depth)
    listener = SyncMultiFrameListener(types)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    if enable_rgb and enable_depth:
        device.start()
    else:
        device.startStreams(rgb=enable_rgb, depth=enable_depth)
    # NOTE: must be called after device.start()
    if enable_depth:
        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)
    # the target size of the resize function
    SetSize = (540, 360)
    # SetSize = (1080, 720)

    while not BreakKinect.value:
        frame_count.value += 1
        file_name = 'image' + str(int(frame_count.value)) + '.jpg'
        im_path_color = os.path.join(path_color, file_name)
        im_path_depth = os.path.join(path_depth, file_name)
        frames = listener.waitForNewFrame()

        if enable_rgb:
            color = frames["color"]
        if enable_depth:
            ir = frames["ir"]
            depth = frames["depth"]

        if enable_rgb and enable_depth:
            registration.apply(color, depth, undistorted, registered, enable_filter=False)
        elif enable_depth:
            registration.undistortDepth(depth, undistorted)

        if enable_rgb:
            if enable_depth:
                # registration.apply() was called above, so save the color
                # image registered to the depth frame
                new_frame = registered.asarray(np.uint8)
            else:
                new_frame = cv2.resize(color.asarray(), SetSize)
            # drop the alpha channel before writing
            cv2.imwrite(im_path_color, new_frame[:, :, :-1])
        # print("Kinect color:", new_frame.shape)

        if enable_depth:
            # normalize by the kinect's ~4.5 m working range
            depth = depth.asarray() / 4500.
            # scale to 8 bits, clipping readings beyond 4500 mm to avoid
            # wrap-around when casting to uint8
            undist = undistorted.asarray(np.float32) / 4500. * 255
            cv2.imwrite(im_path_depth, np.clip(undist, 0, 255).astype(np.uint8))
            print("Kinect depth:", depth.shape)

        listener.release(frames)

    device.stop()
    device.close()
    # WriteVideo(path, im_pre = 'image', video_name = 'test.avi', fps = 15, size = SetSize)
    sys.exit(0)
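KinectStream is designed to be driven from another process through the two shared values it polls. A minimal, hypothetical driver, assuming multiprocessing.Value objects for the counter and the stop flag:

import time
from multiprocessing import Process, Value

frame_count = Value('d', 0)    # shared frame counter, read back by the parent
break_kinect = Value('b', 0)   # set truthy to stop the capture loop
p = Process(target=KinectStream, args=(frame_count, break_kinect))
p.start()
time.sleep(10)                 # capture for about ten seconds
break_kinect.value = 1
p.join()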
Example n. 19
0
    def run(self):
        self._isrunning = True

        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        if self._debug:
            print("Packet pipeline:", type(pipeline).__name__)

        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if self._debug:
            print("Number of devices: {}".format(num_devices))
        if num_devices == 0:
            raise RuntimeError("No kinect2 device connected!")

        serial = self.fn.getDeviceSerialNumber(0)
        device = self.fn.openDevice(serial, pipeline=pipeline)
        listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                          | FrameType.Depth)

        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        if self._debug:
            print("Init done")

        device.start()

        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

        params = device.getIrCameraParams()
        self.cx = params.cx
        self.cy = params.cy
        self.fx = params.fx
        self.fy = params.fy

        undistorted = Frame(512, 424, 4)
        registered = Frame(512, 424, 4)

        while self._isrunning:
            frames = listener.waitForNewFrame()

            color = frames["color"]
            ir = frames["ir"]
            depth = frames["depth"]

            registration.apply(color,
                               depth,
                               undistorted,
                               registered,
                               bigdepth=None,
                               color_depth_map=None)

            if self._save_color:
                self._set_color_image(
                    cv2.cvtColor(color.asarray(), cv2.COLOR_BGR2RGB))
            if self._save_depth or self._save_pointcloud:
                depth_arr = depth.asarray()
                if self._save_depth:
                    self._set_depth_image(depth_arr)
            if self._save_ir:
                self._set_ir_image(ir.asarray())
            if self._save_registered or self._save_pointcloud:
                reg = cv2.cvtColor(registered.asarray(np.uint8),
                                   cv2.COLOR_BGR2RGB)
                if self._save_registered:
                    self._set_registered_image(reg)
            if self._save_undistorted:
                und = undistorted.asarray(np.uint8)
                self._set_undistorted_image(
                    cv2.cvtColor(und, cv2.COLOR_BGR2RGB))
            if self._save_pointcloud:
                self.compute_pointcloud(reg, depth_arr)

            listener.release(frames)

        if self._debug:
            print("Stopping device")
        device.stop()
        if self._debug:
            print("Closing device")
        device.close()
        if self._debug:
            print("Device stopped and closed")
Example n. 20
0
def main(hrnet_c, hrnet_j, hrnet_weights, hrnet_joints_set, single_person,
         max_batch_size, disable_vidgear, device):
    if device is not None:
        device = torch.device(device)
    else:
        if torch.cuda.is_available():
            torch.backends.cudnn.deterministic = True
            device = torch.device('cuda:0')
        else:
            device = torch.device('cpu')

    print(device)

    has_display = 'DISPLAY' in os.environ.keys() or sys.platform == 'win32'

    model = SimpleHRNet(
        hrnet_c,
        hrnet_j,
        hrnet_weights,
        multiperson=not single_person,
        max_batch_size=max_batch_size,
        device=device
    )

    print("Packet pipeline:", type(pipeline).__name__)

    enable_rgb = True
    enable_depth = True

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    # NOTE: this rebinds `device` from the torch device to the kinect device;
    # the torch device is no longer needed once the model has been built
    device = fn.openDevice(serial, pipeline=pipeline)

    types = 0
    if enable_rgb:
        types |= FrameType.Color
    if enable_depth:
        types |= (FrameType.Ir | FrameType.Depth)
    listener = SyncMultiFrameListener(types)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    if enable_rgb and enable_depth:
        device.start()
    else:
        device.startStreams(rgb=enable_rgb, depth=enable_depth)

    # NOTE: must be called after device.start()
    if enable_depth:
        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    while True:
        frames = listener.waitForNewFrame()

        if enable_rgb:
            color = frames["color"]
        if enable_depth:
            ir = frames["ir"]
            depth = frames["depth"]

        if enable_rgb and enable_depth:
            registration.apply(color, depth, undistorted, registered)
        elif enable_depth:
            registration.undistortDepth(depth, undistorted)

        if enable_depth:
            cv2.imshow("ir", ir.asarray() / 65535.)
            cv2.imshow("depth", depth.asarray() / 4500.)
            # cv2.imshow("undistorted", undistorted.asarray(np.float32) / 4500.)

        if enable_rgb and enable_depth:
            cv2.imshow("registered", registered.asarray(np.uint8))

        # color = cv2.resize(color.asarray()[:, :, :3], (int(1920 / 3), int(1080 / 3)))
        color = registered.asarray(np.uint8)[:, :, :3]
        color = np.ascontiguousarray(color, dtype=np.uint8)
        pts = model.predict(color)

        undistorted_nor = undistorted.asarray(np.float32) / 4500.
        undistorted_image = cv2.cvtColor(undistorted_nor, cv2.COLOR_GRAY2BGR)
    for i, pt in enumerate(pts):
            color = draw_points_and_skeleton(color, pt, joints_dict()[hrnet_joints_set]['skeleton'], person_index=i,
                                             joints_color_palette='gist_rainbow', skeleton_color_palette='jet',
                                             joints_palette_samples=10)
            for j, point in enumerate(pt):
                # each point is (row, col, confidence); in the COCO joint set,
                # indices 9 and 10 are the left and right wrists
                if point[2] > 0.5 and point[0] < undistorted_image.shape[0] and point[1] < undistorted_image.shape[1]:
                    if j == 9:
                        left_wrist_depth = undistorted_nor[int(point[0]), int(point[1])]
                        print('left_wrist_depth', left_wrist_depth)
                        undistorted_image = cv2.circle(undistorted_image, (int(point[1]), int(point[0])), 30, (255, 255, 0), -1)

                    if j == 10:
                        right_wrist_depth = undistorted_nor[int(point[0]), int(point[1])]
                        print('right_wrist_depth', right_wrist_depth)
                        undistorted_image = cv2.circle(undistorted_image, (int(point[1]), int(point[0])), 30, (255, 0, 255), -1)

        if enable_rgb:
            cv2.imshow("color", color)
            cv2.imshow("undistorted", undistorted_image)

        listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    device.stop()
    device.close()

    sys.exit(0)
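Since undistorted_nor is the undistorted depth divided by 4500, a sampled wrist reading converts back to millimetres by undoing that normalization; a one-line sketch using the names above:

wrist_mm = left_wrist_depth * 4500.0  # undo the /4500 normalization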
Example n. 21
0
class Kinect():
    ''' Kinect object, it uses pylibfreenect2 as interface to get the frames.
    The original example was taken from the pylibfreenect2 github repository, at:
    https://github.com/r9y9/pylibfreenect2/blob/master/examples/selective_streams.py  '''
    def __init__(self, enable_rgb, enable_depth):
        ''' Init method called upon creation of Kinect object '''

        # according to the system, it loads the correct pipeline
        # and prints a log for the user
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            self.pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                self.pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                self.pipeline = CpuPacketPipeline()

        rospy.loginfo(color.BOLD + color.YELLOW + '-- PACKET PIPELINE: ' +
                      str(type(self.pipeline).__name__) + ' --' + color.END)

        self.enable_rgb = enable_rgb
        self.enable_depth = enable_depth

        # creates the freenect2 device
        self.fn = Freenect2()
        # if no kinects are plugged in the system, it quits
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            rospy.loginfo(color.BOLD + color.RED +
                          '-- ERROR: NO DEVICE CONNECTED!! --' + color.END)
            sys.exit(1)

        # otherwise it gets the first one available
        self.serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(self.serial, pipeline=self.pipeline)

        # defines the streams to be acquired according to what the user wants
        types = 0
        if self.enable_rgb:
            types |= FrameType.Color
        if self.enable_depth:
            types |= (FrameType.Ir | FrameType.Depth)
        self.listener = SyncMultiFrameListener(types)

        # Register listeners
        if self.enable_rgb:
            self.device.setColorFrameListener(self.listener)
        if self.enable_depth:
            self.device.setIrAndDepthFrameListener(self.listener)

        if self.enable_rgb and self.enable_depth:
            self.device.start()
        else:
            self.device.startStreams(rgb=self.enable_rgb,
                                     depth=self.enable_depth)

        # NOTE: must be called after device.start()
        if self.enable_depth:
            self.registration = Registration(
                self.device.getIrCameraParams(),
                self.device.getColorCameraParams())

        # last number is bytes per pixel
        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)

        self.color_hsv = None

    def acquire(self):
        ''' Acquisition method to trigger the Kinect to acquire new frames. '''

        # acquires a frame only if it's new
        frames = self.listener.waitForNewFrame()

        if self.enable_rgb:
            self.color = frames["color"]
            self.color_new = cv2.resize(self.color.asarray(), (1920, 1080))
            # the image obtained has a fourth channel, the alpha value,
            # so we keep only the first three
            self.color_new = self.color_new[:, :, 0:3]
            # the kinect sensor mirrors the images, so we have to flip them back
            self.color_new = cv2.flip(self.color_new, 1)
        if self.enable_depth:
            # these only have one channel; we just need to convert them to
            # arrays if we want to perform detection on them
            self.depth = frames["depth"]
            # note: cv2.resize requires a target size; the depth frame is
            # already at its native 512x424, so no resize is needed here
            self.depth_new = self.depth.asarray() / 4500.
            self.depth_new = cv2.flip(self.depth_new, 1)

            # ir stream, not needed for detection purposes right now

            #self.ir = frames["ir"]
            #self.ir_new = self.ir.asarray() / 65535.
            #self.ir_new = cv2.flip(self.ir_new, 1)
        ''' # This portion is needed if you also want to display the
            # registered (RGB+D) image and the undistorted depth.

        if self.enable_rgb and self.enable_depth:
            self.registration.apply(self.color, self.depth, self.undistorted, self.registered)
            self.registered_new = self.registered.asarray(np.uint8)
            self.registered_new = cv2.flip(self.registered_new, 1)
        elif self.enable_depth:
            self.registration.undistortDepth(self.depth, self.undistorted)
            self.undistorted_new = self.undistorted.asarray(np.float32) / 4500.
            self.undistorted_new = cv2.flip(self.undistorted_new, 1) '''

        # do this anyway to release every acquired frame
        self.listener.release(frames)

    def rgb2hsv(self):
        ''' Converts the RGB input frame to HSV. The per-pixel formula is
        vectorized with numpy, since Python's built-in max/min cannot be
        applied element-wise to whole image arrays. '''

        r = self.color_new[:, :, 0] / 255.0
        g = self.color_new[:, :, 1] / 255.0
        b = self.color_new[:, :, 2] / 255.0
        mx = np.maximum(np.maximum(r, g), b)
        mn = np.minimum(np.minimum(r, g), b)
        df = mx - mn
        # guard against division by zero on grey pixels (df == 0)
        safe_df = np.where(df == 0, 1, df)

        h = np.zeros_like(mx)
        h = np.where(mx == b, (60 * ((r - g) / safe_df) + 240) % 360, h)
        h = np.where(mx == g, (60 * ((b - r) / safe_df) + 120) % 360, h)
        h = np.where(mx == r, (60 * ((g - b) / safe_df) + 360) % 360, h)
        h = np.where(df == 0, 0, h)

        s = np.where(mx == 0, 0, df / np.where(mx == 0, 1, mx))
        v = mx

        self.color_hsv = np.dstack((h, s, v))
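For reference, OpenCV implements the same conversion in optimized C; a one-line equivalent for use inside the class, with the caveat that OpenCV scales 8-bit hue to [0, 180) rather than [0, 360):

hsv = cv2.cvtColor(self.color_new, cv2.COLOR_RGB2HSV)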
Example n. 22
0
def main():
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
    except:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set True if you need them
    need_bigdepth = False
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
        if need_color_depth_map else None

    point = pcl.PointCloud()
    visual = pcl.pcl_visualization.CloudViewing()
    # the cloud is built and shown inside the loop below; calling
    # ShowColorCloud here would reference an undefined name

    while True:
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        # NOTE for visualization:
        # cv2.imshow without OpenGL backend seems to be quite slow to draw all
        # things below. Try commenting out some imshow if you don't have a fast
        # visualization backend.
        # cv2.imshow("ir", ir.asarray() / 65535.)
        # cv2.imshow("depth", depth.asarray() / 4500.)
        # cv2.imshow("color", cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
        # cv2.imshow("registered", registered.asarray(np.uint8))

        # if need_bigdepth:
        #     cv2.imshow("bigdepth", cv2.resize(bigdepth.asarray(np.float32),
        #                                       (int(1920 / 3), int(1082 / 3))))
        # if need_color_depth_map:
        #     cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

        undistorted_array = undistorted.asarray(dtype=np.float32, ndim=2)
        # registered_array = registered.asarray(dtype=np.uint8)
        # NOTE: pcl.PointCloud expects an Nx3 float32 array, so the depth
        # image would need to be back-projected to XYZ points first
        point = pcl.PointCloud(undistorted_array)
        # visual.ShowColorCloud(point)

        listener.release(frames)

        # key = cv2.waitKey(delay=1)
        # if key == ord('q'):
        #     break
        # exit the capture loop once the viewer window has been closed
        if visual.WasStopped():
            break

    device.stop()
    device.close()

    sys.exit(0)
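Example n. 23
0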
def main():
    instructions = 0
    print "--- Intention classification for communication between a human and a robot ---"
    if instructions == 1:
        print "First you will be required to present a facial expression before you will do a head movement."
        print "If done correctly these gestures will be detected by the robot and it will perform the desired task."
        raw_input("Press Enter to continue...")

    if instructions == 1:
        print "This is the module for facial expression recognition."
        print "This program can detect the emotions: Happy and Angry."
        print "The program will look for the expression for 3 seconds."
    raw_input("To proceed to Facial Expression Recognition press Enter...")
    predictExp = 0

    # Load Facial Expression Recognition trained model
    print "- Loading FER model... -"
    #faceExpModel = tf.keras.models.load_model("/home/bjornar/ML_models/FER/Good models(80+)/tf_keras_weights_ninthRev-88percent/tf_keras_weights_ninthRev.hdf5")
    faceExpModel = tf.keras.models.load_model(
        "/home/project/Bjorn/tf_keras_weights_ninthRev-88percent/tf_keras_weights_ninthRev.hdf5"
    )

    # Load Face Cascade for Face Detection
    print "- Loading Face Cascade for Face Detection... -"
    #cascPath = "/home/bjornar/MScDissertation/TrainingData/FaceDetection/haarcascade_frontalface_default.xml"
    cascPath = "/home/project/Bjorn/IntentionClassification-Repository/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)

    ## Initializing Head Movement variables
    # Introduce mark_detector to detect landmarks.
    mark_detector = MarkDetector()

    sample_frame = cv2.imread("sample_frame.png")
    # Setup process and queues for multiprocessing.
    img_queue = Queue()
    box_queue = Queue()
    img_queue.put(sample_frame)
    box_process = Process(target=get_face,
                          args=(
                              mark_detector,
                              img_queue,
                              box_queue,
                          ))
    box_process.start()

    # Introduce pose estimator to solve pose. Get one frame to setup the
    # estimator according to the image size.
    height, width = sample_frame.shape[:2]

    pose_estimator = PoseEstimator(img_size=(height, width))

    # Introduce scalar stabilizers for pose.
    pose_stabilizers = [
        Stabilizer(state_num=2,
                   measure_num=1,
                   cov_process=0.1,
                   cov_measure=0.1) for _ in range(6)
    ]

    noseMarks = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
                 [0, 0], [0, 0], [0, 0]]
    counter = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    numXPoints = 0
    numYPoints = 0
    sumX = 0
    sumY = 0

    currentTime = 0
    previousTime1 = 0
    previousTime2 = 0

    directionArray = []
    moveSequence = []
    moves = []
    classifyMoves = 0
    headPoseDirection = 'empty'

    if camera == 'kinect':
        ## Initialize Kinect camera
        print "Initializing camera..."
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        #print("Packet pipeline:", type(pipeline).__name__)

        # Create and set logger
        #logger = createConsoleLogger(LoggerLevel.Debug)
        setGlobalLogger()

        fn = Freenect2()
        num_devices = fn.enumerateDevices()
        if num_devices == 0:
            print("- No device connected! -")
            sys.exit(1)

        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline=pipeline)

        listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                          | FrameType.Depth)

        # Register listeners
        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        device.start()

        # NOTE: must be called after device.start()
        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

    elif camera == 'webcam':
        #video_capture = cv2.VideoCapture(0)
        video_capture = cv2.VideoCapture(-1)

    elif camera == 'video':
        #video_capture = cv2.VideoCapture(0)
        video_capture = cv2.VideoCapture(
            "/home/project/Bjorn/SonyHandycamTest.mp4")

    ## Facial Expression Recognition variables
    FER_prediction = []
    FERclass = ''
    FERstart = 0
    classifyMoves = 0

    ## Head movement variables
    predictHeadMov = 3
    HeadMov = []
    HMCclass = ''
    detectedFaces = []
    mark = []

    notDone = 0
    progressStatus = [0, 0]

    while notDone in progressStatus:  # This waits for each module to finish
        if camera == 'kinect':
            frames = listener.waitForNewFrame()

            color = frames["color"]
            color = color.asarray()
            color = cv2.resize(color, (int(873), int(491)))
            color = color[0:480, 150:790]
            color = np.delete(color, np.s_[3::], 2)

        elif camera == 'webcam':
            # Capture frame-by-frame
            ret, color = video_capture.read()

        elif camera == 'video':
            # Capture frame-by-frame
            ret, color = video_capture.read()

        ## Detect facial expression
        predictExpNums = [1, 2]
        if predictExp == 0:
            while predictExp not in predictExpNums:
                predictExp = int(
                    raw_input(
                        "\nPress 1 to detect Facial Expression or press 2 to do Head Movement classification."
                    ))
            if predictExp == 1:
                predictExp = 1
                print "------ Facial Expression Recognition ------"
            elif predictExp == 2:
                predictHeadMov = 0
                progressStatus[0] = 1

            FERstart = time.time()

        elif predictExp == 1:
            currentTime = time.time()
            if currentTime - FERstart < 10:
                FER_prediction.append(
                    facialExpressionRecogntion(color, faceExpModel,
                                               faceCascade))
            else:
                predictExp = 2
                FER_prediction = filter(None, FER_prediction)
                FERclass = mostCommon(FER_prediction)
                FERclass = FERclass[2:7]
                predictHeadMov = 0
                if FERclass == '':
                    print("Did not detect an expression, try again.")
                    predictExp = 0
                else:
                    progressStatus[0] = 1
                    print "Detected expression: " + str(FERclass)
                    print "Progress: FER DONE"
                # else:
                #     #cv2.imwrite("sample_frame.png", color)
                #     break

        ## Detect head movement class
        if predictHeadMov == 0:
            predictHeadMov = int(
                raw_input(
                    "\nPress 1 to do Head Movement classification or 2 to skip."
                ))
            if predictHeadMov == 1:
                predictHeadMov = 1
                print "------ Head Movement Classification ------"
                moveTime = int(
                    raw_input(
                        "How many seconds should I record your movement?"))
                #moveTime = 2
            else:
                predictHeadMov = 2
            timer = time.time()
            previousTime1 = timer
            previousTime2 = timer

        if predictHeadMov == 1:
            print color.shape
            color = color[0:480, 0:480]
            color = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
            cv2.imshow("", color)
            raw_input()
            print color.shape
            # Feed frame to image queue.
            img_queue.put(color)

            #pdb.set_trace()

            # Get face from box queue.
            facebox = box_queue.get()
            print color.shape

            if facebox is not None:
                # Detect landmarks from image of 128x128.
                face_img = color[facebox[1]:facebox[3], facebox[0]:facebox[2]]
                face_img = cv2.resize(face_img,
                                      (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
                face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
                marks = mark_detector.detect_marks(face_img)

                #Convert the marks locations from local CNN to global image.
                marks *= (facebox[2] - facebox[0])
                marks[:, 0] += facebox[0]
                marks[:, 1] += facebox[1]

                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(color, headPoseDirection, (20, 70), font, 1,
                            (0, 255, 0), 4)

                # # Get average position of nose
                noseMarksTemp = []
                noseMarksTemp.append(marks[30][0])
                noseMarksTemp.append(marks[30][1])
                noseMarks[0] = noseMarksTemp

                for i in range(9, 0, -1):
                    noseMarks[i] = noseMarks[i - 1]

                # Get the direction of head movement
                headPoseDirection = calculateDirection(noseMarks)

                directionArray.append(headPoseDirection)

                if classifyMoves == 0:
                    classifyMoves = 1
                    timer = time.time()
                    previousTime1 = timer
                    previousTime2 = timer

                currentTime = time.time()
                if currentTime - previousTime1 > moveTime and classifyMoves == 1:
                    print "------------------------------------------------"
                    print "Elapsed timer 1: " + str(currentTime -
                                                    previousTime1)

                    # Get most common direction
                    HMCclass = mostCommon(directionArray)

                    classifyMoves = 2
                    directionArray = []
                    previousTime1 = currentTime

                    progressStatus[1] = 1
                    print "Progress: HMC DONE"
            else:
                print "Did not detect a face"
        elif predictHeadMov == 2:
            progressStatus[1] = 1
            print "Skipped Head Movement Estimation."
            break

        # if notDone in progressStatus and predictHeadMov == 2 and predictExp == 2:
        #     print "You seem to have skipped one or more tasks."
        #     inpt = ''
        #     while inpt == '':
        #         inpt = raw_input("To do FER press 1 and to do HMC press 2...")
        #         if input == '1':
        #             predictExp = 1
        #         elif input == '2':
        #             predictHeadMov

        cv2.imshow("", color)
        if camera == 'kinect':
            listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    if camera == 'kinect':
        listener.release(frames)
        device.stop()
        device.close()
    elif camera == 'webcam' or camera == 'video':
        video_capture.release()

    cv2.destroyAllWindows()

    # Clean up the multiprocessing process.
    box_process.terminate()
    box_process.join()

    print "---------------- RESULT ----------------"

    if FERclass != '':
        print "Detected facial expression: " + str(FERclass)
    else:
        print "Did not detect any expression."

    if HMCclass != '':
        print "Detected head movement: " + str(HMCclass)
    else:
        print "Did not detect a head movement."

    print "----------------------------------------"

    intentionClassification = [FERclass, HMCclass]

    return intentionClassification
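Example n. 24
0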
class OpenCVCapture(object):
    def __init__(self, device_id=0):
        """Create an OpenCV capture object associated with the provided webcam
        device ID.
        """
        logger = createConsoleLogger(LoggerLevel.Error)
        setGlobalLogger(logger)
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No Kinect!")
            raise LookupError()

        serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(serial, pipeline=pipeline)

        types = 0
        types |= FrameType.Color

        self.listener = SyncMultiFrameListener(types)
        self.device.setColorFrameListener(self.listener)

        # Start a thread to continuously capture frames.
        # This must be done because different layers of buffering in the
        # camera and OS drivers will cause you to retrieve stale frames if
        # they aren't continuously read.
        self._capture_frame = None
        # Use a lock to prevent concurrent access to the latest frame.
        self._capture_lock = threading.Lock()
        self._capture_thread = threading.Thread(target=self._grab_frames)
        self._capture_thread.daemon = True
        self._capture_thread.start()

    def _grab_frames(self):
        self.device.startStreams(rgb=True, depth=False)

        while True:
            frames = self.listener.waitForNewFrame()
            color = frames["color"]

            with self._capture_lock:
                self._capture_frame = cv2.resize(
                    color.asarray(), (int(1920 / 3), int(1080 / 3))).copy()

            self.listener.release(frames)
            time.sleep(1.0 / CAPTURE_HZ)

    def read(self):
        """Read a single frame from the camera and return the data as an OpenCV
        image (which is a numpy array).
        """
        frame = None
        with self._capture_lock:
            frame = self._capture_frame
        # If there are problems, keep retrying until an image can be read.
        while frame is None:
            time.sleep(0)
            with self._capture_lock:
                frame = self._capture_frame
        # Return the capture image data.
        return frame

    def stop(self):
        print('{"status":"Terminating..."}')
        self.device.stop()
        self.device.close()
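A minimal usage sketch for the capture class above (the window name is illustrative):

cap = OpenCVCapture()
while True:
    frame = cap.read()  # blocks until the background thread has a frame
    cv2.imshow("kinect rgb", frame)
    if cv2.waitKey(1) == ord('q'):
        break
cap.stop()
cv2.destroyAllWindows()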
Example n. 25
0
    def find_and_track_kinect(self, name, tracker = "CSRT",
            face_target_box = DEFAULT_FACE_TARGET_BOX,
            track_scaling = DEFAULT_SCALING,
            res = (RGB_W, RGB_H), video_out = True):
        print("Starting Tracking")

        target = name

        fn = Freenect2()
        num_devices = fn.enumerateDevices()

        if num_devices == 0:
            print("No device connected!")
            return

        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline = self.pipeline)

        listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)

        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        device.start()

        registration = Registration(device.getIrCameraParams(),
                device.getColorCameraParams())

        undistorted = Frame(512, 424, 4)
        registered = Frame(512, 424, 4)
        bigdepth = Frame(1920, 1082, 4)

        trackerObj = None
        face_process_frame = True

        bbox = None
        track_bbox = None

        head_h = 0
        body_left_w = 0
        body_right_w = 0
        center_w = 0

        globalState = ""

        # Following line creates an avi video stream of daisy's tracking
        # out = cv2.VideoWriter('daisy_eye.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 10, (960, 540))
        # out.write(c)
        while True:
            timer = cv2.getTickCount()

            frames = listener.waitForNewFrame()

            color = frames["color"]
            depth = frames["depth"]

            registration.apply(color, depth, undistorted, registered, bigdepth=bigdepth)

            bd = np.resize(bigdepth.asarray(np.float32), (1080, 1920))
            c = cv2.cvtColor(color.asarray(), cv2.COLOR_RGB2BGR)

            if self.connected:
                newTarget = self.alexa_neuron.get('name')
                if newTarget != target:
                    target = newTarget
                    listener.release(frames)
                    trackerObj = None
                    face_process_frame = True

                    bbox = None
                    track_bbox = None
                    continue
                if target is not None and target not in self.known_faces:
                    target = None

            if target is None:
                if self.connected:
                    c = self.__scale_frame(c, scale_factor = 0.5)
                    image = cv2.imencode('.jpg', c)[1].tostring()
                    self.web_neuron.update([('image', image)])
                listener.release(frames)
                trackerObj = None
                face_process_frame = True

                bbox = None
                track_bbox = None
                self.__update_individual_position("WAITING", None, None, None, res)
                continue

            face_bbox = None
            new_track_bbox = None

            if face_process_frame:
                small_c = self.__crop_frame(c, face_target_box)
                face_locations = face_recognition.face_locations(small_c, number_of_times_to_upsample=0, model="cnn")
                face_encodings = face_recognition.face_encodings(small_c, face_locations)
                for face_encoding in face_encodings:
                    matches = face_recognition.compare_faces(
                            [self.known_faces[target]], face_encoding, 0.6)
                    if len(matches) > 0 and matches[0]:
                        (top, right, bottom, left) = face_locations[0]

                        left += face_target_box[0]
                        top += face_target_box[1]
                        right += face_target_box[0]
                        bottom += face_target_box[1]

                        face_bbox = (left, top, right, bottom)
                        mid_w = int((left + right) / 2)
                        mid_h = int((top + bottom) / 2)
                        new_track_bbox = self.__body_bbox(bd, mid_w, mid_h, res)

                        break
            face_process_frame = not face_process_frame

            overlap_pct = 0
            track_area = self.__bbox_area(track_bbox)
            if track_area > 0 and new_track_bbox:
                overlap_area = self.__bbox_overlap(new_track_bbox, track_bbox)
                overlap_pct = min(overlap_area / self.__bbox_area(new_track_bbox),
                        overlap_area / self.__bbox_area(track_bbox))
            small_c = self.__scale_frame(c, track_scaling)
            if new_track_bbox is not None and overlap_pct < CORRECTION_THRESHOLD:
                bbox = (new_track_bbox[0],
                        new_track_bbox[1],
                        new_track_bbox[2] - new_track_bbox[0],
                        new_track_bbox[3] - new_track_bbox[1])
                bbox = self.__scale_bbox(bbox, track_scaling)
                trackerObj = self.__init_tracker(small_c, bbox, tracker)
                self.alexa_neuron.update([('tracking', True)])

            if trackerObj is None:
                self.__update_individual_position("WAITING", None, None, None, res)

            status = False

            if trackerObj is not None:
                status, trackerBBox = trackerObj.update(small_c)
                bbox = (int(trackerBBox[0]),
                        int(trackerBBox[1]),
                        int(trackerBBox[0] + trackerBBox[2]),
                        int(trackerBBox[1] + trackerBBox[3]))

            if bbox is not None:
                # scale the tracker's bbox back up to full-resolution coordinates
                track_bbox = self.__scale_bbox(bbox, 1/track_scaling)

            w = 0
            h = 0

            if status:
                w = track_bbox[0] + int((track_bbox[2] - track_bbox[0])/2)
                h = track_bbox[1] + int((track_bbox[3] - track_bbox[1])/2)

                if (w < res[0] and w >= 0 and h < res[1] and h >= 0):
                    distanceAtCenter = bd[h][w]
                    center = (w, h)
                    globalState = self.__update_individual_position(status, track_bbox, center, distanceAtCenter, res)

            if globalState == "Fail":
                break

            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            if video_out or self.connected:
                cv2.line(c, (w, 0), (w, res[1]), (0,255,0), 1)
                cv2.line(c, (0, h), (res[0], h), (0,255,0), 1)
                cv2.line(c, (0, head_h), (res[0], head_h), (0,0,0), 1)
                cv2.line(c, (body_left_w, 0), (body_left_w, res[1]), (0,0,255), 1)
                cv2.line(c, (body_right_w, 0), (body_right_w, res[1]), (0,0,255), 1)
                cv2.line(c, (center_w, 0), (center_w, res[1]), (0,0,255), 1)

                self.__draw_bbox(True, c, face_target_box, (255, 0, 0), "FACE_TARGET")
                self.__draw_bbox(status, c, track_bbox, (0, 255, 0), tracker)
                self.__draw_bbox(face_bbox is not None, c, face_bbox, (0, 0, 255), target)
                self.__draw_bbox(face_bbox is not None, c, new_track_bbox, (0, 255, 255), "BODY")

                c = self.__scale_frame(c, scale_factor = 0.5)

                cv2.putText(c, "FPS : " + str(int(fps)), (100,50),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 1)
                if not status:
                    failedTrackers = "FAILED: "
                    failedTrackers += tracker + " "
                    cv2.putText(c, failedTrackers, (100, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,142), 1)
                if self.connected:
                    image = cv2.imencode('.jpg', c)[1].tostring()
                    self.web_neuron.update([('image', image)])
                if video_out:
                    cv2.imshow("color", c)

            listener.release(frames)

            key = cv2.waitKey(1) & 0xff
            if key == ord('q'):
                self.__update_individual_position("STOP", None, None, None, res)
                break

        self.so.close()
        cv2.destroyAllWindows()
        device.stop()
        device.close()
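The tracker above leans on private helpers that are not shown; as one illustration, a plausible sketch of the bbox scaling it assumes (hypothetical, the real __scale_bbox may differ):

def scale_bbox(bbox, scale):
    # scale every coordinate of an (x1, y1, x2, y2) tuple by the same factor
    return tuple(int(v * scale) for v in bbox)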
Example n. 26
0
def test_sync_multi_frame():
    fn = Freenect2()

    num_devices = fn.enumerateDevices()
    assert num_devices > 0

    serial = fn.getDefaultDeviceSerialNumber()
    assert serial == fn.getDeviceSerialNumber(0)

    device = fn.openDevice(serial)

    assert fn.getDefaultDeviceSerialNumber() == device.getSerialNumber()
    device.getFirmwareVersion()

    listener = SyncMultiFrameListener(
        FrameType.Color | FrameType.Ir | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # Registration
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())
    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # optional parameters for registration
    bigdepth = Frame(1920, 1082, 4)
    color_depth_map = np.zeros((424, 512), np.int32)

    # test if we can get two frames at least
    frames = listener.waitForNewFrame()
    listener.release(frames)

    # frames as a first argument should also work
    frames = FrameMap()
    listener.waitForNewFrame(frames)

    color = frames[FrameType.Color]
    ir = frames[FrameType.Ir]
    depth = frames[FrameType.Depth]

    for frame in [ir, depth]:
        assert frame.exposure == 0
        assert frame.gain == 0
        assert frame.gamma == 0

    for frame in [color]:
        assert frame.exposure > 0
        assert frame.gain > 0
        assert frame.gamma > 0

    registration.apply(color, depth, undistorted, registered)

    # with optional parameters
    registration.apply(color, depth, undistorted, registered,
                       bigdepth=bigdepth,
                       color_depth_map=color_depth_map.ravel())

    registration.undistortDepth(depth, undistorted)

    assert color.width == 1920
    assert color.height == 1080
    assert color.bytes_per_pixel == 4

    assert ir.width == 512
    assert ir.height == 424
    assert ir.bytes_per_pixel == 4

    assert depth.width == 512
    assert depth.height == 424
    assert depth.bytes_per_pixel == 4

    assert color.asarray().shape == (color.height, color.width, 4)
    assert ir.asarray().shape == (ir.height, ir.width)
    assert depth.asarray(np.float32).shape == (depth.height, depth.width)

    listener.release(frames)

    def __test_cannot_determine_type_of_frame(frame):
        frame.asarray()

    for frame in [registered, undistorted]:
        yield raises(ValueError)(__test_cannot_determine_type_of_frame), frame

    # getPointXYZ
    x, y, z = registration.getPointXYZ(undistorted, 512 // 2, 424 // 2)
    if not np.isnan([x, y, z]).any():
        assert z > 0

    # getPointXYZRGB
    x, y, z, b, g, r = registration.getPointXYZRGB(undistorted, registered,
                                                   512 // 2, 424 // 2)
    if not np.isnan([x, y, z]).any():
        assert z > 0
    assert np.isfinite([b, g, r]).all()

    for pix in [b, g, r]:
        assert pix >= 0 and pix <= 255

    device.stop()
    device.close()
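registration.getPointXYZ, exercised above for a single pixel, generalizes to a whole frame. A minimal sketch that collects the valid points of the undistorted depth frame into an Nx3 cloud (a slow pure-Python loop, for illustration only):

points = []
for row in range(424):
    for col in range(512):
        x, y, z = registration.getPointXYZ(undistorted, row, col)
        if not np.isnan(z):
            points.append((x, y, z))
points = np.asarray(points, dtype=np.float32)  # Nx3 camera-space cloud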
Example n. 27
0
class VideoCapture:
    def __init__(self, video_source):
        self.video_source = video_source
        if video_source == 'kinectv2':
            self.listener = None
            self.device = None
            self.fn = None
            self.frames = None
            self.startKinectV2Dev()

    def startKinectV2Dev(self):
        from pylibfreenect2 import Freenect2, SyncMultiFrameListener, FrameType

        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()

        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()

            except:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        # log the pipeline that was actually selected, whichever branch ran
        print("LIBFREENECT2::Packet pipeline:",
              type(pipeline).__name__)

        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()

        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(
            FrameType.Color)  # listening only to rgb frames

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.start()

    def stopKinectV2Dev(self):
        self.device.stop()
        self.device.close()

    def stopVideoFeed(self):
        if self.video_source == 'kinectv2':
            self.stopKinectV2Dev()

    def getKinectV1RgbFrame(self):
        import freenect

        array, _ = freenect.sync_get_video()
        rgb_frame = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)

        return rgb_frame

    def getKinectV2RgbFrame(self):
        frames = self.listener.waitForNewFrame()

        return frames

    def getRgbFrame(self):
        rgb_frame = None
        if self.video_source == 'kinectv1':
            import freenect
            rgb_frame = self.getKinectV1RgbFrame()

        elif self.video_source == 'kinectv2':
            from pylibfreenect2 import Freenect2, SyncMultiFrameListener, FrameType
            rgb_frame = self.getKinectV2RgbFrame()

        return rgb_frame

    def getNextFrame(self):
        print("video source ", self.video_source)
        if self.video_source == 'kinectv1':
            rgb_frame = self.getKinectV1RgbFrame()
        elif self.video_source == 'kinectv2':
            if self.frames:
                self.listener.release(self.frames)
            self.frames = self.getKinectV2RgbFrame()
            resized_rgb_frame = cv2.resize(self.frames["color"].asarray(),
                                           (1920, 1080))
            # mirror the image horizontally; the kinect flips it
            rgb_frame = resized_rgb_frame[:, ::-1, :]
        else:
            print("no video feed")
            return

        frame = cv2.GaussianBlur(rgb_frame, (7, 7), 0)
        return frame

    def launchVideoForColorCalib(self):

        while True:
            frame = self.getNextFrame()
            # Convert the image to hsv space and find range of colors
            #hsvFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # self.detectReward(frame, hsvFrame)
            # Show the original and processed feed
            hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            cv2.imshow('Original feed', frame)
            cv2.setMouseCallback('Original feed', self.onMouseClick, hsv_frame)
            # cv2.imshow('Original feed', hsv_frame)

            # if 'q' is pressed then exit the loop
            if cv2.waitKey(33) == ord('q'):
                break

        # Destroy all windows and exit the program
        cv2.destroyAllWindows()
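A short usage sketch for the class above, using the source name handled in __init__:

cap = VideoCapture('kinectv2')
frame = cap.getNextFrame()  # Gaussian-blurred, mirrored color frame
cv2.imshow('frame', frame)
cv2.waitKey(0)
cap.stopVideoFeed()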
Example n. 28
0
class DeviceFrame(TabFrame):

    persp = np.asarray([[0.75, 0, 120 + 15], [0, 0.99, 0], [0, 0, 1]],
                       dtype=np.float32)

    def __init__(self, master, freenect, serial, publish):

        TabFrame.__init__(self, master)
        self.pack(fill=tk.BOTH, expand=True)

        self._freenect = freenect

        self._serial = serial
        self._device_index = self.__device_list_index()

        self._device = None
        self._listener = None

        self._opened = False
        self._playing = False

        self.frames = FrameMap()
        self.image_buffer = {
            'color': (None, None, None),
            'ir': (None, None, None),
            'depth': (None, None, None)
        }

        cam = TabFrame(self, tk.TOP)
        self.add(' Camera ', cam)

        color = VideoUI(cam, self, lambda: self.get_image_color())
        cam.add(' RGB ', color)
        color.canvas.bind("<Button-1>", lambda _: self.capture())

        ir = VideoUI(cam, self, lambda: self.get_image_ir())
        cam.add(' IR ', ir)
        ir.canvas.bind('<Motion>', self.motion)
        ir.canvas.bind("<Button-1>", lambda _: self.capture())

        depth = VideoUI(cam, self, lambda: self.get_image_depth())
        cam.add(' Depth ', depth)
        depth.canvas.bind('<Motion>', self.motion)
        depth.canvas.bind("<Button-1>", lambda _: self.capture())

        filt = TabFrame(self, tk.TOP)
        self.add(' Filter ', filt)

        sob = VideoUI(filt, self, lambda: self.get_image_ir(filters=(sobel, )))
        filt.add(' Sobel ', sob)

        mas = VideoUI(filt, self,
                      lambda: self.get_image_ir(filters=(masking, )))
        filt.add(' Masking ', mas)

        can = VideoUI(filt, self, lambda: self.get_image_ir(filters=(canny, )))
        filt.add(' Canny ', can)

        hou = VideoUI(filt, self, lambda: self.get_image_ir(filters=(hough, )))
        filt.add(' Hough ', hou)

        if 'matplotlib' in sys.modules:
            plot = TabFrame(self, tk.TOP)
            self.add(' 3D ', plot)
            plot_pc = PlotFrame(plot, self,
                                (lambda: self.get_image_depth(), None),
                                (920, 580, (0, 5000)))
            plot.add(' Point Cloud ', plot_pc)
            plot_dm = PlotFrame(
                plot, self,
                (lambda: self.get_image_depth(), lambda: self.get_image_ir()),
                (920, 580, (0, 5000)))
            plot.add(' Depthmap ', plot_dm)
            plot_cm = PlotFrame(plot, self, (lambda: self.get_image_depth(),
                                             lambda: self.get_image_color()),
                                (920, 580, (0, 5000)))
            plot.add(' Colormap ', plot_cm)
            #if 'moderngl' in sys.modules:
            #    gl = TabFrame(plot, tk.TOP)
            #    plot.add(' OpenGL ', gl)

        track = TabFrame(self, tk.TOP)
        self.add(' Tracker ', track)

        boos = TrackerUI(track, self, lambda: self.get_image_color(), publish,
                         1)
        track.add(' Boosting ', boos)

        colo = TrackerUI(track, self, lambda: self.get_image_color(), publish,
                         2)
        track.add(' Color ', colo)

        #if 'flask' in sys.modules:
        #    stream = TabFrame(self, tk.TOP)
        #    self.add(' Stream ', stream)

        self._publish = publish
        self.refresh()

    def __device_list_index(self):
        num_devs = self._freenect.enumerateDevices()
        devs = [
            self._freenect.getDeviceSerialNumber(i) for i in range(num_devs)
        ]
        return devs.index(self._serial)

    def motion(self, event):
        msg = " x={}px y={}px ".format(event.x, event.y)
        _, _, buffer = self.get_image_depth()
        if buffer is not None:
            w, h = buffer.shape[::-1]
            if buffer[event.y % h][event.x % w] > 0:
                msg = msg + " d={}mm".format(buffer[event.y % h][event.x % w])
        if self._publish is not None: self._publish(msg)
        return None

    def open(self):
        self._listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                                | FrameType.Depth)
        self._device = self._freenect.openDevice(self._serial,
                                                 pipeline=pipeline)
        device_index = self.__device_list_index()
        if self._device_index != device_index:  # keep track of changes in the device list
            self._device_index = device_index
            self._device.close()
            self._listener.release(self.frames)
            self.open()
            return
        self._device.setColorFrameListener(self._listener)
        self._device.setIrAndDepthFrameListener(self._listener)
        self._device.start()
        self._opened = True
        self._playing = False

    def opened(self):
        return self._opened

    def play(self):
        if not self._opened: return False
        self._playing = True
        return True

    def playing(self):
        return self._playing

    def stop(self):
        if not self._opened: return False
        self._playing = False
        return True

    def close(self):
        if not self._opened: return
        self._device.close()
        self._opened = False
        self._playing = False

    def refresh(self):
        if self._playing:
            self._listener.release(self.frames)
            self.image_buffer = {
                key: (None, None, None)
                for key in self.image_buffer
            }  # reset image buffer
            self.frames = self._listener.waitForNewFrame()
        self.after(30, self.refresh)

    def get_image_color(self, filters=()):
        color, _, _ = self.image_buffer['color']
        if color is not None: return self.image_buffer['color']
        color = self.frames['color']
        color = color.asarray(dtype=np.uint8)
        color = cv.resize(color, (1920 // 2, 1080 // 2))
        color = cv.cvtColor(color, cv.COLOR_BGRA2RGBA)
        #color = np.rot90(color)
        #color = np.rot90(color, 3)
        for f in filters:
            color, *_ = f(color)
        return self.to_image('color', color)

    def get_image_ir(self, filters=()):
        ir, _, _ = self.image_buffer['ir']
        if ir is not None: return self.image_buffer['ir']
        ir = self.frames['ir']
        ir = ir.asarray(dtype=np.float32)[
            32:-32]  # remove 32 lines top and bottom (in rgb out of view)
        dsize = (1920 // 2, 1080 // 2)
        ir = cv.resize(ir, dsize)
        ir = ir / 65535
        ir = np.asarray(ir * 255, dtype=np.uint8)
        ir = cv.warpPerspective(ir,
                                DeviceFrame.persp,
                                None,
                                borderMode=cv.BORDER_CONSTANT,
                                borderValue=255)
        for f in filters:
            ir, *_ = f(ir)
        return self.to_image('ir', ir)

    def get_image_depth(self, d_min=0, d_max=5000, filters=()):
        depth, _, _ = self.image_buffer['depth']
        if depth is not None: return self.image_buffer['depth']
        depth = self.frames['depth']
        depth = depth.asarray(dtype=np.float32)[
            32:-32]  # remove 32 lines top and bottom (in rgb out of view)
        depth = cv.resize(depth, (1920 // 2, 1080 // 2))
        depth = cv.warpPerspective(depth,
                                   DeviceFrame.persp,
                                   None,
                                   borderMode=cv.BORDER_CONSTANT,
                                   borderValue=d_max)
        buffer = depth.astype(int)
        buffer[buffer == d_max], buffer[buffer == d_min] = -1, -1
        depth = (depth - d_min) / (d_max - d_min)
        depth = np.asarray(depth * 255, dtype=np.uint8)
        for f in filters:
            depth, *_ = f(depth)
        return self.to_image('depth', depth, buffer)

    def to_image(self, key, arr, arg=None):
        self.image_buffer[key] = (PIL.Image.fromarray(arr), arr, arg)
        return self.image_buffer[key]

    def capture(self):
        timezone = (int(-time.timezone / 3600) + time.daylight) % 25
        tz_abbr = 'ZABCDEFGHIKLMNOPQRSTUVWXY'
        timestamp = time.strftime('%Y%m%d' + tz_abbr[timezone] + '%H%M%S')
        self.save(self.get_image_color()[0], timestamp, 'color')
        self.save(self.get_image_ir()[0], timestamp, 'ir')
        self.save(self.get_image_depth()[0], timestamp, 'depth')

    @classmethod
    def save(cls, img, timestamp, name, ext='tiff'):
        scriptdir = os.path.dirname(__file__)
        timedir = os.path.join(scriptdir, 'capture', str(timestamp))
        if not os.path.exists(timedir): os.makedirs(timedir)
        file = os.path.join(timedir, name + '.' + ext)
        img.save(file, ext)
Esempio n. 29
0
class KinectV2:
    """
    control class for the KinectV2 based on the Python wrappers of the official Microsoft SDK
    Init the kinect and provides a method that returns the scanned depth image as numpy array.
    Also we do gaussian blurring to get smoother surfaces.

    """


    def __init__(self):
        # hard coded class attributes for KinectV2's native resolution
        self.name = 'kinect_v2'
        self.depth_width = 512
        self.depth_height = 424
        self.color_width = 1920
        self.color_height = 1080
        self.depth = None
        self.color = None
        self._init_device()

        #self.depth = self.get_frame()
        #self.color = self.get_color()
        print("KinectV2 initialized.")

    def _init_device(self):
        if _platform == 'Windows':
            device = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
                                                     PyKinectV2.FrameSourceTypes_Depth |
                                                     PyKinectV2.FrameSourceTypes_Infrared)

        elif _platform == 'Linux':
            if _pylib:
                # self.device = Device()
                fn = Freenect2()
                num_devices = fn.enumerateDevices()
                assert num_devices > 0

                serial = fn.getDeviceSerialNumber(0)
                self.device = fn.openDevice(serial, pipeline=pipeline)

                self.listener = SyncMultiFrameListener(
                    FrameType.Color |
                    FrameType.Ir |
                    FrameType.Depth)

                # Register listeners
                self.device.setColorFrameListener(self.listener)
                self.device.setIrAndDepthFrameListener(self.listener)

                self.device.start()

                self.registration = Registration(self.device.getIrCameraParams(), self.device.getColorCameraParams())

                self.undistorted = Frame(512, 424, 4)
                self.registered = Frame(512, 424, 4)

                frames = self.listener.waitForNewFrame()
                self.listener.release(frames)

                self.device.stop()
                self.device.close()
            elif _lib:
                try:
                    self.device = Device()
                except Exception:
                    traceback.print_exc()

        else:
            print(_platform)
            raise NotImplementedError

    def _get_linux_frame(self, typ: str = 'all'):
        """
        Wait for a new frame set from the pylibfreenect2 listener and return
        the requested frame type(s) as numpy arrays.
        Args:
            typ: one of 'depth', 'ir', 'color' or 'all'
        Returns:
            The requested frame as a numpy array, or a (depth, color, ir)
            tuple when typ == 'all'.
        """
        #self.device.start()
        frames = self.listener.waitForNewFrame(milliseconds=1000)
        if frames:
            if typ == 'depth':
                depth = frames[FrameType.Depth]
                self.listener.release(frames)
                return depth.asarray()
            elif typ == 'ir':
                ir = frames[FrameType.Ir]
                self.listener.release(frames)
                return ir.asarray()
            elif typ == 'color':
                color = frames[FrameType.Color]
                self.listener.release(frames)
                return color.asarray()
            elif typ == 'all':
                color = frames[FrameType.Color]
                ir = frames[FrameType.Ir]
                depth = frames[FrameType.Depth]
                self.registration.apply(color, depth, self.undistorted, self.registered)
                self.registration.undistortDepth(depth, self.undistorted)
                self.listener.release(frames)
                return depth.asarray(), color.asarray(), ir.asarray()
        else:
            raise RuntimeError("Failed to receive a frame from the Kinect")

    def get_linux_frame(self, typ: str = 'all'):
        """
        Iterate the running device until color, depth and IR frames have all
        arrived, then return the requested frame type(s) as numpy arrays.
        Args:
            typ: one of 'depth', 'ir', 'color' or 'all'
        Returns:
            The requested frame as a numpy array, or a (color, depth, ir)
            tuple otherwise.
        """
        frames = {}
        with self.device.running():
            for type_, frame in self.device:
                frames[type_] = frame
                if FrameType.Color in frames and FrameType.Depth in frames and FrameType.Ir in frames:
                    break
        if typ == 'depth':
            depth = frames[FrameType.Depth].to_array()
            return depth
        elif typ == 'ir':
            ir = frames[FrameType.Ir].to_array()
            return ir
        elif typ == 'color':
            color = frames[FrameType.Color].to_array()
            resolution_camera = self.color_height * self.color_width  # color camera resolution of the Kinect V2
            palette = numpy.reshape(color, (resolution_camera, 4))[:, [2, 1, 0]]
            position_palette = numpy.reshape(numpy.arange(0, len(palette), 1), (self.color_height, self.color_width))
            color = palette[position_palette]
            return color
        else:
            color = frames[FrameType.Color].to_array()
            resolution_camera = self.color_height * self.color_width  # color camera resolution of the Kinect V2
            palette = numpy.reshape(color, (resolution_camera, 4))[:, [2, 1, 0]]
            position_palette = numpy.reshape(numpy.arange(0, len(palette), 1), (self.color_height, self.color_width))
            color = palette[position_palette]
            depth = frames[FrameType.Depth].to_array()
            ir = frames[FrameType.Ir].to_array()
            return color, depth, ir

    def get_frame(self):
        """
        Returns:
               2D array of shape (424, 512) containing the depth information of the latest frame in mm
        """
        if _platform == 'Windows':
            depth_flattened = self.device.get_last_depth_frame()
            self.depth = depth_flattened.reshape(
                (self.depth_height, self.depth_width))  # reshape the array to 2D with native resolution of the kinectV2
        elif _platform == 'Linux':
            self.depth = self.get_linux_frame(typ='depth')
        return self.depth

    def get_ir_frame_raw(self):
        """
        Returns:
               2D array of shape (424, 512) containing the raw infrared intensity (uint16) of the last frame
        """
        if _platform == 'Windows':
            ir_flattened = self.device.get_last_infrared_frame()
            self.ir_frame_raw = numpy.flipud(
                ir_flattened.reshape((self.depth_height,
                                      self.depth_width)))  # reshape the array to 2D with native resolution of the kinectV2
        elif _platform == 'Linux':
            self.ir_frame_raw = self.get_linux_frame(typ='ir')
        return self.ir_frame_raw

    def get_ir_frame(self, min=0, max=6000):
        """

        Args:
            min: minimum intensity value mapped to uint8 (will become 0) default: 0
            max: maximum intensity value mapped to uint8 (will become 255) default: 6000
        Returns:
               2D array of shape (424, 512) containing the infrared intensity of the last frame, mapped from [min, max] to uint8

        """
        ir_frame_raw = self.get_ir_frame_raw()
        self.ir_frame = numpy.interp(ir_frame_raw, (min, max), (0, 255)).astype('uint8')
        return self.ir_frame

    def get_color(self):
        """

        Returns:

        """
        if _platform == 'Windows':
            color_flattened = self.device.get_last_color_frame()
            resolution_camera = self.color_height * self.color_width  # resolution camera Kinect V2
            # Palette of colors in RGB / Cut of 4th column marked as intensity
            palette = numpy.reshape(numpy.array([color_flattened]), (resolution_camera, 4))[:, [2, 1, 0]]
            position_palette = numpy.reshape(numpy.arange(0, len(palette), 1), (self.color_height, self.color_width))
            self.color = numpy.flipud(palette[position_palette])
        elif _platform == 'Linux':
            self.color = self.get_linux_frame(typ='color')
        return self.color
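
A minimal usage sketch for the class above (illustrative only; it assumes the
same imports as the example and a connected sensor):

kinect = KinectV2()
depth = kinect.get_frame()     # (424, 512) depth in mm
ir = kinect.get_ir_frame()     # (424, 512) uint8 infrared intensity
color = kinect.get_color()     # (1080, 1920, 3) RGB color image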
Esempio n. 30
0
def estimate_coef(x, y):
    # simple linear regression of y on x (header and moments assumed;
    # the original snippet begins mid-function)
    n = np.size(x)
    m_x, m_y = np.mean(x), np.mean(y)
    SS_xy = np.sum(y * x) - n * m_y * m_x
    SS_xx = np.sum(x * x) - n * m_x * m_x
    # calculating regression coefficients
    b_1 = SS_xy / SS_xx
    b_0 = m_y - b_1 * m_x
    return (b_0, b_1)


def make_fig(x, y):
    plt.ion()  # enable interactivity
    plt.clf()  # reuse the current figure instead of opening a new one per call
    plt.plot(x, y)
    plt.pause(0.001)


a = list(range(511))
while True:
    frames = listener.waitForNewFrame()
    depth = frames["depth"]
    dep = depth.asarray()
    dep = dep[212:]
    dep_display = dep / 4096.
    data = dep

    y = []
    for i in a:
        d = estimate_coef(data[:, i], data[:, i + 1])
        y.append(d[1])  # keep the slope for each neighbouring column pair
        #plot_regression_line(data[:,1],data[:,2],d)
        #print (i)
    make_fig(a, y)

    cv2.imshow("depth", dep_display)
    cv2.waitKey(1)  # let HighGUI refresh the window
    listener.release(frames)
Esempio n. 31
0
class KivyCamera(Image):
    def __init__(self, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)

    def setup(self, fn, pipeline, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)
        self.bg_image = None
        self.current_img = None
        self.current_final_img = None
        self.current_mask = None

        serial = fn.getDeviceSerialNumber(0)
        self.device = fn.openDevice(serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(
                 FrameType.Color | FrameType.Ir | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.device.start()

        # NOTE: must be called after device.start()
        self.registration = Registration(self.device.getIrCameraParams(),
                                           self.device.getColorCameraParams())

        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)

        # Optional parameters for registration
        # set to True if you need them
        need_bigdepth = True
        need_color_depth_map = False

        self.bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
        self.color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
            if need_color_depth_map else None

        self.clipping_distance = 0.5
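        # depth_img below is bigdepth / 4500 (values in mm), so a threshold of
        # 0.5 corresponds to a cut at roughly 2.25 m from the sensor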


    def update(self, dt):
        frames = self.listener.waitForNewFrame()
        color = frames["color"]
        depth = frames["depth"]

        self.registration.apply(color, depth, self.undistorted, self.registered,
                               bigdepth=self.bigdepth,
                               color_depth_map=self.color_depth_map)

        self.current_img = color.asarray()[:,:,:3]
        depth_img = self.bigdepth.asarray(np.float32) / 4500. # scale to interval [0,1]

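        # virtual background: wherever the mask computed below is set, the
        # pixel is taken from bg_image instead of the live color frame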
        if(self.bg_image is not None):
            if(self.bg_image.shape[2] == 4): # put frame around picture
                fgMask = self.bg_image[:,:,2]>0
            else: # modify background
                # if needed: denoise filter
                depth_img = cv2.medianBlur(depth_img, 5)
                depth_img = cv2.GaussianBlur(depth_img, (9,9), 0)
                fgMask = ((depth_img > self.clipping_distance) & (depth_img > 0))
                fgMask = cv2.resize(fgMask.astype(np.uint8), (1920, 1080))
                
                # if needed: morphological operations
                #kernel = np.ones((10,10), np.uint8)
                #fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_CLOSE, kernel)
                #fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_OPEN, kernel)
            
            self.current_mask = np.dstack((fgMask, fgMask, fgMask)).astype(bool) # mask is 1 channel, color is 3 channels
            self.current_final_img = np.where(self.current_mask, self.bg_image[:,:,:3], self.current_img)
        else:
            self.current_mask = None
            self.current_final_img = self.current_img

        self.listener.release(frames)
        
        self.display_img(self.current_final_img)

    def display_img(self, img):
        buf1 = cv2.flip(img, 0)
        buf = buf1.tobytes()
        image_texture = Texture.create(size=(img.shape[1], img.shape[0]), colorfmt='rgb')
        image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        # display image from the texture
        self.texture = image_texture
Esempio n. 32
0
def projection():

    pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set to True if you need them
    need_bigdepth = True
    need_color_depth_map = True

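    # bigdepth is the depth image upsampled to the color resolution; its frame
    # is 1920x1082 (1080 rows plus one extra row at top and bottom)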
    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
        if need_color_depth_map else None

    rospy.init_node('projection', anonymous=True)

    color_pub = rospy.Publisher('color', Image, queue_size=10)
    depth_pub = rospy.Publisher('depth', Image, queue_size=10)
    ir_pub = rospy.Publisher('ir', Image, queue_size=10)
    small_depth_pub = rospy.Publisher('small_depth', Image, queue_size=10)

    rate = rospy.Rate(10)  # 10 Hz
    bridge = CvBridge()  # reuse one bridge instead of constructing one per message

    while not rospy.is_shutdown():
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        color_image = bridge.cv2_to_imgmsg(color.asarray())
        color_pub.publish(color_image)

        depth_image = bridge.cv2_to_imgmsg(bigdepth.asarray(np.float32))
        depth_pub.publish(depth_image)

        ir_image = bridge.cv2_to_imgmsg(ir.asarray())
        ir_pub.publish(ir_image)

        small_depth_image = bridge.cv2_to_imgmsg(depth.asarray())
        small_depth_pub.publish(small_depth_image)

        rate.sleep()

        listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    device.stop()
    device.close()
    sys.exit(0)


def get_image(imgtopic):

    image = imgtopic

    fn = Freenect2()

    num_devices = fn.enumerateDevices()
    assert num_devices > 0

    serial = fn.getDefaultDeviceSerialNumber()
    assert serial == fn.getDeviceSerialNumber(0)

    device = fn.openDevice(serial)

    assert fn.getDefaultDeviceSerialNumber() == device.getSerialNumber()
    device.getFirmwareVersion()

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # Registration
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())
    # undistorted = Frame(512, 424, 4)
    # registered = Frame(512, 424, 4)

    # # optional parameters for registration
    # bigdepth = Frame(1920, 1082, 4)
    # color_depth_map = np.zeros((424, 512), np.int32)

    # # test if we can get two frames at least
    # frames = listener.waitForNewFrame()
    # listener.release(frames)

    # frames as a first argument should also work
    frames = FrameMap()
    listener.waitForNewFrame(frames)

    color = frames[FrameType.Color]
    # ir = frames[FrameType.Ir]
    # depth = frames[FrameType.Depth]

    # for frame in [ir, depth]:
    #     assert frame.exposure == 0
    #     assert frame.gain == 0
    #     assert frame.gamma == 0

    # for frame in [color]:
    #     assert frame.exposure > 0
    #     assert frame.gain > 0
    #     assert frame.gamma > 0

    # registration.apply(color, depth, undistorted, registered)
    # registration.apply(color, depth, undistorted, registered,
    #                    bigdepth=bigdepth,
    #                    color_depth_map=color_depth_map.ravel())

    # assert color.width == 1920
    # assert color.height == 1080
    # assert color.bytes_per_pixel == 4

    # assert ir.width == 512
    # assert ir.height == 424
    # assert ir.bytes_per_pixel == 4

    # assert depth.width == 512
    # assert depth.height == 424
    # assert depth.bytes_per_pixel == 4

    # assert color.asarray().shape == (color.height, color.width, 4)
    # assert ir.asarray().shape == (ir.height, ir.width)
    # assert depth.asarray(np.float32).shape == (depth.height, depth.width)

    return color.asarray()
Esempio n. 34
0
def CaptureVideo():
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set to True if you need them
    need_bigdepth = True
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512), np.int32).ravel() \
        if need_color_depth_map else None

    # cap = cv2.VideoCapture(0)
    res_old = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]

    while True:
        # Capture frame-by-frame
        # ret, frame = cap.read()
        frames = listener.waitForNewFrame()
        img = frames["color"].asarray()[:, :, :3]
        frame = cv2.resize(img, (int(1920 / 3), int(1080 / 3)))

        start = time.time()
        # Display the resulting frame
        box, conf, cls = eval_video_detection(net_detection_, frame)

        p_box = []
        for i in range(len(box)):
            p_box_per_person = {}
            if int(cls[i]) == 15 and conf[i] > 0.0:
                p_box_per_person['p1'] = (box[i][0], box[i][1])
                p_box_per_person['p2'] = (box[i][2], box[i][3])
                p_box_per_person['height_difference'] = box[i][3] - box[i][1]
                p_box_per_person['weight_difference'] = box[i][2] - box[i][0]
                p_box_per_person['conf'] = conf[i]
                p_box_per_person['cls'] = int(cls[i])
                p_box.append(p_box_per_person)

        # print(p_box)
        if len(p_box) == 0:
            cv2.putText(frame, Category_labels_[11], (30, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 2)
            cv2.imshow('Webcam', frame)
        else:
            p_box = sorted(p_box,
                           key=lambda e: e.__getitem__('weight_difference'),
                           reverse=True)
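            # crop a region around the widest detected person; the margin added
            # on each side scales with the Penalty_ratio_factor chosen below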
            p_hh_low = float(p_box[0]['p1'][1]) - (1 / float(
                (p_box[0]['p2'][1] - p_box[0]['p1'][1]))
                                                   ) * 2000.0  # record the position and size of the cropping box
            p_hh_hige = float(p_box[0]['p2'][1]) + (1 / float(
                (p_box[0]['p2'][1] - p_box[0]['p1'][1]))) * 2000.0
            print(p_box[0]['p2'][0] - p_box[0]['p1'][0])
            if p_box[0]['p2'][0] - p_box[0]['p1'][0] > 400:
                Penalty_ratio_factor = 30000.0
            elif p_box[0]['p2'][0] - p_box[0]['p1'][0] > 300:
                Penalty_ratio_factor = 15000.0
            elif p_box[0]['p2'][0] - p_box[0]['p1'][0] > 200:
                Penalty_ratio_factor = 8000.0
            elif p_box[0]['p2'][0] - p_box[0]['p1'][0] > 100:
                Penalty_ratio_factor = 3000.0
            else:
                Penalty_ratio_factor = 1000.0
            p_ww_low = float(p_box[0]['p1'][0]) - (1 / float(
                (p_box[0]['p2'][0] - p_box[0]['p1'][0]))
                                                   ) * Penalty_ratio_factor
            p_ww_hige = float(p_box[0]['p2'][0]) + (1 / float(
                (p_box[0]['p2'][0] - p_box[0]['p1'][0]))
                                                    ) * Penalty_ratio_factor
            cropframe = frame[
                int(max(p_hh_low, 0)):int(min(p_hh_hige, frame.shape[0])),
                int(max(p_ww_low, 0)):int(min(p_ww_hige, frame.shape[1]))]
            res = eval_video_classification(net_classification_, cropframe,
                                            res_old)

            cv2.imshow('CropWebcam', cropframe)
            cv2.rectangle(frame, p_box[0]['p1'], p_box[0]['p2'], (0, 0, 255))
            p3 = (max(p_box[0]['p1'][0], 15), max(p_box[0]['p1'][1], 15))
            title = "%s:%.2f" % (CLASSES[int(15)], conf[i])
            cv2.putText(frame, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)

            print(res)
            res_old = res
            #print(np.max(res))
            if np.max(res) < 0.9:
                cv2.putText(frame, Category_labels_[11], (30, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 2)
            else:
                Category_res = np.argmax(res)
                Category_res = min(Category_res, 11)  # clamp to the last label index
                # print(Category_res)
                cv2.putText(frame, Category_labels_[Category_res], (30, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 2)
            cv2.imshow('Webcam', frame)

        end = time.time()
        seconds = end - start
        # Calculate frames per second
        fps = 1 / seconds
        print("fps: {0}".format(fps))
        # Wait for the 'q' key to break
        key = cv2.waitKey(1) & 0xFF
        listener.release(frames)  # release before a possible break
        if key == ord('q'):
            break
    # When everything done, release the capture
    # cap.release()
    device.stop()
    device.close()
    cv2.destroyAllWindows()
    return
Esempio n. 35
0
class RunCamera(object):

    # check if it needs to make a picture and save the picture
    _img_save = False
    _img_num = 0
    _img_done = True

    def __init__(self):
        
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            self._pipeline = OpenGLPacketPipeline()
        except Exception:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                self._pipeline = OpenCLPacketPipeline()
            except Exception:
                from pylibfreenect2 import CpuPacketPipeline
                self._pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(self._pipeline).__name__)

        pygame.init()

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Set the width and height of the screen [width, height]
        self._screen_width = 500
        self._screen_height = 300
        self._screen = pygame.display.set_mode((self._screen_width, self._screen_height), pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)

        # Set the title of the screen window
        pygame.display.set_caption("Kamera")

        # Loop until the user clicks the close button.
        self._done = False

        # Kinect runtime object, we want only color and body frames 
        #self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)

        # Create and set logger
        logger = createConsoleLogger(LoggerLevel.Debug)
        setGlobalLogger(logger)

        self._fn = Freenect2()
        self._num_devices = self._fn.enumerateDevices()
        if self._num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        self._serial = self._fn.getDeviceSerialNumber(0)
        self._device = self._fn.openDevice(self._serial, pipeline=self._pipeline)

        self._listener = SyncMultiFrameListener(
            FrameType.Color | FrameType.Ir | FrameType.Depth)

        # Register listeners
        self._device.setColorFrameListener(self._listener)
        self._device.setIrAndDepthFrameListener(self._listener)

        self._device.start()

        # NOTE: must be called after device.start()
        self._registration = Registration(self._device.getIrCameraParams(),
                                    self._device.getColorCameraParams())

        self._undistorted = Frame(512, 424, 4)
        self._registered = Frame(512, 424, 4)

        # Optional parameters for registration
        # set to True if you need them
        self._need_bigdepth = False
        self._need_color_depth_map = False

        # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        #self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)
        self._bigdepth = Frame(1920, 1082, 4) if self._need_bigdepth else None
        self._color_depth_map = np.zeros((424, 512),  np.int32).ravel() if self._need_color_depth_map else None
        self._frame_surface = pygame.Surface((self._registered.width, self._registered.height), 0, 32)


    def draw_color_frame(self, np_frame, target_surface):
        target_surface.lock()
        #address = self._kinect.surface_as_array(target_surface.get_buffer())
        #ctypes.memmove(address, frame.ctypes.data, frame.size)
        address = get_address_from_array(target_surface.get_buffer())
        ctypes.memmove(address, np_frame.ctypes.data, np_frame.size)
        del address
        target_surface.unlock()

    def run(self):
        

        while not self._done:
            # --- Main event loop
            for event in pygame.event.get(): # User did something
                if event.type == pygame.QUIT: # If user clicked close
                    self._done = True # Flag that we are done so we exit this loop

                elif event.type == pygame.VIDEORESIZE: # window resized
                    self._screen = pygame.display.set_mode(event.dict['size'], 
                                               pygame.HWSURFACE|pygame.DOUBLEBUF|pygame.RESIZABLE, 32)
            
                    

            # --- Getting frames and drawing  
            #if self._kinect.has_new_color_frame():
            #    frame = self._kinect.get_last_color_frame()
            #    self.draw_color_frame(frame, self._frame_surface)
            #    frame = None

            frames = self._listener.waitForNewFrame()
            color_frame = frames["color"]
            ir = frames["ir"]
            depth = frames["depth"]
            self._registration.apply(color_frame, depth, self._undistorted, self._registered, bigdepth=self._bigdepth, color_depth_map=self._color_depth_map)
            # color frame is BGRX; take the RGB channels and swap to pygame's
            # (width, height) axis order
            color_arr_view = color_frame.asarray()[:, :, 2::-1]

            test_surf = pygame.surfarray.make_surface(
                color_arr_view.swapaxes(0, 1).copy())
            #self.draw_color_frame(color_frame.asarray(), self._frame_surface)

            self._frame_surface = test_surf

            # --- When the save-image flag is set, save the current frame
            if RunCamera._img_save:
                print("entered if statement for image saving")
                # Lock the flag first, or we would save too many pictures
                RunCamera._img_save = False

                # Folder and name of the picture
                # (any other naming scheme would work as well)
                name = "pic\\test-" + str(RunCamera._img_num) + ".jpg"

                # Save the image
                pygame.image.save(self._frame_surface, name)

                print("\n \nWe just saved the " + name + "!\n\nLock save-image-processing...\n\n===============================================\n")

                RunCamera._img_num = RunCamera._img_num + 1

                # Signal that the whole save is done
                RunCamera._img_done = True
                


            # --- copy back buffer surface pixels to the screen, resize it if needed and keep aspect ratio
            h_to_w = float(self._frame_surface.get_height()) / self._frame_surface.get_width()

            # --- Screen's width and height
            target_width = self._screen.get_width()
            target_height = int(h_to_w * target_width )

            surface_to_draw = pygame.transform.scale(self._frame_surface, (target_width, target_height))
            self._screen.blit(surface_to_draw, (0,0))
            surface_to_draw = None
            pygame.display.update()

            # --- Go ahead and update the screen with what we've drawn.
            pygame.display.flip()

            # --- Limit to 30 frames per second, try 60 if 30 runs smoothly
            self._clock.tick(30)
            self._listener.release(frames)

        # Close our Kinect sensor, close the window and quit.
        #self._kinect.close()
        self._device.stop()
        self._device.close()
        pygame.quit()
Esempio n. 36
0
class Tracker:
    # X_POSITION = -0.11387496
    # Z_POSITION = 1.3
    X_POSITION = -0.185
    Z_POSITION = 1.5


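    # HSV bounds for the tracked green object; the second pair overrides the first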
    greenLower = (29, 86, 6)
    greenUpper = (64, 255, 255)

    greenLower = (27, 100, 50)
    greenUpper = (60, 255, 255)

    def __init__(self, use_kinect=False, basket=False, gui=True):
        self.connected = False
        self.max_radius = 100000
        self.running = False
        self.killed = False
        self.gui = gui
        self.basket = basket
        self.use_kinect = use_kinect
        # self.fig, self.ax = plt.subplots()

        if self.use_kinect:
            self.px = []
            self.py = []
            logger = createConsoleLogger(LoggerLevel.Error)
            setGlobalLogger(logger)

        while not self.connected:
            # Bad, but needed
            if self.use_kinect:
                self.connect_kinect()
            else:
                self.connect_camera()

        self.thread = threading.Thread(target=self.video_thread)
        self.thread.start()

    def connect_camera(self):
        # Bad, but needed
        self.camera = cv2.VideoCapture(1)
        if not self.camera.isOpened():
            print("Unable to connect to the camera, check again")
            time.sleep(5)
        else:
            self.connected = True

    def connect_kinect(self):
        # Bad, but needed
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("Kinect not found, check again")
            self.connected = False
            return

        try:
            self.pipeline = OpenGLPacketPipeline()
        except Exception:
            self.pipeline = CpuPacketPipeline()

        serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(serial, pipeline=self.pipeline)

        self.listener = SyncMultiFrameListener(
            FrameType.Color | FrameType.Ir | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.device.start()
        print("started")

        # NOTE: must be called after device.start()
        self.registration = Registration(self.device.getIrCameraParams(),
                                         self.device.getColorCameraParams())
        print("registration")
        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)
        self.connected = True

    def video_thread(self):
        print('Camera thread started')
        need_bigdepth = False
        need_color_depth_map = False

        bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
        color_depth_map = np.zeros((424, 512), np.int32).ravel() \
            if need_color_depth_map else None

        while not self.killed:
            while self.running:
                # (grabbed, frame) = self.camera.read()
                frames = self.listener.waitForNewFrame()
                frame = frames["color"]
                ir = frames["ir"]
                depth = frames["depth"]
                self.registration.apply(frame, depth, self.undistorted, self.registered,
                                        bigdepth=bigdepth,
                                        color_depth_map=color_depth_map)

                # print(frame.width, frame.height, frame.bytes_per_pixel)
                # print(ir.width, ir.height, ir.bytes_per_pixel)
                # print(depth.width, depth.height, depth.bytes_per_pixel)
                # print(frame.asarray().shape, ir.asarray().shape, depth.asarray().shape)

                # color_image_array = frame.asarray()
                # registered frame is BGRA; drop the alpha channel, since
                # cv2.cvtColor(..., COLOR_BGR2HSV) expects 3 channels
                color_image_array = cv2.cvtColor(self.registered.asarray(np.uint8),
                                                 cv2.COLOR_BGRA2BGR)
                hsv = cv2.cvtColor(color_image_array, cv2.COLOR_BGR2HSV)

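                # threshold the green object in HSV space, then clean the mask
                # with erosion and dilation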
                mask = cv2.inRange(hsv, self.greenLower, self.greenUpper)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)

                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None

                if len(cnts) > 0:
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]),
                              int(M["m01"] / M["m00"]))

                    if radius > 10:
                        cv2.circle(color_image_array, (int(x), int(y)), int(radius),
                                   (0, 255, 255), 2)
                        cv2.circle(color_image_array, center, 5, (0, 0, 255), -1)
                        x, y, z = self.registration.getPointXYZ(self.undistorted, y, x)
                        # print(x, y, z, end='\r')
                        if not math.isnan(y) and not math.isnan(z):
                            self.px.append(y)
                            self.py.append(z)
                        row = tuple(c[c[:, :, 1].argmin()][0])[1]
                        self.max_radius = min(self.max_radius, row)

                if self.gui:
                    cv2.imshow("Frame", color_image_array)
                    cv2.imshow("Masked", mask)
                self.listener.release(frames)
                cv2.waitKey(1) & 0xFF
                # self.ax.plot(self.px)
                # self.ax.plot(self.py)
                # plt.pause(0.01)

        cv2.destroyAllWindows()
        print("exit looping")

    def start(self):
        self.max_radius = 100000
        if self.use_kinect:
            self.px = []
            self.py = []
        self.running = True

    def stop(self):
        self.running = False

    def kill(self):
        self.killed = True
        if self.use_kinect:
            self.device.stop()
            self.device.close()

    def get_reward(self):
        if self.use_kinect:
            if not self.basket:
                try:
                    peaks = peakutils.indexes(self.px)
                except Exception as e:
                    print(e)
                    peaks = np.array([])

                if peaks.size:  # a lone peak at index 0 would make .any() False
                    # for xx in peaks:
                    #     t.ax.plot(xx, self.px[xx], 'ro')
                    return self.py[peaks[0]] * 100
                else:
                    return 0
            else:
                print(np.mean(self.px), np.mean(self.py))
                dist = distance(np.array([self.px, self.py]).T, self.X_POSITION, self.Z_POSITION)
                print("distance", dist, len(self.px))
                return dist * 100
        else:
            return self.max_radius
Esempio n. 37
0
class Kinect:

    def __init__(self):
        self.averageSpineX = 0

        self.fn = Freenect2()
        if self.fn.enumerateDevices() == 0:
            sys.exit(47)


        self.serial = self.fn.getDeviceSerialNumber(0)

        types = 0
        types |= FrameType.Color
        types |= (FrameType.Ir | FrameType.Depth)
        self.listener = SyncMultiFrameListener(types)

        self.logger = createConsoleLogger(LoggerLevel.Debug)

        self.device = self.fn.openDevice(self.serial)

        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)


    def valueBounded(self, checkValue, absoluteValue):
        # True when checkValue lies within [-absoluteValue, +absoluteValue]
        return -absoluteValue <= checkValue <= absoluteValue


    def valueUnbounded(self, checkValue, absoluteValue):
        # True when checkValue lies outside [-absoluteValue, +absoluteValue]
        return checkValue > absoluteValue or checkValue < -absoluteValue

    def valuePlusMinusDifferential(self, checkValue, testValue, Differential):
        # True when checkValue is within Differential of testValue
        return testValue - Differential < checkValue < testValue + Differential


    def update(self, registration):
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        frames = self.listener.waitForNewFrame()
        depth = frames["depth"]
        color = frames["color"]

        registration.apply(color, depth, self.undistorted, self.registered)

        # read the undistorted depth only after registration has filled it
        d = self.undistorted.asarray(dtype=np.float32).copy()

        self.listener.release(frames)

        return d


    # calculate the average and skeleton points of a depth array, those
    # values plus the depth array



    # calculate mean depth of depth array, used to find skeleton points.
    def getMeanDepth(self, depth, rows, cols):
        total = 0
        sumDepth = 0
        for row in range(rows):
            for col in range(cols):
                try:
                    offset = row + col * (rows)
                    #if depth[offset]['depth'] < DANGER_THRESHOLD:
                    total += 1
                    sumDepth += depth[offset]['depth']
                except IndexError as e:
                    print(e)

        return (sumDepth / total)


    # calculate the skeleton points of the depth array
    def getSkeleton(self, depthArray, average, rows, cols):
        topY = 0
        leftX = rows
        bottomY = cols
        rightX = 0

        for d in depthArray:
            #print depthArray[offset]
            if (d['x'] > rightX):
                rightX = d['x']
            if (d['x'] < leftX):
                leftX = d['x']
            if (d['y'] < bottomY):
                bottomY = d['y']
            if (d['y'] > topY):
                topY = d['y']

        averageX = (leftX + rightX) / 2
        returnValues = {'averageX': averageX,
                        'topY': topY,
                        'bottomY': bottomY,
                        'rightX': rightX,
                        'leftX': leftX}

        return returnValues


    def changeInX(self, spine):
        print "changeInX"
        if self.averageSpineX == 0:
            self.averageSpineX = spine['averageX']
        elif (self.valueBounded((self.averageSpineX - spine['averageX']), X_CHANGE_THRESHOLD)):
            self.averageSpineX = ((self.averageSpineX + spine['averageX']) / 2)
        else:
            self.checkDrowning()


    # Check whether the difference between averageSpineX and the last
    # analyzed averageX stays below a threshold. If it does, the loop
    # continues. If it runs for 20 seconds, the drowning flag is set to
    # true. If falsePositive grows too high, DORi will no longer
    # assume the victim is a drowning risk
    def checkDrowning(self):
        print "checkDrowning"
        drowningRisk = True
        drowning = False
        falsePositive = 0
        # 20 seconds from start of def #
        timeLimit = time.time() + DROWN_TIME
        self.device.start()
        while drowningRisk:
            print('checking...')
            if time.time() > timeLimit:
                drowningRisk = False
                drowning = True

            data = self.fullDataLoop()
            if (self.valueUnbounded((self.averageSpineX - data['spine']['averageX']), X_CHANGE_THRESHOLD)):
                falsePositive += 1
                if falsePositive > 100:
                    drowningRisk = False
            else:
                continue
        if drowning:
            self.device.stop()
            print "This guy is for sure drowning"
            sys.exit(47)

        self.device.stop()

    def depthMatrixToPointCloudPos2(self, depthArray):
        # per-pixel index grids: axis 0 (rows) pairs with cy/fy,
        # axis 1 (columns) pairs with cx/fx
        C, R = np.indices(depthArray.shape)

        R = (R - KINECT_SPECS['cx']) * depthArray / KINECT_SPECS['fx']

        C = (C - KINECT_SPECS['cy']) * depthArray / KINECT_SPECS['fy']

        return np.column_stack((depthArray.ravel(), R.ravel(), C.ravel()))

    def getBody(self, depthArray, average, rows, cols):
        body = []

        for d in depthArray:
            if self.valuePlusMinusDifferential(d['depth'], average, BODY_DEPTH_THRESHOLD):
                body.append(d)

        return body

    def pointRavel(self, unraveledArray, rows, cols):
        raveledArray = []
        d = unraveledArray.ravel()
        for row in range(rows):
            for col in range(cols):
                try:
                    offset = row + col * (rows)
                    raveledArray.append({'x': row, 'y': col, 'depth': d[offset]})
                except IndexError as e:
                    print(e)

        return raveledArray

    def fullDataLoop(self):


        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())

        deptharraytest = [{'x': 152, 'y': 100, 'depth': 400}, {'x': 300, 'y': 200, 'depth': 400},
                          {'x': 350, 'y': 150, 'depth': 400}, {'x': 400, 'y': 300, 'depth': 400},
                          {'x': 22, 'y': 10, 'depth': 400}, {'x': 144, 'y': 12, 'depth': 400}]

        depthArray = self.update(registration)
        rows, cols = depthArray.shape
        d = self.pointRavel(depthArray, rows, cols)
        m = self.getMeanDepth(d, rows, cols)
        b = self.getBody(d, m, rows, cols)
        s = self.getSkeleton(deptharraytest, m, cols, rows)


        return {'spine' : s, 'meanDepth' : m}

    def run(self, duration):
        end = time.time() + duration
        self.device.start()

        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())


        deptharraytest = [{'x':152, 'y':100, 'depth':400}, {'x':300, 'y':200, 'depth':400},
                          {'x': 350, 'y': 150, 'depth': 400},{'x':400, 'y':300, 'depth':400},
                          {'x': 22, 'y': 10, 'depth': 400},{'x':144, 'y':12, 'depth':400}]
        while time.time() < end:
            depthArray = self.update(registration)
            rows, cols = depthArray.shape
            d = self.pointRavel(depthArray, rows, cols)
            m = self.getMeanDepth(d, rows, cols)
            b = self.getBody(d, m, rows, cols)
            s = self.getSkeleton(deptharraytest, m, cols, rows)
            self.changeInX(s)


        self.device.stop()

    def execute(self):
        self.device.start()

        registration = Registration(self.device.getIrCameraParams(),
                                    self.device.getColorCameraParams())

        d = self.update(registration)
        rows, cols = d.shape
        raveled = self.pointRavel(d, rows, cols)
        m = self.getMeanDepth(raveled, rows, cols)
        b = self.getBody(raveled, m, rows, cols)
        s = self.getSkeleton(raveled, m, rows, cols)
        self.changeInX(s)

        print('depthArray ' + str(len(d.ravel())))
        print('body ' + str(len(b)))
        for body in b:
            print("(" + str(body['x']) + "," + str(body['y']) + "): " + str(body['depth']))

        print(m)

        self.device.stop()




    def exit(self):
        self.device.stop()
        self.device.close()
Esempio n. 38
0
class Kinect:

    fn = None
    device = None
    serial = None
    listener = None
    connected = False

    # Frame-Filter Variables
    SCALE = 2
    OUTPUT_RESOLUTION = 227
    BOX_FILTER_SIZE = 3

    def __init__(self):

        # Import the pipeline which will be used with the kinect
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            self.pipeline = OpenCLPacketPipeline()
        except Exception:
            try:
                from pylibfreenect2 import OpenGLPacketPipeline
                self.pipeline = OpenGLPacketPipeline()
            except Exception:
                from pylibfreenect2 import CpuPacketPipeline
                self.pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(self.pipeline).__name__)

        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        # Get the serial number of the kinect
        self.serial = self.fn.getDeviceSerialNumber(0)

        self.image_counter = 1

    # Connects to the kinect and starts the stream
    def connect(self):

        # Connect to kinect
        self.device = self.fn.openDevice(self.serial, pipeline=self.pipeline)

        # Define RGB listener
        self.listener = SyncMultiFrameListener(FrameType.Color)

        # Register listeners
        self.device.setColorFrameListener(self.listener)

        # Get a frame from the listener
        self.device.startStreams(rgb=True, depth=False)

        self.connected = True

    # Takes an image out of the stream and saves it
    def get_picture(self, name=None):

        # Get a frame from the listener
        frames = self.listener.waitForNewFrame()

        # Extract the RGB frame
        color = frames["color"]
        _, filtered_color_frame = self.__filter_frame(color)

        # Save the frame as picture
        if name is None:
            cv2.imwrite(
                "classification/pictures/new/" + str(self.image_counter) +
                ".jpeg", filtered_color_frame)
            self.image_counter += 1

        else:
            cv2.imwrite("classification/pictures/" + str(name) + ".jpeg",
                        filtered_color_frame)

        # Release the frame from the listener
        self.listener.release(frames)

    def start_video(self):
        while True:
            # Get a frame from the listener
            frames = self.listener.waitForNewFrame()

            # Extract the RGB frame
            color = frames["color"]
            cut_frame, _ = self.__filter_frame(color, to_stream=True)

            # Show the cut stream of pictures
            cv2.imshow("cut", cut_frame)

            # Release the frame from the listener
            self.listener.release(frames)

            # Press q to exit loop and close all cv2-windows
            key = cv2.waitKey(delay=1)
            if key == ord('q'):
                cv2.destroyAllWindows()
                break

    # Filter the frame of the kinect to the desired scale of frame-filter variables
    def __filter_frame(self, frame, to_stream=False):
        # original frame resolution: (1080, 1920, 4)
        assert type(frame) is Frame

        # the desired pixel grid to get the desired resolution in the end
        cut_size = self.OUTPUT_RESOLUTION * self.SCALE + self.BOX_FILTER_SIZE - 1

        # the amount of rows/columns to cut away
        cut_rows = int((1080 - cut_size) / 2)
        cut_columns = int((1920 - cut_size) / 2)

        # the width of the filter from the middle to the edge
        filter_edge = (self.BOX_FILTER_SIZE - 1) // 2  # integer division: range() below needs an int

        frame = frame.asarray()
        filtered_frame = np.zeros(
            [self.OUTPUT_RESOLUTION, self.OUTPUT_RESOLUTION, 4], dtype=int)

        cut_frame = frame[cut_rows:cut_rows + cut_size,
                          cut_columns:cut_columns + cut_size]

        if self.BOX_FILTER_SIZE != 1 and not to_stream:

            # go through complete output array
            for i in range(self.OUTPUT_RESOLUTION):
                for j in range(self.OUTPUT_RESOLUTION):

                    # use for every output pixel the whole filter
                    for m in range(-filter_edge, filter_edge + 1):
                        for n in range(-filter_edge, filter_edge + 1):
                            filtered_frame[i, j] += cut_frame[i * self.SCALE +
                                                              filter_edge + m,
                                                              j * self.SCALE +
                                                              filter_edge + n]

                    filtered_frame[i, j] = filtered_frame[i, j] / (
                        self.BOX_FILTER_SIZE * self.BOX_FILTER_SIZE)

        return cut_frame, filtered_frame.astype(np.uint8)  # cv2.imwrite expects an 8-bit image

    # Terminates the connection to the kinect
    def disconnect(self):

        self.device.stop()
        self.device.close()
        self.connected = False
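
A minimal usage sketch for the class above (illustrative only; it assumes a
connected Kinect v2 and the same imports as the example):

kinect = Kinect()
kinect.connect()
kinect.get_picture(name="test")  # writes classification/pictures/test.jpeg
kinect.start_video()             # press 'q' in the preview window to stop
kinect.disconnect()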