Example #1
    def __init__(self):

        # Import the pipeline which will be used with the kinect
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            self.pipeline = OpenCLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenGLPacketPipeline
                self.pipeline = OpenGLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                self.pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(self.pipeline).__name__)

        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        # Get the serial number of the kinect
        self.serial = self.fn.getDeviceSerialNumber(0)

        self.image_counter = 1
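The nested try/except above is the standard pylibfreenect2 pattern for picking the fastest available packet pipeline. A minimal sketch that factors it into a reusable helper (preference order as in this example; catching Exception rather than ImportError mirrors the bare excepts in the originals, since pipeline construction can fail even when the import succeeds):

def create_packet_pipeline():
    """Return the fastest packet pipeline the installed libfreenect2 supports."""
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        return OpenCLPacketPipeline()
    except Exception:
        pass  # not built with OpenCL, or no usable OpenCL device
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        return OpenGLPacketPipeline()
    except Exception:
        pass  # not built with OpenGL support
    from pylibfreenect2 import CpuPacketPipeline
    return CpuPacketPipeline()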
Example #2
    def startKinectV2Dev(self):
        from pylibfreenect2 import Freenect2, SyncMultiFrameListener, FrameType

        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()

        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()

            except:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()

        print("LIBFREENECT2::Packet pipeline:",
              type(pipeline).__name__)

        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()

        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(
            FrameType.Color)  # listening only to RGB frames

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.start()
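A possible companion method (not part of the original snippet) showing how the color-only listener set up above would be consumed; waitForNewFrame()/release() follow the pattern of the later examples, and the copy() guards against the frame buffer being recycled after release:

    def readKinectV2Frame(self):
        frames = self.listener.waitForNewFrame()
        color = frames["color"]          # 1920x1080, 4 bytes per pixel (BGRX)
        img = color.asarray().copy()     # copy: asarray() is a view into the frame
        self.listener.release(frames)
        return img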
Example #3
 def __init__(self):
     try:
         from pylibfreenect2 import OpenCLPacketPipeline
         pipeline = OpenCLPacketPipeline()
     except:
         try:
             from pylibfreenect2 import OpenGLPacketPipeline
             pipeline = OpenGLPacketPipeline()
         except:
             from pylibfreenect2 import CpuPacketPipeline
             pipeline = CpuPacketPipeline()
     print("Packet pipeline:", type(pipeline).__name__)
     self.fn = Freenect2()
     self.num_devices = self.fn.enumerateDevices()
     if self.num_devices == 0:
         print("No device connected!")
         sys.exit(1)
     self.serial = self.fn.getDeviceSerialNumber(0)
     self.device = self.fn.openDevice(self.serial, pipeline=pipeline)
     self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir |
                                            FrameType.Depth)
     self.device.setColorFrameListener(self.listener)
     self.device.setIrAndDepthFrameListener(self.listener)
     self.device.start()
     self.undistorted = Frame(512, 424, 4)
     self.registered = Frame(512, 424, 4)
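A sketch of how the listener and the two Frame buffers above are typically consumed, following the loops in the later examples (a self.registration built from the device intrinsics after start() is assumed here, since this snippet does not create one):

    def read_registered(self):
        frames = self.listener.waitForNewFrame()
        # map the color frame onto the depth geometry into the buffers above
        self.registration.apply(frames["color"], frames["depth"],
                                self.undistorted, self.registered)
        rgbd = self.registered.asarray(np.uint8).copy()
        self.listener.release(frames)
        return rgbd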
Example #4
    def __init__(self, device_no=0):
        # Package Pipeline
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            self.pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                self.pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                self.pipeline = CpuPacketPipeline()
        print "Packet pipeline: {}".format(type(self.pipeline).__name__)

        # Create and set logger
        logger = createConsoleLogger(LoggerLevel.Error)
        setGlobalLogger(logger)

        # Detect Kinect Devices
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            self.device = None
            sys.exit(1)

        # Create Device and Frame Listeners
        self.serial = self.fn.getDeviceSerialNumber(device_no)
        self.device = self.fn.openDevice(self.serial, pipeline=self.pipeline)
        self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                               | FrameType.Depth)

        # Register Listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        # Start Device
        self.device.start()

        # Set Camera Parameters
        self._ir_params = self.device.getIrCameraParams()
        self._color_params = self.device.getColorCameraParams()
        '''
        self._ir_params.fx = cameraParams.params['ir_fx']
        self._ir_params.fy = cameraParams.params['ir_fy']
        self._ir_params.cx = cameraParams.params['ir_cx']
        self._ir_params.cy = cameraParams.params['ir_cy']

        self._color_params.fx = cameraParams.params['col_fx']
        self._color_params.fy = cameraParams.params['col_fy']
        self._color_params.cx = cameraParams.params['col_cx']
        self._color_params.cy = cameraParams.params['col_cy']
        '''
        # Registration
        self.registration = Registration(self._ir_params, self._color_params)

        print('[Info] Initialization Finished!')
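Since this class keeps the Registration object, a natural extension (a sketch, not in the original) is back-projecting a depth pixel to camera coordinates with Registration.getPointXYZ; undistorted is assumed to be a Frame(512, 424, 4) filled by registration.apply():

    def point_at(self, undistorted, r, c):
        # returns (x, y, z) in meters; NaN where the depth pixel is invalid
        return self.registration.getPointXYZ(undistorted, r, c)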
Example #5
    def getcameraimgbykinect(self, basedir):
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        fn = Freenect2()
        num_devices = fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            return
        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline=pipeline)

        types = 0
        types |= FrameType.Color
        listener = SyncMultiFrameListener(types)

        # Register listeners
        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        device.startStreams(rgb=True, depth=False)

        while True:
            frames = listener.waitForNewFrame()
            color = frames["color"]
            colorimg = cv2.resize(color.asarray(), (200, 150))
            (shortDate, longDate)=self.getDateStr()
            file_dir = basedir+"/"+shortDate
            if not os.path.exists(file_dir):
                os.makedirs(file_dir)
            new_filename = Pic_str().create_uuid() + '.jpg'
            fpath = os.path.join(file_dir, new_filename)
            # print(fpath )
            cv2.imwrite(fpath, colorimg, [cv2.IMWRITE_JPEG_QUALITY,70])
            listener.release(frames)
            isfire = self.fireDetect.detect_image(fpath)
            # args: site name ("factory campus warehouse"), camera type ("IP camera"), timestamp, fire flag, relative image path
            self.sendUtils.sendReqtoServer("园区厂区仓库", "网络摄像头", longDate, isfire, shortDate + "/" + new_filename)
            time.sleep(3)
        device.stop()
        device.close()
        return
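Because the `while True:` loop above never breaks, the trailing device.stop()/device.close() are unreachable. A hedged skeleton of the same loop that guarantees shutdown:

        try:
            while True:
                frames = listener.waitForNewFrame()
                # ... capture, save, and report as above ...
                listener.release(frames)
                time.sleep(3)
        finally:
            device.stop()
            device.close()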
Example #6
    def __init__(self):
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenGLPacketPipeline
                pipeline = OpenGLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(pipeline).__name__)

        # Create and set logger
        logger = createConsoleLogger(LoggerLevel.Debug)
        setGlobalLogger(logger)

        self.freenect = Freenect2()
        num_devices = self.freenect.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = self.freenect.getDeviceSerialNumber(0)
        self.device = self.freenect.openDevice(serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                               | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.device.start()

        self.registration = Registration(self.device.getIrCameraParams(),
                                         self.device.getColorCameraParams())

        self.run_loop = True
        self.lock = threading.Lock()
        self.img_buffer = np.zeros(
            (RES_COLOR[1], RES_COLOR[0], 3, SIZE_AVG_FILTER))
        self.idx_buffer = 0

        # Close on Ctrl-C
        def signal_handler(signal, frame):
            self.run_loop = False

        signal.signal(signal.SIGINT, signal_handler)
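A sketch of the capture loop that the Ctrl-C handler above would terminate; RES_COLOR and SIZE_AVG_FILTER are constants from the original module (not shown), and cv2 is assumed to be imported:

    def run(self):
        while self.run_loop:
            frames = self.listener.waitForNewFrame()
            color = frames["color"].asarray()
            with self.lock:
                # rolling buffer of the last SIZE_AVG_FILTER frames
                self.img_buffer[:, :, :, self.idx_buffer] = cv2.resize(
                    color[:, :, :3], RES_COLOR)
                self.idx_buffer = (self.idx_buffer + 1) % SIZE_AVG_FILTER
            self.listener.release(frames)
        self.device.stop()
        self.device.close()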
Example #7
    def __init__(self):
        self.enable_rgb = True
        self.enable_depth = True

        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            self.pipeline = OpenCLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenGLPacketPipeline
                self.pipeline = OpenGLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                self.pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(self.pipeline).__name__)

        rospy.init_node('RGBDImagePublisher', anonymous=True)
        self.color_image_pub = rospy.Publisher('ColorImage',
                                               Image,
                                               queue_size=10)
        self.depth_image_pub = rospy.Publisher('DepthImage',
                                               Image,
                                               queue_size=10)
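The snippet stops after creating the publishers. A hedged sketch of the publishing step, borrowing cv_bridge the way example #13 does (frame acquisition as in the other examples is assumed):

    def publish_frames(self, color_arr, depth_arr):
        from cv_bridge import CvBridge
        bridge = CvBridge()
        self.color_image_pub.publish(bridge.cv2_to_imgmsg(color_arr, encoding="passthrough"))
        self.depth_image_pub.publish(bridge.cv2_to_imgmsg(depth_arr, encoding="passthrough"))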
Example #8
    def __init__(self):
        
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            self._pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                self._pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                self._pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(self._pipeline).__name__)

        pygame.init()

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Set the width and height of the screen [width, height]
        self._screen_width = 500
        self._screen_height = 300
        self._screen = pygame.display.set_mode((self._screen_width, self._screen_height), pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)

        # Set the title of the screen window
        pygame.display.set_caption("Kamera")

        # Loop until the user clicks the close button.
        self._done = False

        # Kinect runtime object, we want only color and body frames 
        #self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)

        # Create and set logger
        logger = createConsoleLogger(LoggerLevel.Debug)
        setGlobalLogger(logger)

        self._fn = Freenect2()
        self._num_devices = self._fn.enumerateDevices()
        if self._num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        self._serial = self._fn.getDeviceSerialNumber(0)
        self._device = self._fn.openDevice(self._serial, pipeline=self._pipeline)

        self._listener = SyncMultiFrameListener(
            FrameType.Color | FrameType.Ir | FrameType.Depth)

        # Register listeners
        self._device.setColorFrameListener(self._listener)
        self._device.setIrAndDepthFrameListener(self._listener)

        self._device.start()

        # NOTE: must be called after device.start()
        self._registration = Registration(self._device.getIrCameraParams(),
                                    self._device.getColorCameraParams())

        self._undistorted = Frame(512, 424, 4)
        self._registered = Frame(512, 424, 4)

        # Optional parameters for registration
        # set True if you need them
        self._need_bigdepth = False
        self._need_color_depth_map = False

        # back buffer surface for getting Kinect color frames, 32bit color, width and height equal to the Kinect color frame size
        #self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)
        self._bigdepth = Frame(1920, 1082, 4) if self._need_bigdepth else None
        self._color_depth_map = np.zeros((424, 512),  np.int32).ravel() if self._need_color_depth_map else None
        self._frame_surface = pygame.Surface((self._registered.width, self._registered.height), 0, 32)
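A sketch (not in the original) of drawing the registered frame onto the back-buffer surface created above; pygame.surfarray expects a (width, height, 3) array, hence the channel slice and axis swap:

    def draw_registered(self, registered):
        arr = registered.asarray(np.uint8)[:, :, 2::-1]        # BGRX -> RGB
        pygame.surfarray.blit_array(self._frame_surface, arr.swapaxes(0, 1))
        self._screen.blit(self._frame_surface, (0, 0))
        pygame.display.update()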
Example #9
def KinectStream(frame_count, BreakKinect, enable_rgb=True, enable_depth=True):
    # create folders for the color and depth images, respectively
    last_path, _ = os.path.split(os.getcwd())
    path_color = os.path.join(last_path, "color")
    path_depth = os.path.join(last_path, "depth")
    CreateRefrashFolder(path_color)
    CreateRefrashFolder(path_depth)
    '''
	# if the depth images are needed, you must use OpenGLPacketPipeline so that the GPU
	# renders the depth map and real-time capture can be achieved.
	try:
		from pylibfreenect2 import OpenGLPacketPipeline
		pipeline = OpenGLPacketPipeline()
	except:
		try:
			from pylibfreenect2 import OpenCLPacketPipeline
			pipeline = OpenCLPacketPipeline()
		except:
			from pylibfreenect2 import CpuPacketPipeline
			pipeline = CpuPacketPipeline()
	'''

    if enable_depth:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    else:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    types = 0
    if enable_rgb:
        types |= FrameType.Color
    if enable_depth:
        types |= (FrameType.Ir | FrameType.Depth)
    listener = SyncMultiFrameListener(types)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    if enable_rgb and enable_depth:
        device.start()
    else:
        device.startStreams(rgb=enable_rgb, depth=enable_depth)
    # NOTE: must be called after device.start()
    if enable_depth:
        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)
    # the target size of the resize function
    SetSize = (540, 360)
    # SetSize = (1080, 720)

    while not BreakKinect.value:
        frame_count.value += 1
        file_name = 'image' + str(int(frame_count.value)) + '.jpg'
        im_path_color = os.path.join(path_color, file_name)
        im_path_depth = os.path.join(path_depth, file_name)
        frames = listener.waitForNewFrame()

        if enable_rgb:
            color = frames["color"]
        if enable_depth:
            ir = frames["ir"]
            depth = frames["depth"]

        if enable_rgb and enable_depth:
            registration.apply(color, depth, undistorted, registered, enable_filter=False)
        elif enable_depth:
            registration.undistortDepth(depth, undistorted)

        if enable_rgb:
            # NOTE: the original resized the raw color frame here, then
            # immediately overwrote it; only the registered image is saved
            new_frame = registered.asarray(np.uint8)
            cv2.imwrite(im_path_color, new_frame[:, :, :-1])  # drop the alpha channel
        # print("Kinect color:", new_frame.shape)

        if enable_depth:
            # depth_frame = cv2.resize(depth.asarray()/ 4500., SetSize)
            depth = depth.asarray() / 4500.
            # cv2.imshow("color", new_frame)
            undist = undistorted.asarray(np.float32) / 4500 * 255
            cv2.imwrite(im_path_depth, undist.astype(np.uint8))
            print("Kinect depth:", depth.shape)

        listener.release(frames)

    device.stop()
    device.close()
    # WriteVideo(path, im_pre = 'image', video_name = 'test.avi', fps = 15, size = SetSize)
    sys.exit(0)
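CreateRefrashFolder is referenced above but not shown; given how it is used (fresh output folders per run), a plausible implementation might be:

import os
import shutil

def CreateRefrashFolder(path):
    # wipe any images left over from a previous run, then recreate the folder
    if os.path.isdir(path):
        shutil.rmtree(path)
    os.makedirs(path)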
Example #10
def test_simplyfy_linux():
    if _platform == 'Windows':
        return
    # An example using startStreams
    import numpy as np
    from pylibfreenect2 import Freenect2, SyncMultiFrameListener
    from pylibfreenect2 import FrameType, Registration, Frame

    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    enable_rgb = False
    enable_depth = True

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    assert num_devices > 0

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    def _frames():
        frames = listener.waitForNewFrame(milliseconds=1000)

        color = frames[FrameType.Color]
        ir = frames[FrameType.Ir]
        depth = frames[FrameType.Depth]

        registration.apply(color, depth, undistorted, registered)
        registration.undistortDepth(depth, undistorted)

        assert color.width == 1920
        assert color.height == 1080
        assert color.bytes_per_pixel == 4

        assert ir.width == 512
        assert ir.height == 424
        assert ir.bytes_per_pixel == 4

        assert depth.width == 512
        assert depth.height == 424
        assert depth.bytes_per_pixel == 4

        print(color.asarray().shape)
        print(ir.asarray().shape)
        print(depth.asarray().shape)

        listener.release(frames)

    _frames()
    import time
    time.sleep(3)
    _frames()
    time.sleep(3)
    _frames()
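Note that this test indexes the frame map by FrameType (frames[FrameType.Color]) while the other examples use string keys (frames["color"]); both styles appear throughout these snippets and refer to the same frames. A tiny equivalence sketch:

frames = listener.waitForNewFrame(milliseconds=1000)
color_by_enum = frames[FrameType.Color]
color_by_name = frames["color"]   # same underlying frame, two key styles
assert color_by_enum.width == color_by_name.width == 1920
listener.release(frames)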
Example #11
def main():
    t1=time.time()
    inputPath = "DataSet/"
    IDfile = open(IDFilePath,mode='a')
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
        print("Using OpenCL")
        input()
    except:
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()

    #print("Packet pipeline:", type(pipeline).__name__)
    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)
    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)
    
    #print("FirmwareVersion: "+ firmware)
    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth)
    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)
    device.start()
    g.InitializeCamera()
    h.InitializeCameras()
    t2=time.time()
    print("Initialization before the loop "+str(t2-t1))
    while True:
        t3=time.time()
        t_ms = time.time()
        ts = datetime.datetime.now()

        print(t_ms)
        t_ms = t_ms * 1000
        t_ms = str(t_ms).split('.')[0]
        print(t_ms)
        imageID = t_ms

        print(str(ts).split('.')[0])
        ts = str(ts).split('.')[0]
        tsList1 = str(ts).split(' ')
        date = tsList1[0]
        tm = tsList1[1]
        print(tsList1)
        tsListTime = str(tsList1[1]).split(':')
        tsTime = str(tsListTime[0]) + "." + str(tsListTime[1]) + "." + str(tsListTime[2])
        print(tsTime)
        ts = str(tsList1[0]) + "_" + tsTime
        print(ts)
        t4=time.time()
        print("Starting part of the while loop: "+str(t4-t3))
        print("Press \'Enter\' to trigger the acquisition of images or \'q\' to Quit")
        key = input()
        print(key)
        if(not key):
            t5=time.time()
            captureKinect(device, serial, listener, ts, imageID)
            t0=time.time()
            g.TakeImages(imageID, ts)
            t9=time.time()
            h.TakeImages(imageID, ts)
            t6 = time.time()
            writeToXML(date, tm, ts, imageID)
            t7 = time.time()
            writeIDtoFile(IDfile,imageID)
            t8=time.time()
            '''ans = input("Display the images captured? (y/n)")
            if(ans=='y'):
                displayImagesCaptured(inputPath, ts, imageID)
            t9=time.time()'''
            print("Kinect Capture took "+str(t0-t5))
            print("Ensenso took "+str(t9-t0))
            print("GiGE took "+str(t6-t9))
            print("Writing to XML took "+str(t7-t6))
            print("Writing ID took "+ str(t8-t7))
            #print("Displaying images took "+str(t6-t5))
            print("Total time after pressing enter: "+str(t8-t5))
            print("Total loop time: "+str(t8-t3))
        elif(key =='q'):
            ta=time.time()
            closeKinect(device)
            g.ShutdownCamera()
            h.ShutdownCameras()
            tb=time.time()
            print("Close "+str(tb-ta))
            #print("Overall "+str(tb-t11))
            IDfile.close()
            sys.exit(0)
            break
Example #12
def main():
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
    except:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set True if you need them
    need_bigdepth = False
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
        if need_color_depth_map else None

    point = pcl.PointCloud()
    visual = pcl.pcl_visualization.CloudViewing()
    # NOTE: the original called visual.ShowColorCloud(cloud) here, but no
    # `cloud` exists at this point; the cloud is built inside the loop below

    while True:
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        # NOTE for visualization:
        # cv2.imshow without OpenGL backend seems to be quite slow to draw all
        # things below. Try commenting out some imshow if you don't have a fast
        # visualization backend.
        # cv2.imshow("ir", ir.asarray() / 65535.)
        # cv2.imshow("depth", depth.asarray() / 4500.)
        # cv2.imshow("color", cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
        # cv2.imshow("registered", registered.asarray(np.uint8))

        # if need_bigdepth:
        #     cv2.imshow("bigdepth", cv2.resize(bigdepth.asarray(np.float32),
        #                                       (int(1920 / 3), int(1082 / 3))))
        # if need_color_depth_map:
        #     cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

        undistorted_array = undistorted.asarray(dtype=np.float32, ndim=2)
        # registered_array = registered.asarray(dtype=np.uint8)
        point = pcl.PointCloud(undistorted_array)
        # visual.ShowColorCloud(cloud)

        listener.release(frames)

        # key = cv2.waitKey(delay=1)
        # if key == ord('q'):
        #     break
        v = True
        while v:
            v = not (visual.WasStopped())

    device.stop()
    device.close()

    sys.exit(0)
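Building the PointCloud straight from the 424x512 depth image, as above, ignores the camera geometry. A hedged alternative that back-projects every undistorted depth pixel with Registration.getPointXYZ before handing an N x 3 array to python-pcl:

import math
import numpy as np
import pcl

def depth_to_cloud(registration, undistorted):
    points = []
    for r in range(424):
        for c in range(512):
            x, y, z = registration.getPointXYZ(undistorted, r, c)
            if not math.isnan(z):
                points.append((x, y, z))
    return pcl.PointCloud(np.asarray(points, dtype=np.float32))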
Example #13
def projection():

    pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set True if you need them
    need_bigdepth = True
    need_color_depth_map = True

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
        if need_color_depth_map else None

    color_pub = rospy.Publisher('color', Image, queue_size=10)
    depth_pub = rospy.Publisher('depth', Image, queue_size=10)
    ir_pub = rospy.Publisher('ir', Image, queue_size=10)
    small_depth_pub = rospy.Publisher('small_depth', Image, queue_size=10)

    rospy.init_node('projection', anonymous=True)
    rate = rospy.Rate(10)  # 10hz

    while not rospy.is_shutdown():
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        color_image = CvBridge().cv2_to_imgmsg(color.asarray())
        color_pub.publish(color_image)

        depth_image = CvBridge().cv2_to_imgmsg(bigdepth.asarray(np.float32))
        depth_pub.publish(depth_image)

        ir_image = CvBridge().cv2_to_imgmsg(ir.asarray())
        ir_pub.publish(ir_image)

        small_depth_image = CvBridge().cv2_to_imgmsg(depth.asarray())
        small_depth_pub.publish(small_depth_image)

        rate.sleep()

        listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    device.stop()
    device.close()
    sys.exit(0)
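cv2_to_imgmsg above relies on the default "passthrough" encoding, which leaves the channel interpretation to subscribers. A hedged variant with explicit encodings (encoding names assumed from cv_bridge's conventions: "bgra8" for the 4-channel Kinect color frame, "32FC1" for float32 depth):

bridge = CvBridge()
color_msg = bridge.cv2_to_imgmsg(color.asarray(), encoding="bgra8")   # 4-channel BGRX
depth_msg = bridge.cv2_to_imgmsg(depth.asarray(), encoding="32FC1")   # float32 depth in mm
color_pub.publish(color_msg)
depth_pub.publish(depth_msg)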
Example #14
    def run(self):
        pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(pipeline).__name__)

        enable_rgb = True
        enable_depth = True

        fn = Freenect2()
        num_devices = fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline=pipeline)

        types = 0
        if enable_rgb:
            types |= FrameType.Color
        if enable_depth:
            types |= (FrameType.Ir | FrameType.Depth)
        listener = SyncMultiFrameListener(types)

        # Register listeners
        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        if enable_rgb and enable_depth:
            device.start()
        else:
            device.startStreams(rgb=enable_rgb, depth=enable_depth)

        # NOTE: must be called after device.start()
        if enable_depth:
            registration = Registration(device.getIrCameraParams(),
                                        device.getColorCameraParams())

        undistorted = Frame(512, 424, 4)
        registered = Frame(512, 424, 4)

        count = 0
        # NOTE: create the lock once, outside the loop; a fresh Lock built on
        # every iteration cannot synchronize anything
        mutex = Lock()
        while True:
            mutex.acquire()
            frames = listener.waitForNewFrame()

            if enable_rgb:
                color = frames["color"]
            if enable_depth:
                ir = frames["ir"]
                depth = frames["depth"]

            if enable_rgb and enable_depth:
                registration.apply(color, depth, undistorted, registered)
            elif enable_depth:
                registration.undistortDepth(depth, undistorted)

            if enable_depth:
                cv2.imshow("ir", ir.asarray() / 65535.)
                nameir = self.makename("ir", count)
                #cv2.imwrite(nameir, ir.asarray() / 65535.)

                cv2.imshow("depth", depth.asarray() / 4500.)
                named = self.makename("depth", count)
                #cv2.imwrite(named, depth.asarray() / 4500.)

                cv2.imshow("undistorted",
                           undistorted.asarray(np.float32) / 4500.)
                nameu = self.makename("undistorted", count)
                #cv2.imwrite(nameu, undistorted.asarray(np.float32) / 4500.)
            if enable_rgb:
                cv2.imshow(
                    "color",
                    cv2.resize(color.asarray(),
                               (int(1920 / 3), int(1080 / 3))))
                namec = self.makename("color", count)
                #cv2.imwrite(namec,cv2.resize(color.asarray(),(int(1920 / 3), int(1080 / 3))))
            if enable_rgb and enable_depth:
                cv2.imshow("registered", registered.asarray(np.uint8))
                namer = self.makename("registered", count)
                #cv2.imwrite(namer,registered.asarray(np.uint8))
            mutex.release()

            count = count + 1

            listener.release(frames)

            key = cv2.waitKey(delay=1)
            if key == ord('q'):
                break

        device.stop()
        device.close()
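makename is called above but not shown; a plausible helper, given the per-stream prefix and frame counter it receives:

    def makename(self, prefix, count):
        # e.g. "depth_000042.png"
        return "{}_{:06d}.png".format(prefix, count)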
Example #15
def main():
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    # Create and set logger
    #logger = createConsoleLogger(LoggerLevel.Debug)
    #setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    cascPath = "/home/bjornar/MScDissertation/TrainingData/FaceDetection/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)

    #faceExpModel = tf.keras.models.load_model("/home/bjornar/MScDissertation/createModel/ModelCreatedFromWeights.hdf5")

    faceExpModel = tf.keras.models.load_model(
        "/home/bjornar/ML_models/FER/Good models(80+)/tf_keras_weights_ninthRev-88percent/tf_keras_weights_ninthRev.hdf5"
    )
    print "To detect facial expression press 'e'"
    expRecognition = 0

    while True:
        frames = listener.waitForNewFrame()

        color = frames["color"]
        color = cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3)))

        #movementClass = estimate_head_pose.headPoseEstimation(color)
        #print movementClass
        #cv2.imshow("color", color)
        if cv2.waitKey(5) == ord('e'):
            expRecognition = 1
            print "Detecting facial expression..."

        if expRecognition:
            gray, faces = detectFace(color, faceCascade)

            prediction = []

            if len(faces) > 0:
                croppedFace = cropFaces(color, faces)
                #displayCrop(croppedFace)
                for face in croppedFace:
                    #cv2.imwrite("croppedface.png", face)
                    prediction.append(predictImg(face, faceExpModel))
                #displayFace(faces, color)
                #cv2.waitKey(0)
                for (x, y, w, h) in faces:
                    cv2.rectangle(color, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)

                #displayFace(faces, color)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(color, prediction[0], (20, 70), font, 1,
                            (0, 255, 0), 4)
                cv2.imshow("", color)

        else:
            cv2.imshow("", color)

        listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    device.stop()
    device.close()
Example #16
def main():
    instructions = 0
    print "--- Intention classification for communication between a human and a robot ---"
    if instructions == 1:
        print "First you will be required to present a facial expression before you will do a head movement."
        print "If done correctly these gestures will be detected by the robot and it will perform the desired task."
        raw_input("Press Enter to continue...")

    if instructions == 1:
        print "This is the module for facial expression recognition."
        print "This program can detect the emotions: Happy and Angry."
        print "The program will look for the expression for 3 seconds."
    raw_input("To proceed to Facial Expression Recognition press Enter...")
    predictExp = 0

    # Load Facial Expression Recognition trained model
    print "- Loading FER model... -"
    #faceExpModel = tf.keras.models.load_model("/home/bjornar/ML_models/FER/Good models(80+)/tf_keras_weights_ninthRev-88percent/tf_keras_weights_ninthRev.hdf5")
    faceExpModel = tf.keras.models.load_model(
        "/home/project/Bjorn/tf_keras_weights_ninthRev-88percent/tf_keras_weights_ninthRev.hdf5"
    )

    # Load Face Cascade for Face Detection
    print "- Loading Face Cascade for Face Detection... -"
    #cascPath = "/home/bjornar/MScDissertation/TrainingData/FaceDetection/haarcascade_frontalface_default.xml"
    cascPath = "/home/project/Bjorn/IntentionClassification-Repository/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)

    ## Initializing Head Movement variables
    # Introduce mark_detector to detect landmarks.
    mark_detector = MarkDetector()

    sample_frame = cv2.imread("sample_frame.png")
    # Setup process and queues for multiprocessing.
    img_queue = Queue()
    box_queue = Queue()
    img_queue.put(sample_frame)
    box_process = Process(target=get_face,
                          args=(
                              mark_detector,
                              img_queue,
                              box_queue,
                          ))
    box_process.start()

    # Introduce pose estimator to solve pose. Get one frame to setup the
    # estimator according to the image size.
    height, width = sample_frame.shape[:2]

    pose_estimator = PoseEstimator(img_size=(height, width))

    # Introduce scalar stabilizers for pose.
    pose_stabilizers = [
        Stabilizer(state_num=2,
                   measure_num=1,
                   cov_process=0.1,
                   cov_measure=0.1) for _ in range(6)
    ]

    noseMarks = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
                 [0, 0], [0, 0], [0, 0]]
    counter = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    numXPoints = 0
    numYPoints = 0
    sumX = 0
    sumY = 0

    currentTime = 0
    previousTime1 = 0
    previousTime2 = 0

    directionArray = []
    moveSequence = []
    moves = []
    classifyMoves = 0
    headPoseDirection = 'empty'

    if camera == 'kinect':
        ## Initialize Kinect camera
        print "Initializing camera..."
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        #print("Packet pipeline:", type(pipeline).__name__)

        # Create and set logger
        #logger = createConsoleLogger(LoggerLevel.Debug)
        setGlobalLogger()

        fn = Freenect2()
        num_devices = fn.enumerateDevices()
        if num_devices == 0:
            print("- No device connected! -")
            sys.exit(1)

        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline=pipeline)

        listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                          | FrameType.Depth)

        # Register listeners
        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        device.start()

        # NOTE: must be called after device.start()
        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

    elif camera == 'webcam':
        #video_capture = cv2.VideoCapture(0)
        video_capture = cv2.VideoCapture(-1)

    elif camera == 'video':
        #video_capture = cv2.VideoCapture(0)
        video_capture = cv2.VideoCapture(
            "/home/project/Bjorn/SonyHandycamTest.mp4")

    ## Facial Expression Recognition variables
    FER_prediction = []
    FERclass = ''
    FERstart = 0
    classifyMoves = 0

    ## Head movement variables
    predictHeadMov = 3
    HeadMov = []
    HMCclass = ''
    detectedFaces = []
    mark = []

    notDone = 0
    progressStatus = [0, 0]

    while notDone in progressStatus:  # this waits for each module to finish
        if camera == 'kinect':
            frames = listener.waitForNewFrame()

            color = frames["color"]
            color = color.asarray()
            color = cv2.resize(color, (int(873), int(491)))
            color = color[0:480, 150:790]
            color = np.delete(color, np.s_[3::], 2)

        elif camera == 'webcam':
            # Capture frame-by-frame
            ret, color = video_capture.read()

        elif camera == 'video':
            # Capture frame-by-frame
            ret, color = video_capture.read()

        ## Detect facial expression
        predictExpNums = [1, 2]
        if predictExp == 0:
            while predictExp not in predictExpNums:
                predictExp = int(
                    raw_input(
                        "\nPress 1 to detect Facial Expression or press 2 to do Head Movement classification."
                    ))
            if predictExp == 1:
                predictExp = 1
                print "------ Facial Expression Recognition ------"
            elif predictExp == 2:
                predictHeadMov = 0
                progressStatus[0] = 1

            FERstart = time.time()

        elif predictExp == 1:
            currentTime = time.time()
            if currentTime - FERstart < 10:
                FER_prediction.append(
                    facialExpressionRecogntion(color, faceExpModel,
                                               faceCascade))
            else:
                predictExp = 2
                FER_prediction = filter(None, FER_prediction)
                FERclass = mostCommon(FER_prediction)
                FERclass = FERclass[2:7]
                predictHeadMov = 0
                if FERclass == '':
                    print("Did not detect an expression, try again.")
                    predictExp = 0
                else:
                    progressStatus[0] = 1
                    print "Detected expression: " + str(FERclass)
                    print "Progress: FER DONE"
                # else:
                #     #cv2.imwrite("sample_frame.png", color)
                #     break

        ## Detect head movement class
        if predictHeadMov == 0:
            predictHeadMov = int(
                raw_input(
                    "\nPress 1 to do Head Movement classification or 2 to skip."
                ))
            if predictHeadMov == 1:
                predictHeadMov = 1
                print "------ Head Movement Classification ------"
                moveTime = int(
                    raw_input(
                        "How many seconds should I record your movement?"))
                #moveTime = 2
            else:
                predictHeadMov = 2
            timer = time.time()
            previousTime1 = timer
            previousTime2 = timer

        if predictHeadMov == 1:
            print color.shape
            color = color[0:480, 0:480]
            color = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
            cv2.imshow("", color)
            raw_input()
            print color.shape
            # Feed frame to image queue.
            img_queue.put(color)

            #pdb.set_trace()

            # Get face from box queue.
            facebox = box_queue.get()
            print color.shape

            if facebox is not None:
                # Detect landmarks from image of 128x128.
                face_img = color[facebox[1]:facebox[3], facebox[0]:facebox[2]]
                face_img = cv2.resize(face_img,
                                      (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
                face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
                marks = mark_detector.detect_marks(face_img)

                #Convert the marks locations from local CNN to global image.
                marks *= (facebox[2] - facebox[0])
                marks[:, 0] += facebox[0]
                marks[:, 1] += facebox[1]

                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(color, headPoseDirection, (20, 70), font, 1,
                            (0, 255, 0), 4)

                # # Get average position of nose
                noseMarksTemp = []
                noseMarksTemp.append(marks[30][0])
                noseMarksTemp.append(marks[30][1])
                noseMarks[0] = noseMarksTemp

                for i in range(9, 0, -1):
                    noseMarks[i] = noseMarks[i - 1]

                # Get the direction of head movement
                headPoseDirection = calculateDirection(noseMarks)

                directionArray.append(headPoseDirection)

                if classifyMoves == 0:
                    classifyMoves = 1
                    timer = time.time()
                    previousTime1 = timer
                    previousTime2 = timer

                currentTime = time.time()
                if currentTime - previousTime1 > moveTime and classifyMoves == 1:
                    print "------------------------------------------------"
                    print "Elapsed timer 1: " + str(currentTime -
                                                    previousTime1)

                    # Get most common direction
                    HMCclass = mostCommon(directionArray)

                    classifyMoves = 2
                    directionArray = []
                    previousTime1 = currentTime

                    progressStatus[1] = 1
                    print "Progress: HMC DONE"
            else:
                print "Did not detect a face"
        elif predictHeadMov == 2:
            progressStatus[1] = 1
            print "Skipped Head Movement Estimation."
            break

        # if notDone in progressStatus and predictHeadMov == 2 and predictExp == 2:
        #     print "You seem to have skipped one or more tasks."
        #     inpt = ''
        #     while inpt == '':
        #         inpt = raw_input("To do FER press 1 and to do HMC press 2...")
        #         if input == '1':
        #             predictExp = 1
        #         elif input == '2':
        #             predictHeadMov

        cv2.imshow("", color)
        if camera == 'kinect':
            listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    if camera == 'kinect':
        listener.release(frames)
        device.stop()
        device.close()
    elif camera == 'webcam' or camera == 'video':
        video_capture.release()

    cv2.destroyAllWindows()

    # Clean up the multiprocessing process.
    box_process.terminate()
    box_process.join()

    print "---------------- RESULT ----------------"

    if FERclass != '':
        print "Detected facial expression: " + str(FERclass)
    else:
        print "Did not detect any expression."

    if HMCclass != '':
        print "Detected head movement: " + str(HMCclass)
    else:
        print "Did not detect a head movement."

    print "----------------------------------------"

    intentionClassification = [FERclass, HMCclass]

    return intentionClassification
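mostCommon is used above for both the expression votes and the head-movement directions but is not shown; a plausible implementation:

from collections import Counter

def mostCommon(items):
    # return the most frequent element, or '' for an empty list
    counts = Counter(items)
    return counts.most_common(1)[0][0] if counts else ''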
Example #17
    def __init__(self, enable_rgb, enable_depth):
        ''' Init method called upon creation of Kinect object '''

        # according to the system, it loads the correct pipeline
        # and prints a log for the user
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            self.pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                self.pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                self.pipeline = CpuPacketPipeline()

        rospy.loginfo(color.BOLD + color.YELLOW + '-- PACKET PIPELINE: ' +
                      str(type(self.pipeline).__name__) + ' --' + color.END)

        self.enable_rgb = enable_rgb
        self.enable_depth = enable_depth

        # creates the freenect2 device
        self.fn = Freenect2()
        # if no kinects are plugged in the system, it quits
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            rospy.loginfo(color.BOLD + color.RED +
                          '-- ERROR: NO DEVICE CONNECTED!! --' + color.END)
            sys.exit(1)

        # otherwise it gets the first one available
        self.serial = self.fn.getDeviceSerialNumber(0)
        self.device = self.fn.openDevice(self.serial, pipeline=self.pipeline)

        # defines the streams to be acquired according to what the user wants
        types = 0
        if self.enable_rgb:
            types |= FrameType.Color
        if self.enable_depth:
            types |= (FrameType.Ir | FrameType.Depth)
        self.listener = SyncMultiFrameListener(types)

        # Register listeners
        if self.enable_rgb:
            self.device.setColorFrameListener(self.listener)
        if self.enable_depth:
            self.device.setIrAndDepthFrameListener(self.listener)

        if self.enable_rgb and self.enable_depth:
            self.device.start()
        else:
            self.device.startStreams(rgb=self.enable_rgb,
                                     depth=self.enable_depth)

        # NOTE: must be called after device.start()
        if self.enable_depth:
            self.registration = Registration(
                self.device.getIrCameraParams(),
                self.device.getColorCameraParams())

        # last number is bytes per pixel
        self.undistorted = Frame(512, 424, 4)
        self.registered = Frame(512, 424, 4)

        self.color_hsv = None
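A sketch of an acquisition step for this class (not in the original), filling the color_hsv member it initializes; the 4-channel BGRX layout of the Kinect color frame and an imported cv2 are assumed:

    def acquire(self):
        frames = self.listener.waitForNewFrame()
        if self.enable_rgb:
            color = frames["color"].asarray()
            self.color_hsv = cv2.cvtColor(color[:, :, :3], cv2.COLOR_BGR2HSV)
        if self.enable_rgb and self.enable_depth:
            self.registration.apply(frames["color"], frames["depth"],
                                    self.undistorted, self.registered)
        self.listener.release(frames)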
Example #18
    def run(self):
        self._isrunning = True

        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()
            except:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        if self._debug:
            print("Packet pipeline:", type(pipeline).__name__)

        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if self._debug:
            print("Number of devices: {}".format(num_devices))

        serial = self.fn.getDeviceSerialNumber(0)
        device = self.fn.openDevice(serial, pipeline=pipeline)
        listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                          | FrameType.Depth)

        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        if self._debug:
            print("Init done")

        device.start()

        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

        params = device.getIrCameraParams()
        self.cx = params.cx
        self.cy = params.cy
        self.fx = params.fx
        self.fy = params.fy

        undistorted = Frame(512, 424, 4)
        registered = Frame(512, 424, 4)

        while self._isrunning:
            frames = listener.waitForNewFrame()

            color = frames["color"]
            ir = frames["ir"]
            depth = frames["depth"]

            registration.apply(color,
                               depth,
                               undistorted,
                               registered,
                               bigdepth=None,
                               color_depth_map=None)

            if self._save_color:
                self._set_color_image(
                    cv2.cvtColor(color.asarray(), cv2.COLOR_BGR2RGB))
            if self._save_depth or self._save_pointcloud:
                depth_arr = depth.asarray()
                if self._save_depth:
                    self._set_depth_image(depth_arr)
            if self._save_ir:
                self._set_ir_image(ir.asarray())
            if self._save_registered or self._save_pointcloud:
                reg = cv2.cvtColor(registered.asarray(np.uint8),
                                   cv2.COLOR_BGR2RGB)
                if self._save_registered:
                    self._set_registered_image(reg)
            if self._save_undistorted:
                und = undistorted.asarray(np.uint8)
                self._set_undistorted_image(
                    cv2.cvtColor(und, cv2.COLOR_BGR2RGB))
            if self._save_pointcloud:
                self.compute_pointcloud(reg, depth_arr)

            listener.release(frames)

        if self._debug:
            print("Stopping device")
        device.stop()
        if self._debug:
            print("Closing device")
        device.close()
        if self._debug:
            print("Device stopped and closed")
Example #19
    print("hi")
finally:
    import pylibfreenect2
from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame

try:
    from pylibfreenect2 import OpenCLPacketPipeline
    pipeline = OpenCLPacketPipeline()
except:
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()
print("Packet pipeline:", type(pipeline).__name__)

fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    print("No device connected!")
    sys.exit(1)

serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial, pipeline=pipeline)

types = 0
types |= (FrameType.Ir | FrameType.Depth)
listener = SyncMultiFrameListener(types)
device.setIrAndDepthFrameListener(listener)
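The snippet ends before the device is started; a hedged continuation that starts the depth-only streams and displays one frame, using the display normalizations seen in the other examples (cv2 import assumed):

device.startStreams(rgb=False, depth=True)

frames = listener.waitForNewFrame()
cv2.imshow("ir", frames["ir"].asarray() / 65535.)
cv2.imshow("depth", frames["depth"].asarray() / 4500.)
cv2.waitKey(0)
listener.release(frames)

device.stop()
device.close()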
Example #20
def main(lata, longa, latb, longb):
    lata = conv.convert_gps(float(lata))
    longa = conv.convert_gps(float(longa))
    latb = conv.convert_gps(float(latb))
    longb = conv.convert_gps(float(longb))
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)
    # create list for gps locations of obstacles (appended to in the loop below)
    obs = []

    ###############################################################

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set True if you need them
    need_bigdepth = False
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512), np.int32).ravel() \
        if need_color_depth_map else None

    while True:
        frames = listener.waitForNewFrame()
        #time.sleep(5)
        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        # NOTE for visualization:
        # cv2.imshow without OpenGL backend seems to be quite slow to draw all
        # things below. Try commenting out some imshow if you don't have a fast
        # visualization backend.
        #cv2.imshow("ir", ir.asarray() / 65535.)
        #time.sleep(2)
        cv2.imshow("depth", depth.asarray() / 4500.)

        #Comment the coming lines
        print('center value is:', depth.asarray(np.float32).item((212, 256)))
        print('down1 value is:', depth.asarray(np.float32).item((270, 256)))
        '''print('up1 value is:', depth.asarray(np.float32).item((150, 256)))
        print('up2 value is:', depth.asarray(np.float32).item((100, 256)))
        print('down2 value is:', depth.asarray(np.float32).item((350, 256)))
        print('right1 value is:', depth.asarray(np.float32).item((212, 300)))
        print('right2 value is:', depth.asarray(np.float32).item((212, 350)))
        print('left1 value is:', depth.asarray(np.float32).item((212, 200)))
        print('left2 value is:', depth.asarray(np.float32).item((212, 150)))'''

        x = depth.asarray(np.float32)
        y = np.logical_and(np.greater(x, 1200), np.less(x, 1500))
        #print(np.extract(y, x))
        no_of_pixels = np.count_nonzero(np.extract(y, x))
        print(no_of_pixels)
        #get gps coordinate at this location########
        '''
        assuming the gps received is in lat, long variables:
        obs.append([lat, long])
        this is to be retrieved in the path planning code.
        '''
        if no_of_pixels > 14000:  # many pixels in the 1.2-1.5 m band: large obstacle
            print('big Obstacle found, stop!!!')
            obs.append(findPosition())
        elif no_of_pixels > 8000:  # fewer pixels in the same band: small obstacle
            print('small Obstacle found!!')
            obs.append(findPosition())
        #cv2.imshow("color", cv2.resize(color.asarray(),(int(1920 / 3), int(1080 / 3))))
        #cv2.imshow("registered", registered.asarray(np.uint8))

        if need_bigdepth:
            cv2.imshow(
                "bigdepth",
                cv2.resize(bigdepth.asarray(np.float32),
                           (int(1920 / 3), int(1082 / 3))))
        if need_color_depth_map:
            cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

        listener.release(frames)
        #time.sleep(20)
        key = cv2.waitKey(delay=1) & 0xFF
        if key == ord('q'):
            break

    #######################################################

    target(latb, longb, lata, longa)
    #stage3.py main  fnc
    #######################################################
    device.stop()
    device.close()
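The in-loop obstacle test above can be read as one function: count the depth pixels inside a distance band and compare against fixed thresholds. A hedged restatement of exactly that logic:

def count_band_pixels(depth_frame, near=1200.0, far=1500.0):
    # number of depth pixels between `near` and `far` millimeters
    x = depth_frame.asarray(np.float32)
    band = np.logical_and(np.greater(x, near), np.less(x, far))
    return np.count_nonzero(np.extract(band, x))

# usage inside the loop: big obstacle if > 14000 pixels, small if > 8000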