def __init__(self, device_id=0):
        """Create an OpenCV capture object associated with the provided webcam
        device ID.
        """
        logger = createConsoleLogger(LoggerLevel.Error)
        setGlobalLogger(logger)
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No Kinect!")
            raise LookupError()

        serial = self.fn.getDeviceSerialNumber(device_id)
        # `pipeline` is assumed to be a module-level packet pipeline
        # created at import time (see the import-fallback blocks in the
        # later examples).
        self.device = self.fn.openDevice(serial, pipeline=pipeline)

        types = 0
        types |= FrameType.Color

        self.listener = SyncMultiFrameListener(types)
        self.device.setColorFrameListener(self.listener)

        # Start a thread to continuously capture frames.
        # This must be done because different layers of buffering in the webcam
        # and OS drivers will cause you to retrieve old frames if they aren't
        # continuously read.
        self._capture_frame = None
        # Use a lock to prevent concurrent access to the camera.
        self._capture_lock = threading.Lock()
        self._capture_thread = threading.Thread(target=self._grab_frames)
        self._capture_thread.daemon = True
        self._capture_thread.start()
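The thread target `_grab_frames` is referenced above but not included in this example. A minimal sketch, assuming the device has been started and following the pylibfreenect2 listener API used throughout these examples:

    def _grab_frames(self):
        # Hypothetical sketch: continuously drain the listener so reads
        # always see a fresh frame. Assumes self.device.start() was called.
        while True:
            frames = self.listener.waitForNewFrame()
            # Copy before release(), which invalidates the frame buffer.
            color = frames["color"].asarray().copy()
            with self._capture_lock:
                self._capture_frame = color
            self.listener.release(frames)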
Example #2
    def _start_kinect(self):
        if self.device == 'kinect':
            freenect.sync_get_depth()  # Grabbing a frame initializes the device
            freenect.sync_get_video()

        elif self.device == 'kinect2':
            # a: Identify packet pipeline to use: 1) OpenCL, 2) OpenGL, 3) CPU
            try:
                self.pipeline = FN2.OpenCLPacketPipeline()
            except Exception:
                try:
                    self.pipeline = FN2.OpenGLPacketPipeline()
                except Exception:
                    self.pipeline = FN2.CpuPacketPipeline()
            self._print('PacketPipeline: ' + type(self.pipeline).__name__)

            # b: Create and set logger
            self.logger = FN2.createConsoleLogger(FN2.LoggerLevel.NONE)
            FN2.setGlobalLogger(self.logger)

            # c: Identify device and create listener
            self.fn = FN2.Freenect2()
            serial = self.fn.getDeviceSerialNumber(0)
            self.K2device = self.fn.openDevice(serial, pipeline=self.pipeline)

            self.listener = FN2.SyncMultiFrameListener(
                FN2.FrameType.Color | FN2.FrameType.Depth)
            # d: Register listeners
            self.K2device.setColorFrameListener(self.listener)
            self.K2device.setIrAndDepthFrameListener(self.listener)
            # e: Start device and create registration
            self.K2device.start()
            self.registration = FN2.Registration(
                self.K2device.getIrCameraParams(),
                self.K2device.getColorCameraParams())
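Once `_start_kinect` returns, synchronized frames can be pulled from the listener. A minimal sketch of a read helper (hypothetical; not part of the original class), assuming numpy is imported as `np`:

    def _read_kinect2_frame(self):
        # Hypothetical helper: grab one synchronized color/depth pair
        # and map them into a common frame via the registration.
        frames = self.listener.waitForNewFrame()
        undistorted = FN2.Frame(512, 424, 4)
        registered = FN2.Frame(512, 424, 4)
        self.registration.apply(frames['color'], frames['depth'],
                                undistorted, registered)
        color = registered.asarray(np.uint8).copy()
        depth = undistorted.asarray(np.float32).copy()
        self.listener.release(frames)
        return color, depth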
    def __init__(self, use_kinect=False, basket=False, gui=True):
        self.connected = False
        self.max_radius = 100000
        self.running = False
        self.killed = False
        self.gui = gui
        self.basket = basket
        self.use_kinect = use_kinect
        # self.fig, self.ax = plt.subplots()

        if self.use_kinect:
            self.px = []
            self.py = []
            logger = createConsoleLogger(LoggerLevel.Error)
            setGlobalLogger(logger)

        while not self.connected:
            # Bad, but needed
            if self.use_kinect:
                self.connect_kinect()
            else:
                self.connect_camera()

        self.thread = threading.Thread(target=self.video_thread)
        self.thread.start()
    def __init__(self, device_no=0):
        # Packet pipeline
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            self.pipeline = OpenGLPacketPipeline()
        except Exception:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                self.pipeline = OpenCLPacketPipeline()
            except Exception:
                from pylibfreenect2 import CpuPacketPipeline
                self.pipeline = CpuPacketPipeline()
        print "Packet pipeline: {}".format(type(self.pipeline).__name__)

        # Create and set logger
        logger = createConsoleLogger(LoggerLevel.Error)
        setGlobalLogger(logger)

        # Detect Kinect Devices
        self.fn = Freenect2()
        num_devices = self.fn.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            self.device = None
            sys.exit(1)

        # Create Device and Frame Listeners
        self.serial = self.fn.getDeviceSerialNumber(device_no)
        self.device = self.fn.openDevice(self.serial, pipeline=self.pipeline)
        self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                               | FrameType.Depth)

        # Register Listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        # Start Device
        self.device.start()

        # Set Camera Parameters
        self._ir_params = self.device.getIrCameraParams()
        self._color_params = self.device.getColorCameraParams()
        '''
        self._ir_params.fx = cameraParams.params['ir_fx']
        self._ir_params.fy = cameraParams.params['ir_fy']
        self._ir_params.cx = cameraParams.params['ir_cx']
        self._ir_params.cy = cameraParams.params['ir_cy']

        self._color_params.fx = cameraParams.params['col_fx']
        self._color_params.fy = cameraParams.params['col_fy']
        self._color_params.cx = cameraParams.params['col_cx']
        self._color_params.cy = cameraParams.params['col_cy']
        '''
        # Registration
        self.registration = Registration(self._ir_params, self._color_params)

        print '[Info] Initialization Finished!'
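This class never stops the device; a minimal cleanup sketch (hypothetical, mirroring the device.stop()/device.close() calls used in the standalone examples below):

    def close(self):
        # Hypothetical cleanup: stop the stream and release the device.
        if self.device is not None:
            self.device.stop()
            self.device.close()
            self.device = None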
Example #5
    def start(self):
        """Starts the Kinect v2 sensor stream.

        Raises
        ------
        IOError
            If the Kinect v2 is not detected.
        """
        # open packet pipeline
        if self._packet_pipeline_mode == Kinect2PacketPipelineMode.OPENGL:
            self._pipeline = lf2.OpenGLPacketPipeline()
        elif self._packet_pipeline_mode == Kinect2PacketPipelineMode.CPU:
            self._pipeline = lf2.CpuPacketPipeline()
        else:
            # Fall back to the CPU pipeline so openDevice() below never
            # sees an undefined pipeline.
            self._pipeline = lf2.CpuPacketPipeline()

        # setup logger
        self._logger = lf2.createConsoleLogger(lf2.LoggerLevel.Warning)
        lf2.setGlobalLogger(self._logger)

        # check devices
        self._fn_handle = lf2.Freenect2()
        self._num_devices = self._fn_handle.enumerateDevices()
        if self._num_devices == 0:
            raise IOError(
                'Failed to start stream. No Kinect2 devices available!')
        if self._num_devices <= self._device_num:
            raise IOError(
                'Failed to start stream. Device num %d unavailable!' %
                (self._device_num))

        # open device
        self._serial = self._fn_handle.getDeviceSerialNumber(self._device_num)
        self._device = self._fn_handle.openDevice(self._serial,
                                                  pipeline=self._pipeline)

        # add device sync modes
        self._listener = lf2.SyncMultiFrameListener(lf2.FrameType.Color
                                                    | lf2.FrameType.Ir
                                                    | lf2.FrameType.Depth)
        self._device.setColorFrameListener(self._listener)
        self._device.setIrAndDepthFrameListener(self._listener)

        # start device
        self._device.start()

        # open registration
        self._registration = None
        if self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:
            logging.debug('Using color to depth registration')
            self._registration = lf2.Registration(
                self._device.getIrCameraParams(),
                self._device.getColorCameraParams())
        self._running = True
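A stop() counterpart is implied by the _running flag but not shown here; a minimal sketch under that assumption:

    def stop(self):
        # Hypothetical sketch: stop the sensor stream started above.
        if not self._running:
            return
        self._device.stop()
        self._device.close()
        self._running = False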
Example #6
    def __init__(self):
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except Exception:
            try:
                from pylibfreenect2 import OpenGLPacketPipeline
                pipeline = OpenGLPacketPipeline()
            except Exception:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(pipeline).__name__)

        # Create and set logger
        logger = createConsoleLogger(LoggerLevel.Debug)
        setGlobalLogger(logger)

        self.freenect = Freenect2()
        num_devices = self.freenect.enumerateDevices()
        if num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        serial = self.freenect.getDeviceSerialNumber(0)
        self.device = self.freenect.openDevice(serial, pipeline=pipeline)

        self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                               | FrameType.Depth)

        # Register listeners
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)

        self.device.start()

        self.registration = Registration(self.device.getIrCameraParams(),
                                         self.device.getColorCameraParams())

        self.run_loop = True
        self.lock = threading.Lock()
        self.img_buffer = np.zeros(
            (RES_COLOR[1], RES_COLOR[0], 3, SIZE_AVG_FILTER))
        self.idx_buffer = 0

        # Close on Ctrl-C
        def signal_handler(signum, frame):
            self.run_loop = False

        signal.signal(signal.SIGINT, signal_handler)
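The img_buffer above suggests a moving-average filter over the last SIZE_AVG_FILTER color frames. A minimal sketch of such a loop (hypothetical; RES_COLOR and SIZE_AVG_FILTER are module constants assumed to be defined elsewhere):

    def run(self):
        # Hypothetical capture loop: fill the ring buffer and average it
        # to suppress per-frame sensor noise.
        while self.run_loop:
            frames = self.listener.waitForNewFrame()
            img = frames["color"].asarray()[:, :, :3]
            with self.lock:
                self.img_buffer[:, :, :, self.idx_buffer] = img
                self.idx_buffer = (self.idx_buffer + 1) % SIZE_AVG_FILTER
                self.img_avg = self.img_buffer.mean(axis=3).astype(np.uint8)
            self.listener.release(frames)
        self.device.stop()
        self.device.close()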
Example #7
    def __init__(self, calibration_folder=None):
        self.freenect = Freenect2()
        setGlobalLogger(createConsoleLogger(LoggerLevel.NONE))
        num_devices = self.freenect.enumerateDevices()
        if num_devices == 0:
            raise ValueError("No kinect2 device connected!")

        serial = self.freenect.getDeviceSerialNumber(0)
        # Assumes a module-level `pipeline` object (see the
        # import-fallback blocks in the surrounding examples).
        self.device = self.freenect.openDevice(serial, pipeline=pipeline)
        self.listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                               | FrameType.Depth)
        self.device.setColorFrameListener(self.listener)
        self.device.setIrAndDepthFrameListener(self.listener)
        self.device.start()

        self.calibration = {}
        if calibration_folder is not None:
            self.calibration = utils.load_calibration(calibration_folder)
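A frame accessor is not shown for this class; a minimal sketch (hypothetical), following the waitForNewFrame/release pattern used elsewhere in these examples:

    def get_frames(self):
        # Hypothetical accessor: return copies of the latest color and
        # depth images (copies survive the release() below).
        frames = self.listener.waitForNewFrame()
        color = frames["color"].asarray().copy()
        depth = frames["depth"].asarray().copy()
        self.listener.release(frames)
        return color, depth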
def test_logger():
    logger_default = createConsoleLoggerWithDefaultLevel()
    assert isinstance(logger_default, Logger)

    for level in [LoggerLevel.NONE, LoggerLevel.Error,
                  LoggerLevel.Warning,
                  LoggerLevel.Info,
                  LoggerLevel.Debug]:
        logger = createConsoleLogger(level)
        setGlobalLogger(logger)
        assert getGlobalLogger().level() == level

    # Turn logging off
    setGlobalLogger(None)
    # Set to default
    setGlobalLogger(logger_default)

    logger = getGlobalLogger()
    if sys.version_info.major >= 3:
        message = b"test debugging message"
    else:
        message = "test debugging message"
    logger.log(LoggerLevel.Debug, message)
def main():
    instructions = 0
    print "--- Intention classification for communication between a human and a robot ---"
    if instructions == 1:
        print "First you will be required to present a facial expression before you will do a head movement."
        print "If done correctly these gestures will be detected by the robot and it will perform the desired task."
        raw_input("Press Enter to continue...")

    if instructions == 1:
        print "This is the module for facial expression recognition."
        print "This program can detect the emotions: Happy and Angry."
        print "The program will look for the expression for 3 seconds."
    raw_input("To proceed to Facial Expression Recognition press Enter...")
    predictExp = 0

    # Load Facial Expression Recognition trained model
    print "- Loading FER model... -"
    #faceExpModel = tf.keras.models.load_model("/home/bjornar/ML_models/FER/Good models(80+)/tf_keras_weights_ninthRev-88percent/tf_keras_weights_ninthRev.hdf5")
    faceExpModel = tf.keras.models.load_model(
        "/home/project/Bjorn/tf_keras_weights_ninthRev-88percent/tf_keras_weights_ninthRev.hdf5"
    )

    # Load Face Cascade for Face Detection
    print "- Loading Face Cascade for Face Detection... -"
    #cascPath = "/home/bjornar/MScDissertation/TrainingData/FaceDetection/haarcascade_frontalface_default.xml"
    cascPath = "/home/project/Bjorn/IntentionClassification-Repository/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)

    ## Initializing Head Movement variables
    # Introduce mark_detector to detect landmarks.
    mark_detector = MarkDetector()

    sample_frame = cv2.imread("sample_frame.png")
    # Setup process and queues for multiprocessing.
    img_queue = Queue()
    box_queue = Queue()
    img_queue.put(sample_frame)
    box_process = Process(target=get_face,
                          args=(
                              mark_detector,
                              img_queue,
                              box_queue,
                          ))
    box_process.start()

    # Introduce pose estimator to solve pose. Get one frame to setup the
    # estimator according to the image size.
    height, width = sample_frame.shape[:2]

    pose_estimator = PoseEstimator(img_size=(height, width))

    # Introduce scalar stabilizers for pose.
    pose_stabilizers = [
        Stabilizer(state_num=2,
                   measure_num=1,
                   cov_process=0.1,
                   cov_measure=0.1) for _ in range(6)
    ]

    noseMarks = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
                 [0, 0], [0, 0], [0, 0]]
    counter = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    numXPoints = 0
    numYPoints = 0
    sumX = 0
    sumY = 0

    currentTime = 0
    previousTime1 = 0
    previousTime2 = 0

    directionArray = []
    moveSequence = []
    moves = []
    classifyMoves = 0
    headPoseDirection = 'empty'

    if camera == 'kinect':
        ## Initialize Kinect camera
        print "Initializing camera..."
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except Exception:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                pipeline = OpenCLPacketPipeline()
            except Exception:
                from pylibfreenect2 import CpuPacketPipeline
                pipeline = CpuPacketPipeline()
        #print("Packet pipeline:", type(pipeline).__name__)

        # Disable logging (pass a console logger instead for debugging)
        # logger = createConsoleLogger(LoggerLevel.Debug)
        setGlobalLogger(None)

        fn = Freenect2()
        num_devices = fn.enumerateDevices()
        if num_devices == 0:
            print("- No device connected! -")
            sys.exit(1)

        serial = fn.getDeviceSerialNumber(0)
        device = fn.openDevice(serial, pipeline=pipeline)

        listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                          | FrameType.Depth)

        # Register listeners
        device.setColorFrameListener(listener)
        device.setIrAndDepthFrameListener(listener)

        device.start()

        # NOTE: must be called after device.start()
        registration = Registration(device.getIrCameraParams(),
                                    device.getColorCameraParams())

    elif camera == 'webcam':
        #video_capture = cv2.VideoCapture(0)
        video_capture = cv2.VideoCapture(-1)

    elif camera == 'video':
        #video_capture = cv2.VideoCapture(0)
        video_capture = cv2.VideoCapture(
            "/home/project/Bjorn/SonyHandycamTest.mp4")

    ## Facial Expression Recognition variables
    FER_prediction = []
    FERclass = ''
    FERstart = 0
    classifyMoves = 0

    ## Head movement variables
    predictHeadMov = 3
    HeadMov = []
    HMCclass = ''
    detectedFaces = []
    mark = []

    notDone = 0
    progressStatus = [0, 0]

    while notDone in progressStatus:  # This waits for each module to finish
        if camera == 'kinect':
            frames = listener.waitForNewFrame()

            color = frames["color"]
            color = color.asarray()
            color = cv2.resize(color, (873, 491))
            color = color[0:480, 150:790]
            color = np.delete(color, np.s_[3::], 2)

        elif camera == 'webcam':
            # Capture frame-by-frame
            ret, color = video_capture.read()

        elif camera == 'video':
            # Capture frame-by-frame
            ret, color = video_capture.read()

        ## Detect facial expression
        predictExpNums = [1, 2]
        if predictExp == 0:
            while predictExp not in predictExpNums:
                predictExp = int(
                    raw_input(
                        "\nPress 1 to detect Facial Expression or press 2 to do Head Movement classification."
                    ))
            if predictExp == 1:
                predictExp = 1
                print "------ Facial Expression Recognition ------"
            elif predictExp == 2:
                predictHeadMov = 0
                progressStatus[0] = 1

            FERstart = time.time()

        elif predictExp == 1:
            currentTime = time.time()
            if currentTime - FERstart < 10:
                FER_prediction.append(
                    facialExpressionRecogntion(color, faceExpModel,
                                               faceCascade))
            else:
                predictExp = 2
                FER_prediction = filter(None, FER_prediction)
                FERclass = mostCommon(FER_prediction)
                FERclass = FERclass[2:7]
                predictHeadMov = 0
                if FERclass == '':
                    print("Did not detect an expression, try again.")
                    predictExp = 0
                else:
                    progressStatus[0] = 1
                    print "Detected expression: " + str(FERclass)
                    print "Progress: FER DONE"
                # else:
                #     #cv2.imwrite("sample_frame.png", color)
                #     break

        ## Detect head movement class
        if predictHeadMov == 0:
            predictHeadMov = int(
                raw_input(
                    "\nPress 1 to do Head Movement classification or 2 to skip."
                ))
            if predictHeadMov == 1:
                predictHeadMov = 1
                print "------ Head Movement Classification ------"
                moveTime = int(
                    raw_input(
                        "How many seconds should I record your movement?"))
                #moveTime = 2
            else:
                predictHeadMov = 2
            timer = time.time()
            previousTime1 = timer
            previousTime2 = timer

        if predictHeadMov == 1:
            print color.shape
            color = color[0:480, 0:480]
            color = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
            cv2.imshow("", color)
            raw_input()
            print color.shape
            # Feed frame to image queue.
            img_queue.put(color)

            #pdb.set_trace()

            # Get face from box queue.
            facebox = box_queue.get()
            print color.shape

            if facebox is not None:
                # Detect landmarks from image of 128x128.
                face_img = color[facebox[1]:facebox[3], facebox[0]:facebox[2]]
                face_img = cv2.resize(face_img,
                                      (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
                face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
                marks = mark_detector.detect_marks(face_img)

                #Convert the marks locations from local CNN to global image.
                marks *= (facebox[2] - facebox[0])
                marks[:, 0] += facebox[0]
                marks[:, 1] += facebox[1]

                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(color, headPoseDirection, (20, 70), font, 1,
                            (0, 255, 0), 4)

                # Get average position of nose
                noseMarksTemp = []
                noseMarksTemp.append(marks[30][0])
                noseMarksTemp.append(marks[30][1])
                noseMarks[0] = noseMarksTemp

                for i in range(9, 0, -1):
                    noseMarks[i] = noseMarks[i - 1]

                # Get the direction of head movement
                headPoseDirection = calculateDirection(noseMarks)

                directionArray.append(headPoseDirection)

                if classifyMoves == 0:
                    classifyMoves = 1
                    timer = time.time()
                    previousTime1 = timer
                    previousTime2 = timer

                currentTime = time.time()
                if currentTime - previousTime1 > moveTime and classifyMoves == 1:
                    print "------------------------------------------------"
                    print "Elapsed timer 1: " + str(currentTime -
                                                    previousTime1)

                    # Get most common direction
                    HMCclass = mostCommon(directionArray)

                    classifyMoves = 2
                    directionArray = []
                    previousTime1 = currentTime

                    progressStatus[1] = 1
                    print "Progress: HMC DONE"
            else:
                print "Did not detect a face"
        elif predictHeadMov == 2:
            progressStatus[1] = 1
            print "Skipped Head Movement Estimation."
            break

        # if notDone in progressStatus and predictHeadMov == 2 and predictExp == 2:
        #     print "You seem to have skipped one or more tasks."
        #     inpt = ''
        #     while inpt == '':
        #         inpt = raw_input("To do FER press 1 and to do HMC press 2...")
        #         if input == '1':
        #             predictExp = 1
        #         elif input == '2':
        #             predictHeadMov

        cv2.imshow("", color)
        if camera == 'kinect':
            listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    if camera == 'kinect':
        listener.release(frames)
        device.stop()
        device.close()
    elif camera == 'webcam' or camera == 'video':
        video_capture.release()

    cv2.destroyAllWindows()

    # Clean up the multiprocessing process.
    box_process.terminate()
    box_process.join()

    print "---------------- RESULT ----------------"

    if FERclass != '':
        print "Detected facial expression: " + str(FERclass)
    else:
        print "Did not detect any expression."

    if HMCclass != '':
        print "Detected head movement: " + str(HMCclass)
    else:
        print "Did not detect a head movement."

    print "----------------------------------------"

    intentionClassification = [FERclass, HMCclass]

    return intentionClassification
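mostCommon, calculateDirection, and facialExpressionRecogntion are project helpers defined outside this listing. Under the obvious interpretation, mostCommon returns the most frequent element; a minimal sketch (hypothetical, Python 2 compatible):

from collections import Counter

def mostCommon(items):
    # Hypothetical reimplementation: most frequent element, or '' for an
    # empty list (matching the FERclass == '' check above).
    if not items:
        return ''
    return Counter(items).most_common(1)[0][0]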
Example #11
def projection():

    pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set True if you need them
    need_bigdepth = True
    need_color_depth_map = True

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
        if need_color_depth_map else None

    color_pub = rospy.Publisher('color', Image, queue_size=10)
    depth_pub = rospy.Publisher('depth', Image, queue_size=10)
    ir_pub = rospy.Publisher('ir', Image, queue_size=10)
    small_depth_pub = rospy.Publisher('small_depth', Image, queue_size=10)

    rospy.init_node('projection', anonymous=True)
    rate = rospy.Rate(10)  # 10hz

    while not rospy.is_shutdown():
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        color_image = CvBridge().cv2_to_imgmsg(color.asarray())
        color_pub.publish(color_image)

        depth_image = CvBridge().cv2_to_imgmsg(bigdepth.asarray(np.float32))
        depth_pub.publish(depth_image)

        ir_image = CvBridge().cv2_to_imgmsg(ir.asarray())
        ir_pub.publish(ir_image)

        small_depth_image = CvBridge().cv2_to_imgmsg(depth.asarray())
        small_depth_pub.publish(small_depth_image)

        rate.sleep()

        listener.release(frames)

        key = cv2.waitKey(delay=1)
        if key == ord('q'):
            break

    device.stop()
    device.close()
    sys.exit(0)
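A typical ROS entry point for projection(), as a hedged sketch (the original module's guard is not shown in this listing):

if __name__ == '__main__':
    try:
        projection()
    except rospy.ROSInterruptException:
        pass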
    def start(self):
        """Starts the Kinect v2 sensor stream.

        Raises
        ------
        IOError
            If the Kinect v2 is not detected.
        """

        # setup logger
        self._logger = lf2.createConsoleLogger(lf2.LoggerLevel.Warning)
        lf2.setGlobalLogger(self._logger)

        # open packet pipeline
        self._pipeline = None
        if (self._packet_pipeline_mode == Kinect2PacketPipelineMode.OPENGL or
                self._packet_pipeline_mode == Kinect2PacketPipelineMode.AUTO):
            # Try OpenGL packet pipeline first or if specified
            try:
                self._pipeline = lf2.OpenGLPacketPipeline()
            except BaseException:
                logging.warning("OpenGL not available. "
                                "Defaulting to CPU-based packet pipeline.")

        if self._pipeline is None and (self._packet_pipeline_mode
                                       == Kinect2PacketPipelineMode.OPENCL
                                       or self._packet_pipeline_mode
                                       == Kinect2PacketPipelineMode.AUTO):
            # Try OpenCL if available
            try:
                self._pipeline = lf2.OpenCLPacketPipeline()
            except BaseException:
                logging.warning(
                    "OpenCL not available. Defaulting to CPU packet pipeline.")

        if (self._pipeline is None or self._packet_pipeline_mode
                == Kinect2PacketPipelineMode.CPU):  # CPU packet pipeline
            self._pipeline = lf2.CpuPacketPipeline()

        # check devices
        self._fn_handle = lf2.Freenect2()
        self._num_devices = self._fn_handle.enumerateDevices()
        if self._num_devices == 0:
            raise IOError(
                "Failed to start stream. No Kinect2 devices available!")
        if self._num_devices <= self._device_num:
            raise IOError(
                "Failed to start stream. Device num %d unavailable!" %
                (self._device_num))

        # open device
        self._serial = self._fn_handle.getDeviceSerialNumber(self._device_num)
        self._device = self._fn_handle.openDevice(self._serial,
                                                  pipeline=self._pipeline)

        # add device sync modes
        self._listener = lf2.SyncMultiFrameListener(lf2.FrameType.Color
                                                    | lf2.FrameType.Ir
                                                    | lf2.FrameType.Depth)
        self._device.setColorFrameListener(self._listener)
        self._device.setIrAndDepthFrameListener(self._listener)

        # start device
        self._device.start()

        # open registration
        self._registration = None
        if self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:
            logging.debug("Using color to depth registration")
            self._registration = lf2.Registration(
                self._device.getIrCameraParams(),
                self._device.getColorCameraParams(),
            )
        self._running = True
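Kinect2PacketPipelineMode and Kinect2RegistrationMode are defined elsewhere in this package; a minimal sketch of the constants this method checks (hypothetical values, for illustration only):

class Kinect2PacketPipelineMode(object):
    # Hypothetical sketch: packet pipeline selection modes.
    OPENGL, OPENCL, CPU, AUTO = 0, 1, 2, 3

class Kinect2RegistrationMode(object):
    # Hypothetical sketch: registration modes.
    NONE, COLOR_TO_DEPTH = 0, 1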
import sys
from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame
from pylibfreenect2 import createConsoleLogger, setGlobalLogger
from pylibfreenect2 import LoggerLevel

try:
    from pylibfreenect2 import OpenGLPacketPipeline
    pipeline = OpenGLPacketPipeline()
except Exception:
    from pylibfreenect2 import CpuPacketPipeline
    pipeline = CpuPacketPipeline()

# Create and set logger
logger = createConsoleLogger(LoggerLevel.Debug)
setGlobalLogger(logger)

fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    print("No device connected!")
    sys.exit(1)

serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial, pipeline=pipeline)

listener = SyncMultiFrameListener(
    FrameType.Color | FrameType.Ir | FrameType.Depth)

# Register listeners
device.setColorFrameListener(listener)
def main():
    t1 = time.time()
    inputPath = "DataSet/"
    IDfile = open(IDFilePath, mode='a')
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
        print("Using OpenCL")
        input()
    except Exception:
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            pipeline = OpenGLPacketPipeline()
        except Exception:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()

    #print("Packet pipeline:", type(pipeline).__name__)
    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)
    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)
    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)
    
    #print("FirmwareVersion: "+ firmware)
    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir | FrameType.Depth)
    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)
    device.start()
    g.InitializeCamera()
    h.InitializeCameras()
    t2=time.time()
    print("Initialization before the loop "+str(t2-t1))
    while True:
        t3=time.time()
        t_ms = time.time()
        ts = datetime.datetime.now()

        print(t_ms)
        t_ms = t_ms * 1000
        t_ms = str(t_ms).split('.')[0]
        print(t_ms)
        imageID = t_ms

        print(str(ts).split('.')[0])
        ts = str(ts).split('.')[0]
        tsList1 = str(ts).split(' ')
        date = tsList1[0]
        tm = tsList1[1]
        print(tsList1)
        tsListTime = str(tsList1[1]).split(':')
        tsTime = str(tsListTime[0]) + "." + str(tsListTime[1]) + "." + str(tsListTime[2])
        print(tsTime)
        ts = str(tsList1[0]) + "_" + tsTime
        print(ts)
        t4=time.time()
        print("Starting part of the while loop: "+str(t4-t3))
        print("Press \'Enter\' to trigger the acquisition of images or \'q\' to Quit")
        key = input()
        print(key)
        if not key:
            t5=time.time()
            captureKinect(device, serial, listener, ts, imageID)
            t0=time.time()
            g.TakeImages(imageID, ts)
            t9=time.time()
            h.TakeImages(imageID, ts)
            t6 = time.time()
            writeToXML(date, tm, ts, imageID)
            t7 = time.time()
            writeIDtoFile(IDfile,imageID)
            t8=time.time()
            '''ans = input("Display the images captured? (y/n)")
            if(ans=='y'):
                displayImagesCaptured(inputPath, ts, imageID)
            t9=time.time()'''
            print("Kinect Capture took "+str(t0-t5))
            print("Ensenso took "+str(t9-t0))
            print("GiGE took "+str(t6-t9))
            print("Writing to XML took "+str(t7-t6))
            print("Writing ID took "+ str(t8-t7))
            #print("Displaying images took "+str(t6-t5))
            print("Total time after pressing enter: "+str(t8-t5))
            print("Total loop time: "+str(t8-t3))
        elif key == 'q':
            ta=time.time()
            closeKinect(device)
            g.ShutdownCamera()
            h.ShutdownCameras()
            tb=time.time()
            print("Close "+str(tb-ta))
            #print("Overall "+str(tb-t11))
            IDfile.close()
            sys.exit(0)
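writeIDtoFile and writeToXML are project-specific helpers not shown in this listing; a plausible minimal sketch of the former:

def writeIDtoFile(id_file, image_id):
    # Hypothetical sketch: append one image ID per line and flush so the
    # record survives an abrupt exit.
    id_file.write(str(image_id) + "\n")
    id_file.flush()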
Example #15
def main():
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
    except Exception:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set True if you need them
    need_bigdepth = False
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512),  np.int32).ravel() \
        if need_color_depth_map else None

    point = pcl.PointCloud()
    visual = pcl.pcl_visualization.CloudViewing()

    while True:
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        # NOTE for visualization:
        # cv2.imshow without OpenGL backend seems to be quite slow to draw all
        # things below. Try commenting out some imshow if you don't have a fast
        # visualization backend.
        # cv2.imshow("ir", ir.asarray() / 65535.)
        # cv2.imshow("depth", depth.asarray() / 4500.)
        # cv2.imshow("color", cv2.resize(color.asarray(), (int(1920 / 3), int(1080 / 3))))
        # cv2.imshow("registered", registered.asarray(np.uint8))

        # if need_bigdepth:
        #     cv2.imshow("bigdepth", cv2.resize(bigdepth.asarray(np.float32),
        #                                       (int(1920 / 3), int(1082 / 3))))
        # if need_color_depth_map:
        #     cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

        # The raw 424x512 depth image is not a valid point cloud; convert
        # it to (N, 3) XYZ points first (see the depth_to_xyz sketch after
        # this example).
        point = pcl.PointCloud(depth_to_xyz(registration, undistorted))
        visual.ShowMonochromeCloud(point)

        listener.release(frames)

        # key = cv2.waitKey(delay=1)
        # if key == ord('q'):
        #     break
        v = True
        while v:
            v = not (visual.WasStopped())

    device.stop()
    device.close()

    sys.exit(0)
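pcl.PointCloud expects an (N, 3) float32 array, not the raw 424x512 depth image. A hedged conversion sketch using pylibfreenect2's per-pixel getPointXYZ lookup (slow but simple; a vectorized version would be preferable in practice):

def depth_to_xyz(registration, undistorted):
    # Hypothetical helper: turn the undistorted depth frame into an
    # (N, 3) array of valid XYZ points for python-pcl.
    points = []
    for r in range(424):
        for c in range(512):
            x, y, z = registration.getPointXYZ(undistorted, r, c)
            if not np.isnan(z):
                points.append((x, y, z))
    return np.asarray(points, dtype=np.float32)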
Example #16
    def __init__(self):
        
        try:
            from pylibfreenect2 import OpenGLPacketPipeline
            self._pipeline = OpenGLPacketPipeline()
        except Exception:
            try:
                from pylibfreenect2 import OpenCLPacketPipeline
                self._pipeline = OpenCLPacketPipeline()
            except Exception:
                from pylibfreenect2 import CpuPacketPipeline
                self._pipeline = CpuPacketPipeline()
        print("Packet pipeline:", type(self._pipeline).__name__)

        pygame.init()

        # Used to manage how fast the screen updates
        self._clock = pygame.time.Clock()

        # Set the width and height of the screen [width, height]
        self._screen_width = 500
        self._screen_height = 300
        self._screen = pygame.display.set_mode(
            (self._screen_width, self._screen_height),
            pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE, 32)

        # Set the title of the screen window
        pygame.display.set_caption("Kamera")

        # Loop until the user clicks the close button.
        self._done = False

        # Kinect runtime object, we want only color and body frames 
        #self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)

        # Create and set logger
        logger = createConsoleLogger(LoggerLevel.Debug)
        setGlobalLogger(logger)

        self._fn = Freenect2()
        self._num_devices = self._fn.enumerateDevices()
        if self._num_devices == 0:
            print("No device connected!")
            sys.exit(1)

        self._serial = self._fn.getDeviceSerialNumber(0)
        self._device = self._fn.openDevice(self._serial, pipeline=self._pipeline)

        self._listener = SyncMultiFrameListener(
            FrameType.Color | FrameType.Ir | FrameType.Depth)

        # Register listeners
        self._device.setColorFrameListener(self._listener)
        self._device.setIrAndDepthFrameListener(self._listener)

        self._device.start()

        # NOTE: must be called after device.start()
        self._registration = Registration(self._device.getIrCameraParams(),
                                    self._device.getColorCameraParams())

        self._undistorted = Frame(512, 424, 4)
        self._registered = Frame(512, 424, 4)

        # Optional parameters for registration
        # set True if you need them
        self._need_bigdepth = False
        self._need_color_depth_map = False

        # Back-buffer surface for Kinect color frames (32-bit color),
        # sized to the registered frame below.
        #self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width, self._kinect.color_frame_desc.Height), 0, 32)
        self._bigdepth = Frame(1920, 1082, 4) if self._need_bigdepth else None
        self._color_depth_map = np.zeros((424, 512),  np.int32).ravel() if self._need_color_depth_map else None
        self._frame_surface = pygame.Surface((self._registered.width, self._registered.height), 0, 32)
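Drawing a registered frame into the pygame surface created above could look like the following sketch (hypothetical method; the registered frame is BGRX, so channels are reversed before building an RGB surface):

    def _draw_registered_frame(self):
        # Hypothetical sketch: blit the latest registered color image
        # onto the back-buffer surface.
        frames = self._listener.waitForNewFrame()
        self._registration.apply(frames["color"], frames["depth"],
                                 self._undistorted, self._registered)
        img = self._registered.asarray(np.uint8)
        rgb = img[:, :, 2::-1]  # BGRX -> RGB
        surface = pygame.image.frombuffer(
            rgb.tobytes(),
            (self._registered.width, self._registered.height), "RGB")
        self._frame_surface.blit(surface, (0, 0))
        self._listener.release(frames)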
#!/usr/bin/env python3

import numpy as np
import cv2
import sys
import face_recognition

# TODO ADD THIS
from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame
from pylibfreenect2 import setGlobalLogger

setGlobalLogger(None)
try:
    print("OpenGL Pipeline")
    from pylibfreenect2 import OpenGLPacketPipeline
    pipeline = OpenGLPacketPipeline()
except Exception:
    try:
        print("OpenCL Pipeline")
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
    except Exception:
        print("CPU Pipeline")
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()

# TODO Update daisy eye with these params...
RGB_W = 1920
RGB_H = 1080
Example #18
def CaptureVideo():
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set True if you need them
    need_bigdepth = True
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512), np.int32).ravel() \
        if need_color_depth_map else None

    # cap = cv2.VideoCapture(0)
    res_old = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]

    while True:
        # Capture frame-by-frame
        # ret, frame = cap.read()
        frames = listener.waitForNewFrame()
        img = frames["color"].asarray()[:, :, :3]
        frame = cv2.resize(img, (int(1920 / 3), int(1080 / 3)))

        start = time.time()
        # Display the resulting frame
        box, conf, cls = eval_video_detection(net_detection_, frame)

        p_box = []
        for i in range(len(box)):
            p_box_per_person = {}
            if int(cls[i]) == 15 and conf[i] > 0.0:
                p_box_per_person['p1'] = (box[i][0], box[i][1])
                p_box_per_person['p2'] = (box[i][2], box[i][3])
                p_box_per_person['height_difference'] = box[i][3] - box[i][1]
                p_box_per_person['weight_difference'] = box[i][2] - box[i][0]
                p_box_per_person['conf'] = conf[i]
                p_box_per_person['cls'] = int(cls[i])
                p_box.append(p_box_per_person)

        # print p_box
        if len(p_box) == 0:
            cv2.putText(frame, Category_labels_[11], (30, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 2)
            cv2.imshow('Webcam', frame)
        else:
            p_box = sorted(p_box,
                           key=lambda e: e.__getitem__('weight_difference'),
                           reverse=True)
            # Record the position and size of the crop box
            p_hh_low = float(p_box[0]['p1'][1]) - (
                1 / float(p_box[0]['p2'][1] - p_box[0]['p1'][1])) * 2000.0
            p_hh_high = float(p_box[0]['p2'][1]) + (
                1 / float(p_box[0]['p2'][1] - p_box[0]['p1'][1])) * 2000.0
            print p_box[0]['p2'][0] - p_box[0]['p1'][0]
            if p_box[0]['p2'][0] - p_box[0]['p1'][0] > 400:
                Penalty_ratio_factor = 30000.0
            elif p_box[0]['p2'][0] - p_box[0]['p1'][0] > 300:
                Penalty_ratio_factor = 15000.0
            elif p_box[0]['p2'][0] - p_box[0]['p1'][0] > 200:
                Penalty_ratio_factor = 8000.0
            elif p_box[0]['p2'][0] - p_box[0]['p1'][0] > 100:
                Penalty_ratio_factor = 3000.0
            else:
                Penalty_ratio_factor = 1000.0
            p_ww_low = float(p_box[0]['p1'][0]) - (
                1 / float(p_box[0]['p2'][0] - p_box[0]['p1'][0])
            ) * Penalty_ratio_factor
            p_ww_high = float(p_box[0]['p2'][0]) + (
                1 / float(p_box[0]['p2'][0] - p_box[0]['p1'][0])
            ) * Penalty_ratio_factor
            cropframe = frame[
                int(max(p_hh_low, 0)):int(min(p_hh_high, frame.shape[0])),
                int(max(p_ww_low, 0)):int(min(p_ww_high, frame.shape[1]))]
            res = eval_video_classification(net_classification_, cropframe,
                                            res_old)

            cv2.imshow('CropWebcam', cropframe)
            cv2.rectangle(frame, p_box[0]['p1'], p_box[0]['p2'], (0, 0, 255))
            p3 = (max(p_box[0]['p1'][0], 15), max(p_box[0]['p1'][1], 15))
            title = "%s:%.2f" % (CLASSES[int(15)], conf[i])
            cv2.putText(frame, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)

            print res
            res_old = res
            #print np.max(res)
            if np.max(res) < 0.9:
                cv2.putText(frame, Category_labels_[11], (30, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 2)
            else:
                Category_res = np.argmax(res)
                Category_res = min(Category_res, 11)
                # print Category_res
                cv2.putText(frame, Category_labels_[Category_res], (30, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 2)
            cv2.imshow('Webcam', frame)

        end = time.time()
        seconds = end - start
        # Calculate frames per second
        fps = 1 / seconds
        print("fps: {0}".format(fps))
        # Wait to press 'q' key for break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        listener.release(frames)
    # When everything done, release the capture
    # cap.release()
    device.stop()
    device.close()
    cv2.destroyAllWindows()
    return

save_frames = False
num_frames = 100
frame_i = 0
# Clear test frames if there are any left
if len(sys.argv) > 1:
    if sys.argv[1] == "test":
        files = glob.glob('/test_frames/*')
        save_frames = True
        for f in files:
            os.remove(f)

# Create and set logger
logger = createConsoleLogger(LoggerLevel.Debug)
setGlobalLogger(logger)

fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    print("No device connected!")
    sys.exit(1)

serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial, pipeline=pipeline)

listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                  | FrameType.Depth)

# Register listeners
device.setColorFrameListener(listener)
Example #20
def main(lata, longa, latb, longb):
    lata = conv.convert_gps(float(lata))
    longa = conv.convert_gps(float(longa))
    latb = conv.convert_gps(float(latb))
    longb = conv.convert_gps(float(longb))
    try:
        from pylibfreenect2 import OpenGLPacketPipeline
        pipeline = OpenGLPacketPipeline()
    except Exception:
        try:
            from pylibfreenect2 import OpenCLPacketPipeline
            pipeline = OpenCLPacketPipeline()
        except Exception:
            from pylibfreenect2 import CpuPacketPipeline
            pipeline = CpuPacketPipeline()
    print("Packet pipeline:", type(pipeline).__name__)
    # Create list for GPS locations of obstacles
    obs = []

    ###############################################################

    # Create and set logger
    logger = createConsoleLogger(LoggerLevel.Debug)
    setGlobalLogger(logger)

    fn = Freenect2()
    num_devices = fn.enumerateDevices()
    if num_devices == 0:
        print("No device connected!")
        sys.exit(1)

    serial = fn.getDeviceSerialNumber(0)
    device = fn.openDevice(serial, pipeline=pipeline)

    listener = SyncMultiFrameListener(FrameType.Color | FrameType.Ir
                                      | FrameType.Depth)

    # Register listeners
    device.setColorFrameListener(listener)
    device.setIrAndDepthFrameListener(listener)

    device.start()

    # NOTE: must be called after device.start()
    registration = Registration(device.getIrCameraParams(),
                                device.getColorCameraParams())

    undistorted = Frame(512, 424, 4)
    registered = Frame(512, 424, 4)

    # Optional parameters for registration
    # set True if you need them
    need_bigdepth = False
    need_color_depth_map = False

    bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
    color_depth_map = np.zeros((424, 512), np.int32).ravel() \
        if need_color_depth_map else None

    while True:
        frames = listener.waitForNewFrame()
        #time.sleep(5)
        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color,
                           depth,
                           undistorted,
                           registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        # NOTE for visualization:
        # cv2.imshow without OpenGL backend seems to be quite slow to draw all
        # things below. Try commenting out some imshow if you don't have a fast
        # visualization backend.
        #cv2.imshow("ir", ir.asarray() / 65535.)
        #time.sleep(2)
        cv2.imshow("depth", depth.asarray() / 4500.)

        #Comment the coming lines
        print('center value is:', depth.asarray(np.float32).item((212, 256)))
        print('down1 value is:', depth.asarray(np.float32).item((270, 256)))
        '''print('up1 value is:',depth.asarray(np.float32).item((150,256)))
		print('up2 value is:',depth.asarray(np.float32).item((100,256)))
		
		print('down2 value is:',depth.asarray(np.float32).item((350,256)))
		print('right1 value is:',depth.asarray(np.float32).item((212,300)))
		print('right2 value is:',depth.asarray(np.float32).item((212,350)))
		print('left1 value is:',depth.asarray(np.float32).item((212,200)))
		print('left2 value is:',depth.asarray(np.float32).item((212,150)))'''

        x = depth.asarray(np.float32)
        y = np.logical_and(np.greater(x, 1200), np.less(x, 1500))
        #print(np.extract(y, x))
        no_of_pixels = np.count_nonzero(np.extract(y, x))
        print(no_of_pixels)
        #get gps coordinate at this location########
        '''
        Assuming the GPS received is lat, long variables:
        obs.append([lat, long])
        This is to be retrieved in the path planning code.
        '''
        if no_of_pixels > 14000:  # approx distance from the camera = 1.5 m
            print('big Obstacle found, stop!!!')
            obs.append(findPosition())
        elif no_of_pixels > 8000:
            print('small Obstacle found!!')
            obs.append(findPosition())
        #cv2.imshow("color", cv2.resize(color.asarray(),(int(1920 / 3), int(1080 / 3))))
        #cv2.imshow("registered", registered.asarray(np.uint8))

        if need_bigdepth:
            cv2.imshow(
                "bigdepth",
                cv2.resize(bigdepth.asarray(np.float32),
                           (int(1920 / 3), int(1082 / 3))))
        if need_color_depth_map:
            cv2.imshow("color_depth_map", color_depth_map.reshape(424, 512))

        listener.release(frames)
        #time.sleep(20)
        key = cv2.waitKey(delay=1) & 0xFF
        if key == ord('q'):
            break

    #######################################################

    target(latb, longb, lata, longa)
    #stage3.py main  fnc
    #######################################################
    device.stop()
    device.close()
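A hedged entry-point sketch for this script (the original guard is not included in the listing); it assumes the four GPS values arrive as command-line arguments:

if __name__ == '__main__':
    # Hypothetical usage: python <script>.py <lata> <longa> <latb> <longb>
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])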