Example #1
    def liveview_thread(self):
        """
        Publishes one liveview frame.
        :return: None
        """
        try:
            # read next image
            self.success_liveview = False
            data = self.incoming.read(8)
            common = common_header(data)
            data = self.incoming.read(128)

            self.img_msg = CompressedImage()  # message to publish
            if common['payload_type'] == 1:  # jpeg frame
                payload = payload_header(data)
                image_file = self.incoming.read(payload['jpeg_data_size'])

                # fill message fields
                self.img_msg.header.seq = common['sequence_number']
                # time_stamp is in milliseconds; split into whole seconds and nanoseconds
                self.img_msg.header.stamp.secs = int(common['time_stamp'] // 1000)
                self.img_msg.header.stamp.nsecs = int((common['time_stamp'] % 1000) * 1000000)
                self.img_msg.header.frame_id = "right_sony_cam"

                self.img_msg.format = 'jpeg'
                self.img_msg.data = image_file
                # end fill

                # consume the padding bytes so the next read starts at a frame boundary
                self.incoming.read(payload['padding_size'])

                self.pub.publish(self.img_msg)

            self.success_liveview = True

        except Exception as err:
            rospy.logerr("Couldn't get liveview: %s" % err)
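
The snippet above relies on the common_header() and payload_header() helpers (imported from the pysony library used throughout these examples) to parse the liveview stream. As a rough guide, a minimal sketch of the 8-byte common-header parser, assuming the layout described in Sony's Camera Remote API liveview documentation (start byte, payload type, sequence number, millisecond timestamp) and the field names used here, could look like this:

    import struct

    def common_header(data):
        """Sketch of an 8-byte liveview common-header parser (layout assumed)."""
        start_byte, payload_type, sequence_number, time_stamp = struct.unpack(">BBHI", data)
        return {
            'start_byte': start_byte,            # expected to be 0xFF
            'payload_type': payload_type,        # 1 = JPEG image, 2 = frame information
            'sequence_number': sequence_number,  # frame counter
            'time_stamp': time_stamp,            # milliseconds
        }
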
Example #2
   def run(self):
      global options, grabber, decoder, display, image_copy

      if options.debug:
         print("using LiveView grabber")
   
      self.active = False
      self.photomode = False

      # grabber control signals
      self.event_start_stream = threading.Event()
      self.event_stop_stream = threading.Event()
      self.event_stopped_stream = threading.Event()
      self.event_terminate = threading.Event()
      self.event_terminated = threading.Event()

      # decoder control signals
      self.event_decoding = threading.Event()
      self.event_decoder_terminated = threading.Event()

      # display control signals
      self.lock_offscreen = threading.Semaphore()

      # export to other threads
      self.frame_count = 0
      grabber = self

      # Search for available camera
      if options.debug:
         print("searching for camera")

      search = ControlPoint()
      cameras = search.discover(1)

      if len(cameras):
         camera = SonyAPI(QX_ADDR=cameras[0])
      else:
         print("No camera found, aborting")
         return

      # Check if we need to do 'startRecMode'
      mode = camera.getAvailableApiList()

      # Need a better method to check for the presence of a camera
      if type(mode) != dict:
         print("No camera found, aborting")
         display.terminate_clicked()
         self.event_terminated.set()
         return

      # For those cameras which need it
      if 'startRecMode' in (mode['result'])[0]:
         camera.startRecMode()
         time.sleep(5)

         # and re-read capabilities
         mode = camera.getAvailableApiList()

      if options.debug:
         print("Versions: %s" % camera.getVersions())
         print("API List: %s" % mode)

      if 'setLiveviewFrameInfo' in (mode['result'])[0]:
         if options.info:
            camera.setLiveviewFrameInfo([{"frameInfo": True}])
         else:
            camera.setLiveviewFrameInfo([{"frameInfo": False}])

      if 'getAvailableLiveviewSize' in (mode['result'])[0]:
         if options.large and len((camera.getAvailableLiveviewSize()['result'])[0]) > 1:
            url = camera.liveview(["L"])
         else:
            url = camera.liveview()
      else:
         url = camera.liveview()

      incoming_image = None
      frame_info = None

      # Ensure that we're in correct mode (movie by default)
      mode = camera.getAvailableShootMode()
      if type(mode) == dict:
         if options.still:
            if (mode['result'])[0] != 'still':
               if 'still' in (mode['result'])[1]:
                  camera.setShootMode(["still"])
                  self.photomode = True
            else:
               self.photomode = True
         else:
            if (mode['result'])[0] != 'movie':
               if 'movie' in (mode['result'])[1]:
                  camera.setShootMode(["movie"])
               else:
                  self.photomode = True

      lst = SonyAPI.LiveviewStreamThread(url)
      lst.start()

      while not self.event_terminate.isSet():
         # Handle events from the camera (record start/stop)
         if self.frame_count % 50 == 0:
            mode = camera.getEvent(["false"])
         else:
            mode = None

         if mode and type(mode) == dict:
            status = mode['result'][1]
            if self.active == False and status['cameraStatus'] == 'MovieRecording':
               self.frame_count = 0
               self.start_time = datetime.datetime.now()
               self.active = True
               if options.debug:
                  print("started capture %s" % self.start_time)
            elif self.active == True and status['cameraStatus'] == 'IDLE':
               self.active = False
               self.end_time = datetime.datetime.now()
               if options.debug:
                  elapsed = self.end_time - self.start_time
                  print("Stopped capture: frames = %d, delta = %.2f, fps = %.2f" % \
                     (self.frame_count, elapsed.seconds + (float(elapsed.microseconds) / 1000000), \
                     self.frame_count / (elapsed.seconds + (float(elapsed.microseconds) / 1000000))))

         # read header, confirms image is also ready to read
         header = lst.get_header()

         if header:
            image_file = io.BytesIO(lst.get_latest_view())
            incoming_image = Image.open(image_file)
            frame_info = lst.get_frameinfo()

         if options.gui:
            # Correct display size if changed
            if incoming_image and ((incoming_image.size)[0] != display.width):
               if options.debug:
                  print("adjusted width from %d to %d" % (display.width, (incoming_image.size)[0]))
               display.width = (incoming_image.size)[0]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            if incoming_image and ((incoming_image.size)[1] != display.height):
               if options.debug:
                  print("adjusted height from %d to %d" % (display.height, (incoming_image.size)[1]))
               display.height = (incoming_image.size)[1]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            # copy image to the display
            if incoming_image:
               image_copy = incoming_image.convert("RGB")

               # draw frame info to image
               if frame_info:
                  for x in range(len(frame_info)):
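                     # frame-info coordinates are on a 0-10000 scale relative to the
                     # liveview image; map them to display pixels before drawing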
                     left = frame_info[x]['left'] * display.width / 10000
                     top = frame_info[x]['top'] * display.height / 10000
                     right = frame_info[x]['right'] * display.width / 10000
                     bottom = frame_info[x]['bottom'] * display.height / 10000

                     dr = ImageDraw.Draw(image_copy)
                     dr.line((left, top, left, bottom), fill="white", width=3)
                     dr.line((right, top, right, bottom), fill="white", width=3)
                     dr.line((left, top, right, top), fill="white", width=3)
                     dr.line((left, bottom, right, bottom), fill="white", width=3)

               display.copy_to_offscreen(image_copy)

         if options.debug and header:
            common = common_header(header)
            print("Frame: %d, %d, %s" % (common['sequence_number'], common['time_stamp'], datetime.datetime.now()))

         # count frames
         self.frame_count = self.frame_count + 1

         # handle events
         if self.event_start_stream.isSet():
            if self.photomode == True:
               camera.actTakePicture()
            else:
               camera.startMovieRec()
            self.event_start_stream.clear()

         if self.event_stop_stream.isSet():
            camera.stopMovieRec()
            self.event_stop_stream.clear()

         # give OS a breather
         #time.sleep(0.01)

      # declare that we're done...
      self.event_terminated.set()
      self.event_terminate.clear()
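
The run() loop above is meant to live on its own thread and be driven through the Event objects it creates. A hypothetical usage sketch (the LiveViewGrabber wrapper class name is assumed, not part of the example) might look like:

    import threading
    import time

    grabber_thread = LiveViewGrabber()              # assumed class exposing run() above
    worker = threading.Thread(target=grabber_thread.run)
    worker.start()

    time.sleep(1)                                   # run() creates its Event objects at start-up
    grabber_thread.event_start_stream.set()         # take a picture / start recording
    # ... later ...
    grabber_thread.event_stop_stream.set()          # stop movie recording
    grabber_thread.event_terminate.set()            # ask the loop to exit
    grabber_thread.event_terminated.wait()          # block until it acknowledges
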
Example #4
        pygame.HWSURFACE)
screen.set_alpha(None)

# Loop forever, or until user quits or presses 'ESC' key
pygame.event.set_allowed([pygame.QUIT, pygame.KEYDOWN])
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                done = True

    # read next image
    data = incoming.read(8)
    common = common_header(data)
    data = incoming.read(128)

    if common['payload_type'] == 1:
        payload = payload_header(data)
        image_file = io.BytesIO(incoming.read(payload['jpeg_data_size']))
        incoming_image = pygame.image.load(image_file).convert()
        if options.zoom:
            incoming_image = pygame.transform.scale(incoming_image, \
               (infoObject.current_w, infoObject.current_h))
        incoming.read(payload['padding_size'])
    elif common['payload_type'] == 2:
        frame_info = payload_header(data, 2)
        if frame_info['jpeg_data_size']:
            frame_sequence = common['sequence_number']
            frame_data = incoming.read(frame_info['jpeg_data_size'])
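
Both this snippet and Example #1 assume a payload_header() helper for the 128-byte payload header that follows the common header. A minimal sketch, with byte offsets assumed from the Camera Remote API liveview description rather than taken from pysony itself, might look like:

    import struct

    def payload_header(data, payload_type=1):
        """Sketch of a 128-byte payload-header parser (offsets assumed)."""
        header = {
            'start_code': data[0:4],                                         # expected 0x24 0x35 0x68 0x79
            'jpeg_data_size': struct.unpack(">I", b"\x00" + data[4:7])[0],   # 3-byte payload size
            'padding_size': struct.unpack(">B", data[7:8])[0],               # padding after the payload
        }
        if payload_type == 2:                                                # frame-information payload
            header['frame_count'] = struct.unpack(">H", data[10:12])[0]      # number of frame records
            header['frame_size'] = struct.unpack(">H", data[12:14])[0]       # bytes per frame record
        return header
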
Example #5
    def run(self):
        global options, grabber, decoder, display, image_copy

        if options.debug:
            print "using LiveView grabber"

        self.active = False
        self.photomode = False

        # grabber control signals
        self.event_start_stream = threading.Event()
        self.event_stop_stream = threading.Event()
        self.event_stopped_stream = threading.Event()
        self.event_terminate = threading.Event()
        self.event_terminated = threading.Event()

        # decoder control signals
        self.event_decoding = threading.Event()
        self.event_decoder_terminated = threading.Event()

        # display control signals
        self.lock_offscreen = threading.Semaphore()

        # export to other threads
        self.frame_count = 0
        grabber = self

        # Search for available camera
        if options.debug:
            print "searching for camera"

        search = ControlPoint()
        cameras = search.discover(1)

        if len(cameras):
            camera = SonyAPI(QX_ADDR=cameras[0])
        else:
            print "No camera found, aborting"
            return

        # Check if we need to do 'startRecMode'
        mode = camera.getAvailableApiList()

        # Need a better method to check for the presence of a camera
        if type(mode) != dict:
            print "No camera found, aborting"
            display.terminate_clicked()
            self.event_terminated.set()
            return

        # For those cameras which need it
        if 'startRecMode' in (mode['result'])[0]:
            camera.startRecMode()
            time.sleep(5)

            # and re-read capabilities
            mode = camera.getAvailableApiList()

        if options.debug:
            print "Versions:", camera.getVersions()
            print "API List:", mode

        if 'setLiveviewFrameInfo' in (mode['result'])[0]:
            if options.info:
                camera.setLiveviewFrameInfo([{"frameInfo": True}])
            else:
                camera.setLiveviewFrameInfo([{"frameInfo": False}])

        if 'getAvailableLiveviewSize' in (mode['result'])[0]:
            if options.large and len(
                (camera.getAvailableLiveviewSize()['result'])[0]) > 1:
                incoming = camera.liveview(["L"])
            else:
                incoming = camera.liveview()
        else:
            incoming = camera.liveview()

        incoming_image = None
        frame_sequence = None
        frame_info = None
        frame_data = None

        # Ensure that we're in correct mode (movie by default)
        mode = camera.getAvailableShootMode()
        if type(mode) == dict:
            if options.still:
                if (mode['result'])[0] != 'still':
                    if 'still' in (mode['result'])[1]:
                        camera.setShootMode(["still"])
                        self.photomode = True
                else:
                    self.photomode = True
            else:
                if (mode['result'])[0] != 'movie':
                    if 'movie' in (mode['result'])[1]:
                        camera.setShootMode(["movie"])
                    else:
                        self.photomode = True

        while not self.event_terminate.isSet():
            # Handle events from the camera (record start/stop)
            if self.frame_count % 50 == 0:
                mode = camera.getEvent(["false"])
            else:
                mode = None

            if mode and type(mode) == dict:
                status = mode['result'][1]
                if self.active == False and status[
                        'cameraStatus'] == 'MovieRecording':
                    self.frame_count = 0
                    self.start_time = datetime.datetime.now()
                    self.active = True
                    if options.debug:
                        print "started capture", self.start_time
                elif self.active == True and status['cameraStatus'] == 'IDLE':
                    self.active = False
                    self.end_time = datetime.datetime.now()
                    if options.debug:
                        elapsed = self.end_time - self.start_time
                        print "Stopped capture: frames = ", self.frame_count,
                        print ", delta = ", elapsed.seconds + (
                            float(elapsed.microseconds) / 1000000),
                        print ", fps = ", self.frame_count / (
                            elapsed.seconds +
                            (float(elapsed.microseconds) / 1000000))

            # read next image
            data = incoming.read(8)
            common = common_header(data)
            data = incoming.read(128)

            if common['payload_type'] == 1:
                payload = payload_header(data)
                image_file = io.BytesIO(
                    incoming.read(payload['jpeg_data_size']))
                incoming_image = Image.open(image_file)
                incoming.read(payload['padding_size'])
            elif common['payload_type'] == 2:
                frame_info = payload_header(data, 2)
                if frame_info['jpeg_data_size']:
                    frame_sequence = common['sequence_number']
                    frame_data = incoming.read(frame_info['jpeg_data_size'])
                    incoming.read(frame_info['padding_size'])

            if options.gui == True:
                # Correct display size if changed
                if incoming_image and (
                    (incoming_image.size)[0] != display.width):
                    if options.debug:
                        print "adjusted width from", display.width, "to", (
                            incoming_image.size)[0]
                    display.width = (incoming_image.size)[0]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
                                                       False, 8, display.width,
                                                       display.height)

                if incoming_image and (
                    (incoming_image.size)[1] != display.height):
                    if options.debug:
                        print "adjusted height from", display.height, "to", (
                            incoming_image.size)[1]
                    display.height = (incoming_image.size)[1]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
                                                       False, 8, display.width,
                                                       display.height)

                # copy image to the display
                if incoming_image:
                    image_copy = incoming_image.convert("RGB")

                    # only draw recent frame info onto the image
                    if frame_info and frame_sequence >= common['sequence_number']-1 \
                          and frame_info['jpeg_data_size']:
                        for x in range(frame_info['frame_count']):
                            x = x * frame_info['frame_size']
                            (left, top, right,
                             bottom) = struct.unpack(">HHHH",
                                                     frame_data[x:x + 8])
                            left = left * display.width / 10000
                            top = top * display.height / 10000
                            right = right * display.width / 10000
                            bottom = bottom * display.height / 10000

                            dr = ImageDraw.Draw(image_copy)
                            dr.line((left, top, left, bottom),
                                    fill="white",
                                    width=3)
                            dr.line((right, top, right, bottom),
                                    fill="white",
                                    width=3)
                            dr.line((left, top, right, top),
                                    fill="white",
                                    width=3)
                            dr.line((left, bottom, right, bottom),
                                    fill="white",
                                    width=3)

                    display.copy_to_offscreen(image_copy)

            if options.debug:
                print "Frame:", common['sequence_number'], common[
                    'time_stemp'], datetime.datetime.now()

            # count frames
            self.frame_count = self.frame_count + 1

            # handle events
            if self.event_start_stream.isSet():
                if self.photomode == True:
                    camera.actTakePicture()
                else:
                    camera.startMovieRec()
                self.event_start_stream.clear()

            if self.event_stop_stream.isSet():
                camera.stopMovieRec()
                self.event_stop_stream.clear()

            # give OS a breather
            #time.sleep(0.01)

        # declare that we're done...
        self.event_terminated.set()
        self.event_terminate.clear()
Example #6
# Loop forever, or until user quits or presses 'ESC' key
pygame.event.set_allowed([pygame.QUIT, pygame.KEYDOWN])

rate = rate_eval()

while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                done = True

    # read next image
    data = incoming.read(8)
    common = common_header(data)
    data = incoming.read(128)

    if common['payload_type'] == 1:
        payload = payload_header(data)
        image_file = io.BytesIO(incoming.read(payload['jpeg_data_size']))

        # Check display rate is better than capture rate
        # only really needed on slow computers (ie. Raspberry Pi)
        # (a sketch of the rate_eval helper follows this example)
        if rate.too_slow(common['time_stamp']):
            incoming.read(payload['padding_size'])
            continue

        incoming_image = pygame.image.load(image_file).convert()
        if options.zoom:
            incoming_image = pygame.transform.scale(incoming_image, \
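
The fragment above skips frames when rate.too_slow() reports that displaying is falling behind the stream; the rate_eval helper itself is not shown. A hypothetical sketch, comparing how fast the camera timestamps advance against local wall-clock time, could look like:

    import time

    class rate_eval(object):
        """Hypothetical frame-rate check: report when display lags the stream."""
        def __init__(self):
            self.last_stamp = None    # camera timestamp of the previous frame (ms)
            self.last_wall = None     # wall-clock time of the previous frame (s)

        def too_slow(self, time_stamp):
            now = time.time()
            if self.last_stamp is None:
                self.last_stamp, self.last_wall = time_stamp, now
                return False
            stream_delta = (time_stamp - self.last_stamp) / 1000.0   # camera time elapsed
            wall_delta = now - self.last_wall                        # local time elapsed
            self.last_stamp, self.last_wall = time_stamp, now
            return wall_delta > stream_delta                         # behind: drop this frame
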
Example #7
    def run(self):
        global options, grabber, decoder, display, image_copy

        if options.debug:
            print "using LiveView grabber"

        self.active = False
        self.photomode = False

        # grabber control signals
        self.event_start_stream = threading.Event()
        self.event_stop_stream = threading.Event()
        self.event_stopped_stream = threading.Event()
        self.event_terminate = threading.Event()
        self.event_terminated = threading.Event()

        # decoder control signals
        self.event_decoding = threading.Event()
        self.event_decoder_terminated = threading.Event()

        # display control signals
        self.lock_offscreen = threading.Semaphore()

        # export to other threads
        self.frame_count = 0
        grabber = self

        # Search for available camera
        if options.debug:
            print "searching for camera"

        search = ControlPoint()
        cameras = search.discover(1)

        if len(cameras):
            camera = SonyAPI(QX_ADDR=cameras[0])
        else:
            print "No camera found, aborting"
            return

        # Check if we need to do 'startRecMode'
        mode = camera.getAvailableApiList()

        # Need a better method to check for the presence of a camera
        if type(mode) != dict:
            print "No camera found, aborting"
            display.terminate_clicked()
            self.event_terminated.set()
            return

        # For those cameras which need it
        if "startRecMode" in (mode["result"])[0]:
            camera.startRecMode()
            time.sleep(5)

            # and re-read capabilities
            mode = camera.getAvailableApiList()

        if options.debug:
            print "Versions:", camera.getVersions()
            print "API List:", mode

        if "setLiveviewFrameInfo" in (mode["result"])[0]:
            if options.info:
                camera.setLiveviewFrameInfo([{"frameInfo": True}])
            else:
                camera.setLiveviewFrameInfo([{"frameInfo": False}])

        if "getAvailableLiveviewSize" in (mode["result"])[0]:
            if options.large and len((camera.getAvailableLiveviewSize()["result"])[0]) > 1:
                incoming = camera.liveview(["L"])
            else:
                incoming = camera.liveview()
        else:
            incoming = camera.liveview()

        incoming_image = None
        frame_sequence = None
        frame_info = None
        frame_data = None

        # Ensure that we're in correct mode (movie by default)
        mode = camera.getAvailableShootMode()
        if type(mode) == dict:
            if options.still:
                if (mode["result"])[0] != "still":
                    if "still" in (mode["result"])[1]:
                        camera.setShootMode(["still"])
                        self.photomode = True
                else:
                    self.photomode = True
            else:
                if (mode["result"])[0] != "movie":
                    if "movie" in (mode["result"])[1]:
                        camera.setShootMode(["movie"])
                    else:
                        self.photomode = True

        while not self.event_terminate.isSet():
            # Handle events from the camera (record start/stop)
            if self.frame_count % 50 == 0:
                mode = camera.getEvent(["false"])
            else:
                mode = None

            if mode and type(mode) == dict:
                status = mode["result"][1]
                if self.active == False and status["cameraStatus"] == "MovieRecording":
                    self.frame_count = 0
                    self.start_time = datetime.datetime.now()
                    self.active = True
                    if options.debug:
                        print "started capture", self.start_time
                elif self.active == True and status["cameraStatus"] == "IDLE":
                    self.active = False
                    self.end_time = datetime.datetime.now()
                    if options.debug:
                        elapsed = self.end_time - self.start_time
                        print "Stopped capture: frames = ", self.frame_count,
                        print ", delta = ", elapsed.seconds + (float(elapsed.microseconds) / 1000000),
                        print ", fps = ", self.frame_count / (elapsed.seconds + (float(elapsed.microseconds) / 1000000))

            # read next image
            data = incoming.read(8)
            common = common_header(data)
            data = incoming.read(128)

            if common["payload_type"] == 1:
                payload = payload_header(data)
                image_file = io.BytesIO(incoming.read(payload["jpeg_data_size"]))
                incoming_image = Image.open(image_file)
                incoming.read(payload["padding_size"])
            elif common["payload_type"] == 2:
                frame_info = payload_header(data, 2)
                if frame_info["jpeg_data_size"]:
                    frame_sequence = common["sequence_number"]
                    frame_data = incoming.read(frame_info["jpeg_data_size"])
                    incoming.read(frame_info["padding_size"])

            if options.gui == True:
                # Correct display size if changed
                if incoming_image and ((incoming_image.size)[0] != display.width):
                    if options.debug:
                        print "adjusted width from", display.width, "to", (incoming_image.size)[0]
                    display.width = (incoming_image.size)[0]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, display.width, display.height)

                if incoming_image and ((incoming_image.size)[1] != display.height):
                    if options.debug:
                        print "adjusted height from", display.height, "to", (incoming_image.size)[1]
                    display.height = (incoming_image.size)[1]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, display.width, display.height)

                # copy image to the display
                if incoming_image:
                    image_copy = incoming_image.convert("RGB")

                    # only draw recent frame info onto the image
                    if frame_info and frame_sequence >= common["sequence_number"] - 1 and frame_info["jpeg_data_size"]:
                        for x in range(frame_info["frame_count"]):
                            x = x * frame_info["frame_size"]
                            (left, top, right, bottom) = struct.unpack(">HHHH", frame_data[x : x + 8])
                            left = left * display.width / 10000
                            top = top * display.height / 10000
                            right = right * display.width / 10000
                            bottom = bottom * display.height / 10000

                            dr = ImageDraw.Draw(image_copy)
                            dr.line((left, top, left, bottom), fill="white", width=3)
                            dr.line((right, top, right, bottom), fill="white", width=3)
                            dr.line((left, top, right, top), fill="white", width=3)
                            dr.line((left, bottom, right, bottom), fill="white", width=3)

                    display.copy_to_offscreen(image_copy)

            if options.debug:
                print "Frame:", common["sequence_number"], common["time_stemp"], datetime.datetime.now()

            # count frames
            self.frame_count = self.frame_count + 1

            # handle events
            if self.event_start_stream.isSet():
                if self.photomode == True:
                    camera.actTakePicture()
                else:
                    camera.startMovieRec()
                self.event_start_stream.clear()

            if self.event_stop_stream.isSet():
                camera.stopMovieRec()
                self.event_stop_stream.clear()

            # give OS a breather
            # time.sleep(0.01)

        # declare that we're done...
        self.event_terminated.set()
        self.event_terminate.clear()