Example #1
0
def liveview():
    """Discover the first available camera, start its liveview stream and
    print the latest JPEG frame for one minute.

    Exits the process via ``quit()`` when no camera answers discovery.
    """
    search = ControlPoint()
    cameras = search.discover(5)

    if len(cameras):
        camera = SonyAPI(QX_ADDR=cameras[0])
    else:
        print("No camera found, aborting")
        quit()

    # Some models must be switched into shooting mode before liveview works.
    mode = camera.getAvailableApiList()
    if 'startRecMode' in (mode['result'])[0]:
        camera.startRecMode()
        time.sleep(2)

    url = camera.liveview()

    lst = SonyAPI.LiveviewStreamThread(url)
    lst.start()
    # Fixed log typo ("startedr.") and restored the polling loop that had
    # been left commented out, stranding the Python-2 print statement below
    # it at the wrong indentation (a syntax error under Python 3).
    print('LiveviewStreamThread started.')
    t_end = time.time() + 60 * 1
    i = 0
    while time.time() < t_end:
        print(lst.get_latest_view())
        i += 1
    print(i)
Example #2
0
    def find_camera(self, duration=3):
        """
        Uses the discover protocol and sets communication with one camera.

        Args:
            duration: seconds to spend on SSDP discovery.

        Side effects: sets self.cameras, self.cam (None when nothing is
        found) and self.mode (the camera's available-API list).
        """
        # get available camera
        search = ControlPoint()
        self.cameras = search.discover(duration)

        # use first found camera
        if len(self.cameras):
            self.cam = SonyAPI(self.cameras[0])
        else:
            self.cam = None
            rospy.logerr("No camera found")
            return

        self.mode = self.cam.getAvailableApiList()

        # some cameras need startRecMode call
        # not needed in QX1
        # BUG FIX: the API name is 'startRecMode'; it was misspelled
        # 'starRecMode', so the call was never issued on cameras that
        # actually require it.
        if 'startRecMode' in (self.mode['result'])[0]:
            self.cam.startRecMode()
            time.sleep(5)
        else:
            rospy.loginfo("No startRecMode available")

        # re-read capabilities
        self.mode = self.cam.getAvailableApiList()

        rospy.loginfo("Found camera")
        return
def liveview():
    """Set up the first discovered camera for liveview streaming.

    Returns the stream thread's ``get_latest_view`` bound method, a
    zero-argument callable producing the most recent JPEG frame.
    """
    # Locate a camera on the network (5 s discovery window).
    search = ControlPoint()
    cameras = search.discover(5)

    if not cameras:
        print("No camera found, aborting")
        quit()
    camera = SonyAPI(QX_ADDR=cameras[0])

    mode = camera.getAvailableApiList()

    # Certain models refuse liveview until `startRecMode` has been called;
    # models that lack the call simply skip this branch.
    if 'startRecMode' in mode['result'][0]:
        camera.startRecMode()
        time.sleep(2)

    sizes = camera.getLiveviewSize()
    print('Supported liveview size:', sizes)
    # url = camera.liveview("M")
    url = camera.liveview()

    stream_thread = SonyAPI.LiveviewStreamThread(url)
    stream_thread.start()
    print('[i] LiveviewStreamThread started.')
    return stream_thread.get_latest_view
def liveview():
    """Discover the first camera, start a liveview stream thread and
    return its ``get_latest_view`` callable (yields the newest frame).

    Exits the process via ``quit()`` when discovery finds nothing.
    """
    # Connect and set-up camera
    search = ControlPoint()
    cameras =  search.discover(5)

    if len(cameras):
        camera = SonyAPI(QX_ADDR=cameras[0])
    else:
        print("No camera found, aborting")
        quit()

    mode = camera.getAvailableApiList()

    # some cameras need `startRecMode` before we can use liveview
    #   For those camera which doesn't require this, just comment out the following 2 lines
    if 'startRecMode' in (mode['result'])[0]:
        camera.startRecMode()
        time.sleep(2)

    sizes = camera.getLiveviewSize()
    print('Supported liveview size:', sizes)
    # url = camera.liveview("M")
    url = camera.liveview()

    # Background thread keeps pulling frames; caller polls get_latest_view.
    lst = SonyAPI.LiveviewStreamThread(url)
    lst.start()
    print('[i] LiveviewStreamThread started.')
    return lst.get_latest_view
Example #5
0
def getImage():
    """Serve full-resolution captures over a TCP socket.

    Discovers a camera, then listens on port 52347; for every incoming
    connection it triggers actTakePicture, downloads the resulting JPEG
    and sends the raw bytes back to the client.  Loops forever.
    """
    search = ControlPoint()
    cameras = search.discover(5)

    if len(cameras):
        camera = SonyAPI(QX_ADDR=cameras[0])

    else:
        print("No camera found, aborting")
        quit()

    # Some models must be switched into shooting mode first.
    mode = camera.getAvailableApiList()
    if 'startRecMode' in (mode['result'])[0]:
        camera.startRecMode()
        time.sleep(2)

    import socket

    s = socket.socket()
    print("Socket successfully created")

    port = 52347

    s.bind(('', port))
    print("socket binded to %s" % (port))

    s.listen(5)
    print("socket is listening")

    while True:

        c, addr = s.accept()
        print('Got connection from %s' % (addr,))
        toSend = camera.actTakePicture()
        pic_url = toSend['result'][0][0]
        img = urllib2.urlopen(pic_url).read()
        # BUG FIX: the payload is raw bytes — str(img) would transmit the
        # textual repr on Python 3, and send() may stop short of the full
        # buffer; sendall() guarantees the whole image is written.
        c.sendall(img)
        c.close()
Example #6
0
    def __init__(self, picture_size, preview_size, zoom=30, ssid="DIRECT-LKE0:ILCE-6000", pw="UeyxbxAG",
                 iface="wlp2s0"):
        """Connect to a Sony camera over Wi-Fi Direct and initialise the API.

        Uses nmcli to join the camera's access point (remembering the
        previously active connection so it can be restored later), then
        discovers the camera and reads its available-API list.

        Args:
            picture_size: passed through to Camera.__init__.
            preview_size: preview resolution; also sizes the initial
                black placeholder frame.
            zoom: passed through to Camera.__init__.
            ssid: SSID of the camera's Wi-Fi Direct network.
            pw: Wi-Fi password — currently unused; nmcli relies on a
                pre-configured connection profile.
            iface: wireless interface name — currently unused.

        Raises:
            CameraException: pysony missing, SSID not visible, nmcli
                failure, or no camera discovered.
        """
        Camera.__init__(self, picture_size, preview_size, zoom, type='sony_wifi')
        self.live_stream = None
        if not sony_enabled:
            raise CameraException("pysony module not installed")
        # Remember the currently active connection (row 1 of nmcli output,
        # below the header line) so it can be restored on teardown.
        active = subprocess.check_output(
            ["nmcli", "--fields", "NAME", "c", "show", "--active"]).decode("utf-8")
        self.previous_wifi = active.split("\n")[1].strip()
        self.sony_api_version = "1.0"
        if self.previous_wifi == ssid:
            self.previous_wifi = ""
        else:
            try:
                wifi_list = subprocess.check_output(
                    ["nmcli", "--fields", "SSID", "device", "wifi"]).decode("utf-8").split("\n")
                wifi_list = [entry.strip() for entry in wifi_list]

                if ssid in wifi_list:
                    subprocess.check_call(["nmcli", "con", "up", "id", ssid])
                else:
                    raise CameraException("Sony Wifi not found")

            except subprocess.CalledProcessError:
                raise CameraException("Cannot connect to wifi")
        search = ControlPoint()
        cameras = search.discover()
        # Black placeholder shown until the first real preview frame arrives.
        self.last_preview_frame = Image.new('RGB', preview_size, (0, 0, 0))

        if len(cameras):
            self.camera = SonyAPI(QX_ADDR=cameras[0])
        else:
            raise CameraException("No camera found")

        options = self.camera.getAvailableApiList()['result'][0]
        logging.info(str(options))
   def run(self):
      """Main loop of the liveview grabber thread.

      Discovers a Sony camera, configures liveview (frame info / size),
      then pumps JPEG frames from a LiveviewStreamThread into the GTK
      display until event_terminate is set.  Record / shutter requests
      from the GUI arrive via event_start_stream / event_stop_stream.
      Publishes itself through the module-level `grabber` global.
      """
      global options, grabber, decoder, display, image_copy

      if options.debug:
         print("using LiveView grabber")

      self.active = False
      self.photomode = False

      # grabber control signals
      self.event_start_stream = threading.Event()
      self.event_stop_stream = threading.Event()
      self.event_stopped_stream = threading.Event()
      self.event_terminate = threading.Event()
      self.event_terminated = threading.Event()

      # decoder control signals
      self.event_decoding = threading.Event()
      self.event_decoder_terminated = threading.Event()

      # display control signals
      self.lock_offscreen = threading.Semaphore()

      # export to other threads
      self.frame_count = 0
      grabber = self

      # Search for available camera
      if options.debug:
         print("searching for camera")

      search = ControlPoint()
      cameras =  search.discover(1)

      if len(cameras):
         camera = SonyAPI(QX_ADDR=cameras[0])
      else:
         print("No camera found, aborting")
         return

      # Check if we need to do 'startRecMode'
      mode = camera.getAvailableApiList()

      # Need a better method to check for the presence of a camera
      if type(mode) != dict:
         print("No camera found, aborting")
         display.terminate_clicked()
         self.event_terminated.set()
         return

      # For those cameras which need it
      if 'startRecMode' in (mode['result'])[0]:
         camera.startRecMode()
         time.sleep(5)

         # and re-read capabilities
         mode = camera.getAvailableApiList()

      if options.debug:
         print("Versions: %s" % camera.getVersions())
         print("API List: %s" % mode)

      if 'setLiveviewFrameInfo' in (mode['result'])[0]:
         if options.info:
            camera.setLiveviewFrameInfo([{"frameInfo": True}])
         else:
            camera.setLiveviewFrameInfo([{"frameInfo": False}])

      if 'getAvailableLiveviewSize' in (mode['result'])[0]:
         if options.large and len((camera.getAvailableLiveviewSize()['result'])[0]) > 1:
            url = camera.liveview(["L"])
         else:
            url = camera.liveview()
      else:
         url = camera.liveview()

      incoming_image = None
      frame_info = None

      # Ensure that we're in correct mode (movie by default)
      mode = camera.getAvailableShootMode()
      if type(mode) == dict:
         if options.still:
            if (mode['result'])[0] != 'still':
               if 'still' in (mode['result'])[1]:
                  camera.setShootMode(["still"])
                  self.photomode = True
            else:
               self.photomode = True
         else:
            if (mode['result'])[0] != 'movie':
               if 'movie' in (mode['result'])[1]:
                  camera.setShootMode(["movie"])
               else:
                  self.photomode = True

      lst = SonyAPI.LiveviewStreamThread(url)
      lst.start()

      while not self.event_terminate.isSet():
         # Handle events from the camera (record start/stop)
         # Poll getEvent only every 50th frame — it is a slow round-trip.
         if self.frame_count % 50 == 0:
            mode = camera.getEvent(["false"])
         else:
            mode = None

         if mode and type(mode) == dict:
            status = mode['result'][1]
            if self.active == False and status['cameraStatus'] == 'MovieRecording':
               self.frame_count = 0
               self.start_time = datetime.datetime.now()
               self.active = True
               if options.debug:
                  print("started capture %s" % self.start_time)
            elif self.active == True and status['cameraStatus'] == 'IDLE':
               self.active = False
               self.end_time = datetime.datetime.now()
               if options.debug:
                  elapsed = self.end_time - self.start_time
                  '''
                  print("Stopped capture: frames = ", self.frame_count, \
                     ", delta = ", elapsed.seconds + (float(elapsed.microseconds) / 1000000), \
                     ", fps = ", self.frame_count / (elapsed.seconds + (float(elapsed.microseconds) / 1000000)))
                  '''
                  print("Stopped capture: frames = %d, delta = %d, fps = %d" % \
                     (self.frame_count, elapsed.seconds + (float(elapsed.microseconds) / 1000000), \
                     self.frame_count / (elapsed.seconds + (float(elapsed.microseconds) / 1000000))))

         # read header, confirms image is also ready to read
         header = lst.get_header()

         if header:
            image_file = io.BytesIO(lst.get_latest_view())
            incoming_image = Image.open(image_file)
            frame_info = lst.get_frameinfo()

         if options.gui == True :
            # Correct display size if changed
            if incoming_image and ((incoming_image.size)[0] != display.width):
               if options.debug:
                  print("adjusted width from %d to %d" % (display.width, (incoming_image.size)[0]))
               display.width = (incoming_image.size)[0]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            if incoming_image and ((incoming_image.size)[1] != display.height):
               if options.debug:
                  print("adjusted height from %d to %d" % (display.height, (incoming_image.size)[1]))
               display.height = (incoming_image.size)[1]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            # copy image to the display
            if incoming_image:
               image_copy = incoming_image.convert("RGB")

               # draw frame info to image
               # coordinates arrive in 1/10000ths of the image span
               if frame_info:
                  for x in range(len(frame_info)):
                     left = frame_info[x]['left'] * display.width / 10000
                     top = frame_info[x]['top'] * display.height / 10000
                     right = frame_info[x]['right'] * display.width / 10000
                     bottom = frame_info[x]['bottom'] * display.height / 10000

                     dr = ImageDraw.Draw(image_copy)
                     dr.line((left, top, left, bottom), fill="white", width=3)
                     dr.line((right, top, right, bottom), fill="white", width=3)
                     dr.line((left, top, right, top), fill="white", width=3)
                     dr.line((left, bottom, right, bottom), fill="white", width=3)

               display.copy_to_offscreen(image_copy)

         if options.debug and header:
            common = common_header(header)
            print("Frame: %d, %d, %s" % (common['sequence_number'], common['time_stamp'], datetime.datetime.now()))

         # count frames
         self.frame_count = self.frame_count + 1

         # handle events
         if self.event_start_stream.isSet():
            if self.photomode == True:
               camera.actTakePicture()
            else:
               camera.startMovieRec()
            self.event_start_stream.clear()

         if self.event_stop_stream.isSet():
            camera.stopMovieRec()
            self.event_stop_stream.clear()

         # give OS a breather
         #time.sleep(0.01)

      # declare that we're done...
      self.event_terminated.set()
      self.event_terminate.clear()
   def run(self):
      """Main loop of the liveview grabber thread.

      Discovers a Sony camera, configures liveview (frame info / size),
      then pumps JPEG frames from a LiveviewStreamThread into the GTK
      display until event_terminate is set.  Record / shutter requests
      from the GUI arrive via event_start_stream / event_stop_stream.
      Publishes itself through the module-level `grabber` global.
      """
      global options, grabber, decoder, display, image_copy

      if options.debug:
         print("using LiveView grabber")

      self.active = False
      self.photomode = False

      # grabber control signals
      self.event_start_stream = threading.Event()
      self.event_stop_stream = threading.Event()
      self.event_stopped_stream = threading.Event()
      self.event_terminate = threading.Event()
      self.event_terminated = threading.Event()

      # decoder control signals
      self.event_decoding = threading.Event()
      self.event_decoder_terminated = threading.Event()

      # display control signals
      self.lock_offscreen = threading.Semaphore()

      # export to other threads
      self.frame_count = 0
      grabber = self

      # Search for available camera
      if options.debug:
         print("searching for camera")

      search = ControlPoint()
      cameras =  search.discover(1)

      if len(cameras):
         camera = SonyAPI(QX_ADDR=cameras[0])
      else:
         print("No camera found, aborting")
         return

      # Check if we need to do 'startRecMode'
      mode = camera.getAvailableApiList()

      # Need a better method to check for the presence of a camera
      if type(mode) != dict:
         print("No camera found, aborting")
         display.terminate_clicked()
         self.event_terminated.set()
         return

      # For those cameras which need it
      if 'startRecMode' in (mode['result'])[0]:
         camera.startRecMode()
         time.sleep(5)

         # and re-read capabilities
         mode = camera.getAvailableApiList()

      if options.debug:
         print("Versions: %s" % camera.getVersions())
         print("API List: %s" % mode)

      if 'setLiveviewFrameInfo' in (mode['result'])[0]:
         if options.info:
            camera.setLiveviewFrameInfo([{"frameInfo": True}])
         else:
            camera.setLiveviewFrameInfo([{"frameInfo": False}])

      if 'getAvailableLiveviewSize' in (mode['result'])[0]:
         if options.large and len((camera.getAvailableLiveviewSize()['result'])[0]) > 1:
            url = camera.liveview(["L"])
         else:
            url = camera.liveview()
      else:
         url = camera.liveview()

      incoming_image = None
      frame_info = None

      # Ensure that we're in correct mode (movie by default)
      mode = camera.getAvailableShootMode()
      if type(mode) == dict:
         if options.still:
            if (mode['result'])[0] != 'still':
               if 'still' in (mode['result'])[1]:
                  camera.setShootMode(["still"])
                  self.photomode = True
            else:
               self.photomode = True
         else:
            if (mode['result'])[0] != 'movie':
               if 'movie' in (mode['result'])[1]:
                  camera.setShootMode(["movie"])
               else:
                  self.photomode = True

      lst = SonyAPI.LiveviewStreamThread(url)
      lst.start()

      while not self.event_terminate.isSet():
         # Handle events from the camera (record start/stop)
         # Poll getEvent only every 50th frame — it is a slow round-trip.
         if self.frame_count % 50 == 0:
            mode = camera.getEvent(["false"])
         else:
            mode = None

         if mode and type(mode) == dict:
            status = mode['result'][1]
            if self.active == False and status['cameraStatus'] == 'MovieRecording':
               self.frame_count = 0
               self.start_time = datetime.datetime.now()
               self.active = True
               if options.debug:
                  print("started capture %s" % self.start_time)
            elif self.active == True and status['cameraStatus'] == 'IDLE':
               self.active = False
               self.end_time = datetime.datetime.now()
               if options.debug:
                  elapsed = self.end_time - self.start_time
                  '''
                  print("Stopped capture: frames = ", self.frame_count, \
                     ", delta = ", elapsed.seconds + (float(elapsed.microseconds) / 1000000), \
                     ", fps = ", self.frame_count / (elapsed.seconds + (float(elapsed.microseconds) / 1000000)))
                  '''
                  print("Stopped capture: frames = %d, delta = %d, fps = %d" % \
                     (self.frame_count, elapsed.seconds + (float(elapsed.microseconds) / 1000000), \
                     self.frame_count / (elapsed.seconds + (float(elapsed.microseconds) / 1000000))))

         # read header, confirms image is also ready to read
         header = lst.get_header()

         if header:
            image_file = io.BytesIO(lst.get_latest_view())
            incoming_image = Image.open(image_file)
            frame_info = lst.get_frameinfo()

         if options.gui == True :
            # Correct display size if changed
            if incoming_image and ((incoming_image.size)[0] != display.width):
               if options.debug:
                  print("adjusted width from %d to %d" % (display.width, (incoming_image.size)[0]))
               display.width = (incoming_image.size)[0]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            if incoming_image and ((incoming_image.size)[1] != display.height):
               if options.debug:
                  print("adjusted height from %d to %d" % (display.height, (incoming_image.size)[1]))
               display.height = (incoming_image.size)[1]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            # copy image to the display
            if incoming_image:
               image_copy = incoming_image.convert("RGB")

               # draw frame info to image
               # coordinates arrive in 1/10000ths of the image span
               if frame_info:
                  for x in range(len(frame_info)):
                     left = frame_info[x]['left'] * display.width / 10000
                     top = frame_info[x]['top'] * display.height / 10000
                     right = frame_info[x]['right'] * display.width / 10000
                     bottom = frame_info[x]['bottom'] * display.height / 10000

                     dr = ImageDraw.Draw(image_copy)
                     dr.line((left, top, left, bottom), fill="white", width=3)
                     dr.line((right, top, right, bottom), fill="white", width=3)
                     dr.line((left, top, right, top), fill="white", width=3)
                     dr.line((left, bottom, right, bottom), fill="white", width=3)

               display.copy_to_offscreen(image_copy)

         if options.debug and header:
            common = common_header(header)
            print("Frame: %d, %d, %s" % (common['sequence_number'], common['time_stamp'], datetime.datetime.now()))

         # count frames
         self.frame_count = self.frame_count + 1

         # handle events
         if self.event_start_stream.isSet():
            if self.photomode == True:
               camera.actTakePicture()
            else:
               camera.startMovieRec()
            self.event_start_stream.clear()

         if self.event_stop_stream.isSet():
            camera.stopMovieRec()
            self.event_stop_stream.clear()

         # give OS a breather
         #time.sleep(0.01)

      # declare that we're done...
      self.event_terminated.set()
      self.event_terminate.clear()
                    help="Enable LiveFrameInfo (if available)")
# NOTE(review): the opening parser.add_argument(...) call that the line
# above closes lies outside this excerpt.
parser.add_argument("-z",
                    "--zoom",
                    action="store_true",
                    dest="zoom",
                    help="Zoom image to fill screen")
parser.add_argument("-s",
                    "--skip",
                    action="store_true",
                    dest="skip",
                    help="Skip frames if computer too slow")

options = parser.parse_args()

# Connect and set-up camera
# Discover a Sony camera via SSDP and wrap it in the SonyAPI client.
search = ControlPoint()
cameras = search.discover()

if len(cameras):
    camera = SonyAPI(QX_ADDR=cameras[0])
else:
    print("No camera found, aborting")
    quit()

mode = camera.getAvailableApiList()

# For those cameras which need it
# Some models must be switched into shooting mode before other calls work.
if 'startRecMode' in (mode['result'])[0]:
    camera.startRecMode()
    time.sleep(5)
# =====================================================================

# Command-line interface and camera setup for pygameLiveView.
parser = argparse.ArgumentParser(prog="pygameLiveView")

# General Options
parser.set_defaults(debug=None, file=None, width=None, height=None)
parser.add_argument("-l", "--large", action="store_true", dest="large", help="Use HighRes liveview (if available)" )
parser.add_argument("-i", "--info", action="store_true", dest="info", help="Enable LiveFrameInfo (if available)" )
parser.add_argument("-z", "--zoom", action="store_true", dest="zoom", help="Zoom image to fill screen" )
parser.add_argument("-s", "--skip", action="store_true", dest="skip", help="Skip frames if computer too slow" )

options = parser.parse_args()

# Connect and set-up camera
# Discover a Sony camera via SSDP and wrap it in the SonyAPI client.
search = ControlPoint()
cameras =  search.discover()

if len(cameras):
   camera = SonyAPI(QX_ADDR=cameras[0])
else:
   print("No camera found, aborting")
   quit()

mode = camera.getAvailableApiList()

# For those cameras which need it
# Some models must be switched into shooting mode before other calls work.
if 'startRecMode' in (mode['result'])[0]:
   camera.startRecMode()
   time.sleep(5)
Example #11
0
    def run(self):
        """Main loop of the liveview grabber thread (Python 2 variant).

        Discovers a Sony camera, configures liveview, then parses the
        liveview byte stream inline (common header + payload header per
        pysony's packet format) instead of using LiveviewStreamThread,
        drawing frames and focus-frame boxes to the GTK display until
        event_terminate is set.
        """
        global options, grabber, decoder, display, image_copy

        if options.debug:
            print "using LiveView grabber"

        self.active = False
        self.photomode = False

        # grabber control signals
        self.event_start_stream = threading.Event()
        self.event_stop_stream = threading.Event()
        self.event_stopped_stream = threading.Event()
        self.event_terminate = threading.Event()
        self.event_terminated = threading.Event()

        # decoder control signals
        self.event_decoding = threading.Event()
        self.event_decoder_terminated = threading.Event()

        # display control signals
        self.lock_offscreen = threading.Semaphore()

        # export to other threads
        self.frame_count = 0
        grabber = self

        # Search for available camera
        if options.debug:
            print "searching for camera"

        search = ControlPoint()
        cameras = search.discover(1)

        if len(cameras):
            camera = SonyAPI(QX_ADDR=cameras[0])
        else:
            print "No camera found, aborting"
            return

        # Check if we need to do 'startRecMode'
        mode = camera.getAvailableApiList()

        # Need a better method to check for the presence of a camera
        if type(mode) != dict:
            print "No camera found, aborting"
            display.terminate_clicked()
            self.event_terminated.set()
            return

        # For those cameras which need it
        if 'startRecMode' in (mode['result'])[0]:
            camera.startRecMode()
            time.sleep(5)

            # and re-read capabilities
            mode = camera.getAvailableApiList()

        if options.debug:
            print "Versions:", camera.getVersions()
            print "API List:", mode

        if 'setLiveviewFrameInfo' in (mode['result'])[0]:
            if options.info:
                camera.setLiveviewFrameInfo([{"frameInfo": True}])
            else:
                camera.setLiveviewFrameInfo([{"frameInfo": False}])

        if 'getAvailableLiveviewSize' in (mode['result'])[0]:
            if options.large and len(
                (camera.getAvailableLiveviewSize()['result'])[0]) > 1:
                incoming = camera.liveview(["L"])
            else:
                incoming = camera.liveview()
        else:
            incoming = camera.liveview()

        incoming_image = None
        frame_sequence = None
        frame_info = None
        frame_data = None

        # Ensure that we're in correct mode (movie by default)
        mode = camera.getAvailableShootMode()
        if type(mode) == dict:
            if options.still:
                if (mode['result'])[0] != 'still':
                    if 'still' in (mode['result'])[1]:
                        camera.setShootMode(["still"])
                        self.photomode = True
                else:
                    self.photomode = True
            else:
                if (mode['result'])[0] != 'movie':
                    if 'movie' in (mode['result'])[1]:
                        camera.setShootMode(["movie"])
                    else:
                        self.photomode = True

        while not self.event_terminate.isSet():
            # Handle events from the camera (record start/stop)
            # Poll getEvent only every 50th frame - it is a slow round-trip.
            if self.frame_count % 50 == 0:
                mode = camera.getEvent(["false"])
            else:
                mode = None

            if mode and type(mode) == dict:
                status = mode['result'][1]
                if self.active == False and status[
                        'cameraStatus'] == 'MovieRecording':
                    self.frame_count = 0
                    self.start_time = datetime.datetime.now()
                    self.active = True
                    if options.debug:
                        print "started capture", self.start_time
                elif self.active == True and status['cameraStatus'] == 'IDLE':
                    self.active = False
                    self.end_time = datetime.datetime.now()
                    if options.debug:
                        elapsed = self.end_time - self.start_time
                        print "Stopped capture: frames = ", self.frame_count,
                        print ", delta = ", elapsed.seconds + (
                            float(elapsed.microseconds) / 1000000),
                        print ", fps = ", self.frame_count / (
                            elapsed.seconds +
                            (float(elapsed.microseconds) / 1000000))

            # read next image
            # 8-byte common header, then 128-byte payload header.
            data = incoming.read(8)
            common = common_header(data)
            data = incoming.read(128)

            # payload_type 1 = JPEG frame, 2 = frame-info (focus boxes).
            if common['payload_type'] == 1:
                payload = payload_header(data)
                image_file = io.BytesIO(
                    incoming.read(payload['jpeg_data_size']))
                incoming_image = Image.open(image_file)
                incoming.read(payload['padding_size'])
            elif common['payload_type'] == 2:
                frame_info = payload_header(data, 2)
                if frame_info['jpeg_data_size']:
                    frame_sequence = common['sequence_number']
                    frame_data = incoming.read(frame_info['jpeg_data_size'])
                    incoming.read(frame_info['padding_size'])

            if options.gui == True:
                # Correct display size if changed
                if incoming_image and (
                    (incoming_image.size)[0] != display.width):
                    if options.debug:
                        print "adjusted width from", display.width, "to", (
                            incoming_image.size)[0]
                    display.width = (incoming_image.size)[0]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
                                                       False, 8, display.width,
                                                       display.height)

                if incoming_image and (
                    (incoming_image.size)[1] != display.height):
                    if options.debug:
                        print "adjusted height from", display.height, "to", (
                            incoming_image.size)[1]
                    display.height = (incoming_image.size)[1]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
                                                       False, 8, display.width,
                                                       display.height)

                # copy image to the display
                if incoming_image:
                    image_copy = incoming_image.convert("RGB")

                    # only recent frame info to image
                    # coordinates arrive in 1/10000ths of the image span
                    if frame_info and frame_sequence >= common['sequence_number']-1 \
                          and frame_info['jpeg_data_size']:
                        for x in range(frame_info['frame_count']):
                            x = x * frame_info['frame_size']
                            (left, top, right,
                             bottom) = struct.unpack(">HHHH",
                                                     frame_data[x:x + 8])
                            left = left * display.width / 10000
                            top = top * display.height / 10000
                            right = right * display.width / 10000
                            bottom = bottom * display.height / 10000

                            dr = ImageDraw.Draw(image_copy)
                            dr.line((left, top, left, bottom),
                                    fill="white",
                                    width=3)
                            dr.line((right, top, right, bottom),
                                    fill="white",
                                    width=3)
                            dr.line((left, top, right, top),
                                    fill="white",
                                    width=3)
                            dr.line((left, bottom, right, bottom),
                                    fill="white",
                                    width=3)

                    display.copy_to_offscreen(image_copy)

            if options.debug:
                # NOTE(review): 'time_stemp' looks misspelled but matches the
                # key name emitted by pysony's common_header in some versions
                # — verify against the installed pysony before "fixing" it.
                print "Frame:", common['sequence_number'], common[
                    'time_stemp'], datetime.datetime.now()

            # count frames
            self.frame_count = self.frame_count + 1

            # handle events
            if self.event_start_stream.isSet():
                if self.photomode == True:
                    camera.actTakePicture()
                else:
                    camera.startMovieRec()
                self.event_start_stream.clear()

            if self.event_stop_stream.isSet():
                camera.stopMovieRec()
                self.event_stop_stream.clear()

            # give OS a breather
            #time.sleep(0.01)

        # declare that we're done...
        self.event_terminated.set()
        self.event_terminate.clear()
Example #12
0
    def run(self):
        """Grabber thread main loop.

        Discovers a Sony camera on the network, negotiates shoot mode
        (movie by default, still if options.still), opens the liveview
        stream, and then loops: parsing liveview packets, pushing frames
        to the GTK display, and reacting to start/stop events raised by
        other threads.  Exits when self.event_terminate is set.

        Side effects: publishes itself via the module-level `grabber`,
        and writes decoded frames to the module-level `image_copy`.
        """
        global options, grabber, decoder, display, image_copy

        if options.debug:
            print "using LiveView grabber"

        # active: True while the camera reports MovieRecording
        # photomode: True when we trigger stills instead of movie rec
        self.active = False
        self.photomode = False

        # grabber control signals (set/cleared by other threads)
        self.event_start_stream = threading.Event()
        self.event_stop_stream = threading.Event()
        self.event_stopped_stream = threading.Event()
        self.event_terminate = threading.Event()
        self.event_terminated = threading.Event()

        # decoder control signals
        self.event_decoding = threading.Event()
        self.event_decoder_terminated = threading.Event()

        # display control signals
        self.lock_offscreen = threading.Semaphore()

        # export to other threads
        self.frame_count = 0
        grabber = self

        # Search for available camera (1 s SSDP discovery window)
        if options.debug:
            print "searching for camera"

        search = ControlPoint()
        cameras = search.discover(1)

        # Use the first camera found; bail out if none responded.
        if len(cameras):
            camera = SonyAPI(QX_ADDR=cameras[0])
        else:
            print "No camera found, aborting"
            return

        # Check if we need to do 'startRecMode'
        mode = camera.getAvailableApiList()

        # Need a better method to check for the presence of a camera
        # (a non-dict reply means the API call failed entirely)
        if type(mode) != dict:
            print "No camera found, aborting"
            display.terminate_clicked()
            self.event_terminated.set()
            return

        # For those cameras which need it: enter remote-shooting mode,
        # then wait for the camera to settle before continuing.
        if "startRecMode" in (mode["result"])[0]:
            camera.startRecMode()
            time.sleep(5)

            # and re-read capabilities (the API list changes after
            # startRecMode on these models)
            mode = camera.getAvailableApiList()

        if options.debug:
            print "Versions:", camera.getVersions()
            print "API List:", mode

        # Ask the camera to embed focus-frame info in the liveview
        # stream only when the user requested the overlay.
        if "setLiveviewFrameInfo" in (mode["result"])[0]:
            if options.info:
                camera.setLiveviewFrameInfo([{"frameInfo": True}])
            else:
                camera.setLiveviewFrameInfo([{"frameInfo": False}])

        # Prefer the large ("L") liveview size when requested and the
        # camera offers more than one size; otherwise take the default.
        if "getAvailableLiveviewSize" in (mode["result"])[0]:
            if options.large and len((camera.getAvailableLiveviewSize()["result"])[0]) > 1:
                incoming = camera.liveview(["L"])
            else:
                incoming = camera.liveview()
        else:
            incoming = camera.liveview()

        # Parser state carried across loop iterations.
        incoming_image = None
        frame_sequence = None
        frame_info = None
        frame_data = None

        # Ensure that we're in correct mode (movie by default)
        mode = camera.getAvailableShootMode()
        if type(mode) == dict:
            if options.still:
                # result[0] is the current mode, result[1] the available ones
                if (mode["result"])[0] != "still":
                    if "still" in (mode["result"])[1]:
                        camera.setShootMode(["still"])
                        self.photomode = True
                else:
                    self.photomode = True
            else:
                if (mode["result"])[0] != "movie":
                    if "movie" in (mode["result"])[1]:
                        camera.setShootMode(["movie"])
                    else:
                        # camera cannot do movie; fall back to stills
                        self.photomode = True

        while not self.event_terminate.isSet():
            # Handle events from the camera (record start/stop).
            # Poll getEvent only every 50th frame to keep the stream fast.
            if self.frame_count % 50 == 0:
                mode = camera.getEvent(["false"])
            else:
                mode = None

            if mode and type(mode) == dict:
                status = mode["result"][1]
                if self.active == False and status["cameraStatus"] == "MovieRecording":
                    # recording just started: reset frame/fps bookkeeping
                    self.frame_count = 0
                    self.start_time = datetime.datetime.now()
                    self.active = True
                    if options.debug:
                        print "started capture", self.start_time
                elif self.active == True and status["cameraStatus"] == "IDLE":
                    # recording just stopped: report achieved frame rate
                    self.active = False
                    self.end_time = datetime.datetime.now()
                    if options.debug:
                        elapsed = self.end_time - self.start_time
                        print "Stopped capture: frames = ", self.frame_count,
                        print ", delta = ", elapsed.seconds + (float(elapsed.microseconds) / 1000000),
                        print ", fps = ", self.frame_count / (elapsed.seconds + (float(elapsed.microseconds) / 1000000))

            # read next image: 8-byte common header, then a 128-byte
            # payload header, then payload-type-specific data.
            data = incoming.read(8)
            common = common_header(data)
            data = incoming.read(128)

            if common["payload_type"] == 1:
                # type 1: a JPEG liveview frame
                payload = payload_header(data)
                image_file = io.BytesIO(incoming.read(payload["jpeg_data_size"]))
                incoming_image = Image.open(image_file)
                incoming.read(payload["padding_size"])
            elif common["payload_type"] == 2:
                # type 2: frame info (focus-frame rectangles)
                # NOTE(review): when jpeg_data_size is 0 the padding is
                # not consumed here — presumably zero-size payloads carry
                # no padding; confirm against the liveview spec.
                frame_info = payload_header(data, 2)
                if frame_info["jpeg_data_size"]:
                    frame_sequence = common["sequence_number"]
                    frame_data = incoming.read(frame_info["jpeg_data_size"])
                    incoming.read(frame_info["padding_size"])

            if options.gui == True:
                # Correct display size if changed (camera may switch
                # liveview resolution mid-stream); recreate the offscreen
                # pixbuf to match.
                if incoming_image and ((incoming_image.size)[0] != display.width):
                    if options.debug:
                        print "adjusted width from", display.width, "to", (incoming_image.size)[0]
                    display.width = (incoming_image.size)[0]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, display.width, display.height)

                if incoming_image and ((incoming_image.size)[1] != display.height):
                    if options.debug:
                        print "adjusted height from", display.height, "to", (incoming_image.size)[1]
                    display.height = (incoming_image.size)[1]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, display.width, display.height)

                # copy image to the display
                if incoming_image:
                    image_copy = incoming_image.convert("RGB")

                    # only recent frame info to image: draw rectangles
                    # only if the info is at most one frame old
                    if frame_info and frame_sequence >= common["sequence_number"] - 1 and frame_info["jpeg_data_size"]:
                        for x in range(frame_info["frame_count"]):
                            # each record is frame_size bytes; the first 8
                            # are four big-endian uint16 coordinates,
                            # apparently scaled to 0..10000 of the image
                            x = x * frame_info["frame_size"]
                            (left, top, right, bottom) = struct.unpack(">HHHH", frame_data[x : x + 8])
                            left = left * display.width / 10000
                            top = top * display.height / 10000
                            right = right * display.width / 10000
                            bottom = bottom * display.height / 10000

                            # draw the focus rectangle as four white lines
                            dr = ImageDraw.Draw(image_copy)
                            dr.line((left, top, left, bottom), fill="white", width=3)
                            dr.line((right, top, right, bottom), fill="white", width=3)
                            dr.line((left, top, right, top), fill="white", width=3)
                            dr.line((left, bottom, right, bottom), fill="white", width=3)

                    display.copy_to_offscreen(image_copy)

            if options.debug:
                # "time_stemp" is the key produced by common_header —
                # keep the (mis)spelling in sync with that helper
                print "Frame:", common["sequence_number"], common["time_stemp"], datetime.datetime.now()

            # count frames
            self.frame_count = self.frame_count + 1

            # handle events raised by the UI thread: trigger a still or
            # start movie recording, then acknowledge by clearing
            if self.event_start_stream.isSet():
                if self.photomode == True:
                    camera.actTakePicture()
                else:
                    camera.startMovieRec()
                self.event_start_stream.clear()

            if self.event_stop_stream.isSet():
                camera.stopMovieRec()
                self.event_stop_stream.clear()

            # give OS a breather
            # time.sleep(0.01)

        # declare that we're done...
        self.event_terminated.set()
        self.event_terminate.clear()