Example No. 1
def liveview():

    search = ControlPoint()
    cameras = search.discover(5)

    if len(cameras):
        camera = SonyAPI(QX_ADDR=cameras[0])
    else:
        print("No camera found, aborting")
        quit()

    mode = camera.getAvailableApiList()
    if 'startRecMode' in (mode['result'])[0]:
        camera.startRecMode()
        time.sleep(2)

    #sizes = camera.getLiveviewSize()
    #print('Supported liveview size:', sizes)

    url = camera.liveview()

    lst = SonyAPI.LiveviewStreamThread(url)
    lst.start()
    print('LiveviewStreamThread started.')

    # Poll the stream for about one minute, printing the latest JPEG frame each pass
    t_end = time.time() + 60
    i = 0
    while time.time() < t_end:
        print(lst.get_latest_view())
        i += 1
    print(i)
def liveview():
    # Connect and set-up camera
    search = ControlPoint()
    cameras = search.discover(5)

    if len(cameras):
        camera = SonyAPI(QX_ADDR=cameras[0])
    else:
        print("No camera found, aborting")
        quit()

    mode = camera.getAvailableApiList()

    # Some cameras need `startRecMode` before liveview can be used;
    # for cameras that don't require it, the next two lines can be commented out.
    if 'startRecMode' in (mode['result'])[0]:
        camera.startRecMode()
        time.sleep(2)

    sizes = camera.getLiveviewSize()
    print('Supported liveview size:', sizes)
    # url = camera.liveview("M")
    url = camera.liveview()

    lst = SonyAPI.LiveviewStreamThread(url)
    lst.start()
    print('[i] LiveviewStreamThread started.')
    return lst.get_latest_view
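A hedged usage sketch for the function above: it returns the stream thread's get_latest_view callable, which a caller can poll for the most recent JPEG frame. The one-second pause and output filename are illustrative, not part of the original.

get_frame = liveview()               # returns lst.get_latest_view (a callable)
time.sleep(1)                        # give the stream thread a moment to receive a frame
with open("frame.jpg", "wb") as f:   # example output path
    f.write(get_frame())             # latest liveview frame as raw JPEG bytes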
Example No. 4
def getImage():

    search = ControlPoint()
    cameras = search.discover(5)

    if len(cameras):
        camera = SonyAPI(QX_ADDR=cameras[0])

    else:
        print("No camera found, aborting")
        quit()

    mode = camera.getAvailableApiList()
    if 'startRecMode' in (mode['result'])[0]:
        camera.startRecMode()
        time.sleep(2)

    import socket

    s = socket.socket()
    print("Socket successfully created")

    port = 52347

    s.bind(('', port))
    print("socket bound to %s" % port)

    s.listen(5)
    print("socket is listening")

    while True:

        c, addr = s.accept()
        print('Got connection from %s' % str(addr))
        toSend = camera.actTakePicture()
        pic_url = toSend['result'][0][0]
        img = urllib2.urlopen(pic_url).read()
        c.send(img)  # img is already the raw JPEG bytes
        c.close()
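A hedged client-side counterpart to the socket server above; the host address and output path are examples. It connects, reads until the server closes the connection, and writes the received JPEG to disk.

import socket

HOST = "192.168.122.1"   # example address of the machine running getImage()
PORT = 52347             # must match the port used by the server above

client = socket.socket()
client.connect((HOST, PORT))
chunks = []
while True:
    chunk = client.recv(4096)
    if not chunk:        # the server closes the connection after sending the image
        break
    chunks.append(chunk)
client.close()

with open("picture.jpg", "wb") as f:
    f.write(b"".join(chunks))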
Example No. 5
   def run(self):
      global options, grabber, decoder, display, image_copy

      if options.debug:
         print("using LiveView grabber")
   
      self.active = False
      self.photomode = False

      # grabber control signals
      self.event_start_stream = threading.Event()
      self.event_stop_stream = threading.Event()
      self.event_stopped_stream = threading.Event()
      self.event_terminate = threading.Event()
      self.event_terminated = threading.Event()

      # decoder control signals
      self.event_decoding = threading.Event()
      self.event_decoder_terminated = threading.Event()

      # display control signals
      self.lock_offscreen = threading.Semaphore()

      # export to other threads
      self.frame_count = 0
      grabber = self

      # Search for available camera
      if options.debug:
         print("searching for camera")

      search = ControlPoint()
      cameras =  search.discover(1)

      if len(cameras):
         camera = SonyAPI(QX_ADDR=cameras[0])
      else:
         print("No camera found, aborting")
         return

      # Check if we need to do 'startRecMode'
      mode = camera.getAvailableApiList()

      # Need a better method to check for the presence of a camera
      if type(mode) != dict:
         print("No camera found, aborting")
         display.terminate_clicked()
         self.event_terminated.set()
         return

      # For those cameras which need it
      if 'startRecMode' in (mode['result'])[0]:
         camera.startRecMode()
         time.sleep(5)

         # and re-read capabilities
         mode = camera.getAvailableApiList()

      if options.debug:
         print("Versions: %s" % camera.getVersions())
         print("API List: %s" % mode)

      if 'setLiveviewFrameInfo' in (mode['result'])[0]:
         if options.info:
            camera.setLiveviewFrameInfo([{"frameInfo": True}])
         else:
            camera.setLiveviewFrameInfo([{"frameInfo": False}])

      if 'getAvailableLiveviewSize' in (mode['result'])[0]:
         if options.large and len((camera.getAvailableLiveviewSize()['result'])[0]) > 1:
            url = camera.liveview(["L"])
         else:
            url = camera.liveview()
      else:
         url = camera.liveview()

      incoming_image = None
      frame_info = None

      # Ensure that we're in correct mode (movie by default)
      mode = camera.getAvailableShootMode()
      if type(mode) == dict:
         if options.still:
            if (mode['result'])[0] != 'still':
               if 'still' in (mode['result'])[1]:
                  camera.setShootMode(["still"])
                  self.photomode = True
            else:
               self.photomode = True
         else:
            if (mode['result'])[0] != 'movie':
               if 'movie' in (mode['result'])[1]:
                  camera.setShootMode(["movie"])
               else:
                  self.photomode = True

      lst = SonyAPI.LiveviewStreamThread(url)
      lst.start()

      while not self.event_terminate.isSet():
         # Handle events from the camera (record start/stop)
         if self.frame_count % 50 == 0:
            mode = camera.getEvent(["false"])
         else:
            mode = None

         if mode and type(mode) == dict:
            status = mode['result'][1]
            if self.active == False and status['cameraStatus'] == 'MovieRecording':
               self.frame_count = 0
               self.start_time = datetime.datetime.now()
               self.active = True
               if options.debug:
                  print("started capture %s" % self.start_time)
            elif self.active == True and status['cameraStatus'] == 'IDLE':
               self.active = False
               self.end_time = datetime.datetime.now()
               if options.debug:
                  elapsed = self.end_time - self.start_time
                  duration = elapsed.seconds + (float(elapsed.microseconds) / 1000000)
                  print("Stopped capture: frames = %d, delta = %.2f, fps = %.2f" %
                     (self.frame_count, duration, self.frame_count / duration))

         # read header, confirms image is also ready to read
         header = lst.get_header()

         if header:
            image_file = io.BytesIO(lst.get_latest_view())
            incoming_image = Image.open(image_file)
            frame_info = lst.get_frameinfo()

         if options.gui == True :
            # Correct display size if changed
            if incoming_image and ((incoming_image.size)[0] != display.width):
               if options.debug:
                  print("adjusted width from %d to %d" % (display.width, (incoming_image.size)[0]))
               display.width = (incoming_image.size)[0]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            if incoming_image and ((incoming_image.size)[1] != display.height):
               if options.debug:
                  print("adjusted height from %d to %d" % (display.height, (incoming_image.size)[1]))
               display.height = (incoming_image.size)[1]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            # copy image to the display
            if incoming_image:
               image_copy = incoming_image.convert("RGB")

               # draw frame info to image
               if frame_info:
                  for x in range(len(frame_info)):
                     left = frame_info[x]['left'] * display.width / 10000
                     top = frame_info[x]['top'] * display.height / 10000
                     right = frame_info[x]['right'] * display.width / 10000
                     bottom = frame_info[x]['bottom'] * display.height / 10000

                     dr = ImageDraw.Draw(image_copy)
                     dr.line((left, top, left, bottom), fill="white", width=3)
                     dr.line((right, top, right, bottom), fill="white", width=3)
                     dr.line((left, top, right, top), fill="white", width=3)
                     dr.line((left, bottom, right, bottom), fill="white", width=3)

               display.copy_to_offscreen(image_copy)

         if options.debug and header:
            common = common_header(header)
            print("Frame: %d, %d, %s" % (common['sequence_number'], common['time_stamp'], datetime.datetime.now()))

         # count frames
         self.frame_count = self.frame_count + 1

         # handle events
         if self.event_start_stream.isSet():
            if self.photomode == True:
               camera.actTakePicture()
            else:
               camera.startMovieRec()
            self.event_start_stream.clear()

         if self.event_stop_stream.isSet():
            camera.stopMovieRec()
            self.event_stop_stream.clear()

         # give OS a breather
         #time.sleep(0.01)

      # declare that we're done...
      self.event_terminated.set()
      self.event_terminate.clear()
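The examples here rely on pysony's common_header and payload_header helpers. As a rough illustration (not the library's own code), the liveview stream carries an 8-byte common header (start byte, payload type, sequence number, millisecond timestamp) followed by a 128-byte payload header whose first bytes give the JPEG size and padding; a type-2 payload carries frame-info rectangles instead of a JPEG. The sketch below parses only the fields these examples use.

import struct

def parse_common_header(data):
    # 8 bytes: start byte, payload type, 16-bit sequence number, 32-bit timestamp (ms)
    start_byte, payload_type, sequence, timestamp = struct.unpack(">BBHI", data[:8])
    return {"start_byte": start_byte,
            "payload_type": payload_type,
            "sequence_number": sequence,
            "time_stamp": timestamp}

def parse_payload_header(data):
    # first bytes of the 128-byte payload header: 4-byte start code,
    # 3-byte JPEG data size, 1-byte padding size (the rest is reserved)
    jpeg_size = struct.unpack(">I", b"\x00" + data[4:7])[0]
    padding = struct.unpack(">B", data[7:8])[0]
    return {"jpeg_data_size": jpeg_size, "padding_size": padding}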
Example No. 7
                    dest="skip",
                    help="Skip frames if computer too slow")

options = parser.parse_args()

# Connect and set-up camera
search = ControlPoint()
cameras = search.discover()

if len(cameras):
    camera = SonyAPI(QX_ADDR=cameras[0])
else:
    print("No camera found, aborting")
    quit()

mode = camera.getAvailableApiList()

# For those cameras which need it
if 'startRecMode' in (mode['result'])[0]:
    camera.startRecMode()
    time.sleep(5)

    # and re-read capabilities
    mode = camera.getAvailableApiList()

if 'setLiveviewFrameInfo' in (mode['result'])[0]:
    if options.info:
        camera.setLiveviewFrameInfo([{"frameInfo": True}])
    else:
        camera.setLiveviewFrameInfo([{"frameInfo": False}])
Example No. 8
   def run(self):
      global options, grabber, decoder, display, image_copy

      if options.debug:
         print "using LiveView grabber"
   
      self.active = False

      # grabber control signals
      self.event_start_stream = threading.Event()
      self.event_stop_stream = threading.Event()
      self.event_stopped_stream = threading.Event()
      self.event_terminate = threading.Event()
      self.event_terminated = threading.Event()

      # decoder control signals
      self.event_decoding = threading.Event()
      self.event_decoder_terminated = threading.Event()

      # display control signals
      self.lock_offscreen = threading.Semaphore()

      # export to other threads
      self.frame_count = 0
      grabber = self

      camera = SonyAPI()

      # Check if we need to do 'startRecMode'
      mode = camera.getAvailableApiList()

      # Need a better method to check for the presence of a camera
      if type(mode) != dict:
         print "No camera found, aborting"
         display.terminate_clicked()
         self.event_terminated.set()
         return

      if options.debug:
         print "Versions:", camera.getVersions()
         print "API List:", mode

      # For those cameras which need it
      if 'startRecMode' in (mode['result'])[0]:
         camera.startRecMode()

      # Ensure that we're in 'Movie' mode
      mode = camera.getAvailableShootMode()
      if type(mode) == dict:
         if (mode['result'])[0] != 'movie':
            if 'movie' in (mode['result'])[1]:
               camera.setShootMode(["movie"])
            else:
               print "'movie' mode not supported"

      # Wait for camera to be ready and check whether we're recording
      mode = camera.getEvent(["true"])
      if options.debug:
         print "Event:", mode
      if type(mode) == dict:
         status = mode['result'][1]
         if status['cameraStatus'] == 'MovieRecording':
            self.active = True
            self.start_time = datetime.datetime.now()

      incoming = camera.liveview()

      while not self.event_terminate.isSet():
         if options.gui == True :
            # read next image: skip the 8-byte common header, then read the
            # 128-byte payload header to get the JPEG size and padding
            data = incoming.read(8)
            data = incoming.read(128)
            payload = payload_header(data)
            image_file = io.BytesIO(incoming.read(payload['jpeg_data_size']))
            incoming_image = Image.open(image_file)
            incoming.read(payload['padding_size'])

            # set initial width/height
            if display.offscreen == None:
               display.width = (incoming_image.size)[0]
               display.height = (incoming_image.size)[1]
               if options.debug:
                  print "Display set to",(incoming_image.size)[0],"x", (incoming_image.size)[1]

               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            # Correct display size if changed
            if ((incoming_image.size)[0] != display.width):
               if options.debug:
                  print "adjusted width from", display.width, "to", (incoming_image.size)[0]
               display.width = (incoming_image.size)[0]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            if ((incoming_image.size)[1] != display.height):
               if options.debug:
                  print "adjusted height from", display.height, "to", (incoming_image.size)[1]
               display.height = (incoming_image.size)[1]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            # copy image to the display
            image_copy = incoming_image.convert("RGB")
            display.copy_to_offscreen(image_copy)

         if self.event_start_stream.isSet():
            camera.startMovieRec()
            self.frame_count = 0

            # and we're ready to go..
            self.start_time = datetime.datetime.now()
            self.event_start_stream.clear()

         # loop until request to stop
         if self.active == True:
            while not self.event_stop_stream.isSet():
               if options.gui == True :
                  # read next image
                  data = incoming.read(8)
                  data = incoming.read(128)
                  payload = payload_header(data)
                  image_file = io.BytesIO(incoming.read(payload['jpeg_data_size']))
                  incoming_image = Image.open(image_file)
                  incoming.read(payload['padding_size'])

                  image_copy = incoming_image.convert("RGB")
                  display.copy_to_offscreen(image_copy)

                  # copy image to the display
                  display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, 
                     display.width, display.height * 2)
                  display.copy_to_offscreen(image_copy)
  
               self.frame_count = self.frame_count + 1

               if self.event_terminate.isSet():
                  break

         if self.event_stop_stream.isSet():
            # clean up
            camera.stopMovieRec()
            self.end_time = datetime.datetime.now()
            self.event_stop_stream.clear()
            self.active = False

         time.sleep(0.1)

      # declare that we're done...
      self.event_terminated.set()
      self.event_terminate.clear()
Example No. 9
class CameraHandler:
    def __init__(self):
        self.find_camera()
        self.hdpic_seq = 0
        self.hdpicture = None
        self.hdpic_tstamp = Time(0, 0)
        self.hdpic_msg = CompressedImage()  # message to publish
        self.cameras = None
        self.cam = None
        self.mode = None
        self.hdpic_resp = None
        self.polled_image_resp = None
        self.hdpic_pub = None
        self.incoming = None
        self.img_msg = CompressedImage()
        self.pub = None
        self.success_liveview = False

    def find_camera(self, duration=3):
        """
        Uses the discover protocol and sets communication with one camera.
        """
        # get available camera
        search = ControlPoint()
        self.cameras = search.discover(duration)

        # use first found camera
        if len(self.cameras):
            self.cam = SonyAPI(self.cameras[0])
        else:
            self.cam = None
            rospy.logerr("No camera found")
            return

        self.mode = self.cam.getAvailableApiList()

        # some cameras need the startRecMode call
        # (not needed on the QX1)
        if 'startRecMode' in (self.mode['result'])[0]:
            self.cam.startRecMode()
            time.sleep(5)
        else:
            rospy.loginfo("No startRecMode available")

        # re-read capabilities
        self.mode = self.cam.getAvailableApiList()

        rospy.loginfo("Found camera")
        return

    def polled_image_error(self, msg):
        """
        Returns a failed GetPolledImageResponse.
        :param msg: Error message to return with GetPolledImageResponse.
        :return: GetPolledImageResponse.
        """
        rospy.logerr(msg)
        return GetPolledImageResponse(success=False,
                                      status_message=msg,
                                      stamp=Time(0))

    def take_pic_thread(self):
        """
        Takes one hd picture and saves it to polled_image_resp.
        :return: None
        """
        self.polled_image_resp = None
        try:
            self.cam.setShootMode(param=['still'])

            # set timestamp for picture
            now = time.time()
            self.hdpic_tstamp = Time(now)

            # get status snapshot of cam
            event = self.cam.getEvent(param=[False])

            if "error" in event:
                self.polled_image_resp = self.polled_image_error(
                    str(event['error']))
                return

            # check if is available to take pic
            if event['result'][1]['cameraStatus'] != 'IDLE':
                rospy.loginfo("Camera is busy")
                self.polled_image_resp = self.polled_image_error(
                    "Camera is busy")
                return

            # take pic
            self.hdpic_resp = self.cam.actTakePicture()
            if 'error' in self.hdpic_resp:
                rospy.logerr(self.hdpic_resp['error'])
                self.polled_image_resp = self.polled_image_error(
                    str(self.hdpic_resp['error']))
                return

            # download pic
            url = self.hdpic_resp['result'][0][0].replace('\\', '')
            self.hdpicture = urllib2.urlopen(url).read()
            self.hdpic_seq += 1  # increment sequence counter

            rospy.loginfo("Picture taken")

            # publish one pic
            self.pub_hdpic()
            # service response
            self.polled_image_resp = GetPolledImageResponse(
                success=True,
                status_message="Picture taken",
                stamp=self.hdpic_tstamp)
            return

        except Exception as err:
            rospy.logerr(str(err))
            self.polled_image_resp = self.polled_image_error(
                "Couldn't take picture")
            return

    def take_picture(self, req):
        """
        Callback function to handle polled camera requests.
        :return: None
        """
        # starts take pic thread and waits TIMEOUT_TAKEPIC seconds to complete
        t_takepic = Thread(target=self.take_pic_thread)
        t_takepic.start()
        t_takepic.join(TIMEOUT_TAKEPIC)

        # check whether the take-picture action was successful
        if self.polled_image_resp:
            return self.polled_image_resp
        else:
            return self.polled_image_error(
                "Couldn't take picture, it took too long")

    def set_serv_pic(self):
        """
        Sets the service to take pictures.
        :return: None
        """
        # service setup
        s = rospy.Service('sony_cam/request_image', GetPolledImage,
                          self.take_picture)
        rospy.loginfo("Ready to take pictures")

        # waits for requests
        rospy.spin()

    def prep_pub_hdpic(self):
        """
        Set up of hd picture publisher.
        """
        self.hdpic_pub = rospy.Publisher('hdpicture/compressed',
                                         CompressedImage,
                                         queue_size=1)

    def pub_hdpic(self):
        """
        Publishes one hdpicture message.
        """
        # fill message fields
        self.hdpic_msg.header.seq = self.hdpic_seq
        self.hdpic_msg.header.stamp = self.hdpic_tstamp
        self.hdpic_msg.header.frame_id = "right_sony_cam"

        self.hdpic_msg.format = 'jpeg'
        self.hdpic_msg.data = self.hdpicture
        # end fill

        self.hdpic_pub.publish(self.hdpic_msg)

    def liveview_thread(self):
        """
        Publishes one liveview frame.
        :return: None
        """
        try:
            # read next image
            self.success_liveview = False
            data = self.incoming.read(8)
            common = common_header(data)
            data = self.incoming.read(128)

            self.img_msg = CompressedImage()  # message to publish
            if common['payload_type'] == 1:  # jpeg frame
                payload = payload_header(data)
                image_file = self.incoming.read(payload['jpeg_data_size'])

                # fill message fields (the liveview timestamp is in milliseconds)
                self.img_msg.header.seq = common['sequence_number']
                self.img_msg.header.stamp.secs = int(common['time_stamp'] / 1000)
                self.img_msg.header.stamp.nsecs = int((common['time_stamp'] % 1000) * 1e6)
                self.img_msg.header.frame_id = "right_sony_cam"

                self.img_msg.format = 'jpeg'
                self.img_msg.data = image_file
                # end fill

                self.pub.publish(self.img_msg)

            self.success_liveview = True

        except Exception as err:
            rospy.logerr("Couldn't get liveview: %s" % err)

    def reset_liveview(self):
        """
        Executes necessary steps to begin a liveview.
        :return: Boolean, True when there are no errors.
        """
        # disables packets with liveview frame info (face detection frames, focus frames and tracking frames)
        # supported in QX1
        try:
            if 'setLiveviewFrameInfo' in (self.mode['result'])[0]:
                self.cam.setLiveviewFrameInfo([{"frameInfo": False}])

            self.incoming = urllib2.urlopen(self.cam.liveview())

        except Exception:
            rospy.logerr("Couldn't reset liveview")
            return False

        else:
            return True

    def liveview(self):
        """
        Begins liveview streaming.
        """
        # publisher setup
        self.pub = rospy.Publisher('liveview/compressed',
                                   CompressedImage,
                                   queue_size=10)
        rospy.loginfo("Beginning liveview")

        reset_f = True
        while reset_f:
            try:
                reset_f = not self.reset_liveview()
            except Exception:
                rospy.logerr("Couldn't begin liveview set up")
                time.sleep(3)

        while not rospy.is_shutdown():
            try:
                t_getlive = Thread(target=self.liveview_thread)
                t_getlive.start()
                t_getlive.join(TIMEOUT_GETLIVEVIEW)

                if not self.success_liveview:
                    rospy.logerr(
                        "Couldn't get liveview: it timed out or failed"
                    )
                    t_getresetlive = Thread(target=self.reset_liveview)
                    t_getresetlive.start()
                    t_getresetlive.join(TIMEOUT_GETLIVEVIEW)
                    time.sleep(3)

            except Exception as err:
                rospy.logerr("Couldn't get liveview: %s" % err)
                time.sleep(3)
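A possible entry point for the CameraHandler class above; the node name and the choice to serve picture requests on a background thread are assumptions, not part of the original.

if __name__ == '__main__':
    rospy.init_node('sony_cam')                    # example node name
    handler = CameraHandler()
    handler.prep_pub_hdpic()                       # publisher for full-resolution pictures
    Thread(target=handler.set_serv_pic).start()    # serve take-picture requests in the background
    handler.liveview()                             # stream liveview until shutdown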
Example No. 10
parser.add_argument("-z", "--zoom", action="store_true", dest="zoom", help="Zoom image to fill screen" )
parser.add_argument("-s", "--skip", action="store_true", dest="skip", help="Skip frames if computer too slow" )

options = parser.parse_args()

# Connect and set-up camera
search = ControlPoint()
cameras =  search.discover()

if len(cameras):
   camera = SonyAPI(QX_ADDR=cameras[0])
else:
   print("No camera found, aborting")
   quit()

mode = camera.getAvailableApiList()

# For those cameras which need it
if 'startRecMode' in (mode['result'])[0]:
   camera.startRecMode()
   time.sleep(5)

   # and re-read capabilities
   mode = camera.getAvailableApiList()

if 'setLiveviewFrameInfo' in (mode['result'])[0]:
   if options.info:
      camera.setLiveviewFrameInfo([{"frameInfo": True}])
   else:
      camera.setLiveviewFrameInfo([{"frameInfo": False}])
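The two option-parsing fragments above begin partway through the argument setup. The sketch below shows one way the full parser might look, inferred from the attributes used across these examples (options.debug, options.info, options.large, options.gui, options.still, options.zoom, options.skip); apart from -z/--zoom and -s/--skip, the flag spellings and the description are assumptions.

import argparse

parser = argparse.ArgumentParser(description="Sony camera liveview viewer")
parser.add_argument("-d", "--debug", action="store_true", dest="debug", help="Print debug information")
parser.add_argument("-i", "--info", action="store_true", dest="info", help="Request liveview frame info")
parser.add_argument("-l", "--large", action="store_true", dest="large", help="Request large liveview size")
parser.add_argument("-g", "--gui", action="store_true", dest="gui", help="Show the display window")
parser.add_argument("-t", "--still", action="store_true", dest="still", help="Use still-picture mode")
parser.add_argument("-z", "--zoom", action="store_true", dest="zoom", help="Zoom image to fill screen")
parser.add_argument("-s", "--skip", action="store_true", dest="skip", help="Skip frames if computer too slow")

options = parser.parse_args()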
Example No. 11
    def run(self):
        global options, grabber, decoder, display, image_copy

        if options.debug:
            print "using LiveView grabber"

        self.active = False
        self.photomode = False

        # grabber control signals
        self.event_start_stream = threading.Event()
        self.event_stop_stream = threading.Event()
        self.event_stopped_stream = threading.Event()
        self.event_terminate = threading.Event()
        self.event_terminated = threading.Event()

        # decoder control signals
        self.event_decoding = threading.Event()
        self.event_decoder_terminated = threading.Event()

        # display control signals
        self.lock_offscreen = threading.Semaphore()

        # export to other threads
        self.frame_count = 0
        grabber = self

        # Search for available camera
        if options.debug:
            print "searching for camera"

        search = ControlPoint()
        cameras = search.discover(1)

        if len(cameras):
            camera = SonyAPI(QX_ADDR=cameras[0])
        else:
            print "No camera found, aborting"
            return

        # Check if we need to do 'startRecMode'
        mode = camera.getAvailableApiList()

        # Need a better method to check for the presence of a camera
        if type(mode) != dict:
            print "No camera found, aborting"
            display.terminate_clicked()
            self.event_terminated.set()
            return

        # For those cameras which need it
        if 'startRecMode' in (mode['result'])[0]:
            camera.startRecMode()
            time.sleep(5)

            # and re-read capabilities
            mode = camera.getAvailableApiList()

        if options.debug:
            print "Versions:", camera.getVersions()
            print "API List:", mode

        if 'setLiveviewFrameInfo' in (mode['result'])[0]:
            if options.info:
                camera.setLiveviewFrameInfo([{"frameInfo": True}])
            else:
                camera.setLiveviewFrameInfo([{"frameInfo": False}])

        if 'getAvailableLiveviewSize' in (mode['result'])[0]:
            if options.large and len(
                (camera.getAvailableLiveviewSize()['result'])[0]) > 1:
                incoming = camera.liveview(["L"])
            else:
                incoming = camera.liveview()
        else:
            incoming = camera.liveview()

        incoming_image = None
        frame_sequence = None
        frame_info = None
        frame_data = None

        # Ensure that we're in correct mode (movie by default)
        mode = camera.getAvailableShootMode()
        if type(mode) == dict:
            if options.still:
                if (mode['result'])[0] != 'still':
                    if 'still' in (mode['result'])[1]:
                        camera.setShootMode(["still"])
                        self.photomode = True
                else:
                    self.photomode = True
            else:
                if (mode['result'])[0] != 'movie':
                    if 'movie' in (mode['result'])[1]:
                        camera.setShootMode(["movie"])
                    else:
                        self.photomode = True

        while not self.event_terminate.isSet():
            # Handle events from the camera (record start/stop)
            if self.frame_count % 50 == 0:
                mode = camera.getEvent(["false"])
            else:
                mode = None

            if mode and type(mode) == dict:
                status = mode['result'][1]
                if self.active == False and status[
                        'cameraStatus'] == 'MovieRecording':
                    self.frame_count = 0
                    self.start_time = datetime.datetime.now()
                    self.active = True
                    if options.debug:
                        print "started capture", self.start_time
                elif self.active == True and status['cameraStatus'] == 'IDLE':
                    self.active = False
                    self.end_time = datetime.datetime.now()
                    if options.debug:
                        elapsed = self.end_time - self.start_time
                        print "Stopped capture: frames = ", self.frame_count,
                        print ", delta = ", elapsed.seconds + (
                            float(elapsed.microseconds) / 1000000),
                        print ", fps = ", self.frame_count / (
                            elapsed.seconds +
                            (float(elapsed.microseconds) / 1000000))

            # read next image
            data = incoming.read(8)
            common = common_header(data)
            data = incoming.read(128)

            if common['payload_type'] == 1:
                payload = payload_header(data)
                image_file = io.BytesIO(
                    incoming.read(payload['jpeg_data_size']))
                incoming_image = Image.open(image_file)
                incoming.read(payload['padding_size'])
            elif common['payload_type'] == 2:
                frame_info = payload_header(data, 2)
                if frame_info['jpeg_data_size']:
                    frame_sequence = common['sequence_number']
                    frame_data = incoming.read(frame_info['jpeg_data_size'])
                    incoming.read(frame_info['padding_size'])

            if options.gui == True:
                # Correct display size if changed
                if incoming_image and (
                    (incoming_image.size)[0] != display.width):
                    if options.debug:
                        print "adjusted width from", display.width, "to", (
                            incoming_image.size)[0]
                    display.width = (incoming_image.size)[0]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
                                                       False, 8, display.width,
                                                       display.height)

                if incoming_image and (
                    (incoming_image.size)[1] != display.height):
                    if options.debug:
                        print "adjusted height from", display.height, "to", (
                            incoming_image.size)[1]
                    display.height = (incoming_image.size)[1]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
                                                       False, 8, display.width,
                                                       display.height)

                # copy image to the display
                if incoming_image:
                    image_copy = incoming_image.convert("RGB")

                    # only draw recent frame info onto the image
                    if frame_info and frame_sequence >= common['sequence_number']-1 \
                          and frame_info['jpeg_data_size']:
                        for x in range(frame_info['frame_count']):
                            x = x * frame_info['frame_size']
                            (left, top, right,
                             bottom) = struct.unpack(">HHHH",
                                                     frame_data[x:x + 8])
                            left = left * display.width / 10000
                            top = top * display.height / 10000
                            right = right * display.width / 10000
                            bottom = bottom * display.height / 10000

                            dr = ImageDraw.Draw(image_copy)
                            dr.line((left, top, left, bottom),
                                    fill="white",
                                    width=3)
                            dr.line((right, top, right, bottom),
                                    fill="white",
                                    width=3)
                            dr.line((left, top, right, top),
                                    fill="white",
                                    width=3)
                            dr.line((left, bottom, right, bottom),
                                    fill="white",
                                    width=3)

                    display.copy_to_offscreen(image_copy)

            if options.debug:
                print "Frame:", common['sequence_number'], common[
                    'time_stemp'], datetime.datetime.now()

            # count frames
            self.frame_count = self.frame_count + 1

            # handle events
            if self.event_start_stream.isSet():
                if self.photomode == True:
                    camera.actTakePicture()
                else:
                    camera.startMovieRec()
                self.event_start_stream.clear()

            if self.event_stop_stream.isSet():
                camera.stopMovieRec()
                self.event_stop_stream.clear()

            # give OS a breather
            #time.sleep(0.01)

        # declare that we're done...
        self.event_terminated.set()
        self.event_terminate.clear()
Example No. 12
    def run(self):
        global options, grabber, decoder, display, image_copy

        if options.debug:
            print "using LiveView grabber"

        self.active = False

        # grabber control signals
        self.event_start_stream = threading.Event()
        self.event_stop_stream = threading.Event()
        self.event_stopped_stream = threading.Event()
        self.event_terminate = threading.Event()
        self.event_terminated = threading.Event()

        # decoder control signals
        self.event_decoding = threading.Event()
        self.event_decoder_terminated = threading.Event()

        # display control signals
        self.lock_offscreen = threading.Semaphore()

        # export to other threads
        self.frame_count = 0
        grabber = self

        camera = SonyAPI()

        # Check if we need to do 'startRecMode'
        mode = camera.getAvailableApiList()

        # Need a better method to check for the presence of a camera
        if type(mode) != dict:
            print "No camera found, aborting"
            display.terminate_clicked()
            self.event_terminated.set()
            return

        if options.debug:
            print "Versions:", camera.getVersions()
            print "API List:", mode

        # For those cameras which need it
        if 'startRecMode' in (mode['result'])[0]:
            camera.startRecMode()

        # Ensure that we're in 'Movie' mode
        mode = camera.getAvailableShootMode()
        if type(mode) == dict:
            if (mode['result'])[0] != 'movie':
                if 'movie' in (mode['result'])[1]:
                    camera.setShootMode(["movie"])
                else:
                    print "'movie' mode not supported"

        # Wait for camera to be ready and check whether we're recoding
        mode = camera.getEvent(["true"])
        if options.debug:
            print "Event:", mode
        if type(mode) == dict:
            status = mode['result'][1]
            if status['cameraStatus'] == 'MovieRecording':
                self.active = True
                self.start_time = datetime.datetime.now()

        incoming = camera.liveview()

        while not self.event_terminate.isSet():
            if options.gui == True:
                # read next image
                data = incoming.read(8)
                data = incoming.read(128)
                payload = payload_header(data)
                image_file = io.BytesIO(
                    incoming.read(payload['jpeg_data_size']))
                incoming_image = Image.open(image_file)
                incoming.read(payload['padding_size'])

                # set initial width/height
                if display.offscreen == None:
                    display.width = (incoming_image.size)[0]
                    display.height = (incoming_image.size)[1]
                    if options.debug:
                        print "Display set to", (
                            incoming_image.size)[0], "x", (
                                incoming_image.size)[1]

                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
                                                       False, 8, display.width,
                                                       display.height)

                # Correct display size if changed
                if ((incoming_image.size)[0] != display.width):
                    if options.debug:
                        print "adjusted width from", display.width, "to", (
                            incoming_image.size)[0]
                    display.width = (incoming_image.size)[0]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
                                                       False, 8, display.width,
                                                       display.height)

                if ((incoming_image.size)[1] != display.height):
                    if options.debug:
                        print "adjusted height from", display.height, "to", (
                            incoming_image.size)[1]
                    display.height = (incoming_image.size)[1]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
                                                       False, 8, display.width,
                                                       display.height)

                # copy image to the display
                image_copy = incoming_image.convert("RGB")
                display.copy_to_offscreen(image_copy)

            if self.event_start_stream.isSet():
                camera.startMovieRec()
                self.frame_count = 0

                # and we're ready to go..
                self.start_time = datetime.datetime.now()
                self.event_start_stream.clear()

            # loop until request to stop
            if self.active == True:
                while not self.event_stop_stream.isSet():
                    if options.gui == True:
                        # read next image
                        data = incoming.read(8)
                        data = incoming.read(128)
                        payload = payload_header(data)
                        image_file = io.BytesIO(
                            incoming.read(payload['jpeg_data_size']))
                        incoming_image = Image.open(image_file)
                        incoming.read(payload['padding_size'])

                        image_copy = incoming_image.convert("RGB")
                        display.copy_to_offscreen(image_copy)

                        # copy image to the display
                        display.offscreen = gtk.gdk.Pixbuf(
                            gtk.gdk.COLORSPACE_RGB, False, 8, display.width,
                            display.height * 2)
                        display.copy_to_offscreen(image_copy)

                    self.frame_count = self.frame_count + 1

                    if self.event_terminate.isSet():
                        break

            if self.event_stop_stream.isSet():
                # clean up
                camera.stopMovieRec()
                self.end_time = datetime.datetime.now()
                self.event_stop_stream.clear()
                self.active = False

            time.sleep(0.1)

        # declare that we're done...
        self.event_terminated.set()
        self.event_terminate.clear()
Example No. 13
    def run(self):
        global options, grabber, decoder, display, image_copy

        if options.debug:
            print "using LiveView grabber"

        self.active = False
        self.photomode = False

        # grabber control signals
        self.event_start_stream = threading.Event()
        self.event_stop_stream = threading.Event()
        self.event_stopped_stream = threading.Event()
        self.event_terminate = threading.Event()
        self.event_terminated = threading.Event()

        # decoder control signals
        self.event_decoding = threading.Event()
        self.event_decoder_terminated = threading.Event()

        # display control signals
        self.lock_offscreen = threading.Semaphore()

        # export to other threads
        self.frame_count = 0
        grabber = self

        # Search for available camera
        if options.debug:
            print "searching for camera"

        search = ControlPoint()
        cameras = search.discover(1)

        if len(cameras):
            camera = SonyAPI(QX_ADDR=cameras[0])
        else:
            print "No camera found, aborting"
            return

        # Check if we need to do 'startRecMode'
        mode = camera.getAvailableApiList()

        # Need a better method to check for the presence of a camera
        if type(mode) != dict:
            print "No camera found, aborting"
            display.terminate_clicked()
            self.event_terminated.set()
            return

        # For those cameras which need it
        if "startRecMode" in (mode["result"])[0]:
            camera.startRecMode()
            time.sleep(5)

            # and re-read capabilities
            mode = camera.getAvailableApiList()

        if options.debug:
            print "Versions:", camera.getVersions()
            print "API List:", mode

        if "setLiveviewFrameInfo" in (mode["result"])[0]:
            if options.info:
                camera.setLiveviewFrameInfo([{"frameInfo": True}])
            else:
                camera.setLiveviewFrameInfo([{"frameInfo": False}])

        if "getAvailableLiveviewSize" in (mode["result"])[0]:
            if options.large and len((camera.getAvailableLiveviewSize()["result"])[0]) > 1:
                incoming = camera.liveview(["L"])
            else:
                incoming = camera.liveview()
        else:
            incoming = camera.liveview()

        incoming_image = None
        frame_sequence = None
        frame_info = None
        frame_data = None

        # Ensure that we're in correct mode (movie by default)
        mode = camera.getAvailableShootMode()
        if type(mode) == dict:
            if options.still:
                if (mode["result"])[0] != "still":
                    if "still" in (mode["result"])[1]:
                        camera.setShootMode(["still"])
                        self.photomode = True
                else:
                    self.photomode = True
            else:
                if (mode["result"])[0] != "movie":
                    if "movie" in (mode["result"])[1]:
                        camera.setShootMode(["movie"])
                    else:
                        self.photomode = True

        while not self.event_terminate.isSet():
            # Handle events from the camera (record start/stop)
            if self.frame_count % 50 == 0:
                mode = camera.getEvent(["false"])
            else:
                mode = None

            if mode and type(mode) == dict:
                status = mode["result"][1]
                if self.active == False and status["cameraStatus"] == "MovieRecording":
                    self.frame_count = 0
                    self.start_time = datetime.datetime.now()
                    self.active = True
                    if options.debug:
                        print "started capture", self.start_time
                elif self.active == True and status["cameraStatus"] == "IDLE":
                    self.active = False
                    self.end_time = datetime.datetime.now()
                    if options.debug:
                        elapsed = self.end_time - self.start_time
                        print "Stopped capture: frames = ", self.frame_count,
                        print ", delta = ", elapsed.seconds + (float(elapsed.microseconds) / 1000000),
                        print ", fps = ", self.frame_count / (elapsed.seconds + (float(elapsed.microseconds) / 1000000))

            # read next image
            data = incoming.read(8)
            common = common_header(data)
            data = incoming.read(128)

            if common["payload_type"] == 1:
                payload = payload_header(data)
                image_file = io.BytesIO(incoming.read(payload["jpeg_data_size"]))
                incoming_image = Image.open(image_file)
                incoming.read(payload["padding_size"])
            elif common["payload_type"] == 2:
                frame_info = payload_header(data, 2)
                if frame_info["jpeg_data_size"]:
                    frame_sequence = common["sequence_number"]
                    frame_data = incoming.read(frame_info["jpeg_data_size"])
                    incoming.read(frame_info["padding_size"])

            if options.gui == True:
                # Correct display size if changed
                if incoming_image and ((incoming_image.size)[0] != display.width):
                    if options.debug:
                        print "adjusted width from", display.width, "to", (incoming_image.size)[0]
                    display.width = (incoming_image.size)[0]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, display.width, display.height)

                if incoming_image and ((incoming_image.size)[1] != display.height):
                    if options.debug:
                        print "adjusted height from", display.height, "to", (incoming_image.size)[1]
                    display.height = (incoming_image.size)[1]
                    display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, display.width, display.height)

                # copy image to the display
                if incoming_image:
                    image_copy = incoming_image.convert("RGB")

                    # only draw recent frame info onto the image
                    if frame_info and frame_sequence >= common["sequence_number"] - 1 and frame_info["jpeg_data_size"]:
                        for x in range(frame_info["frame_count"]):
                            x = x * frame_info["frame_size"]
                            (left, top, right, bottom) = struct.unpack(">HHHH", frame_data[x : x + 8])
                            left = left * display.width / 10000
                            top = top * display.height / 10000
                            right = right * display.width / 10000
                            bottom = bottom * display.height / 10000

                            dr = ImageDraw.Draw(image_copy)
                            dr.line((left, top, left, bottom), fill="white", width=3)
                            dr.line((right, top, right, bottom), fill="white", width=3)
                            dr.line((left, top, right, top), fill="white", width=3)
                            dr.line((left, bottom, right, bottom), fill="white", width=3)

                    display.copy_to_offscreen(image_copy)

            if options.debug:
                print "Frame:", common["sequence_number"], common["time_stemp"], datetime.datetime.now()

            # count frames
            self.frame_count = self.frame_count + 1

            # handle events
            if self.event_start_stream.isSet():
                if self.photomode == True:
                    camera.actTakePicture()
                else:
                    camera.startMovieRec()
                self.event_start_stream.clear()

            if self.event_stop_stream.isSet():
                camera.stopMovieRec()
                self.event_stop_stream.clear()

            # give OS a breather
            # time.sleep(0.01)

        # declare that we're done...
        self.event_terminated.set()
        self.event_terminate.clear()
Example No. 14
class Camera_sonywifi(Camera):
    def __init__(self, picture_size, preview_size, zoom=30, ssid="DIRECT-LKE0:ILCE-6000", pw="UeyxbxAG",
                 iface="wlp2s0"):
        Camera.__init__(self, picture_size, preview_size, zoom, type='sony_wifi')
        self.live_stream = None
        if not sony_enabled:
            raise CameraException("pysony module not installed")
        self.previous_wifi = subprocess.check_output(
            ["nmcli", "--fields", "NAME", "c", "show", "--active"]
        ).decode("utf-8").split("\n")[1].strip()
        self.sony_api_version = "1.0"
        if self.previous_wifi == ssid:
            self.previous_wifi = ""
        else:
            try:
                wifi_list = subprocess.check_output(["nmcli", "--fields", "SSID", "device", "wifi"]).decode(
                    "utf-8").split("\n")
                wifi_list = [wifi_list[i].strip() for i in range(len(wifi_list))]

                if ssid in wifi_list:
                    subprocess.check_call(["nmcli", "con", "up", "id", ssid])
                else:
                    raise CameraException("Sony Wifi not found")

            except subprocess.CalledProcessError:
                raise CameraException("Cannot connect to wifi")
        search = ControlPoint()
        cameras = search.discover()
        self.last_preview_frame = Image.new('RGB', preview_size, (0, 0, 0))

        if len(cameras):
            self.camera = SonyAPI(QX_ADDR=cameras[0])
        else:
            raise CameraException("No camera found")


        options = self.camera.getAvailableApiList()['result'][0]
        logging.info(str(options))

    def zoom_in(self):
        self.camera.actZoom(["in", "1shot"])

    def zoom_out(self):
        self.camera.actZoom(["out", "1shot"])

    def __del__(self):
        self.teardown()

    def teardown(self):
        if sony_enabled:
            if self.live_stream is not None:
                self.stop_preview_stream()
            if self.previous_wifi != "":
                try:
                    subprocess.check_call(["nmcli", "con", "up", "id", self.previous_wifi])
                except subprocess.CalledProcessError:
                    raise CameraException("Cannot connect to previous wifi " + self.previous_wifi)

    def start_preview_stream(self):
        # For those cameras which need it
        options = self.camera.getAvailableApiList()['result'][0]
        logging.info("starting preview")
        if 'startRecMode' in options:
            self.camera.startRecMode()
            time.sleep(1)
            options = self.camera.getAvailableApiList()['result'][0]
            print(str(options))
        self.camera.setLiveviewFrameInfo([{"frameInfo": False}])
        url = self.camera.liveview()
        assert isinstance(url, str)
        print(url)
        self.live_stream = SonyAPI.LiveviewStreamThread(url)
        self.live_stream.start()
        self.preview_active = True

    def stop_preview_stream(self):
        logging.info("stopping preview")
        if self.live_stream is not None:
            self.live_stream.stop()
            options = self.camera.getAvailableApiList()['result'][0]
            # if self.preview_active and 'endRecMode' in (options):
            #    self.camera.stopRecMode()
            # if self.live_stream.is_active():
            #    self.camera.stopLiveview() # todo:  is this correct?
        self.preview_active = False

    def get_preview_frame(self, filename=None):
        # read header, confirms image is also ready to read
        header = False
        while not header:
            header = self.live_stream.get_header()

        if header:
            # image_file = io.BytesIO(self.live_stream.get_latest_view())
            # incoming_image = Image.open(image_file)
            # frame_info = self.live_stream.get_frameinfo()
            data = self.live_stream.get_latest_view()
            img = Image.open(io.BytesIO(data))
            # img=img.resize(self.preview_size, Image.ANTIALIAS)
        else:
            img = self.last_preview_frame
        if filename is not None:
            img.save(filename)
        return img

    def take_picture(self, filename="/tmp/picture.jpg"):
        options = self.camera.getAvailableApiList()['result'][0]
        url = self.camera.actTakePicture()
        logging.info(url)
        response = requests.get(url['result'][0][0].replace('\\', ''))
        img = Image.open(io.BytesIO(response.content))
        if filename is not None:
            img.save(filename)
        return img
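A minimal usage sketch for the Camera_sonywifi class above; the picture/preview sizes and file paths are illustrative, and the Camera base class and CameraException come from the surrounding project.

cam = Camera_sonywifi(picture_size=(6000, 4000), preview_size=(800, 600))
try:
    cam.start_preview_stream()
    preview = cam.get_preview_frame("/tmp/preview.jpg")   # one liveview frame as a PIL image
    photo = cam.take_picture("/tmp/picture.jpg")          # full-resolution capture
    cam.stop_preview_stream()
finally:
    cam.teardown()   # reconnect to the previous wifi network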