Example no. 1
0
    def get_image(self):
        """
        **SUMMARY**

        Grab the current frame from the Kinect's color camera.

        **RETURNS**

        The Kinect's color camera image.

        **EXAMPLE**

        >>> k = Kinect()
        >>> while True:
        ...     k.get_image().show()

        """
        if not FREENECT_ENABLED:
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
            return

        # sync_get_video returns a (data, timestamp) pair; keep the pixels.
        frame, _ = freenect.sync_get_video(self.device_number)
        self.capture_time = time.time()
        # Swap rows/columns to match the Factory.Image axis convention.
        return Factory.Image(frame.transpose([1, 0, 2]), camera=self)
Example no. 2
0
    def get_depth(self):
        """
        **SUMMARY**

        Grab the current frame from the Kinect's depth sensor.

        **RETURNS**

        The Kinect's depth camera image as a grayscale image.

        **EXAMPLE**

        >>> k = Kinect()
        >>> while True:
        ...     d = k.get_depth()
        ...     img = k.get_image()
        ...     result = img.side_by_side(d)
        ...     result.show()
        """

        if not FREENECT_ENABLED:
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
            return

        raw_depth, _ = freenect.sync_get_depth(self.device_number)
        self.capture_time = time.time()
        # Clamp to the 10-bit range, then drop the two low bits so the
        # values fit into 8-bit grayscale.
        np.clip(raw_depth, 0, 1023, raw_depth)
        raw_depth >>= 2
        gray = raw_depth.astype(np.uint8).transpose()

        return Factory.Image(gray, camera=self)
Example no. 3
0
    def get_depth(self):
        """
        **SUMMARY**

        Return the Kinect depth image, scaled down to 8-bit grayscale.

        **RETURNS**

        The Kinect's depth camera image as a grayscale image.

        **EXAMPLE**

        >>> k = Kinect()
        >>> while True:
        ...     d = k.get_depth()
        ...     img = k.get_image()
        ...     result = img.side_by_side(d)
        ...     result.show()
        """

        if not FREENECT_ENABLED:
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
            return

        depth_map = freenect.sync_get_depth(self.device_number)[0]
        self.capture_time = time.time()
        # Keep only the 10-bit payload and shift it into 8-bit range.
        np.clip(depth_map, 0, (1 << 10) - 1, depth_map)
        depth_map >>= 2
        depth_map = depth_map.astype(np.uint8).transpose()

        return Factory.Image(depth_map, camera=self)
Example no. 4
0
    def get_image(self):
        """
        **SUMMARY**

        Fetch the most recent frame from the Kinect's RGB camera.

        **RETURNS**

        The Kinect's color camera image.

        **EXAMPLE**

        >>> k = Kinect()
        >>> while True:
        ...     k.get_image().show()

        """
        if not FREENECT_ENABLED:
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
            return

        rgb_frame = freenect.sync_get_video(self.device_number)[0]
        self.capture_time = time.time()
        # Transpose the first two axes to the Factory.Image layout.
        return Factory.Image(rgb_frame.transpose([1, 0, 2]), camera=self)
Example no. 5
0
    def get_depth_matrix(self):
        """
        Return the raw depth data from the Kinect as a numpy array,
        without any scaling or conversion to an Image object.

        Returns ``None`` (after logging a warning) when the freenect
        library is not available.
        """
        if not FREENECT_ENABLED:
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
            return

        self.capture_time = time.time()
        depth_data, _ = freenect.sync_get_depth(self.device_number)
        return depth_data
Example no. 6
0
    def get_depth_matrix(self):
        """
        Grab one raw depth frame from the Kinect and return it as-is
        (a numpy array), recording the capture timestamp on the camera.
        """
        if not FREENECT_ENABLED:
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
            return

        self.capture_time = time.time()
        return freenect.sync_get_depth(self.device_number)[0]
Example no. 7
0
    def __init__(self, device_number=0):
        """
        **SUMMARY**

        Kinect constructor. device_number selects which attached Kinect
        to connect to; it defaults to 0.

        **PARAMETERS**

        * *device_number* - The index of the kinect, these go from 0 upward.
        """
        super(Kinect, self).__init__()
        self.device_number = device_number
        # Warn early so the failure mode is obvious before any capture call.
        if not FREENECT_ENABLED:
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
Example no. 8
0
    def __init__(self, device_number=0):
        """
        **SUMMARY**

        Build a Kinect camera object. Pass device_number to choose which
        Kinect to open when several are attached (default 0).

        **PARAMETERS**

        * *device_number* - The index of the kinect, these go from 0 upward.
        """
        super(Kinect, self).__init__()
        self.device_number = device_number
        if not FREENECT_ENABLED:
            # Freenect missing: the object is still constructed, but
            # capture methods will only log warnings.
            logger.warning("You don't seem to have the freenect library "
                           "installed. This will make it hard to use "
                           "a Kinect.")
Example no. 9
0
    def __init__(self, fname=None, name=None):
        """
        Load a Haar cascade classifier from *fname*, falling back to the
        bundled HaarCascades data directory when the path does not exist,
        and memoize the loaded classifier in the class-level cache.

        *name* is an optional display name; it defaults to the file name.
        """
        self._cascade = None
        self._name = fname if name is None else name

        if fname is None:
            logger.warning("No file path mentioned.")
            return

        if os.path.exists(fname):
            self._fhandle = os.path.abspath(fname)
        else:
            # Not a direct path: look inside the packaged cascade data.
            self._fhandle = os.path.join(DATA_DIR, 'HaarCascades', fname)
            if not os.path.exists(self._fhandle):
                logger.warning("Could not find Haar Cascade file " + fname)
                logger.warning("Try running the function "
                               "img.list_haar_features() to see what is "
                               "available")
                return

        # Reuse a previously loaded classifier when possible.
        if self._fhandle in HaarCascade._cache:
            self._cascade = HaarCascade._cache[self._fhandle]
            return

        self._cascade = cv2.CascadeClassifier(self._fhandle)
        HaarCascade._cache[self._fhandle] = self._cascade
def find_barcode(img, do_zlib=True, zxing_path=""):
    """
    **SUMMARY**

    This function requires zbar and the zbar python wrapper
    to be installed or zxing and the zxing python library.

    **ZBAR**

    To install please visit:
    http://zbar.sourceforge.net/

    On Ubuntu Linux 12.04 or greater:
    sudo apt-get install python-zbar


    **ZXING**

    If you have the python-zxing library installed, you can find 2d and 1d
    barcodes in your image.  These are returned as Barcode feature objects
    in a FeatureSet.  The single parameter is the ZXing_path along with
    setting the do_zlib flag to False. You do not need the parameter if you
    don't have the ZXING_LIBRARY env parameter set.

    You can clone python-zxing at:

    http://github.com/oostendo/python-zxing

    **INSTALLING ZEBRA CROSSING**

    * Download the latest version of zebra crossing from:
     http://code.google.com/p/zxing/

    * unpack the zip file where ever you see fit

      >>> cd zxing-x.x, where x.x is the version number of zebra crossing
      >>> ant -f core/build.xml
      >>> ant -f javase/build.xml

      This should build the library, but double check the readme

    * Get our helper library

      >>> git clone git://github.com/oostendo/python-zxing.git
      >>> cd python-zxing
      >>> python setup.py install

    * Our library does not have a setup file. You will need to add
       it to your path variables. On OSX/Linux use a text editor to modify
       your shell file (e.g. .bashrc)

      export ZXING_LIBRARY=<FULL PATH OF ZXING LIBRARY - (i.e. step 2)>
      for example:

      export ZXING_LIBRARY=/my/install/path/zxing-x.x/

      On windows you will need to add these same variables to the system
      variable, e.g.

      http://www.computerhope.com/issues/ch000549.htm

    * On OSX/Linux source your shell rc file (e.g. source .bashrc). Windows
     users may need to restart.

    * Go grab some barcodes!

    **Returns**

    A :py:class:`FeatureSet` of :py:class:`Barcode` objects. If no barcodes
     are detected the method returns None.

    **EXAMPLE**

    >>> img = cam.get_image()
    >>> barcodes = find_barcode(img)
    >>> for b in barcodes:
    >>>     b.draw()

    **SEE ALSO**

    :py:class:`FeatureSet`
    :py:class:`Barcode`

    """
    # NOTE: the parameter is named do_zlib for backward compatibility,
    # but it actually selects the zbar backend (True) vs. zxing (False).
    if do_zlib:
        try:
            import zbar
        except ImportError:
            # Only catch the missing-module case; anything else should
            # propagate rather than be silently swallowed.
            logger.warning('The zbar library is not installed, please '
                           'install to read barcodes')
            return None

        #configure zbar
        scanner = zbar.ImageScanner()
        scanner.parse_config('enable')
        # zbar wants raw 8-bit grayscale pixel data ('Y800').
        raw = img.get_pil().convert('L').tostring()
        width = img.width
        height = img.height

        # wrap image data
        image = zbar.Image(width, height, 'Y800', raw)

        # scan the image for barcodes
        scanner.scan(image)
        barcode = None
        # extract results; keep the last symbol found, as before
        for symbol in image:
            barcode = symbol
    else:
        if not ZXING_ENABLED:
            # logger.warn is a deprecated alias; use warning.
            logger.warning("Zebra Crossing (ZXing) Library not installed. "
                           "Please see the release notes.")
            return None

        global _barcode_reader
        if not _barcode_reader:
            if not zxing_path:
                _barcode_reader = zxing.BarCodeReader()
            else:
                _barcode_reader = zxing.BarCodeReader(zxing_path)

        # zxing decodes from a file on disk. Use tempfile.mkstemp instead
        # of os.tmpnam(): tmpnam was a security risk (the RuntimeWarning
        # this function used to trigger on OSX) and was removed in
        # Python 3.
        import tempfile
        fd, tmp_filename = tempfile.mkstemp(suffix=".png")
        os.close(fd)
        try:
            img.save(tmp_filename)
            barcode = _barcode_reader.decode(tmp_filename)
        finally:
            # Always remove the temp file, even if decoding raises.
            os.unlink(tmp_filename)

    if barcode:
        f = Factory.Barcode(img, barcode)
        return FeatureSet([f])
    else:
        return None
Example no. 11
0
def find_haar_features(img,
                       cascade,
                       scale_factor=1.2,
                       min_neighbors=2,
                       use_canny=cv2.cv.CV_HAAR_DO_CANNY_PRUNING,
                       min_size=(20, 20),
                       max_size=(1000, 1000)):
    """
    **SUMMARY**

    A Haar like feature cascade is a really robust way of finding the
    location of a known object. This technique works really well for a few
    specific applications like face, pedestrian, and vehicle detection. It
    is worth noting that this approach **IS NOT A MAGIC BULLET** . Creating
    a cascade file requires a large number of images that have been sorted
    by a human. If you want to find Haar Features (useful for face
    detection among other purposes) this will return Haar feature objects
    in a FeatureSet.

    For more information, consult the cv2.CascadeClassifier documentation.

    To see what features are available run img.list_haar_features() or you
    can provide your own haarcascade file if you have one available.

    Note that the cascade parameter can be either a filename, or a
    HaarCascade loaded with cv2.CascadeClassifier(),
    or a SimpleCV HaarCascade object.

    **PARAMETERS**

    * *cascade* - The Haar Cascade file, this can be either the path to a
      cascade file or a HaarCascased SimpleCV object that has already been
      loaded.

    * *scale_factor* - The scaling factor for subsequent rounds of the
      Haar cascade (default 1.2) in terms of a percentage
      (i.e. 1.2 = 20% increase in size)

    * *min_neighbors* - The minimum number of rectangles that makes up an
      object. Usually detected faces are clustered around the face, this
      is the number of detections in a cluster that we need for detection.
      Higher values here should reduce false positives and decrease false
      negatives.

    * *use_canny* - Whether or not to use Canny pruning to reject areas
     with too many edges (default yes, set to 0 to disable)

    * *min_size* - Minimum window size. By default, it is set to the size
      of samples the classifier has been trained on ((20,20) for face
      detection)

    * *max_size* - Maximum window size. By default, it is set to the size
      of samples the classifier has been trained on ((1000,1000) for face
      detection)

    **RETURNS**

    A feature set of HaarFeatures

    **EXAMPLE**

    >>> faces = HaarCascade(
    ...         "./SimpleCV/data/Features/HaarCascades/face.xml",
    ...         "myFaces")
    >>> cam = Camera()
    >>> while True:
    >>>     f = cam.get_image().find_haar_features(faces)
    >>>     if f is not None:
    >>>          f.show()

    **NOTES**

    OpenCV Docs:
    - http://opencv.willowgarage.com/documentation/python/
      objdetect_cascade_classification.html

    Wikipedia:
    - http://en.wikipedia.org/wiki/Viola-Jones_object_detection_framework
    - http://en.wikipedia.org/wiki/Haar-like_features

    The video on this pages shows how Haar features and cascades work to
    locate faces:
    - http://dismagazine.com/dystopia/evolved-lifestyles/8115/
    anti-surveillance-how-to-hide-from-machines/

    """
    # Accept a file path, or an already-constructed HaarCascade object.
    if isinstance(cascade, basestring):
        cascade = HaarCascade(cascade)
        if not cascade.get_cascade():
            return None
    elif isinstance(cascade, HaarCascade):
        pass
    else:
        logger.warning('Could not initialize HaarCascade. '
                       'Enter Valid cascade value.')
        return None

    haar_classify = cv2.CascadeClassifier(cascade.get_fhandle())
    # BUGFIX: max_size was previously accepted and documented but never
    # forwarded to detectMultiScale, so the maximum window size had no
    # effect.
    objects = haar_classify.detectMultiScale(img.to_gray(),
                                             scaleFactor=scale_factor,
                                             minNeighbors=min_neighbors,
                                             minSize=min_size,
                                             maxSize=max_size,
                                             flags=use_canny)

    if objects is not None and len(objects) != 0:
        return FeatureSet(
            [Factory.HaarFeature(img, o, cascade, True) for o in objects])

    return None
def find_haar_features(img, cascade, scale_factor=1.2, min_neighbors=2,
                       use_canny=cv2.cv.CV_HAAR_DO_CANNY_PRUNING,
                       min_size=(20, 20), max_size=(1000, 1000)):
    """
    **SUMMARY**

    A Haar like feature cascade is a really robust way of finding the
    location of a known object. This technique works really well for a few
    specific applications like face, pedestrian, and vehicle detection. It
    is worth noting that this approach **IS NOT A MAGIC BULLET** . Creating
    a cascade file requires a large number of images that have been sorted
    by a human. If you want to find Haar Features (useful for face
    detection among other purposes) this will return Haar feature objects
    in a FeatureSet.

    For more information, consult the cv2.CascadeClassifier documentation.

    To see what features are available run img.list_haar_features() or you
    can provide your own haarcascade file if you have one available.

    Note that the cascade parameter can be either a filename, or a
    HaarCascade loaded with cv2.CascadeClassifier(),
    or a SimpleCV HaarCascade object.

    **PARAMETERS**

    * *cascade* - The Haar Cascade file, this can be either the path to a
      cascade file or a HaarCascased SimpleCV object that has already been
      loaded.

    * *scale_factor* - The scaling factor for subsequent rounds of the
      Haar cascade (default 1.2) in terms of a percentage
      (i.e. 1.2 = 20% increase in size)

    * *min_neighbors* - The minimum number of rectangles that makes up an
      object. Usually detected faces are clustered around the face, this
      is the number of detections in a cluster that we need for detection.
      Higher values here should reduce false positives and decrease false
      negatives.

    * *use_canny* - Whether or not to use Canny pruning to reject areas
     with too many edges (default yes, set to 0 to disable)

    * *min_size* - Minimum window size. By default, it is set to the size
      of samples the classifier has been trained on ((20,20) for face
      detection)

    * *max_size* - Maximum window size. By default, it is set to the size
      of samples the classifier has been trained on ((1000,1000) for face
      detection)

    **RETURNS**

    A feature set of HaarFeatures

    **EXAMPLE**

    >>> faces = HaarCascade(
    ...         "./SimpleCV/data/Features/HaarCascades/face.xml",
    ...         "myFaces")
    >>> cam = Camera()
    >>> while True:
    >>>     f = cam.get_image().find_haar_features(faces)
    >>>     if f is not None:
    >>>          f.show()

    **NOTES**

    OpenCV Docs:
    - http://opencv.willowgarage.com/documentation/python/
      objdetect_cascade_classification.html

    Wikipedia:
    - http://en.wikipedia.org/wiki/Viola-Jones_object_detection_framework
    - http://en.wikipedia.org/wiki/Haar-like_features

    The video on this pages shows how Haar features and cascades work to
    locate faces:
    - http://dismagazine.com/dystopia/evolved-lifestyles/8115/
    anti-surveillance-how-to-hide-from-machines/

    """
    # Normalize the cascade argument: path string or HaarCascade object.
    if isinstance(cascade, basestring):
        cascade = HaarCascade(cascade)
        if not cascade.get_cascade():
            return None
    elif isinstance(cascade, HaarCascade):
        pass
    else:
        logger.warning('Could not initialize HaarCascade. '
                       'Enter Valid cascade value.')
        return None

    haar_classify = cv2.CascadeClassifier(cascade.get_fhandle())
    # BUGFIX: forward max_size (previously accepted but silently ignored).
    objects = haar_classify.detectMultiScale(
        img.to_gray(), scaleFactor=scale_factor,
        minNeighbors=min_neighbors, minSize=min_size,
        maxSize=max_size, flags=use_canny)

    if objects is not None and len(objects) != 0:
        return FeatureSet(
            [Factory.HaarFeature(img, o, cascade, True) for o in objects])

    return None