Code example #1
File: camera.py Project: janfrs/kwc-ros-pkg
    def __init__(self, device_num=0):
        self.camera = vd.VidereCap(device_num)
        # self.camera.process_frames()
        self.camera.set_mode(vd.NONE)
        vd.set_gain(self.camera.capture, 96)
        vd.set_exposure(self.camera.capture, 255)
        self.camera.process_frames()
        self.low_exposure_left = cv.cvCreateImage(cv.cvGetSize(self.camera.left_debayered), 8, 3)
        self.low_exposure_right = cv.cvCreateImage(cv.cvGetSize(self.camera.right_debayered), 8, 3)
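A minimal usage sketch for the constructor above. The enclosing class name is not shown in the excerpt, so VidereCamera is an assumption, as are the vd and cv imports the project uses:

# Hypothetical usage; the class name VidereCamera is an assumption
cam = VidereCamera(device_num=0)
cam.camera.process_frames()  # grab a fresh stereo pair
size = cv.cvGetSize(cam.low_exposure_left)
print "left image: %d x %d" % (size.width, size.height)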
Code example #2
    def detect(self, image):
        # image size is needed by underlying opencv lib to allocate memory
        image_size = opencv.cvGetSize(image)

        # the algorithm works with grayscale images
        grayscale = opencv.cvCreateImage(image_size, 8, 1)
        opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)

        # more underlying c lib memory allocation
        storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(storage)

        # equalize histogram
        opencv.cvEqualizeHist(grayscale, grayscale)

        # detect faces using a haar cascade; the file used is trained to
        # detect frontal faces
        cascade = opencv.cvLoadHaarClassifierCascade(
            'haarcascade_frontalface_alt.xml', opencv.cvSize(1, 1))
        faces = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2,
                                           opencv.CV_HAAR_DO_CANNY_PRUNING,
                                           opencv.cvSize(100, 100))

        # draw rectangles around faces
        for face in faces:
            opencv.cvRectangle(
                image, opencv.cvPoint(int(face.x), int(face.y)),
                opencv.cvPoint(int(face.x + face.width),
                               int(face.y + face.height)),
                opencv.CV_RGB(127, 255, 0), 2)

        # return faces cast to a list here; otherwise an obscure bug
        # in opencv will make it segfault if the cast happens later
        return image, list(faces)
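A hedged usage sketch for detect() above, assuming the enclosing class is named FaceDetector and that the old SWIG highgui bindings are importable; the file names are illustrative:

from opencv import highgui

# load a BGR image, run detection, save the annotated copy
image = highgui.cvLoadImage('people.jpg')
annotated, faces = FaceDetector().detect(image)
print "%d face(s) found" % len(faces)
highgui.cvSaveImage('people_annotated.jpg', annotated)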
Code example #3
File: __init__.py Project: speters33w/shoebot
def findcontours(iplimage, threshold=100):
    srcimage = opencv.cvCloneImage(iplimage)
    # create the storage area and bw image
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    # threshold
    opencv.cvThreshold(grayscale, grayscale, threshold, 255, opencv.CV_THRESH_BINARY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    # find the contours
    nb_contours, contours = opencv.cvFindContours(grayscale, storage)
    # comment this out if you do not want approximation
    contours = opencv.cvApproxPoly(contours, opencv.sizeof_CvContour, storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    # next line is for ctypes-opencv
    # contours = opencv.cvApproxPoly (contours, opencv.sizeof(opencv.CvContour), storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    conts = []
    for cont in contours.hrange():
        points = []
        for pt in cont:
            points.append((pt.x, pt.y))
        conts.append(points)
    opencv.cvReleaseMemStorage(storage)
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    return (nb_contours, conts)
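A short usage sketch for findcontours(), assuming the old highgui bindings; the file name is illustrative:

from opencv import highgui

iplimage = highgui.cvLoadImage('shapes.png')
nb_contours, conts = findcontours(iplimage, threshold=128)
for points in conts:
    print "contour with %d point(s), starting at %s" % (len(points), points[0])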
Code example #4
def get_test_data():
    cfg = configuration.configuration('../data/ROS_test_client/',
                                      'dummyScanner')
    pc = processor.processor(cfg)
    pc.scan_dataset = scan_dataset()
    pc.scan_dataset.image_artag_filename = ''
    pc.scan_dataset.table_plane_translation = np.matrix([0, 0, 0]).T
    # approximate height of the hokuyo above the ground; assume it is fixed for now
    pc.scan_dataset.ground_plane_translation = np.matrix([0, 0, 1.32]).T
    pc.scan_dataset.ground_plane_rotation = ''
    # pc.scan_dataset.is_test_set = True
    pc.scan_dataset.is_labeled = True
    pc.scan_dataset.id = 'ROStest'
    pc.load_raw_data(pc.scan_dataset.id)
    image_size = cv.cvGetSize(pc.img)

    # don't do any mapping, so the pts3d stay unchanged!
    # create the point cloud from raw range points
    (pc.pts3d, pc.scan_indices,
     pc.intensities) = pc.create_pointcloud(pc.laserscans, True, True)

    # region of interest in image pixels
    polygon = label_object()
    polygon.set_label('surface')
    width_min = image_size.width * 0.4
    width_max = image_size.width * 0.95
    height_min = image_size.height * 0.3
    height_max = image_size.height * 0.95
    polygon.add_point((width_min, height_min))
    polygon.add_point((width_min, height_max))
    polygon.add_point((width_max, height_max))
    polygon.add_point((width_max, height_min))

    return pc.pts3d, pc.intensities, None, pc.img, pc.image_angle, polygon
Code example #5
File: fr.py Project: alien9/cam
def detect(image):
    # Find out how large the file is, as the underlying C-based code
    # needs to allocate memory in the following steps
    image_size = opencv.cvGetSize(image)

    # create grayscale version - this is also the point where the allegation about
    # facial recognition being racist might be most true. A Caucasian face would have
    # more definition on a webcam image than an African face when greyscaled.
    # I would suggest that adding a routine to overlay edge-detection enhancements may
    # help, but you would also need to do this to the training images as well.
    grayscale = opencv.cvCreateImage(image_size, 8, 1)
    opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)

    # create storage (It is C-based so you need to do this sort of thing)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)

    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)

    # detect objects - Haar cascade step
    # In this case, the code uses a frontal_face cascade - trained to spot faces that look directly
    # at the camera. In reality, I found that no bearded or hairy person must have been in the training
    # set of images, as the detection routine turned out to be beardist as well as a little racist!
    cascade = opencv.cvLoadHaarClassifierCascade('haarcascade_frontalface_alt.xml', opencv.cvSize(1,1))

    faces = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))

    if faces:
        for face in faces:
            # Hmm, should I do a min-size check?
            # Draw a Chartreuse rectangle around the face - Chartreuse rocks
            opencv.cvRectangle(image, opencv.cvPoint( int(face.x), int(face.y)),
                         opencv.cvPoint(int(face.x + face.width), int(face.y + face.height)),
                         opencv.CV_RGB(127, 255, 0), 2) # RGB #7FFF00 width=2
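A hedged usage sketch for this detect(), assuming a webcam is present; the function draws on the frame in place:

from opencv import highgui

capture = highgui.cvCreateCameraCapture(0)
frame = highgui.cvQueryFrame(capture)
detect(frame)  # rectangles are drawn directly into frame
highgui.cvNamedWindow('faces', 1)
highgui.cvShowImage('faces', frame)
highgui.cvWaitKey(0)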
Code example #6
File: __init__.py Project: speters33w/shoebot
    def __init__(self, frame):
        self.blob_image = opencv.cvCloneImage(frame.iplimage)
        self.blob_gray = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)
        self.blob_mask = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)
        opencv.cvSet(self.blob_mask, 1)
        opencv.cvCvtColor(self.blob_image, self.blob_gray, opencv.CV_BGR2GRAY)
        # opencv.cvEqualizeHist(self.blob_gray, self.blob_gray)
        # opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_BINARY)
        # opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_TOZERO)
        opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.bthresh, 255, frame.bthreshmode)
        # opencv.cvAdaptiveThreshold(self.blob_gray, self.blob_gray, 255, opencv.CV_ADAPTIVE_THRESH_MEAN_C, opencv.CV_THRESH_BINARY_INV)
        self._frame_blobs = CBlobResult(self.blob_gray, self.blob_mask, 100, True)
        self._frame_blobs.filter_blobs(10, 10000)
        self.count = self._frame_blobs.GetNumBlobs()
        self.items = []
        for i in range(self.count):
            self.items.append(self._frame_blobs.GetBlob(i))
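A usage sketch with heavy assumptions: the class name Blobs is invented for illustration, and frame must expose the iplimage, bthresh and bthreshmode attributes the constructor reads:

blobs = Blobs(frame)  # hypothetical class name
print "%d blob(s) kept after filtering" % blobs.count
# blobs.items holds the individual blob objects from the CBlobResult wrapper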
Code example #7
File: stalker.py Project: 1stvamp/stalkertux
def main(argv):
    # frame delay in milliseconds, passed to cvWaitKey below
    fps = 20
    tux_pos = 5
    tux_pos_min = 0.0
    tux_pos_max = 9.0

    try:
        # getopt short options are single letters: accept -f VALUE or --framerate VALUE
        opts, args = getopt.getopt(argv, "f:", ["framerate="])
    except getopt.GetoptError:
        sys.exit(2)

    for opt, arg in opts:
        if opt in ("-f", "--framerate"):
            fps = int(arg)  # cvWaitKey below expects an integer delay

    camera = highgui.cvCreateCameraCapture(0)

    while True:
        highgui.cvNamedWindow('Camera', 1)
        im = highgui.cvQueryFrame(camera)
        if im is None:
            break
        # mirror
        opencv.cv.cvFlip(im, None, 1)

#        positions = face.detect(im, 'haarcascade_data/haarcascade_profileface.xml')
        positions = face.detect(im, 'haarcascade_data/haarcascade_frontalface_alt2.xml')
#        if not positions:
#            positions = face.detect(im, 'haarcascade_data/haarcascade_frontalface_alt2.xml')

        # display webcam image
        highgui.cvShowImage('Camera', im)

        # Division of the screen to count as "walking" motion to trigger tux
        image_size = opencv.cvGetSize(im)
        motion_block = image_size.width / 9

        if positions:
            mp = None
            for position in positions:
                if not mp or mp['width'] > position['width']:
                    mp = position
            pos = (mp['x'] + (mp['width'] / 2)) / motion_block
            print "tux pos: %f" % tux_pos
            print "pos: %f" % pos

            if pos != tux_pos:
                if tux_pos > pos:
                    move_tux_right(tux_pos - pos)
                elif tux_pos < pos:
                    move_tux_left(pos - tux_pos)
                tux_pos = pos

        if highgui.cvWaitKey(fps) >= 0:
            highgui.cvDestroyWindow('Camera')
            sys.exit(0)
Code example #8
File: __init__.py Project: speters33w/shoebot
def ipl2cairo(iplimage):
    srcimage = opencv.cvCloneImage(iplimage)
    width = srcimage.width
    height = srcimage.height
    image = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 4)
    opencv.cvCvtColor(srcimage, image, opencv.CV_BGR2BGRA)
    buffer = numpy.fromstring(image.imageData, dtype=numpy.uint32).astype(numpy.uint32)
    buffer.shape = (image.width, image.height)
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(image)
    return cairo.ImageSurface.create_for_data(buffer, cairo.FORMAT_RGB24, width, height, width * 4)
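A usage sketch for ipl2cairo(), assuming the old highgui bindings; write_to_png is standard cairo.ImageSurface API:

from opencv import highgui

iplimage = highgui.cvLoadImage('photo.jpg')
surface = ipl2cairo(iplimage)
surface.write_to_png('photo_copy.png')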
Code example #9
def run():
    filename = 'C:\\Documents and Settings\\rmccormack\\Desktop\\work_projects\\openCV\\test\\test1.jpg'
    im = hg.cvLoadImage(filename)
    if not im:
        print "Error opening %s" % filename
        sys.exit(-1)
    im2 = opencv.cvCreateImage(opencv.cvGetSize(im), 8, 4)
    opencv.cvCvtColor(im, im2, opencv.CV_BGR2BGRA)
    buffer = numpy.fromstring(im2.imageData,
                              dtype=numpy.uint32).astype(numpy.float32)
    buffer.shape = (im2.width, im2.height)
    return buffer
Code example #10
def test_segmentation():
    rospy.wait_for_service('segment_pointcloud')
    try:
        pts3d, intensities, labels, image, image_angle, polygon = get_test_data()
        ROS_pointcloud = convert_pointcloud_to_ROS(pts3d, intensities, labels)
        ROS_image = convert_cvimage_to_ROS(image)
        imageSize = cv.cvGetSize(image)

        ROS_polygon = convert_polygon_to_ROS(polygon)

        segment_pointcloud = rospy.ServiceProxy('segment_pointcloud',
                                                Segmentation)
        request = SegmentationRequest()
        request.imageWidth = 41  # placeholder; overwritten with the real width below
        request.pointcloud = ROS_pointcloud
        request.image = ROS_image
        request.imageWidth = imageSize.width
        request.imageHeight = imageSize.height
        request.imageAngle = image_angle
        request.regionOfInterest2D = ROS_polygon
        request.laserHeightAboveGround = 1.32
        request.numberOfPointsToClassify = 1000  #-1 == all

        response = segment_pointcloud(request)

        pts3d_bound, intensities, labels = convert_ROS_pointcloud_to_pointcloud(
            response.pointcloud)
        cfg = configuration.configuration('../data/ROS_test_client/',
                                          'dummyScanner')
        pc = processor.processor(cfg)

        pc.scan_dataset = scan_dataset()
        pc.scan_dataset.image_artag_filename = ''
        pc.scan_dataset.table_plane_translation = np.matrix([0, 0, 0]).T

        pc.scan_dataset.ground_plane_translation = np.matrix(
            [0, 0, request.laserHeightAboveGround]).T
        pc.scan_dataset.ground_plane_rotation = ''
        pc.scan_dataset.is_labeled = True
        pc.scan_dataset.id = 'ROStest'
        pc.image_angle = ''
        pc.pts3d_bound = pts3d_bound
        pc.map_polys = labels
        pc.scan_dataset.ground_plane_normal = np.matrix([0., 0., 1.]).T

        from enthought.mayavi import mlab
        pc.display_3d('labels')
        mlab.show()

        return response
    except rospy.ServiceException, e:
        print "Service call failed: %s" % e
Code example #11
File: __init__.py Project: speters33w/shoebot
    def __init__(self, src="", time=None):

        self.src = src
        self.time = time
        if self.time:
            hg.cvSetCaptureProperty(self.src, hg.CV_CAP_PROP_POS_FRAMES, self.time)
        self.iplimage = hg.cvQueryFrame(self.src)
        self.width = self.iplimage.width
        self.height = self.iplimage.height
        self.image = opencv.cvCreateImage(opencv.cvGetSize(self.iplimage), 8, 4)
        opencv.cvCvtColor(self.iplimage, self.image, opencv.CV_BGR2BGRA)
        self.buffer = numpy.fromstring(self.image.imageData, dtype=numpy.uint32).astype(numpy.uint32)
        self.buffer.shape = (self.image.width, self.image.height)
        self.time = hg.cvGetCaptureProperty(self.src, hg.CV_CAP_PROP_POS_MSEC)
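A hedged usage sketch, assuming the __init__ above belongs to a class named Frame and that hg is opencv.highgui:

from opencv import highgui as hg

src = hg.cvCreateFileCapture('movie.avi')
first = Frame(src)              # grabs the next frame
print first.width, first.height, first.time
later = Frame(src, time=100)    # seeks to frame index 100 before grabbing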
Code example #12
File: __init__.py Project: speters33w/shoebot
    def detectObject(self, classifier):
        self.grayscale = opencv.cvCreateImage(opencv.cvGetSize(self.iplimage), 8, 1)
        opencv.cvCvtColor(self.iplimage, self.grayscale, opencv.CV_BGR2GRAY)
        self.storage = opencv.cvCreateMemStorage(0)
        opencv.cvClearMemStorage(self.storage)
        opencv.cvEqualizeHist(self.grayscale, self.grayscale)

        try:
            self.cascade = opencv.cvLoadHaarClassifierCascade(
                os.path.join(os.path.dirname(__file__), classifier + ".xml"),
                opencv.cvSize(1, 1))
        except Exception:
            raise AttributeError("could not load classifier file")

        self.objects = opencv.cvHaarDetectObjects(self.grayscale, self.cascade, self.storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))

        return self.objects
Code example #13
File: __init__.py Project: speters33w/shoebot
def detectHaar(iplimage, classifier):
    srcimage = opencv.cvCloneImage(iplimage)
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    opencv.cvEqualizeHist(grayscale, grayscale)
    try:
        cascade = opencv.cvLoadHaarClassifierCascade(
            os.path.join(os.path.dirname(__file__), classifier + ".xml"),
            opencv.cvSize(1, 1))
    except Exception:
        raise AttributeError("could not load classifier file")
    objs = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))
    objects = []
    for obj in objs:
        objects.append(Haarobj(obj))
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    opencv.cvReleaseMemStorage(storage)
    return objects
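A usage sketch for detectHaar(), assuming the old highgui bindings and that a classifier XML (e.g. haarcascade_frontalface_alt.xml from the OpenCV distribution) sits next to the module:

from opencv import highgui

iplimage = highgui.cvLoadImage('group_photo.jpg')
objects = detectHaar(iplimage, 'haarcascade_frontalface_alt')
print "%d object(s) detected" % len(objects)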
Code example #14
File: recogniser.py Project: 55wang/FaceRecognition
  def detect(self, pil_image, cascade_name, recogn_w=50, recogn_h=50):
    # Get cascade:
    cascade = self.get_cascade(cascade_name)

    image = opencv.PIL2Ipl(pil_image) 
    image_size = opencv.cvGetSize(image)
    grayscale = image
    if pil_image.mode == "RGB": 
      # create grayscale version
      grayscale = opencv.cvCreateImage(image_size, 8, 1)
      # Change to RGB2Gray - I don't think it'll affect the conversion
      opencv.cvCvtColor(image, grayscale, opencv.CV_BGR2GRAY)
 
    # create storage
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
 
    # equalize histogram
    opencv.cvEqualizeHist(grayscale, grayscale)
 
    # detect objects
    return opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(recogn_w, recogn_h))
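A hedged usage sketch, assuming a recogniser instance whose get_cascade() can resolve the cascade name, and PIL for image loading (opencv.PIL2Ipl is the bridge the method relies on):

import Image  # PIL

pil_image = Image.open('portrait.jpg')
faces = recogniser.detect(pil_image, 'haarcascade_frontalface_alt')
for f in faces:
    print "face at (%d, %d), size %dx%d" % (f.x, f.y, f.width, f.height)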
Code example #15
        import sys; sys.exit()
    pc.img = cv.cvCloneImage(np.array(np_img))  # only works in CV 2.0, but not cv 2.1
    pc.laserscans = []
    pc.scan_indices = np.array( range(N) )
    pc.camPts = camPts
    pc.camPts_bound = camPts[:,idx_list]
    pc.pts3d_bound = pc.pts3d[:,idx_list]
    pc.scan_indices_bound = np.array(range(N))[idx_list]
    pc.intensities_bound = pc.intensities[idx_list]
    pc.idx_list = idx_list
    ###pc.map = (pc.camPts_bound, pc.camPts, pc.idx_list, pc.pts3d_bound,pc.scan_indices_bound, pc.intensities_bound)

#create dummy surface polygon to define ROI
# [Below] These polygons will be used to generate a 'mask' cv image and used to 
# limit the area of image where 3D projected points are considered.
image_size = cv.cvGetSize(pc.img)
w = image_size.width
h = image_size.height
crop_region = (0.3, 0.3, 0.3, 0.3)  # fractions to crop from the left, right, top and bottom
surface_ROI, list_of_edges = define_ROI_polygons(w, h, crop_region) 
pc.scan_dataset.polygons.append(surface_ROI)
for p in list_of_edges: pc.scan_dataset.polygons.append(p)


''' [Below] Does 'map_laser_into_cam2D' to get camPts, and bounds on all values
    Optionally applies translate and rotate relative to a groundplane (if defined)
'''
if cfg.device != 'PR2':
    pc.do_all_point_cloud()
else:
    pc.do_all_point_cloud(map_already_exists=True)
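define_ROI_polygons() is project code not shown in this excerpt; as an illustration only, the crop fractions would map to a pixel rectangle roughly like this:

# illustration: turning crop_region fractions into pixel bounds
crop_left, crop_right, crop_top, crop_bot = crop_region
x_min = int(w * crop_left)            # e.g. 0.3 * width
x_max = int(w * (1.0 - crop_right))
y_min = int(h * crop_top)
y_max = int(h * (1.0 - crop_bot))
# the surface ROI polygon would then run (x_min, y_min) -> (x_max, y_min)
# -> (x_max, y_max) -> (x_min, y_max)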
Code example #16
    def prepare(self, features_k_nearest_neighbors, nonzero_indices=None, all_save_load=False, regenerate_neightborhood_indices=False):
        #print np.shape(self.processor.pts3d_bound), 'shape pts3d_bound'

        imgTmp = cv.cvCloneImage(self.processor.img)
        self.imNP = ut.cv2np(imgTmp, format='BGR')
        ###self.processor.map2d = np.asarray(self.processor.camPts_bound) #copied from laser to image mapping
        
        if features_k_nearest_neighbors is None or features_k_nearest_neighbors is False:  # use range search
            self.kdtree2d = kdtree.KDTree(self.processor.pts3d_bound.T)
            
            #print len(nonzero_indices)
            #print np.shape(np.asarray((self.processor.pts3d_bound.T)[nonzero_indices]))
            
            if nonzero_indices is not None:
                print ut.getTime(), 'query ball tree for ', len(nonzero_indices), 'points'
                kdtree_query = kdtree.KDTree((self.processor.pts3d_bound.T)[nonzero_indices])
            else:
                print ut.getTime(), 'query ball tree'
                kdtree_query = kdtree.KDTree(self.processor.pts3d_bound.T)
            
            filename = self.processor.config.path + '/data/' + self.processor.scan_dataset.id + '_sphere_neighborhood_indices_' + str(self.processor.feature_radius) + '.pkl'
            if all_save_load and os.path.exists(filename) and not regenerate_neightborhood_indices:
                # if it's already there, load it:
                print ut.getTime(), 'loading', filename
                self.kdtree_queried_indices = ut.load_pickle(filename)
            else:
                self.kdtree_queried_indices = kdtree_query.query_ball_tree(self.kdtree2d, self.processor.feature_radius, 2.0, 0.2)  # approximate
                print ut.getTime(), 'queried kdtree: ', len(self.kdtree_queried_indices), 'points, radius:', self.processor.feature_radius
                if all_save_load:
                    ut.save_pickle(self.kdtree_queried_indices, filename)
                    
            #make dict out of list for faster operations? (doesn't seem to change speed significantly):
            #self.kdtree_queried_indices = dict(zip(xrange(len(self.kdtree_queried_indices)), self.kdtree_queried_indices))
        
        else:  # experimental: use_20_nearest_neighbors == True
            #TODO: exclude invalid values in get_featurevector (uncomment code there)
           
            self.kdtree2d = kdtree.KDTree(self.processor.pts3d_bound.T)
            self.kdtree_queried_indices = []
            print ut.getTime(), 'kdtree single queries for kNN start, k=', features_k_nearest_neighbors
            count = 0
            for point in ((self.processor.pts3d_bound.T)[nonzero_indices]):
                count = count + 1
                result = self.kdtree2d.query(point, features_k_nearest_neighbors, 0.2, 2, self.processor.feature_radius)
                #existing = result[0][0] != np.Inf
                #print existing
                #print result[1]
                self.kdtree_queried_indices += [result[1]] #[existing]
                if count % 4096 == 0:
                    print ut.getTime(),count
            print ut.getTime(), 'kdtree single queries end'
            
            #convert to numpy array -> faster access
            self.kdtree_queried_indices = np.asarray(self.kdtree_queried_indices)
        
        #print self.kdtree_queried_indices
        #takes long to compute:
        #avg_len = 0
        #minlen = 999999
        #maxlen = 0
        #for x in self.kdtree_queried_indices:
        #    avg_len += len(x)
        #    minlen = min(minlen, len(x))
        #    maxlen = max(maxlen, len(x))
        #avg_len = avg_len / len(self.kdtree_queried_indices)
        #print ut.getTime(), "range neighbors: avg_len", avg_len, 'minlen', minlen, 'maxlen', maxlen
        
        
        #create HSV numpy images:
        # compute the hsv version of the image 
        image_size = cv.cvGetSize(self.processor.img)
        img_h = cv.cvCreateImage(image_size, 8, 1)
        img_s = cv.cvCreateImage(image_size, 8, 1)
        img_v = cv.cvCreateImage(image_size, 8, 1)
        img_hsv = cv.cvCreateImage(image_size, 8, 3)

        cv.cvCvtColor(self.processor.img, img_hsv, cv.CV_BGR2HSV)

        cv.cvSplit(img_hsv, img_h, img_s, img_v, None)
        self.imNP_h = ut.cv2np(img_h)
        self.imNP_s = ut.cv2np(img_s)
        self.imNP_v = ut.cv2np(img_v)
        
        textures = texture_features.eigen_texture(self.processor.img)
        self.imNP_tex1 = textures[:,:,0]
        self.imNP_tex2 = textures[:,:,1]
        
        self.debug_before_first_featurevector = True
        
        self.generate_voi_histogram(self.processor.point_of_interest, self.processor.voi_width)
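Assuming kdtree here refers to scipy.spatial, the ball query at the heart of prepare() boils down to the following self-contained sketch (radii and point counts are illustrative):

import numpy as np
from scipy.spatial import KDTree

pts = np.random.rand(500, 3)      # stand-in for pts3d_bound.T
tree = KDTree(pts)
query = KDTree(pts[:100])         # stand-in for the nonzero_indices subset
# for every query point: indices of tree points within radius r;
# p=2.0 is the Euclidean norm, eps=0.2 allows an approximate search,
# matching the query_ball_tree(..., 2.0, 0.2) call above
neighborhoods = query.query_ball_tree(tree, r=0.1, p=2.0, eps=0.2)
print len(neighborhoods), 'neighborhood lists'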
Code example #17
File: ball.py Project: relet/Holodeck-Minigolf
  # return to drawing routines
  ballax = ballax * FRICTION
  ballay = ballay * FRICTION
  odeBBall.setLinearVel((ballax, zvel, ballay))

  if distance((ballx, bally), (holex, holey)) < 0.25 and abs(ballax) < 5 and abs(ballay) < 5:
    resetGame()
    switchMode(MODE_DETECT_OBSTACLES)
    return

  if abs(ballax) < 0.01 and abs(ballay) < 0.01:
    switchMode(MODE_READY)


im = queryWebcam()
igray = opencv.cvCreateImage(opencv.cvGetSize(im), 8, 1)
iwhite = opencv.cvCreateImage(opencv.cvGetSize(im), 8, 1)
stor = opencv.cvCreateMemStorage(0)

lastframe = time.time()
font = pygame.font.Font(None, 20)
while 1:
    for event in pygame.event.get():
      if event.type == pygame.QUIT: exit()
      if event.type == pygame.MOUSEBUTTONDOWN:
        if mode == MODE_DETECT_UNITSQUARE:
          setIsoRect(event)
        else:
          exit()
      if event.type == pygame.KEYDOWN: keyPress(event)
    #screen.fill(black)
Code example #18
File: camera.py Project: janfrs/kwc-ros-pkg
    def undistort_img(self, image):
        if self.temp is None:
            self.temp = cv.cvCreateImage(cv.cvGetSize(image), 8, 3)
        # cv.cvUndistort2(image, self.temp, self.intrinsic_mat_cv, self.lens_distortion_radial_cv)
        cv.cvRemap(image, self.temp, self.mapx, self.mapy)
        return self.temp
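The remap tables mapx/mapy are built elsewhere in the class; a hedged sketch of how the old bindings would typically initialize them, reusing the intrinsic_mat_cv and lens_distortion_radial_cv names from the commented-out line above:

# hypothetical one-time setup for the remap tables used by undistort_img()
image_size = cv.cvGetSize(image)
self.mapx = cv.cvCreateImage(image_size, cv.IPL_DEPTH_32F, 1)
self.mapy = cv.cvCreateImage(image_size, cv.IPL_DEPTH_32F, 1)
cv.cvInitUndistortMap(self.intrinsic_mat_cv, self.lens_distortion_radial_cv,
                      self.mapx, self.mapy)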
Code example #19
    def size(self):
        return opencv.cvGetSize(self.image)
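A trivial usage sketch (the enclosing object is assumed to hold an IplImage in self.image):

sz = frame.size()  # frame: any instance of the class above
print "%d x %d" % (sz.width, sz.height)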