Code Example #1
    def jitter(self, i_image, i_n_examples):
        """1) Apply various random affine transforms to i_image (scale, rotation),
              where i_n_examples is the number of transformations. The affine transforms
              happen around the center of the original region of interest.
           2) Translate the roi by various values and crop the image from this region.
           3) The same normalisation, as specified by setParams, is then applied to all
              the cropped images."""

        #Store the applied transforms in the format tx ty scale angle
        o_transforms = numpy.array([0., 0., 1., 0.])

        if self.__roi is None:
            print "No region of interest - returning input image!"
            return None
        #Always return the normalised version of the input image
        image = cv.Ipl2NumPy(self.normalise(i_image))
        o_data = numpy.tile(None, (image.shape[0], image.shape[1], 1))
        o_data[:, :, 0] = image
        if i_n_examples == 0:
            return (o_data, o_transforms)
        #The rotation point should be the original roi center
        center = cv.cvPoint2D32f(self.__roi.x + self.__roi.width / 2,
                                 self.__roi.y + self.__roi.height / 2)
        angles = numpy.random.uniform(-30., 30., i_n_examples)
        scales = numpy.random.uniform(0.9, 1.1, i_n_examples)
        tx = numpy.int32(numpy.round(numpy.random.uniform(-20, 20, i_n_examples)))
        ty = numpy.int32(numpy.round(numpy.random.uniform(-20, 20, i_n_examples)))
        x = self.__roi.x + tx
        y = self.__roi.y + ty
        #FIXME: Extend valid indices to outliers due to affine transform!!!
        #Keep only the translations whose shifted roi stays inside the image
        min_x = 0; min_y = 0
        max_x = i_image.width - self.__roi.width - 1
        max_y = i_image.height - self.__roi.height - 1
        valid_x = numpy.nonzero((x >= min_x) & (x < max_x))[0]
        valid_y = numpy.nonzero((y >= min_y) & (y < max_y))[0]
        valid_idx = numpy.intersect1d(valid_x, valid_y)
        original_roi = cv.cvRect(self.__roi.x, self.__roi.y, self.__roi.width, self.__roi.height)
        if self.__rot_mat is None:
            original_rot_matrix = None
        else:
            original_rot_matrix = cv.cvCloneImage(self.__rot_mat)
        for index in valid_idx:
            params = numpy.array([tx[index], ty[index], scales[index], angles[index]])
            self.setAffineTransform(center, scales[index], angles[index])
            self.__roi.x = int(x[index])
            self.__roi.y = int(y[index])
            image = cv.Ipl2NumPy(self.normalise(i_image))
            o_data = numpy.dstack([o_data, image])
            o_transforms = numpy.vstack([o_transforms, params])
        #Restore the original region of interest
        self.__roi.x = original_roi.x
        self.__roi.y = original_roi.y
        if original_rot_matrix is None:
            self.__rot_mat = None
        else:
            self.__rot_mat = cv.cvCloneImage(original_rot_matrix)
        return (o_data, o_transforms)
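
For comparison, the same augmentation idea expressed against the modern cv2 API, where images are plain NumPy arrays (a minimal sketch, not from the original project; jitter_crops and the (x, y, w, h) roi tuple are illustrative assumptions):

import cv2
import numpy as np

def jitter_crops(image, roi, n_examples):
    # Random scale/rotation about the roi center, then a randomly
    # translated crop of the roi; returns crops plus (tx, ty, scale, angle).
    x, y, w, h = roi
    center = (x + w / 2.0, y + h / 2.0)
    crops, params = [], []
    for _ in range(n_examples):
        angle = np.random.uniform(-30.0, 30.0)
        scale = np.random.uniform(0.9, 1.1)
        tx, ty = np.random.randint(-20, 21, size=2)
        M = cv2.getRotationMatrix2D(center, angle, scale)
        warped = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
        cx, cy = x + tx, y + ty
        # Keep only crops that stay inside the image bounds.
        if 0 <= cx and cx + w < image.shape[1] and 0 <= cy and cy + h < image.shape[0]:
            crops.append(warped[cy:cy + h, cx:cx + w])
            params.append((tx, ty, scale, angle))
    return crops, params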
Code Example #2
File: laser_cam_callib.py Project: gt-ros-pkg/hrl
    def __init__(self, transFunc, seeds, deltas, names, pts, img, cam_proj_mat, cam_centers, zoom, id):
        '''
        transFunc => takes in param vector, returns homogeneous Xform
        seeds => initial param vector (1xM array)
        deltas => step sizes in param vector quantities (1xM array)
        names => parameter names for the on-screen display (length-M list)
        pts => 3D points to be mapped into img (3xN array)
        img => an OpenCV image from the camera
        cam_proj_mat => camera projection matrix
        cam_centers => camera principal point (cx, cy)
        zoom => display zoom factor
        id => dataset identifier
        '''

        self.dataset_id = id

        # Note that adaptors are deprecated...
        imgTmp = cv.cvCloneImage(img)
        imNP = cv.adaptors.Ipl2NumPy(imgTmp)
        self.height = imNP.shape[0] * zoom
        self.width = imNP.shape[1] * zoom  
        self.zoom = zoom

        pyc.size(self.width + 200, self.height)
        #pyc.size(self.height + 200, self.width + 200)
        self.pts = pts
        self.img = img
        self.vals = seeds
        self.selected = np.zeros( self.vals.shape[0] )
        self.selected[0] = 1.0
        self.dels = deltas
        self.names = names
        self.transFunc = transFunc
        self.cam_proj_mat = cam_proj_mat
        self.cam_centers = cam_centers
        self.display_laser = True
Code Example #3
File: __init__.py Project: speters33w/shoebot
def findcontours(iplimage, threshold=100):
    srcimage = opencv.cvCloneImage(iplimage)
    # create the storage area and bw image
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    # threshold
    opencv.cvThreshold(grayscale, grayscale, threshold, 255, opencv.CV_THRESH_BINARY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    # find the contours
    nb_contours, contours = opencv.cvFindContours(grayscale, storage)
    # comment this out if you do not want approximation
    contours = opencv.cvApproxPoly(contours, opencv.sizeof_CvContour, storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    # next line is for ctypes-opencv
    # contours = opencv.cvApproxPoly (contours, opencv.sizeof(opencv.CvContour), storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    conts = []
    for cont in contours.hrange():
        points = []
        for pt in cont:
            points.append((pt.x, pt.y))
        conts.append(points)
    opencv.cvReleaseMemStorage(storage)
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    return (nb_contours, conts)
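
For reference, a rough equivalent of this pipeline with the modern cv2 API (a sketch assuming a BGR uint8 input; note that cv2.findContours returns three values in OpenCV 3.x but two in 2.x and 4.x):

import cv2

def find_contours(image, threshold=100):
    # Grayscale, binary threshold, contour extraction, polygon approximation.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, threshold, 255, cv2.THRESH_BINARY)
    found = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = found[0] if len(found) == 2 else found[1]
    approx = [cv2.approxPolyDP(c, 3, True) for c in contours]
    return [[tuple(pt[0]) for pt in c] for c in approx]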
Code Example #4
File: __init__.py Project: aoloe/shoebot
def findcontours(iplimage, threshold=100):
    srcimage = opencv.cvCloneImage(iplimage)
    # create the storage area and bw image
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    # threshold
    opencv.cvThreshold(grayscale, grayscale, threshold, 255, opencv.CV_THRESH_BINARY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    # find the contours
    nb_contours, contours = opencv.cvFindContours(grayscale, storage)
    # comment this out if you do not want approximation
    contours = opencv.cvApproxPoly(contours, opencv.sizeof_CvContour, storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    # next line is for ctypes-opencv
    # contours = opencv.cvApproxPoly(contours, opencv.sizeof(opencv.CvContour), storage, opencv.CV_POLY_APPROX_DP, 3, 1)
    conts = []
    for cont in contours.hrange():
        points = []
        for pt in cont:
            points.append((pt.x, pt.y))
        conts.append(points)
    opencv.cvReleaseMemStorage(storage)
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    return (nb_contours, conts)
Code Example #5
File: __init__.py Project: aoloe/shoebot
def ipl2cairo(iplimage):
    srcimage = opencv.cvCloneImage(iplimage)
    width = srcimage.width
    height = srcimage.height
    image = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 4)
    opencv.cvCvtColor(srcimage, image, opencv.CV_BGR2BGRA)
    buffer = numpy.fromstring(image.imageData, dtype=numpy.uint32)
    buffer.shape = (image.height, image.width)  # rows first: height x width
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(image)
    return cairo.ImageSurface.create_for_data(buffer, cairo.FORMAT_RGB24, width, height, width * 4)
Code Example #6
File: __init__.py Project: speters33w/shoebot
def ipl2cairo(iplimage):
    srcimage = opencv.cvCloneImage(iplimage)
    width = srcimage.width
    height = srcimage.height
    image = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 4)
    opencv.cvCvtColor(srcimage, image, opencv.CV_BGR2BGRA)
    buffer = numpy.fromstring(image.imageData, dtype=numpy.uint32)
    buffer.shape = (image.height, image.width)  # rows first: height x width
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(image)
    return cairo.ImageSurface.create_for_data(buffer, cairo.FORMAT_RGB24, width, height, width * 4)
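
With the modern API the IplImage round trip disappears, since cv2 images are already NumPy arrays (a minimal sketch, assuming a BGR uint8 input and a pycairo build that accepts NumPy buffers; bgr_to_cairo is an illustrative name):

import cairo
import cv2
import numpy as np

def bgr_to_cairo(image):
    # Pad 24-bit BGR to 32-bit BGRA; cairo's FORMAT_RGB24 stores each pixel
    # in a 32-bit word, so the stride is width * 4.
    bgra = np.ascontiguousarray(cv2.cvtColor(image, cv2.COLOR_BGR2BGRA))
    height, width = bgra.shape[:2]
    return cairo.ImageSurface.create_for_data(bgra, cairo.FORMAT_RGB24,
                                              width, height, width * 4)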
Code Example #7
File: __init__.py Project: speters33w/shoebot
 def __init__(self, frame):
     self.blob_image = opencv.cvCloneImage(frame.iplimage)
     self.blob_gray = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)
     self.blob_mask = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)
     opencv.cvSet(self.blob_mask, 1)
     opencv.cvCvtColor(self.blob_image, self.blob_gray, opencv.CV_BGR2GRAY)
     # opencv.cvEqualizeHist(self.blob_gray, self.blob_gray)
     # opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_BINARY)
     # opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_TOZERO)
     opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.bthresh, 255, frame.bthreshmode)
     # opencv.cvAdaptiveThreshold(self.blob_gray, self.blob_gray, 255, opencv.CV_ADAPTIVE_THRESH_MEAN_C, opencv.CV_THRESH_BINARY_INV)
     self._frame_blobs = CBlobResult(self.blob_gray, self.blob_mask, 100, True)
     self._frame_blobs.filter_blobs(10, 10000)
     self.count = self._frame_blobs.GetNumBlobs()
     self.items = []
     for i in range(self.count):
         self.items.append(self._frame_blobs.GetBlob(i))
Code Example #8
File: __init__.py Project: aoloe/shoebot
 def __init__(self, frame):
     self.blob_image = opencv.cvCloneImage(frame.iplimage)
     self.blob_gray = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)
     self.blob_mask = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)
     opencv.cvSet(self.blob_mask, 1)        
     opencv.cvCvtColor(self.blob_image, self.blob_gray, opencv.CV_BGR2GRAY)
     #opencv.cvEqualizeHist(self.blob_gray, self.blob_gray)
     #opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_BINARY)	
     #opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_TOZERO)
     opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.bthresh, 255, frame.bthreshmode)
     #opencv.cvAdaptiveThreshold(self.blob_gray, self.blob_gray, 255, opencv.CV_ADAPTIVE_THRESH_MEAN_C, opencv.CV_THRESH_BINARY_INV)
     self._frame_blobs = CBlobResult(self.blob_gray, self.blob_mask, 100, True)
     self._frame_blobs.filter_blobs(10, 10000)
     self.count = self._frame_blobs.GetNumBlobs()
     self.items = []
     for i in range(self.count):
         self.items.append(self._frame_blobs.GetBlob(i)) 
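
CBlobResult comes from an external blob-analysis wrapper; a similar pass can be sketched with cv2.connectedComponentsWithStats (an approximation, not the original dependency; the 10/10000 area limits mirror the filter_blobs call above):

import cv2

def find_blobs(image, thresh=100, min_area=10, max_area=10000):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    n, labels, stats, centroids = cv2.connectedComponentsWithStats(binary)
    blobs = []
    for i in range(1, n):  # label 0 is the background
        if min_area <= stats[i, cv2.CC_STAT_AREA] <= max_area:
            blobs.append((stats[i], centroids[i]))
    return blobs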
Code Example #9
File: __init__.py Project: aoloe/shoebot
def detectHaar(iplimage, classifier):
    srcimage = opencv.cvCloneImage(iplimage)
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    opencv.cvEqualizeHist(grayscale, grayscale)    
    try:
        cascade = opencv.cvLoadHaarClassifierCascade(os.path.join(os.path.dirname(__file__), classifier + ".xml"), opencv.cvSize(1, 1))
    except Exception:
        raise AttributeError, "could not load classifier file"
    objs = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))
    objects = []    
    for obj in objs:
        objects.append(Haarobj(obj))        
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    opencv.cvReleaseMemStorage(storage)    
    return objects 
Code Example #10
File: __init__.py Project: speters33w/shoebot
def detectHaar(iplimage, classifier):
    srcimage = opencv.cvCloneImage(iplimage)
    grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)
    opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)
    storage = opencv.cvCreateMemStorage(0)
    opencv.cvClearMemStorage(storage)
    opencv.cvEqualizeHist(grayscale, grayscale)
    try:
        cascade = opencv.cvLoadHaarClassifierCascade(os.path.join(os.path.dirname(__file__), classifier + ".xml"), opencv.cvSize(1, 1))
    except Exception:
        raise AttributeError("could not load classifier file")
    objs = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))
    objects = []
    for obj in objs:
        objects.append(Haarobj(obj))
    opencv.cvReleaseImage(srcimage)
    opencv.cvReleaseImage(grayscale)
    opencv.cvReleaseMemStorage(storage)
    return objects
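
The modern counterpart uses cv2.CascadeClassifier (a minimal sketch; the classifier-path convention simply mirrors the code above):

import os
import cv2

def detect_haar(image, classifier):
    path = os.path.join(os.path.dirname(__file__), classifier + ".xml")
    cascade = cv2.CascadeClassifier(path)
    if cascade.empty():
        raise AttributeError("could not load classifier file")
    gray = cv2.equalizeHist(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    return cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2,
                                    minSize=(50, 50))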
Code Example #11
File: cv1.py Project: Atom-machinerule/OpenQbo
def clone_image(image):
    return cv.cvCloneImage(image)
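
Since a modern cv2 image is a plain NumPy array, the same clone reduces to a one-line sketch:

def clone_image(image):
    return image.copy()  # deep copy of the pixel data, like cvCloneImage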
Code Example #12
File: test_display_pr2_try1.py Project: wklharry/hrl
elif cfg.device == 'PR2bag':
    ''' Instructions: $ roslaunch pr2_lcs_helper get_server2_client.launch
                      <Opens up services which will be called>
       $ roslaunch pr2_lcs_helper laser_assembler_optical_frame_launch.launch 
       <listens to tilt_scan, provides 'assemble_scan' service, in optical frame 
        (which is actually left eye)>
       $ rosbag play <bag with tf, camera, camera_info, tilt_scan> --clock
       $ rosrun laser_camera_segmentation test_display_pr2_try1.py
    '''
    roslib.load_manifest('pr2_lcs_helper')
    import test_uv as PR2_access  #rename soon
    cloud, roscv_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = \
                                             PR2_access.fetch_PR2_data('all')
    N = len(idx_list)
    Nbound = len(idx_list[idx_list])  # idx_list is a boolean mask; this counts its True entries
    pc.img = cv.cvCloneImage(np.array(roscv_img))
    pc.laserscans = []
    pc.scan_indices = np.array(range(N))

    camPts_bound = camPts[:, idx_list]
    pts3d_bound = pc.pts3d[:, idx_list]
    scan_indices_bound = np.array(range(N))[idx_list]
    intensities_bound = pc.intensities[idx_list]
    pc.map = (camPts_bound, camPts, idx_list, pts3d_bound, scan_indices_bound,
              intensities_bound)
elif cfg.device == 'PR2':
    rospy.init_node('rosSegmentCloud')
    print 'STARTING NEW COLLECTION NODE'
    ac = acquire_pr2_data.AcquireCloud()
    ac.print_test()
    print 'FINISHED COLLECTING, SENDING DATA BACK'
Code Example #13
File: laser_cam_callib.py Project: gt-ros-pkg/hrl
    def reDraw(self):
        pyc.background(255)
        pyc.lightSpecular(255 * 30 / 640, 255 * 30 / 640, 255 * 30 / 640)
        pyc.directionalLight(255, 255, 255, 0, 0, 1)
        pyc.specular(102, 102, 102)

        # Project self.pts into image, and display it
        imgTmp = cv.cvCloneImage(self.img)
        imNP = cv.adaptors.Ipl2NumPy(scale(imgTmp, self.zoom))

        color_list = [(255, 255, 0), (255, 0, 0), (0, 255, 255), (0, 255, 0),
                      (0, 0, 255), (0, 100, 100), (100, 100, 0), (100, 0, 100),
                      (100, 200, 100), (200, 100, 100), (100, 100, 200),
                      (100, 0, 200), (0, 200, 100), (0, 100, 200),
                      (200, 0, 100), (100, 0, 100), (255, 152, 7)]

        XformPts = tr.transform(self.transFunc(self.vals), self.pts)
        camPts = self.cam_proj_mat * tr.xyzToHomogenous(XformPts)
        camPts = camPts / camPts[2]
        camPts[0] = (camPts[0] + self.cam_centers[0]) * self.zoom
        camPts[1] = (camPts[1] + self.cam_centers[1]) * self.zoom
        camPts = np.matrix(np.round(camPts), 'int')

        conditions = np.concatenate([
            camPts[0] >= 0, camPts[0] < imNP.shape[1], camPts[1] >= 0,
            camPts[1] < imNP.shape[0]
        ], 0)
        r, c = np.where(np.all(conditions, 0))
        camPts_bound = camPts[:, c.A[0]]
        x = np.asarray(self.pts[0])[0][c.A[0]]
        x = x - x.min()
        x = x / x.max() * 256  # number of colors (512 was an alternative)
        x = np.floor(x)
        x = np.asarray(np.matrix(x, 'int'))[0]
        if self.display_laser:
            map2d = np.asarray(camPts_bound[0:2])
            n, m = map2d.shape
            for i in range(0, m):
                imNP[map2d[1, i],
                     map2d[0, i], :] = [x[i], 256 - x[i],
                                        128 + x[i] / 2]  #color_list[x[i]]
        imgTmp = cv.adaptors.NumPy2Ipl(imNP)
        #imgSmall = cv.cvCreateImage(cv.cvSize(imgTmp.width/3, imgTmp.height/3), cv.IPL_DEPTH_8U, 3)
        #cv.cvResize(imgTmp, imgSmall, cv.CV_INTER_AREA)
        im = cv.adaptors.Ipl2PIL(imgTmp)

        #pyc.rotate(math.radians(90))
        pyc.image(im, 0, 0, self.width, self.height)
        #pyc.rotate(math.radians(-90))

        # Display the current values of the parameter vector (and highlight the selected one)
        pyc.textSize(10)
        for i, val in enumerate(self.vals):
            if np.nonzero(self.selected)[0] == i:
                print 'x',
            print '%8.4f ' % self.vals[i],
            pval = '%7s: %8.4f' % (self.names[i], self.vals[i])
            pyc.text(pval, self.width + 15, 20 + 20 * i, 0)
            if np.nonzero(self.selected)[0] == i:
                pyc.fill(255, 0, 0)
                pyc.quad(self.width + 4.0, 15.0 + 20.0 * i - 7.0,
                         self.width + 4.0, 15.0 + 20.0 * i + 7.0,
                         self.width + 13.0, 15.0 + 20.0 * i, self.width + 13.0,
                         15.0 + 20.0 * i)
        print '\n'

        self.move(pyc.escape_handler(pyc.draw()))
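
Stripped of the drawing code, the projection in reDraw is a standard pinhole mapping; the following isolates it (a minimal sketch with illustrative names, assuming a 3x4 projection matrix, 3xN points, and the principal point in centers):

import numpy as np

def project_points(proj_mat, pts3d, centers, zoom=1.0):
    homog = np.vstack([pts3d, np.ones((1, pts3d.shape[1]))])
    cam = np.dot(np.asarray(proj_mat), homog)  # 3xN, unnormalised
    cam = cam / cam[2]                         # perspective divide by depth
    cam[0] = (cam[0] + centers[0]) * zoom      # shift by principal point
    cam[1] = (cam[1] + centers[1]) * zoom
    return np.round(cam[:2]).astype(int)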
Code Example #14
File: cv1.py Project: Atom-machinerule/OpenQbo
def dilate(cv_image, n_times=1):
    dst = cv.cvCloneImage(cv_image)
    cv.cvDilate(cv_image, dst, None, n_times)
    return dst
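
For reference, dilation with the modern cv2 API (a minimal sketch; in the old API a None structuring element meant a 3x3 rectangle, which the explicit kernel below reproduces):

import cv2
import numpy as np

def dilate(image, n_times=1):
    kernel = np.ones((3, 3), np.uint8)
    return cv2.dilate(image, kernel, iterations=n_times)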
Code Example #15
    #ac.print_test()
    print 'FINISHED COLLECTING, SENDING DATA BACK'
               
    np_img, pc.pts3d, pc.intensities, camPts, colors, idx_list = ac.return_data_for_segmentation() 
        # >> return self.img, self.pts3d, self.intensities, 
        #        self.camPts, self.colors, self.camPts_idx
    pc.pts3d = np.array(pc.pts3d)
    N = len(idx_list)
    Nbound = len( idx_list[idx_list] )
    print 'Camera view contains', Nbound, 'of', N, '3D points within its field of view.\n' 
    if Nbound == 0:
        print 'WARNING: Cannot start segmentation if no 3D points overlap image. Exiting.'
        print 'This can happen if the pointcloud is empty [], if the transformation is wrong,'
        print 'or if the camera and scanner are not pointed in the same direction.'
        import sys; sys.exit()
    pc.img = cv.cvCloneImage(np.array(np_img))  # only works in CV 2.0, but not cv 2.1
    pc.laserscans = []
    pc.scan_indices = np.array( range(N) )
    pc.camPts = camPts
    pc.camPts_bound = camPts[:,idx_list]
    pc.pts3d_bound = pc.pts3d[:,idx_list]
    pc.scan_indices_bound = np.array(range(N))[idx_list]
    pc.intensities_bound = pc.intensities[idx_list]
    pc.idx_list = idx_list
    ###pc.map = (pc.camPts_bound, pc.camPts, pc.idx_list, pc.pts3d_bound,pc.scan_indices_bound, pc.intensities_bound)

#create dummy surface polygon to define ROI
# [Below] These polygons will be used to generate a 'mask' cv image and used to 
# limit the area of image where 3D projected points are considered.
image_size = cv.cvGetSize(pc.img)
w = image_size.width
Code Example #16
    def prepare(self,
                features_k_nearest_neighbors,
                nonzero_indices=None,
                all_save_load=False,
                regenerate_neightborhood_indices=False):
        #print np.shape(self.processor.pts3d_bound), 'shape pts3d_bound'

        imgTmp = cv.cvCloneImage(self.processor.img)
        self.imNP = ut.cv2np(imgTmp, format='BGR')
        ###self.processor.map2d = np.asarray(self.processor.camPts_bound) #copied from laser to image mapping

        if features_k_nearest_neighbors in (None, False):  # use range-based neighborhoods
            self.kdtree2d = kdtree.KDTree(self.processor.pts3d_bound.T)

            #print len(nonzero_indices)
            #print np.shape(np.asarray((self.processor.pts3d_bound.T)[nonzero_indices]))

            if nonzero_indices is not None:
                print ut.getTime(), 'query ball tree for ', len(
                    nonzero_indices), 'points'
                kdtree_query = kdtree.KDTree(
                    (self.processor.pts3d_bound.T)[nonzero_indices])
            else:
                print ut.getTime(), 'query ball tree'
                kdtree_query = kdtree.KDTree(self.processor.pts3d_bound.T)

            filename = self.processor.config.path + '/data/' + self.processor.scan_dataset.id + '_sphere_neighborhood_indices_' + str(
                self.processor.feature_radius) + '.pkl'
            if all_save_load == True and os.path.exists(
                    filename) and regenerate_neightborhood_indices == False:
                #if its already there, load it:
                print ut.getTime(), 'loading', filename
                self.kdtree_queried_indices = ut.load_pickle(filename)
            else:
                self.kdtree_queried_indices = kdtree_query.query_ball_tree(
                    self.kdtree2d, self.processor.feature_radius, 2.0,
                    0.2)  #approximate
                print ut.getTime(), 'queried kdtree: ', len(
                    self.kdtree_queried_indices
                ), 'points, radius:', self.processor.feature_radius
                if all_save_load == True:
                    ut.save_pickle(self.kdtree_queried_indices, filename)

            #make dict out of list for faster operations? (doesn't seem to change speed significantly):
            #self.kdtree_queried_indices = dict(zip(xrange(len(self.kdtree_queried_indices)), self.kdtree_queried_indices))

        else:  # experimental: use k nearest neighbors
            #TODO: exclude invalid values in get_featurevector (uncomment code there)

            self.kdtree2d = kdtree.KDTree(self.processor.pts3d_bound.T)
            self.kdtree_queried_indices = []
            print ut.getTime(
            ), 'kdtree single queries for kNN start, k=', features_k_nearest_neighbors
            count = 0
            for point in ((self.processor.pts3d_bound.T)[nonzero_indices]):
                count = count + 1
                result = self.kdtree2d.query(point,
                                             features_k_nearest_neighbors, 0.2,
                                             2, self.processor.feature_radius)
                #existing = result[0][0] != np.Inf
                #print existing
                #print result[1]
                self.kdtree_queried_indices += [result[1]]  #[existing]
                if count % 4096 == 0:
                    print ut.getTime(), count
            print ut.getTime(), 'kdtree single queries end'

            #convert to numpy array -> faster access
            self.kdtree_queried_indices = np.asarray(
                self.kdtree_queried_indices)

        #print self.kdtree_queried_indices
        #takes long to compute:
        #avg_len = 0
        #minlen = 999999
        #maxlen = 0
        #for x in self.kdtree_queried_indices:
        #    avg_len += len(x)
        #    minlen = min(minlen, len(x))
        #    maxlen = max(maxlen, len(x))
        #avg_len = avg_len / len(self.kdtree_queried_indices)
        #print ut.getTime(), "range neighbors: avg_len", avg_len, 'minlen', minlen, 'maxlen', maxlen

        #create HSV numpy images:
        # compute the hsv version of the image
        image_size = cv.cvGetSize(self.processor.img)
        img_h = cv.cvCreateImage(image_size, 8, 1)
        img_s = cv.cvCreateImage(image_size, 8, 1)
        img_v = cv.cvCreateImage(image_size, 8, 1)
        img_hsv = cv.cvCreateImage(image_size, 8, 3)

        cv.cvCvtColor(self.processor.img, img_hsv, cv.CV_BGR2HSV)

        cv.cvSplit(img_hsv, img_h, img_s, img_v, None)
        self.imNP_h = ut.cv2np(img_h)
        self.imNP_s = ut.cv2np(img_s)
        self.imNP_v = ut.cv2np(img_v)

        textures = texture_features.eigen_texture(self.processor.img)
        self.imNP_tex1 = textures[:, :, 0]
        self.imNP_tex2 = textures[:, :, 1]

        self.debug_before_first_featurevector = True

        self.generate_voi_histogram(self.processor.point_of_interest,
                                    self.processor.voi_width)
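
The neighborhood queries above follow scipy's KDTree pattern; isolated, the core calls look like this (a minimal sketch with synthetic data, assuming scipy.spatial; the kdtree wrapper used above adds extra approximation arguments):

import numpy as np
from scipy import spatial

points = np.random.rand(1000, 3)        # one 3D point per row
tree = spatial.KDTree(points)
query = spatial.KDTree(points[:10])
# All tree points within a radius of each query point:
neighbor_lists = query.query_ball_tree(tree, r=0.1)
# The k nearest neighbours of a single point:
dists, idx = tree.query(points[0], k=20)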
Code Example #17
def convert_cvimage_to_ROS(image):
    imgTmp = cv.cvCloneImage(image)
    imNP = ut.cv2np(imgTmp, format='BGR')
    ROS_image = np.reshape(imNP, (1, -1))[0]
    return ROS_image
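
In current ROS code this conversion is usually done with cv_bridge rather than a manual reshape (a minimal sketch, assuming a NumPy BGR image and a ROS environment with cv_bridge installed):

from cv_bridge import CvBridge

def convert_cvimage_to_ros_msg(image):
    # Returns a sensor_msgs/Image message wrapping the BGR pixel data.
    return CvBridge().cv2_to_imgmsg(image, encoding="bgr8")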
Code Example #18
File: cv1.py Project: hxfabc2012/OpenQbo-1
def morpho_open(cv_image, n_times=1):
    dst = cv.cvCloneImage(cv_image)
    dst2 = cv.cvCloneImage(cv_image)
    cv.cvErode(cv_image, dst, None, n_times)
    cv.cvDilate(dst, dst2, None, n_times)
    return dst2
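
The erode-then-dilate pair is exactly a morphological opening, which modern cv2 exposes directly (a minimal sketch reproducing the old None kernel as a 3x3 rectangle):

import cv2
import numpy as np

def morpho_open(image, n_times=1):
    kernel = np.ones((3, 3), np.uint8)
    return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel, iterations=n_times)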
Code Example #19
File: cv1.py Project: hxfabc2012/OpenQbo-1
def clone_image(image):
    return cv.cvCloneImage(image)
Code Example #20
File: cv1.py Project: hxfabc2012/OpenQbo-1
def erode(cv_image, n_times=1):
    dst = cv.cvCloneImage(cv_image)
    cv.cvErode(cv_image, dst, None, n_times)
    return dst
Code Example #21
File: cv1.py Project: hxfabc2012/OpenQbo-1
def dilate(cv_image, n_times=1):
    dst = cv.cvCloneImage(cv_image)
    cv.cvDilate(cv_image, dst, None, n_times)
    return dst
Code Example #22
File: cv1.py Project: Atom-machinerule/OpenQbo
def morpho_open(cv_image, n_times=1):
    dst  = cv.cvCloneImage(cv_image)
    dst2 = cv.cvCloneImage(cv_image)
    cv.cvErode(cv_image, dst, None, n_times)
    cv.cvDilate(dst, dst2, None, n_times)
    return dst2
Code Example #23
File: cv1.py Project: Atom-machinerule/OpenQbo
def erode(cv_image, n_times=1):
    dst = cv.cvCloneImage(cv_image)
    cv.cvErode(cv_image, dst, None, n_times)
    return dst