def drawImage(image, h, w, psize):
    """
    Draw the image as a continuous line on an output surface of h by w
    "pixels", where each pixel of the input image is rendered as a block
    of psize by psize "pixels" on the output surface.

    @param  image   an OpenCV image with at least 3 channels
    @param  h       integer height of the output surface
    @param  w       integer width of the output surface
    @param  psize   amount by which each pixel of the input image is scaled up
    """
    # Work at the reduced drawing resolution, leaving a one-block margin.
    h = (h // psize) - 2
    w = (w // psize) - 2
    size = opencv.cvSize(w, h)
    scaled = opencv.cvCreateImage(size, 8, 3)
    red = opencv.cvCreateImage(size, 8, 1)
    blue = opencv.cvCreateImage(size, 8, 1)
    green = opencv.cvCreateImage(size, 8, 1)
    # Shrink the input to the drawing resolution first, then equalize each
    # channel so the drawing uses the full intensity range. Finally invert
    # the image so light areas of the input map to low values, which the
    # loop below skips as whitespace.
    opencv.cvResize(image, scaled, opencv.CV_INTER_LINEAR)
    opencv.cvSplit(scaled, blue, green, red, None)
    opencv.cvEqualizeHist(red, red)
    opencv.cvEqualizeHist(green, green)
    opencv.cvEqualizeHist(blue, blue)
    opencv.cvMerge(blue, green, red, None, scaled)
    opencv.cvNot(scaled, scaled)

    # Draw each pixel in the image. The x range is reversed after every row
    # so the line is drawn back and forth (boustrophedon) across the surface.
    xr = range(scaled.width)
    whitespace = 0  # length of the current run of skipped (blank) output
    for y in range(scaled.height):
        for x in xr:
            s = opencv.cvGet2D(scaled,y,x)
            s = [s[j] for j in range(3)]
            if (sum(s)/710.0 < 1.0/psize):
                whitespace = whitespace+psize
            else:
                if whitespace != 0:
                    line(whitespace, 6, (xr[0] > 0))
                    whitespace = 0
                drawPixel([j / 255.0 for j in s], psize, (xr[0] > 0))
        if whitespace != 0:
            line(whitespace, 6, (xr[0] > 0))
            whitespace = 0
        line(psize,2)
        xr.reverse()
        displayImage(output)
        events = pygame.event.get()
        for event in events:
            if event.type == QUIT:
                exit()
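A minimal usage sketch for the function above, assuming the legacy SWIG-based opencv bindings; highgui.cvLoadImage, the 600x800 surface and psize=10 are illustrative assumptions, and the line/drawPixel/displayImage helpers plus the pygame setup must already be in place.

from opencv import highgui

def demo_drawImage(path):
    # Load a 3-channel image with the legacy highgui bindings (assumed
    # available) and render it on a 600x800 surface, scaling each input
    # pixel to a 10x10 block of output "pixels".
    img = highgui.cvLoadImage(path)
    drawImage(img, 600, 800, 10)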
Example #2
File: cv1.py  Project: hxfabc2012/OpenQbo-1
def mask(img, img_mask):
    """Return a copy of the 3-channel image img with the single-channel
    mask img_mask ANDed onto every channel."""
    dim = img.width, img.height
    depth = img.depth
    channels = img.nChannels

    r_chan = cv.cvCreateImage(cv.cvSize(*dim), depth, 1)
    g_chan = cv.cvCreateImage(cv.cvSize(*dim), depth, 1)
    b_chan = cv.cvCreateImage(cv.cvSize(*dim), depth, 1)
    combined = cv.cvCreateImage(cv.cvSize(*dim), depth, 3)
    cv.cvSplit(img, r_chan, g_chan, b_chan, None)

    cv.cvAnd(r_chan, img_mask, r_chan)
    cv.cvAnd(g_chan, img_mask, g_chan)
    cv.cvAnd(b_chan, img_mask, b_chan)
    cv.cvMerge(r_chan, g_chan, b_chan, None, combined)
    return combined
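A hedged sketch of how the function might be driven: build a binary mask from the grayscale image and AND it onto every channel. cvCvtColor and cvThreshold are standard calls in these bindings, but the helper name and the threshold of 128 are assumptions for illustration.

def mask_bright_regions(img, thresh=128):
    # Sketch only: threshold the grayscale version of img to get a binary
    # mask, then apply it with mask(). thresh=128 is an arbitrary example.
    size = cv.cvSize(img.width, img.height)
    gray = cv.cvCreateImage(size, 8, 1)
    binary = cv.cvCreateImage(size, 8, 1)
    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.cvThreshold(gray, binary, thresh, 255, cv.CV_THRESH_BINARY)
    return mask(img, binary)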
Example #4
    def prepare(self, features_k_nearest_neighbors, nonzero_indices = None, all_save_load = False, regenerate_neightborhood_indices = False):
        #print np.shape(self.processor.pts3d_bound), 'shape pts3d_bound'

        imgTmp = cv.cvCloneImage(self.processor.img)
        self.imNP = ut.cv2np(imgTmp,format='BGR')
        ###self.processor.map2d = np.asarray(self.processor.camPts_bound) #copied from laser to image mapping
        
        if features_k_nearest_neighbors is None or features_k_nearest_neighbors is False:  # use range
            self.kdtree2d = kdtree.KDTree(self.processor.pts3d_bound.T)
            
            #print len(nonzero_indices)
            #print np.shape(np.asarray((self.processor.pts3d_bound.T)[nonzero_indices]))
            
            if nonzero_indices is not None:
                print ut.getTime(), 'query ball tree for ', len(nonzero_indices), 'points'
                kdtree_query = kdtree.KDTree((self.processor.pts3d_bound.T)[nonzero_indices])
            else:
                print ut.getTime(), 'query ball tree'
                kdtree_query = kdtree.KDTree(self.processor.pts3d_bound.T)
            
            filename = self.processor.config.path+'/data/'+self.processor.scan_dataset.id+'_sphere_neighborhood_indices_'+str(self.processor.feature_radius)+'.pkl'
            if all_save_load and os.path.exists(filename) and not regenerate_neightborhood_indices:
                # if it's already there, load it:
                print ut.getTime(), 'loading',filename
                self.kdtree_queried_indices = ut.load_pickle(filename)    
            else:
                self.kdtree_queried_indices = kdtree_query.query_ball_tree(self.kdtree2d, self.processor.feature_radius, 2.0, 0.2) #approximate
                print ut.getTime(), 'queried kdtree: ',len(self.kdtree_queried_indices),'points, radius:',self.processor.feature_radius
                if all_save_load:
                    ut.save_pickle(self.kdtree_queried_indices, filename)
                    
            #make dict out of list for faster operations? (doesn't seem to change speed significantly):
            #self.kdtree_queried_indices = dict(zip(xrange(len(self.kdtree_queried_indices)), self.kdtree_queried_indices))
        
        else:  # experimental: use_20_nearest_neighbors == True
            #TODO: exclude invalid values in get_featurevector (uncomment code there)
           
            self.kdtree2d = kdtree.KDTree(self.processor.pts3d_bound.T)
            self.kdtree_queried_indices = []
            print ut.getTime(), 'kdtree single queries for kNN start, k=', features_k_nearest_neighbors
            count = 0
            for point in ((self.processor.pts3d_bound.T)[nonzero_indices]):
                count = count + 1
                result = self.kdtree2d.query(point, features_k_nearest_neighbors,0.2,2,self.processor.feature_radius)
                #existing = result[0][0] != np.Inf
                #print existing
                #print result[1]
                self.kdtree_queried_indices += [result[1]] #[existing]
                if count % 4096 == 0:
                    print ut.getTime(),count
            print ut.getTime(), 'kdtree single queries end'
            
            #convert to numpy array -> faster access
            self.kdtree_queried_indices = np.asarray(self.kdtree_queried_indices)
        
        #print self.kdtree_queried_indices
        #takes long to compute:
        #avg_len = 0
        #minlen = 999999
        #maxlen = 0
        #for x in self.kdtree_queried_indices:
        #    avg_len += len(x)
        #    minlen = min(minlen, len(x))
        #    maxlen = max(maxlen, len(x))
        #avg_len = avg_len / len(self.kdtree_queried_indices)
        #print ut.getTime(), "range neighbors: avg_len", avg_len, 'minlen', minlen, 'maxlen', maxlen
        
        
        #create HSV numpy images:
        # compute the hsv version of the image 
        image_size = cv.cvGetSize(self.processor.img)
        img_h = cv.cvCreateImage (image_size, 8, 1)
        img_s = cv.cvCreateImage (image_size, 8, 1)
        img_v = cv.cvCreateImage (image_size, 8, 1)
        img_hsv = cv.cvCreateImage (image_size, 8, 3)
        
        cv.cvCvtColor (self.processor.img, img_hsv, cv.CV_BGR2HSV)
        
        cv.cvSplit (img_hsv, img_h, img_s, img_v, None)
        self.imNP_h = ut.cv2np(img_h)
        self.imNP_s = ut.cv2np(img_s)
        self.imNP_v = ut.cv2np(img_v)
        
        textures = texture_features.eigen_texture(self.processor.img)
        self.imNP_tex1 = textures[:,:,0]
        self.imNP_tex2 = textures[:,:,1]
        
        self.debug_before_first_featurevector = True
        
        self.generate_voi_histogram(self.processor.point_of_interest,self.processor.voi_width)
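The neighbourhood bookkeeping above reduces to two KD-tree patterns whose call signatures match scipy.spatial.KDTree: query_ball_tree for fixed-radius neighbourhoods and query for k nearest neighbours. The sketch below shows the same idea in a self-contained form; the radius, k, eps, and array shapes are illustrative assumptions, not values taken from the processor.

import numpy as np
from scipy.spatial import KDTree

def neighborhood_indices(points, query_points, radius=0.1, k=None):
    # points: (N, 3) array of 3D points; query_points: (M, 3) array.
    # radius/k/eps are illustrative defaults, not the original settings.
    tree = KDTree(points)
    if k is None:
        # Fixed-radius neighbourhoods, approximated (eps > 0) as in prepare().
        query_tree = KDTree(query_points)
        return query_tree.query_ball_tree(tree, radius, p=2.0, eps=0.2)
    # k-nearest-neighbour indices, capped at the given radius; an index equal
    # to len(points) marks "no neighbour found within the radius".
    dists, idx = tree.query(query_points, k, eps=0.2, p=2,
                            distance_upper_bound=radius)
    return idx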
Example #6
File: cv1.py  Project: hxfabc2012/OpenQbo-1
def split(image):
    img1 = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 1)
    img2 = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 1)
    img3 = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 1)
    cv.cvSplit(image, img1, img2, img3, None)
    return (img1, img2, img3)
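A hedged example of combining split() with the per-channel histogram equalisation used in drawImage above; the helper name and the choice to equalise all three planes are assumptions for illustration.

def equalize_channels(image):
    # Sketch only: split a 3-channel image, equalise each plane's histogram,
    # and merge the planes back in their original order.
    ch1, ch2, ch3 = split(image)
    for ch in (ch1, ch2, ch3):
        cv.cvEqualizeHist(ch, ch)
    out = cv.cvCreateImage(cv.cvSize(image.width, image.height), 8, 3)
    cv.cvMerge(ch1, ch2, ch3, None, out)
    return out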