Example #1
# Imports assumed by this legacy snippet (they are not part of the scraped source):
import time
import rospy
from opencv import cv   # pre-cv2 SWIG OpenCV bindings
# `Image` is the ROS image message type used below; the providing package depends on the ROS release.

def main():
    # Legacy rospy API (pre-Publisher/init_node): advertise the 'image' topic and register the node.
    pub = rospy.TopicPub('image', Image)
    rospy.ready('test_send')

    # Build a 640x480 single-channel 8-bit image and set every pixel to 133.
    image = cv.cvCreateImage(cv.cvSize(640, 480), 8, 1)
    cv.cvSet(image, 133)

    # Publish the constant image at roughly 30 Hz until the node is shut down.
    while not rospy.is_shutdown():
        pub.publish(Image(None, 'test', image.width, image.height, 'none', 'mono8', image.imageData))
        time.sleep(.033)

    rospy.spin()
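
For readers who have not used the pre-cv2 API: the cvCreateImage/cvSet pair above just builds a constant 8-bit, single-channel buffer. A small numpy illustration of the same pixel data (not part of the original snippet):

import numpy as np

# 480 rows x 640 columns, one 8-bit channel, every pixel set to 133 --
# the same contents cv.cvSet(image, 133) writes into the IplImage above.
frame = np.full((480, 640), 133, dtype=np.uint8)
assert frame.shape == (480, 640) and frame[0, 0] == 133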
Example #2
def tile_images(img_width, img_height, num_width, num_height, images, channels=3):
    # Compose the tiles in `images` onto a single num_width x num_height grid.
    # Assumes the legacy pre-cv2 SWIG OpenCV bindings are available as `cv`.
    w = img_width * num_width
    h = img_height * num_height
    # white canvas large enough to hold the full grid
    image = cv.cvCreateImage(cv.cvSize(int(w), int(h)), 8, channels)
    cv.cvSet(image, cv.cvScalar(255, 255, 255))
    while len(images) > 0:
        try:
            for y in range(int(num_height)):
                for x in range(int(num_width)):
                    small_tile = images.pop()  # raises IndexError once the tiles run out
                    img_x = x * img_width
                    img_y = y * img_height
                    # copy the tile into its grid cell via a sub-rect view of the canvas
                    cropped = cv.cvGetSubRect(image, cv.cvRect(img_x, img_y, img_width, img_height))
                    cv.cvCopy(small_tile, cropped)
        except IndexError:
            break
    return image  # assumed: the scraped snippet ends at the loop without returning the canvas
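
A hypothetical usage sketch for tile_images, assuming the same legacy SWIG `cv` bindings; the tile contents are made up purely for illustration:

# build six 100x100 3-channel tiles with different constant colours
tiles = []
for i in range(6):
    tile = cv.cvCreateImage(cv.cvSize(100, 100), 8, 3)
    cv.cvSet(tile, cv.cvScalar(40 * i, 128, 255 - 40 * i))
    tiles.append(tile)

# lay them out on a 3x2 grid; note that tile_images consumes the list via pop()
canvas = tile_images(100, 100, 3, 2, tiles)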
Example #3
def __init__(self, frame):
    # Blob extraction on a single frame. Assumes the legacy SWIG OpenCV bindings
    # (`opencv`) and the cvBlobsLib wrapper that provides CBlobResult.
    # Work on a copy so the caller's image is left untouched.
    self.blob_image = opencv.cvCloneImage(frame.iplimage)
    self.blob_gray = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)
    self.blob_mask = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)
    opencv.cvSet(self.blob_mask, 1)  # enable every pixel in the blob-search mask
    # convert to grayscale, then binarise with the frame's threshold and mode
    opencv.cvCvtColor(self.blob_image, self.blob_gray, opencv.CV_BGR2GRAY)
    # alternative preprocessing kept from the original source:
    # opencv.cvEqualizeHist(self.blob_gray, self.blob_gray)
    # opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_BINARY)
    # opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_TOZERO)
    opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.bthresh, 255, frame.bthreshmode)
    # opencv.cvAdaptiveThreshold(self.blob_gray, self.blob_gray, 255, opencv.CV_ADAPTIVE_THRESH_MEAN_C, opencv.CV_THRESH_BINARY_INV)
    # extract connected components and keep only blobs in the 10-10000 size range
    self._frame_blobs = CBlobResult(self.blob_gray, self.blob_mask, 100, True)
    self._frame_blobs.filter_blobs(10, 10000)
    self.count = self._frame_blobs.GetNumBlobs()
    self.items = []
    for i in range(self.count):
        self.items.append(self._frame_blobs.GetBlob(i))
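
A hypothetical usage sketch: the constructor only needs an object exposing the iplimage, bthresh and bthreshmode attributes it reads, so a minimal stand-in frame works. FrameBlobs is a made-up name for the class that owns this __init__ (the real class name is not shown in the snippet), and opencv.cvSize / opencv.cvScalar are assumed to be exposed by the same legacy bindings:

class Frame(object):
    def __init__(self, iplimage):
        self.iplimage = iplimage                    # BGR IplImage to analyse
        self.bthresh = 100                          # binarisation threshold
        self.bthreshmode = opencv.CV_THRESH_BINARY  # mode handed straight to cvThreshold

# any 3-channel image will do for the sketch; a real frame would come from a camera or file
img = opencv.cvCreateImage(opencv.cvSize(320, 240), 8, 3)
opencv.cvSet(img, opencv.cvScalar(30, 30, 30))

blobs = FrameBlobs(Frame(img))   # FrameBlobs: hypothetical class name
print 'found %d blobs' % blobs.count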
Example #4
    def create_train_datastructures(self):
        # loop through all marked datasets
        self.processor.scan_dataset = self.processor.scans_database.get_dataset(0)

        training_set_size = 0

        data = []
        # first pass: load the per-scan feature dictionaries and add up the total training set size
        while False != self.processor.scan_dataset:
            if self.processor.scan_dataset.is_training_set:

                filename = self.processor.get_features_filename(True)
                print 'loading', filename
                dict = ut.load_pickle(filename)

                # balance the two classes: oversample whichever label (surface or clutter) is under-represented
                difference = np.sum(dict['labels'] == processor.LABEL_SURFACE) - np.sum(dict['labels'] == processor.LABEL_CLUTTER)
                #print getTime(), filename
                #print getTime(), 'surface',np.sum(dict['labels'] == LABEL_SURFACE)
                #print getTime(), 'clutter',np.sum(dict['labels'] == LABEL_CLUTTER)
                #print getTime(), difference, "difference = np.sum(dict['labels'] == LABEL_SURFACE) - np.sum(dict['labels'] == LABEL_CLUTTER)"
                #print getTime(), ''
                if difference > 0:
                    # more surface than clutter points: draw random clutter points (with replacement) to even things out
                    clutter_features = (dict['features'])[np.nonzero(dict['labels'] == processor.LABEL_CLUTTER)]
                    if len(clutter_features) > 0:  # if there are none, do nothing
                        dict['set_size'] += difference
                        dict['features'] = np.vstack((dict['features'],
                                                      clutter_features[np.random.randint(0, len(clutter_features), size=difference)]))
                        dict['labels'] = np.hstack((dict['labels'], np.ones(difference) * processor.LABEL_CLUTTER))
                elif difference < 0:
                    # more clutter than surface points: oversample surface points instead
                    surface_features = (dict['features'])[np.nonzero(dict['labels'] == processor.LABEL_SURFACE)]
                    if len(surface_features) > 0:  # if there are none, do nothing
                        difference = -difference
                        dict['set_size'] += difference
                        dict['features'] = np.vstack((dict['features'],
                                                      surface_features[np.random.randint(0, len(surface_features), size=difference)]))
                        dict['labels'] = np.hstack((dict['labels'], np.ones(difference) * processor.LABEL_SURFACE))
                    
                training_set_size += dict['set_size']
                data.append(dict)
            # advance to the next dataset
            self.processor.scan_dataset = self.processor.scans_database.get_next_dataset()
            #print getTime(),  self.scan_dataset

        # create the training set:
        self.processor.scan_dataset = self.processor.scans_database.get_dataset(0)
        current_training_set_index = 0

        feature_vector_length = len(self.processor.features.get_indexvector(self.features))
        print getTime(), feature_vector_length
        # create the dataset matrices:
        print getTime(), '#training set size ', training_set_size
        
        # size-based subsampling is deactivated for now: always use the full training set
        max_training_size = 1800000  # 2040000
        #if training_set_size < max_training_size:
        if True:
            # CvMat* cvCreateMat(int rows, int cols, int type)
            train_data = cv.cvCreateMat(training_set_size, feature_vector_length, cv.CV_32FC1)
            train_labels = cv.cvCreateMat(training_set_size, 1, cv.CV_32FC1)
            
            for dict in data:
                for index in range(dict['set_size']):
                    # only train on surface and clutter points
                    if dict['labels'][index] == processor.LABEL_SURFACE or dict['labels'][index] == processor.LABEL_CLUTTER:

                        #print getTime(), point3d
                        #print getTime(), 'fvindexv',self.get_features_indexvector(features)
                        #print getTime(), 'len', len(self.get_features_indexvector(features))
                        fv = (dict['features'][index])[self.processor.features.get_indexvector(self.features)]

                        #print getTime(), 'fv',fv
                        #print getTime(), np.shape(fv)
                        # copy the selected feature vector element-wise into the CvMat row and store its label
                        for fv_index, fv_value in enumerate(fv):
                            train_data[current_training_set_index][fv_index] = fv_value
                        train_labels[current_training_set_index] = dict['labels'][index]
                        #for fv_index, fv_value in enumerate(fv):
                        #    print getTime(), train_data[current_training_set_index][fv_index]
                        #print getTime(), '##',train_labels[current_training_set_index],'##'
                        #print getTime(), 'fv ', fv
                        #print getTime(), 'tr ',train_data[index]
                        current_training_set_index = current_training_set_index + 1

                        #if current_training_set_index % 4096 == 0:
                        #    print getTime(), 'label', dict['labels'][index], 'fv', fv
                        if current_training_set_index % 16384 == 0:
                            print getTime(), 'reading features:', current_training_set_index, 'of', training_set_size, '(', (float(current_training_set_index) / float(training_set_size) * 100.0), '%)'
    
        else:
            print getTime(), 'more than', max_training_size, 'features, sample from them...'
            # collect all usable feature vectors, then sample max_training_size of them:
            all_data = []
            all_labels = []
            for dict in data:
                for index in range(dict['set_size']):
                    if dict['labels'][index] == processor.LABEL_SURFACE or dict['labels'][index] == processor.LABEL_CLUTTER:
                        fv = (dict['features'][index])[self.processor.features.get_indexvector(self.features)]
                        all_data += [fv]
                        all_labels += [dict['labels'][index]]

                        current_training_set_index = current_training_set_index + 1
                        if current_training_set_index % 16384 == 0:
                            print getTime(), 'reading features:', current_training_set_index, 'of', training_set_size, '(', (float(current_training_set_index) / float(training_set_size) * 100.0), '%)'

            del data
            import random
            # draw max_training_size distinct sample indices without replacement
            indices = np.array(random.sample(xrange(len(all_labels)), max_training_size))
            all_data = np.asarray(all_data)
            all_labels = np.asarray(all_labels)

            all_data = all_data[indices]
            all_labels = all_labels[indices]

            # CvMat* cvCreateMat(int rows, int cols, int type)
            train_data = cv.cvCreateMat(max_training_size, feature_vector_length, cv.CV_32FC1)
            train_labels = cv.cvCreateMat(max_training_size, 1, cv.CV_32FC1)

            for index in range(max_training_size):
                for fv_index, fv_value in enumerate(all_data[index]):
                    train_data[index][fv_index] = fv_value
                train_labels[index] = all_labels[index]  # one label per row
                if index % 16384 == 0:
                    print getTime(), 'setting features:', (float(index) / float(max_training_size))

        print getTime(), 'start training Classifier'

        # variable-type mask for the OpenCV ML trainer: every feature column is numerical,
        # the final column (the class label) is categorical
        type_mask = cv.cvCreateMat(1, feature_vector_length + 1, cv.CV_8UC1)
        cv.cvSet(type_mask, cv.CV_VAR_NUMERICAL, 0)
        type_mask[feature_vector_length] = cv.CV_VAR_CATEGORICAL

        return (train_data, train_labels, type_mask)
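
The class-balancing step near the top of this example can be looked at in isolation: whichever of the two labels is under-represented gets oversampled (with replacement) until both occur equally often. A minimal numpy sketch of just that step, with placeholder label values standing in for the processor.LABEL_* constants:

import numpy as np

LABEL_SURFACE, LABEL_CLUTTER = 1, 2   # placeholder values for the sketch

def balance_classes(features, labels):
    # positive difference: surface is over-represented, so oversample clutter; negative: the reverse
    difference = np.sum(labels == LABEL_SURFACE) - np.sum(labels == LABEL_CLUTTER)
    minority = LABEL_CLUTTER if difference > 0 else LABEL_SURFACE
    minority_features = features[labels == minority]
    n_extra = abs(difference)
    if n_extra == 0 or len(minority_features) == 0:
        return features, labels
    picks = np.random.randint(0, len(minority_features), size=n_extra)
    features = np.vstack((features, minority_features[picks]))
    labels = np.hstack((labels, np.ones(n_extra) * minority))
    return features, labels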