class TestGMMGestureModel(unittest.TestCase):
    def setUp(self):
        self.gesture_db = GestureDatabase()
        self.gesture = self.gesture_db.get_gesture_entry("come_here")
        self.D = self.gesture.dimensionality
        self.gesture.process_signal_accommodation()
        self.coordinates = self.gesture.get_merged_training_data()
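        # Scale every training coordinate by a constant factor (the exact
        # factor appears to be arbitrary for these tests).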
        for i in xrange(len(self.coordinates)):
            for j in xrange(len(self.coordinates[i])):
                self.coordinates[i][j] = self.coordinates[i][j] * 100.0

        self.__learning_system = None

    @property
    def learning_system(self):
        if self.__learning_system is None:
            self.__learning_system = GMMGestureModel.create_and_learn(
                self.D, "test_gesture", self.coordinates, STOP_TH,
                MAX_CLUSTERS)
        return self.__learning_system

    def test_basic_comparison(self):
        # Force the lazy construction of the model; the test passes if
        # learning completes without raising.
        self.assertIsNotNone(self.learning_system)

    def test_check_auto_likelihood(self):
        # copy(...) would be shallow and alias the inner lists, so copy
        # row by row before perturbing the first coordinate
        other_gesture = [list(row) for row in self.coordinates]
        other_gesture[0][0] = other_gesture[0][0] * 2.0
        self.assertGreater(
            self.learning_system.get_training_set_likelihood(),
            self.learning_system.compute_gesture_likelihood(other_gesture))

    def test_save_load_model(self):
        filename = INPUT_DIR + "/models/temp_gmm_model.yaml"
        self.learning_system.save_to_file(filename)
        loaded = GMMGestureModel.load_from_file(filename)
        loaded_training = loaded.get_training_set_data()

        self.assertEqual(len(loaded_training), len(self.coordinates))
        self.assertEqual(len(loaded_training[0]), len(self.coordinates[0]))
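
# Minimal runner sketch, assuming this test class lives in a module that
# defines STOP_TH, MAX_CLUSTERS and INPUT_DIR at module level:
if __name__ == '__main__':
    unittest.main()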
Example No. 2
def generate_images(gestures_to_paint, time_wrapping=False):
    gesture_db = GestureDatabase()
    images = []
    max_number_demos = 24
    functions = get_plot_functions()
    image_index = 0

    for gesture_entry_name in gestures_to_paint:
        # load gesture
        print "loading gesture: %s" % gesture_entry_name
        gesture = gesture_db.get_gesture_entry(gesture_entry_name)

        try:

            # data preprocessing; forward the caller's time_wrapping flag
            gesture.process_signal_accommodation(
                offset_accomodate=True,
                demo_longitude_accomodate=True,
                regular_sampling_acomodation=True,
                time_wrapping=time_wrapping)
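            # Note: the keyword spellings (accomodate / acomodation /
            # wrapping) follow the library's own API and are kept as-is.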

            for f in functions:
                #create plotting infrastructure
                fig1 = matplotlib.pyplot.figure(image_index)
                fig1.clear()
                leg = []
                #plot
                number_of_demos = min(max_number_demos,
                                      gesture.demonstration_count)
                # no model overlay in this pass (cf. the commented-out
                # model loop below)
                m = None
                (plot_key_name, leg) = f(gesture, m, fig1, number_of_demos)

                if leg is not None:
                    legend(tuple(leg))

                images.append((gesture.name, fig1))
                if m is not None:
                    imageurl = (REPORT_DIR + "/" + f.__name__ + "/" +
                                gesture_entry_name + "-" + plot_key_name +
                                ".png")
                else:
                    imageurl = (REPORT_DIR + "/" + f.__name__ + "/" +
                                gesture_entry_name + ".png")
                fig1.savefig(imageurl)
                image_index += 1
        except Exception as e:
            # Skip gestures whose preprocessing or plotting fails.
            print "skipping gesture %s: %s" % (gesture_entry_name, e)
            continue
        '''models = gesture_db.get_models_by_gesture_name(
            gesture.name + ".time.*k2time_wrapped\.gmm")
        print "loading models: %s" % str(models)
        models = [None] + models

        for m in models:
            for f in functions:
                #create plotting infrastructure
                fig1 = matplotlib.pyplot.figure(image_index)
                fig1.clear()
                leg = []
                #plot
                number_of_demos = min(max_number_demos,
                                      gesture.demonstration_count)
                (plot_key_name, leg) = f(gesture, m, fig1, number_of_demos)

                if leg is not None:
                    legend(tuple(leg))

                images.append((gesture.name, fig1))
                imageurl = (REPORT_DIR + "/" + f.__name__ + "/" +
                            gesture_entry_name + "-" + plot_key_name + ".png")
                fig1.savefig(imageurl)
                image_index += 1
        '''

    return images
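
# Hypothetical usage sketch (the gesture names below are illustrative):
#     images = generate_images(["come_here", "task_space_wave"])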
Example No. 3

    # Assumption: a previously generated report file is reloaded when it
    # already exists; otherwise an empty report is initialised below.
    if os.path.exists(report_file_name):
        (report_data, report_meta_data) = json.load(open(report_file_name))
    else:
        report_data = []
        report_meta_data = {
            "models": {},
            "positive_list": [],
            "negative_list": [],
            "experiment_datetime":
            datetime.datetime.now().strftime("%a, %d %b %Y %H:%M:%S +0000")
        }
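        # Assumption: positive_list and negative_list accumulate the names
        # of gestures classified as matches and non-matches, respectively.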
        #-------------------------EVALUATE MODELS-----------------
        for gesture_entry_name in gesture_entries_names:
            rospy.loginfo(" =========== gesture %s ===========",
                          gesture_entry_name)
            rospy.loginfo("loading gesture...")
            gesture = gesture_db.get_gesture_entry(gesture_entry_name)
            rospy.loginfo("gesture loaded.")
            rospy.loginfo("gesture signal accomodation..")
            gesture.process_signal_accommodation(
                offset_accomodate=True,
                demo_longitude_accomodate=True,
                regular_sampling_acomodation=True)
            rospy.loginfo("gesture signal accomodation.Done")

            for model_name in models_names:
                rospy.loginfo("------ model: %s ------", model_name)
                rospy.loginfo("gesture to evaluate: %s ", gesture_entry_name)

                # Cache loaded models so each one is read from disk only
                # once per report.
                if model_name not in models:
                    rospy.loginfo("loading model...")
Example No. 4
if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser(
        "%prog [ opts ]\nThis example takes a gesture, creates the GMM "
        "model in the task space and shows the results in rviz")
    parser.add_option(
        "-g", "--gesture-entry", default="task_space_wave",
        dest="gesture_entry",
        help="gesture entry to load. Default: task_space_wave")
    (options, args) = parser.parse_args()
    
    rospy.init_node('GMM_learning', anonymous=True)

    if PLOT:
        clusters_pub = rospy.Publisher("visualization_marker", Marker)
        gauss_pub = rospy.Publisher("gaussian", Marker)
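        # Rviz markers for the learned clusters and their Gaussian
        # components; the pre-queue_size rospy.Publisher signature is kept.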
    
    gesture_db = GestureDatabase()
    rospy.loginfo("loading gesture entry: %s", options.gesture_entry)
    gesture = gesture_db.get_gesture_entry(options.gesture_entry)
    #print gesture.raw_data[0]
    gesture.process_signal_accommodation(
        offset_accomodate=True,
        demo_longitude_accomodate=True,
        regular_sampling_acomodation=True,
        time_wrapping=False)
    D = gesture.dimensionality
    coordinates = gesture.get_training_data(demo_index=0, scale=1.0)
    # Note: near-collinear dimensions (e.g. elbow -> shoulder in 2D) can
    # yield singular covariance matrices during learning.

    rospy.loginfo("%s points loaded from database",str(len(coordinates[0])))
    coordinates= outlier_removal(coordinates)
    rospy.loginfo("outliers removed,  points len: %s",str(len(coordinates[0])))

    if K is None:
        # Select the number of clusters automatically, up to MAX_CLUSTERS.
        gmm_gesture_model = GMMGestureModel.create_and_learn(
            options.gesture_entry, D, coordinates, STOP_TH, MAX_CLUSTERS)
        print "K opt = " + str(K)
    else: