コード例 #1
0
    def setUp(self):
        """Load the "come_here" gesture and prepare scaled training data.

        The merged training data is multiplied by 100 sample-by-sample; the
        scaling is done in place so the list object returned by
        get_merged_training_data keeps its identity.
        """
        self.gesture_db = GestureDatabase()
        self.gesture = self.gesture_db.get_gesture_entry("come_here")
        self.D = self.gesture.dimensionality
        self.gesture.process_signal_accommodation()
        self.coordinates = self.gesture.get_merged_training_data()
        # Scale every value in place (no new lists are created).
        for demo in self.coordinates:
            for sample_idx, value in enumerate(demo):
                demo[sample_idx] = value * 100.0

        self.__learning_system = None
コード例 #2
0
class TestGMMGestureModel(unittest.TestCase):
    """Tests for a GMMGestureModel trained on the "come_here" gesture."""

    def setUp(self):
        # Load the gesture and scale the merged training data by 100 in
        # place before any model is learned.
        self.gesture_db = GestureDatabase()
        self.gesture = self.gesture_db.get_gesture_entry("come_here")
        self.D = self.gesture.dimensionality
        self.gesture.process_signal_accommodation()
        self.coordinates = self.gesture.get_merged_training_data()
        for i in xrange(len(self.coordinates)):
            for j in xrange(len(self.coordinates[i])):
                self.coordinates[i][j] = self.coordinates[i][j] * 100.0

        self.__learning_system = None

    @property
    def learning_system(self):
        """Lazily train the GMM model on first access and cache it."""
        if self.__learning_system is None:
            self.__learning_system = GMMGestureModel.create_and_learn(
                self.D, "test_gesture", self.coordinates, STOP_TH,
                MAX_CLUSTERS)
        return self.__learning_system

    def test_basic_comparation(self):
        """Smoke test: model training must complete without raising."""
        self.learning_system

    def test_check_auto_likelihood(self):
        """Training data must score higher than a distorted copy of it."""
        from copy import deepcopy
        # Bug fix: the original used a shallow copy, so the inner per-demo
        # lists were shared and the assignment below also corrupted
        # self.coordinates (the model's own training data).
        other_gesture = deepcopy(self.coordinates)
        other_gesture[0][0] = other_gesture[0][0] * 2.0
        self.assertGreater(
            self.learning_system.get_training_set_likelihood(),
            self.learning_system.compute_gesture_likelihood(other_gesture))

    def test_save_load_model(self):
        """A model saved to YAML and reloaded keeps its training-set shape."""
        filename = INPUT_DIR + "/models/temp_gmm_model.yaml"
        self.learning_system.save_to_file(filename)
        loaded = GMMGestureModel.load_from_file(filename)
        loaded_training = loaded.get_training_set_data()

        self.assertEqual(len(loaded_training), len(self.coordinates))
        self.assertEqual(len(loaded_training[0]), len(self.coordinates[0]))
コード例 #3
0
        help=
        "models to evaluate. if no specified, all that are evaluated. Otherwise provide comma separated models names ie: -m HMMGestureModel,GMMGestureModel,Time-GMMGestureModel,DTWGestureModel"
    )
    (options, args) = parser.parse_args()

    # A gesture list is mandatory: exit early when -g/--gestures-entries was
    # not given (optparse leaves the attribute as None in that case).
    if options.gesture_entries != None:
        gesture_entries_names = options.gesture_entries.split(",")
    else:
        #default gestures to evaluate
        rospy.loginfo(
            "gestures must be specified using the parameter -g or (--gestures-entries). See help."
        )
        exit()

    rospy.loginfo("gestures to be analyzed: %s", str(gesture_entries_names))
    gesture_db = GestureDatabase()
    #gesture_entries_names=["come_here"]

    # Python 2 print statement; options.models_types is None when -m was
    # not supplied on the command line.
    print "models to load: %s" % str(options.models_types)
    # With no -m option every model in the database is evaluated; otherwise
    # only the comma-separated model types are collected, one query per type.
    if options.models_types == None:
        models_names = gesture_db.get_models_names()
    else:
        models_types_name = options.models_types.split(",")
        models_names = []
        for mt in models_types_name:
            models_names = models_names + gesture_db.get_models_names(
                model_type_name=mt)

    print "models found: %s" % str(models_names)

    #cache for avoiding load models
コード例 #4
0
def generate_images(gestures_to_paint, time_wrapping=False):
    gesture_db = GestureDatabase()
    images = []
    max_number_demos = 24
    functions = get_plot_functions()
    image_index = 0

    for gesture_entry_name in gestures_to_paint:
        #load gesture
        gesture = gesture_db.get_gesture_entry(gesture_entry_name)
        print "loading gesture: %s" % gesture_entry_name

        try:

            #data preprocessing
            gesture.process_signal_accommodation(
                offset_accomodate=True,
                demo_longitude_accomodate=True,
                regular_sampling_acomodation=True,
                time_wrapping=False)

            for f in functions:
                #create plotting infrastructure
                fig1 = matplotlib.pyplot.figure(image_index)
                fig1.clear()
                leg = []
                #plot
                number_of_demos = min(max_number_demos,
                                      gesture.demonstration_count)
                m = None
                (plot_key_name, leg) = f(gesture, m, fig1, number_of_demos)

                if leg != None:
                    legend(tuple(leg))

                images.append((gesture.name, fig1))
                if m != None:
                    imageurl = REPORT_DIR + "/" + f.__name__ + "/" + gesture_entry_name + "-" + plot_key_name + ".png"
                else:
                    imageurl = REPORT_DIR + "/" + f.__name__ + "/" + gesture_entry_name + ".png"
                fig1.savefig(imageurl)
                image_index += 1
        except:
            continue
        '''models=gesture_db.get_models_by_gesture_name(gesture.name+".time.*k2time_wrapped\.gmm")
        print "loading models: %s"+str(models)
        models= [None]+models
        
        for m in models:
            for f in functions:
                #create plotting infrastructure
                fig1 = matplotlib.pyplot.figure(image_index)
                fig1.clear()
                leg=[]
                #plot
                number_of_demos=min(max_number_demos,gesture.demonstration_count)
                (plot_key_name,leg)=f(gesture,m,fig1,number_of_demos)
                    
                if leg!=None:
                    legend(tuple(leg))
                    
                images.append((gesture.name,fig1))
                imageurl=REPORT_DIR+"/"+f.__name__+"/"+gesture_entry_name+"-"+plot_key_name+".png"
                fig1.savefig(imageurl)
                image_index+=1
        '''

    return images
コード例 #5
0
        default=None,
        dest="negative_gestures_entries",
        help=
        "comma separated negative gestures names for the threshold calculation. Example: --negative-gestures-entries come_here_for_training,clapping"
    )
    parser.add_option(
        "-m",
        "--models-types",
        default=None,
        dest="models_types",
        help=
        "if no specified, all models are generated. Otherwise provide comma separated models names ie: -m HMMGestureModel,GMMGestureModel,Time-GMMGestureModel,DTWGestureModel"
    )
    (options, args) = parser.parse_args()

    gesture_db = GestureDatabase()

    # Optional database-directory override: -d must point at an existing
    # directory (not a regular file), otherwise the script aborts.
    if options.directory != None:
        if os.path.exists(
                options.directory) and not os.path.isfile(options.directory):
            gesture_db.GESTURE_DATABASE_DIRECTORY = options.directory
        else:
            rospy.logfatal("Incorrect directory. exiting.")
            exit(-1)

    # "all" is only a display placeholder used when -m was not given.
    models_types = "all"
    if options.models_types != None:
        models_types = options.models_types.split(",")

    # Python 2 print statement.
    print "[%s] models to generate: %s" % (options.models_types,
                                           str(models_types))
                      "--loglevel",
                      default="INFO",
                      dest="loglevel",
                      help="loglevel \"INFO\" (default) or \"DEBUG\"")
    (options, args) = parser.parse_args()

    # Map the textual log level onto rospy's constants; anything other than
    # "DEBUG" falls back to INFO.
    if options.loglevel == "DEBUG":
        loglevel = rospy.DEBUG
        rospy.loginfo("DEBUG logging mode")
    else:
        loglevel = rospy.INFO

    rospy.init_node('batch_gesture_entries_from_bags')

    bags_db = BagDatabase()
    gesture_db = GestureDatabase()

    # Optional bag-directory override; must be an existing directory, not a
    # file. NOTE(review): this branch tests != "" while the similar block in
    # the model-generation script tests != None -- presumably this option's
    # default is "" here; verify against the add_option call above this view.
    if options.directory != "":
        if os.path.exists(
                options.directory) and not os.path.isfile(options.directory):
            bags_db.BAGS_DIRECTORY = options.directory
        else:
            rospy.logfatal("Incorrect directory. exiting.")
            exit(-1)

    rospy.set_param("sim_time", True)
    #files on the directory root are modeled independently
    rospy.loginfo("input bag dir: %s", bags_db.BAGS_DIRECTORY)
    #selecting config files

    root_config_files = bags_db.get_root_config_files()
コード例 #7
0
# MAIN
# -------------------------------------------------------

if __name__ == '__main__':
    from optparse import OptionParser
    parser = OptionParser("%prog [ opts ]\nThis examples take a gesture, creates the TMM model in the task space and show results in rviz")
    parser.add_option("-g","--gesture-entry", default="task_space_wave",  dest="gesture_entry", help="gesture to be get. Default: task_space_wave")
    (options, args) = parser.parse_args()
    
    rospy.init_node ('GMM_learning', anonymous = True)
    
    # NOTE(review): the two publisher lines below mix spaces and a tab for
    # indentation (Python 2 tolerates this; Python 3 would reject it).
    # Left byte-identical on purpose.
    if PLOT:
    	clusters_pub = rospy.Publisher ("visualization_marker", Marker)
	gauss_pub = rospy.Publisher ("gaussian", Marker)
    
    gesture_db=GestureDatabase()
    rospy.loginfo("loading gesture entry: %s",options.gesture_entry)
    gesture=gesture_db.get_gesture_entry(options.gesture_entry)
    #print gesture.raw_data[0]
    gesture.process_signal_accommodation(offset_accomodate=True,demo_longitude_accomodate=True,regular_sampling_acomodation=True,time_wrapping=False)
    D=gesture.dimensionality
    # Only the first demonstration is used, at its original scale.
    coordinates=gesture.get_training_data(demo_index=0,scale=1.0)
    #explain the problem with singular matrixes ie: elbow -> shoulder... 2d
    
    
    rospy.loginfo("%s points loaded from database",str(len(coordinates[0])))
    # Preprocessing: drop outlier samples before learning the model.
    coordinates= outlier_removal(coordinates)
    rospy.loginfo("outliers removed,  points len: %s",str(len(coordinates[0])))
    if K == None:
	   gmm_gesture_model = GMMGestureModel.create_and_learn(options.gesture_entry,D,coordinates,STOP_TH,MAX_CLUSTERS)