Exemple #1
0
	def watch (self):

		jvids_map = {	'r': 'observe_raw',
						'm': 'observe_marked',
						's': 'observe_synced'
		}


		print_message ("Select a jvid file to play: ")
		print "	[ r ]: raw recording\n"
		print "	[ m ]: with pops marked\n"
		print "	[ s ]: synchronized\n"
		selection = raw_input ("--->")
		
		if not selection in jvids_map.keys ():
			print_message ("Error: didn't recognize that option")
			return self.watch ()

		else:
			
			### Step 1: change into the correct directory ###
			os.chdir (os.path.join (os.getcwd(), "Bin"))

			### Step 2: build the correct command and use it ###
			system_command = "./x64-release/Autosynch " + jvids_map[selection] + " " + self.selected_recording.full_filepath
			os.system (system_command)

			### Step 4: change back into the original directory ###
			os.chdir (os.path.join (os.getcwd(), ".."))	

			return
Exemple #2
0
	def select_recording (self):
		"""
		Prompt the user to open an existing recording or create a new one,
		setting self.selected_recording accordingly.

		An unknown recording name re-prompts recursively.  Any answer other
		than 'y' or 'n' leaves selected_recording untouched.
		"""
		recording_names = [str(r) for r in self.all_recordings]

		### Step 1: ask them what they want to do ###
		print_message ("Would you like to work with an existing recording? (y/n)")
		answer = raw_input ("---> ")

		if answer == 'y':
			self.list_recordings ()
			print_message ("Enter the name of the recording you wish to work with")
			recording_name = raw_input ("---> ")
			# check against the names of the loaded recordings
			# (was: undefined global 'existing_recordings')
			if recording_name not in recording_names:
				print_message ("Error: the recording you entered, " + recording_name + ", doesn't exist yet.")
				return self.select_recording ()
			else:
				print_message ("Opening " + recording_name)
				index = recording_names.index (recording_name)
				self.selected_recording = self.all_recordings[index]

		if answer == 'n':
			print_message ("Enter the name of the new recording")
			recording_name = raw_input ("---> ")
			self.selected_recording = self.create_new_recording (recording_name)
Exemple #3
0
    def init_preprocess (self):
        """
        Enter preprocessing mode: load stored Meme objects from pickle,
        print their stats, then filter them with Preprocess.filter_memes.
        """
        print_message("Entering preprocessing mode")

        ### Step 1: load in all memes ###
        print_status ("Initialization", "Loading Meme Objects (i.e. json or pickle, not feature representations yet)") 
        # self.get_memes ()             # from json
        self.load_memes ()              # from pkl
        self.print_memes_stats ()

        ### Step 2: filter them ###
        # presumably filters self.memes in place — confirm against Preprocess
        Preprocess.filter_memes (self.memes)
Exemple #4
0
    def record_main (self):
        """
        Record-mode menu loop: prompt the user, exit the program on 'q'
        (case-insensitive), otherwise record a new gesture.
        """
        while (True):
            print_message ("What would you like to do?")
            print " - R: record a new gesture"
            print " - Q: quit"
            response = raw_input ("---> ")
            response = response.lower ()

            if response == 'q':
                exit ()
            else:
                # NOTE(review): any response other than 'q' (not only 'r')
                # starts a recording — confirm intended
                self.record_gesture ()
Exemple #5
0
    def record_main(self):
        """
        Record-mode menu loop: prompt the user, exit the program on 'q'
        (case-insensitive), otherwise record a new gesture.
        """
        while (True):
            print_message("What would you like to do?")
            print " - R: record a new gesture"
            print " - Q: quit"
            response = raw_input("---> ")
            response = response.lower()

            if response == 'q':
                exit()
            else:
                # NOTE(review): any response other than 'q' (not only 'r')
                # starts a recording — confirm intended
                self.record_gesture()
Exemple #6
0
	def list_recordings (self):
		"""
		Print the names of all existing recordings, roughly two per line.

		NOTE(review): iterates the name 'existing_recordings', which is not
		defined in this method — presumably a module-level list; confirm.
		"""
		print_message ("########## Exising recordings: ##########")

		num_printed = 0
		for recording in existing_recordings:
			# trailing comma suppresses print's automatic newline (Python 2)
			print recording,
			if num_printed % 2 == 0:
				print "\n",
			else:
				print "				",
			num_printed += 1
		print "\n"
Exemple #7
0
    def synth_main(self):
        """
        Continuous gesture-recognition loop.

        Streams frames into a sliding Gesture window; whenever the window
        fills, classifies it and — if a prediction comes back — forwards the
        gesture and the current hand state to the Max interface.  Runs
        forever.
        """
        ### Step 1: start the max patch ###
        self.max_interface.send_gesture('Start')

        ### Step 2: initialize local data ###
        print_message("Entering Main Loop: Continuous Gesture Recognition")
        self.gesture_recognizer.load_model()
        observed_gesture = Gesture()

        ### Step 3: enter main loop ###
        while (True):

            ### Step 1: add the current frame to observed_gesture ###
            frame = self.get_frame()
            observed_gesture.add_frame(frame)

            ### Step 2: get position and orientation (returns (None, None) if not a fist) ###
            # palm_position / palm_orientation are currently unused below
            (palm_position,
             palm_orientation) = self.get_position_and_orientation(frame)

            ### Step 3: Get the gesture, if appropriate ###
            send_gesture = None

            if observed_gesture.is_full():
                # heartbeat marker: one 'x' per full window examined
                print 'x'

                classification_results = self.gesture_recognizer.classify_gesture(
                    observed_gesture)
                if classification_results:
                    # results are indexed as (prediction, probability)
                    prediction = classification_results[0]
                    prediction_prob = classification_results[1]
                    print_message("Prediction: " + str(prediction) +
                                  " | Probability: " + str(prediction_prob))
                    send_gesture = prediction
                    # restart the window after a successful classification
                    observed_gesture.clear()

            ### Step 4: send a gesture to max if one was observed ###
            if send_gesture:
                self.max_interface.send_gesture(send_gesture)

            ### Step 5: Send hand state to max if one was observed ###
            if len(frame.hands) > 0:
                self.max_interface.send_hand_state(frame.hands[0])
Exemple #8
0
    def synth_main (self):
        
        ### Step 1: start the max patch ###
        self.max_interface.send_gesture ('Start')

        ### Step 2: initialize local data ###
        print_message ("Entering Main Loop: Continuous Gesture Recognition")
        self.gesture_recognizer.load_model ()
        observed_gesture = Gesture ()

        ### Step 3: enter main loop ###
        while (True):

            ### Step 1: add the current frame to observed_gesture ###
            frame = self.get_frame ()
            observed_gesture.add_frame (frame)

            ### Step 2: get position and orientation (returns (None, None) if not a fist) ###
            (palm_position, palm_orientation) = self.get_position_and_orientation (frame)

            ### Step 3: Get the gesture, if appropriate ###
            send_gesture = None

            if observed_gesture.is_full ():
                print 'x'

                classification_results = self.gesture_recognizer.classify_gesture (observed_gesture)
                if classification_results:
                    prediction = classification_results [0]
                    prediction_prob = classification_results [1]
                    print_message("Prediction: " + str(prediction) + " | Probability: " + str(prediction_prob))
                    send_gesture = prediction
                    observed_gesture.clear ()



            ### Step 4: send a gesture to max if one was observed ###
            if send_gesture:
                self.max_interface.send_gesture (send_gesture)

            ### Step 5: Send hand state to max if one was observed ###
            if len(frame.hands) > 0:
                self.max_interface.send_hand_state (frame.hands[0])
Exemple #9
0
    def record_gesture(self):
        """
        Record max_examples examples of a user-named gesture.

        For each example: counts down, starts a recording on the recognizer,
        captures frames_per_example frames, and stops the recording.
        self.is_recording is raised/lowered around each capture.
        """
        num_examples_recorded = 0
        max_examples = 20
        frames_per_example = 100

        ### Step 1: have them name the gesture ###
        print_message("What is this gesture called?")
        gesture_name = raw_input("---> ")
        print_message("Now we will begin recording " + str(max_examples) +
                      " examples of this gesture. Press Enter when ready.")
        sys.stdin.readline()

        while (num_examples_recorded < max_examples):

            ### Step 2: start the recording ###
            self.record_countdown()
            self.gesture_recognizer.start_recording_gesture(gesture_name)
            self.is_recording = True

            ### Step 3: capture frames_per_example frames ###
            # (was: hard-coded 100, silently ignoring frames_per_example)
            num_frames_recorded = 0
            while (num_frames_recorded < frames_per_example):
                frame = self.get_frame()
                self.gesture_recognizer.add_frame_to_recording(frame)
                num_frames_recorded += 1

            ### Step 4: stop the recording ###
            print_message("### Recording Complete ###")
            self.gesture_recognizer.stop_recording_gesture()
            self.is_recording = False
            num_examples_recorded += 1
Exemple #10
0
    def record_gesture (self):
        """
        Record max_examples examples of a user-named gesture.

        For each example: counts down, starts a recording on the recognizer,
        captures frames_per_example frames, and stops the recording.
        self.is_recording is raised/lowered around each capture.
        """
        num_examples_recorded = 0
        max_examples = 20
        frames_per_example = 100

        ### Step 1: have them name the gesture ###
        print_message ("What is this gesture called?")
        gesture_name = raw_input("---> ")
        print_message ("Now we will begin recording " + str(max_examples) + " examples of this gesture. Press Enter when ready.")
        sys.stdin.readline ()

        while (num_examples_recorded < max_examples):

            ### Step 2: start the recording ###
            self.record_countdown ()
            self.gesture_recognizer.start_recording_gesture (gesture_name)
            self.is_recording = True

            ### Step 3: capture frames_per_example frames ###
            # (was: hard-coded 100, silently ignoring frames_per_example)
            num_frames_recorded = 0
            while (num_frames_recorded < frames_per_example):
                frame = self.get_frame ()
                self.gesture_recognizer.add_frame_to_recording (frame)
                num_frames_recorded += 1

            ### Step 4: stop the recording ###
            print_message ("### Recording Complete ###")
            self.gesture_recognizer.stop_recording_gesture ()
            self.is_recording = False
            num_examples_recorded += 1
Exemple #11
0
    def synth_main (self):
        """
        Discrete-event loop: records a fixed 100-frame window of pose
        features, then asks the recognizer to classify it.  Runs forever;
        press Enter between takes.
        """
        print_message ("Recording Discrete events")
        while (True):

            ### Step 1: initialize the gesture ###
            observed_gesture = []

            ### Step 2: start the recording ###
            self.record_countdown ()

            ### Step 3: fill it with frames ###
            num_frames_recorded = 0
            while (num_frames_recorded < 100):
                frame = self.get_frame ()
                pose = Pose(frame)
                observed_gesture.append (pose.features)
                num_frames_recorded += 1

            ### Step 4: stop the recording and classify ###
            print_message ("### Recording Complete ###")
            # pass the recorded features to the classifier
            # (was: classify_gesture("") — the recording was silently discarded)
            self.gesture_recognizer.classify_gesture (observed_gesture)

            print_message("enter to continue")
            sys.stdin.readline ()
Exemple #12
0
	def select_operation (self):

		operations_map = {	'r': self.record,
							't': self.train,
							's': self.synchronize,
							'w': self.watch,
							'q': exit
		}


		print_message ("Select an operation to apply to this file")
		print "	[ r ]: (re-)record something for this file\n"
		print "	[ t ]: train a classifier based on this recording\n"
		print "	[ s ]: mark and synchronize this recording\n"
		print "	[ w ]: watch one of the recorings\n"
		print "	[ q ]: quit\n"
		selected_operation = raw_input ("--->")

		if not selected_operation in operations_map.keys ():
			print_message ("Error: didn't recognize that operation")
			return self.select_operation ()
		else:
			if selected_operation == 'q':
				print_message ("### EXITING PROGRAM ###")
				exit ()
			else:
				return operations_map[selected_operation]
Exemple #13
0
	def run (self):
		"""
		Main entry point for the selected recording: records content if the
		file is new, then runs one user-selected operation on it.
		"""
		### Step 1: record something if the file is new ###
		if self.selected_recording.is_new ():
			print_message ("Beginning recording")
			self.record ()
			print_message ("Finished recording")

		### Step 2: have the user select an operation to apply to the file ###
		# select_operation returns a bound method (or exits the program)
		selected_operation = self.select_operation ()

		print_message ("Beginning operation: " + selected_operation.__name__)
		selected_operation ()
		print_message ("Finished operation: " + selected_operation.__name__)
Exemple #14
0
    def synth_main(self):
        """
        Discrete-event loop: records a fixed 100-frame window of pose
        features, then asks the recognizer to classify it.  Runs forever;
        press Enter between takes.
        """
        print_message("Recording Discrete events")
        while (True):

            ### Step 1: initialize the gesture ###
            observed_gesture = []

            ### Step 2: start the recording ###
            self.record_countdown()

            ### Step 3: fill it with frames ###
            num_frames_recorded = 0
            while (num_frames_recorded < 100):
                frame = self.get_frame()
                pose = Pose(frame)
                observed_gesture.append(pose.features)
                num_frames_recorded += 1

            ### Step 4: stop the recording and classify ###
            print_message("### Recording Complete ###")
            # pass the recorded features to the classifier
            # (was: classify_gesture("") — the recording was silently discarded)
            self.gesture_recognizer.classify_gesture(observed_gesture)

            print_message("enter to continue")
            sys.stdin.readline()
Exemple #15
0
    def record_gesture (self):
        """
        Record max_examples examples of a user-named gesture, using Enter on
        stdin (polled non-blockingly via select) to arm each recording.

        Prints '.' per frame while recording and 'x' while idle; a full take
        is saved via gesture_recognizer.save_gesture.

        NOTE(review): the same 'record_gesture' Gesture object is reused for
        every example and is never cleared between saves — confirm whether
        Gesture.add_frame on a full gesture rotates the window as intended.
        """
        num_examples_recorded = 0
        max_examples = 10

        ### Step 1: have them name the gesture ###
        print_message ("What is this gesture called?")
        gesture_name = raw_input("---> ")
        print_message ("Now we will begin recording " + str(max_examples) + " examples of this gesture, " + str(gesture_name) + ". Press Enter when ready.")
        sys.stdin.readline ()

        record_gesture = Gesture (gesture_name)



        #--- initialize parameters ---
        is_recording        = False
        num_frames_recorded = 0

        while (num_examples_recorded < max_examples):

            # frames are added even while not recording — the gesture acts
            # as a rolling buffer until the user arms the recording
            frame = self.get_frame ()
            record_gesture.add_frame (frame)

            if record_gesture.is_full ():


                ### --- Notify of recording status --- ###
                if is_recording:
                    print "."
                    num_frames_recorded += 1
                else:
                    print "x"

                ### --- Check if we should end the recording --- ###
                if num_frames_recorded >= record_gesture.gesture_length:
                    print_message ("### Recording Complete ###")
                    is_recording = False
                    num_frames_recorded = 0
                    num_examples_recorded += 1
                    self.gesture_recognizer.save_gesture(record_gesture)

                ### --- Check if we should start the recording --- ### 
                # non-blocking poll: any pending line on stdin arms recording
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                  line = sys.stdin.readline()
                  if line:
                    print_message ("### Started Recording ###")
                    is_recording = True
Exemple #16
0
    def interface_main (self):

        viable_options =['r', 't', 's']

        ### Step 1: get their requested mode ###
        print_message ("What mode would you like to enter?")
        print " - R: record mode"
        print " - T: train mode"
        print " - S: synth mode"
        response = raw_input ("---> ")
        response = response.lower ()

        if response == 'r':
            while (True):
                self.record_main ()
        elif response == 't':
            self.train_main ()
        elif response == 's':
            while (True):
                self.synth_main ()
        else:
            print_message("Error: did not recognize that option")
            self.interface_main ()
Exemple #17
0
    def interface_main(self):

        viable_options = ['r', 't', 's']

        ### Step 1: get their requested mode ###
        print_message("What mode would you like to enter?")
        print " - R: record mode"
        print " - T: train mode"
        print " - S: synth mode"
        response = raw_input("---> ")
        response = response.lower()

        if response == 'r':
            while (True):
                self.record_main()
        elif response == 't':
            self.train_main()
        elif response == 's':
            while (True):
                self.synth_main()
        else:
            print_message("Error: did not recognize that option")
            self.interface_main()
Exemple #18
0
    def record_gesture(self):
        """
        Record max_examples examples of a user-named gesture, using Enter on
        stdin (polled non-blockingly via select) to arm each recording.

        Prints '.' per frame while recording and 'x' while idle; a full take
        is saved via gesture_recognizer.save_gesture.

        NOTE(review): the same 'record_gesture' Gesture object is reused for
        every example and is never cleared between saves — confirm whether
        Gesture.add_frame on a full gesture rotates the window as intended.
        """
        num_examples_recorded = 0
        max_examples = 10

        ### Step 1: have them name the gesture ###
        print_message("What is this gesture called?")
        gesture_name = raw_input("---> ")
        print_message("Now we will begin recording " + str(max_examples) +
                      " examples of this gesture, " + str(gesture_name) +
                      ". Press Enter when ready.")
        sys.stdin.readline()

        record_gesture = Gesture(gesture_name)

        #--- initialize parameters ---
        is_recording = False
        num_frames_recorded = 0

        while (num_examples_recorded < max_examples):

            # frames are added even while not recording — the gesture acts
            # as a rolling buffer until the user arms the recording
            frame = self.get_frame()
            record_gesture.add_frame(frame)

            if record_gesture.is_full():

                ### --- Notify of recording status --- ###
                if is_recording:
                    print "."
                    num_frames_recorded += 1
                else:
                    print "x"

                ### --- Check if we should end the recording --- ###
                if num_frames_recorded >= record_gesture.gesture_length:
                    print_message("### Recording Complete ###")
                    is_recording = False
                    num_frames_recorded = 0
                    num_examples_recorded += 1
                    self.gesture_recognizer.save_gesture(record_gesture)

                ### --- Check if we should start the recording --- ###
                # non-blocking poll: any pending line on stdin arms recording
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                    line = sys.stdin.readline()
                    if line:
                        print_message("### Started Recording ###")
                        is_recording = True
Exemple #19
0
    def evaluate_classifier(self):

        print_message("Evaluating classifier on test data")
        total_score = 0.0

        for ex in self.testing_examples:

            features = ex[0]
            true_label = ex[1]
            prediction = self.classifier.predict(features)[0]
            prediction_probs = self.classifier.predict_proba(features)[0]

            classes = list(self.classifier.classes_)
            index = classes.index(prediction)
            prediction_prob = prediction_probs[index]
            total_score += prediction_prob
            if prediction_prob < 0.9:
                print "Exception: true_label = ", true_label
                for i, c_i in enumerate(classes):
                    c_i_score = prediction_probs[i]
                    print "	", c_i, ": ", prediction_probs[i]

        avg_score = total_score / float(len(self.testing_examples))
        print "average score: ", avg_score
	def evaluate_classifier (self):

		print_message ("Evaluating classifier on test data")
		total_score = 0.0

		for ex in self.testing_examples:

			features = ex[0]
			true_label = ex[1]
			prediction 			= self.classifier.predict (features)[0]
			prediction_probs 	= self.classifier.predict_proba (features)[0]

			classes = list(self.classifier.classes_)
			index = classes.index (prediction)
			prediction_prob = prediction_probs [index]
			total_score += prediction_prob
			if prediction_prob < 0.9:
				print "Exception: true_label = ", true_label
				for i, c_i in enumerate(classes):
					c_i_score = prediction_probs[i]
					print "	", c_i, ": ", prediction_probs[i]

		avg_score = total_score / float(len(self.testing_examples))
		print "average score: ", avg_score
Exemple #21
0
    def train_main(self):
        """
        Full training pipeline: load gestures, fit the HMMs, build and split
        the example set, train and persist the classifier, then evaluate it.
        """
        ### Step 1: load in all the gestures ###
        print_message("Loading gestures")
        self.gesture_recognizer.load_gestures()
        self.gesture_recognizer.print_gestures_stats()

        ### Step 2: train the HMMs ###
        print_message("Getting hmms")
        self.gesture_recognizer.get_hmms()

        ### Step 3: get examples ###
        print_message("Getting examples for training/testing")
        self.gesture_recognizer.get_all_examples()
        self.gesture_recognizer.split_training_testing_examples()

        ### Step 4: train the classifier and save the entire model ###
        self.gesture_recognizer.train_classifier()
        self.gesture_recognizer.save_model()

        ### Step 5: evaluate the classifier ###
        self.gesture_recognizer.evaluate_classifier()
Exemple #22
0
    def train_main (self):
        """
        Full training pipeline: load gestures, fit the HMMs, build and split
        the example set, train and persist the classifier, then evaluate it.
        """
        ### Step 1: load in all the gestures ###
        print_message ("Loading gestures")
        self.gesture_recognizer.load_gestures ()
        self.gesture_recognizer.print_gestures_stats ()

        ### Step 2: train the HMMs ###
        print_message ("Getting hmms")
        self.gesture_recognizer.get_hmms ()

        ### Step 3: get examples ###
        print_message ("Getting examples for training/testing")
        self.gesture_recognizer.get_all_examples ()
        self.gesture_recognizer.split_training_testing_examples ()

        ### Step 4: train the classifier and save the entire model ###
        self.gesture_recognizer.train_classifier ()
        self.gesture_recognizer.save_model ()

        ### Step 5: evaluate the classifier ###
        self.gesture_recognizer.evaluate_classifier ()
Exemple #23
0
    def synth_main_discrete(self):
        """
        Discrete-event recognition loop: records one full Gesture per take
        and classifies it.  Runs forever; press Enter between takes.
        """
        self.gesture_recognizer.load_model()

        print_message("Recording Discrete events")
        while (True):

            ### Step 1: initialize the gesture ###
            observed_gesture = Gesture()

            ### Step 2: start the recording ###
            self.record_countdown()

            ### Step 3: fill it with frames ###
            while not observed_gesture.is_full():
                frame = self.get_frame()
                observed_gesture.add_frame(frame)

            ### Step 4: stop the recording and classify ###
            print_message("### Recording Complete ###")
            self.gesture_recognizer.classify_gesture(observed_gesture)

            # block until the user presses Enter before the next take
            print_message("enter to continue")
            sys.stdin.readline()
Exemple #24
0
    def synth_main_discrete (self):
        """
        Discrete-event recognition loop: records one full Gesture per take
        and classifies it.  Runs forever; press Enter between takes.
        """
        self.gesture_recognizer.load_model ()

        print_message ("Recording Discrete events")
        while (True):

            ### Step 1: initialize the gesture ###
            observed_gesture = Gesture ()

            ### Step 2: start the recording ###
            self.record_countdown ()

            ### Step 3: fill it with frames ###
            while not observed_gesture.is_full ():
                frame = self.get_frame ()
                observed_gesture.add_frame (frame)

            ### Step 4: stop the recording and classify ###
            print_message ("### Recording Complete ###")
            self.gesture_recognizer.classify_gesture (observed_gesture)

            # block until the user presses Enter before the next take
            print_message("enter to continue")
            sys.stdin.readline ()
Exemple #25
0
        prediction_probs = self.classifier.predict_proba(classifiable_rep)[0]

        ### Step 3: sort the probability scores ###
        classes = list(self.classifier.classes_)
        index = classes.index(prediction)
        prediction_prob = prediction_probs[index]
        # print_message ("Best Match: " + str(prediction) + " | Probability: " + str(prediction_prob))
        # for i, c_i in enumerate(classes):
        # print ' -', c_i, ': ', prediction_probs[i]

        if prediction_prob > self.prediction_prob_threshold:
            return (prediction, prediction_prob)

if __name__ == "__main__":

    print_message("##### Gesture Recognizer - Train and Evaluate #####")

    gr = Gesture_Recognizer()

    ### Step 1: load in all the gestures ###
    print_message("Loading gestures")
    gr.load_gestures()
    gr.print_gestures_stats()

    ### Step 2: train the HMMs ###
    print_message("Getting hmms")
    gr.get_hmms()

    ### Step 3: get examples ###
    print_message("Getting examples for training/testing")
    gr.get_all_examples()
Exemple #26
0
    def print_gestures_stats(self):
        """
        Print, per gesture name, how many Gesture objects are loaded in
        self.gestures (a dict mapping name -> list of gestures).
        """
        print_message("Counts of 'Gesture' objects by type: ")
        for key, value in self.gestures.items():
            print "	- ", key, ": ", len(value)
			# print ' -', c_i, ': ', prediction_probs[i]

		if prediction_prob > self.prediction_prob_threshold:
			return (prediction, prediction_prob)









if __name__ == "__main__":

	print_message ("##### Gesture Recognizer - Train and Evaluate #####")

	gr = Gesture_Recognizer ()

	### Step 1: load in all the gestures ###
	print_message ("Loading gestures")
	gr.load_gestures ()
	gr.print_gestures_stats ()

	### Step 2: train the HMMs ###
	print_message ("Getting hmms")
	gr.get_hmms ()

	### Step 3: get examples ###
	print_message ("Getting examples for training/testing")
	gr.get_all_examples ()
	def print_gestures_stats (self):
		"""
		Print, per gesture name, how many Gesture objects are loaded in
		self.gestures (a dict mapping name -> list of gestures).
		"""
		print_message ("Counts of 'Gesture' objects by type: ")
		for key, value in self.gestures.items ():
			print "	- ", key, ": ", len(value)
Exemple #29
0
	def print_training_data_stats (self):
		"""
		Print counts of all and positive training examples.
		"""
		print_message ("Stats on Training Data")
		print "Total Examples: ", len(self.all_examples)
		print "Positive Examples: ", len(self.positive_examples)
Exemple #30
0
    def print_training_data_stats(self):
        """
        Print counts of all and positive training examples.
        """
        print_message("Stats on Training Data")
        print "Total Examples: ", len(self.all_examples)
        print "Positive Examples: ", len(self.positive_examples)
Exemple #31
0
    def print_memes_stats (self):

        print_message ("Meme Example Stats:")
        self.meme_dataframe.describe()