def save_memes(self):
    """Pickle each meme list in self.memes to its own file on disk.

    Writes one .pkl file per meme_type, at the path returned by
    get_meme_pickle_filename; any existing file is overwritten.
    """
    for meme_type, meme_list in self.memes.iteritems():
        pickle_filename = self.get_meme_pickle_filename(meme_type)
        # 'with' guarantees the handle is closed even if dump raises
        # (the original open() was never closed).
        with open(pickle_filename, 'w') as pickle_file:
            pickle.dump(meme_list, pickle_file)
        # Fixed: the status message previously opened a paren it never closed.
        print_status("Saved meme_type", meme_type + " (at " + pickle_filename + ")")
def get_hmms(self): for gesture_type in self.gesture_types: print_status("Get_Hmms", "Fitting for gesture_type: " + gesture_type) ### Step 1: fill hmm_examples appropriately ### hmm_examples = [] for gesture in self.gestures[gesture_type]: hmm_rep = gesture.get_hmm_rep() hmm_examples.append(hmm_rep) ### Step 2: fit parameters for the hmm ### hmm = GaussianHMM(self.num_hmm_states) hmm.fit(hmm_examples) ### Step 3: store the hmm in self.hmms ### self.hmms[gesture_type] = hmm print_inner_status( gesture_type, "predicted the following sequences: (score: sequence)") for example in hmm_examples: print " ", hmm.score(example), ": ", hmm.predict(example)
def save_gesture(self, gesture):
    """Pickle the given gesture to disk and report where it was saved.

    The save path is derived from the gesture's name via get_save_filename.
    """
    ### Step 1: save the gesture ###
    save_filename = self.get_save_filename(gesture.name)
    gesture.pickle_self(save_filename)
    print_status("Gesture Recognizer", "Saved recorded gesture at " + save_filename)
    # Removed leftover debug output that dumped the gesture's raw
    # observation sequence (gesture.O) to stdout on every save.
def on_init(self, controller):
    """Leap callback: runs once when the controller finishes initializing."""
    # Bring up the bridge to Max before anything else touches it.
    self.max_interface = Max_Interface()
    # Announce that the listener is ready.
    print_status("Controller Listener", "controller initialized")
def on_init(self, controller):
    """Initialization hook for the Leap controller listener.

    Creates the Max interface and logs that setup completed.
    """
    self.max_interface = Max_Interface()  # connection to Max must exist first
    print_status("Controller Listener", "controller initialized")
def save_gesture(self, gesture):
    """Persist a recorded gesture: pickle it under a name-derived path.

    gesture -- a recorded gesture object exposing .name and .pickle_self().
    """
    ### Step 1: save the gesture ###
    save_filename = self.get_save_filename(gesture.name)
    gesture.pickle_self(save_filename)
    print_status("Gesture Recognizer", "Saved recorded gesture at " + save_filename)
    # Dropped a stray debug statement (`print gesture.O`) that spammed the
    # observation matrix to the console after each save.
def save_examples(self):
    """Pickle every feature-vector list in self.all_examples, one file per meme type."""
    print_status("Save Examples", "Begin")
    ### Step 1: save self.all_examples ###
    for meme_type, example_list in self.all_examples.iteritems():
        save_filename = self.get_examples_pickle_filename(meme_type)
        print_inner_status("Save Examples", "Saving to " + save_filename)
        # 'with' closes the handle promptly (the original open() leaked it).
        with open(save_filename, 'w') as save_file:
            pickle.dump(example_list, save_file)
    print_status("Save Examples", "End")
def get_examples(self):
    """Convert every stored meme to its feature-vector representation.

    Fills self.all_examples, mapping meme_type -> list of feature vectors.
    """
    ### Step 1: fill self.all_examples ###
    self.all_examples = {}
    print_status("Get Examples", "Begin")
    for meme_type, memes_of_type in self.memes.iteritems():
        print_inner_status("Get Examples", "Converting to feature vectors: " + meme_type)
        feature_vectors = [meme.get_features() for meme in memes_of_type]
        self.all_examples[meme_type] = feature_vectors
    print_status("Get Examples", "Complete")
def on_connect(self, controller):
    """Leap callback: controller connected — enable the gesture types we track."""
    print_status("Synth Listener", "controller connected")
    # Enable gestures (trailing semicolons removed; they are noise in Python)
    controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE)
    controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP)
    controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP)
    controller.enable_gesture(Leap.Gesture.TYPE_SWIPE)
def on_connect(self, controller):
    """Leap callback: fires when the controller connects; turns on gesture tracking."""
    print_status("Synth Listener", "controller connected")
    # Register every built-in gesture type we care about, in the same
    # order the original enabled them.
    tracked_gestures = (
        Leap.Gesture.TYPE_CIRCLE,
        Leap.Gesture.TYPE_KEY_TAP,
        Leap.Gesture.TYPE_SCREEN_TAP,
        Leap.Gesture.TYPE_SWIPE,
    )
    for gesture_type in tracked_gestures:
        controller.enable_gesture(gesture_type)
def train_main(self):
    """Load recorded gesture data, report stats, then train the model."""
    recognizer = self.gesture_recognizer

    ### Step 1: load in the data and print out stats about it ###
    print_status("Gesture_Recognizer", "Loading Data")
    recognizer.load_data()
    # recognizer.eliminate_second_hand ()  # optional preprocessing, currently off
    recognizer.print_data_stats()

    ### Step 2: cluster the poses ###
    print_status("Gesture_Recognizer", "Training Model")
    recognizer.train_model()
def load_memes(self):
    """Load every pickled meme list from self.memes_directory into self.memes.

    Each *.pkl file's basename (minus the extension) becomes the meme_type key.
    """
    print_status("Loading memes", "Begin")
    # endswith() is clearer (and safer on very short names) than slicing f[-4:].
    meme_pkl_filenames = [f for f in os.listdir(self.memes_directory) if f.endswith('.pkl')]
    for meme_pkl_filename in meme_pkl_filenames:
        full_filename = os.path.join(self.memes_directory, meme_pkl_filename)
        meme_type = meme_pkl_filename[:-len('.pkl')]
        print_inner_status("Load Memes", "Loading " + full_filename)
        # 'with' closes the file handle (the original open() leaked it).
        with open(full_filename, 'r') as pkl_file:
            self.memes[meme_type] = pickle.load(pkl_file)
    print_status("Loading memes", "End")
def train_main(self):
    """Top-level training entry point: data load, stats report, model fit."""
    #--- load and summarize the recorded data ---
    print_status("Gesture_Recognizer", "Loading Data")
    self.gesture_recognizer.load_data()
    # self.gesture_recognizer.eliminate_second_hand ()  # disabled preprocessing step
    self.gesture_recognizer.print_data_stats()

    #--- fit the model on the loaded poses ---
    print_status("Gesture_Recognizer", "Training Model")
    self.gesture_recognizer.train_model()
def init_preprocess(self):
    """Enter preprocessing mode: load the stored memes, then filter them in place."""
    print_message("Entering preprocessing mode")

    ### Step 1: load in all memes ###
    print_status("Initialization", "Loading Meme Objects (i.e. json or pickle, not feature representations yet)")
    # self.get_memes ()  # alternative source: json
    self.load_memes()    # load from pkl
    self.print_memes_stats()

    ### Step 2: filter them ###
    Preprocess.filter_memes(self.memes)
def load_examples(self):
    """Load every pickled feature-vector list from self.examples_directory.

    Fills self.all_examples, keyed by meme_type (the filename minus its
    '.feature_vec' extension).
    """
    print_status("Load Examples", "Begin")
    self.all_examples = {}
    extension = '.feature_vec'
    # endswith() replaces the fragile hard-coded slice f[-12:].
    example_filenames = [f for f in os.listdir(self.examples_directory) if f.endswith(extension)]
    for example_filename in example_filenames:
        full_filename = os.path.join(self.examples_directory, example_filename)
        meme_type = example_filename[:-len(extension)]
        print_inner_status("Load Examples", "Loading " + meme_type)
        # 'with' closes the file handle (the original open() leaked it).
        with open(full_filename, 'r') as example_file:
            self.all_examples[meme_type] = pickle.load(example_file)
    print_status("Load Examples", "Complete")
def get_hmms (self): for gesture_type in self.gesture_types: print_status ("Get_Hmms", "Fitting for gesture_type: " + gesture_type) ### Step 1: fill hmm_examples appropriately ### hmm_examples = [] for gesture in self.gestures[gesture_type]: hmm_rep = gesture.get_hmm_rep () hmm_examples.append (hmm_rep) ### Step 2: fit parameters for the hmm ### hmm = GaussianHMM (self.num_hmm_states) hmm.fit (hmm_examples) ### Step 3: store the hmm in self.hmms ### self.hmms[gesture_type] = hmm print_inner_status (gesture_type, "predicted the following sequences: (score: sequence)") for example in hmm_examples: print " ", hmm.score (example), ": ", hmm.predict (example)
def parse_meme_page(self, response):
    """Scrapy callback: extract top/bottom text from a meme page into a Meme_Item.

    The page <title> is expected to look like "top text - bottom text";
    meme_type is carried over from the request meta.
    """
    print_status("Spider", "Parse meme Page")
    meme_type = response.meta['meme_type']

    hxs = HtmlXPathSelector(response)
    title_xpath = "//title/text()"
    title = hxs.select(title_xpath).extract()[0]

    # Guard: a title with no '-' previously raised IndexError on splits[1];
    # such pages now yield an empty bottom_text instead of crashing the spider.
    splits = title.split('-')
    top_text = splits[0]
    bottom_text = splits[1] if len(splits) > 1 else ''

    meme_item = Meme_Item()
    meme_item['meme_type'] = meme_type
    meme_item['top_text'] = top_text
    meme_item['bottom_text'] = bottom_text
    self.meme_counts[meme_type] += 1
    return meme_item
def parse_meme_page(self, response):
    """Parse one meme page: pull top/bottom caption text out of the <title>.

    Returns a populated Meme_Item and bumps the per-type scrape counter.
    """
    print_status("Spider", "Parse meme Page")
    meme_type = response.meta['meme_type']

    hxs = HtmlXPathSelector(response)
    title_xpath = "//title/text()"
    title = hxs.select(title_xpath).extract()[0]

    # Robustness fix: titles lacking a '-' separator used to raise
    # IndexError; treat the whole title as top_text in that case.
    splits = title.split('-')
    top_text = splits[0]
    bottom_text = splits[1] if len(splits) > 1 else ''

    meme_item = Meme_Item()
    meme_item['meme_type'] = meme_type
    meme_item['top_text'] = top_text
    meme_item['bottom_text'] = bottom_text
    self.meme_counts[meme_type] += 1
    return meme_item
def __init__(self, jvid_filename, recording_name, classifier_name=None, save=True, test=False): print_status("Trainer", "Starting") ### Step 1: get in the original skeletons ### self.skeletons_raw = read_in_skeletons(jvid_filename) ### Step 2: add derivatives to them (fills skeletons_with_derivs) ### self.skeletons_with_derivs = add_derivatives_to_skeletons( self.skeletons_raw, self.fd_interval_1, self.fd_interval_2, self.fd_interval_1, self.fd_interval_2) ### Step 3: put it into training data form ### (X, Y) = self.get_training_data() ### Step 4: get it in training data form ### all_data = zip(X, Y) print " ### Number of examples: ", len(all_data) random.shuffle(all_data) training = all_data # training = all_data[:5000] # testing = all_data[5000:] #--- training --- X_train = [t[0] for t in training] Y_train = [t[1] for t in training] #--- testing --- # X_test = [t[0] for t in testing] # Y_test = [t[1] for t in testing] #--- get the classifier --- print_status("Trainer", "Training the classifier") self.classifier = LogisticRegression().fit(X_train, Y_train) #--- saving data ---- if save: if classifier_name == None: classifier_name = recording_name.split('.')[0] + '.obj' classifier_name = os.path.join(os.getcwd(), 'python_backend/classifiers', classifier_name) print_status("Trainer", "Pickling the classifier at " + classifier_name) self.save_classifier(classifier_name)
def __init__ (self, mode="train"):
    """Set up the meme-classifier pipeline.

    Only the meme-loading stage currently runs; the feature-extraction,
    training, and saving stages below are commented out.
    mode -- pipeline mode flag; NOTE(review): currently unread in this body.
    """
    ### Step 2: get memes dataframe ###
    print_status ("Initialization", "Loading memes")
    self.load_memes_pandas ()
    self.print_memes_stats ()

    ### Step 3: fill self.training_examples ###
    print_status ("Initialization", "Getting feature represenations (examples)")
    # self.get_examples ()      # convert memes -> feature vectors
    # self.load_examples ()     # get all feature vectors (examples)
    # self.save_examples ()     # save feature vectors in pkl files

    ### Step 4: train the classifier ###
    print_status ("Initialization", "Training classifier")
    # self.train_classifier ()  # train it
    # self.load_classifier ('unigram_classifier.obj')  # load it

    ### Step 5: save the classifier ###
    # NOTE(review): this stage only logs — nothing is actually saved here.
    print_status ("Initialization", "Saving classifier")
def __init__ (self, jvid_filename, recording_name, classifier_name=None, save=True, test=False): print_status ("Trainer", "Starting") ### Step 1: get in the original skeletons ### self.skeletons_raw = read_in_skeletons (jvid_filename) ### Step 2: add derivatives to them (fills skeletons_with_derivs) ### self.skeletons_with_derivs = add_derivatives_to_skeletons (self.skeletons_raw, self.fd_interval_1, self.fd_interval_2, self.fd_interval_1, self.fd_interval_2) ### Step 3: put it into training data form ### (X, Y) = self.get_training_data () ### Step 4: get it in training data form ### all_data = zip(X, Y) print " ### Number of examples: ", len(all_data) random.shuffle (all_data); training = all_data # training = all_data[:5000] # testing = all_data[5000:] #--- training --- X_train = [t[0] for t in training] Y_train = [t[1] for t in training] #--- testing --- # X_test = [t[0] for t in testing] # Y_test = [t[1] for t in testing] #--- get the classifier --- print_status ("Trainer", "Training the classifier") self.classifier = LogisticRegression().fit(X_train, Y_train) #--- saving data ---- if save: if classifier_name == None: classifier_name = recording_name.split('.')[0] + '.obj' classifier_name = os.path.join (os.getcwd (), 'python_backend/classifiers', classifier_name) print_status ("Trainer", "Pickling the classifier at " + classifier_name) self.save_classifier (classifier_name)
    # NOTE(review): this chunk begins mid-function — the enclosing `def`
    # (apparently record_to_txt, given the call below) is outside the visible
    # region, so the indentation of these first lines is reconstructed.
    for skeleton in skeletons:
        outfile.write(skeleton.__str__())
    return


if __name__ == "__main__":

    ### Step 1: get/sanitize args ###
    args = sys.argv
    if len(args) != 3:
        # NOTE(review): print_error apparently does not exit — execution falls
        # through to args[1] below and would raise IndexError; confirm.
        print_error("Not enough arguments", "Usage: ./convert_to_json.py infile outfile")
    infile_name = args[1]
    outfile_name = args[2]

    ### Step 2: read in infile ###
    print_status("Main", "Opening infile")
    entire_infile = open(infile_name, "r").read()
    skeletons = read_in_skeletons(entire_infile)

    ### Step 2: write out skeletons ###
    print_status("Main", "Writing in txt format to outfile")
    record_to_txt(skeletons, outfile_name)

    ### Step 3: get json representation ###
    # print_status("Main", "Getting json representations")
    # json_skeletons_list = [s.json () for s in skeletons]

    ### Step 4: open outfile and write ###
    # print_status ("Main", "Writing to outfile")
    # outfile = open(outfile_name, 'w')
    # outfile.write (json.dumps(json_skeletons_list))
def on_exit(self, controller):
    """Leap callback: invoked as the listener is torn down."""
    print_status("Synth Listener", "Exiting")
def on_disconnect(self, controller):
    """Leap callback: the controller hardware dropped its connection."""
    print_status("Synth Listener", "Controller disconnected")
        # NOTE(review): this chunk begins mid-function — inside the per-example
        # evaluation loop of a test routine whose header is not visible; the
        # indentation of these first lines is reconstructed.
        prediction_prob = classifier.predict_proba (example)
        print "true_label | prediction: ", true_label, " | ", prediction_prob, " | ", prediction

    # Summary of the evaluation run.
    print "--- RESULTS ---"
    print 'hits: ', hits
    print 'misses: ', misses
    print 'accuracy: ', float(hits)/float(hits+misses)


if __name__ == "__main__":

    print_status ("Train Classifier", "Starting")

    ### Step 1: manage args ###
    if len(sys.argv) < 3:
        print_error ("Not enough args", "Usage: ./train_classifier.py [infile] [classifier_name] [(load|save|test)]")
    infile_name = sys.argv[1]
    classifier_name = sys.argv[2]

    # Flag parsing: presence of the keyword anywhere in argv enables it.
    if 'load' in sys.argv:
        load = True
    else:
        load = False
    if 'save' in sys.argv:
        save = True
    else:
        save = False
    # NOTE(review): chunk ends here, mid-`if` — the body of this branch lies
    # past the visible region.
    if 'test' in sys.argv:
def on_init(self, controller):
    """Leap callback: the controller object has finished initializing."""
    ### Step 2: notify of initialization ###
    print_status("Synth Listener", "controller initialized")
        # NOTE(review): this chunk begins mid-loop inside an enclosing function
        # whose header (and `for` line) is not visible; indentation of these
        # first lines is reconstructed.
        outfile.write(skeleton.__str__())
    return


if __name__ == "__main__":

    ### Step 1: get/sanitize args ###
    args = sys.argv
    if len(args) != 3:
        # NOTE(review): print_error apparently does not exit — execution falls
        # through to args[1] below and would raise IndexError; confirm.
        print_error("Not enough arguments", "Usage: ./convert_to_json.py infile outfile")
    infile_name = args[1]
    outfile_name = args[2]

    ### Step 2: read in infile ###
    print_status("Main", "Opening infile")
    entire_infile = open(infile_name, 'r').read()
    skeletons = read_in_skeletons(entire_infile)

    ### Step 2: write out skeletons ###
    print_status("Main", "Writing in txt format to outfile")
    record_to_txt(skeletons, outfile_name)

    ### Step 3: get json representation ###
    # print_status("Main", "Getting json representations")
    # json_skeletons_list = [s.json () for s in skeletons]

    ### Step 4: open outfile and write ###
    # print_status ("Main", "Writing to outfile")
    # outfile = open(outfile_name, 'w')
    # outfile.write (json.dumps(json_skeletons_list))
def on_exit(self, controller):
    """Shutdown hook: log that the synth listener is going away."""
    print_status("Synth Listener", "Exiting")
def on_disconnect(self, controller):
    """Log loss of the Leap controller connection."""
    print_status("Synth Listener", "Controller disconnected")
misses += 1 # print "true_label | prediction: ", true_label, " | ", prediction prediction_prob = classifier.predict_proba(example) print "true_label | prediction: ", true_label, " | ", prediction_prob, " | ", prediction print "--- RESULTS ---" print 'hits: ', hits print 'misses: ', misses print 'accuracy: ', float(hits) / float(hits + misses) if __name__ == "__main__": print_status("Train Classifier", "Starting") ### Step 1: manage args ### if len(sys.argv) < 3: print_error( "Not enough args", "Usage: ./train_classifier.py [infile] [classifier_name] [(load|save|test)]" ) infile_name = sys.argv[1] classifier_name = sys.argv[2] if 'load' in sys.argv: load = True else: load = False if 'save' in sys.argv: save = True
def on_init(self, controller):
    """Initialization hook for the synth listener — just announces readiness."""
    ### Step 2: notify of initialization ###
    print_status("Synth Listener", "controller initialized")