def recognize_gesture(self, source):
    """Classify an unknown gesture video with the trained SVM model.

    source -- path to the video file of the unknown gesture.
    Returns the predicted gesture name (an entry of GESTURES).
    """
    # NOTE(review): a second recognize_gesture definition exists later in
    # this file; if both live in the same class the later one shadows this
    # one -- confirm which implementation is intended.
    # Compute the optical-flow magnitude of the unknown gesture, then
    # resize so it matches the dimensions used for the training data.
    source_flow = measure(source, f=optical_flow_magnitude())
    source_flow = cv2.resize(source_flow, (180, 320)).flatten()
    # scikit-learn-style estimators expect a 2-D (n_samples, n_features)
    # array; reshape the single flattened sample to (1, -1) instead of
    # passing a 1-D vector (which raises "Reshape your data" in sklearn).
    prediction = self.model.predict(source_flow.reshape(1, -1))
    # predict() returns an array of labels; take the first (only) one.
    return GESTURES[int(prediction[0])]
def recognize_gesture(self, source): # Calculate the optical flow magnitute of unknown gesture source_flow = measure(source, f=optical_flow_magnitude()) # resize to compare source_flow = cv2.resize(source_flow, (180, 320)) avg_diffs = {} for gesture in GESTURES: # compare the unknown optical flow with data diffs = [simple_difference(source_flow, flow) for flow in self.known_gestures[gesture]] avg_diffs[gesture] = np.mean(diffs) print avg_diffs # return the smallest difference -> the predicted gesture return min(GESTURES, key=lambda x: avg_diffs[x])
def _process(gesture, idx, f): source = 'videos/{0}{1}_blackbackground.mov'.format(gesture, idx) print '\t', source data = measure(source, f) data = cv2.resize(data, (180, 320)) return data.flatten()