def main():
    """Stream raw EMG from a MYO armband and plot the 8 channels live.

    Runs until the user presses shift-C (or the hub stops running).
    """
    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path='../../myo_sdk')  # Compile Python binding to Myo's API
    hub = myo.Hub()                     # Create a Python instance of MYO API
    if not ConnectionChecker().ok:      # Check connection before starting acquisition
        quit()
    # =========================================================================

    # EmgBuffer acquires new data into a queue. At a sampling rate of 200 Hz,
    # 512 samples correspond to ~2.5 seconds of the most recent data.
    listener = Buffer(buffer_len=512)

    # Multichannel plotter for visualisation: the MYO armband has 8 EMG channels.
    plotter = MultichannelPlot(nchan=8, xlen=512)

    # Tell the MYO API to start a parallel thread that collects the data and
    # commands the MYO to start sending EMG; the listener receives the events.
    with hub.run_in_background(listener):
        print('Streaming EMG ... Press shift-c to stop.')
        while hub.running:
            time.sleep(0.040)
            # Pull recent EMG data from the buffer and convert it to an
            # N x 8 numpy matrix (each row is one 8-channel sample).
            emg_data = listener.get_emg_data()
            emg_data = np.array([x[1] for x in emg_data])
            # BUG FIX: the original called plt.plot(N1, j) with scalar zeros and
            # a blocking plt.show() inside the loop, which froze acquisition;
            # dead moving-average code indexing into an int (N[j]) was removed.
            # Guard against an empty buffer right after startup.
            if emg_data.size:
                plotter.update_plot(emg_data.T)
            if keyboard.is_pressed('C'):
                print('Stop.')
                break
def main():
    """Stream EMG from a MYO armband and plot a sliding-window Mean Absolute Value.

    A 10-sample MAV window is slid over the 512-sample buffer, producing one
    8-channel MAV vector per window position, which is then plotted.
    """
    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path='../../myo_sdk')  # Compile Python binding to Myo's API
    hub = myo.Hub()                     # Create a Python instance of MYO API
    if not ConnectionChecker().ok:      # Check connection before starting acquisition
        quit()
    # =========================================================================
    # Calculate the Mean Absolute Value over a sliding window.

    # Buffer holding the most recent 512 samples (~2.5 s at 200 Hz).
    winlen = 512   # samples kept in the acquisition buffer
    mav_win = 10   # sliding-window length for the MAV
    listener = Buffer(buffer_len=winlen)

    # Multichannel plotter: the MYO armband has 8 EMG channels.
    plotter = MultichannelPlot(nchan=8, xlen=winlen)

    # Start background acquisition; the listener receives the EMG events.
    with hub.run_in_background(listener):
        print('Streaming EMG ... Press shift-c to stop.')
        while hub.running:
            time.sleep(0.040)
            # Pull recent EMG data and convert to an N x 8 numpy matrix.
            emg_data = listener.get_emg_data()
            emg_data = np.array([x[1] for x in emg_data])
            # Wait until the buffer is full so every window has mav_win samples.
            if emg_data.ndim == 2 and emg_data.shape[0] >= winlen:
                # BUG FIX: the original did `mav_data[i, :] = MAV(data)` on a
                # plain Python list, which raises TypeError (lists cannot be
                # indexed with a tuple). Build the list by appending instead.
                mav_data = [MAV(emg_data[i:i + mav_win, :])
                            for i in range(winlen - mav_win)]
                plotter.update_plot(np.array(mav_data).T)
            if keyboard.is_pressed('C'):
                print('Stop.')
                break
def main():
    """Load a pickled gesture classifier and run live recognition on MYO EMG data."""
    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path='../../myo_sdk')  # Compile Python binding to Myo's API
    hub = myo.Hub()                     # Create a Python instance of MYO API
    if not ConnectionChecker().ok:      # Check connection before starting acquisition
        quit()
    # =========================================================================

    # Model path: first command-line argument, or the default trained model.
    input_file = sys.argv[1] if len(sys.argv) > 1 else 'models/trained_model.pkl'

    # Unpickle the bundle containing the classifier, feature extractor and labels.
    with open(input_file, 'rb') as file:
        model = pickle.load(file)
    mdl = model['mdl']
    feature_extractor = model['feature_extractor']
    gestures = model['gestures']

    # Buffer sized to exactly one feature-extraction window of EMG samples.
    emg_buffer = Buffer(feature_extractor.winlen)

    # Run acquisition in the background and classify each full window.
    with hub.run_in_background(emg_buffer.on_event):
        print('You may start performing gestures. Press ctrl-c to stop.')
        while hub.running:
            time.sleep(0.050)
            # Not enough samples yet for one feature window — keep waiting.
            if len(emg_buffer.emg_data_queue) < feature_extractor.winlen:
                continue
            # Latest window as an Nx8 matrix (one column per EMG channel).
            emg = np.array([x[1] for x in emg_buffer.get_emg_data()])
            # Features -> classifier -> predicted gesture index.
            feature_vector = feature_extractor.extract_feature_vector(emg)
            inference = mdl.predict(feature_vector)
            # Implement majority voting here, if needed:
            # ...
            print('\rRecognized gesture: ', gestures[inference[0]], end='')
def main():
    """Stream MYO EMG, compute its Mean Absolute Value, and drive the mouse cursor."""
    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path='../../myo_sdk')  # Compile Python binding to Myo's API
    hub = myo.Hub()                     # Create a Python instance of MYO API
    if not ConnectionChecker().ok:      # Check connection before starting acquisition
        quit()
    # =========================================================================
    # Calculate the Mean Absolute Value of the incoming EMG.

    # Acquisition buffer: 512 samples (~2.5 s at the MYO's 200 Hz rate).
    listener = Buffer(buffer_len=512)
    calculate = Feature(input_len=512)

    # Visualise all 8 EMG channels of the armband.
    plotter = MultichannelPlot(nchan=8, xlen=512)

    # Cursor controller driven at the EMG sampling frequency.
    freq = 200
    move = cursor(freq)

    # Background thread collects data; the listener receives the EMG events.
    with hub.run_in_background(listener):
        print('Streaming EMG ... Press shift-c to stop.')
        while hub.running:
            time.sleep(0.040)
            # Fetch the buffered samples as an N x 8 numpy matrix.
            emg_data = np.array([x[1] for x in listener.get_emg_data()])
            # Only process once a full 2-D, 512-sample buffer is available
            # (guards against len()/shape errors before the buffer fills).
            if emg_data.ndim == 2 and emg_data.shape[0] == 512:
                mav_data = np.array(calculate.MAV(emg_data).T)
                plotter.update_plot(mav_data)
                move.move_cursor(mav_data)
            if keyboard.is_pressed('C'):
                print('Stop.')
                break
grasp = 0 # Connect the hand to the computer and switch it on. First run after connection # will initialise the hand. Consecutive runs won't. p = AlpesProsthesis() p.initialise() p.set_grasp(grasps[grasp]) mav2command = [0.02, 0.02 ] #mapping from mean absolute signal to the control variable. with hub.run_in_background(listener): print('Streaming EMG ... Press ctrl-c or shift-c to stop.') while hub.running: time.sleep(winsec) # Pull recent EMG data from the buffer emg_data = listener.get_emg_data() # Transform it to numpy matrix emg_data = np.array([[x[1][flexors_chan], x[1][extensors_chan]] for x in emg_data]) # Interpret EMG, produce decision decision, mav = mc.decide(emg_data) # If co-contraction detected, change grasp if decision == mc.STATES['cc']: grasp = (grasp + 1) % len(grasps) print('Changing grasp to %s ...' % grasps[grasp].name) p.set_grasp(grasps[grasp]) # If closing or opening intent detected, move the hand if decision == mc.STATES['close'] or decision == mc.STATES['open']: command = mav[0] * mav2command[0] - mav[1] * mav2command[1]
def keyboard():
    """Recognise gestures from MYO EMG and map them to held-down keyboard keys.

    Loads a pickled classifier bundle, streams EMG in the background, and
    presses game-style keys (WASD/space) via pyautogui for each gesture.
    NOTE(review): the function name shadows the `keyboard` module used by
    sibling scripts — consider renaming; also `keyDown` is never paired with
    `keyUp`, so keys stay held after a gesture ends — confirm this is intended.
    """
    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path="../../myo_sdk")  # Compile Python binding to Myo's API
    hub = myo.Hub()                     # Create a Python instance of MYO API
    if not ConnectionChecker().ok:      # Check connection before starting acquisition
        quit()
    # =========================================================================
    # Parse command line inputs, if any
    input_file = "../classification/models/trained_model.pkl"
    if len(sys.argv) > 1:
        input_file = sys.argv[1]
    # Load pickled feature extractor and classification model
    with open(input_file, "rb") as file:
        model = pickle.load(file)
    # Extract variables from pickled object
    mdl = model["mdl"]
    feature_extractor = model["feature_extractor"]
    gestures = model["gestures"]
    # Set up the buffer that will always contain the most up-to-date readings from the MYO
    emg_buffer = Buffer(feature_extractor.winlen)
    # Set up inference
    with hub.run_in_background(emg_buffer.on_event):
        print("You may start performing gestures. Press ctrl-c to stop.")
        while hub.running:
            time.sleep(0.050)
            # Skip the rest until enough data for feature extraction is acquired
            if len(emg_buffer.emg_data_queue) < feature_extractor.winlen:
                continue
            # Get latest emg data
            emg = emg_buffer.get_emg_data()
            # Convert to a numpy matrix (an Nx8 matrix, each channel is a column):
            emg = np.array([x[1] for x in emg])
            # Extract features from the emg signal:
            feature_vector = feature_extractor.extract_feature_vector(emg)
            # Use classification model to recognise the gesture:
            inference = mdl.predict(feature_vector)
            # Implement majority voting here, if needed:
            # ...
            # Output inference:
            print("\rRecognized gesture: ", gestures[inference[0]], end="")
            gesture = gestures[inference[0]]
            # Map each recognised gesture to a held key. Labels must match the
            # trained model's label set exactly — 'flextion' is presumably a
            # typo carried over from training; verify against the model file.
            if gesture == 'rest':
                continue
                #pyautogui.press('w')
            elif gesture == 'radial':
                pyautogui.keyDown('a')
            elif gesture == 'ulnar':
                pyautogui.keyDown('d')
            elif gesture == 'flextion':
                pyautogui.keyDown('w')
            elif gesture == 'extension':
                pyautogui.keyDown('s')
            elif gesture == 'fist':
                pyautogui.keyDown('space')