# Example 1
def main():
    """Stream raw EMG from a MYO armband and live-plot the last 512 samples."""
    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path='../../myo_sdk') # Compile Python binding to Myo's API
    hub = myo.Hub() # Create a Python instance of MYO API
    if not ConnectionChecker().ok: # Check connection before starting acquisition:
        quit()
    # =========================================================================

    # Setup our custom processor of MYO's events.
    # EmgBuffer will acquire new data in a buffer (queue):
    listener = Buffer(buffer_len=512)  # At 200 Hz, 512 samples ~= 2.5 s of the most recent data.

    # Setup multichannel plotter for visualisation:
    plotter = MultichannelPlot(nchan=8, xlen=512)  # Number of EMG channels in MYO armband is 8

    # Tell MYO API to start a parallel thread that will collect the data and
    # command the MYO to start sending EMG data.
    with hub.run_in_background(listener):  # Associate our listener with the MYO API.
        print('Streaming EMG ... Press shift-c to stop.')
        while hub.running:
            time.sleep(0.040)
            # Pull recent EMG data from the buffer.
            emg_data = listener.get_emg_data()
            # Transform it to an (N, 8) numpy matrix (one row per sample).
            emg_data = np.array([x[1] for x in emg_data])

            # Plot the transpose: one trace per channel.
            plotter.update_plot(emg_data.T)

            # BUG FIX NOTE: the original additionally called
            # plt.plot(N1, j); plt.show() here with scalar ints N1 = j = 0.
            # plt.show() blocks the acquisition loop on every iteration and a
            # scalar-vs-scalar "plot" draws nothing meaningful, so that code
            # (and the dead commented-out averaging that indexed a scalar N)
            # was removed.

            if keyboard.is_pressed('C'):
                print('Stop.')
                break
# Example 2
def main():
    """Stream EMG from a MYO armband and plot its sliding-window Mean Absolute Value."""
    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path='../../myo_sdk')  # Compile Python binding to Myo's API
    hub = myo.Hub()  # Create a Python instance of MYO API
    if not ConnectionChecker(
    ).ok:  # Check connection before starting acquisition:
        quit()
    # =========================================================================
    # Setup our custom processor of MYO's events.
    # EmgBuffer will acquire new data in a buffer (queue):
    listener = Buffer(
        buffer_len=512
    )  # At sampling rate of 200Hz, 512 samples correspond to ~2.5 seconds of the most recent data.

    # Setup multichannel plotter for visualisation:
    plotter = MultichannelPlot(
        nchan=8, xlen=512)  # Number of EMG channels in MYO armband is 8

    winlen = 10  # MAV sliding-window length in samples

    # Tell MYO API to start a parallel thread that will collect the data and
    # command the MYO to start sending EMG data.
    with hub.run_in_background(
            listener
    ):  # This is the way to associate our listener with the MYO API.
        print('Streaming EMG ... Press shift-c to stop.')
        while hub.running:
            time.sleep(0.040)
            # Pull recent EMG data from the buffer
            emg_data = listener.get_emg_data()
            # Transform it to numpy matrix
            emg_data = np.array([x[1] for x in emg_data])

            # Right after start-up the buffer may be empty or shorter than one
            # window; skip processing until a full 2-D chunk is available.
            if emg_data.ndim != 2 or emg_data.shape[0] < winlen:
                continue

            # BUG FIX: the original indexed a plain Python list with
            # mav_data[i, :], which raises TypeError immediately; it also
            # hard-coded range(502) regardless of how much data was actually
            # buffered. Append one MAV row per window position instead, with
            # the window count derived from the real data length.
            mav_data = []
            for i in range(emg_data.shape[0] - winlen + 1):
                mav_data.append(MAV(emg_data[i:i + winlen, :]))

            # Plot the MAV envelope, one trace per channel.
            plotter.update_plot(np.array(mav_data).T)
            if keyboard.is_pressed('C'):
                print('Stop.')
                break
# Example 3
def main():
    """Run live gesture recognition on MYO EMG using a previously trained model."""

    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path='../../myo_sdk')  # Compile Python binding to Myo's API
    hub = myo.Hub()  # Create a Python instance of MYO API
    if not ConnectionChecker(
    ).ok:  # Check connection before starting acquisition:
        quit()
    # =========================================================================

    # Path of the pickled model; an optional CLI argument overrides the default.
    model_path = sys.argv[1] if len(sys.argv) > 1 else 'models/trained_model.pkl'

    # Load the pickled bundle: classifier, feature extractor and gesture labels.
    with open(model_path, 'rb') as fh:
        bundle = pickle.load(fh)

    clf = bundle['mdl']
    extractor = bundle['feature_extractor']
    labels = bundle['gestures']

    # Buffer that always holds the most recent EMG window from the MYO.
    ring = Buffer(extractor.winlen)

    # Run inference until interrupted.
    with hub.run_in_background(ring.on_event):
        print('You may start performing gestures. Press ctrl-c to stop.')
        while hub.running:
            time.sleep(0.050)
            # Wait until a full window of samples has been acquired.
            if len(ring.emg_data_queue) < extractor.winlen:
                continue

            # Latest EMG as an Nx8 numpy matrix (each channel is a column).
            samples = np.array([s[1] for s in ring.get_emg_data()])
            # Feature extraction followed by classification.
            features = extractor.extract_feature_vector(samples)
            prediction = clf.predict(features)
            # Majority voting could be inserted here, if needed.

            # Output inference:
            print('\rRecognized gesture: ', labels[prediction[0]], end='')
# Example 4
def main():
    """Stream MYO EMG, compute its MAV envelope, plot it and drive the cursor."""
    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path='../../myo_sdk')  # Compile Python binding to Myo's API
    hub = myo.Hub()  # Create a Python instance of MYO API
    if not ConnectionChecker(
    ).ok:  # Check connection before starting acquisition:
        quit()
    # =========================================================================
    # EmgBuffer acquires the newest readings into a queue:
    listener = Buffer(
        buffer_len=512
    )  # At sampling rate of 200Hz, 512 samples correspond to ~2.5 seconds of the most recent data.
    calculate = Feature(input_len=512)  # Computes Mean Absolute Value features.
    # Multichannel plotter for visualisation (MYO has 8 EMG channels):
    plotter = MultichannelPlot(nchan=8, xlen=512)
    freq = 200  # EMG sampling rate in Hz, forwarded to the cursor controller.
    move = cursor(freq)

    # Let the MYO API collect data on a parallel thread and command the
    # armband to start sending EMG.
    with hub.run_in_background(
            listener
    ):  # This associates our listener with the MYO API.
        print('Streaming EMG ... Press shift-c to stop.')
        while hub.running:
            time.sleep(0.040)
            # Latest EMG converted to a numpy matrix (one row per sample).
            raw = listener.get_emg_data()
            samples = np.array([s[1] for s in raw])

            # Only process once a full 2-D window of 512 samples is buffered;
            # this also avoids len()/shape errors while the buffer is filling.
            if samples.ndim == 2 and samples.shape[0] == 512:
                mav = np.array(calculate.MAV(samples).T)
                plotter.update_plot(mav)
                move.move_cursor(mav)

            if keyboard.is_pressed('C'):
                print('Stop.')
                break
# Example 5
from Alpes.Prosthesis import AlpesProsthesis, GRASPS

# ================== setup myo-python (do not change) =====================
myo.init(sdk_path='../../myo_sdk')  # Compile Python binding to Myo's API
hub = myo.Hub()  # Create a Python instance of MYO API
if not ConnectionChecker().ok:  # Check connection before starting acquisition:
    quit()
# =========================================================================

# Two-site (flexor/extensor) EMG control setup for an Alpes prosthetic hand.
EMG_SAMPLING_RATE = 200  # MYO streams EMG at a fixed 200 Hz.
winsec = 0.1  # Window duration in seconds
winlen = int(winsec * EMG_SAMPLING_RATE)  # Window length in samples
# Channel indices used by the two-site control scheme.
# NOTE(review): assumes extensors on channel 3 and flexors on channel 0 —
# confirm against the armband placement used during calibration.
extensors_chan = 3
flexors_chan = 0
# Buffer holding exactly one control window of the most recent samples.
listener = Buffer(buffer_len=winlen)

mc = TwoChannelMyoControl(
    # Absolute values of EMG are spanned between 0 and 127.
    # thresholds are approximately measured as a percentage of total muscle contraction.
    # thresholds[0] corresponds to flexors, thresholds[1] corresponds to extensors.
    thresholds=[10, 15],
    # cc_lock_duration filters out some unwanted control decisions after a co-contraction.
    # cc_lock_duration is measured in number of 'winlen's (see variable above).
    # Increase cc_lock_duration if unwanted motions appear after co-contraction or if
    # co-contraction gets detected twice instead of once in a short amount of time.
    cc_lock_duration=1)

# Grasp patterns available for cycling; 'grasp' indexes the active one.
grasps = [GRASPS().CYLINDRICAL, GRASPS().LATERAL, GRASPS().PINCH]
grasp = 0
# Example 6
def keyboard():
    """Recognise gestures from MYO EMG and hold down the matching game key."""

    # ================== setup myo-python (do not change) =====================
    myo.init(sdk_path="../../myo_sdk")  # Compile Python binding to Myo's API
    hub = myo.Hub()  # Create a Python instance of MYO API
    if not ConnectionChecker().ok:  # Check connection before starting acquisition:
        quit()
    # =========================================================================

    # Path of the pickled model; an optional CLI argument overrides the default.
    input_file = "../classification/models/trained_model.pkl"
    if len(sys.argv) > 1:
        input_file = sys.argv[1]

    # Load the pickled bundle: classifier, feature extractor and gesture labels.
    with open(input_file, "rb") as fh:
        bundle = pickle.load(fh)

    clf = bundle["mdl"]
    extractor = bundle["feature_extractor"]
    labels = bundle["gestures"]

    # Gesture -> key mapping; 'rest' is deliberately absent and presses nothing.
    # 'flextion' spelling matches the label stored in the trained model.
    gesture_keys = {
        "radial": "a",
        "ulnar": "d",
        "flextion": "w",
        "extension": "s",
        "fist": "space",
    }

    # Buffer that always holds the most recent EMG window from the MYO.
    ring = Buffer(extractor.winlen)

    # Run inference until interrupted.
    with hub.run_in_background(ring.on_event):
        print("You may start performing gestures. Press ctrl-c to stop.")
        while hub.running:
            time.sleep(0.050)
            # Wait until a full window of samples has been acquired.
            if len(ring.emg_data_queue) < extractor.winlen:
                continue

            # Latest EMG as an Nx8 numpy matrix (each channel is a column).
            samples = np.array([s[1] for s in ring.get_emg_data()])
            # Feature extraction followed by classification.
            features = extractor.extract_feature_vector(samples)
            prediction = clf.predict(features)
            gesture = labels[prediction[0]]
            # Output inference:
            print("\rRecognized gesture: ", gesture, end="")

            # NOTE(review): keys are only ever keyDown'ed, never released —
            # confirm the target game treats repeated keyDown as "hold".
            key = gesture_keys.get(gesture)
            if key is not None:
                pyautogui.keyDown(key)