Example #1
import time

import numpy as np
import keyboard   # assumption: the `keyboard` package supplies keyboard.wait
import pylsl as lsl   # assumption: `lsl` is pylsl (StreamInfo/StreamOutlet)
import xdf   # legacy xdf.py module providing load_xdf


def replay_xdf(path, files, auto=True):
    game_info = lsl.StreamInfo("Game State", "Flags", 1, 0, 'string',
                               "jkllkjl")
    game_outlet = lsl.StreamOutlet(game_info)

    eeg_info = lsl.StreamInfo("NeuroneStream", "EEG", 75, 5000, 'float32',
                              "SDQWD")
    eeg_outlet = lsl.StreamOutlet(eeg_info)

    for file in files:
        #load a file
        print('Reading ', file)
        stream = xdf.load_xdf(path + file, verbose=False)

        stream_names = np.array(
            [item['info']['name'][0] for item in stream[0]])
        game_state = list(stream_names).index('Game State')
        eeg_data = list(stream_names).index('NeuroneStream')

        game_series = stream[0][game_state]['time_series']
        game_time = stream[0][game_state]['time_stamps']

        eeg_series = stream[0][eeg_data]['time_series']
        eeg_time = stream[0][eeg_data]['time_stamps']

        eeg_time = eeg_time - game_time[0]
        game_time = game_time - game_time[0]

        game_idx = 0
        # index of the last EEG sample at or before the game's first timestamp
        eeg_idx = eeg_time[eeg_time <= 0].size - 1
        end = game_time.size - 1

        if not auto:
            print('Press "Enter" to start streaming next file.')
            keyboard.wait('enter')

        start = time.time()
        minute = 0  # minutes streamed so far (avoids shadowing the built-in min)

        while game_idx <= end:
            now = time.time() - start
            game_target = game_time[game_time <= now].size - 1
            eeg_target = eeg_time[eeg_time <= now].size - 1
            while game_idx <= game_target:
                game_outlet.push_sample(game_series[game_idx])
                game_idx += 1
            while eeg_idx <= eeg_target:
                eeg_outlet.push_sample(eeg_series[eeg_idx])
                eeg_idx += 1
            if now / 60 > minute:
                print("Streamed for", minute, "out of 5 minutes.")
                minute += 1
    print("Streaming complete.")
Example #2
def massAnalysis2(path, users, videos):
    overlap_list = []
    print('computing...')
    for u1 in users:
        print('[', int((users.index(u1) / len(users)) * 100), '% ]')
        for u2 in users:
            if u1 != u2:
                for v in videos:
                    # load data from xdf files
                    streams_1 = xdf.load_xdf(path + u1 + '_' + v + '.xdf',
                                             None, False)
                    streams_2 = xdf.load_xdf(path + u2 + '_' + v + '.xdf',
                                             None, False)
                    # extract gaze streams
                    gazestream_1 = extractStream('iViewXLSL', streams_1)
                    gazestream_2 = extractStream('iViewXLSL', streams_2)
                    # Extract gaze coordinates user 1
                    gazeX_1 = gazestream_1['time_series'][:, 0]
                    gazeY_1 = gazestream_1['time_series'][:, 1]
                    gazetimes_1 = gazestream_1['time_stamps']
                    gtime_1 = gazetimes_1 - gazetimes_1[0]
                    # Extract gaze coordinates user 2
                    gazeX_2 = gazestream_2['time_series'][:, 0]
                    gazeY_2 = gazestream_2['time_series'][:, 1]
                    gazetimes_2 = gazestream_2['time_stamps']
                    gtime_2 = gazetimes_2 - gazetimes_2[0]
                    # get fixations
                    fixations_1 = fixationDetection(gazeX_1, gazeY_1, gtime_1,
                                                    u1, v)
                    fixations_2 = fixationDetection(gazeX_2, gazeY_2, gtime_2,
                                                    u2, v)
                    #crop at blink locations
                    cropFixations(fixations_1)
                    cropFixations(fixations_2)
                    # get overlapping fixations
                    overlap_list.append(
                        findOverlappingFix(fixations_1, fixations_2, 50, 0.5))
    return overlap_list
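
Note that the nested loops above re-read each .xdf file once per user pairing. A small memoized loader avoids the repeated disk reads; this is a sketch assuming the same `xdf.load_xdf` signature used above:

import xdf   # legacy xdf.py module providing load_xdf

_cache = {}

def load_gaze_streams(path, user, video):
    """Load (and cache) the raw streams for one user/video recording."""
    key = (user, video)
    if key not in _cache:
        _cache[key] = xdf.load_xdf(path + user + '_' + video + '.xdf',
                                   None, False)
    return _cache[key]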
Example #3
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.font_manager import FontProperties
from xdf import load_xdf

def add_markers():
    # draw a vertical line for every marker: 'S  1' markers in red, all others in green
    for i in range(len(markers["time_stamps"])):
        if str(markers["time_series"][i]) == "['S  1']":
            plt.axvline(x=markers["time_stamps"][i], label="S1", color='r', lw=0.5)
        else:
            plt.axvline(x=markers["time_stamps"][i], label="other", color='g', lw=0.5)


stream_file = input("Enter the name of the xdf file: ")

streams, header = load_xdf(stream_file)
eeg = None
emg = None
markers = None

# identify the individual streams
for s in streams:
    stream_type = s["info"]["type"][0]  # avoid shadowing the built-in type()
    if stream_type == 'EEG':
        eeg = s
    elif stream_type == 'EMG':
        emg = s
    elif stream_type == 'Markers':
        markers = s

Example #4
import re

import numpy as np
import pandas as pd
import xdf   # legacy xdf.py module providing load_xdf


def exportStream(file_path,
                 stream_name,
                 markers_stream_name=None,
                 markers_to_write=None):
    streams = xdf.load_xdf(file_path, verbose=False)

    subject_number = None
    digits = re.findall(r'\d+', file_path[file_path.rfind('/'):])
    if len(digits) > 0:
        subject_number = digits[0]

    # Find the desired stream
    desired_stream = None
    for i in range(len(streams[0])):
        if streams[0][i]['info']['name'][0] == stream_name:
            desired_stream = streams[0][i]
            break

    if desired_stream is None:
        return "Stream " + stream_name + " not found"

    if markers_stream_name is not None and markers_stream_name != '':
        # Put the data in array, row by row, with a place for the markers
        desired_stream_data = []
        for i in range(len(desired_stream['time_series'])):
            desired_stream_data.append([
                desired_stream['time_series'][i][0], 0,
                desired_stream['time_stamps'][i]
            ])

        # Find the Marker data
        markers = None
        for i in range(len(streams[0])):
            if streams[0][i]['info']['name'][0] == markers_stream_name:
                markers = streams[0][i]
                break

        if markers is None:
            return "Markers stream " + markers_stream_name + " not found"

        # Define function for finding the nearest value in an array
        # Used to find the closest desired stream timestamp that matches the timestamp of each marker
        def find_nearest(array, value):
            idx = (np.abs(array - value)).argmin()
            return idx

        # Add the markers to right rows of desired stream data
        for i in range(len(markers['time_series'])):
            if markers_to_write is None or len(markers_to_write) == 0 or int(
                    markers['time_series'][i]) in markers_to_write:
                index = find_nearest(desired_stream['time_stamps'],
                                     markers['time_stamps'][i])
                desired_stream_data[index][1] = int(markers['time_series'][i])

        # Create a dataframe with the desired stream data
        desired_stream_dataframe = pd.DataFrame(
            desired_stream_data, columns=[stream_name, 'Markers', 'Time'])
    else:
        # Put the data in array, row by row
        desired_stream_data = []
        for i in range(len(desired_stream['time_series'])):
            desired_stream_data.append([
                desired_stream['time_series'][i][0],
                desired_stream['time_stamps'][i]
            ])

        # Create a dataframe with the desired stream data
        desired_stream_dataframe = pd.DataFrame(desired_stream_data,
                                                columns=[stream_name, 'Time'])

    # Save the desired stream dataframe to CSV
    output_file_path = file_path[:file_path.rfind(
        '/')]  # File path without file name
    output_file_name = stream_name
    if subject_number is not None and subject_number != '':
        output_file_name = output_file_name + '-' + str(subject_number)
    desired_stream_dataframe.to_csv(output_file_path + '/' + output_file_name +
                                    '.csv',
                                    sep='\t',
                                    index=False)

    print("File " + output_file_name + ".csv created successfully")
    return "File " + output_file_name + ".csv created successfully"
Example #5
def synchronize_video(xdf_file, vid):
    """ Predicts LSL time for every frame in a corresponding video. For use in a Notebook, preceed the command with "%matplotlib qt".
    
    Args:
        xdf: Path to the .xdf file of the experiment.
        vid: Path to the video file of the experiment
        
    Returns:
        frame_time: Array of LSL times. Index corresponds to frame in the video.
    
    """

    cap = cv2.VideoCapture(vid)
    ret, frame = cap.read()

    # show the first frame so the user can click on the sync-indicator pixel
    f, a = plt.subplots()
    a.imshow(frame, cmap='gray')
    pos = []

    print('Click on the indicator and close the image.')

    def onclick(event):
        pos.append([event.xdata, event.ydata])

    f.canvas.mpl_connect('button_press_event', onclick)
    f.suptitle('Click on the indicator')

    plt.show(block=True)
    pos = np.array(pos[-1]).round().astype(int)
    print('Read pixel: ', pos)

    print('Start reading video')
    clval = []  # colour value of the indicator pixel in every frame
    while ret:
        clval.append(frame[pos[1], pos[0]])
        ret, frame = cap.read()

    cap.release()
    print('End reading video')

    # threshold the summed colour channels to get a binary on/off signal
    digi = np.array(clval).sum(1)
    digi = digi > 100
    digi = digi.astype(int)

    # +1/-1 wherever the indicator switches state between frames
    switch = np.diff(digi, axis=0)

    print('Start reading .xdf')
    stream = xdf.load_xdf(xdf_file, verbose=False)
    print('End reading .xdf')

    stream_names = np.array([item['info']['name'][0] for item in stream[0]])
    vid_sync = list(stream_names).index('Video Sync')

    vid_sync_times = np.array(stream[0][vid_sync]['time_stamps'])

    # frames at which the indicator switched on (rising edges)
    record_frames = np.array([i for i, y in enumerate(switch) if y == 1]) + 1

    # linear fits at both ends, for extrapolating before the first
    # and after the last sync pulse
    me, be = np.polyfit(record_frames[-2:], vid_sync_times[-2:], 1)
    ms, bs = np.polyfit(record_frames[:2], vid_sync_times[:2], 1)

    # interpolate a timestamp for every frame, extrapolating at both ends
    all_frames = np.arange(len(clval))
    frame_time = np.interp(all_frames, record_frames, vid_sync_times)
    frame_time[:record_frames[0]] = all_frames[:record_frames[0]] * ms + bs
    frame_time[record_frames[-1] + 1:] = all_frames[record_frames[-1] + 1:] * me + be

    return frame_time
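
A hypothetical call (paths are placeholders; the function assumes `cv2`, `numpy as np`, `matplotlib.pyplot as plt`, and the `xdf` loader are imported):

# Map every video frame to LSL time, then look up the time of frame 1000.
frame_time = synchronize_video('/data/session.xdf', '/data/session.avi')
print('Frame 1000 occurred at LSL time', frame_time[1000])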
Example #6
def xdf_loader(xdf_file):
    """ Loads an appropriate EEG file into a mne raw object.
    
    Args:
        xdf_file: The path to an .xdf file from which the data is extracted.
        
    Returns:
        raw: The mne raw object.
    
    """

    # Load the xdf file
    stream = xdf.load_xdf(xdf_file, verbose=False)

    # Extract the necessary event and eeg information
    stream_names = np.array([item['info']['name'][0] for item in stream[0]])
    game_state = list(stream_names).index('Game State')
    eeg_data = list(stream_names).index('NeuroneStream')

    sfreq = int(stream[0][eeg_data]['info']['nominal_srate'][0])
    game_state_series = np.array(
        [item[0] for item in stream[0][game_state]['time_series']])
    game_state_times = np.array(stream[0][game_state]['time_stamps'])

    times = stream[0][eeg_data]['time_stamps']
    data = stream[0][eeg_data]['time_series'].T

    if len(data) == 72:
        # 32 EEG, then 4 EMG, 4 EOG, and 32 more EEG channels
        ch_types = ['eeg'] * 32 + ['emg'] * 4 + ['eog'] * 4 + ['eeg'] * 32

        ch_names = [
            'Fp1', 'Fpz', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'FC5', 'FC1',
            'FC2', 'FC6', 'M1', 'T7', 'C3', 'Cz', 'C4', 'T8', 'M2', 'CP5',
            'CP1', 'CP2', 'CP6', 'P7', 'P3', 'Pz', 'P4', 'P8', 'POz', 'O1',
            'Oz', 'O2', 'EMG_RH', 'EMG_LH', 'EMG_RF', 'EMG_LF', 'EOG_R',
            'EOG_L', 'EOG_U', 'EOG_D', 'AF7', 'AF3', 'AF4', 'AF8', 'F5', 'F1',
            'F2', 'F6', 'FC3', 'FCz', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3',
            'CPz', 'CP4', 'P5', 'P1', 'P2', 'P6', 'PO5', 'PO3', 'PO4', 'PO6',
            'FT7', 'FT8', 'TP7', 'TP8', 'PO7', 'PO8'
        ]

    elif len(data) == 75:
        # as above, plus ECG, respiration and GSR channels
        ch_types = (['eeg'] * 32 + ['emg'] * 4 + ['eog'] * 4 +
                    ['eeg'] * 32 + ['ecg'] + ['bio'] * 2)

        ch_names = [
            'Fp1', 'Fpz', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'FC5', 'FC1',
            'FC2', 'FC6', 'M1', 'T7', 'C3', 'Cz', 'C4', 'T8', 'M2', 'CP5',
            'CP1', 'CP2', 'CP6', 'P7', 'P3', 'Pz', 'P4', 'P8', 'POz', 'O1',
            'Oz', 'O2', 'EMG_RH', 'EMG_LH', 'EMG_RF', 'EMG_LF', 'EOG_R',
            'EOG_L', 'EOG_U', 'EOG_D', 'AF7', 'AF3', 'AF4', 'AF8', 'F5', 'F1',
            'F2', 'F6', 'FC3', 'FCz', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3',
            'CPz', 'CP4', 'P5', 'P1', 'P2', 'P6', 'PO5', 'PO3', 'PO4', 'PO6',
            'FT7', 'FT8', 'TP7', 'TP8', 'PO7', 'PO8', 'ECG', 'Respiration',
            'GSR'
        ]

    else:
        raise ValueError('Unexpected channel count: {}'.format(len(data)))

    info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

    raw = mne.io.RawArray(data, info)

    events = np.zeros([game_state_series.size, 3], 'int')

    # Calculate the closest frame in the EEG data for each event time
    events[:, 0] = [
        np.argmin(np.abs(times - event_time))
        for event_time in game_state_times
    ]

    legend = np.unique(game_state_series)
    class_vector = np.zeros(game_state_series.size, dtype='int')
    event_id = {}
    for ii in np.arange(legend.size):
        class_vector += (game_state_series == legend[ii]) * ii
        event_id[legend[ii]] = ii

    events[:, 2] = class_vector

    # This will not get saved to .fif and has to be worked around
    raw.events = events
    raw.event_id = event_id

    # Set eeg sensor locations
    raw.set_montage(
        mne.channels.read_montage("standard_1005",
                                  ch_names=raw.info["ch_names"]))

    # Reference to the common average
    #raw.set_eeg_reference()

    return raw
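
A sketch of how the returned object might be used; the epoch window here is an arbitrary choice, not from the original code:

# Cut epochs around the game-state events attached to the raw object above.
raw = xdf_loader('/data/subject_01.xdf')  # hypothetical path
epochs = mne.Epochs(raw, raw.events, raw.event_id, tmin=-0.2, tmax=0.8)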
Example #7

import json

import xdf   # legacy xdf.py module providing load_xdf


def saveAsJsonRAW(x, y, t, filename):
    # build one dictionary per sample (x, y and time) and write them all
    # at once as a valid JSON array, instead of appending fragments
    samples = [{'x': x[i], 'y': y[i], 'time': t[i]} for i in range(len(x))]
    with open(filename + '.json', 'w') as json_file:
        json.dump(samples, json_file, separators=(',', ':'))


# choose source
streams = xdf.load_xdf(r"../../TestData/anne_GotG2.xdf", None, False)

gazestream = extractStream('iViewXLSL', streams)

gazeX = gazestream['time_series'][:, 0]
gazeY = gazestream['time_series'][:, 1]
gazetimes = gazestream['time_stamps']

# copy the gaze samples into plain Python lists
x = list(gazeX)
y = list(gazeY)
t = list(gazetimes)
Example #8
def get_streams(file_path):
    """Return the list of streams from an .xdf file (drops the header)."""
    streams = xdf.load_xdf(file_path)

    return streams[0]
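
`load_xdf` returns a `(streams, header)` pair, so indexing with `[0]` keeps only the stream list. A short usage sketch (path is a placeholder):

# Print the name of every stream in a recording.
for s in get_streams('/data/recording.xdf'):
    print(s['info']['name'][0])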
Example #9
        # add dictionary to list
        dictlist.append(d)

    # write dictlist to json file
    with open('fixations_ext.json', 'w') as file:
        file.write('var fix = ')
        json.dump(dictlist, file)


#############
#	Main
#############

# Load streams into object
# Video URL: https://www.youtube.com/watch?v=dW1BIid8Osg
streams = xdf.load_xdf(r"GuardiansOfTheGalaxyVol2.xdf", None, False)

# Extract streams
gazestream = extractStream('iViewXLSL', streams)

# Extract gaze coordinates
gazeX = gazestream['time_series'][:, 0]
gazeY = gazestream['time_series'][:, 1]
gazeTimes = gazestream['time_stamps']  # corresponding times in seconds

# timestamps start from 0
time = gazeTimes - gazeTimes[0]

# compute fixations
fixations = fixationDetection(gazeX, gazeY, time)
Example #10
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 10 14:11:02 2018

@author: admin
"""

import xdf

filename = "C:/Users/admin.DDIAS4/Desktop/e3_walk/raw_xdf_format/TEST_S001_WALK/S1_labrecorder_100000_test.xdf"
data = xdf.load_xdf(filename)
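
`load_xdf` returns a `(streams, header)` tuple; a quick sketch for inspecting what the file contains:

streams, header = data
for s in streams:
    print(s['info']['name'][0], s['info']['type'][0], len(s['time_stamps']))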
Example #11
            #end
            print(fix.fixations[i].stop, end="\t\t", file=text_file)
            #duration
            print(fix.fixations[i].dur, end="\t\t", file=text_file)
            #x-coord
            if fix.fixations[i].x == 0:
                print(fix.fixations[i].x, end="\t\t\t", file=text_file)
            else:
                print(fix.fixations[i].x, end="\t\t", file=text_file)
            #y-coord
            print(fix.fixations[i].y, file=text_file)


# Load streams into object
streams_1 = xdf.load_xdf(
    r"C:/Users/Jannis/Documents/Studium/Module/Andere/5.Semester/Projektpraktikum/LabRecorder/Recordings/anne_GotG2.xdf",
    None, False)
streams_2 = xdf.load_xdf(
    r"C:/Users/Jannis/Documents/Studium/Module/Andere/5.Semester/Projektpraktikum/LabRecorder/Recordings/alex_GotG2.xdf",
    None, False)

# Extract streams
gazestream_1 = extractStream('iViewXLSL', streams_1)
emotionstream_1 = extractStream('EmotivLSL_PerformanceMetrics', streams_1)
facialstream_1 = extractStream('EmotivLSL_FacialExpression', streams_1)

gazestream_2 = extractStream('iViewXLSL', streams_2)
emotionstream_2 = extractStream('EmotivLSL_PerformanceMetrics', streams_2)
facialstream_2 = extractStream('EmotivLSL_FacialExpression', streams_2)

# Extract gaze coordinates
Example #12
import xdf
import matplotlib.pyplot as plt

###############################################################################
# Extract stream by name
def extractStream(name, streams):

    # Go over streams and search for the name
    for i in range(len(streams[0])):
        if streams[0][i]['info']['name'][0] == name:
            return streams[0][i]
    # fall back to the first stream if the name was not found
    return streams[0][0]
###############################################################################

# Load streams into object
streams_1 = xdf.load_xdf(r"../../TestData/christopher_conjuring.xdf", None, False)

# Extract streams
gazestream_1 = extractStream('iViewXLSL', streams_1)
emotionstream_1 = extractStream('EmotivLSL_PerformanceMetrics', streams_1)
facialstream_1 = extractStream('EmotivLSL_FacialExpression', streams_1)

# Extract gaze coordinates
gazeX_1 = gazestream_1['time_series'][:, 0]
gazeY_1 = gazestream_1['time_series'][:, 1]
gazetimes_1 = gazestream_1['time_stamps']

gtime_1 = gazetimes_1 - gazetimes_1[0]

# Extract emotions ("scaled score")
stress = emotionstream_1['time_series'][:, 3]
Example #13
import sys

import numpy as np
import scipy.spatial
import xdf   # legacy xdf.py module providing load_xdf


def convert_xdf_to_mugs(inFilename, outFilename, verbose):
  streams, headers = xdf.load_xdf(inFilename, verbose=verbose)

  # check which stream stores which data
  headStream = -1
  eyeStream = -1
  stimStream = -1
  i = 0
  for stream in streams:
    if stream["info"]["channel_count"][0] == '2':
      stimStream = i
      i = i+1
      continue
    elif stream["info"]["channel_count"][0] == '4':
      eyeStream = i
      i = i+1
      continue
    elif stream["info"]["channel_count"][0] == '6':
      headStream = i
      i = i+1
      continue
    else:
      print "Unexpected number of channels in stream", stream["info"]["name"][0], "(", stream["info"]["channel_count"][0], "channels detected )"
      sys.exit()

  if verbose:
    print "Start linking sample points..."
  # check whether a stimulus was presented
  if stimStream != -1:
    # in order to sync the stimulus sample points to the head sample points
    # we need both stored in numpy arrays
    stim = np.array(streams[stimStream]["time_stamps"])
    head = np.array(streams[headStream]["time_stamps"])
    
    # a tree optimized for nearest-neighbor lookup
    if verbose:
      print "Building KDTree for head and stimulus..."
    tree = scipy.spatial.cKDTree(head[..., np.newaxis])

    # get the distances and indices of all head sample points to their nearest neighbor 
    # stimulus sample point
    distances_head, indices_head = tree.query(stim[..., np.newaxis])

    # now do the same with the eye sample points
    eye = np.array(streams[eyeStream]["time_stamps"])
    if verbose:
      print "Building KDTree for eye and stimulus..."
    tree = scipy.spatial.cKDTree(eye[..., np.newaxis])
    distances_eye, indices_eye = tree.query(stim[..., np.newaxis])
  
    # write synchronized data to mugs file
    writeFileWStim(streams, [headStream, eyeStream, stimStream], indices_head, indices_eye, outFilename)

  else:
    # check which stream (head or eye) has fewer data points
    fewerDp = [headStream, eyeStream] if len(streams[headStream]["time_stamps"]) <= len(streams[eyeStream]["time_stamps"]) else [eyeStream, headStream]
    
    # create the needed numpy arrays
    small = np.array(streams[fewerDp[0]]["time_stamps"])
    big = np.array(streams[fewerDp[1]]["time_stamps"])

    # a tree optimized for nearest-neighbor lookup
    if verbose:
      print "Building KDTree for head and eye..."
    tree = scipy.spatial.cKDTree(big[..., np.newaxis])

    # get the distances and indices of all sample points of the smaller stream to 
    # their nearest neighbor sample points of the bigger stream
    distances_big, indices_big = tree.query(small[..., np.newaxis])

    # write synchronized data to mugs file
    writeFileWoStim(streams, [headStream, eyeStream, fewerDp[0]], indices_big, outFilename)
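
The nearest-neighbour matching above can be tried in isolation; a minimal sketch with synthetic timestamps:

import numpy as np
import scipy.spatial

# For each "stimulus" time, find the index of the closest "head" sample.
head_times = np.sort(np.random.rand(100))
stim_times = np.sort(np.random.rand(10))
tree = scipy.spatial.cKDTree(head_times[..., np.newaxis])
distances, indices = tree.query(stim_times[..., np.newaxis])
print(indices)  # index of the nearest head sample per stimulus sample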
Example #14
    def __init__(self, file_path):
        # create frames for the gui
        self.root = tk.Tk()

        # quit button
        # self.quit_button = tk.Button(self.root, text="Quit", command=self.root.destroy).pack()
        # self.quit_button = tk.Button(self.root, text="Quit", command=sys.exit()).pack()

        # the video frame
        self.frame_video = tk.Frame(self.root)
        self.frame_video.pack(side=tk.LEFT)

        # the plots frame
        self.frame_wave_plots = tk.Frame(self.root)
        self.frame_wave_plots.pack(side=tk.RIGHT)

        # the sliders frame
        self.frame_slider = tk.Frame(self.root)
        self.frame_slider.pack(side=tk.TOP)

        # retrieve xdf data
        stream = xdf.load_xdf(file_path)

        self.data_all = []
        
        
        # for astream in stream:
            # print(astream)
        # print(stream[1])
        # exit()
            
        # fill the above list with the data needed to build the graphs
        for sub_stream in stream[0]:
            print(sub_stream['info']['name'])
            print(sub_stream['time_stamps'])

            # buffer video or plot data
            if (sub_stream['info']['name'][0] == 'Webcam'):
                self.data_video = sub_stream['time_series']
                continue
            self.data_all.append((
                sub_stream['time_series'], 
                sub_stream['time_stamps'], 
                sub_stream['info']['desc'][0]['channels'][0].keys(),
                sub_stream['info']['name'][0])) # Record device name in the data

        # exit()

        # create plots
        self.fig, self.axes = plt.subplots(len(self.data_all), 1, figsize=(15, 10))
        if len(self.data_all) > 1:
            self.axes = self.axes.ravel()
        elif len(self.data_all) == 1:
            self.axes = [self.axes]
        
        # add the fig to the gui
        self.w_canvas = FigureCanvasTkAgg(self.fig, self.frame_wave_plots)
        self.w_canvas.get_tk_widget().pack()

        # fill the plots with data
        for i, sub_stream in enumerate(self.data_all):
            print(i, sub_stream)
            create_wave_plot(self.axes[i], sub_stream[0], sub_stream[1], sub_stream[2])

        # create the slider plots
        self.slider_scale_ax = self.fig.add_axes([0.1, 0.04, 0.8, 0.02])
        self.slider_scale = Slider(self.slider_scale_ax, 'scale', 0.0, 1.0, valinit=1.0)
        self.slider_scale.on_changed(self.handle_slider_scale)

        self.slider_window_ax = self.fig.add_axes([0.1, 0.02, 0.8, 0.02])
        self.slider_window = Slider(self.slider_window_ax, 'window', 0.0, 1.0, valinit=0.0)
        self.slider_window.on_changed(self.handle_slider_window)
       
        self.scale = 1.0
        self.window_start = 0.0
       
        # place initial tracklines
        self.tracklines = []
        for ax in self.axes:
            self.tracklines.append(place_trackline(ax, None, 0))

        '''
        Create video objects
        '''
        # buffer video
        self.frame_buffer = np.uint8(np.array(self.data_video).reshape(-1, VIDEO_HEIGHT, VIDEO_WIDTH, 3))
        print("VIDEO LEN " + str(len(self.frame_buffer)))
        # initialize video vars
        self.len_buffer = len(self.frame_buffer)
        self.frame_index = 0
        self.play = False
        self.frame_delay = 100

        # create video widget
        self.w_video = tk.Label(self.frame_video)
        self.w_video.pack(side=tk.TOP)
        
        # create frame within video frame to hold buttons and progress slider
        self.frame_video_buttons = tk.Frame(self.frame_video)
        self.frame_video_buttons.pack(side=tk.TOP)
        
        self.frame_video_progress = tk.Frame(self.frame_video)
        self.frame_video_progress.pack(side=tk.TOP)

        # create buttons for video control
        self.w_video_play = tk.Button(self.frame_video_buttons, text='Play', command=self.handle_video_play)
        self.w_video_play.pack(side=tk.LEFT)

        self.w_video_pause = tk.Button(self.frame_video_buttons, text='Pause', command=self.handle_video_pause)
        self.w_video_pause.pack(side=tk.LEFT)

        self.w_video_restart = tk.Button(self.frame_video_buttons, text='Restart', command=self.handle_video_restart)
        self.w_video_restart.pack(side=tk.LEFT)

        # create progress slider for video
        self.w_video_progress = tk.Scale(self.frame_video_progress, from_=0, to=100, orient=tk.HORIZONTAL, command=self.handle_progress_change, length=400)
        self.w_video_progress.pack(side=tk.LEFT)

        '''
        Set up lsl stream before frame update
        '''
        # Outstream muse
        # Setup outlet stream infos
        stream_info_muse = StreamInfo('Muse', 'EEG', CHNS_MUSE, MUSE_SAMPLE_RATE, 'float32', 'museid_1')
        channels = stream_info_muse.desc().append_child("channels")
        channel_list = ["ar0", "ar1", "ar2", "ar3",
                "br0", "br1", "br2", "br3",
                "gr0", "gr1", "gr2", "gr3",
                "tr0", "tr1", "tr2", "tr3",
                "dr0", "dr1", "dr2", "dr3",
                "mellow", "concentration"]
        for c in channel_list:
            channels.append_child(c)
        # Create outlet
        self.outlet_muse = StreamOutlet(stream_info_muse)

        # Outstream hrv
        # Setup outlet stream infos
        stream_info_hrv = StreamInfo('HRV', 'EEG', CHNS_HRV, HRV_SAMPLE_RATE, 'float32', 'hrvid_1')
        channels = stream_info_hrv.desc().append_child("channels")
        channels.append_child('hr')
        channels.append_child('rr')
        # Create outlets
        self.outlet_hrv = StreamOutlet(stream_info_hrv)

        # Outstream the shadowsuit
        stream_info_mocap = StreamInfo('ShadowSuit', 'MOCAP', mocap_channels * sample_size, 200)
        channels = stream_info_mocap.desc().append_child("channels")
        channel_list = ["lq0", "lq1", "lq2", "lq3",
            "c0", "c1", "c2", "c3"]
        for c in channel_list:
            channels.append_child(c) 
        # Create outlets
        self.outlet_mocap = StreamOutlet(stream_info_mocap)

        # frame counts to keep track of which frame is updating
        self.web_framecount = 0
        # Outstream the webcam
        stream_info_webcam = StreamInfo('Webcam', 'Experiment',
                                        WC_WIDTH * WC_HEIGHT * WC_CHNS,
                                        WEBCAM_SAMPLE_RATE, 'int32',
                                        'webcamid_1')
        # Create webcam outstream
        self.outlet_webcam = StreamOutlet(stream_info_webcam)
        '''
        End lsl stream update
        '''

        # set first frame
        self.update_frame()

        # start video frame update coroutine
        self.update_video_frame()
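
The excerpt ends before the outlets are used; a hypothetical sketch of how the frame-update loop might feed one of them (the buffer name and shape are assumptions, not shown above):

    def push_current_samples(self):
        # Hypothetical: self.muse_buffer is assumed to be an
        # (n_samples, CHNS_MUSE) array recorded alongside the video frames.
        sample = self.muse_buffer[self.frame_index]
        self.outlet_muse.push_sample(list(sample))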
Example #15
import xdf
import numpy as np
import fixation_prototype as fx
import matplotlib.pyplot as plt

# Load streams into object
# Video URL: https://www.youtube.com/watch?v=dW1BIid8Osg
streams = xdf.load_xdf(
    r"C:/Users/Jannis/Documents/Studium/Module/Andere/5.Semester/Projektpraktikum/LabRecorder/Recordings/GotG2_Raphael.xdf",
    None, False)

# Extract streams
gazestream = fx.extractStream('iViewXLSL', streams)

# Extract gaze coordinates
gazeX = gazestream['time_series'][:, 0]
gazeY = gazestream['time_series'][:, 1]
gazeTimes = gazestream['time_stamps']  # corresponding times in seconds

time = gazeTimes - gazeTimes[0]

fixations = fx.fixationDetection(gazeX, gazeY, time)

print("start", end="\t\t\t")
print("stop", end="\t\t\t")
print("duration", end="\t\t")
print("x-coord", end="\t\t\t")
print("y-coord")

for i in range(0, len(fixations.start)):
    #start
Example #16
    def __init__(self, file_path):
        # create frames for the gui
        self.root = tk.Tk()

        # to quit
        self.root.protocol("WM_DELETE_WINDOW", self.close_window)
        self.running = True

        # quit button
        # self.quit_button = tk.Button(self.root, text="Quit", command=self.root.destroy).pack()
        # self.quit_button = tk.Button(self.root, text="Quit", command=sys.exit()).pack()

        # the video frame
        self.frame_video = tk.Frame(self.root)
        self.frame_video.pack(side=tk.LEFT)

        # the plots frame
        self.frame_wave_plots = tk.Frame(self.root)
        self.frame_wave_plots.pack(side=tk.RIGHT)

        # the sliders frame
        self.frame_slider = tk.Frame(self.root)
        self.frame_slider.pack(side=tk.TOP)

        # retrieve xdf data
        # stream = xdf.load_xdf(file_path, verbose=False)
        stream = xdf.load_xdf(file_path)

        self.data_all = []

        # for astream in stream:
        # print(astream)
        # print(stream[1])
        # exit()

        # fill the above list with the data needed to build the graphs
        for sub_stream in stream[0]:
            # print(sub_stream['info']['name'])
            # print(sub_stream['time_stamps'])

            # buffer video or plot data
            if (sub_stream['info']['name'][0] == 'Webcam'):
                # print("video")
                self.data_video = sub_stream['time_series']
                continue

            # print(sub_stream['info']['desc'][0]['channels'].keys())
            if sub_stream['info']['desc'][0] is None:
                channel_names = "none"
            else:
                channel_names = sub_stream['info']['desc'][0]['channels']
            self.data_all.append((sub_stream['time_series'],
                                  sub_stream['time_stamps'], channel_names))
        # exit()

        # create plots
        self.fig, self.axes = plt.subplots(len(self.data_all),
                                           1,
                                           figsize=(15, 10))
        if len(self.data_all) > 1:
            self.axes = self.axes.ravel()
        elif len(self.data_all) == 1:
            self.axes = [self.axes]

        # add the fig to the gui
        self.w_canvas = FigureCanvasTkAgg(self.fig, self.frame_wave_plots)
        self.w_canvas.get_tk_widget().pack()

        # fill the plots with data
        for i, sub_stream in enumerate(self.data_all):
            # print(i, sub_stream)
            create_wave_plot(self.axes[i], sub_stream[0], sub_stream[1],
                             sub_stream[2])

        # create the slider plots
        self.slider_scale_ax = self.fig.add_axes([0.1, 0.04, 0.8, 0.02])
        self.slider_scale = Slider(self.slider_scale_ax,
                                   'scale',
                                   0.0,
                                   1.0,
                                   valinit=1.0)
        self.slider_scale.on_changed(self.handle_slider_scale)

        self.slider_window_ax = self.fig.add_axes([0.1, 0.02, 0.8, 0.02])
        self.slider_window = Slider(self.slider_window_ax,
                                    'window',
                                    0.0,
                                    1.0,
                                    valinit=0.0)
        self.slider_window.on_changed(self.handle_slider_window)

        self.scale = 1.0
        self.window_start = 0.0

        # place initial tracklines
        self.tracklines = []
        for ax in self.axes:
            self.tracklines.append(place_trackline(ax, None, 0))
        '''
        Create video objects
        '''

        # buffer video
        self.frame_buffer = np.uint8(
            np.array(self.data_video).reshape(-1, VIDEO_HEIGHT, VIDEO_WIDTH,
                                              3))
        print("video length" + str(len(self.frame_buffer)))
        # initilize video vars
        self.len_buffer = len(self.frame_buffer)
        self.frame_index = 0
        self.play = False
        self.frame_delay = 100

        # create video widget
        self.w_video = tk.Label(self.frame_video)
        self.w_video.pack(side=tk.TOP)

        # create frame within video frame to hold buttons and progress slider
        self.frame_video_buttons = tk.Frame(self.frame_video)
        self.frame_video_buttons.pack(side=tk.TOP)

        self.frame_video_progress = tk.Frame(self.frame_video)
        self.frame_video_progress.pack(side=tk.TOP)

        # create buttons for video control
        self.w_video_play = tk.Button(self.frame_video_buttons,
                                      text='Play',
                                      command=self.handle_video_play)
        self.w_video_play.pack(side=tk.LEFT)

        self.w_video_pause = tk.Button(self.frame_video_buttons,
                                       text='Pause',
                                       command=self.handle_video_pause)
        self.w_video_pause.pack(side=tk.LEFT)

        self.w_video_restart = tk.Button(self.frame_video_buttons,
                                         text='Restart',
                                         command=self.handle_video_restart)
        self.w_video_restart.pack(side=tk.LEFT)

        # create progress slider for video
        self.w_video_progress = tk.Scale(self.frame_video_progress,
                                         from_=0,
                                         to=100,
                                         orient=tk.HORIZONTAL,
                                         command=self.handle_progress_change,
                                         length=400)
        self.w_video_progress.pack(side=tk.LEFT)

        # set first frame
        self.update_frame()

        # start video frame update coroutine
        self.update_video_frame()
Example #17
data_path = '..\\data\\0001_0\\0001_0\\'
eeg_filename = '000100000.221017.141511.Signals.Raw.edf'
marker_filename = '000100000.221017.143353.Markers.mrk'

# TPI Alpha
GOLF = 0
data_path = '..\\data\\TPI Alpha Test July 2018\\TestData2\\'
eeg_filename = 'TestData2_flanker_arrows_2018-07-05_11-34-23_1.xdf'

data_path = '..\\data\\Giorgio 072118\\Giorgio 072118\\EEG Data\\'
eeg_filename = 'giorgio_phase3.xdf'

if GOLF:
    raw = mne.io.read_raw_edf(op.join(data_path, eeg_filename), preload=True)
else:
    streams, header = load_xdf(op.join(data_path, eeg_filename),
                               synchronize_clocks=False)
    print(streams[0]["time_stamps"])
    fig, ax = plt.subplots()
    ax.plot(streams[0]["time_stamps"], streams[0]["time_series"])
    print(streams[0]["time_stamps"])
    #ax.plot(streams[1]["time_stamps"], streams[1]["time_series"])
    fig.show()
    input()

if GOLF:
    # `raw` only exists in the EDF branch above
    print(raw.info)  # 26 channels + 1 stimulus channel
    print(raw.info['chs'][0]['loc'])  # no location info

    # set locations
    montage = mne.channels.read_montage('standard_1020')
    print(montage)