Example #1
from pyedfread import edf  # EDF parser for SR Research EyeLink recordings


def read_edf(subj_data_file_raw):
    """Convert the EDF to pd.DataFrames containing pupil samples, events, and messages."""

    samples, events, messages = edf.pread(subj_data_file_raw)

    assert not samples.empty, 'Error in EDF to pd.DataFrame conversion. Check input.'

    return samples, events, messages
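
A minimal usage sketch for read_edf; the file name is a placeholder, not a file from the original project.

samples, events, messages = read_edf('recording.edf')  # placeholder path
print(samples.shape, events.shape, messages.shape)      # sizes of the three returned DataFrames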
Example #2
def raw_el_data(subject, datapath='/net/store/nbp/projects/etcomp/'):
    # Input:    subject name, datapath
    # Output:   EyeLink samples, events, and messages as DataFrames
    filename = os.path.join(datapath, subject, 'raw')
    from pyedfread import edf # parses SR Research EDF data files into pandas DataFrames

    elsamples, elevents, elnotes = edf.pread(os.path.join(filename,findFile(filename,'.EDF')[0]), trial_marker=b'')
    
    return (elsamples,elevents,elnotes)
Example #3
    def load_pupil(self):
        '''
        Load pupil samples, events, and messages from the raw directory.
        '''
        directory = join(self.flex_dir, 'raw', 'bids_mr_v1.2', self.subject,
                         self.session, 'func')
        files = glob(join(directory, '*{}*.edf'.format(self.run)))
        if len(files) > 1:
            raise RuntimeError(
                'More than one log file found for this block: %s' % files)
        elif len(files) == 0:
            raise RuntimeError('No log file found for this block: %s, %s, %s' %
                               (self.subject, self.session, self.run))
        if self.type == 'inference':
            return edf.pread(
                files[0], trial_marker=b'trial_id'
            )  # 'inference' and 'instructed' runs use different trial markers
        elif self.type == 'instructed':
            return edf.pread(files[0], trial_marker=b'TRIALID')
Example #4
    def load_pupil(self, directory=None):
        '''
        Concatenates path and file name and loads samples (sa), events (ev),
        and messages (m).

        Requires subject code, session, phase and block.
        '''
        directory = join(self.flex_dir, 'raw', 'bids_mr_v1.2', self.subject,
                         self.session, 'func')
        files = glob(join(directory, '*{}*.edf'.format(self.run)))
        if len(files) > 1:
            raise RuntimeError(
                'More than one log file found for this block: %s' % files)
        elif len(files) == 0:
            raise RuntimeError('No log file found for this block: %s, %s, %s' %
                               (self.subject, self.session, self.run))
        if self.type == 'inference':
            return edf.pread(files[0], trial_marker=b'trial_id')
        elif self.type == 'instructed':
            return edf.pread(files[0], trial_marker=b'TRIALID')
Example #5
import collections.abc

import numpy as np
from pyedfread import edf


def load_edf(filename):
    '''
    Loads one EDF file and returns cleaned events and messages DataFrames.
    '''
    events, messages = edf.pread(filename,
                                 properties_filter=[
                                     'gx', 'gy', 'pa', 'sttime', 'start',
                                     'type', 'gavx', 'gavy'
                                 ],
                                 filter='all')
    #events = edf.trials2events(events, messages)
    #events['stime'] = pd.to_datetime(events.sample_time, unit='ms')
    events['type'] = events.type == 'fixation'
    if all((events.right_pa == -32768) | np.isnan(events.right_pa)):
        del events['right_pa']
        events['pa'] = events.left_pa
        del events['left_pa']
    elif all((events.left_pa == -32768) | np.isnan(events.left_pa)):
        del events['left_pa']
        events['pa'] = events.right_pa
        del events['right_pa']
    else:
        raise RuntimeError(
            'Recorded both eyes? So unusual that I\'ll stop here')

    # In some cases the decision variable still contains ['second', 'conf', 'high'], 21.0 -> Fix this
    # In these cases the decision_time variable has 2 time stamps as well...
    if messages.decision.dtype == np.dtype(object):
        messages['decision'] = np.array([
            x[-1] if isinstance(x, collections.abc.Sequence) else x
            for x in messages.decision.values
        ])
        messages['decision_time'] = np.array([
            x[-1] if isinstance(x, collections.abc.Sequence) else x
            for x in messages.decision_time.values
        ])

    return events, messages
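
A hedged usage sketch for load_edf; the file name is a placeholder.

events, messages = load_edf('subject01.edf')    # placeholder file name
print(events.loc[events['type'], 'pa'].mean())  # mean pupil area over fixation events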
Example #6
import numpy as np
import matplotlib.pyplot as plt
from pyedfread import edf

# 3b
samples, events, messages = edf.pread('181105.edf',
                                      trial_marker=b'Start Trial')

# 3f
esr = int(messages['RECCFG'][0][1])
samples['time'] = samples['time'] / esr
events[['start', 'end']] = events[['start', 'end']] / esr
messages[['trialid_time', 'Cue_time', 'End_time'
          ]] = messages[['trialid_time', 'Cue_time', 'End_time']] / esr

# 3g
eind = np.arange(15000)
ex = samples['gx_left'][eind].copy()  # copy to avoid chained-assignment warnings
ey = samples['gy_left'][eind].copy()
et = samples['time'][eind]
ex[ex > 1920] = np.nan  # blank out gaze samples outside the 1920x1080 screen
ex[ex < 0] = np.nan
ey[ey > 1080] = np.nan
ey[ey < 0] = np.nan

# 3n NEED TO WRITE AND SUBMIT
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(et, ex)
plt.xlabel('Time (s)')
plt.ylabel('Eye Position X')
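
A possible continuation of the unfinished figure above; a sketch assuming the second panel should show the vertical gaze trace.

plt.subplot(2, 1, 2)
plt.plot(et, ey)
plt.xlabel('Time (s)')
plt.ylabel('Eye Position Y')
plt.tight_layout()
plt.show()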
Example #7
def get_df(filelist, filter_fun=None, new_rect=None):
    '''
    Reads all EDF files in FILELIST (see get_filelist), a list of
    (filename, metadata) tuples, with pyedfread and returns a merged
    DataFrame where each row is one fixation.

    The output DataFrame is built from the Event and Message DataFrames
    returned by pyedfread (see pyedfread).

    Metadata is used to label each row with participant and run id
    (see get_filelist).

    If no FILTER_FUN is provided, the Message DataFrame is discarded.
    If you need information from it, pass a FILTER_FUN; the result of
    FILTER_FUN(messages) is merged onto the Event DataFrame on the
    trial index.

    Specify NEW_RECT to discard fixations outside of a central square
    region. A NEW_RECT of 500 discards all data outside a 500x500 pixel
    square.

    See get_filelist for more about the filelist format.
    See pyedfread for more information on the output DataFrames.
    '''

    total_file = len(filelist)
    print("Received {} EDF files".format(total_file))
    # init lists that will accumulate the per-file data frames
    e = [None] * total_file
    m = [None] * total_file

    #will be used to index fixation ranks below in df.groupby.apply combo
    def addfix(df):
        df["fix"] = range(df.shape[0])
        return df

    #Call pyedfread and concat different data frames as lists.
    for i, file in enumerate(filelist):
        filename = file[0]
        _, E, M = edf.pread(filename,
                            meta=file[1],
                            ignore_samples=True,
                            filter='all')
        #just to be sure not to have any spaces on column names (this could be moved to pyedfread)
        E.rename(columns=str.strip, inplace=True)
        M.rename(columns=str.strip, inplace=True)

        #remove saccade events.
        E = E.loc[E["type"] == "fixation"]

        #take only useful columns, include the meta data columns as well.
        E = E[list(file[1].keys()) + ['trial', 'gavx', 'gavy', 'start', 'end']]

        #get SYNCTIME (i.e. stim onsets)
        E = E.merge(M.loc[:, ['SYNCTIME', 'py_trial_marker']],
                    right_on='py_trial_marker',
                    left_on='trial',
                    how="left")

        #correct time stamps with SYNCTIME
        E.start = E.start - E.SYNCTIME
        E.end = E.end - E.SYNCTIME
        #remove prestimulus fixations
        E = E[E.start > 0]
        #index fixations
        E = E.groupby("trial").apply(addfix)
        #get display coordinates and calibration values
        E = pd.concat([E, M.loc[M.py_trial_marker == -1,
                                ['validation_result:', 'subject', 'run']]])

        #add the display coordinates.
        rect = M.loc[M.py_trial_marker == -1, ['DISPLAY_COORDS']].values
        E['DISPLAY_COORDS'] = np.repeat(rect, E.shape[0], axis=0)

        #drop useless columns
        E = E.drop(['SYNCTIME', 'py_trial_marker'], axis=1)
        #get what we want from messages using filter_fun or simply discard messages.
        if filter_fun is not None:
            filtered_m = filter_fun(M)
            e[i] = E.merge(filtered_m,
                           left_on='trial',
                           right_on='py_trial_marker',
                           how='left')
        else:
            e[i] = E

        #progress report
        sys.stdout.write('\r')
        sys.stdout.write("Reading file {}\n".format(filename))
        sys.stdout.write("[%-20s] %d%%\n" % ('=' * round(
            (i + 1) / total_file * 20), round((i + 1) / total_file * 100)))
        sys.stdout.flush()

    #convert lists to data frame.
    events = pd.concat(e, ignore_index=True)

    #add fixation weight (todo: add it only to trials > 0)
    events.loc[:, 'weight'] = 1

    #Check whether all data has same DISPLAY_COORDINATES
    #Overwrite DISPLAY_COORDINATES if required.
    events = set_rect(events, new_rect)
    rect = get_rect(events)

    # Remove out-of-range fixation data
    valid_fix = ((events.gavx >= rect[0]) &
                 (events.gavx <= rect[2]) &
                 (events.gavy >= rect[1]) &
                 (events.gavy <= rect[3])) | events.trial.isnull()

    events = events.loc[valid_fix, :]

    return events
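
A hedged usage sketch for get_df. The filelist format (an EDF path paired with a metadata dict) is inferred from how file[0] and file[1] are used above; the file names and the filter function below are placeholders.

filelist = [('sub01_run1.edf', {'subject': 1, 'run': 1}),   # placeholder paths and metadata
            ('sub01_run2.edf', {'subject': 1, 'run': 2})]

def keep_onsets(messages):
    # placeholder filter: keep only the columns needed for the merge
    return messages[['py_trial_marker', 'SYNCTIME']]

events = get_df(filelist, filter_fun=keep_onsets)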
Example #8
    def load_pupil(self):
        '''
        Loads samples, events, and messages from the EDF file.
        '''
        return edf.pread(self.file)
Example #9
    # if the variance is not distinct enough from the 1 percentile,
    # signal has to be found manually, indicated as 666 in the first item
    # of the list.
    r_hr = [
        ds.hamilton_detector(ica_data[i]) for i in range(ica_data.shape[0])
    ]
    r_hr = [np.var(np.diff(i)) for i in r_hr]
    ecg_out[ica_key] = r_hr
    r_hr = np.array(r_hr)

    if (np.percentile(r_hr, 1) - np.min(r_hr)) > 500:
        hr = list(np.where(r_hr < np.percentile(r_hr, 1))[0])
    else:
        hr = [666]

    samples, events, messages = edf.pread(edf_path)
    eye = ["left", "right"][events.eye.unique()[0]]

    del events
    del messages

    # clean the eye-tracking data
    samples = samples.loc[samples.time != 0.0]
    start = samples.loc[samples.input == 252.]
    end = samples.loc[samples.input == 253.]
    start_ix = start.index[0] - 100
    end_ix = end.index[-1] + 100
    samples = samples.iloc[start_ix:end_ix]
    samples.reset_index(inplace=True)
    samples.time = samples.time - samples.time.iloc[0]
Example #10
def findFile(path, ftype):
    # list all files in path that end with the given extension
    out = [f for f in os.listdir(path) if f.endswith(ftype)]
    return out
  
#%% EYELINK

subject = 'inga_3'
datapath = '/net/store/nbp/projects/etcomp/pilot'
# filepath for preprocessed folder
preprocessed_path = os.path.join(datapath, subject, 'preprocessed')
# Load edf
filename = os.path.join(datapath, subject, 'raw')

# elsamples:  contains individual EL samples
# elevents:   contains fixation and saccade definitions
# elnotes:    contains notes (meta data) associated with each trial
elsamples, elevents, elnotes = edf.pread(os.path.join(filename,findFile(filename,'.EDF')[0]), trial_marker=b'')


elevents.time.unique()

  
#%% Checking blink_id



# Probably not very interesting
elevents = events.el_make_events(subject)

plt.figure()
plt.plot(elevents.index, elevents.start, 'x', color='b')
plt.plot(elevents.index, elevents.end, 'x', color='b')
Example #11
    def create(self, *args, **kwargs):
        if not os.path.isfile(self.args["edfFile"]):
            return self
        edffile = self.args["edfFile"]
        trialStart = self.args["trialStartMarker"]
        trialEnd = self.args["trialEndMarker"]
        messages, timestamps = edfread.read_messages(edffile)

        trialStarts = []
        trialEnds = []
        for ts, message in zip(timestamps, messages):
            m = message.decode().strip("\x00").replace(" ", "")
            if m == trialEnd:
                trialEnds.append(ts)
            elif m == trialStart:
                trialStarts.append(ts)

        trialEnds = np.array(trialEnds)
        trialStarts = np.array(trialStarts)
        D = trialEnds[None, :] - trialStarts[:, None]
        D[D < 0] = D.max()
        if len(trialStarts) < len(trialEnds):
            # match every trial start with its closest trial end
            idx = D.argmin(1)
            trialEnds = trialEnds[idx]
        elif len(trialStarts) > len(trialEnds):
            idx = D.argmin(0)
            trialStarts = trialStarts[idx]

        sample_size = ((trialEnds - trialStarts).astype(np.uint64)).sum()

        samples, events, messages = edf.pread(edffile)
        sample_time = samples["time"]
        # get sampling rate by comparing distance between timestamps
        sr = sample_time[1] - sample_time[0]
        if sr == 0:
            # duplicate timestamps mean a 2 kHz recording, i.e. two samples per millisecond
            sample_size = np.uint64(2 * sample_size)
        gazex = np.zeros((sample_size, ))
        gazey = np.zeros((sample_size, ))
        trialidx = np.zeros((sample_size, ), dtype=np.uint64)
        ssidx = 0
        offset = 0
        for (i, (start, end)) in enumerate(zip(trialStarts, trialEnds)):
            sidx = np.searchsorted(sample_time[ssidx:], start, side='left')
            eidx = np.searchsorted(sample_time[ssidx:], end, side='right')
            sidx += ssidx
            eidx += ssidx
            w = eidx - sidx
            ssidx = eidx + 1
            gazex[offset:offset + w] = samples["gx_left"][sidx:eidx]
            gazey[offset:offset + w] = samples["gy_left"][sidx:eidx]
            trialidx[offset:offset + w] = i
            offset += w

        self.gazeX = gazex
        self.gazeY = gazey
        self.trialStart = trialStarts
        self.trialEnd = trialEnds
        self.trialIdx = trialidx
        self.setidx = [0 for i in range(len(self.trialIdx))]
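
A small toy sketch of the start/end matching logic used above, showing how a start marker without a matching end gets discarded; the numbers are made up.

import numpy as np

# three trial starts were recorded, but only two trial ends
trialStarts = np.array([100, 500, 900])
trialEnds = np.array([300, 1100])
D = trialEnds[None, :] - trialStarts[:, None]
D[D < 0] = D.max()
idx = D.argmin(0)        # closest preceding start for each end
print(trialStarts[idx])  # [100 900]; the unmatched start at 500 is dropped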
Example #12
    def test_ignore_samples(self):
        samples, events, messages = edf.pread('../SUB001.EDF',
                                              ignore_samples=True)
        self.assertEqual(samples.shape[0], 0)
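
A companion check, sketched under the assumption that the same SUB001.EDF fixture is available and that events and messages are still parsed when samples are skipped.

    def test_ignore_samples_keeps_events(self):
        # assumption: ignore_samples only skips samples, not events/messages
        samples, events, messages = edf.pread('../SUB001.EDF',
                                              ignore_samples=True)
        self.assertGreater(events.shape[0], 0)
        self.assertGreater(messages.shape[0], 0)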
Example #13
    def setUp(self):
        samples, events, messages = edf.pread('../SUB001.EDF')
        self.samples = samples
        self.events = events
        self.messages = messages