Code example #1
        def filter_moving_average(trial_data, config):
            args = config['config']
            logger = MultiProcessingLog.get_logger()
            window_size = args[0]['window_size']

            # Ensure window size is always odd
            if window_size % 2 == 0:
                window_size += 1
            window_sides = int((window_size - 1) / 2)

            data = trial_data['trial']['data']
            new_data = []
            # Reflect-pad both ends so a full window exists near the edges.
            pad_data = list(data[:window_size][::-1]) + list(data) + list(
                data[-window_size:][::-1])
            for count, _ in enumerate(pad_data):
                if count <= window_size - 1 or count > (len(pad_data) -
                                                        1) - window_size:
                    continue

                window = pad_data[count-window_sides:count] +\
                         [pad_data[count]] +\
                         pad_data[count+1:count+window_sides+1]

                avg = np.mean(window)
                new_data.append(avg.copy())

            trial_data['trial']['data'] = new_data
            return trial_data
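
The snippet above reads trial_data['trial']['data'] and config['config'][0]['window_size']. As a standalone sketch of the same reflect-padded moving average, independent of pupil-lib and using toy values:

import numpy as np

def moving_average_reflect(data, window_size):
    # Force an odd window so the average is centred on each sample.
    if window_size % 2 == 0:
        window_size += 1
    half = window_size // 2
    padded = np.pad(np.asarray(data, dtype=float), half, mode='reflect')
    kernel = np.ones(window_size) / window_size
    # 'valid' convolution over the padded signal keeps the original length.
    return np.convolve(padded, kernel, mode='valid')

trial_data = {'trial': {'data': [1.0, 2.0, 4.0, 8.0, 16.0, 8.0, 4.0, 2.0, 1.0]}}
config = {'config': [{'window_size': 3}]}
smoothed = moving_average_reflect(trial_data['trial']['data'],
                                  config['config'][0]['window_size'])
print(smoothed)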
Code example #2
File: pupil_lib.py  Project: zhang405744522/pupil-lib
    def __init__(self, config=None, quiet=False):
        self.config = config

        MultiProcessingLog.setQuiet(quiet)

        if not self.config:
            print("Config not provided, ensure that get_build_config is called "
                  "before running.")
            self.loader = None
        else:
            ConfigStore.set_instance(config)
            self.loader = PupilLibLoader(config)
            self.logger = MultiProcessingLog.set_logger_type(self.config['logger'])

        self.loaded_datasets = []
        self.proc_datasets = {}
        self.proc_data = {}
        self.data_store = None
Code example #3
    def __init__(self, config, chunk_data=None):
        Thread.__init__(self)
        self.config = config  # Metadata about how to process the given datasets.
        self.chunk_data = chunk_data
        self.trial_num = 0

        self.initial_data = {
            'config':
            config,  # Metadata about how to process the given datasets.
            'chunk_data': chunk_data,
        }

        self.proc_trial_data = {}
        self.logger = MultiProcessingLog.get_logger()
Code example #4
    def __init__(self, config, dataset=None):
        Thread.__init__(self)
        self.config = config  # Metadata about how to process the given datasets.
        self.dataset = dataset
        self.logger = MultiProcessingLog.get_logger()

        self.initial_data = {
            'config':
            config,  # Metadata about how to process the given datasets.
            'dataset': dataset,
        }

        self.proc_eye_data = {}
        self.proc_generic_data = {}
        self.proc_dataset_data = {}
Code example #5
    def __init__(self, config, eye_dataset=None, markers=None):
        Thread.__init__(self)
        self.config = copy.deepcopy(config)    # Metadata about how to process the given datasets.
        self.eye_dataset = eye_dataset
        self.eye_dataset['srate'] = np.size(self.eye_dataset['data'], 0) / \
                (np.max(self.eye_dataset['timestamps']) - np.min(self.eye_dataset['timestamps']))
        self.config['srate'] = eye_dataset['srate']
        self.markers = markers
        self.logger = MultiProcessingLog.get_logger()

        self.initial_data = {
            'config': config,    # Metadata about how to process the given datasets.
            'dataset': self.eye_dataset,
            'markers': markers
        }

        self.trigger_data = {}
        self.proc_data = {}
Code example #6
        def custom_resample(trigger_data, config):
            args = config['config']
            logger = MultiProcessingLog.get_logger()
            testing = trigger_data['config']['testing']

            # Get srate
            srate = args[0]['srate']

            proc_trial_data = trigger_data['trials']
            proc_trial_data = {
                trial_name: trial_info for trial_name, trial_info in proc_trial_data.items()
                if 'trial' in trial_info
                   and 'timestamps' in trial_info['trial']
                   and 'data' in trial_info['trial']
                   and len(trial_info['trial']['timestamps']) > 0
                   and len(trial_info['trial']['data']) > 0
            }
            if len(proc_trial_data) <= 0:
                return trigger_data

            for trial_num, trial_info in proc_trial_data.items():
                if 'reject' in trial_info and trial_info['reject']:
                    continue
                times = trial_info['trial']['timestamps']
                stimes = np.asarray(times) - times[0]
                # np.linspace requires an integer sample count.
                new_xrange = np.linspace(stimes[0], stimes[-1],
                                         num=int(srate * (stimes[-1] - stimes[0])))

                trial_info['trial']['data'] = np.interp(
                    new_xrange,
                    stimes,
                    trial_info['trial']['data']
                )
                trial_info['trial']['timestamps'] = new_xrange

                if 'trial_proc' in trial_info:
                    trial_info['trial_proc']['data'] = np.interp(
                        new_xrange,
                        stimes,
                        trial_info['trial_proc']['data']
                    )
                    trial_info['trial_proc']['timestamps'] = new_xrange

            return trigger_data
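
The core of the resampling step above can be shown in isolation: shift the timestamps so the trial starts at zero, then interpolate onto a uniform grid at srate Hz with np.interp. A minimal sketch with toy values (not pupil-lib data):

import numpy as np

srate = 60                                   # target sampling rate in Hz
times = np.array([0.0, 0.013, 0.031, 0.048, 0.062, 0.081, 0.1])
data = np.array([3.1, 3.0, 3.2, 3.3, 3.2, 3.1, 3.0])

stimes = times - times[0]                    # shift so the trial starts at 0
num = int(srate * (stimes[-1] - stimes[0]))  # samples for a uniform grid
new_xrange = np.linspace(stimes[0], stimes[-1], num=num)
resampled = np.interp(new_xrange, stimes, data)
print(num, resampled[:3])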
Code example #7
        def filter_fft(trial_data, config):
            args = config['config']
            logger = MultiProcessingLog.get_logger()

            high_freq = args[0]['highest_freq']
            low_freq = args[1]['lowest_freq']
            srate = trial_data['srate']

            data = trial_data['trial']['data']
            if 'trial_proc' in trial_data:
                data = trial_data['trial_proc']['data']
            else:
                trial_data['trial_proc'] = {
                    'data': [],
                    'timestamps': trial_data['trial']['timestamps']
                }

            trial_data['trial_proc']['data'] = filter_fft_data(
                data, low_freq, high_freq, srate)
            return trial_data
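
filter_fft_data is defined elsewhere in pupil-lib and is not shown here. Purely as a sketch of what a band-pass filter with this signature could look like (an assumption, not the library's implementation):

import numpy as np

def fft_bandpass_sketch(data, low_freq, high_freq, srate):
    # Transform, zero the bins outside [low_freq, high_freq], then invert.
    data = np.asarray(data, dtype=float)
    spectrum = np.fft.rfft(data)
    freqs = np.fft.rfftfreq(len(data), d=1.0 / srate)
    mask = (freqs >= low_freq) & (freqs <= high_freq)
    return np.fft.irfft(spectrum * mask, n=len(data))

t = np.linspace(0, 1, 256, endpoint=False)
signal = np.sin(2 * np.pi * 3 * t) + 0.5 * np.sin(2 * np.pi * 40 * t)
filtered = fft_bandpass_sketch(signal, low_freq=1, high_freq=10, srate=256)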
Code example #8
    def __init__(self, config, eye_dataset=None, marker_inds=None, marker_times=None, marker_name=''):
        Thread.__init__(self)
        self.config = copy.deepcopy(config)    # Metadata about how to process the given datasets.
        self.eye_dataset = eye_dataset
        self.data = self.eye_dataset['data']
        self.timestamps = self.eye_dataset['timestamps']
        self.marker_inds = marker_inds
        self.marker_times = marker_times
        self.marker_name = marker_name

        self.initial_data = {
            'config': config,    # Metadata about how to process the given datasets.
            'data': self.eye_dataset['data'],
            'timestamps': self.eye_dataset['timestamps'],
            'marker_inds': self.marker_inds,
            'marker_times': self.marker_times,
            'marker_name': self.marker_name
        }

        # To be initialized
        self.proc_trigger_data = {}
        self.proc_trial_data = {}

        self.logger = MultiProcessingLog.get_logger()
Code example #9
    def __init__(self):
        self.logger = MultiProcessingLog.get_logger()
        pre = makeregistrar()
        post = makeregistrar()

        @pre
        def tester(trigger_data, config):
            print('helloooooo')

        @post
        def tester2(trigger_data, config):
            print('done.')

        @post
        def tester3(trigger_data, config):
            a_test_to_do('Print this!')

        # Testing/demo function.
        @pre
        def get_sums(trigger_data, config):
            args = config['config']
            args1 = args[0]['srate']

            print('get_sums got: ' + str(args1))
            print('Result: ' + str(int(args1) + 10))

            return trigger_data

        @post
        def custom_resample(trigger_data, config):
            args = config['config']
            logger = MultiProcessingLog.get_logger()
            testing = trigger_data['config']['testing']

            # Get srate
            srate = args[0]['srate']

            proc_trial_data = trigger_data['trials']
            proc_trial_data = {
                trial_name: trial_info for trial_name, trial_info in proc_trial_data.items()
                if 'trial' in trial_info
                   and 'timestamps' in trial_info['trial']
                   and 'data' in trial_info['trial']
                   and len(trial_info['trial']['timestamps']) > 0
                   and len(trial_info['trial']['data']) > 0
            }
            if len(proc_trial_data) <= 0:
                return trigger_data

            for trial_num, trial_info in proc_trial_data.items():
                if 'reject' in trial_info and trial_info['reject']:
                    continue
                times = trial_info['trial']['timestamps']
                stimes = np.asarray(times) - times[0]
                # np.linspace requires an integer sample count.
                new_xrange = np.linspace(stimes[0], stimes[-1],
                                         num=int(srate * (stimes[-1] - stimes[0])))

                trial_info['trial']['data'] = np.interp(
                    new_xrange,
                    stimes,
                    trial_info['trial']['data']
                )
                trial_info['trial']['timestamps'] = new_xrange

                if 'trial_proc' in trial_info:
                    trial_info['trial_proc']['data'] = np.interp(
                        new_xrange,
                        stimes,
                        trial_info['trial_proc']['data']
                    )
                    trial_info['trial_proc']['timestamps'] = new_xrange

            return trigger_data

        # This function should only be run after the
        # custom resampling phase.
        @post
        def rm_baseline(trigger_data, config):
            args = config['config']
            logger = MultiProcessingLog.get_logger()
            testing = trigger_data['config']['testing']
            proc_trial_data = trigger_data['trials']
            new_trial_data = copy.deepcopy(proc_trial_data)
            baseline_range = trigger_data['config']['baseline']
            if not baseline_range:
                return trigger_data

            for trial_num, trial_info in proc_trial_data.items():
                if 'baseline_mean' in new_trial_data[trial_num]:
                    continue

                times = copy.deepcopy(trial_info['trial']['timestamps'])
                data = copy.deepcopy(trial_info['trial']['data'])

                # Subtract initial
                times = times - times[0]
                total_time = times[-1]

                # Check to make sure the baseline range is OK.
                if baseline_range[0] < times[0]:
                    raise Exception("Error: Cannot have a negative baseline range start. All trials start at 0. ")
                if baseline_range[1] > total_time:
                    raise Exception("Error: Cannot have a baseline range that exceeds the total time of the trial. ")

                # Get the initial point, then the final point, with all points in
                # between as the baseline mean for each trial.

                bmean = 0
                pcount = 0
                found_first = False
                for time_ind in range(len(times)-1):
                    # While we have not found the first point, continue looking
                    if not found_first:
                        if times[time_ind] <= baseline_range[0] < times[time_ind+1]:
                            pcount += 1
                            if baseline_range[0] == times[time_ind]:
                                bmean += data[time_ind]
                            else:
                                bmean += linear_approx(data[time_ind], times[time_ind],
                                                       data[time_ind+1], times[time_ind+1],
                                                       baseline_range[0])
                            found_first = True
                        continue

                    # Check if we have the final point area, if we do, get it and
                    # finish looking for points.
                    if times[time_ind] <= baseline_range[1] < times[time_ind+1]:
                        pcount += 1
                        if baseline_range[1] == times[time_ind]:
                            bmean += data[time_ind]
                        else:
                            bmean += linear_approx(data[time_ind], times[time_ind],
                                                   data[time_ind + 1], times[time_ind + 1],
                                                   baseline_range[1])
                        break

                    # We get here when we're in between the first and final points.
                    pcount += 1
                    bmean += data[time_ind]

                # For each trial, calculate the baseline removed data and store the baseline mean.
                new_trial_data[trial_num]['baseline_mean'] = bmean/pcount
                new_trial_data[trial_num]['trial_rmbaseline']['data'] = data - new_trial_data[trial_num]['baseline_mean']

            trigger_data['trials'] = new_trial_data

            return trigger_data

        # Calculates the percent change data for each trial.
        @post
        def get_percent_change(trigger_data, config):
            proc_trial_data = trigger_data['trials']
            pcs = {}

            if not trigger_data['config']['baseline']:
                return trigger_data

            # Get the baseline means if it wasn't already calculated.
            if len(proc_trial_data) > 0:
                for trial_num, trial_info in proc_trial_data.items():
                    if 'baseline_mean' not in trial_info:
                        trigger_data = rm_baseline(trigger_data, config)
                        break
                proc_trial_data = trigger_data['trials']

            for trial_num, trial_info in proc_trial_data.items():
                bmean = trial_info['baseline_mean']
                data = copy.deepcopy(trial_info['trial_rmbaseline']['data'])

                if bmean and bmean != 0:
                    pcs[trial_num] = data / bmean
                else:
                    self.logger.send('WARNING', 'Baseline mean is 0 or undefined for a trial for name: ' + trial_info['config']['name'],
                         os.getpid(), threading.get_ident())
                    self.logger.send('WARNING', 'Not computing percent change for name: ' + trial_info['config']['name'],
                         os.getpid(), threading.get_ident())
                    pcs[trial_num] = data
                    trigger_data['trials'][trial_num]['reject'] = True

            for trial_num in pcs:
                trigger_data['trials'][trial_num]['trial_pc']['data'] = pcs[trial_num]

            return trigger_data

        self.pre_processing = pre
        self.post_processing = post
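
A toy sketch of the baseline convention the pipeline above appears to use: the mean over the baseline window ('baseline_mean') is subtracted from the trial ('trial_rmbaseline'), and percent change ('trial_pc') is the baseline-removed data divided by that mean. The linear_approx boundary handling is simplified here to a plain mask over the window:

import numpy as np

times = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
data = np.array([2.0, 2.2, 2.1, 2.6, 3.0, 2.8])
baseline_range = (0.0, 0.2)

in_baseline = (times >= baseline_range[0]) & (times <= baseline_range[1])
bmean = data[in_baseline].mean()      # 'baseline_mean'
rmbaseline = data - bmean             # 'trial_rmbaseline'
percent_change = rmbaseline / bmean   # 'trial_pc'
print(bmean, percent_change)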
Code example #10
        def rm_baseline(trigger_data, config):
            args = config['config']
            logger = MultiProcessingLog.get_logger()
            testing = trigger_data['config']['testing']
            proc_trial_data = trigger_data['trials']
            new_trial_data = copy.deepcopy(proc_trial_data)
            baseline_range = trigger_data['config']['baseline']
            if not baseline_range:
                return trigger_data

            for trial_num, trial_info in proc_trial_data.items():
                if 'baseline_mean' in new_trial_data[trial_num]:
                    continue

                times = copy.deepcopy(trial_info['trial']['timestamps'])
                data = copy.deepcopy(trial_info['trial']['data'])

                # Subtract initial
                times = times - times[0]
                total_time = times[-1]

                # Check to make sure the baseline range is OK.
                if baseline_range[0] < times[0]:
                    raise Exception("Error: Cannot have a negative baseline range start. All trials start at 0. ")
                if baseline_range[1] > total_time:
                    raise Exception("Error: Cannot have a baseline range that exceeds the total time of the trial. ")

                # Get the initial point, then the final point, with all points in
                # between as the baseline mean for each trial.

                bmean = 0
                pcount = 0
                found_first = False
                for time_ind in range(len(times)-1):
                    # While we have not found the first point, continue looking
                    if not found_first:
                        if times[time_ind] <= baseline_range[0] < times[time_ind+1]:
                            pcount += 1
                            if baseline_range[0] == times[time_ind]:
                                bmean += data[time_ind]
                            else:
                                bmean += linear_approx(data[time_ind], times[time_ind],
                                                       data[time_ind+1], times[time_ind+1],
                                                       baseline_range[0])
                            found_first = True
                        continue

                    # Check if we have the final point area, if we do, get it and
                    # finish looking for points.
                    if times[time_ind] <= baseline_range[1] < times[time_ind+1]:
                        pcount += 1
                        if baseline_range[1] == times[time_ind]:
                            bmean += data[time_ind]
                        else:
                            bmean += linear_approx(data[time_ind], times[time_ind],
                                                   data[time_ind + 1], times[time_ind + 1],
                                                   baseline_range[1])
                        break

                    # We get here when we're in between the first and final points.
                    pcount += 1
                    bmean += data[time_ind]

                # For each trial, calculate the baseline removed data and store the baseline mean.
                new_trial_data[trial_num]['baseline_mean'] = bmean/pcount
                new_trial_data[trial_num]['trial_rmbaseline']['data'] = data - new_trial_data[trial_num]['baseline_mean']

            trigger_data['trials'] = new_trial_data

            return trigger_data
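
linear_approx is imported from elsewhere in pupil-lib; judging from the call sites above, it interpolates a value at a target time between two (value, time) pairs. A minimal sketch of such a helper (an assumption, not the library's code):

def linear_approx(y0, t0, y1, t1, t):
    # Value of the line through (t0, y0) and (t1, y1), evaluated at time t.
    return y0 + (y1 - y0) * (t - t0) / (t1 - t0)

print(linear_approx(2.0, 0.0, 4.0, 1.0, 0.25))  # 2.5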
Code example #11
Pupil-lib is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Pupil-lib.  If not, see <https://www.gnu.org/licenses/>.

Copyright (C) 2018  Gregory W. Mierzwinski
---------------------------------------------------------------------------~(*)
'''
import os
import copy

from pupillib.core.utilities.MPLogger import MultiProcessingLog
logger = MultiProcessingLog.get_logger()

def common_get_csv(mat):
    csv_file = ''
    # Treat a flat (1D) list as a single row.
    if type(mat[0]) not in (list, dict):
        mat = [mat]
    max_count = len(mat)
    # Join rows with commas; newline after every row except the last.
    for count, row in enumerate(mat):
        if count < max_count - 1:
            csv_file += ",".join(map(str, row)) + '\n'
        else:
            csv_file += ",".join(map(str, row))
    return csv_file
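
A quick usage example for common_get_csv with hypothetical values:

rows = [[1, 2, 3], [4, 5, 6]]
print(common_get_csv(rows))
# 1,2,3
# 4,5,6

print(common_get_csv([7, 8, 9]))  # a flat list becomes a single CSV row: 7,8,9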

Code example #12
        def test_results(data_for_data_name, data_name):
            # Check data fields to make sure they were created and loaded.
            # Skip and warn if we are not testing, otherwise raise an exception.
            testing = ConfigStore.get_instance().frozen_config['testing']
            logger = MultiProcessingLog.get_logger()
            test_pass = True

            if 'data' in data_for_data_name:
                if not data_for_data_name['data'].any():
                    if testing:
                        raise Exception("No data was loaded for the data name entry " + data_name)
                    else:
                        logger.send('WARNING',' Data entry name ' + data_name + ' has no '
                                              'data loaded. Skipping it.',
                                              os.getpid(), threading.get_ident())
                        test_pass = False
            else:
                if testing:
                    raise Exception("No data field was created and loaded for data name entry " + data_name)
                else:
                    logger.send('WARNING', ' Data entry name ' + data_name + ' has no '
                                           'data field created and loaded. Skipping it.',
                                           os.getpid(), threading.get_ident())
                    test_pass = False

            # Check timestamp fields to make sure they were created and loaded.
            # Skip and warn if we are not testing, otherwise raise an exception.
            if 'timestamps' in data_for_data_name:
                if not data_for_data_name['timestamps'].any():
                    if testing:
                        raise Exception("No timestamps were loaded for the data name entry " + data_name)
                    else:
                        logger.send('WARNING', ' Data entry name ' + data_name + ' has no '
                                               'timestamps loaded. Skipping it.',
                                               os.getpid(), threading.get_ident())
                        test_pass = False
            else:
                if testing:
                    raise Exception("No timestamps were created and loaded for data name entry " + data_name)
                else:
                    logger.send('WARNING', ' Data entry name ' + data_name + ' has no '
                                           'timestamps field created and loaded. Skipping it.',
                                           os.getpid(),
                                           threading.get_ident())
                    test_pass = False

            # Check sampling rate fields to make sure they were created and loaded.
            # Skip and warn if we are not testing, otherwise raise an exception.
            if 'srate' in data_for_data_name:
                srate_val = data_for_data_name['srate']
                if (isinstance(srate_val, (int, float)) and srate_val == 0) or \
                   (not isinstance(srate_val, (int, float)) and not srate_val.any()):
                    if testing:
                        raise Exception("No srate was loaded for the data name entry " + data_name)
                    else:
                        logger.send('WARNING', ' Data entry name ' + data_name + ' has no '
                                               'srate loaded. Skipping it.',
                                               os.getpid(), threading.get_ident())
                        test_pass = False
            else:
                if testing:
                    raise Exception("No srate was created and loaded for data name entry " + data_name)
                else:
                    logger.send('WARNING', ' Data entry name ' + data_name + ' has no '
                                           'srate field created and loaded. Skipping it.',
                                           os.getpid(),
                                           threading.get_ident())
                    test_pass = False
            return test_pass
Code example #13
File: pupil_lib.py  Project: zhang405744522/pupil-lib
    def set_config(self, config):
        if config:
            self.config = config
            ConfigStore.set_instance(config)
            self.loader = PupilLibLoader(config)
            self.logger = MultiProcessingLog.set_logger_type(self.config['logger'])
Code example #14
File: pupil_lib.py  Project: zhang405744522/pupil-lib
def xdf_pupil_load(dataset, xdf_file_and_name, data_num=0):
    logger = MultiProcessingLog.get_logger()
    if not MultiProcessingLog.quiet:
        logger.disable_redirect()

    name_list = dataset['dataname_list']

    # Split the dataset name from the path and store it
    # for later.
    split_list = xdf_file_and_name.split('|')
    if len(split_list) == 1:
        xdf_file = split_list[0]
        name = 'dataset' + str(data_num)
    else:
        xdf_file = split_list[1]
        name = split_list[0]
    dataset['dir'] = xdf_file
    dataset['dataset_name'] = name

    xdf_data = load_xdf(xdf_file, dejitter_timestamps=False)

    markers_stream = None
    eye0_stream = None
    eye1_stream = None
    eye0pyrep_stream = None
    eye1pyrep_stream = None
    gaze_stream = None
    gazepyrep_stream = None

    # Data structures are all over the place
    # so these checks are necessary.
    data_version = 1
    pcapture_ind = 1
    xdf_ind = 0
    for j, entry in enumerate(xdf_data):
        if type(entry) == list:
            for k, i in enumerate(entry):
                if type(i) == dict:
                    if 'info' in i:
                        print(i['info']['name'][0])
                        if i['info']['name'][0] == 'pupil_capture':
                            data_version = 2
                            pcapture_ind = k
                            xdf_ind = j
                            continue

                        if i['info']['name'][0] == 'Gaze Primitive Data':
                            gaze_stream = i
                        elif i['info']['name'][0] == 'Gaze Python Representation':
                            gazepyrep_stream = i
                        elif i['info']['name'][0] == 'Pupil Primitive Data - Eye 1':
                            eye1_stream = i
                        elif i['info']['name'][0] == 'Pupil Primitive Data - Eye 0':
                            eye0_stream = i
                        elif i['info']['type'][0] == 'Markers' or i['info']['name'][0] == 'Markers':
                            markers_stream = i
                        elif i['info']['name'][0] == 'Pupil Python Representation - Eye 1':
                            eye1pyrep_stream = i
                        elif i['info']['name'][0] == 'Pupil Python Representation - Eye 0':
                            eye0pyrep_stream = i

    xdf_processor = XdfLoaderProcessor()
    xdf_transforms = xdf_processor.transform.all
    if data_version == 2:
        pcap_data = xdf_data[xdf_ind][pcapture_ind]

        all_data = xdf_pupil_load_v2(name_list, pcap_data)
        if markers_stream:
            all_data['markers'] = {
                'timestamps': xdf_transforms['get_marker_times'](markers_stream, {}),
                'eventnames': xdf_transforms['get_marker_eventnames'](markers_stream, {})
            }
        else:
            logger.warning('Could not find a marker stream! Expecting a stream of type `Markers`')
            all_data['markers'] = {
                'timestamps': None,
                'eventnames': None
            }

        dataset['custom_data'] = True

        new_dict = all_data
        for entry in dataset:
            if entry not in all_data:
                new_dict[entry] = dataset[entry]

        return new_dict

    custom_data = False
    for a_name in name_list:
        if a_name != 'eye0' and a_name != 'eye1':
            custom_data = True

    data_entries = {
        'eye1': eye1_stream,
        'eye0': eye0_stream,
        'eye1-pyrep': eye1pyrep_stream,
        'eye0-pyrep': eye0pyrep_stream,
        'gaze_x': gaze_stream,
        'gaze_y': gaze_stream,
        'gaze_x-pyrep': gazepyrep_stream,
        'gaze_y-pyrep': gazepyrep_stream,
        'marks': markers_stream,

        'all': {
            'eye1': eye1_stream,
            'eye0': eye0_stream,
            'eye1-pyrep': eye1pyrep_stream,
            'eye0-pyrep': eye0pyrep_stream,
            'gaze': gaze_stream,
            'gaze-pyrep': gazepyrep_stream,
            'marks': markers_stream,
        }
    }

    # Used to determine what data stream
    # to default to when its original dataset
    # does not exist.
    matchers = {
        'eye0': 'eye0-pyrep',
        'eye1': 'eye1-pyrep',
        'gaze_x-pyrep': 'gaze_x',
        'gaze_y-pyrep': 'gaze_y',
        'gaze_x': 'gaze_x-pyrep',
        'gaze_y': 'gaze_y-pyrep',
    }

    def check_matchers(n, data_entries):
        # We didn't find the datastream,
        # and we have a default,
        # and that default exists.
        # So get the data from the default.
        if data_entries[n] is None and \
           n in matchers and \
           data_entries[matchers[n]] is not None:
            return True
        return False

    logger = MultiProcessingLog.get_logger()
    failure = False
    if not markers_stream:
        logger.send('ERROR', 'Missing markers from datastream',
                         os.getpid(), threading.get_ident())
        failure = True
    for i in data_entries['all']:
        if i != 'marks':
            if not data_entries['all'][i] and i in name_list:
                logger.send('ERROR', 'Missing ' + i + ' from datastream',
                            os.getpid(), threading.get_ident())

    filtered_names = []
    for n in name_list:
        if check_matchers(n, data_entries):
            filtered_names.append(matchers[n])
            logger.send('INFO', 'Found ' + matchers[n] + ' in datastream to use for ' + n,
                        os.getpid(), threading.get_ident())
        filtered_names.append(n)

    all_data = {}
    for a_data_name in filtered_names:
        print(a_data_name)
        if data_entries[a_data_name] is None:
            continue

        funct_list = xdf_processor.data_name_to_function(a_data_name)
        results = {}
        for func in funct_list:
            if func['fn_name'] in xdf_transforms:
                config = func['config']

                def no_none_in_config(c):
                    none_in_config = True
                    for el in c:
                        if isinstance(el, str) and isinstance(c[el], dict):
                            none_in_config = none_in_config and no_none_in_config(c[el])
                        elif isinstance(el, str) and c[el] is None:
                            none_in_config = False
                    return none_in_config

                # If this function does not depend on previous
                # functions.
                if no_none_in_config(config):
                    results[func['field']] = xdf_transforms[func['fn_name']](data_entries[a_data_name], config)
                else:

                    def recurse_new_config(old_config, res):
                        new_config = old_config
                        for elem in old_config:
                            if isinstance(elem, str) and old_config[elem] is None:
                                if elem in res:
                                    new_config[elem] = res[elem]
                                else:
                                    raise Exception("Error: Couldn't find field " + elem)

                            elif isinstance(elem, str) and isinstance(old_config[elem], dict):
                                new_config[elem] = recurse_new_config(old_config[elem], res)
                        return new_config

                    config = recurse_new_config(config, results)
                    results[func['field']] = xdf_transforms[func['fn_name']](data_entries[a_data_name], config)
            else:
                raise Exception("Error: Couldn't find function " + func['fn_name'] + " in the XDF Processor.")

        test_pass = xdf_transforms['test_results'](results, a_data_name)
        if test_pass:
            all_data[a_data_name] = results
        else:
            raise Exception("Tests conducted while loading data failed.")

    # Always get the markers along with any data.
    if markers_stream:
        all_data['markers'] = {
            'timestamps': xdf_transforms['get_marker_times'](markers_stream, {}),
            'eventnames': xdf_transforms['get_marker_eventnames'](markers_stream, {})
        }
    else:
        all_data['markers'] = {
            'timestamps': None,
            'eventnames': None
        }

    default_proc_functions = {
        'eye0': eye_pyrep_to_prim_default,
        'eye1': eye_pyrep_to_prim_default,
        'gaze_x': gaze_pyrep_to_prim_default,
        'gaze_y': gaze_pyrep_to_prim_default,
        'gaze_x-pyrep': gaze_prim_to_pyrep_default,
        'gaze_y-pyrep': gaze_prim_to_pyrep_default
    }

    for n in name_list:
        if check_matchers(n, data_entries):
            func = default_proc_functions[n]
            default = matchers[n] # This is the field that we should take data from
            new_data = func(data_entries[default], default, all_data)
            all_data[n] = new_data

    dataset['custom_data'] = custom_data

    new_dict = all_data
    for entry in dataset:
        if entry not in all_data:
            new_dict[entry] = dataset[entry]

    if logger:
        logger.enable_redirect()

    return new_dict
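
The loader above accepts either a bare path or a 'name|path' string for xdf_file_and_name. A small standalone sketch of that convention (hypothetical paths, helper name invented for illustration):

def split_name_and_path(xdf_file_and_name, data_num=0):
    # A bare path gets a generated dataset name; 'name|path' keeps the given name.
    parts = xdf_file_and_name.split('|')
    if len(parts) == 1:
        return 'dataset' + str(data_num), parts[0]
    return parts[0], parts[1]

print(split_name_and_path('/path/to/rec.xdf'))           # ('dataset0', '/path/to/rec.xdf')
print(split_name_and_path('subj01|/path/to/rec.xdf', 1)) # ('subj01', '/path/to/rec.xdf')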
Code example #15
File: pupil_lib.py  Project: zhang405744522/pupil-lib
def xdf_pupil_load_v2(name_list, pcap_data):
    '''
    Parses V2 Pupil LSL Relay data and returns the data requested in name_list
    as a dict keyed by those names, with values being the time series that
    were found.

    :param name_list: Names of the data to retrieve.
    :param pcap_data: Data to retrieve the data from.
    :return: The requested data as a dict of time series, keyed by the requested names.
    '''
    logger = MultiProcessingLog.get_logger()

    # Map old or shorthand names to the new data names
    name_mapping = {
        'diameter0_3d': ['eye0', 'eye0-pyrep'],
        'diameter1_3d': ['eye1', 'eye1-pyrep'],
        'norm_pos_x': ['gaze_x', 'gaze_x-pyrep'],
        'norm_pos_y': ['gaze_y', 'gaze_y-pyrep'],
    }

    # Provide a fallback for certain data streams
    fallback_list = {
        'diameter0_3d': 'diameter0_2d',
        'diameter1_3d': 'diameter1_2d',
    }

    # Reformat old names to new names
    fmt_name_list = []
    for name in name_list:
        found = False
        for nf, of in name_mapping.items():
            if name in of:
                fmt_name_list.append(nf)
                found = True
                break
        if not found:
            fmt_name_list.append(name)

    # Get channel indices for each requested timeseries
    chaninds = {}
    allchans = pcap_data['info']['desc'][0]['channels'][0]['channel']
    for i, chan in enumerate(allchans):
        cname = chan['label'][0]
        if cname in fmt_name_list:
            logger.info("Found requested stream %s" % cname)
            chaninds[cname] = i

    missing_names = list(set(fmt_name_list) - set(list(chaninds.keys())))
    new_names = []
    if missing_names:
        # Map to fallback and try again
        logger.info("Missing streams, attempting to find a fallback: %s" % str(missing_names))
        new_names = [fallback_list[n] for n in missing_names if n in fallback_list]
        if new_names:
            for i, chan in enumerate(allchans):
                cname = chan['label'][0]
                if cname in new_names:
                    logger.info("Found fallback for a requested stream: %s" % cname)
                    chaninds[cname] = i

    # Output error for any streams that can't be found
    final_missing = list(
        (set(fmt_name_list) | set(new_names)) - set(list(chaninds.keys()))
    )
    for name in final_missing:
        logger.error('Missing %s from datastream' % name)

    # Now extract the timeseries for all requested names that exist
    pcap_tseries = pcap_data['time_series']
    pcap_tstamps = pcap_data['time_stamps']

    all_data = {}
    for cname in chaninds:
        all_data[cname] = {
            'data': [],
            'timestamps': pcap_tstamps
        }

    for sample in pcap_tseries:
        for cname, cind in chaninds.items():
            all_data[cname]['data'].append(sample[cind])

    # Data's extracted, now calculate a sampling rate for each timeseries
    xdf_processor = XdfLoaderProcessor()
    xdf_transforms = xdf_processor.transform.all
    for cname, stream in all_data.items():
        all_data[cname]['srate'] = xdf_transforms['srate'](None, stream)

    # Remap new names to old ones
    rm_all_data = {}
    for cname, stream in all_data.items():
        # Check if cname was a fallback
        ocname = cname
        if new_names and cname in new_names:
            for rn, fn in fallback_list.items():
                if fn == cname:
                    cname = rn
                    break

        # Find old name that was requested
        oldnames = name_mapping.get(cname, None)
        if oldnames:
            for n in oldnames:
                if n in name_list:
                    cname = n

        rm_all_data[cname] = stream
        if ocname != cname:
            rm_all_data[ocname] = stream

    return rm_all_data
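
A toy illustration of the channel lookup this loader performs on the 'pupil_capture' stream metadata; the nested dict below only mimics the structure the snippet indexes and is not real XDF data:

pcap_data = {'info': {'desc': [{'channels': [{'channel': [
    {'label': ['diameter0_3d']},
    {'label': ['norm_pos_x']},
    {'label': ['norm_pos_y']},
]}]}]}}

requested = ['diameter0_3d', 'norm_pos_x']
allchans = pcap_data['info']['desc'][0]['channels'][0]['channel']
chaninds = {chan['label'][0]: i for i, chan in enumerate(allchans)
            if chan['label'][0] in requested}
print(chaninds)  # {'diameter0_3d': 0, 'norm_pos_x': 1}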
Code example #16
        def custom_resample_stream(dataset_data, config):
            print('Resampling data streams to regular sampling rates...')
            args = config['config']
            logger = MultiProcessingLog.get_logger()

            # Get srate
            srate = args[0]['srate']
            if len(dataset_data['dataset']) <= 1:
                return dataset_data

            dataset_data = clean_nan_data(dataset_data)

            for dset in dataset_data['dataset']:
                d = dataset_data['dataset'][dset]
                if type(d) is not dict:
                    continue
                if 'data' not in d:
                    continue
                dataset_data['dataset'][dset]['data'] = list(
                    np.nan_to_num(d['data']))

            min_dataname, max_dataname = pick_best_stream(dataset_data)
            min_datastream = dataset_data['dataset'][min_dataname][
                'timestamps']
            max_datastream = dataset_data['dataset'][max_dataname][
                'timestamps']
            global_min = min_datastream[0]
            global_max = max_datastream[-1]

            all_data = {
                dataname: dataset_data['dataset'][dataname]
                for dataname in dataset_data['dataset']
                if dataname not in DSTREAMS_BLACKLIST
            }
            for dataname in all_data:
                dstream = all_data[dataname]
                data_ts = dstream['timestamps']
                data_dset = dstream['data']

                # Replace start points
                cur_start = 0
                cur_min = data_ts[0]
                new_data_ts = data_ts
                new_data_dset = data_dset
                if cur_min != global_min:
                    new_data_ts = []
                    for count, _ in enumerate(data_ts[:-1]):
                        if data_ts[count] < global_min <= data_ts[count + 1]:
                            cur_start = count
                            break

                    # Replace first data points
                    new_data_ts = data_ts[cur_start:]
                    new_data_dset = data_dset[cur_start:]

                    # Interpolate a new data point
                    new_data_dset[0] = linear_approx(new_data_dset[0],
                                                     new_data_ts[0],
                                                     new_data_dset[1],
                                                     new_data_ts[1],
                                                     global_min)
                    new_data_ts[0] = global_min

                # Replace end points
                cur_max = data_ts[-1]
                if cur_max != global_max:
                    cur_end = len(data_ts) - 1
                    cur_end_offset = 0
                    reved_ts = data_ts[::-1]
                    for count, _ in enumerate(reved_ts):
                        if reved_ts[count] >= global_max > reved_ts[count + 1]:
                            cur_end_offset = count
                            break

                    # Second value of slice is exclusive, add 1
                    # to keep the value (it's going to be replaced)
                    cur_end = cur_end - cur_end_offset + 1
                    new_data_ts = new_data_ts[:cur_end]
                    new_data_dset = new_data_dset[:cur_end]

                    # Interpolate a new data point
                    new_data_dset[-1] = linear_approx(new_data_dset[-2],
                                                      new_data_ts[-2],
                                                      new_data_dset[-1],
                                                      new_data_ts[-1],
                                                      global_max)
                    new_data_ts[-1] = global_max

                new_dstream = dstream
                if srate != 'None' and srate is not None:
                    print('Resampling data to ' + str(srate) + 'Hz...')
                    total_time = global_max - global_min
                    new_xrange = np.linspace(global_min,
                                             global_max,
                                             num=int(srate * total_time))

                    new_dstream['data'] = np.interp(new_xrange, new_data_ts,
                                                    new_data_dset)
                    new_dstream['timestamps'] = new_xrange

                    dataset_data['dataset'][dataname] = new_dstream

            for dset in dataset_data['dataset']:
                if dset in DSTREAMS_BLACKLIST:
                    continue
                print("Len for " + dset + ": " +
                      str(len(dataset_data['dataset'][dset]['data'])))
            return dataset_data