Example No. 1
0
def create_extract_arguments_from_raw(filename):
    '''
    Build the keyword-argument dict for the spike extraction routine from
    the channel metadata stored in a raw experiment file.
    '''
    with tables.openFile(filename, 'r') as fh:
        metadata = h5.p_get_node(fh, '*/data/physiology/channel_metadata')

        # Filter/differentiation settings are stored as node attributes
        attrs = metadata._v_attrs
        processing = {
            'diff_mode': attrs.diff_mode,
            'filter_freq_lp': attrs.filter_freq_lp,
            'filter_freq_hp': attrs.filter_freq_hp,
            'filter_btype': attrs.filter_btype,
            'filter_order': attrs.filter_order,
        }

        rows = metadata.read()
        processing['bad_channels'] = [r['index'] for r in rows if r['bad']]

        # Only channels flagged for extraction contribute per-channel values
        extract_rows = [r for r in rows if r['extract']]

        # Gather the arguments required by the spike extraction routine
        return {
            'processing': processing,
            'noise_std': [r['std'] for r in extract_rows],
            'channels': [r['index'] for r in extract_rows],
            'threshold_stds': [r['th_std'] for r in extract_rows],
            'rej_threshold_stds': [r['artifact_std'] for r in extract_rows],
            'window_size': 2.1,
            'cross_time': 0.5,
            'cov_samples': 5e3,
        }
Example No. 2
0
def load_trial_log(filename, path='*/data'):
    '''
    Load the trial log from the experiment and populate it with epoch data
    collected during the experiment.

    Path can have wildcards in it, but must point to the data node (not the
    trial_log node).
    '''
    epoch_names = ('trial_epoch', 'physiology_epoch', 'poke_epoch',
                   'signal_epoch')

    with tables.openFile(filename) as fh:
        base_node = h5.p_get_node(fh.root, path)
        trial_log = pandas.DataFrame(base_node.trial_log[:])

        for name in epoch_names:
            if name not in base_node.contact:
                continue
            prefix = name.split('_')[0]
            node = base_node.contact._f_getChild(name)
            # Convert from samples to seconds using the node's sampling rate
            times = node[:].astype('d')/node._v_attrs['fs']
            if times.ndim == 2:
                # Two columns per trial: epoch start/end boundaries
                trial_log[prefix + '_start'] = times[:, 0]
                trial_log[prefix + '_end'] = times[:, 1]
            else:
                # One value per trial: a single timestamp
                trial_log[prefix + '_ts'] = times

        # Pull in response timestamps as well (raw samples and seconds)
        node = base_node.contact._f_getChild('response_ts')
        trial_log['response_ts|'] = node[:]
        trial_log['response|'] = node[:]/node._v_attrs['fs']

        return trial_log
Example No. 3
0
def load_task_epochs(raw_filename, pad=1.0):
    '''
    Given the file containing raw experiment data, return a list of epochs
    (expanded on either end by `pad` seconds) that reflect task-related
    activity (e.g. going to the nose-poke or spout).
    '''
    with tables.openFile(raw_filename) as fh:
        contact = h5.p_get_node(fh, '*/data/contact')

        # OR the four TTL traces together with an in-kernel expression
        # (faster and uses less memory than loading each array separately)
        ttl_arrays = {
            'spout': contact.spout_TTL,
            'poke': contact.poke_TTL,
            'reaction': contact.reaction_TTL,
            'response': contact.response_TTL,
        }
        combined = tables.Expr('spout | poke | reaction | response',
                               ttl_arrays)

        fs = contact.poke_TTL._v_attrs.fs

        # Evaluate the expression, convert the boolean trace to epochs,
        # pad each epoch (pad is given in seconds, so scale to samples),
        # then express the result in seconds.
        return epochs(combined.eval(), pad*fs)/fs