Example #1
import numpy as np
import scipy.signal
# `printnow` (a print-and-flush logging helper) is project-local and assumed in scope.

def _load_as_array6(l, channel_groups, starts, bigchunksize, prechunk, fs,
        post_prechunk, post_chunksize, post_bigchunksize, verbose):
    """Avoid intermediate dict object; downsample all channels at once, then mean each group"""
    # Indexes into chunk columns
    ichannel_groups = [
        [l.header.Channel_ID.index(ch) for ch in group] 
        for group in channel_groups]
    
    # Set up return values
    res_l, rest_l = [], []
    for start in starts:
        # Grab current chunk
        if verbose:
            printnow("loading chunk starting at %d" % start)
        raw = l.get_chunk(start=start - prechunk, n_samples=bigchunksize)
        t = np.arange(start - prechunk, start - prechunk + bigchunksize) / float(fs)
        
        # Resample and slice out extra all at once
        dsraw, new_t = scipy.signal.resample(raw, post_bigchunksize, t=t, axis=0)
        dsraw = dsraw[post_prechunk:post_prechunk+post_chunksize]
        new_t = new_t[post_prechunk:post_prechunk+post_chunksize]

        # Now mean each group
        downsampled_l = [np.mean(dsraw[:, igroup], axis=1) 
            for igroup in ichannel_groups]
        t_l = [new_t] * len(ichannel_groups)
        res_l.append(downsampled_l)
        rest_l.append(t_l)    
    res = np.array(res_l)
    rest = np.array(rest_l)

    return res, rest
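All of these variants assume the post_* arguments are the pre-resampling quantities rescaled by the downsampling ratio, with enough padding (prechunk) on each side of every chunk to absorb resampling edge effects. A minimal sketch of that bookkeeping, using an assumed 30 kHz native rate and 1 kHz target (neither value appears in the source):

import numpy as np

fs = 30000.0            # assumed native sampling rate (Hz)
target_fs = 1000.0      # assumed rate after resampling (Hz)
ratio = int(fs / target_fs)

chunksize = 30000                       # samples of interest per chunk, at fs
prechunk = 3000                         # padding on each side of the chunk
bigchunksize = chunksize + 2 * prechunk

# The same three quantities, expressed at target_fs
post_chunksize = chunksize // ratio
post_prechunk = prechunk // ratio
post_bigchunksize = bigchunksize // ratio

# Chunk onsets in samples at fs; begin at prechunk so start - prechunk >= 0
starts = np.arange(prechunk, prechunk + 5 * chunksize, chunksize)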
Example #2
import numpy as np
import scipy.signal

def _load_as_array2(l, channel_groups, starts, bigchunksize, prechunk, fs,
        post_prechunk, post_chunksize, post_bigchunksize, verbose):
    """Process groups together, mean first, then downsample"""
    # Set up return values
    res_l, rest_l = [], []
    for start in starts:
        # Grab current chunk
        if verbose:
            printnow("loading chunk starting at %d" % start)
        raw = l.get_chunk_by_channel(start=start - prechunk, n_samples=bigchunksize)
        t = np.arange(start - prechunk, start - prechunk + bigchunksize) / float(fs)

        # Now process all groups at once; avoid shadowing the builtin `all`
        grouped = np.array([np.mean([raw[ch] for ch in group], axis=0)
            for group in channel_groups])
        dsall, new_t = scipy.signal.resample(grouped, post_bigchunksize, t=t, axis=1)
        new_t = np.tile(new_t, (len(channel_groups), 1))
        
        # Slice out just desired chunk
        # If you grab one more sample here on the ends, you can check how
        # well the overlap is working between chunks
        dsall = dsall[:, post_prechunk:post_prechunk+post_chunksize]
        new_t = new_t[:, post_prechunk:post_prechunk+post_chunksize]
        
        res_l.append(dsall)
        rest_l.append(new_t)
    res = np.array(res_l)
    rest = np.array(rest_l)

    return res, rest
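For reference, scipy.signal.resample returns the resampled time vector alongside the data when given t, which is what lets every variant here slice new_t with the same post_* indices used for the signal. A tiny standalone demonstration:

import numpy as np
import scipy.signal

fs = 30000.0
x = np.random.randn(3000)                        # 0.1 s of fake signal at fs
t = np.arange(x.size) / fs
y, new_t = scipy.signal.resample(x, 100, t=t)    # down to 1 kHz
print(y.shape, new_t.shape)                      # (100,) (100,)
print(new_t[1] - new_t[0])                       # new sample spacing: 0.001 s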
Example #3
import numpy as np
import scipy.signal

def _load_as_array5(l, channel_groups, starts, bigchunksize, prechunk, fs,
        post_prechunk, post_chunksize, post_bigchunksize, verbose):
    """Avoid intermediate dict object; mean each group first, then downsample"""
    # Set up return values
    res_l, rest_l = [], []
    for start in starts:
        # Grab current chunk
        if verbose:
            printnow("loading chunk starting at %d" % start)
        raw = l.get_chunk(start=start - prechunk, n_samples=bigchunksize)
        t = np.arange(start - prechunk, start - prechunk + bigchunksize) / float(fs)

        # Now process one group at a time
        downsampled_l, t_l = [], []
        for group in channel_groups:
            # Column indexes into raw
            igroup = [l.header.Channel_ID.index(ch) for ch in group]
            
            # Mean across the group's columns, then downsample
            meaned = np.mean(raw[:, igroup], axis=1)
            downsampled, new_t = scipy.signal.resample(
                meaned, post_bigchunksize, t=t, axis=0)
            
            # Slice out just desired chunk
            # If you grab one more sample here on the ends, you can check how
            # well the overlap is working between chunks
            downsampled = downsampled[post_prechunk:post_prechunk+post_chunksize]
            new_t = new_t[post_prechunk:post_prechunk+post_chunksize]
            
            # Append to result
            downsampled_l.append(downsampled)
            t_l.append(new_t)
        res_l.append(downsampled_l)
        rest_l.append(t_l)
    res = np.array(res_l)
    rest = np.array(rest_l)

    return res, rest
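The recurring comment about grabbing one extra sample on the ends to check the overlap can be made concrete. A hedged, self-contained sketch (fs, ratio, and the chunk sizes are assumed values) that resamples two adjacent padded chunks independently and inspects the seam between them:

import numpy as np
import scipy.signal

fs, ratio = 30000, 30
chunksize, prechunk = 3000, 300
post_chunksize = chunksize // ratio
post_prechunk = prechunk // ratio

x = np.sin(2 * np.pi * 60.0 * np.arange(2 * chunksize + 2 * prechunk) / fs)

def ds_chunk(start):
    # Pad by prechunk on both sides, resample, then slice the padding back off
    big = x[start - prechunk:start + chunksize + prechunk]
    y = scipy.signal.resample(big, (chunksize + 2 * prechunk) // ratio)
    return y[post_prechunk:post_prechunk + post_chunksize]

a, b = ds_chunk(prechunk), ds_chunk(prechunk + chunksize)
# If the padding absorbs the resampling edge effects, the end of `a`
# flows smoothly into the start of `b` with no jump at the seam.
print(np.abs(np.diff(np.concatenate([a, b]))).max())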
Example #4
import numpy as np
import scipy.signal

def _load_as_array1(l, channel_groups, starts, bigchunksize, prechunk, fs,
        post_prechunk, post_chunksize, post_bigchunksize, verbose):
    """Process groups separately, mean first, then downsample"""
    # Set up return values
    res_l, rest_l = [], []
    for start in starts:
        # Grab current chunk
        if verbose:
            printnow("loading chunk starting at %d" % start)
        raw = l.get_chunk_by_channel(start=start - prechunk, n_samples=bigchunksize)
        t = np.arange(start - prechunk, start - prechunk + bigchunksize) / float(fs)

        # Now process one group at a time
        # (it might be faster to mean and downsample all groups at once,
        # as _load_as_array2 does)
        downsampled_l, t_l = [], []
        for group in channel_groups:
            # Mean and downsample
            meaned = np.mean([raw[ch] for ch in group], axis=0)
            downsampled, new_t = scipy.signal.resample(
                meaned, post_bigchunksize, t=t)
            
            # Slice out just desired chunk
            # If you grab one more sample here on the ends, you can check how
            # well the overlap is working between chunks
            downsampled = downsampled[post_prechunk:post_prechunk+post_chunksize]
            new_t = new_t[post_prechunk:post_prechunk+post_chunksize]
            
            # Append to result
            downsampled_l.append(downsampled)
            t_l.append(new_t)
        res_l.append(downsampled_l)
        rest_l.append(t_l)
    res = np.array(res_l)
    rest = np.array(rest_l)

    return res, rest
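All four variants expect a loader `l` exposing header.Channel_ID plus either get_chunk (a 2-D samples × channels array) or get_chunk_by_channel (a dict of channel id → 1-D trace). Those attribute names come straight from the calls above, but the stand-in class below and the parameter values in the call are otherwise hypothetical:

import numpy as np

class _FakeHeader(object):
    def __init__(self, channel_ids):
        self.Channel_ID = channel_ids

class FakeLoader(object):
    """Stand-in exposing the two chunk accessors used above."""
    def __init__(self, n_channels=4, n_samples=120000, seed=0):
        rng = np.random.default_rng(seed)
        self.data = rng.standard_normal((n_samples, n_channels))
        self.header = _FakeHeader(list(range(n_channels)))

    def get_chunk(self, start, n_samples):
        # samples x channels array, as indexed in _load_as_array5/6
        return self.data[start:start + n_samples]

    def get_chunk_by_channel(self, start, n_samples):
        # channel id -> 1-D trace, as used in _load_as_array1/2
        return {ch: self.data[start:start + n_samples, i]
                for i, ch in enumerate(self.header.Channel_ID)}

printnow = print  # assumed project helper; any flushing print will do

res, rest = _load_as_array1(
    FakeLoader(), channel_groups=[[0, 1], [2, 3]],
    starts=[3000, 33000], bigchunksize=36000, prechunk=3000, fs=30000.0,
    post_prechunk=100, post_chunksize=1000, post_bigchunksize=1200,
    verbose=True)
print(res.shape, rest.shape)  # (2, 2, 1000) (2, 2, 1000)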
Example #5
# Initialize return variables
outrec_l = []

# Which trials to get -- include cue this time
trial_picker_kwargs = {
    'labels': LBPB.stimnames,
    'label_kwargs': [{
        'stim_name': s
    } for s in LBPB.stimnames],
    'nonrandom': 0,
    'outcome': 'hit'
}

# Iterate over units
for ulabel, rec in to_analyze.iterrows():
    myutils.printnow(ulabel)

    # Get folded and store for later PSTHing
    dfolded = my.dataload.ulabel2dfolded(
        ulabel,
        trial_picker_kwargs=trial_picker_kwargs,
        folding_kwargs={
            'dstart': 0.,
            'dstop': .05
        })

    # count in window
    t1, t2 = rec['audresp_t1'], rec['audresp_t2']
    evoked_spikes = myutils.map_d(lambda f: f.count_in_window(t1, t2), dfolded)

    # Insert more info
Example #6
to_analyze = unit_db[
    unit_db.include &
    unit_db.audresp.isin(['weak', 'good', 'sustained'])]


# Initialize return variables
outrec_l = []

# Which trials to get -- include cue this time
trial_picker_kwargs = {
    'labels': LBPB.stimnames,
    'label_kwargs': [{'stim_name': s} for s in LBPB.stimnames],
    'nonrandom': 0, 'outcome': 'hit'}

# Iterate over units
for ulabel, rec in to_analyze.iterrows():
    myutils.printnow(ulabel)

    # Get folded and store for later PSTHing
    dfolded = my.dataload.ulabel2dfolded(ulabel, 
        trial_picker_kwargs=trial_picker_kwargs,
        folding_kwargs={'dstart': 0., 'dstop': .05})

    # count in window
    t1, t2 = rec['audresp_t1'], rec['audresp_t2']
    evoked_spikes = myutils.map_d(
        lambda f: f.count_in_window(t1, t2), dfolded)
    
    # Insert more info
    # Total evoked spikes across all stimuli
    evoked_spikes['evok_nspk'] = np.sum([np.sum(v) for v in evoked_spikes.values()])
    evoked_spikes['evok_dt'] = t2 - t1
    evoked_spikes['ulabel'] = ulabel
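myutils.map_d is not shown in these snippets; from its use above it behaves like a dict-preserving map. A plausible one-line equivalent (an inference, not the project's actual code):

def map_d(func, d):
    """Apply func to every value of d, keeping the keys."""
    return {k: func(v) for k, v in d.items()}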