def select_segments_by_trial_number(id_block, trial_numbers):    
    # Load all segments from block
    id_segs, info_segs = OE.sql('select segment.id, segment.info from segment \
        where segment.id_block = %d' % id_block)
    
    # Convert each segment's info string to an integer behavioral trial number
    b_trial_numbers = info_segs.astype(int)
    
    # Find id_segs that match these trials
    return id_segs[np.in1d(b_trial_numbers, trial_numbers)]
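
A minimal usage sketch (the block id and trial numbers below are hypothetical, and an OE database is assumed to be open already):

# Hypothetical usage: find segments for behavioral trials 5, 7, and 9 in block 1.
import numpy as np
matching_id_segs = select_segments_by_trial_number(id_block=1,
    trial_numbers=np.array([5, 7, 9]))
print('%d matching segments' % len(matching_id_segs))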
Example #2
def plot_all_PSTHs(data_dir, PRE_STIMULUS_TIME=0, save_fig_dir=None):
    # Location of OE db
    db_filename = glob.glob(os.path.join(data_dir, '*.db'))[0]
    db = OE.open_db('sqlite:///%s' % db_filename)

    # Get list of neurons from tetrode block
    id_block = get_tetrode_block_id()
    block = OE.Block().load(id_block)
    neuron_list = block._neurons
    big_spiketimes = get_all_spike_times(neuron_list)

    # Plot PSTHs
    for n_name, spike_time_list in big_spiketimes.items():
        plt.figure()
        plt.hist(np.array(spike_time_list) - PRE_STIMULUS_TIME, bins=100)
        plt.title(n_name)
        if save_fig_dir is not None:
            plt.savefig(os.path.join(save_fig_dir, 'PSTH_%s.png' % n_name))
            plt.close()
        else:
            plt.show()
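
A hedged driver sketch for the function above; the data and figure directories are hypothetical, and the data directory is assumed to contain exactly one .db file:

# Hypothetical usage: save one PSTH figure per neuron to disk.
plot_all_PSTHs('/path/to/session_001', PRE_STIMULUS_TIME=0.5,
    save_fig_dir='/path/to/figures')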
Example #3
def run(db_name, save_fig_name=None):
    """If save_fig_name is not None, should be ex. '~/test.png' """
    # Load the raw data
    db = OE.open_db(url=('sqlite:///%s' % db_name))
    id_blocks, = OE.sql('SELECT block.id FROM block WHERE block.name="Raw Data"')
    id_block = id_blocks[0]

    id_recordingpoints, rp_names = OE.sql("SELECT \
        recordingpoint.id, recordingpoint.name \
        FROM recordingpoint \
        WHERE recordingpoint.id_block = :id_block", id_block=id_block)

    f = plt.figure(figsize=(10,10))
    
    # Process each recording point separately
    for n, (id_rp,tt) in enumerate(zip(id_recordingpoints[:16], rp_names[:16])):
        # Load all signals from all segments with this recording point
        id_sigs, = OE.sql('SELECT analogsignal.id FROM analogsignal ' + \
            'WHERE analogsignal.id_recordingpoint = :id_recordingpoint',
            id_recordingpoint=id_rp)
        
        # Average the signal
        avgsig = np.zeros(OE.AnalogSignal().load(id_sigs[0]).signal.shape)
        for id_sig in id_sigs: 
            sig = OE.AnalogSignal().load(id_sig)
            avgsig = avgsig + sig.signal
        avgsig = avgsig / len(id_sigs)

        # Plot the average signal of this recording point
        ax = f.add_subplot(4,4,n+1)
        ax.plot(np.arange(len(avgsig)) / sig.sampling_rate * 1000, avgsig)
        #ax.set_ylim((-250, 250))
        ax.set_title(tt)

    if save_fig_name is None:
        plt.show()
    else:
        plt.savefig(save_fig_name)
        plt.close()
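
A usage sketch, assuming the db file name is passed as in the other examples (the file name here is hypothetical):

# Hypothetical usage: display the per-tetrode average signals interactively...
run('datafile_session_001.db')
# ...or write the figure to disk instead of showing it.
run('datafile_session_001.db', save_fig_name='avg_signals.png')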
def select_audio_signals_by_stimulus_number(id_block, stim_num, 
    TRIALS_INFO, side='L'):
    # Find trials with stimulus number stim_num
    keep_id_segs = select_segments_by_trial_number(id_block, 
        trial_numbers=TRIALS_INFO['TRIAL_NUMBER'][\
        TRIALS_INFO['STIM_NUMBER'] == stim_num])

    # Load all analogsignals of this channel
    id_sigs, id_segs = OE.sql('select analogsignal.id, analogsignal.id_segment \
        from analogsignal where analogsignal.name like "' \
        + side + ' Speaker %"')
    
    # Grab analog signals where id_segs matches keep_id_segs    
    keep_id_sigs = id_sigs[np.in1d(id_segs.astype(int), 
        keep_id_segs.astype(int))]
    speaker_traces = [OE.AnalogSignal().load(id_sig).signal \
        for id_sig in keep_id_sigs]
    
    return np.array(speaker_traces)
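
A minimal sketch of how this might be called, assuming TRIALS_INFO was loaded with bcontrol as in the later examples (the stimulus number is arbitrary):

# Hypothetical usage: pull all left-speaker traces for stimulus 3.
l_traces = select_audio_signals_by_stimulus_number(id_block, 3,
    TRIALS_INFO, side='L')
print(l_traces.shape)  # expected: (n_matching_trials, n_samples)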
Example #5
def plot_all_PSTHs(data_dir, PRE_STIMULUS_TIME=0, save_fig_dir=None):
    # Location of OE db
    db_filename = glob.glob(os.path.join(data_dir, '*.db'))[0]
    db = OE.open_db('sqlite:///%s' % db_filename)
    
    # Get list of neurons from tetrode block
    id_block = get_tetrode_block_id()
    block = OE.Block().load(id_block)    
    neuron_list = block._neurons    
    big_spiketimes = get_all_spike_times(neuron_list)
    
    # Plot PSTHs
    for n_name, spike_time_list in big_spiketimes.items():
        plt.figure()
        plt.hist(np.array(spike_time_list) - PRE_STIMULUS_TIME, bins=100)
        plt.title(n_name)
        if save_fig_dir is not None:
            plt.savefig(os.path.join(save_fig_dir, 'PSTH_%s.png' % n_name))
            plt.close()
        else:
            plt.show()
Example #6
def run(db_name, save_fig_name=None):
    """If save_fig_name is not None, should be ex. '~/test.png' """
    # Load the raw data
    db = OE.open_db(url=('sqlite:///%s' % db_name))
    id_blocks, = OE.sql(
        'SELECT block.id FROM block WHERE block.name="Raw Data"')
    id_block = id_blocks[0]

    id_recordingpoints, rp_names = OE.sql("SELECT \
        recordingpoint.id, recordingpoint.name \
        FROM recordingpoint \
        WHERE recordingpoint.id_block = :id_block",
                                          id_block=id_block)

    f = plt.figure(figsize=(10, 10))

    # Process each recording point separately
    for n, (id_rp, tt) in enumerate(zip(id_recordingpoints[:16],
                                        rp_names[:16])):
        # Load all signals from all segments with this recording point
        id_sigs, = OE.sql('SELECT analogsignal.id FROM analogsignal ' + \
            'WHERE analogsignal.id_recordingpoint = :id_recordingpoint',
            id_recordingpoint=id_rp)

        # Average the signal
        avgsig = np.zeros(OE.AnalogSignal().load(id_sigs[0]).signal.shape)
        for id_sig in id_sigs:
            sig = OE.AnalogSignal().load(id_sig)
            avgsig = avgsig + sig.signal
        avgsig = avgsig / len(id_sigs)

        # Plot the average signal of this recording point
        ax = f.add_subplot(4, 4, n + 1)
        ax.plot(np.arange(len(avgsig)) / sig.sampling_rate * 1000, avgsig)
        #ax.set_ylim((-250, 250))
        ax.set_title(tt)

    if save_fig_name is None:
        plt.show()
    else:
        plt.savefig(save_fig_name)
        plt.close()
Example #7
# Grabs spike times from db that were calculated from within OE

import OpenElectrophy as OE
import numpy as np
import matplotlib.pyplot as plt

db_name = '/home/chris/Public/20110401_CR13A_audresp_data/0327_002/datafile_CR_CR13A_110327_002.db'
#db_name = '/home/chris/Public/20110401_CR13A_audresp_data/0403_002/datafile_CR_CR13A_110403_002.db'
#db_name = '/home/chris/Public/20110401_CR13A_audresp_data/0329_002/datafile_CR_CR13A_110329_002.db'
OE.open_db(url=('sqlite:///%s' % db_name))

# Load neurons
id_block = OE.sql('select block.id from block where block.name = \
    "CAR Tetrode Data"')[0][0]
id_neurons, = OE.sql('select neuron.id from neuron where neuron.id_block = \
    :id_block',
                     id_block=id_block)

plt.figure()
bigger_spiketimes = np.array([])
for id_neuron in id_neurons:
    n = OE.Neuron().load(id_neuron)

    # Grab spike times from all trials (segments)
    big_spiketimes = np.concatenate(\
        [spiketrain.spike_times - spiketrain.t_start \
        for spiketrain in n._spiketrains])
    bigger_spiketimes = np.concatenate([bigger_spiketimes, big_spiketimes])

    # Compute histogram
    nh, x = np.histogram(big_spiketimes, bins=100)
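
The script breaks off before anything is drawn into the figure; a plausible completion (an assumption, since the original plot call is missing) would plot each neuron's histogram against its bin centers inside the loop, then show the figure:

    # Assumed completion: plot this neuron's PSTH at the histogram bin centers.
    plt.plot(x[:-1] + np.diff(x) / 2.0, nh)

plt.show()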
Example #8
def run(db_name, CAR=True, smooth_spikes=True):
    """Filters the data for spike extraction.
    
    db_name: Name of the OpenElectrophy db file
    CAR: If True, subtract the common-average of every channel.
    smooth_spikes: If True, add an additional low-pass filtering step to
        the spike filter.
    """
    # Open connection to the database
    OE.open_db(url=("sqlite:///%s" % db_name))

    # Check that I haven't already run
    id_blocks, = OE.sql("SELECT block.id FROM block WHERE block.name='CAR Tetrode Data'")
    if len(id_blocks) > 0:
        print "CAR Tetrode Data already exists, no need to recompute"
        return

    # Find the block
    id_blocks, = OE.sql("SELECT block.id FROM block WHERE block.name='Raw Data'")
    assert len(id_blocks) == 1
    id_block = id_blocks[0]
    raw_block = OE.Block().load(id_block)

    # Define spike filter
    # TODO: fix so that doesn't assume all sampling rates the same!
    fixed_sampling_rate = OE.AnalogSignal().load(1).sampling_rate
    FILTER_B, FILTER_A = define_spike_filter(fixed_sampling_rate)

    # If requested, define second spike filter
    if smooth_spikes is True:
        FILTER_B2, FILTER_A2 = define_spike_filter_2(fixed_sampling_rate)

    # Find TETRODE_CHANNELS file in data directory of db
    data_dir = path.split(db_name)[0]
    TETRODE_CHANNELS = get_tetrode_channels(path.join(data_dir, "TETRODE_CHANNELS"))
    N_TET = len(TETRODE_CHANNELS)

    # For convenience, flatten TETRODE_CHANNELS to just get worthwhile channels
    GOOD_CHANNELS = [item for sublist in TETRODE_CHANNELS for item in sublist]

    # Create a new block for referenced data, and save to db.
    car_block = OE.Block(
        name="CAR Tetrode Data", info="Raw neural data, now referenced and ordered by tetrode", fileOrigin=db_name
    )
    id_car_block = car_block.save()

    # Make RecordingPoint for each channel, linked to tetrode number with `group`
    # Also keep track of link between channel and RP with ch2rpid dict
    ch2rpid = dict()
    for tn, ch_list in enumerate(TETRODE_CHANNELS):
        for ch in ch_list:
            rp = OE.RecordingPoint(
                name=("RP%d" % ch), id_block=id_car_block, trodness=len(ch_list), channel=float(ch), group=tn
            )
            rp_id = rp.save()
            ch2rpid[ch] = rp_id

    # Find all segments in the block of raw data
    id_segments, = OE.sql("SELECT segment.id FROM segment " + "WHERE segment.id_block = :id_block", id_block=id_block)

    # For each segment in this block, load each AnalogSignal listed in
    # TETRODE channels and average
    # to compute CAR. Then subtract from each AnalogSignal.
    for id_segment in id_segments:
        # Create a new segment in the new block with the same name
        old_seg = OE.Segment().load(id_segment)
        car_seg = OE.Segment(name=old_seg.name, id_block=id_car_block)
        id_car_seg = car_seg.save()

        # Find all AnalogSignals in this segment
        id_sigs, = OE.sql(
            "SELECT analogsignal.id FROM analogsignal " + "WHERE analogsignal.id_segment = :id_segment",
            id_segment=id_segment,
        )

        # Compute average of each
        running_car = 0
        n_summed = 0
        for id_sig in id_sigs:
            sig = OE.AnalogSignal().load(id_sig)
            if sig.channel not in GOOD_CHANNELS:
                continue
            running_car = running_car + sig.signal
            n_summed = n_summed + 1

        # Zero out CAR if CAR is not wanted
        # TODO: eliminate the actual calculation of CAR above in this case
        # For now, just want to avoid weird bugs
        if CAR is False:
            running_car = np.zeros(running_car.shape)

        # Put the CAR into the new block
        # not assigning channel, t_start, sample_rate, maybe more?
        car_sig = OE.AnalogSignal(
            name="CAR",
            signal=running_car / n_summed,
            info="CAR calculated from good channels for this segment",
            id_segment=id_segment,
        )
        car_sig.save()

        # Put all the subtractions in id_car_seg
        for id_sig in id_sigs:
            # Load the raw signal (skip bad channels)
            sig = OE.AnalogSignal().load(id_sig)
            if sig.channel not in GOOD_CHANNELS:
                continue

            # Subtract the CAR
            referenced_signal = sig.signal - car_sig.signal

            # Filter!
            filtered_signal = scipy.signal.filtfilt(FILTER_B, FILTER_A, referenced_signal)
            if smooth_spikes is True:
                filtered_signal = scipy.signal.filtfilt(FILTER_B2, FILTER_A2, filtered_signal)

            # Check for infs or nans
            if np.isnan(filtered_signal).any():
                print "ERROR: Filtered signal contains NaN!"
            if np.isinf(filtered_signal).any():
                print "ERROR: Filtered signal contains Inf!"

            # Store in db
            new_sig = OE.AnalogSignal(
                name=sig.name,
                signal=filtered_signal,
                info="CAR has been subtracted",
                id_segment=id_car_seg,
                id_recordingpoint=ch2rpid[sig.channel],
                channel=sig.channel,
                t_start=sig.t_start,
                sampling_rate=sig.sampling_rate,
            )
            new_sig.save()

        # Finally, copy the audio channel over from the old block
        id_audio_sigs, = OE.sql(
            "SELECT analogsignal.id FROM analogsignal "
            + "WHERE analogsignal.id_segment = :id_segment AND "
            + "analogsignal.name LIKE '% Speaker %'",
            id_segment=id_segment,
        )
        for id_audio_sig in id_audio_sigs:
            old_sig = OE.AnalogSignal().load(id_audio_sig)
            OE.AnalogSignal(
                name=old_sig.name,
                signal=old_sig.signal,
                id_segment=id_car_seg,
                channel=old_sig.channel,
                t_start=old_sig.t_start,
                sampling_rate=old_sig.sampling_rate,
            ).save()
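
A usage sketch under the same assumptions as the rest of this example (a TETRODE_CHANNELS file next to the hypothetical db file):

# Hypothetical usage: build the 'CAR Tetrode Data' block with common-average
# referencing and the extra smoothing stage both enabled.
run('datafile_session_001.db', CAR=True, smooth_spikes=True)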
Example #9
def stuff(filename, db_name, TIMESTAMPS, SHOVE_CHANNELS, 
    pre_slice_len, post_slice_len, db_type='sqlite'):
    
    # Load the file and file header
    l = ns5.Loader(filename=filename)    
    l.load_file()
    
    # Audio channel numbers
    AUDIO_CHANNELS = l.get_audio_channel_numbers()
    
    # Open connection to OE db and create a block
    if db_type == 'postgres':
        OE.open_db(url=('postgresql://[email protected]/test'))  # %s' % db_name))
        print('post')
    else:
        OE.open_db(url=('sqlite:///%s' % db_name))
    #OE.open_db(url=('mysql://*****:*****@localhost/%s' % db_name))
    block = OE.Block(name='Raw Data', 
        info='Raw data sliced around trials',
        fileOrigin=filename)
    id_block = block.save() # need this later
    
    # Convert requested slice lengths to samples
    pre_slice_len_samples = int(pre_slice_len * l.header.f_samp)
    post_slice_len_samples = int(post_slice_len * l.header.f_samp)    
    
    # Add RecordingPoint
    ch2rpid = dict()
    for ch in SHOVE_CHANNELS:
        rp = OE.RecordingPoint(name=('RP%d' % ch), 
            id_block=id_block,
            channel=float(ch))
        rp_id = rp.save()
        ch2rpid[ch] = rp_id
    
    # Extract each trial as a segment
    for tn, trial_start in enumerate(TIMESTAMPS):
        # Create segment for this trial
        segment = OE.Segment(id_block=id_block, name=('trial%d' % tn),
            info='raw data loaded from good channels')
        id_segment = segment.save()        

        # Create AnalogSignal for each channel
        for chn, ch in enumerate(SHOVE_CHANNELS):
            # Load
            x = np.array(l._get_channel(ch)[trial_start-pre_slice_len_samples:\
                trial_start+post_slice_len_samples])
            
            # Convert to uV
            x = x * uV_QUANTUM
            
            # Put in AnalogSignal and save to db
            sig = OE.AnalogSignal(signal=x,
                channel=float(ch),
                sampling_rate=l.header.f_samp,
                t_start=(trial_start-pre_slice_len_samples)/l.header.f_samp,
                id_segment=id_segment,
                id_recordingpoint=ch2rpid[ch],
                name=('Channel %d Trial %d' % (ch, tn)))   
            
            # Special processing for audio channels
            if ch == AUDIO_CHANNELS[0]:
                sig.name = ('L Speaker Trial %d' % tn)
            elif ch == AUDIO_CHANNELS[1]:
                sig.name = ('R Speaker Trial %d' % tn)
            
            # Save signal to database
            sig.save()
        
        
        # Handle AUDIO CHANNELS only slightly differently
        for ch in AUDIO_CHANNELS:
            # Load
            x = np.array(l._get_channel(ch)[trial_start-pre_slice_len_samples:\
                trial_start+post_slice_len_samples])

            # Special processing for audio channels
            if ch == AUDIO_CHANNELS[0]:
                sname = ('L Speaker Trial %d' % tn)
            elif ch == AUDIO_CHANNELS[1]:
                sname = ('R Speaker Trial %d' % tn)

            # Put in AnalogSignal and save to db
            sig = OE.AnalogSignal(signal=x,
                channel=float(ch),
                sampling_rate=l.header.f_samp,
                t_start=(trial_start-pre_slice_len_samples)/l.header.f_samp,
                id_segment=id_segment,
                name=sname)
            
            # Save signal to database
            sig.save()
        


        # Save segment (with all analogsignals) to db
        # Actually this may be unnecessary
        # Does saving the signals link to the segment automatically?
        segment.save()
        
    return (id_segment, id_block)
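
A hedged usage sketch; the file names and channel list are hypothetical, and TIMESTAMPS is assumed to hold one sample index per trial onset:

# Hypothetical usage: slice 0.5 s before and 1.0 s after each trial onset.
TIMESTAMPS = np.loadtxt('TIMESTAMPS', dtype=int)
id_segment, id_block = stuff('datafile.ns5', 'datafile.db', TIMESTAMPS,
    SHOVE_CHANNELS=[1, 2, 3, 4], pre_slice_len=0.5, post_slice_len=1.0)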
Example #10
def run(control_params, auto_validate=True, v2_behavior=False):
    # Location of data
    data_dir = control_params['data_dir']

    # Location of the Bcontrol file
    bdata_filename = control_params['behavior_filename']

    # Location of TIMESTAMPS
    timestamps_filename = os.path.join(data_dir, 'TIMESTAMPS')

    # Location of OE db
    db_filename = control_params['db_name']

    # Load timestamps calculated from audio onsets in ns5 file
    ns5_times = np.loadtxt(timestamps_filename, dtype=int)

    # Load bcontrol data (will also validate)
    bcl = bcontrol.Bcontrol_Loader(filename=bdata_filename,
                                   auto_validate=auto_validate,
                                   v2_behavior=v2_behavior)
    bcl.load()

    # Grab timestamps from behavior file
    b_onsets = bcl.data['onsets']

    # Try to convert this stuff into the format expected by the syncer
    class fake_bcl(object):
        def __init__(self, onsets):
            self.audio_onsets = onsets

    class fake_rdl(object):
        def __init__(self, onsets):
            self.audio_onsets = onsets

    # Convert into desired format, also throwing away first behavior onset
    # We need to correct for this later.
    fb = fake_bcl(b_onsets[1:])
    fr = fake_rdl(ns5_times)

    # Sync. Will write CORR files to disk.
    # Also produces bs.map_n_to_b_masked and vice versa for trial mapping
    bs = DataSession.BehavingSyncer()
    bs.sync(fb, fr, force_run=True)

    # Put trial numbers into OE db
    db = OE.open_db('sqlite:///%s' % db_filename)

    # Each segment in the db is named trial%d, corresponding to the
    # ordinal TIMESTAMP, which means neural trial time.
    # We want to mark it with the behavioral trial number.
    # For now, put the behavioral trial number into Segment.info
    # TODO: Put the skip-1 behavior into the syncer so we don't have to
    # use the trick. Then we can use map_n_to_b_masked without fear.
    # Note that the 1010 data is NOT missing the first trial.
    # Double check that the neural TIMESTAMP matches the value in peh.
    # Also, add the check_audio_waveforms functionality here so that it's
    # all done at once.
    id_segs, name_segs = OE.sql('select segment.id, segment.name from segment')
    for id_seg, name_seg in zip(id_segs, name_segs):
        # Extract neural trial number from name_seg
        n_trial = int(re.search(r'trial(\d+)', name_seg).group(1))

        # Convert to behavioral trial number
        # We use the 'trial_number' field of TRIALS_INFO
        # IE the original Matlab numbering of the trial
        # Here we correct for the dropped first trial.
        try:
            b_trial = bcl.data['TRIALS_INFO']['TRIAL_NUMBER'][\
                bs.map_n_to_b_masked[n_trial] + 1]
        except IndexError:
            # masked trial
            if n_trial == 0:
                print("WARNING: Assuming this is the dropped first trial")
                b_trial = bcl.data['TRIALS_INFO']['TRIAL_NUMBER'][0]
            else:
                print("WARNING: can't find trial")
                b_trial = -99

        # Store behavioral trial number in the info field
        seg = OE.Segment().load(id_seg)
        seg.info = '%d' % b_trial
        seg.save()
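
A small follow-up sketch to spot-check what the loop stored; it assumes the same db is still open and that OE.sql returns one array per selected column, as in the queries above:

# Hypothetical spot check: print each segment's name and stored btrial number.
id_segs, name_segs, info_segs = OE.sql(
    'select segment.id, segment.name, segment.info from segment')
for name_seg, info_seg in zip(name_segs, info_segs):
    print('%s -> behavioral trial %s' % (name_seg, info_seg))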
Example #11
def run(db_name, CAR=True, smooth_spikes=True):
    """Filters the data for spike extraction.
    
    db_name: Name of the OpenElectrophy db file
    CAR: If True, subtract the common-average of every channel.
    smooth_spikes: If True, add an additional low-pass filtering step to
        the spike filter.
    """
    # Open connection to the database
    OE.open_db(url=('sqlite:///%s' % db_name))

    # Check that I haven't already run
    id_blocks, = OE.sql(
        "SELECT block.id FROM block WHERE block.name='CAR Tetrode Data'")
    if len(id_blocks) > 0:
        print("CAR Tetrode Data already exists, no need to recompute")
        return

    # Find the block
    id_blocks, = OE.sql(
        "SELECT block.id FROM block WHERE block.name='Raw Data'")
    assert (len(id_blocks) == 1)
    id_block = id_blocks[0]
    raw_block = OE.Block().load(id_block)

    # Define spike filter
    # TODO: fix so that doesn't assume all sampling rates the same!
    fixed_sampling_rate = OE.AnalogSignal().load(1).sampling_rate
    FILTER_B, FILTER_A = define_spike_filter(fixed_sampling_rate)

    # If requested, define second spike filter
    if smooth_spikes is True:
        FILTER_B2, FILTER_A2 = define_spike_filter_2(fixed_sampling_rate)

    # Find TETRODE_CHANNELS file in data directory of db
    data_dir = path.split(db_name)[0]
    TETRODE_CHANNELS = get_tetrode_channels(
        path.join(data_dir, 'TETRODE_CHANNELS'))
    N_TET = len(TETRODE_CHANNELS)

    # For convenience, flatten TETRODE_CHANNELS to just get worthwhile channels
    GOOD_CHANNELS = [item for sublist in TETRODE_CHANNELS for item in sublist]

    # Create a new block for referenced data, and save to db.
    car_block = OE.Block(\
        name='CAR Tetrode Data',
        info='Raw neural data, now referenced and ordered by tetrode',
        fileOrigin=db_name)
    id_car_block = car_block.save()

    # Make RecordingPoint for each channel, linked to tetrode number with `group`
    # Also keep track of link between channel and RP with ch2rpid dict
    ch2rpid = dict()
    for tn, ch_list in enumerate(TETRODE_CHANNELS):
        for ch in ch_list:
            rp = OE.RecordingPoint(name=('RP%d' % ch),
                                   id_block=id_car_block,
                                   trodness=len(ch_list),
                                   channel=float(ch),
                                   group=tn)
            rp_id = rp.save()
            ch2rpid[ch] = rp_id

    # Find all segments in the block of raw data
    id_segments, = OE.sql('SELECT segment.id FROM segment ' + \
        'WHERE segment.id_block = :id_block', id_block=id_block)

    # For each segment in this block, load each AnalogSignal listed in
    # TETRODE channels and average
    # to compute CAR. Then subtract from each AnalogSignal.
    for id_segment in id_segments:
        # Create a new segment in the new block with the same name
        old_seg = OE.Segment().load(id_segment)
        car_seg = OE.Segment(
            name=old_seg.name,
            id_block=id_car_block,
        )
        id_car_seg = car_seg.save()

        # Find all AnalogSignals in this segment
        id_sigs, = OE.sql('SELECT analogsignal.id FROM analogsignal ' + \
            'WHERE analogsignal.id_segment = :id_segment', id_segment=id_segment)

        # Compute average of each
        running_car = 0
        n_summed = 0
        for id_sig in id_sigs:
            sig = OE.AnalogSignal().load(id_sig)
            if sig.channel not in GOOD_CHANNELS:
                continue
            running_car = running_car + sig.signal
            n_summed = n_summed + 1

        # Zero out CAR if CAR is not wanted
        # TODO: eliminate the actual calculation of CAR above in this case
        # For now, just want to avoid weird bugs
        if CAR is False:
            running_car = np.zeros(running_car.shape)

        # Put the CAR into the new block
        # not assigning channel, t_start, sample_rate, maybe more?
        car_sig = OE.AnalogSignal(
            name='CAR',
            signal=running_car / n_summed,
            info='CAR calculated from good channels for this segment',
            id_segment=id_segment)
        car_sig.save()

        # Put all the subtractions in id_car_seg
        for id_sig in id_sigs:
            # Load the raw signal (skip bad channels)
            sig = OE.AnalogSignal().load(id_sig)
            if sig.channel not in GOOD_CHANNELS:
                continue

            # Subtract the CAR
            referenced_signal = sig.signal - car_sig.signal

            # Filter!
            filtered_signal = scipy.signal.filtfilt(FILTER_B, FILTER_A,
                                                    referenced_signal)
            if smooth_spikes is True:
                filtered_signal = scipy.signal.filtfilt(
                    FILTER_B2, FILTER_A2, filtered_signal)

            # Check for infs or nans
            if np.isnan(filtered_signal).any():
                print("ERROR: Filtered signal contains NaN!")
            if np.isinf(filtered_signal).any():
                print("ERROR: Filtered signal contains Inf!")

            # Store in db
            new_sig = OE.AnalogSignal(\
                name=sig.name,
                signal=filtered_signal,
                info='CAR has been subtracted',
                id_segment=id_car_seg,
                id_recordingpoint=ch2rpid[sig.channel],
                channel=sig.channel,
                t_start=sig.t_start,
                sampling_rate=sig.sampling_rate)
            new_sig.save()

        # Finally, copy the audio channel over from the old block
        id_audio_sigs, = OE.sql('SELECT analogsignal.id FROM analogsignal ' + \
            'WHERE analogsignal.id_segment = :id_segment AND ' + \
            "analogsignal.name LIKE '% Speaker %'", id_segment=id_segment)
        for id_audio_sig in id_audio_sigs:
            old_sig = OE.AnalogSignal().load(id_audio_sig)
            OE.AnalogSignal(\
                name=old_sig.name,
                signal=old_sig.signal,
                id_segment=id_car_seg,
                channel=old_sig.channel,
                t_start=old_sig.t_start,
                sampling_rate=old_sig.sampling_rate).save()
Example #12
def get_tetrode_block_id():
    id_blocks, = OE.sql('select block.id from block where \
        block.name = "Spike-filtered Data"')
    return id_blocks[0]
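
A one-line usage sketch (assumes an open db containing a block named "Spike-filtered Data"):

# Hypothetical usage: load the spike-filtered block by its well-known name.
block = OE.Block().load(get_tetrode_block_id())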
Example #13
def execute(data_dir, PRE_STIMULUS_TIME=0.5):
    """Write spike time data and metadata from OE db.
    
    Writes spike times from all sorted spikes in OE db to KlustaKwik
    files in same directory.
    
    Also writes a file metadata.csv with the ntrial number (ordering
    of segments in OE db), the btrial number (trial id in matlab struct,
    extracted from OE db info field), and the time of stimulus onset.
    
    Time of stimulus onset is calculated as the t_start time of that segment
    in the OE db, plus the provided PRE_STIMULUS_TIME parameter. Later scripts
    will use the stimulus onset times to find spikes associated with that
    trial in the KlustaKwik files.
    """
    # Location of data
    #data_dir = '/home/chris/Public/20110517_CR12B_FF2B/CR12B_0514_001'
    #PRE_STIMULUS_TIME = 0.5

    # Location of the Bcontrol file
    #bdata_filename = os.path.join(data_dir, 
    #    'data_@TwoAltChoice_v2_chris_AIR11A_101012a.at')

    # Location of OE db
    #db_filename = os.path.join(data_dir, 'datafile_CR_CR12B_110507_001.db')
    db_filename = glob.glob(os.path.join(data_dir, '*.db'))[0]

    # Output of the writers
    output_filename = os.path.splitext(db_filename)[0]
    
    # Metadata: trial numbers etc
    metadata_filename = os.path.join(data_dir, 'metadata.csv')

    # Load db
    db = OE.open_db('sqlite:///%s' % db_filename)
    
    # Get list of segments from tetrode block
    id_block = 2
    block = OE.Block().load(id_block)    
    neuron_list = block._neurons

    # Build a writer
    w = KlustaKwikIO(filename=output_filename)
    w.write_block(block)
    
    # Also dump metadata: btrial num, t_start in samples
    seg_metadata = list()
    for seg in block.get_segments():
        t_starts1 = [sig.t_start for sig in seg._analogsignals]
        t_starts2 = [st.t_start for st in seg._spiketrains]
        assert len(np.unique(t_starts1)) == 1
        
        # You can get errors here where some spiketrains have already the right
        # t_start and others don't. I think maybe this happens when you re-spike
        # sort or something.
        #assert len(np.unique(t_starts2)) == 1
        #assert np.unique(t_starts1) == np.unique(t_starts2)
        
        
        # This was a stupid bugfix for a stupid bug that is now breaking things
        # Replaced with PRE_STIMULUS_TIME so at least it's up front
        t_start = np.rint((t_starts1[0] + PRE_STIMULUS_TIME) * 30000.).astype(np.int64)        
        
        seg_metadata.append((seg.name, int(seg.info), t_start))
    
    # Convert seg_metadata to recarray and write to disk
    r = np.rec.fromrecords(seg_metadata,
        dtype=[('ntrial', '<U32'), ('btrial_num', int), ('stim_onset', np.int64)])
    mlab.rec2csv(r, metadata_filename)
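
A usage sketch for the writer above; the session directory is hypothetical, and reading the metadata back with mlab.csv2rec is an assumption based on the mlab.rec2csv call in the function:

# Hypothetical usage: write KlustaKwik files plus metadata.csv for one session.
execute('/path/to/session_001', PRE_STIMULUS_TIME=0.5)
# Assumed round trip: read the metadata back for later analysis.
metadata = mlab.csv2rec(os.path.join('/path/to/session_001', 'metadata.csv'))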
Example #14
# Grabs spike times from db that were calculated from within OE

import OpenElectrophy as OE
import numpy as np
import matplotlib.pyplot as plt

db_name = '/home/chris/Public/20110401_CR13A_audresp_data/0327_002/datafile_CR_CR13A_110327_002.db'
#db_name = '/home/chris/Public/20110401_CR13A_audresp_data/0403_002/datafile_CR_CR13A_110403_002.db'
#db_name = '/home/chris/Public/20110401_CR13A_audresp_data/0329_002/datafile_CR_CR13A_110329_002.db'
OE.open_db(url=('sqlite:///%s' % db_name))    

# Load neurons
id_block = OE.sql('select block.id from block where block.name = \
    "CAR Tetrode Data"')[0][0]
id_neurons, = OE.sql('select neuron.id from neuron where neuron.id_block = \
    :id_block', id_block=id_block)

plt.figure()
bigger_spiketimes = np.array([])
for id_neuron in id_neurons:
    n = OE.Neuron().load(id_neuron)
    
    # Grab spike times from all trials (segments)
    big_spiketimes = np.concatenate(\
        [spiketrain.spike_times - spiketrain.t_start \
        for spiketrain in n._spiketrains])
    bigger_spiketimes = np.concatenate([bigger_spiketimes, big_spiketimes])
    
    # Compute histogram
    nh, x = np.histogram(big_spiketimes, bins=100)
    x = np.diff(x) + x[:-1]
Example #15
def stuff(filename,
          db_name,
          TIMESTAMPS,
          SHOVE_CHANNELS,
          pre_slice_len,
          post_slice_len,
          db_type='sqlite'):

    # Load the file and file header
    l = ns5.Loader(filename=filename)
    l.load_file()

    # Audio channel numbers
    AUDIO_CHANNELS = l.get_audio_channel_numbers()

    # Open connection to OE db and create a block
    if db_type == 'postgres':
        OE.open_db(url=(
            'postgresql://[email protected]/test'))  # %s' % db_name))
        print('post')
    else:
        OE.open_db(url=('sqlite:///%s' % db_name))
    #OE.open_db(url=('mysql://*****:*****@localhost/%s' % db_name))
    block = OE.Block(name='Raw Data',
                     info='Raw data sliced around trials',
                     fileOrigin=filename)
    id_block = block.save()  # need this later

    # Convert requested slice lengths to samples
    pre_slice_len_samples = int(pre_slice_len * l.header.f_samp)
    post_slice_len_samples = int(post_slice_len * l.header.f_samp)

    # Add RecordingPoint
    ch2rpid = dict()
    for ch in SHOVE_CHANNELS:
        rp = OE.RecordingPoint(name=('RP%d' % ch),
                               id_block=id_block,
                               channel=float(ch))
        rp_id = rp.save()
        ch2rpid[ch] = rp_id

    # Extract each trial as a segment
    for tn, trial_start in enumerate(TIMESTAMPS):
        # Create segment for this trial
        segment = OE.Segment(id_block=id_block,
                             name=('trial%d' % tn),
                             info='raw data loaded from good channels')
        id_segment = segment.save()

        # Create AnalogSignal for each channel
        for chn, ch in enumerate(SHOVE_CHANNELS):
            # Load
            x = np.array(l._get_channel(ch)[trial_start-pre_slice_len_samples:\
                trial_start+post_slice_len_samples])

            # Convert to uV
            x = x * uV_QUANTUM

            # Put in AnalogSignal and save to db
            sig = OE.AnalogSignal(signal=x,
                                  channel=float(ch),
                                  sampling_rate=l.header.f_samp,
                                  t_start=((trial_start - pre_slice_len_samples)
                                           / l.header.f_samp),
                                  id_segment=id_segment,
                                  id_recordingpoint=ch2rpid[ch],
                                  name=('Channel %d Trial %d' % (ch, tn)))

            # Special processing for audio channels
            if ch == AUDIO_CHANNELS[0]:
                sig.name = ('L Speaker Trial %d' % tn)
            elif ch == AUDIO_CHANNELS[1]:
                sig.name = ('R Speaker Trial %d' % tn)

            # Save signal to database
            sig.save()

        # Handle AUDIO CHANNELS only slightly differently
        for ch in AUDIO_CHANNELS:
            # Load
            x = np.array(l._get_channel(ch)[trial_start-pre_slice_len_samples:\
                trial_start+post_slice_len_samples])

            # Special processing for audio channels
            if ch == AUDIO_CHANNELS[0]:
                sname = ('L Speaker Trial %d' % tn)
            elif ch == AUDIO_CHANNELS[1]:
                sname = ('R Speaker Trial %d' % tn)

            # Put in AnalogSignal and save to db
            sig = OE.AnalogSignal(signal=x,
                                  channel=float(ch),
                                  sampling_rate=l.header.f_samp,
                                  t_start=((trial_start - pre_slice_len_samples)
                                           / l.header.f_samp),
                                  id_segment=id_segment,
                                  name=sname)

            # Save signal to database
            sig.save()

        # Save segment (with all analogsignals) to db
        # Actually this may be unnecessary
        # Does saving the signals link to the segment automatically?
        segment.save()

    return (id_segment, id_block)
Example #16
def execute(data_dir, PRE_STIMULUS_TIME=0.5):
    """Write spike time data and metadata from OE db.
    
    Writes spike times from all sorted spikes in OE db to KlustaKwik
    files in same directory.
    
    Also writes a file metadata.csv with the ntrial number (ordering
    of segments in OE db), the btrial number (trial id in matlab struct,
    extracted from OE db info field), and the time of stimulus onset.
    
    Time of stimulus onset is calculated as the t_start time of that segment
    in the OE db, plus the provided PRE_STIMULUS_TIME parameter. Later scripts
    will use the stimulus onset times to find spikes associated with that
    trial in the KlustaKwik files.
    """
    # Location of data
    #data_dir = '/home/chris/Public/20110517_CR12B_FF2B/CR12B_0514_001'
    #PRE_STIMULUS_TIME = 0.5

    # Location of the Bcontrol file
    #bdata_filename = os.path.join(data_dir,
    #    'data_@TwoAltChoice_v2_chris_AIR11A_101012a.at')

    # Location of OE db
    #db_filename = os.path.join(data_dir, 'datafile_CR_CR12B_110507_001.db')
    db_filename = glob.glob(os.path.join(data_dir, '*.db'))[0]

    # Output of the writers
    output_filename = os.path.splitext(db_filename)[0]

    # Metadata: trial numbers etc
    metadata_filename = os.path.join(data_dir, 'metadata.csv')

    # Load db
    db = OE.open_db('sqlite:///%s' % db_filename)

    # Get list of segments from tetrode block
    id_block = 2
    block = OE.Block().load(id_block)
    neuron_list = block._neurons

    # Build a writer
    w = KlustaKwikIO(filename=output_filename)
    w.write_block(block)

    # Also dump metadata: btrial num, t_start in samples
    seg_metadata = list()
    for seg in block.get_segments():
        t_starts1 = [sig.t_start for sig in seg._analogsignals]
        t_starts2 = [st.t_start for st in seg._spiketrains]
        assert len(np.unique(t_starts1)) == 1

        # You can get errors here where some spiketrains have already the right
        # t_start and others don't. I think maybe this happens when you re-spike
        # sort or something.
        #assert len(np.unique(t_starts2)) == 1
        #assert np.unique(t_starts1) == np.unique(t_starts2)

        # This was a stupid bugfix for a stupid bug that is now breaking things
        # Replaced with PRE_STIMULUS_TIME so at least it's up front
        t_start = np.rint(
            (t_starts1[0] + PRE_STIMULUS_TIME) * 30000.).astype(np.int64)

        seg_metadata.append((seg.name, int(seg.info), t_start))

    # Convert seg_metadata to recarray and write to disk
    r = np.rec.fromrecords(seg_metadata,
                           dtype=[('ntrial', '<U32'), ('btrial_num', int),
                                  ('stim_onset', np.int64)])
    mlab.rec2csv(r, metadata_filename)
Example #17
def get_tetrode_block_id():
    id_blocks, = OE.sql('select block.id from block where \
        block.name = "Spike-filtered Data"')
    return id_blocks[0]
def execute(control_params):
    # Load TRIALS_INFO
    bcl = bcontrol.Bcontrol_Loader(filename=control_params['behavior_filename'],
        v2_behavior=True)
    bcl.load()
    TRIALS_INFO = bcl.data['TRIALS_INFO']
    
    # Open database
    OE.open_db('sqlite:///%s' % control_params['db_name'])    
    id_blocks, = OE.sql('select block.id from block where block.name = "Raw Data"')
    id_block = id_blocks[0]

    pre_stim_len = int(control_params['pre_slice'] * 30000.)
    stim_len = int(.250 * 30000.)
    f1 = plt.figure()
    f2 = plt.figure()
    l_sums = dict()
    r_sums = dict()
    for sn in np.unique(TRIALS_INFO['STIM_NUMBER']):
        # Get all signals with certain stim number
        l_speaker_traces = select_audio_signals_by_stimulus_number(id_block,
            sn, TRIALS_INFO, 'L')
        r_speaker_traces = select_audio_signals_by_stimulus_number(id_block,
            sn, TRIALS_INFO, 'R')
        
        if sn == 6:
            return l_speaker_traces, r_speaker_traces
        
        ax = f1.add_subplot(3, 4, sn)
        ax.plot(l_speaker_traces[:, pre_stim_len + np.arange(-30, 30)].transpose())
        ax.set_title('L %d' % sn)
        
        ax = f2.add_subplot(3, 4, sn)
        ax.plot(r_speaker_traces[:, pre_stim_len + np.arange(-30, 30)].transpose())
        ax.set_title('R %d' % sn)
        
        slices = l_speaker_traces[:, pre_stim_len:pre_stim_len+stim_len]
        l_sums[sn] = 10*np.log10((slices.astype(float) ** 2).sum(axis=1))
        
        slices = r_speaker_traces[:, pre_stim_len:pre_stim_len+stim_len]
        r_sums[sn] = 10*np.log10((slices.astype(float) ** 2).sum(axis=1))
    
    plt.show()
    
    # Now plot powers
    plt.figure()
    plt.subplot(131)
    for sn in [1,2,3,4]:
        plt.plot(l_sums[sn], r_sums[sn], '.')
    plt.xlabel('left'); plt.ylabel('right')
    plt.legend(['lo', 'hi', 'le', 'ri'], loc='best')
    plt.title('Pure')
    
    plt.subplot(132)
    for sn in [5,6,7,8]:
        plt.plot(l_sums[sn], r_sums[sn], '.')
    plt.xlabel('left'); plt.ylabel('right')
    plt.legend(['le-hi', 'ri-hi', 'le-lo', 'ri-lo'], loc='best')
    plt.title('PB')
    
    plt.subplot(133)
    for sn in [9,10,11,12]:
        plt.plot(l_sums[sn], r_sums[sn], '.')
    plt.xlabel('left'); plt.ylabel('right')
    plt.legend(['le-hi', 'ri-hi', 'le-lo', 'ri-lo'], loc='best')
    plt.title('LB')
    plt.show()
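
A hedged driver sketch; the control_params keys follow the other examples in this listing, and the file names are hypothetical:

# Hypothetical usage: compare per-stimulus speaker power for one session.
execute({'behavior_filename': 'data_@TwoAltChoice_v2_session.mat',
         'db_name': 'datafile_session_001.db',
         'pre_slice': 0.5})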
Example #19
def run(control_params, auto_validate=True, v2_behavior=False):
    # Location of data
    data_dir = control_params['data_dir']

    # Location of the Bcontrol file
    bdata_filename = control_params['behavior_filename']

    # Location of TIMESTAMPS
    timestamps_filename = os.path.join(data_dir, 'TIMESTAMPS')

    # Location of OE db
    db_filename = control_params['db_name']

    # Load timestamps calculated from audio onsets in ns5 file
    ns5_times = np.loadtxt(timestamps_filename, dtype=int)

    # Load bcontrol data (will also validate)
    bcl = bcontrol.Bcontrol_Loader(filename=bdata_filename,
        auto_validate=auto_validate, v2_behavior=v2_behavior)
    bcl.load()

    # Grab timestamps from behavior file
    b_onsets = bcl.data['onsets']

    # Try to convert this stuff into the format expected by the syncer
    class fake_bcl:
        def __init__(self, onsets):
            self.audio_onsets = onsets
    class fake_rdl:
        def __init__(self, onsets):
            self.audio_onsets = onsets

    # Convert into desired format, also throwing away first behavior onset
    # We need to correct for this later.
    fb = fake_bcl(b_onsets[1:])
    fr = fake_rdl(ns5_times)

    # Sync. Will write CORR files to disk.
    # Also produces bs.map_n_to_b_masked and vice versa for trial mapping
    bs = DataSession.BehavingSyncer()
    bs.sync(fb, fr, force_run=True)

    # Put trial numbers into OE db
    db = OE.open_db('sqlite:///%s' % db_filename)

    # Each segment in the db is named trial%d, corresponding to the
    # ordinal TIMESTAMP, which means neural trial time.
    # We want to mark it with the behavioral trial number.
    # For now, put the behavioral trial number into Segment.info
    # TODO: Put the skip-1 behavior into the syncer so we don't have to
    # use the trick. Then we can use map_n_to_b_masked without fear.
    # Note that the 1010 data is NOT missing the first trial.
    # Double check that the neural TIMESTAMP matches the value in peh.
    # Also, add the check_audio_waveforms functionality here so that it's
    # all done at once.
    id_segs, name_segs = OE.sql('select segment.id, segment.name from segment')
    for id_seg, name_seg in zip(id_segs, name_segs):
        # Extract neural trial number from name_seg
        n_trial = int(re.search(r'trial(\d+)', name_seg).group(1))
        
        # Convert to behavioral trial number
        # We use the 'trial_number' field of TRIALS_INFO
        # IE the original Matlab numbering of the trial
        # Here we correct for the dropped first trial.
        try:
            b_trial = bcl.data['TRIALS_INFO']['TRIAL_NUMBER'][\
                bs.map_n_to_b_masked[n_trial] + 1]
        except IndexError:
            # masked trial
            if n_trial == 0:
                print "WARNING: Assuming this is the dropped first trial"
                b_trial = bcl.data['TRIALS_INFO']['TRIAL_NUMBER'][0]
            else:
                print "WARNING: can't find trial"
                b_trial = -99
        
        # Store behavioral trial number in the info field
        seg = OE.Segment().load(id_seg)
        seg.info = '%d' % b_trial
        seg.save()