def get_spikes(cerebus_times_ms=None, t1_ms=-50, t2_ms=150, fun_args=None):
    """Read per-stimulus spike counts and spike times from fragmented cerebus
    NEV files.

    Inputs:
      cerebus_times_ms : array of cerebus times (ms) marking stimulus onsets
      t1_ms : window start relative to each cerebus time (ms)
      t2_ms : window end relative to each cerebus time (ms)
      fun_args : dictionary with keys (note: spaces, not underscores):
        'nev basic header'    - basic header from nev.read_basic_header
        'nev extended header' - extended header from nev.read_extended_header
        'frag dir'            - directory of the fragmented nev file (absolute)
        'channel'             - the channel we worry about
        'unit'                - the unit

    Outputs:
      dict with keys:
        'trials'         - number of stimuli (cerebus_times_ms.size)
        'spike counts'   - float array, spikes per trial in [t1_ms, t2_ms]
        'spike times ms' - per-trial list: array of spike times relative to
                           stimulus onset, or None when no spikes fell in
                           the window
    """
    nev_basic_header = fun_args['nev basic header']
    nev_extended_header = fun_args['nev extended header']
    frag_dir = fun_args['frag dir']
    channel = fun_args['channel']
    unit = fun_args['unit']

    # Read in the whole spike train - warning, if you read in the spike
    # waveforms as well, you may run outta memory (hence load_waveform=False).
    raw, _ = nev.read_frag_unit(frag_dir, nev_basic_header, nev_extended_header,
                                channel=channel, unit=unit,
                                tstart_ms=0.0, tdur_ms=-1,
                                load_waveform=False,
                                buffer_increment_size=1000)
    all_spike_time_ms = raw['spike time ms']

    spike_counts = pylab.zeros(cerebus_times_ms.size, dtype=float)
    spike_time_ms = [None] * cerebus_times_ms.size
    for n in range(cerebus_times_ms.size):
        tstart_ms = cerebus_times_ms[n] + t1_ms  # all times in ms
        tstop_ms = cerebus_times_ms[n] + t2_ms
        # pylab.find was removed in matplotlib >= 3.1; nonzero on the boolean
        # mask is the equivalent 1D index lookup.
        idx = pylab.nonzero((all_spike_time_ms >= tstart_ms) &
                            (all_spike_time_ms <= tstop_ms))[0]
        spike_counts[n] = idx.size
        if idx.size > 0:
            spike_time_ms[n] = all_spike_time_ms[idx] - cerebus_times_ms[n]

    return {'trials': cerebus_times_ms.size,
            'spike counts': spike_counts,
            'spike times ms': spike_time_ms}
def get_spikes(cerebus_times_ms=None, t1_ms=-50, t2_ms=150, fun_args=None):
    """Collect spike statistics around each stimulus from cerebus spike files.

    Inputs:
      cerebus_times_ms : array of cerebus times marking the stimulus starts
      t1_ms / t2_ms : window start / end relative to cerebus_times_ms
      fun_args : dict providing 'nev basic header', 'nev extended header',
        'frag dir' (absolute path to the fragmented nev file), 'channel'
        and 'unit'

    Outputs:
      dict with 'trials' (number of stimuli), 'spike counts' (float array of
      per-trial counts) and 'spike times ms' (per-trial spike times relative
      to stimulus onset, None where the window was empty)
    """
    basic_hdr = fun_args['nev basic header']
    ext_hdr = fun_args['nev extended header']
    frag_dir = fun_args['frag dir']
    channel = fun_args['channel']
    unit = fun_args['unit']

    # Pull in the complete spike train; waveforms are skipped because loading
    # them too can exhaust memory.
    train, _ = nev.read_frag_unit(frag_dir, basic_hdr, ext_hdr,
                                  channel=channel, unit=unit,
                                  tstart_ms=0.0, tdur_ms=-1,
                                  load_waveform=False,
                                  buffer_increment_size=1000)
    all_times = train['spike time ms']

    n_trials = cerebus_times_ms.size
    counts = pylab.zeros(n_trials, dtype=float)
    rel_times = [None] * n_trials
    for trial, t0 in enumerate(cerebus_times_ms):
        # window edges for this stimulus, everything in ms
        in_window = pylab.find((all_times >= t0 + t1_ms) &
                               (all_times <= t0 + t2_ms))
        counts[trial] = in_window.size
        if in_window.size > 0:
            rel_times[trial] = all_times[in_window] - t0

    data = {'trials': n_trials,
            'spike counts': counts,
            'spike times ms': rel_times}
    return data
def get_spikes_in_window(cerebus_times_ms=None, t1_ms=-50, t2_ms=150,
                         nev_basic_header=None, nev_extended_header=None,
                         frag_dir=None, channel=0, unit=0):
    """Read per-stimulus spike times from fragmented cerebus NEV files.

    (Replaces get_spikes.)

    Inputs:
      cerebus_times_ms : array of cerebus times (ms) marking stimulus onsets
      t1_ms : window start relative to each cerebus time (ms)
      t2_ms : window end relative to each cerebus time (ms)
      nev_basic_header / nev_extended_header : headers from nev.read_*_header
      frag_dir : directory where the fragmented nev file is (absolute)
      channel : the channel we worry about
      unit : the unit

    Outputs:
      spike_time_ms : list (same length as cerebus_times_ms); each entry is
        an array of spike times relative to that stimulus onset, or None
        when no spikes fell within [t1_ms, t2_ms]
    """
    # Read in the whole spike train - warning, if you read in the spike
    # waveforms as well, you may run outta memory (hence load_waveform=False).
    data, _ = nev.read_frag_unit(frag_dir, nev_basic_header,
                                 nev_extended_header,
                                 channel=channel, unit=unit,
                                 tstart_ms=0.0, tdur_ms=-1,
                                 load_waveform=False,
                                 buffer_increment_size=1000)
    all_spike_time_ms = data['spike time ms']

    spike_time_ms = [None] * cerebus_times_ms.size
    for n in range(cerebus_times_ms.size):
        tstart_ms = cerebus_times_ms[n] + t1_ms  # all times in ms
        tstop_ms = cerebus_times_ms[n] + t2_ms
        # pylab.find was removed in matplotlib >= 3.1; nonzero on the boolean
        # mask is the equivalent 1D index lookup.
        idx = pylab.nonzero((all_spike_time_ms >= tstart_ms) &
                            (all_spike_time_ms <= tstop_ms))[0]
        if idx.size > 0:
            spike_time_ms[n] = all_spike_time_ms[idx] - cerebus_times_ms[n]
    return spike_time_ms
def get_spikes_in_window(cerebus_times_ms=None, t1_ms=-50, t2_ms=150,
                         nev_basic_header=None, nev_extended_header=None,
                         frag_dir=None, channel=0, unit=0):
    """Extract spike times around each stimulus from cerebus spike files.

    (Replaces get_spikes.)

    Inputs:
      cerebus_times_ms : array of cerebus times marking the stimulus starts
      t1_ms / t2_ms : window start / end relative to cerebus_times_ms
      nev_basic_header, nev_extended_header : nev file headers
      frag_dir : absolute directory of the fragmented nev file
      channel, unit : which channel / unit to read

    Outputs:
      spike_time_ms : list (length same as cerebus_times) of spike times,
        given relative to the cerebus_time; entries stay None when no spike
        lands inside the window
    """
    # Load the full spike train up front; waveforms are deliberately not
    # loaded since they can exhaust memory.
    train, _ = nev.read_frag_unit(frag_dir, nev_basic_header,
                                  nev_extended_header,
                                  channel=channel, unit=unit,
                                  tstart_ms=0.0, tdur_ms=-1,
                                  load_waveform=False,
                                  buffer_increment_size=1000)
    all_times = train['spike time ms']

    spike_time_ms = [None] * cerebus_times_ms.size
    for trial, t0 in enumerate(cerebus_times_ms):
        # select spikes inside [t0 + t1_ms, t0 + t2_ms], all in ms
        in_window = pylab.find((all_times >= t0 + t1_ms) &
                               (all_times <= t0 + t2_ms))
        if in_window.size > 0:
            spike_time_ms[trial] = all_times[in_window] - t0
    return spike_time_ms
# Quick-look script stage: read headers, optionally fragment the nev file,
# optionally read digital events / waveforms.
# NOTE(review): `fragment`, `options`, `logger`, `nev` and `pylab` must be
# defined earlier in the file (not visible in this chunk) — confirm.
read_non_neural = False
load_waveform = False

logger.info('Reading headers')
f = open(options.nevfile)
basic_header = nev.read_basic_header(f)
extended_header = nev.read_extended_header(f, basic_header)
if fragment:
    logger.info('Fragmenting')
    # Split the monolithic .nev file into per-channel fragments
    # (channels 1..96)
    nev.fragment(f, basic_header, extended_header,
                 frag_dir = options.fragdir,
                 channel_list = pylab.arange(1,97))
# NOTE(review): source formatting is collapsed; f.close() is assumed to be
# at top level (the file is no longer needed once headers are parsed and
# fragmentation, if any, is done) — confirm against the original layout.
f.close()

if read_non_neural:
    logger.info('Reading non neural events')
    # Digital (non-neural) event codes and their timestamps, in ms
    time_stamp_ms, codes = \
        nev.read_frag_nonneural_digital(options.fragdir, basic_header)

if load_waveform:
    logger.info('Loading all waveforms')
    # tdur_ms = -1.0 reads the whole recording; waveforms included here
    data, read_errors = \
        nev.read_frag_unit(options.fragdir, basic_header, extended_header,
                           channel = 1, unit = 0,
                           tstart_ms = 0.0, tdur_ms = -1.0,
                           load_waveform = True)
# Export script: write each channel's unsorted spikes to its own ASCII file.
# NOTE(review): `parser`, `logger`, `nev`, `write_header` and `write_data`
# are defined earlier in the file — confirm.
(options, args) = parser.parse_args()
fragment = False
read_non_neural = False
load_waveform = True

logger.info('Reading headers')
# `with` guarantees the nev file is closed even if header parsing raises.
# The handle is only needed for the headers; fragment reads below go
# through options.fragdir, so it is safe to close it before the loop.
with open(options.nevfile) as f:
    basic_header = nev.read_basic_header(f)
    extended_header = nev.read_extended_header(f, basic_header)

unit = 0  # unit 0 = unsorted spikes
for channel in range(1, 97):
    fout_name = options.outdir + '/channel%02d.asc' % (channel)
    # each output file gets a separate channel; `with` closes it even if a
    # read/write step raises (the bare fout.close() leaked on errors)
    with open(fout_name, 'w') as fout:
        write_header(fout, basic_header, extended_header, 1, 1)
        data, data_ok = \
            nev.read_frag_unit(options.fragdir, basic_header, extended_header,
                               channel=channel, unit=unit,
                               tstart_ms=0.0, tdur_ms=-1.0,  # -1.0 = read all
                               load_waveform=True)
        if data_ok:
            write_data(fout, 1, unit, data, options.threshold)
# Quick-look script stage: read headers, optionally fragment the nev file,
# optionally read digital events / waveforms.
# NOTE(review): `fragment`, `read_non_neural`, `load_waveform`, `options`,
# `logger`, `nev` and `pylab` are defined earlier in the file (not visible
# in this chunk) — confirm.
logger.info('Reading headers')
f = open(options.nevfile)
basic_header = nev.read_basic_header(f)
extended_header = nev.read_extended_header(f, basic_header)
if fragment:
    logger.info('Fragmenting')
    # Split the monolithic .nev file into per-channel fragments
    # (channels 1..96)
    nev.fragment(f, basic_header, extended_header,
                 frag_dir=options.fragdir,
                 channel_list=pylab.arange(1, 97))
# NOTE(review): source formatting is collapsed; f.close() is assumed to be
# at top level (the file is no longer needed once headers are parsed and
# fragmentation, if any, is done) — confirm against the original layout.
f.close()

if read_non_neural:
    logger.info('Reading non neural events')
    # Digital (non-neural) event codes and their timestamps, in ms
    time_stamp_ms, codes = \
        nev.read_frag_nonneural_digital(options.fragdir, basic_header)

if load_waveform:
    logger.info('Loading all waveforms')
    # tdur_ms = -1.0 reads the whole recording; waveforms included here
    data, read_errors = \
        nev.read_frag_unit(options.fragdir, basic_header, extended_header,
                           channel = 1, unit = 0,
                           tstart_ms = 0.0, tdur_ms = -1.0,
                           load_waveform = True)
# Export script: one ASCII file per channel, unsorted unit only.
# NOTE(review): `parser`, `logger`, `nev`, `write_header` and `write_data`
# come from earlier in the file — confirm.
(options, args) = parser.parse_args()
fragment = False
read_non_neural = False
load_waveform = True

logger.info('Reading headers')
# Context manager closes the nev file even on a parsing error; the handle
# is unused after the headers are read (fragment reads use options.fragdir).
with open(options.nevfile) as f:
    basic_header = nev.read_basic_header(f)
    extended_header = nev.read_extended_header(f, basic_header)

unit = 0  # Unsorted
for channel in range(1, 97):
    fout_name = options.outdir + '/channel%02d.asc' % (channel)
    # One channel per file; `with` replaces the bare fout.close(), which
    # leaked the handle whenever a read/write step raised.
    with open(fout_name, 'w') as fout:
        write_header(fout, basic_header, extended_header, 1, 1)
        data, data_ok = \
            nev.read_frag_unit(options.fragdir, basic_header, extended_header,
                               channel=channel, unit=unit,
                               tstart_ms=0.0, tdur_ms=-1.0,  # read all of it
                               load_waveform=True)
        if data_ok:
            write_data(fout, 1, unit, data, options.threshold)