Example #1
def klusters_files(table, basename):
    """Write the Klusters file set (.clu/.fet/.res/.spk/.xml) for a spike table."""
    CluFileName, FetFileName, ResFileName, SpkFileName, XMLFileName = (
        basename + ext
        for ext in [".clu.1", ".fet.1", ".res.1", ".spk.1", ".xml"])
    output.write_clu(table.cols.clu[:], CluFileName)
    output.write_fet(to2d(table.cols.fet[:]), FetFileName,
                     samples=table.cols.time[:])
    output.write_res(table.cols.time[:], ResFileName)
    output.write_spk(table.cols.wave[:], SpkFileName)
    output.write_xml(n_ch=N_CH, n_samp=S_TOTAL, n_feat=N_CH * FPC,
                     sample_rate=SAMPLE_RATE, filepath=XMLFileName)
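A hypothetical call, assuming a PyTables table whose clu/fet/time/wave columns match what the function reads, and that the module-level constants (N_CH, S_TOTAL, FPC, SAMPLE_RATE) and the output/to2d helpers are already in scope; file and node names below are illustrative:

# Hypothetical usage -- 'spikes.h5' and the node name are made up for illustration.
import tables

with tables.open_file('spikes.h5') as h5:
    klusters_files(h5.root.spike_table, 'mydata')
    # -> writes mydata.clu.1, mydata.fet.1, mydata.res.1, mydata.spk.1, mydata.xml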
Example #2
def write_files(basename, CluList, TmList, FetList, ChannelGraph=None,
                STArr=None):
    """Writes files that result from a clustering job."""
    CluFileName = basename + '.clu.1'
    FetFileName = basename + '.fet.1'
    ResFileName = basename + '.res.1'
    XMLFileName = basename + '.xml'
    if CluList is not None:
        output.write_clu(np.array(CluList), CluFileName)
    output.write_res(np.array(TmList), ResFileName)
    output.write_fet(np.array(FetList), FetFileName, samples=np.array(TmList))
    output.write_xml(n_ch=N_CH, n_samp=S_TOTAL, n_feat=N_CH * FPC,
                     sample_rate=SAMPLE_RATE, filepath=XMLFileName)

    if STArr is not None:
        np.save("ST.npy", STArr)
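A hypothetical call with toy inputs; real CluList/TmList/FetList values come from a clustering run, and the module constants must already be defined:

# Hypothetical usage -- all values are toy placeholders.
clu_list = [1, 2, 1]                              # cluster label per spike
tm_list = [120, 340, 560]                         # spike times in samples
fet_list = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]   # one feature row per spike
write_files('mydata', clu_list, tm_list, fet_list)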
Example #3
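This script writes a synthetic multichannel recording plus ground-truth .res/.clu files. It relies on imports and parameters defined before this excerpt; the imports below are the ones the code actually uses, while every parameter value is an illustrative guess, not from the source:

import os
import shutil

import numpy as np
import scipy.stats as ss

import caton                       # project package providing OUT_DIR

# Hypothetical parameter values -- illustrative only.
sample_rate = 20000.               # samples per second
time_length = 10.                  # seconds of fake data
n_channels = 4
n_units = 3
f_arr = [5., 10., 20.]             # mean firing rate (Hz) per unit
T_arr = [.0005, .0005, .001]       # spike half-width (s) per unit
a_arr = [500., 800., 600.]         # spike amplitude per unit
ch_arr = [0, 1, 2]                 # channel each unit is centered on
noise_amp = 50.
tseries = np.arange(0., time_length, 1. / sample_rate)
data = np.zeros((len(tseries), n_channels))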
# Draw a Poisson spike count for each unit, then uniformly distributed spike times.
n_spikes_arr = [ss.poisson(time_length * f).rvs() for f in f_arr]
spike_times = [np.sort(ss.uniform(0, time_length).rvs(n_spikes))
               for n_spikes in n_spikes_arr]

def add_spike(t, t_width, amp, ch):
    """Add one half-sine spike at time t: 75% on all channels, 25% on channel ch."""
    s = np.searchsorted(tseries, t)
    s_width = int(t_width * sample_rate)
    waveform = amp * np.sin(
        -np.pi * (t - tseries[(s - s_width):(s + s_width)]) / t_width)
    data[(s - s_width):(s + s_width), :] += (.75 * waveform).reshape(-1, 1)
    data[(s - s_width):(s + s_width), ch] += .25 * waveform


for i_unit in range(n_units):
    for time in spike_times[i_unit]:
        add_spike(time, T_arr[i_unit], a_arr[i_unit], ch_arr[i_unit])

# scipy.stats.uniform(loc, scale) samples from [loc, loc + scale], so the
# scale must be 2 * noise_amp to get noise symmetric about zero.
white_noise = ss.uniform(-noise_amp, 2 * noise_amp).rvs(data.shape)
data += white_noise

fakedata_dir = os.path.join(caton.OUT_DIR, 'fakedata')
if os.path.exists(fakedata_dir):
    shutil.rmtree(fakedata_dir)  # portable replacement for os.system('rm -r ...')
os.mkdir(fakedata_dir)
os.chdir(fakedata_dir)

import output

np.int16(data).tofile('fakedata1.dat')
write_info_about_file('fakedata1.dat', n_channels, 'int16',
                      sample_rate=sample_rate)  # helper from the surrounding project

# Ground-truth spike times (in samples) and cluster labels, sorted by time.
all_times = np.concatenate(
    [np.int32(time_arr * sample_rate) for time_arr in spike_times])
all_clus = np.concatenate(
    [[i] * len(spike_times[i]) for i in range(len(spike_times))])
sort_inds = np.argsort(all_times)
output.write_res(all_times[sort_inds], 'fakedata1', '')
output.write_clu(all_clus[sort_inds], 'fakedata1', '')
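A quick, illustrative sanity check on the file just written, using the same dtype and channel count the script used:

# Illustrative check: reload the flat int16 file and confirm its shape.
raw = np.fromfile('fakedata1.dat', dtype=np.int16).reshape(-1, n_channels)
assert raw.shape[0] == len(tseries)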