#This is the Umat value UMAT = u.build_u_matrix(sm, distance=1, row_normalized=False) #Here you have Umatrix plus its render UMAT = u.show(sm, distance2=1, row_normalized=False, show_data=True, contooor=True, blob=False) ### Try to plot a report # dataSpikes.samples # dataSpikes.timestamps dataSpikes.clusters = sm.cluster()[sm.project_data(allWaves)] figure() plot_cluster_waves(allWaves, dataSpikes.clusters) cr = spikesorting.ClusterReportFromData(dataSpikes, outputDir='/home/nick/Desktop', filename='somcluster_shared_{}{}{}.png'.format(animalName, ephysFn,'Tetrode{}'.format(tetrode) )) codebookVecs = sm.codebook.matrix from sklearn.manifold import TSNE import timeit start_time = timeit.default_timer() model = TSNE(n_components=2, method='barnes_hut', verbose=20, n_iter=1000) Y = model.fit_transform(codebookVecs) elapsed = timeit.default_timer() - start_time print 'ELAPSED TIME: {} mins'.format(elapsed/60)
# --- PCA dimensionality reduction + GMM clustering of spike waveforms ---
# NOTE(review): scratch script; 'sklearn', 'allWaves', 'random', 'timeit',
# 'start_time', 'dataSpikes' and 'spikesorting' come from earlier in the
# file / interactive session.  Uses the pre-0.18 sklearn API
# (RandomizedPCA, mixture.GMM).

# Project waveforms onto the first 50 principal components (timed).
X_pc = sklearn.decomposition.RandomizedPCA(
    n_components=50).fit_transform(allWaves)
elapsed = timeit.default_timer() - start_time
print 'ELAPSED TIME: {} mins'.format(elapsed / 60)

# Fit a 12-component GMM on a random subsample of 10000 PC-space waveforms.
# (random.randint here is presumably numpy.random.randint -- TODO confirm;
#  the stdlib random.randint has no 'size' keyword.)
wavesToUse = random.randint(len(X_pc), size=10000)
pcWavesToUse = X_pc[wavesToUse, :]
model = sklearn.mixture.GMM(n_components=12)
start_time = timeit.default_timer()
model.fit(pcWavesToUse)
elapsed = timeit.default_timer() - start_time
print 'ELAPSED TIME: {} mins'.format(elapsed / 60)

# Predict a cluster for every waveform (not just the fitting subsample).
start_time = timeit.default_timer()
clusters = model.predict(X_pc)
elapsed = timeit.default_timer() - start_time
print 'ELAPSED TIME: {} mins'.format(elapsed / 60)

# Rescale samples (offset 32768, gain 5000, x1000 -- presumably ADC counts
# to millivolts; TODO confirm units) and timestamps to seconds, then write
# the cluster report figure.
GAIN = 5000.0
SAMPLING_RATE = 30000.0
dataSpikes.samples = ((dataSpikes.samples - 32768.0) / GAIN) * 1000.0
dataSpikes.timestamps = dataSpikes.timestamps / SAMPLING_RATE
dataSpikes.clusters = clusters
spikesorting.ClusterReportFromData(dataSpikes,
                                   outputDir='/home/nick/Desktop',
                                   filename='test50PC_GMM.png')
# --- Load tetrode spikes with jaratoolbox and build a cluster report ---
# NOTE(review): needs 'os', 'settings', 'animalName', 'ephysSession',
# 'tetrode' and 'spikesorting' from elsewhere in the file/session.
from jaratoolbox import loadopenephys
reload(loadopenephys)  # Python 2 builtin reload: interactive-session workflow
from pylab import *

N_CHANNELS = 4
SAMPLES_PER_SPIKE = 40

dataDir = os.path.join(settings.EPHYS_PATH,
                       '%s/%s/' % (animalName, ephysSession))
tetrodeFile = os.path.join(dataDir, 'Tetrode%d.spikes' % tetrode)
dataTT = loadopenephys.DataSpikes(tetrodeFile)
dataTT.timestamps = dataTT.timestamps / 0.03  # in microsec
# Subtract 2**15 -- presumably removing the offset of 16-bit unsigned
# samples; TODO confirm against loadopenephys.
dataTT.samples = dataTT.samples.astype(float) - 2**15
dataTT.set_clusters('/tmp/TT2.clu.1')  # cluster labels from a .clu file
crep = spikesorting.ClusterReportFromData(dataTT)

# Disabled alternative: compute peak/valley features and write a .fet file
# for external clustering.
'''
dataTT.samples = dataTT.samples.reshape((-1,N_CHANNELS,SAMPLES_PER_SPIKE),order='C')
fetArray = spikesorting.calculate_features(dataTT.samples,['peak','valley'])
spikesorting.write_fet_file('/tmp/TT2.fet.1',fetArray)
'''
# Disabled quick-look plot of the first 10 waveforms.
'''
plot(dataTT.samples[:10,:].T,'.-')
draw()
show()
'''
# Example KlustaKwik command line (this string continues past this chunk).
'''
~/tmp/klustakwik/KK2/KlustaKwik TT6 1 -Subset 1e5 -MinClusters 6 -MaxClusters 12 -MaxPossibleClusters 12 -UseFeatures 11111111
def show_report(self):
    """Cluster every spike, attach the labels, and render a cluster report.

    Runs find_cluster_each_spike(), stores the resulting labels on
    self.dataTT, and draws a ClusterReportFromData figure with one row
    per cluster plus one extra row.
    """
    self.find_cluster_each_spike()
    spikeLabels = self.clusterEachSpike
    self.dataTT.set_clusters(spikeLabels)
    nReportRows = self.nClusters + 1
    spikesorting.ClusterReportFromData(self.dataTT, nrows=nReportRows)
# NOTE(review): this chunk begins mid-statement -- the kwargs below complete
# a constructor call (presumably TSNE(...)) started above this chunk, run on
# a precomputed distance matrix X_dist.
              n_iter=1000, metric='precomputed')
Y = model.fit_transform(X_dist)
plot(Y[:, 0], Y[:, 1], '.')

# Rescale samples (offset 32768, gain 5000, x1000 -- presumably ADC counts
# to millivolts; TODO confirm units) and timestamps to seconds, then write
# the cluster report figure.
GAIN = 5000.0
SAMPLING_RATE = 30000.0
dataSpikes.samples = ((dataSpikes.samples - 32768.0) / GAIN) * 1000.0
dataSpikes.timestamps = dataSpikes.timestamps / SAMPLING_RATE
from jaratoolbox import spikesorting
spikesorting.ClusterReportFromData(dataSpikes,
                                   outputDir='/home/nick/Desktop',
                                   filename='testKTSNEcluster.png')

# Disabled console progress-bar experiment.
# for i in range(21):
#     sys.stdout.write('\r')
#     # the exact output you're looking for:
#     sys.stdout.write("[%-20s] %d%%" % ('='*i, 5*i))
#     sys.stdout.flush()
#     sleep(0.25)

# progress = (9/100.)
# sys.stdout.write("[%-20s] %d%%" % ('='*np.floor(progress*20), np.floor(progress*100)))

# Paths for another animal's ephys data (needs 'os' imported elsewhere).
animalname = 'adap020'
ephysloc = '/home/nick/data/ephys/'
ephyspath = os.path.join(ephysloc, animalname)