Example #1
0
 def test_create_ts_time_units(self):
     """
     Internally, data are stored as us (microseconds)
     """
     a = np.random.randint(0, 1000, 100)
     a.sort()
     ts = nts.Ts(a / 1000, time_units='ms')
     np.testing.assert_array_almost_equal_nulp(ts.index.values, a)
     ts = nts.Ts(a / 1000000, time_units='s')
     # np.testing.assert_array_almost_equal_nulp(ts.index.values, a.astype(np.int64), nulp=100)
     self.assertTrue(np.all(ts.index.values >= a.astype(np.int64) - 1))
     self.assertTrue(np.all(ts.index.values <= a.astype(np.int64) + 1))
Example #2
0
def loadSpikeData(path, index):
    # units should be the value to convert into s
    import numpy as np
    import scipy.io
    import neuroseries as nts
    spikedata = scipy.io.loadmat(path)
    shank = spikedata['shank']
    shankIndex = np.where(shank == index)[0]
    spikes = {}
    for i in shankIndex:
        spikes[i] = nts.Ts(spikedata['S'][0][0][0][i][0][0][0][1][0][0][2], time_units = 's')
    a = spikes[list(spikes.keys())[0]].as_units('s').index.values
    if ((a[-1]-a[0])/60.)/60. > 20. :  # VERY BAD hack: apparent duration > 20 h means times were likely stored in 0.1 ms units
        spikes = {}
        for i in shankIndex:
            spikes[i] = nts.Ts(spikedata['S'][0][0][0][i][0][0][0][1][0][0][2]*0.0001, time_units = 's')
    return spikes, shank
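# A hedged usage sketch (not part of the original snippet; the path and shank index
# below are hypothetical):
# spikes, shank = loadSpikeData('/path/to/Analysis/SpikeData.mat', index=2)
# spikes[list(spikes.keys())[0]].as_units('s')  # spike times of the first unit, in seconds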
Example #3
0
 def test_realign_right(self, data_class):
     d_a = self.mat_data_right['d_a']
     d_a = d_a.reshape((len(d_a), ))
     t_a = data_class(self.mat_data_right['t_a'].astype(np.int64), d_a)
     t_b = nts.Ts(self.mat_data_right['t_b'].astype(np.int64))
     t_closest = t_a.realign(t_b)
     dt = self.mat_data_right['d_closest'].ravel()
     self.assertTrue((t_closest.values.ravel() != dt).sum() < 10)
     np.testing.assert_array_almost_equal_nulp(t_closest.values.ravel(), dt)
Example #4
0
 def test_create_ts_time_units_double():
     """
     conversion of time units from floating point type
     """
     a = np.floor(np.random.rand(100) * 1000000)
     a.sort()
     ts = nts.Ts(a.copy(), time_units='ms')
     # noinspection PyTypeChecker
     np.testing.assert_array_almost_equal_nulp(ts.index.values / 1000, a)
Example #5
0
 def test_create_ts_from_double(self):
     """
     data get converted to ts and back fine
     """
     a = np.floor(np.random.rand(100)*1000000)
     a.sort()
     ts = nts.Ts(a)
     self.assertIs(ts.index.dtype, np.dtype(np.int64), msg='index type is not int64')
     np.testing.assert_array_almost_equal_nulp(a, ts.index.values)
Example #6
0
 def test_realign_wrong_units(self, data_class):
     d_a = self.mat_data1['d_a']
     d_a = d_a.reshape((len(d_a), ))
     t_a = data_class(self.mat_data1['t_a'].astype(np.int64), d_a)
     t_b = nts.Ts(self.mat_data1['t_b'].astype(np.int64))
     # noinspection PyUnusedLocal
     t_closest = 1
     with self.assertRaises(ValueError):
         t_closest = t_a.realign(t_b, align='banana')
     self.assertTrue(t_closest)
Example #7
0
    def test_create_ts_from_non_sorted():
        """
        if timestamps are not sorted, a warning should be raised and the timestamps sorted for you
        """
        a = np.random.randint(0, 1000, 100)
        # with self.assertWarns(UserWarning):
        #     ts = nts.Ts(a)
        ts = nts.Ts(a)

        np.testing.assert_array_almost_equal_nulp(np.sort(a), ts.index.values)
Example #8
0
def loadRipples(path):
    # 0 : start
    # 1 : middle (peak)
    # 2 : end
    # 3 : amplitude, number of SD above noise
    # 4 : instantaneous frequency
    import numpy as np
    import neuroseries as nts
    ripples = np.genfromtxt(path + '/' + path.split("/")[-1] + '.sts.RIPPLES')
    return (nts.IntervalSet(ripples[:, 0], ripples[:, 2], time_units='s'),
            nts.Ts(ripples[:, 1], time_units='s'))
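# A hedged usage sketch (not part of the original snippet; the session path is hypothetical):
# rip_ep, rip_tsd = loadRipples('/path/to/Mouse12-120806')
# rip_ep is an nts.IntervalSet with the start/end of each ripple, and rip_tsd
# is an nts.Ts with the ripple peak times (stored internally in us).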
Example #9
0
    def test_times_units_ts():
        """
        tests the units calling of times
        """
        a = np.random.randint(0, 10000000, 100)
        a.sort()
        ts = nts.Ts(a)

        np.testing.assert_array_almost_equal_nulp(a, ts.times('us'))
        np.testing.assert_array_almost_equal_nulp(a / 1000., ts.times('ms'))
        np.testing.assert_array_almost_equal_nulp(a / 1.e6, ts.times('s'))
Example #10
0
 def test_create_ts_wrong_units(self):
     """
     if the units are unsupported it should raise ValueError
     """
     a = np.random.randint(0, 10000000, 100)
     a.sort()
     # noinspection PyUnusedLocal
     ts = 1
     with self.assertRaises(ValueError):
         ts = nts.Ts(a, time_units='min')
     self.assertTrue(ts)
def ripples():
    ripples_ = scipy.io.loadmat(
        f"{bk.load.session}-RippleFiring.mat")["ripples"]["allsws"][0][0]
    #     ripples_ = pd.DataFrame(data = ripples,columns=['start','peak','stop'])

    columns = ["start", "peak", "stop"]

    ripples = {}
    for i, c in zip(range(ripples_.shape[1]), columns):
        ripples.update({c: nts.Ts(ripples_[:, i], time_units="s")})
    return ripples
Example #12
0
def ripples():
    ripples_ = scipy.io.loadmat(
        f'{bk.load.session}-RippleFiring.mat')['ripples']['allsws'][0][0]
    #     ripples_ = pd.DataFrame(data = ripples,columns=['start','peak','stop'])

    columns = ['start', 'peak', 'stop']

    ripples = {}
    for i, c in zip(range(ripples_.shape[1]), columns):
        ripples.update({c: nts.Ts(ripples_[:, i], time_units='s')})
    return ripples
def transitions_times(states,epsilon = 1,verbose = False):
    '''
        states : dict of nts.IntervalSet

        This function computes the transitions between the intervals of a dict of states.
        It returns new dicts with the transition intervals and the times at which each transition occurs.

        epsilon : tolerance delay (in s) allowed between two states

        This function does NOT WORK for triple transitions (e.g. sws/rem/sws) ...

    '''
    
    import itertools
    
    empty_state = []
    for state in states:
        if len(states[state]) == 0:
            empty_state.append(state)
            continue
        states[state] = states[state].drop_short_intervals(1)
    
    
    for i in empty_state: del states[i]
        
    transitions_intervals = {}
    transitions_timing = {}
    
    for items in itertools.permutations(states.keys(),2):
#         states[items[0]] = states[items[0]].drop_short_intervals(1)
#         states[items[1]] = states[items[1]].drop_short_intervals(1)
        
        if verbose: print('Looking at transition from',items[0],' to ',items[1])
        end = nts.Ts(np.array(states[items[0]].end + (epsilon * 1_000_000)+1))
        in_next_epoch = states[items[1]].in_interval(end)
        
        transitions_intervals.update({items:[]})
        transitions_timing.update({items:[]})

        for n,t in enumerate(in_next_epoch):
            if np.isnan(t): continue            
            start = states[items[0]].iloc[n].start
            trans = int(np.mean([states[items[0]].iloc[n].end,states[items[1]].iloc[int(t)].start]))
            end  = states[items[1]].iloc[int(t)].end
            transitions_intervals[items].append([start,end])
            transitions_timing[items].append(trans)
        
        if transitions_timing[items]:
            transitions_intervals[items] = np.array(transitions_intervals[items])
            transitions_intervals[items] = nts.IntervalSet(transitions_intervals[items][:,0],transitions_intervals[items][:,1],force_no_fix = True)
            
            transitions_timing[items] = nts.Ts(t = np.array(transitions_timing[items]))
    return transitions_intervals,transitions_timing
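# A hedged usage sketch (not part of the original function): build two toy states and
# look for 'sws' -> 'rem' transitions; the state names and interval values below are
# illustrative assumptions only.
import numpy as np
import neuroseries as nts

sws = nts.IntervalSet(start=np.array([0, 100]), end=np.array([50, 150]), time_units='s')
rem = nts.IntervalSet(start=np.array([50.5, 200]), end=np.array([80, 220]), time_units='s')
trans_intervals, trans_times = transitions_times({'sws': sws, 'rem': rem}, epsilon=1)
# trans_intervals[('sws', 'rem')] is an nts.IntervalSet spanning each sws epoch and the rem
# epoch that follows it; trans_times[('sws', 'rem')] is an nts.Ts of the transition times (in us).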
Example #14
0
 def test_create_ts(self):
     """
     calling convention
     ts = nts.Ts(a)
     ts is an instance of pd.Series
     """
     a = np.random.randint(0, 10000000, 100)
     a.sort()
     ts = nts.Ts(a)
     self.assertIsInstance(ts, pd.Series, msg="ts is not a pd.Series")
     self.assertIsInstance(ts.index.values, np.ndarray,
                           msg="ts doesn't return array values")
     self.assertIs(ts.index.dtype, np.dtype(np.int64), msg='index type is not int64')
     np.testing.assert_array_almost_equal_nulp(a, ts.index.values)
Example #15
0
def loadUFOs(path):
    """
    Name of the file should end with .evt.py.ufo
    """
    import os
    import sys
    name = path.split("/")[-1]
    files = os.listdir(path)
    filename = os.path.join(path, name + '.evt.py.ufo')
    if name + '.evt.py.ufo' in files:
        tmp = np.genfromtxt(filename)[:, 0]
        ripples = tmp.reshape(len(tmp) // 3, 3) / 1000
    else:
        print("No ufo in ", path)
        sys.exit()
    return (nts.IntervalSet(ripples[:, 0], ripples[:, 2], time_units='s'),
            nts.Ts(ripples[:, 1], time_units='s'))
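# A hedged usage sketch (not part of the original snippet; the path is hypothetical):
# ufo_ep, ufo_tsd = loadUFOs('/path/to/Mouse12-120806')
# ufo_ep is an nts.IntervalSet of UFO events and ufo_tsd an nts.Ts of their peak times.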
def freezing_video(video_path, output_file, tf, freezing_intervals):
    """
        video_path : path to the video to be displayed
        output_file : path to the video to be written
        tf : vector of times giving the timing of each frame (in s)
        freezing_intervals : intervals when the animal is freezing (as nts.IntervalSet)
    """
    import os
    import cv2
    import numpy as np
    import scipy.stats
    import neuroseries as nts

    if os.path.exists(output_file):
        print(output_file, 'already exists, please delete it manually')
        return
    print(video_path)
    tf = nts.Ts(tf, time_units='s')
    freezing_frames = np.where(freezing_intervals.in_interval(tf) >= 0)[0]
    fs = 1 / scipy.stats.mode(np.diff(tf.as_units('s').index)).mode[0]
    cap = cv2.VideoCapture(video_path)
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))

    nf = 0
    out = cv2.VideoWriter(output_file,
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fs,
                          (frame_width, frame_height))
    while True:

        ret, frame = cap.read()
        if ret:
            if nf in freezing_frames:
                frame = cv2.circle(frame, (25, 25), 10, (0, 0, 255), 20)

            cv2.imshow(video_path, frame)
            out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            nf += 1
        else:
            break
    cap.release()
    out.release()

    return True
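# A hedged usage sketch (not part of the original snippet; the paths, the frame
# timestamps tf and the freezing intervals below are hypothetical):
# tf = np.arange(0, 600, 1/30.)  # one timestamp per frame at 30 fps, in seconds
# freezing_intervals = nts.IntervalSet(start=np.array([12.0, 40.5]), end=np.array([18.2, 47.0]), time_units='s')
# freezing_video('/path/to/session.avi', '/path/to/session_freezing.avi', tf, freezing_intervals)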
Example #17
0
    def test_realign(self, data_class):
        """
        first simple realign case
        """

        d_a = self.mat_data1['d_a']
        d_a = d_a.reshape((len(d_a),))
        t_a = data_class(self.mat_data1['t_a'].astype(np.int64), d_a)
        t_b = nts.Ts(self.mat_data1['t_b'].astype(np.int64))
        t_closest = t_a.realign(t_b, align='closest')
        dt = self.mat_data1['d_closest'].reshape((len(self.mat_data1['d_closest'],)))
        self.assertTrue((t_closest.values.ravel() != dt).sum() < 10)
        np.testing.assert_array_almost_equal_nulp(t_closest.values.ravel(), dt)

        t_next = t_a.realign(t_b, align='next')
        dt = self.mat_data1['d_next'].reshape((len(self.mat_data1['d_next'],)))
        np.testing.assert_array_almost_equal_nulp(t_next.values.ravel(), dt)

        t_prev = t_a.realign(t_b, align='prev')
        dt = self.mat_data1['d_prev'].reshape((len(self.mat_data1['d_prev'],)))
        np.testing.assert_array_almost_equal_nulp(t_prev.values.ravel(), dt)
Example #18
0
    def complextrain(epochs, num_ofneurons):
        """
        This function simulates neural activity during several time epochs of
        activity or inactivity

        Parameters
        ----------
        epochs : a neuroseries.interval_set.IntervalSet
            an IntervalSet with the start and end of a time period and a label
            indicating if the neuron was "active" or "inactive"
        num_ofneurons :  int
            desired number of neurons to simulate

        Returns
        -------
        complex_spikes : dict
            a dictionary with simulated spike times as values and indices as keys

        """
        complex_spikes = {neuron: [] for neuron in range(num_ofneurons)}
        for i in epochs.index:
            if epochs.loc[i].label == 'active':
                spikes = sim.traingeneration(num_ofneurons,
                                             epochs.loc[i].start,
                                             epochs.loc[i].end,
                                             factor=1)
            elif epochs.loc[i].label == 'inactive':
                spikes = sim.traingeneration(num_ofneurons,
                                             epochs.loc[i].start,
                                             epochs.loc[i].end,
                                             factor=0.25)
            else:
                print("wrong label for epochs")
            for n in complex_spikes.keys():
                complex_spikes[n].extend(spikes[n])
        for n in complex_spikes.keys():
            array = np.sort(np.asarray(complex_spikes[n]))
            complex_spikes[n] = nts.Ts(array, time_units='us')
        return complex_spikes
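    # A hedged usage sketch (not part of the original function): it assumes an
    # IntervalSet-like table with 'start', 'end' and a 'label' column, plus a `sim`
    # module providing traingeneration(); both are taken from the code above.
    # epochs = nts.IntervalSet(start=np.array([0, 10]), end=np.array([10, 20]), time_units='s')
    # epochs['label'] = ['active', 'inactive']
    # spikes = complextrain(epochs, num_ofneurons=5)
    # spikes[0] is then an nts.Ts with the simulated spike times of neuron 0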
Example #19
0
                                            (np.diff(obins) / 2).flatten(),
                                            columns=neurons)
                for k in neurons:
                    spks = spikes[k].as_units('ms').index.values
                    spike_counts[k] = histo(spks, tbins)

                rates_swr.append(np.sqrt(spike_counts / (bin_size)))

            ####################################################################################################################
            # RANDOM
            ####################################################################################################################
            # BINNING
            rnd_tsd = nts.Ts(t=np.sort(
                np.hstack([
                    np.random.randint(sws_ep.loc[j, 'start'] +
                                      500000, sws_ep.loc[j, 'end'] + 500000,
                                      np.maximum(1, n_ex // len(sws_ep)))
                    for j in sws_ep.index
                ])))
            if len(rnd_tsd) > n_ex:
                rnd_tsd = rnd_tsd[0:n_ex]
            rates_rnd = []
            tmp3 = rnd_tsd.index.values / 1000
            for j, t in enumerate(tmp3):
                tbins = t + obins
                spike_counts = pd.DataFrame(index=obins[:, 0] +
                                            (np.diff(obins) / 2).flatten(),
                                            columns=neurons)
                for k in neurons:
                    spks = spikes[k].as_units('ms').index.values
                    spike_counts[k] = histo(spks, tbins)
Example #20
0
def loadSpikeData(path, index=None, fs=20000):
    """
    If the path contains a folder named /Analysis,
    the script will look into it to load either
        - SpikeData.mat saved from matlab
        - SpikeData.h5 saved from this same script
    If not, the res and clu files will be loaded
    and an /Analysis folder will be created to save the data.
    Thus, the next loading of spike times will be faster.
    Notes :
        If the frequency is not given, it is assumed to be 20 kHz
    Args:
        path : string

    Returns:
        dict, array
    """
    if not os.path.exists(path):
        print("The path " + path + " doesn't exist; Exiting ...")
        sys.exit()
    new_path = os.path.join(path, 'Analysis/')
    if os.path.exists(new_path):
        files = os.listdir(new_path)
        if 'SpikeData.mat' in files:
            spikedata = scipy.io.loadmat(new_path + 'SpikeData.mat')
            shank = spikedata['shank'] - 1
            if index is None:
                shankIndex = np.arange(len(shank))
            else:
                shankIndex = np.where(shank == index)[0]
            spikes = {}
            for i in shankIndex:
                spikes[i] = nts.Ts(
                    spikedata['S'][0][0][0][i][0][0][0][1][0][0][2],
                    time_units='s')
            a = spikes[list(spikes.keys())[0]].as_units('s').index.values
            if ((a[-1] - a[0]) / 60.) / 60. > 20.:  # VERY BAD hack: apparent duration > 20 h means times were likely stored in 0.1 ms units
                spikes = {}
                for i in shankIndex:
                    spikes[i] = nts.Ts(
                        spikedata['S'][0][0][0][i][0][0][0][1][0][0][2] *
                        0.0001,
                        time_units='s')
            return spikes, shank
        elif 'SpikeData.h5' in files:
            final_path = os.path.join(new_path, 'SpikeData.h5')
            store = pd.HDFStore(final_path, 'r')
            toreturn = {}
            for n in store['neurons_id'].values:
                toreturn[n] = nts.Ts(store['neuron_' + str(n)].index.values,
                                     time_units='s')

            shank = store['shanks'].values
            store.close()
            return toreturn, shank

        else:
            print("Couldn't find any SpikeData file in " + new_path)
            print("If clu and res files are present in " + path +
                  ", a SpikeData.h5 is going to be created")

    # Creating /Analysis/ Folder here if not already present
    if not os.path.exists(new_path): os.makedirs(new_path)
    files = os.listdir(path)
    clu_files = np.sort([f for f in files if '.clu.' in f and f[0] != '.'])
    res_files = np.sort([f for f in files if '.res.' in f and f[0] != '.'])
    clu1 = np.sort([int(f.split(".")[-1]) for f in clu_files])
    clu2 = np.sort([int(f.split(".")[-1]) for f in res_files])
    if len(clu_files) != len(res_files) or not (clu1 == clu2).any():
        print("Not the same number of clu and res files in " + path +
              "; Exiting ...")
        sys.exit()

    count = 0
    neurons = {}
    shank = []

    for i in range(len(clu_files)):
        clu = np.genfromtxt(os.path.join(path, clu_files[i]),
                            dtype=np.int32)[1:]
        if np.max(clu) > 1:
            res = np.genfromtxt(os.path.join(path, res_files[i]))
            tmp = np.unique(clu).astype(int)
            idx_clu = tmp[tmp > 1]
            idx_neu = np.arange(count, count + len(idx_clu))
            for j, n in zip(idx_clu, idx_neu):
                neurons[n] = pd.Series(index=np.unique(res[clu == j]) / fs,
                                       data=np.uint8(n),
                                       dtype=np.uint8)
                shank.append(int(clu_files[i].split(".")[-1]))
            count += len(idx_clu)

    # Saving SpikeData.h5
    final_path = os.path.join(new_path, 'SpikeData.h5')
    store = pd.HDFStore(final_path, 'w')
    for n in neurons:
        store.append('neuron_' + str(n), neurons[n])
    store.append('neurons_id', pd.Series(np.array(list(neurons.keys()))))
    store.append('shanks', pd.Series(shank))
    store.close()

    # Returning a dictionary
    toreturn = {}
    for i in neurons:
        toreturn[i] = nts.Ts(t=neurons[i].index.values, time_units='s')

    return toreturn, shank
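# A hedged usage sketch (not part of the original snippet; the session path is hypothetical):
# spikes, shank = loadSpikeData('/path/to/Mouse12-120806', fs=20000)
# spikes is a dict {neuron_id: nts.Ts of spike times} and shank gives the shank of each neuron.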
Example #21
0
This script will introduce you to the basics of neuroseries
It is the package used to handle spike times, epochs of wake/rem/sleep, etc
It is built on pandas
'''

import numpy as np
import pandas as pd
import neuroseries as nts
from pylab import *

# let's create fake data for example the time of 10 spikes between 0 and 15 s
random_times = np.random.uniform(0, 15, 10)
# let's sort them in ascending order of appearance
random_times = np.sort(random_times)
# we can include them in a neuroseries object called a Ts (Time series)
my_spike = nts.Ts(random_times, time_units='s')
# DON'T FORGET THE time_units otherwise it will assume your spikes are in microseconds
# Observe your dataset
my_spike
# The first column indicates the timestamps in microseconds
# The second column is full of NaN (Not A Number) because it's just time stamps
# Let's try with spikes with milliseconds timestamps
my_spike2 = nts.Ts(random_times, time_units='ms')
# Observe the difference between the 2
my_spike
my_spike2
# SO REMEMBER
# ALWAYS CHECK THAT YOUR TIME UNITS ARE CORRECT!

# If you have timestamps associated with a value, for example 15 points of EEG over 15 seconds
my_eeg = np.sin(np.arange(0, 15))
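# (a hedged continuation, not part of the original snippet: the Tsd constructor below
# is the same one used in the other neuroseries tutorial example)
# You use a Tsd (Time series data) to attach a value to each timestamp
my_eeg = nts.Tsd(t=np.arange(15), d=my_eeg, time_units='s')
# Observe your variable: the index is now in microseconds and the column holds the values
my_eeg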
def loadSpikeData(path, index=None, fs=20000):
    """
    if the path contains a folder named /Analysis, 
    the script will look into it to load either
        - SpikeData.mat saved from matlab
        - SpikeData.h5 saved from this same script
    if not, the res and clu file will be loaded 
    and an /Analysis folder will be created to save the data
    Thus, the next loading of spike times will be faster
    Notes :
        If the frequency is not given, it is assumed to be 20 kHz
    Args:
        path : string

    Returns:
        dict, array    
    """
    if not os.path.exists(path):
        print("The path " + path + " doesn't exist; Exiting ...")
        sys.exit()
    new_path = os.path.join(path, 'Analysis/')
    if os.path.exists(new_path):
        files = os.listdir(new_path)
        if 'SpikeData.mat' in files:
            spikedata = scipy.io.loadmat(new_path + 'SpikeData.mat')
            shank = spikedata['shank'] - 1
            if index is None:
                shankIndex = np.arange(len(shank))
            else:
                shankIndex = np.where(shank == index)[0]
            spikes = {}
            for i in shankIndex:
                spikes[i] = nts.Ts(
                    spikedata['S'][0][0][0][i][0][0][0][1][0][0][2],
                    time_units='s')
            a = spikes[list(spikes.keys())[0]].as_units('s').index.values
            if ((a[-1] - a[0]) / 60.) / 60. > 20.:  # VERY BAD hack: apparent duration > 20 h means times were likely stored in 0.1 ms units
                spikes = {}
                for i in shankIndex:
                    spikes[i] = nts.Ts(
                        spikedata['S'][0][0][0][i][0][0][0][1][0][0][2] *
                        0.0001,
                        time_units='s')
            return spikes, shank
        elif 'SpikeData.h5' in files:
            final_path = os.path.join(new_path, 'SpikeData.h5')
            spikes = pd.read_hdf(final_path, mode='r')
            # Returning a dictionary | can be changed to return a dataframe
            toreturn = {}
            for i, j in spikes:
                toreturn[j] = nts.Ts(t=spikes[(i, j)].replace(
                    0, np.nan).dropna().index.values,
                                     time_units='s')
            shank = spikes.columns.get_level_values(0).values[:, np.newaxis]
            return toreturn, shank

        else:
            print("Couldn't find any SpikeData file in " + new_path)
            print("If clu and res files are present in " + path +
                  ", a SpikeData.h5 is going to be created")

    # Creating /Analysis/ Folder here if not already present
    if not os.path.exists(new_path): os.makedirs(new_path)
    files = os.listdir(path)
    clu_files = np.sort([f for f in files if 'clu' in f and f[0] != '.'])
    res_files = np.sort([f for f in files if 'res' in f and f[0] != '.'])
    clu1 = np.sort([int(f.split(".")[-1]) for f in clu_files])
    clu2 = np.sort([int(f.split(".")[-1]) for f in res_files])
    if len(clu_files) != len(res_files) or not (clu1 == clu2).any():
        print("Not the same number of clu and res files in " + path +
              "; Exiting ...")
        sys.exit()
    count = 0
    spikes = []
    for i in range(len(clu_files)):
        clu = np.genfromtxt(os.path.join(path, clu_files[i]),
                            dtype=np.int32)[1:]
        if np.max(clu) > 1:
            res = np.genfromtxt(os.path.join(path, res_files[i]))
            tmp = np.unique(clu).astype(int)
            idx_clu = tmp[tmp > 1]
            idx_col = np.arange(count, count + len(idx_clu))
            tmp = pd.DataFrame(index=np.unique(res) / fs,
                               columns=pd.MultiIndex.from_product([[i],
                                                                   idx_col]),
                               data=0,
                               dtype=np.int32)
            for j, k in zip(idx_clu, idx_col):
                tmp.loc[res[clu == j] / fs, (i, k)] = k + 1
            spikes.append(tmp)
            count += len(idx_clu)

        # tmp2 = pd.DataFrame(index=res[clu==j]/fs, data = k+1, ))
        # spikes = pd.concat([spikes, tmp2], axis = 1)
    spikes = pd.concat(spikes, axis=1)
    spikes = spikes.fillna(0)
    spikes = spikes.astype(np.int32)

    # Saving SpikeData.h5
    final_path = os.path.join(new_path, 'SpikeData.h5')
    spikes.columns.set_names(['shank', 'neuron'], inplace=True)
    spikes.to_hdf(final_path, key='spikes', mode='w')

    # Returning a dictionary
    toreturn = {}
    for i, j in spikes:
        toreturn[j] = nts.Ts(t=spikes[(i, j)].replace(
            0, np.nan).dropna().index.values,
                             time_units='s')

    shank = spikes.columns.get_level_values(0).values[:, np.newaxis].flatten()

    return toreturn, shank
Example #23
0
    os.mkdir(data_directory+'/plots')
path =rootDir + '/' + ID + '/' + session
#count number of sessions
ns = int([i for i in os.listdir(path) if os.path.isdir(path+'/'+i)==True][-1][-1:])
if ns == 1:
    episodes=['wake']
else:
    episodes = ['wake' if i==wakepos else 'sleep' for i in list(range(ns+1))]
spikes, shank = loadSpikeData(data_directory)
n_channels, fs, shank_to_channel = loadXML(data_directory)
position = loadPosition(data_directory, events, episodes, n_ttl_channels = 2, optitrack_ch = 0)
wake_ep                             = loadEpoch(data_directory, 'wake', episodes)
if "sleep" in episodes:
    sleep_ep                             = loadEpoch(data_directory, 'sleep')                    
ttl_track, ttl_opto_start, ttl_opto_end = loadTTLPulse2(os.path.join(data_directory, session+'_0_analogin.dat'), 2)
ttl_track = nts.Ts(ttl_track.index.values, time_units = 's')
ttl_opto_start = nts.Ts(ttl_opto_start.index.values, time_units = 's')
ttl_opto_end = nts.Ts(ttl_opto_end.index.values, time_units = 's')
opto_ep = nts.IntervalSet(start = ttl_opto_start.index.values, end = ttl_opto_end.index.values)
stim_ep=manage.optoeps(ttl_opto_start, ttl_opto_end) #Load main stim epochs


neuron = 5
# whole duration= int(spikes[neuron].as_units('s').index[-1] - spikes[21].as_units('s').index[0])
bins = np.arange(0, 300000, 1)
plt.figure()
plt.hist(np.diff(spikes[neuron].index.values), bins)



bins = np.arange(0, 300000+10000, 10000)
Example #24
0
                           '/Analysis/HDCells.mat')['hdCellStats'][:, -1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])

spikes = {
    k: spikes[k]
    for k in np.where(hd_info_neuron == 0)[0] if k not in []
}
neurons = np.sort(list(spikes.keys()))

####################################################################################################################
# binning data
####################################################################################################################
allrates = {}

n_ex = 200
tmp = nts.Ts(rip_tsd.as_series().sample(
    n_ex, replace=False).sort_index()).index.values
rip_tsd = pd.Series(index=tmp, data=np.nan)
# rip_tsd = rip_tsd.iloc[0:200]

bins_size = [200, 10, 100]


####################################################################################################################
# BIN SWR
####################################################################################################################
@jit(nopython=True)
def histo(spk, obins):
    n = len(obins)
    count = np.zeros(n)
    for i in range(n):
        count[i] = np.sum((spk > obins[i, 0]) * (spk < obins[i, 1]))
Example #25
0
This script will introduce you to the basics of neuroseries
It is the package used to handle spike times, epochs of wake/rem/sleep, etc
It is built on pandas
'''
import numpy as np
import pandas as pd
# the folder neuroseries should be in current directory
# to check, you can type ls in ipython3 to list all the files and folders in your current directory
import neuroseries as nts

# let's create fake data for example the time of 10 spikes between 0 and 15 s
my_spike = np.random.uniform(0, 15, 10)
# let's sort them in ascending order of appearance
my_spike = np.sort(my_spike)
# we can include them in a neuroseries object called a Ts (Time series)
my_spike = nts.Ts(my_spike, time_units='s')
# DON'T FORGET THE time_units otherwise it will assume your spikes are in microseconds
# Observe your dataset
my_spike
# The first column indicates the timestamps in microseconds
# The second column is full of NaN (Not A Number) because it's just time stamps

# If you have timestamps associated with a value, for example 15 points of EEG, one each second
my_eeg = np.sin(np.arange(0, 15))
# You use a Tsd (Time series data)
my_eeg = nts.Tsd(t=np.arange(15), d=my_eeg, time_units='s')
# Observe your variable
my_eeg
# And note how the software transforms your timestamps in seconds into timestamps in microseconds

# Now if you are using a fancy probe and recording for example 3 channels at the same time
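# (a hedged continuation, not part of the original snippet: it assumes neuroseries
# provides a TsdFrame for multi-column data, one column per channel)
# my_channels = nts.TsdFrame(t=np.arange(15), d=np.random.rand(15, 3), time_units='s')
# my_channels would then hold one column per channel, indexed by time in microseconds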
Example #26
0
		subplot(211)
		plot(angle1.mean(1), label = session)
		legend()
		title("Pas de zscore")
		subplot(212)
		plot(angle2.mean(1), label = session)
		title("Zscored")
		legend()
		show()

		sys.exit()


		# UP/DOWN

		up_tsd = nts.Ts(t = up_ep['start'].values)

		d = np.vstack(rip_tsd.index.values) - up_tsd.index.values
		d[d<0] = np.max(d)
		idx = np.argmin(d, 1)
		up_time = up_tsd.index.values[idx]

		interval = rip_tsd.index.values - up_time

		a = angle2.iloc[:,np.argsort(interval)]

		groups = []
		for idx in np.array_split(np.arange(a.shape[1]),3):
			groups.append(a[idx].mean(1))
		groups = pd.concat(groups, 1)
Example #27
0
# let's imagine that the animal is moving his head clockwise at a constant speed
angle = np.arange(0, 100, 0.1)
# let's bring that between 0 and 2pi
angle = angle % (2 * np.pi)
# let's imagine the sampling rate is 100Hz for detecting the position of the animal
# So we have a dt of
dt = 1 / 100.
# and a duration of
duration = dt * len(angle)
# let's put angle in a neuroseries tsd
angle = nts.Tsd(t=np.arange(0, duration, dt), d=angle, time_units='s')

# now let's imagine we have some spikes
spikes = np.sort(np.random.uniform(0, duration, 100))
spikes = nts.Ts(spikes, time_units='s')

# We can plot both angle and spikes together
figure()
plot(angle)
plot(spikes.times(), np.zeros(len(spikes)), '|', markersize=10)
show()

# So the question is: what was the corresponding angular position when a spike was recorded?
# To do that, you use realign, which basically takes the angle value closest in time to each spike
angle_spike = angle.realign(spikes)
# The order matters here! it's not spikes.realign(angle)
# let's look at what it does
figure()
plot(angle)
plot(spikes.times(), np.zeros(len(spikes)), '|', markersize=10)
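# A hedged follow-up (not part of the original snippet): angle_spike is a Tsd whose
# index is the spike times and whose values are the head direction at each spike,
# so a crude occupancy-normalized tuning curve can be sketched with numpy alone
spike_counts, edges = np.histogram(angle_spike.values, bins=20, range=(0, 2 * np.pi))
occupancy, _ = np.histogram(angle.values, bins=20, range=(0, 2 * np.pi))
tuning_curve = spike_counts / (occupancy * dt)  # spikes per second in each angular bin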
Example #28
0
    spind_hpc_ep = nts.IntervalSet(tmp[:, 0], tmp[:, 1], time_units='ms')
    spind_ep = spind_hpc_ep.intersect(spind_thl_ep).drop_short_intervals(0.0)
    spind_thl_no_hpc = spind_thl_ep.set_diff(
        spind_hpc_ep).drop_short_intervals(0.0)
    spind_hpc_no_thl = spind_hpc_ep.set_diff(
        spind_thl_ep).drop_short_intervals(0.0)
    store = pd.HDFStore("../data/phase_spindles/" + session.split("/")[1] +
                        ".lfp")
    phase_hpc = nts.Tsd(store['phase_hpc_spindles'])
    phase_thl = nts.Tsd(store['phase_thl_spindles'][0])
    store.close()
    spikes = {}
    store_spike = pd.HDFStore("../data/spikes_thalamus/" +
                              session.split("/")[1] + ".spk")
    for n in store_spike.keys():
        spikes[int(n[1:])] = nts.Ts(store_spike[n])
    store_spike.close()

    ##################################################################################################
    # SPINDLES MODULATION
    ##################################################################################################

    spind_mod1 = computePhaseModulation(phase_hpc, spikes, spind_hpc_ep)
    spind_mod2 = computePhaseModulation(phase_thl, spikes, spind_thl_ep)
    spind_mod3 = computePhaseModulation(phase_hpc, spikes, spind_ep)
    spind_mod4 = computePhaseModulation(phase_thl, spikes, spind_ep)
    spind_mod5 = computePhaseModulation(phase_thl, spikes, spind_thl_no_hpc)
    spind_mod6 = computePhaseModulation(phase_hpc, spikes, spind_hpc_no_thl)

    kappa = np.vstack([
        spind_mod1[:, 2], spind_mod3[:, 2], spind_mod2[:, 2], spind_mod4[:, 2]
Example #29
0
	rip_ep,rip_tsd 	= loadRipples(data_directory+session)
	rip_ep			= sws_ep.intersect(rip_ep)	
	rip_tsd 		= rip_tsd.restrict(sws_ep)	
	
	hd_info 		= scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
	hd_info_neuron	= np.array([hd_info[n] for n in spikes.keys()])

	spikeshd 		= {k:spikes[k] for k in np.where(hd_info_neuron==1)[0] if k not in []}
	spikesnohd 		= {k:spikes[k] for k in np.where(hd_info_neuron==0)[0] if k not in []}
	hdneurons		= np.sort(list(spikeshd.keys()))
	nohdneurons		= np.sort(list(spikesnohd.keys()))

	# UP/DOWN
	down_ep, up_ep = loadUpDown(data_directory+session)

	up_tsd = nts.Ts(t = up_ep['start'].values)

	bin_size 	= 10
	nb_bins 	= 400
	times 		= np.arange(0, bin_size*(nb_bins+1), bin_size) - (nb_bins*bin_size)/2

	C = crossCorr(up_tsd.as_units('ms').index.values, rip_tsd.as_units('ms').index.values, bin_size, nb_bins)
	
	C = pd.Series(index = times, data = C)

	CC[session] = C

CC = pd.DataFrame.from_dict(CC)

a = CC.rolling(window = 40, win_type = 'gaussian', center = True, min_periods = 1).mean(std = 4.0)