Example #1
def DLC_pos(filtered=True, force_reload=False, save=False):
    """
    Load position from DLC files (*.h5) and return it as an nts.TsdFrame
    """
    files = os.listdir()
    if ('positions.h5' in files) and not force_reload:
        data = pd.read_hdf('positions.h5')
        pos = nts.TsdFrame(data)
        return pos

    filename = None
    for f in files:
        if filtered and f.endswith('filtered.h5'):
            filename = f
            break
        if not filtered and not f.endswith('filtered.h5') and f.endswith(
                '.h5'):
            filename = f
            break

    if filename is None:
        raise FileNotFoundError('No matching DLC .h5 file found')
    data = pd.read_hdf(filename)
    data = data[data.keys()[0][0]]

    TTL = digitalin('digitalin.dat')[0, :]
    tf = bk.compute.TTL_to_times(TTL)

    if len(tf) > len(data):
        tf = np.delete(tf, -1)

    data.index = tf * 1_000_000  # seconds -> microseconds (nts default unit)

    if save:
        data.to_hdf('positions.h5', 'pos')

    pos = nts.TsdFrame(data)
    return pos
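# Usage sketch: assumes the current working directory is a session folder
# containing the DLC *.h5 output and the 'digitalin.dat' read above.
# pos = DLC_pos(filtered=True, save=True)  # first call: parse DLC file, cache positions.h5
# pos = DLC_pos()                          # later calls hit the positions.h5 cache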
Example #2
def getPeaksandTroughs(lfp, min_points):
    """	 
		At 250Hz (1250/5), 2 troughs cannont be closer than 20 (min_points) points (if theta reaches 12Hz);		
	"""
    import neuroseries as nts
    import scipy.signal
    if isinstance(lfp, nts.time_series.Tsd):
        troughs = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmin(
            lfp.values, order=min_points)[0]],
                          time_units='us')
        peaks = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmax(
            lfp.values, order=min_points)[0]],
                        time_units='us')
        tmp = nts.Tsd(
            troughs.realign(peaks, align='next').as_series().drop_duplicates(
                keep='first'))  # eliminate double peaks
        peaks = peaks[tmp.index]
        tmp = nts.Tsd(
            peaks.realign(troughs, align='prev').as_series().drop_duplicates(
                keep='first'))  # eliminate double troughs
        troughs = troughs[tmp.index]
        return (peaks, troughs)
    elif isinstance(lfp, nts.time_series.TsdFrame):
        peaks = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))
        troughs = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))
        for i in lfp.keys():
            peaks[i], troughs[i] = getPeaksandTroughs(lfp[i], min_points)
        return (peaks, troughs)
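# The min_points bound from the docstring, made explicit (a small sketch;
# fs and theta_max are assumptions matching the 1250/5 Hz pipeline above):
fs = 1250.0 / 5.0                 # LFP downsampled to 250 Hz
theta_max = 12.0                  # fastest theta cycle considered
min_points = int(fs / theta_max)  # 250 / 12 -> 20 samples between troughs
# peaks, troughs = getPeaksandTroughs(filtered_lfp, min_points)  # filtered_lfp is hypothetical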
Example #3
def calc_Angular_velocity(decoded_angles):
	# Calculate angular velocity as a function of time...
	ang_vector = []
	for i in range(len(decoded_angles)):
		ang_vector.append(nts.TsdFrame((decoded_angles[i]).index.values[:-1],np.diff(decoded_angles[i]), time_units = 'us'))

	ts = []
	for i in range(len(ang_vector)):
		ts.append((ang_vector[i]).index.values)

	ang_vec = []
	for i in range(len(decoded_angles)):
		y1 = np.absolute(np.diff(decoded_angles[i]))
		ang_vec.append(nts.TsdFrame(np.arange(len(ts[i])), np.where(y1> np.pi, 2*np.pi - y1, y1)/np.diff(decoded_angles[i].index.values)))
	return ang_vec	
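# The np.where(...) above implements a circular difference: steps are wrapped
# so a jump across the 0/2*pi seam counts the short way around. A minimal
# standalone check (the example angles are illustrative):
import numpy as np
a = np.array([6.2, 0.1])                         # crosses the 0/2*pi seam
y1 = np.absolute(np.diff(a))                     # naive step: ~6.1 rad
print(np.where(y1 > np.pi, 2 * np.pi - y1, y1))  # wrapped step: ~0.183 rad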
Example #4
def lfp(start,
        stop,
        n_channels=90,
        channel=64,
        frequency=1250.0,
        precision='int16',
        verbose=False):

    p = session + ".lfp"
    if verbose:
        print('Load LFP from ' + p)
    # From Guillaume Viejo
    import neuroseries as nts
    bytes_size = 2
    start_index = int(start * frequency * n_channels * bytes_size)
    stop_index = int(stop * frequency * n_channels * bytes_size)
    # In order not to read past the end of the file
    if stop_index > os.path.getsize(p): stop_index = os.path.getsize(p)
    fp = np.memmap(p,
                   np.int16,
                   'r',
                   start_index,
                   shape=(stop_index - start_index) // bytes_size)
    data = np.array(fp).reshape(len(fp) // n_channels, n_channels)

    if type(channel) is not list:
        timestep = np.arange(0, len(data)) / frequency + start
        return nts.Tsd(timestep, data[:, channel], time_units='s')
    elif type(channel) is list:
        timestep = np.arange(0, len(data)) / frequency + start
        return nts.TsdFrame(timestep, data[:, channel], time_units='s')
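# The seek arithmetic above, spelled out for an interleaved int16 binary
# (the numbers are illustrative, not from a real file):
start_s, fs_lfp, n_ch, bytes_size = 10.0, 1250.0, 90, 2
start_index = int(start_s * fs_lfp * n_ch * bytes_size)  # 2_250_000 bytes into the file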
Example #5
def speed(pos, value_gaussian_filter, columns_to_drop=None):

    body = []
    for i in pos:
        body.append(i[0])
    body = np.unique(body)

    all_speed = np.empty((len(pos) - 1, len(body)))  # one column per body part
    for i, b in enumerate(body):
        x_speed = np.diff(pos.as_units('s')[b]['x']) / np.diff(
            pos.as_units('s').index)
        y_speed = np.diff(pos.as_units('s')[b]['y']) / np.diff(
            pos.as_units('s').index)

        v = np.sqrt(x_speed**2 + y_speed**2)
        all_speed[:, i] = v
    all_speed = scipy.ndimage.gaussian_filter1d(all_speed,
                                                value_gaussian_filter,
                                                axis=0)
    all_speed = nts.TsdFrame(t=pos.index.values[:-1],
                             d=all_speed,
                             columns=body)
    if columns_to_drop is not None:
        all_speed = all_speed.drop(columns=columns_to_drop)

    return all_speed
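# Usage sketch: assumes pos is a DLC-style TsdFrame whose columns are
# (bodypart, coordinate) pairs, as produced by DLC_pos above; the Gaussian
# sigma is an illustrative assumption:
# v = speed(pos, value_gaussian_filter=10)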
Example #6
def loadLFP(path,
            n_channels=90,
            channel=64,
            frequency=1250.0,
            precision='int16'):
    import neuroseries as nts
    if type(channel) is not list:
        f = open(path, 'rb')
        startoffile = f.seek(0, 0)
        endoffile = f.seek(0, 2)
        bytes_size = 2
        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        duration = n_samples / frequency
        interval = 1 / frequency
        f.close()
        with open(path, 'rb') as f:
            data = np.fromfile(f, np.int16).reshape(
                (n_samples, n_channels))[:, channel]
        timestep = np.arange(0, len(data)) / frequency
        return nts.Tsd(timestep, data, time_units='s')
    elif type(channel) is list:
        f = open(path, 'rb')
        startoffile = f.seek(0, 0)
        endoffile = f.seek(0, 2)
        bytes_size = 2

        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        duration = n_samples / frequency
        f.close()
        with open(path, 'rb') as f:
            data = np.fromfile(f, np.int16).reshape(
                (n_samples, n_channels))[:, channel]
        timestep = np.arange(0, len(data)) / frequency
        return nts.TsdFrame(timestep, data, time_units='s')
Example #7
def processed_files(animal_id, date):
    """Inputs to the function must be strings."""

    data_dir = 'D:/EphysData/Experiments/' + date + '/' + animal_id + '-' + date + '/' + animal_id + '-' + date + '/Analysis'
    base_dir = 'D:/EphysData/Experiments/' + date + '/' + animal_id + '-' + date + '/' + animal_id + '-' + date
    epochs = nts.IntervalSet(pd.read_hdf(data_dir + '/BehavEpochs.H5'))
    position = pd.read_hdf(data_dir + '/' + 'Position.H5')
    position = nts.TsdFrame(t=position.index.values,
                            d=position.values,
                            columns=position.columns,
                            time_units='s')
    spikes, shank = loadSpikeData(
        base_dir)  # shank tells the number of cells on each shank

    tcurv = {}
    for i in range(len(epochs)):
        tcurv[i] = computeAngularTuningCurves(
            spikes, position['ry'],
            nts.IntervalSet(epochs.loc[i, 'start'], epochs.loc[i, 'end']), 60)
    # Tuning curves are computed over the entire epoch; to restrict them, modify the end time above.
    #load tuning curve
    #np.load(dir+'/'+animal_id+'.npy').item()

    np.save(os.path.join(data_dir, animal_id), tcurv)
    return spikes, epochs, position, tcurv
Example #8
def lfp(
    channel,
    start=0,
    stop=1e8,
    fs=1250.0,
    n_channels_local=None,
    precision=np.int16,
    dat=False,
    verbose=False,
    memmap=False,
    p=None,
    volt_step=0.195,
):

    if channel is None or (np.isscalar(channel) and np.isnan(channel)):
        return None

    if p is None:
        p = session + ".lfp"
        if dat:
            p = session + ".dat"

    if n_channels_local is None:
        n_channels = xml()["nChannels"]
    else:
        n_channels = n_channels_local

    if verbose:
        print("Load data from " + p)
        print(f"File contains {n_channels} channels")

    # From Guillaume Viejo
    import neuroseries as nts

    bytes_size = 2
    start_index = int(start * fs * n_channels * bytes_size)
    stop_index = int(stop * fs * n_channels * bytes_size)
    # In order not to read past the end of the file
    if stop_index > os.path.getsize(p):
        stop_index = os.path.getsize(p)
    fp = np.memmap(p,
                   precision,
                   "r",
                   start_index,
                   shape=(stop_index - start_index) // bytes_size)
    if memmap:
        print(r"/!\ memmap is not compatible with volt_step /!\ ")
        return fp.reshape(-1, n_channels)[:, channel]
    data = np.array(fp).reshape(len(fp) // n_channels, n_channels) * volt_step

    if type(channel) is not list:
        timestep = np.arange(0, len(data)) / fs + start
        return nts.Tsd(timestep, data[:, channel], time_units="s")
    elif type(channel) is list:
        timestep = np.arange(0, len(data)) / fs + start
        return nts.TsdFrame(timestep, data[:, channel], time_units="s")
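# Note on volt_step (an assumption worth checking per rig): 0.195 matches the
# 0.195 uV/bit LSB of common Intan headstages, so the returned values are in
# microvolts rather than raw ADC counts.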
Example #9
def getPhase(lfp, fmin, fmax, nbins, fsamp, power=False):
    """
    Continuous wavelet transform.
    Returns the phase of lfp as a Tsd (and the power if power=True).
    """
    import neuroseries as nts
    from Wavelets import MyMorlet as Morlet
    if isinstance(lfp, nts.time_series.TsdFrame):
        allphase = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))
        allpwr = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))
        for i in lfp.keys():
            allphase[i], allpwr[i] = getPhase(lfp[i],
                                              fmin,
                                              fmax,
                                              nbins,
                                              fsamp,
                                              power=True)
        if power:
            return allphase, allpwr
        else:
            return allphase

    elif isinstance(lfp, nts.time_series.Tsd):
        cw = Morlet(lfp.values, fmin, fmax, nbins, fsamp)
        cwt = cw.getdata()
        cwt = np.flip(cwt, axis=0)
        wave = np.abs(cwt)**2.0
        phases = np.arctan2(np.imag(cwt), np.real(cwt)).transpose()
        cwt = None
        index = np.argmax(wave, 0)
        # memory problem here, need to loop
        phase = np.zeros(len(index))
        for i in range(len(index)):
            phase[i] = phases[i, index[i]]
        phases = None
        if power:
            pwrs = cw.getpower()
            pwr = np.zeros(len(index))
            for i in range(len(index)):
                pwr[i] = pwrs[index[i], i]
            return nts.Tsd(lfp.index.values,
                           phase), nts.Tsd(lfp.index.values, pwr)
        else:
            return nts.Tsd(lfp.index.values, phase)
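# Usage sketch for theta phase: assumes lfp_hpc is a Tsd sampled at 1250 Hz
# and the Morlet wrapper imported above is on the path:
# phase = getPhase(lfp_hpc, fmin=5, fmax=12, nbins=24, fsamp=1250.0)
# phase, pwr = getPhase(lfp_hpc, 5, 12, 24, 1250.0, power=True)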
Example #10
def load_continuous_tsd(paths,
                        t_min=None,
                        t_max=None,
                        downsample=None,
                        columns=None):
    """
    read data for a specific time interval from a list of files (or ContinuousFile objects)
    Args:
        paths: a list of pathnames or ContinuousFile objects
        t_min: the low end of the time interval to read
        t_max: the high end of the time interval to read
        downsample: if not None, it should be an integer and acts as a downsampling factor (useful to get e.g. LFP)
        columns: a list of column names for the resulting TsdFrame. If None, the labels from the ContinuousFile objects 
        are used

    Returns:
        a TsdFrame with the data 
    """
    import scipy.signal as ss
    if isinstance(paths, str):
        paths = (paths, )
    elif not is_sequence(paths):
        raise TypeError("paths must be a string or list of strings.")

    if isinstance(paths[0], str):
        cf = [ContinuousFile(p) for p in paths]
    else:
        cf = paths

    data, tstamps = cf[0].read_interval(t_min, t_max)
    if downsample:
        data = ss.decimate(data, downsample, zero_phase=True)
    data = data.reshape((-1, 1))
    columns_from_files = False
    if columns is None:
        columns = [cf[0].label]
        columns_from_files = True
    if isinstance(columns, tuple):
        columns = list(columns)
    for f in cf[1:]:
        d, ts1 = f.read_interval(t_min, t_max)
        assert len(ts1) == len(tstamps)
        if downsample:
            d = ss.decimate(d, downsample, zero_phase=True)
        data = np.hstack((data, d.reshape((-1, 1))))
        if columns_from_files:
            columns.append(f.label)

    if downsample:
        tstamps = tstamps[::downsample]
        data = data[:, :len(tstamps)]

    cont_tsd = nts.TsdFrame(tstamps, data, columns=columns)

    return cont_tsd
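# Usage sketch; the file names, decimation factor, and column labels are
# illustrative assumptions:
# lfp = load_continuous_tsd(['100_CH1.continuous', '100_CH2.continuous'],
#                           t_min=0, t_max=60,
#                           downsample=30,          # e.g. 30 kHz -> 1 kHz
#                           columns=['ch1', 'ch2'])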
Example #11
def downsample(tsd, up, down):
    import scipy.signal
    import neuroseries as nts
    dtsd = scipy.signal.resample_poly(tsd.values, up, down)
    dt = tsd.as_units('s').index.values[np.arange(0, tsd.shape[0], down)]
    if len(tsd.shape) == 1:
        return nts.Tsd(dt, dtsd, time_units='s')
    elif len(tsd.shape) == 2:
        return nts.TsdFrame(dt,
                            dtsd,
                            time_units='s',
                            columns=list(tsd.columns))
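# Worked example: elsewhere in this collection the hippocampal LFP is
# decimated with downsample(lfp_hpc, 1, 5), i.e. 1250 Hz -> 250 Hz.
# resample_poly applies an anti-aliasing filter, and the time index is
# subsampled every 'down' samples to match:
# lfp_250 = downsample(lfp_hpc, 1, 5)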
Example #12
def pos(save=False):
    # BK : 04/08/2020
    # Return a neuroseries TsdFrame of position with time as index

    #     session_path = get_session_path(session_name)
    import csv

    pos_clean = scipy.io.loadmat(path + "/posClean.mat")["posClean"]
    #     if save == True :
    #         with open('position'+'.csv', 'w') as csvfile:
    #             filewriter=csv.writer(csvfile)
    return nts.TsdFrame(t=pos_clean[:, 0],
                        d=pos_clean[:, 1:],
                        columns=["x", "y"],
                        time_units="s")
Example #13
def load_Positions(path, ep):
	# Load the position HDF file as a TsdFrame.
	# Epoch to select from the whole recording. Available options: 'wake', 'presleep', 'postsleep'
	import os
	import sys
	import neuroseries as nts
	if not os.path.exists(path):
		print("The path "+path+" doesn't exist; Exiting ...")
		sys.exit()    
	new_path = os.path.join(path, 'Analysis/')
	if os.path.exists(new_path):
		store = pd.HDFStore(new_path + 'Positions.h5')
		store = pd.HDFStore(new_path + 'Positions.h5')
		pos = store['positions']
		try:
			presl = (pos.loc[ep])
			positions_vs_t = nts.TsdFrame(t = (presl['timestamp']).values, d = presl[['x','y','angle']].values, columns  = presl[['x','y','angle']].columns, time_units = 's')
			return positions_vs_t
		except KeyError:
			print("Epoch name '" + ep + "' doesn't exist in the position data. Exiting...")
			sys.exit()
Example #14
def loadLFP(path,
            n_channels=90,
            channel=64,
            frequency=1250.0,
            precision="int16"):
    """
    LEGACY
    """
    # From Guillaume Viejo
    import neuroseries as nts

    if type(channel) is not list:
        f = open(path, "rb")
        startoffile = f.seek(0, 0)
        endoffile = f.seek(0, 2)
        bytes_size = 2
        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        duration = n_samples / frequency
        interval = 1 / frequency
        f.close()
        with open(path, "rb") as f:
            print("opening")
            data = np.fromfile(f, np.int16).reshape(
                (n_samples, n_channels))[:, channel]
            timestep = np.arange(0, len(data)) / frequency
        return nts.Tsd(timestep, data, time_units="s")
    elif type(channel) is list:
        f = open(path, "rb")
        startoffile = f.seek(0, 0)
        endoffile = f.seek(0, 2)
        bytes_size = 2

        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        duration = n_samples / frequency
        f.close()
        with open(path, "rb") as f:
            data = np.fromfile(f, np.int16).reshape(
                (n_samples, n_channels))[:, channel]
            timestep = np.arange(0, len(data)) / frequency
        return nts.TsdFrame(timestep, data, time_units="s")
Example #15
def loadPosition(path,
                 events=None,
                 episodes=None,
                 n_ttl_channels=1,
                 optitrack_ch=None,
                 names=['ry', 'rx', 'rz', 'x', 'y', 'z'],
                 update_wake_epoch=True):
    """
	load the position contained in /Analysis/Position.h5

	Notes:
		The order of the columns is assumed to be
			['ry', 'rx', 'rz', 'x', 'y', 'z']
	Args:
		path: string
		
	Returns:
		neuroseries.TsdFrame
	"""
    if not os.path.exists(path):  # Checking for path
        print("The path " + path + " doesn't exist; Exiting ...")
        sys.exit()
    new_path = os.path.join(path, 'Analysis')
    if not os.path.exists(new_path): os.mkdir(new_path)
    file = os.path.join(path, 'Analysis', 'Position.h5')
    if not os.path.exists(file):
        makePositions(path, events, episodes, n_ttl_channels, optitrack_ch,
                      names, update_wake_epoch)
    if os.path.exists(file):
        store = pd.HDFStore(file, 'r')
        position = store['position']
        store.close()
        position = nts.TsdFrame(t=position.index.values,
                                d=position.values,
                                columns=position.columns,
                                time_units='s')
        return position
    else:
        print("Cannot find " + file + " for loading position")
        sys.exit()
Example #16
def det_pos(data_directory, ID, color, path2save):
    # Load the angular value at each time step and make an nts frame of it
    data = np.genfromtxt(data_directory + ID + '_PosHD.txt')
    mouse_position = nts.TsdFrame(d=data[:, [1, 2, 3]],
                                  t=data[:, 0],
                                  time_units='s')
    # But TsdFrame is a wrapper around pandas, and you can change column names in pandas
    # So let's change the column names
    mouse_position.columns = ['x', 'y', 'ang']

    #Plot position
    plt.figure()
    plt.plot(mouse_position['x'].values, mouse_position['y'].values, color)
    plt.xlabel("x position (cm)")
    plt.ylabel("y position (cm)")
    plt.title("Position of the mouse in the arena")
    if path2save == 'a':
        plot_curve = './plots/' + 'position_' + '.pdf'
    elif path2save == 'b':
        plot_curve = r'/home/grvite/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/DreamSpeed - Gilberto/figs/' + 'position_' + '.pdf'
    else:
        plot_curve = None
    if plot_curve is not None:
        plt.savefig(plot_curve)
    plt.show()

    return mouse_position
Example #17
def loadLFP(path,
            n_channels=90,
            channel=64,
            frequency=1250.0,
            precision='int16'):
    import neuroseries as nts
    f = open(path, 'rb')
    startoffile = f.seek(0, 0)
    endoffile = f.seek(0, 2)
    bytes_size = 2
    n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
    duration = n_samples / frequency
    interval = 1 / frequency
    f.close()
    fp = np.memmap(path, np.int16, 'r', shape=(n_samples, n_channels))

    if type(channel) is not list:
        timestep = np.arange(0, n_samples) / frequency
        return nts.Tsd(timestep, fp[:, channel], time_units='s')
    elif type(channel) is list:
        timestep = np.arange(0, n_samples) / frequency
        return nts.TsdFrame(timestep, fp[:, channel], time_units='s')
Example #18
def loadBunch_Of_LFP(path,
                     start,
                     stop,
                     n_channels=90,
                     channel=64,
                     frequency=1250.0,
                     precision='int16'):
    import neuroseries as nts
    bytes_size = 2
    start_index = int(start * frequency * n_channels * bytes_size)
    stop_index = int(stop * frequency * n_channels * bytes_size)
    fp = np.memmap(path,
                   np.int16,
                   'r',
                   start_index,
                   shape=(stop_index - start_index) // bytes_size)
    data = np.array(fp).reshape(len(fp) // n_channels, n_channels)

    if type(channel) is not list:
        timestep = np.arange(0, len(data)) / frequency
        return nts.Tsd(timestep, data[:, channel], time_units='s')
    elif type(channel) is list:
        timestep = np.arange(0, len(data)) / frequency
        return nts.TsdFrame(timestep, data[:, channel], time_units='s')
Example #19
        # pos_sws_ep 		= pos_sws_ep.intersect(nts.IntervalSet(pos_sws_ep['start'].iloc[0], pos_sws_ep['end'].iloc[0] + 60*60*1000*1000))

        # if pre_sws_ep.tot_length()/1000/1000/60 > 30.0 and pos_sws_ep.tot_length()/1000/1000/60 > 30.0:
        if (pre_ep.tot_length() / 1000 / 1000 / 60 > 3.0
                and post_ep.tot_length() / 1000 / 1000 / 60 > 3.0):
            for hd in range(2):
                index = np.where(hd_info_neuron == hd)[0]
                allpop = all_pop[index].copy()
                if allpop.shape[1] and allpop.shape[1] > 2:
                    eigen = compute_eigen(allpop)
                    ###############################################################################################################
                    # SHANK LOOP
                    ###############################################################################################################
                    for shank in np.unique(shankIndex):
                        index2 = np.where((shankIndex[index] == shank))[0]
                        prepop = nts.TsdFrame(pre_pop[index2].copy())
                        pospop = nts.TsdFrame(pos_pop[index2].copy())

                        pre_score = compute_score(prepop.copy(), eigen[:, index2])
                        pos_score = compute_score(pospop.copy(), eigen[:, index2])

                        prerip_score = compute_rip_score(
                            rip_tsd.restrict(pre_ep), pre_score, bins2)
                        posrip_score = compute_rip_score(
                            rip_tsd.restrict(post_ep), pos_score, bins2)

                        a = pd.DataFrame(index=prerip_score.index.values,
                                         data=gaussFilt(
                                             prerip_score.mean(1).values,
Example #20
sws_ep = loadEpoch(data_directory, 'sws')
rem_ep = loadEpoch(data_directory, 'rem')

# Next step is to load the angular value at each time step
# We need to load Mouse12-120806_PosHD.txt, which is a text file
# We can use the function genfromtxt of numpy that loads a simple text file
data = np.genfromtxt(data_directory + 'Mouse12-120806_PosHD.txt')
# Check your variable by typing
data
# It's an array, we can check the dimension by typing:
data.shape
# It has 40858 lines and 4 columns
# The columns are respectively [times | x position in the arena | y position in the arena | angular value of the head]
# So we can use the TsdFrame object of neuroseries as seen in main2.py
mouse_position = nts.TsdFrame(d=data[:, [1, 2, 3]],
                              t=data[:, 0],
                              time_units='s')
# Check your variable
mouse_position
# By default, TsdFrame does not take column names as input
mouse_position.columns
# But TsdFrame is a wrapper around pandas, and you can change column names in pandas
# So let's change the column names
mouse_position.columns = ['x', 'y', 'ang']

# It's good to always check the data by plotting them
# To see the position of the mouse in the arena during the session
# you can plot the x position versus the y position
import matplotlib.pyplot as plt
plt.figure()
plt.plot(mouse_position['x'].values, mouse_position['y'].values)
Example #21
def compute_population_correlation(session):
    # for session in sessions:
    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    print(session)

    store = pd.HDFStore("/mnt/DataGuillaume/population_activity_hd/" + session)
    rip_pop = store['rip']
    rem_pop = store['rem']
    wak_pop = store['wake']
    store.close()

    ###############################################################################################################
    # POPULATION CORRELATION FOR EACH RIPPLES
    ###############################################################################################################
    # matrix of distances between ripples, in seconds
    interval_mat = np.vstack(nts.TsdFrame(rip_pop).as_units(
        's').index.values) - nts.TsdFrame(rip_pop).as_units('s').index.values
    rip_corr = np.ones(interval_mat.shape) * np.nan
    # doing the upper part of the diagonal
    # rip_corr = np.eye(interval_mat.shape[0])
    # bad
    tmp = np.zeros_like(rip_corr)
    tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
    tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
    index = np.where(tmp == 2)

    for i, j in zip(index[0], index[1]):
        rip_corr[i, j] = scipy.stats.pearsonr(rip_pop.iloc[i].values,
                                              rip_pop.iloc[j].values)[0]
        rip_corr[j, i] = rip_corr[i, j]
        # print(rip_corr[i,j])

    allrip_corr = pd.DataFrame(index=interval_mat[index], data=rip_corr[index])
    rip_corr = pd.DataFrame(index=rip_pop.index.values,
                            data=rip_corr,
                            columns=rip_pop.index.values)

    np.fill_diagonal(rip_corr.values, 1.0)
    rip_corr = rip_corr.fillna(0)

    ###############################################################################################################
    # POPULATION CORRELATION FOR EACH THETA CYCLE OF REM
    ###############################################################################################################
    # compute all time interval for each ep of theta
    interval_mat = np.vstack(nts.TsdFrame(rem_pop).as_units(
        's').index.values) - nts.TsdFrame(rem_pop).as_units('s').index.values
    rem_corr = np.ones(interval_mat.shape) * np.nan
    # index = np.where(np.logical_and(interval_mat < 3.0, interval_mat >= 0.0))
    # rem_corr = np.eye(interval_mat.shape[0])
    # bad
    tmp = np.zeros_like(rem_corr)
    tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
    tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
    index = np.where(tmp == 2)

    for i, j in zip(index[0], index[1]):
        rem_corr[i, j] = scipy.stats.pearsonr(rem_pop.iloc[i].values,
                                              rem_pop.iloc[j].values)[0]
        rem_corr[j, i] = rem_corr[i, j]

    allrem_corr = pd.DataFrame(index=interval_mat[index], data=rem_corr[index])
    rem_corr = pd.DataFrame(index=rem_pop.index.values,
                            data=rem_corr,
                            columns=rem_pop.index.values)
    np.fill_diagonal(rem_corr.values, 1.0)
    rem_corr = rem_corr.fillna(0)

    ###############################################################################################################
    # POPULATION CORRELATION FOR EACH THETA CYCLE OF WAKE
    ###############################################################################################################
    # compute all time interval for each ep of theta
    interval_mat = np.vstack(nts.TsdFrame(wak_pop).as_units(
        's').index.values) - nts.TsdFrame(wak_pop).as_units('s').index.values
    wak_corr = np.ones(interval_mat.shape) * np.nan
    # index = np.where(np.logical_and(interval_mat < 3.0, interval_mat >= 0.0))
    # wak_corr = np.eye(interval_mat.shape[0])
    # bad
    tmp = np.zeros_like(wak_corr)
    tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
    tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
    index = np.where(tmp == 2)

    for i, j in zip(index[0], index[1]):
        wak_corr[i, j] = scipy.stats.pearsonr(wak_pop.iloc[i].values,
                                              wak_pop.iloc[j].values)[0]
        wak_corr[j, i] = wak_corr[i, j]

    allwak_corr = pd.DataFrame(index=interval_mat[index], data=wak_corr[index])
    wak_corr = pd.DataFrame(index=wak_pop.index.values,
                            data=wak_corr,
                            columns=wak_pop.index.values)
    np.fill_diagonal(wak_corr.values, 1.0)
    wak_corr = wak_corr.fillna(0)

    ###############################################################################################################
    # STORING
    ###############################################################################################################
    store = pd.HDFStore("/mnt/DataGuillaume/corr_pop_hd/" + session)
    store.put('rip_corr', rip_corr)
    store.put('allrip_corr', allrip_corr)
    store.put('wak_corr', wak_corr)
    store.put('allwak_corr', allwak_corr)
    store.put('rem_corr', rem_corr)
    store.put('allrem_corr', allrem_corr)
    store.close()
    print(time.perf_counter() - start_time, "seconds")
    return time.perf_counter() - start_time
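# The triu/tril trick above selects pairs (i, j) with 0 < j - i <= 300, i.e.
# an upper-triangular band next to the diagonal. A minimal standalone check
# (band half-width 3 instead of 300 for readability):
import numpy as np
n = 6
tmp = np.zeros((n, n))
tmp[np.triu_indices(n, 1)] += 1   # strict upper triangle
tmp[np.tril_indices(n, 3)] += 1   # at most 3 above the diagonal
i, j = np.where(tmp == 2)         # cells hit by both masks
assert ((j - i > 0) & (j - i <= 3)).all()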
Example #22
        end = start + binsize
        if ((start.values[0] >= first_spike)
                and (last_spike >= end.values[0])):
            spikes_in_interval = my_neuron.as_units(
                's').loc[start.values[0]:end.values[0]]
            firing_rate[i] = len(spikes_in_interval)
    firing_rate = nts.Tsd(t=np.arange(rem_start, rem_end, binsize),
                          d=firing_rate,
                          time_units='s')
    if j == HD_index[0]:
        sleep_spikes = firing_rate
    else:
        sleep_spikes = np.vstack([sleep_spikes, firing_rate])
sleep_spikes = sleep_spikes.transpose()
time_bins = np.arange(rem_start, rem_end, binsize)
sleep_spikes = nts.TsdFrame(t=time_bins, d=sleep_spikes, time_units='s')
sleep_spikes.columns = HD_index

#Compute tuning curves for each neuron using the wake epoch
mouse_position = np.genfromtxt(data_directory + 'Mouse12-120806_PosHD.txt')
mouse_HD = nts.TsdFrame(t=mouse_position[:, 0],
                        d=mouse_position[:, 3],
                        time_units='s')
wake_ep = loadEpoch(data_directory, 'wake')
wake_start = wake_ep.as_units('s')['start'].values[0]
wake_end = wake_ep.as_units('s')['end'].values[0]
duration = wake_end - wake_start
duration = duration + 0.1
num_points = duration / binsize
num_points = int(num_points)
head_direction = np.zeros(num_points)
Example #23
wake_ep = loadEpoch(data_directory, 'wake')
# The function will automatically search for the right file
# You can check your variables by typing them

# Next step is to load the angular value at each time step
# We need to load Mouse12-120806_PosHD.txt, which is a text file
# We can use the function genfromtxt of numpy that loads a simple text file
data = np.genfromtxt('../data_matlab/Mouse12-120806/Mouse12-120806_PosHD.txt')
# Check your variable by typing it
# It's an array, we can check the dimension by typing:
data.shape
# It has 40858 lines and 4 columns
# The columns are respectively [times | x position in the arena | y position in the arena | angular value of the head]
# So we can use the TsdFrame object of neuroseries as seen in main3.py
mouse_position = nts.TsdFrame(d=data[:, [1, 2, 3]],
                              t=data[:, 0],
                              time_units='s',
                              columns=['x', 'y', 'ang'])

# It's good to always check the data by plotting them
# To see the position of the mouse in the arena during the session
# you can plot the x position versus the y position

figure()
plot(mouse_position['x'].values, mouse_position['y'].values)
xlabel("x position (cm)")
ylabel("y position (cm)")
show()

# Now we are going to compute the tuning curve for all neurons during exploration
# The process of making a tuning curve has been covered in main3_tuningcurves.py
# So here we are gonna define a function that will be looped over each HD neuron
Example #24
# spikeshd 		= {k:spikes[k] for k in np.where(hd_info_neuron==1)[0] if k not in []}
# position 		= pd.read_csv(data_directory+session+"/"+session.split("/")[1] + ".csv", delimiter = ',', header = None, index_col = [0])
# angle 			= nts.Tsd(t = position.index.values, d = position[1].values, time_units = 's')
# tcurves 		= computeAngularTuningCurves(spikeshd, angle, wake_ep, nb_bins = 60, frequency = 1/0.0256)
# neurons 		= tcurves.idxmax().sort_values().index.values

####################################################################################################################
# POSITION X Y
####################################################################################################################
position = pd.read_csv(data_directory + session + "/" + session.split("/")[1] +
                       "_XY.csv",
                       delimiter=',',
                       header=None,
                       index_col=[0])
position = nts.TsdFrame(t=position.index.values,
                        d=position.values,
                        time_units='s')

spikesnohd = {
    k: spikes[k]
    for k in np.where(hd_info_neuron == 0)[0] if k not in []
}

placefield, extent = computePlaceFields(spikesnohd,
                                        position,
                                        wake_ep,
                                        nb_bins=40,
                                        frequency=1 / 0.0256)

####################################################################################################################
# PHASE SPIKE NO HD
Example #25
                               session.split("/")[1] + ".csv",
                               delimiter=',',
                               header=None,
                               index_col=[0])
        angle = nts.Tsd(t=position.index.values,
                        d=position[1].values,
                        time_units='s')
        wakangle = pd.Series(index=np.arange(len(bins) - 1))
        tmp = angle.groupby(
            np.digitize(angle.as_units('ms').index.values, bins) - 1).mean()
        wakangle.loc[tmp.index] = tmp
        wakangle.index = data.index
        wakangle = wakangle.interpolate(method='nearest')

        data = nts.TsdFrame(t=data.index.values,
                            d=data.values,
                            time_units='ms')

        abins = np.linspace(0, 2 * np.pi, 61)

        wakangle = wakangle.dropna()
        data = data.loc[wakangle.index]

        index = np.digitize(wakangle.values, abins) - 1

        a = data.groupby(index).mean()

        data = data.restrict(theta_ep)

        imap = Isomap(n_neighbors=100,
                      n_components=2).fit_transform(data.values[0:20000])
Example #26
def compute_population_correlation(nuc, session):
	start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
	print(session)

	store 			= pd.HDFStore("/mnt/DataGuillaume/population_activity/"+session+".h5")
	rip_pop 		= store['rip']
	rem_pop 		= store['rem']
	wak_pop 		= store['wake']	
	store.close()

	# WHICH columns to keep
	mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
	tmp = mappings[mappings.index.str.contains(session)]['nucleus'] == nuc
	neurons = tmp.index.values[np.where(tmp)[0]]
	idx = np.array([int(n.split("_")[1]) for n in neurons])
	rip_pop = rip_pop[idx]
	rem_pop = rem_pop[idx]
	wak_pop = wak_pop[idx]


	###############################################################################################################
	# POPULATION CORRELATION FOR EACH RIPPLES
	###############################################################################################################
	# matrix of distances between ripples, in seconds
	interval_mat = np.vstack(nts.TsdFrame(rip_pop).as_units('s').index.values) - nts.TsdFrame(rip_pop).as_units('s').index.values
	rip_corr = np.ones(interval_mat.shape)*np.nan
	# doing the upper part of the diagonal
	# rip_corr = np.eye(interval_mat.shape[0])
	# bad
	tmp = np.zeros_like(rip_corr)
	tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
	tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
	index = np.where(tmp == 2)
	
	for i, j in zip(index[0], index[1]):
		rip_corr[i,j] = scipy.stats.pearsonr(rip_pop.iloc[i].values, rip_pop.iloc[j].values)[0]
		rip_corr[j,i] = rip_corr[i,j]
		# print(rip_corr[i,j])

	allrip_corr = pd.DataFrame(index = interval_mat[index], data = rip_corr[index])
	rip_corr = pd.DataFrame(index = rip_pop.index.values, data = rip_corr, columns = rip_pop.index.values)		
	
	np.fill_diagonal(rip_corr.values, 1.0)
	rip_corr = rip_corr.fillna(0)

	###############################################################################################################
	# POPULATION CORRELATION FOR EACH THETA CYCLE OF REM
	###############################################################################################################
	# compute all time interval for each ep of theta
	interval_mat = np.vstack(nts.TsdFrame(rem_pop).as_units('s').index.values) - nts.TsdFrame(rem_pop).as_units('s').index.values
	rem_corr = np.ones(interval_mat.shape)*np.nan
	# index = np.where(np.logical_and(interval_mat < 3.0, interval_mat >= 0.0))
	# rem_corr = np.eye(interval_mat.shape[0])
	# bad
	tmp = np.zeros_like(rem_corr)
	tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
	tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
	index = np.where(tmp == 2)

	for i, j in zip(index[0], index[1]):
		rem_corr[i,j] = scipy.stats.pearsonr(rem_pop.iloc[i].values, rem_pop.iloc[j].values)[0]
		rem_corr[j,i] = rem_corr[i,j]

	allrem_corr = pd.DataFrame(index = interval_mat[index], data = rem_corr[index])
	rem_corr = pd.DataFrame(index = rem_pop.index.values, data = rem_corr, columns = rem_pop.index.values)		
	np.fill_diagonal(rem_corr.values, 1.0)
	rem_corr = rem_corr.fillna(0)

	###############################################################################################################
	# POPULATION CORRELATION FOR EACH THETA CYCLE OF WAKE
	###############################################################################################################
	# compute all time interval for each ep of theta
	interval_mat = np.vstack(nts.TsdFrame(wak_pop).as_units('s').index.values) - nts.TsdFrame(wak_pop).as_units('s').index.values
	wak_corr = np.ones(interval_mat.shape)*np.nan
	# index = np.where(np.logical_and(interval_mat < 3.0, interval_mat >= 0.0))
	# wak_corr = np.eye(interval_mat.shape[0])
	# bad
	tmp = np.zeros_like(wak_corr)
	tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
	tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
	index = np.where(tmp == 2)
		
	for i, j in zip(index[0], index[1]):
		wak_corr[i,j] = scipy.stats.pearsonr(wak_pop.iloc[i].values, wak_pop.iloc[j].values)[0]
		wak_corr[j,i] = wak_corr[i,j]

	allwak_corr = pd.DataFrame(index = interval_mat[index], data = wak_corr[index])
	wak_corr = pd.DataFrame(index = wak_pop.index.values, data = wak_corr, columns = wak_pop.index.values)		
	np.fill_diagonal(wak_corr.values, 1.0)
	wak_corr = wak_corr.fillna(0)
	
	###############################################################################################################
	# STORING
	###############################################################################################################
	store 			= pd.HDFStore("/mnt/DataGuillaume/corr_pop_nucleus/"+nuc+"/"+session+".h5")
	store.put('rip_corr', rip_corr)
	store.put('allrip_corr', allrip_corr)
	store.put('wak_corr', wak_corr)
	store.put('allwak_corr', allwak_corr)
	store.put('rem_corr', rem_corr)
	store.put('allrem_corr', allrem_corr)	
	store.close()
	print(time.perf_counter() - start_time, "seconds")
	return time.perf_counter() - start_time
Example #27
    spikes = {
        n: spikes[n]
        for n in spikes.keys() if len(spikes[n].restrict(sws_ep))
    }
    n_neuron = len(spikes)
    n_channel, fs, shank_to_channel = loadXML(data_directory + session + "/" +
                                              session.split("/")[1] + '.xml')
    # lfp_hpc 		= loadLFP(data_directory+session+"/"+session.split("/")[1]+'.eeg', n_channel, hpc_channel, float(fs), 'int16')
    # lfp_hpc 		= downsample(lfp_hpc, 1, 5)

    store = pd.HDFStore("../data/phase_spindles/" + session.split("/")[1] +
                        ".lfp")
    phase_hpc = nts.Tsd(store['phase_hpc_spindles'])
    phase_thl = nts.Tsd(store['phase_thl_spindles'])
    lfp_hpc = nts.Tsd(store['lfp_hpc'])
    lfp_thl = nts.TsdFrame(store['lfp_thl'])
    store.close()

    ##################################################################################################
    # DETECTION UP/DOWN States
    ##################################################################################################
    # print("up/down states")
    # # bins of 5000 us
    # bins 			= np.floor(np.arange(lfp_hpc.start_time(), lfp_hpc.end_time()+5000, 5000))
    # total_value 	= nts.Tsd(bins[0:-1]+(bins[1]-bins[0])/2, np.zeros(len(bins)-1)).restrict(sws_ep)
    # # each shank
    # for s in shankStructure['thalamus']:
    # 	neuron_index = np.where(shank == s)[0]
    # 	if len(neuron_index):
    # 		tmp = {i:spikes[i] for i in neuron_index}
    # 		frate			= getFiringRate(tmp, bins)
Example #28
subplot(212)
plot(tuning_curve)
show()

# Ok it's ugly but who cares
# It's just random data

# This second example is about constructing a place field
# so this time, the spike times are realigned to a 2d position
# Let's imagine the animal is in a circular environment this time
xpos = np.cos(angle.values) + np.random.randn(len(angle)) * 0.05
ypos = np.sin(angle.values) + np.random.randn(len(angle)) * 0.05

# We can stack the x,y position in a TsdFrame
position = np.vstack((xpos, ypos)).T
position = nts.TsdFrame(t=angle.times(), d=position, columns=['x', 'y'])

# and we can plot it
figure()
plot(position['x'], position['y'])
show()

# Now it's the same as before
# except the histogram is in 2d
position_spike = position.realign(spikes)
xbins = np.linspace(xpos.min(), xpos.max() + 0.01, 10)
ybins = np.linspace(ypos.min(), ypos.max() + 0.01, 10)
spike_count2, _, _ = np.histogram2d(position_spike['y'], position_spike['x'],
                                    [ybins, xbins])
occupancy2, _, _ = np.histogram2d(position['y'], position['x'], [ybins, xbins])
spike_count2 = spike_count2 / (occupancy2 + 1)
Example #29
# If you have timestamps associated with values, for example 15 points of EEG over 15 seconds
my_eeg = np.sin(np.arange(0, 15))
# You use a Tsd (Time series data)
my_eeg = nts.Tsd(t=np.arange(15), d=my_eeg, time_units='s')
# Observe your variable
my_eeg
# And how the software transforms your timestamps in seconds into timestamps in microseconds
# You can plot your data
plot(my_eeg, 'o-')
show()

# Now if you are using a fancy probe and recording, for example, 3 channels at the same time
# You use a TsdFrame
my_channels = np.random.rand(15, 3)
my_channels = nts.TsdFrame(t=np.arange(15), d=my_channels, time_units='s')
# You can plot your data
# It's always important to look at your data in the eyes
plot(my_channels, 'o-')
show()
# Yes it's random...

# If I want the data of my recording between second 5 and second 12
my_spike.as_units('s').loc[5:12]
my_eeg.as_units('s').loc[5:12]
my_channels.as_units('s').loc[5:12]
# Should be the same in milliseconds
my_spike.as_units('ms').loc[5000:12000]
my_eeg.as_units('ms').loc[5000:12000]
my_channels.as_units('ms').loc[5000:12000]
# And in microseconds, which is the default mode
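# Completing the line above: the default index is in microseconds, so the
# same 5-12 s window reads as
my_eeg.loc[5000000:12000000]
my_channels.loc[5000000:12000000]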
Example #30
def compute_pop_pca(session):
    data_directory = '/mnt/DataGuillaume/MergedData/'
    import numpy as np
    import scipy.io
    import scipy.stats
    import _pickle as cPickle
    import time
    import os, sys
    import neuroseries as nts
    from functions import loadShankStructure, loadSpikeData, loadEpoch, loadThetaMod, loadSpeed, loadXML, loadRipples, loadLFP, downsample, getPeaksandTroughs, butter_bandpass_filter
    import pandas as pd

    # to know which neurons to keep
    datasets = np.loadtxt(data_directory + 'datasets_ThalHpc.list',
                          delimiter='\n',
                          dtype=str,
                          comments='#')
    theta_mod, theta_ses = loadThetaMod(
        '/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle',
        datasets,
        return_index=True)
    theta = pd.DataFrame(index=theta_ses['rem'],
                         columns=['phase', 'pvalue', 'kappa'],
                         data=theta_mod['rem'])
    tmp2 = theta.index[theta.isnull().any(axis=1)].values
    tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values
    tmp = np.unique(np.concatenate([tmp2, tmp3]))
    theta_modth = theta.drop(tmp, axis=0)
    neurons_index = theta_modth.index.values

    bins1 = np.arange(-1005, 1010, 25) * 1000
    times = np.floor(
        ((bins1[0:-1] + (bins1[1] - bins1[0]) / 2) / 1000)).astype('int')
    premeanscore = {
        i: {
            'rem': pd.DataFrame(index=[], columns=['mean', 'std']),
            'rip': pd.DataFrame(index=times, columns=[])
        }
        for i in range(3)
    }
    posmeanscore = {
        i: {
            'rem': pd.DataFrame(index=[], columns=['mean', 'std']),
            'rip': pd.DataFrame(index=times, columns=[])
        }
        for i in range(3)
    }
    bins2 = np.arange(-1012.5, 1025, 25) * 1000
    tsmax = {i: pd.DataFrame(columns=['pre', 'pos']) for i in range(3)}

    # for session in datasets:
    # for session in datasets[0:15]:
    # for session in ['Mouse12/Mouse12-120815']:
    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    print(session)
    generalinfo = scipy.io.loadmat(data_directory + session +
                                   '/Analysis/GeneralInfo.mat')
    shankStructure = loadShankStructure(generalinfo)
    if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
        hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
    else:
        hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
    spikes, shank = loadSpikeData(
        data_directory + session + '/Analysis/SpikeData.mat',
        shankStructure['thalamus'])
    wake_ep = loadEpoch(data_directory + session, 'wake')
    sleep_ep = loadEpoch(data_directory + session, 'sleep')
    sws_ep = loadEpoch(data_directory + session, 'sws')
    rem_ep = loadEpoch(data_directory + session, 'rem')
    sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
    sws_ep = sleep_ep.intersect(sws_ep)
    rem_ep = sleep_ep.intersect(rem_ep)
    speed = loadSpeed(data_directory + session +
                      '/Analysis/linspeed.mat').restrict(wake_ep)
    speed_ep = nts.IntervalSet(
        speed[speed > 2.5].index.values[0:-1],
        speed[speed > 2.5].index.values[1:]).drop_long_intervals(
            26000).merge_close_intervals(50000)
    wake_ep = wake_ep.intersect(speed_ep).drop_short_intervals(3000000)
    n_channel, fs, shank_to_channel = loadXML(data_directory + session + "/" +
                                              session.split("/")[1] + '.xml')
    rip_ep, rip_tsd = loadRipples(data_directory + session)
    hd_info = scipy.io.loadmat(data_directory + session +
                               '/Analysis/HDCells.mat')['hdCellStats'][:, -1]
    hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
    all_neurons = np.array(list(spikes.keys()))
    mod_neurons = np.array([
        int(n.split("_")[1]) for n in neurons_index
        if session.split("/")[1] in n
    ])
    if len(sleep_ep) > 1:
        store = pd.HDFStore("/mnt/DataGuillaume/population_activity_25ms/" +
                            session.split("/")[1] + ".h5")
        # all_pop       = store['allwake']
        pre_pop = store['presleep']
        pos_pop = store['postsleep']
        store.close()

        store = pd.HDFStore("/mnt/DataGuillaume/population_activity_100ms/" +
                            session.split("/")[1] + ".h5")
        all_pop = store['allwake']
        # pre_pop       = store['presleep']
        # pos_pop       = store['postsleep']
        store.close()

        def compute_eigen(popwak):
            popwak = popwak - popwak.mean(0)
            popwak = popwak / (popwak.std(0) + 1e-8)
            from sklearn.decomposition import PCA
            pca = PCA(n_components=popwak.shape[1])
            xy = pca.fit_transform(popwak.values)
            pc = pca.explained_variance_ > (
                1 + np.sqrt(1 / (popwak.shape[0] / popwak.shape[1])))**2.0
            eigen = pca.components_[pc]
            lambdaa = pca.explained_variance_[pc]
            return eigen, lambdaa

        def compute_score(ep_pop, eigen, lambdaa, thr):
            ep_pop = ep_pop - ep_pop.mean(0)
            ep_pop = ep_pop / (ep_pop.std(0) + 1e-8)
            a = ep_pop.values
            score = np.zeros(len(ep_pop))
            for i in range(len(eigen)):
                if lambdaa[i] >= thr:
                    score += (np.dot(a, eigen[i])**2.0 -
                              np.dot(a**2.0, eigen[i]**2.0))
            score = nts.Tsd(t=ep_pop.index.values, d=score)
            return score

        def compute_rip_score(tsd, score, bins):
            times = np.floor(
                ((bins[0:-1] + (bins[1] - bins[0]) / 2) / 1000)).astype('int')
            rip_score = pd.DataFrame(index=times, columns=[])
            for r, i in zip(tsd.index.values, range(len(tsd))):
                xbins = (bins + r).astype('int')
                y = score.groupby(
                    pd.cut(score.index.values, bins=xbins,
                           labels=times)).mean()
                if ~y.isnull().any():
                    rip_score[r] = y

            return rip_score

        def get_xmin(ep, minutes):
            duree = (ep['end'] - ep['start']) / 1000 / 1000 / 60
            tmp = ep.iloc[np.where(np.ceil(duree.cumsum()) <= minutes + 1)[0]]
            return nts.IntervalSet(tmp['start'], tmp['end'])

        pre_ep = nts.IntervalSet(sleep_ep['start'][0], sleep_ep['end'][0])
        post_ep = nts.IntervalSet(sleep_ep['start'][1], sleep_ep['end'][1])

        pre_sws_ep = sws_ep.intersect(pre_ep)
        pos_sws_ep = sws_ep.intersect(post_ep)
        pre_sws_ep = get_xmin(pre_sws_ep.iloc[::-1], 30)
        pos_sws_ep = get_xmin(pos_sws_ep, 30)

        if (pre_sws_ep.tot_length('s') / 60 > 5.0
                and pos_sws_ep.tot_length('s') / 60 > 5.0):
            for hd in range(3):
                if hd == 0 or hd == 2:
                    index = np.where(hd_info_neuron == 0)[0]
                elif hd == 1:
                    index = np.where(hd_info_neuron == 1)[0]
                if hd == 0:
                    index = np.intersect1d(index, mod_neurons)
                elif hd == 2:
                    index = np.intersect1d(
                        index, np.setdiff1d(all_neurons, mod_neurons))

                allpop = all_pop[index].copy()
                prepop = nts.TsdFrame(pre_pop[index].copy())
                pospop = nts.TsdFrame(pos_pop[index].copy())
                # prepop25ms = nts.TsdFrame(pre_pop_25ms[index].copy())
                # pospop25ms = nts.TsdFrame(pos_pop_25ms[index].copy())
                if allpop.shape[1] and allpop.shape[1] > 5:
                    eigen, lambdaa = compute_eigen(allpop)
                    seuil = 1.2
                    if np.sum(lambdaa > seuil):
                        pre_score = compute_score(prepop, eigen, lambdaa,
                                                  seuil)
                        pos_score = compute_score(pospop, eigen, lambdaa,
                                                  seuil)

                        prerip_score = compute_rip_score(
                            rip_tsd.restrict(pre_sws_ep), pre_score, bins1)
                        posrip_score = compute_rip_score(
                            rip_tsd.restrict(pos_sws_ep), pos_score, bins1)

                        # pre_score_25ms    = compute_score(prepop25ms, eigen)
                        # pos_score_25ms    = compute_score(pospop25ms, eigen)
                        # prerip25ms_score = compute_rip_score(rip_tsd.restrict(pre_ep),  pre_score_25ms, bins2)
                        # posrip25ms_score = compute_rip_score(rip_tsd.restrict(post_ep), pos_score_25ms,  bins2)
                        # prerip25ms_score = prerip25ms_score - prerip25ms_score.mean(0)
                        # posrip25ms_score = posrip25ms_score - posrip25ms_score.mean(0)
                        # prerip25ms_score = prerip25ms_score / prerip25ms_score.std(0)
                        # posrip25ms_score = posrip25ms_score / posrip25ms_score.std(0)
                        # prerip25ms_score = prerip25ms_score.loc[-500:500]
                        # posrip25ms_score = posrip25ms_score.loc[-500:500]
                        # sys.exit()
                        # tmp = pd.concat([pd.DataFrame(prerip25ms_score.idxmax().values, columns = ['pre']),pd.DataFrame(posrip25ms_score.idxmax().values, columns = ['pos'])],axis = 1)
                        # tmp = pd.DataFrame(data = [[prerip25ms_score.mean(1).idxmax(), posrip25ms_score.mean(1).idxmax()]], columns = ['pre', 'pos'])
                        # tsmax[hd] = tsmax[hd].append(tmp, ignore_index = True)

                        premeanscore[hd]['rip'][session] = prerip_score.mean(1)
                        posmeanscore[hd]['rip'][session] = posrip_score.mean(1)

                        # if len(rem_ep.intersect(pre_ep)) and len(rem_ep.intersect(post_ep)):
                        #   premeanscore[hd]['rem'].loc[session,'mean'] = pre_score.restrict(rem_ep.intersect(pre_ep)).mean()
                        #   posmeanscore[hd]['rem'].loc[session,'mean'] = pos_score.restrict(rem_ep.intersect(post_ep)).mean()
                        #   premeanscore[hd]['rem'].loc[session,'std'] =  pre_score.restrict(rem_ep.intersect(pre_ep)).std()
                        #   posmeanscore[hd]['rem'].loc[session,'std'] =  pos_score.restrict(rem_ep.intersect(post_ep)).std()

    return [premeanscore, posmeanscore, tsmax]