Example n. 1
def computeAngularVelocityTuningCurves(spikes,
                                       angle,
                                       ep,
                                       nb_bins=20,
                                       bin_size=100000):
    tmp = pd.Series(index=angle.index.values, data=np.unwrap(angle.values))
    tmp2 = tmp.rolling(window=100,
                       win_type='gaussian',
                       center=True,
                       min_periods=1).mean(std=30.0)
    time_bins = np.arange(tmp.index[0], tmp.index[-1] + bin_size,
                          bin_size)  # assuming microseconds
    index = np.digitize(tmp2.index.values, time_bins)
    tmp3 = tmp2.groupby(index).mean()
    tmp3.index = time_bins[np.unique(index) - 1] + 50000
    tmp3 = nts.Tsd(tmp3)
    tmp4 = np.diff(tmp3.values) / np.diff(tmp3.as_units('s').index.values)
    velocity = nts.Tsd(t=tmp3.index.values[1:], d=tmp4)
    velocity = velocity.restrict(ep)
    bins = np.linspace(-3 * np.pi / 2, 3 * np.pi / 2, nb_bins)
    idx = bins[0:-1] + np.diff(bins) / 2
    velo_curves = pd.DataFrame(index=idx, columns=np.arange(len(spikes)))
    for k in spikes:
        spks = spikes[k]
        spks = spks.restrict(ep)
        speed_spike = velocity.realign(spks)
        spike_count, bin_edges = np.histogram(speed_spike, bins)
        occupancy, _ = np.histogram(velocity, bins)
        spike_count = spike_count / (occupancy + 1)
        velo_curves[k] = spike_count * (1 / (bin_size * 1e-6))

    return velo_curves
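
The core of the tuning-curve computation above is an occupancy-normalised histogram: spikes are binned by the behavioural variable (here angular velocity) and divided by the time spent in each bin. A minimal sketch of that step with plain NumPy on synthetic data; the array names are illustrative and not part of the original code:

import numpy as np

# Synthetic behavioural samples and spike-aligned values (rad/s)
rng = np.random.default_rng(0)
velocity_samples = rng.uniform(-4, 4, size=10_000)   # value at every time bin
spike_values = rng.uniform(-4, 4, size=500)          # value at each spike time

bin_duration = 0.1                                   # seconds covered by each behavioural sample
bins = np.linspace(-3 * np.pi / 2, 3 * np.pi / 2, 20)

spike_count, _ = np.histogram(spike_values, bins)    # spikes per velocity bin
occupancy, _ = np.histogram(velocity_samples, bins)  # samples per velocity bin

# Firing rate in Hz: spikes per bin divided by time spent there (+1 avoids division by zero)
rate = spike_count / ((occupancy + 1) * bin_duration)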
Example n. 2
def getPeaksandTroughs(lfp, min_points):
    """	 
		At 250Hz (1250/5), 2 troughs cannont be closer than 20 (min_points) points (if theta reaches 12Hz);		
	"""
    import neuroseries as nts
    import scipy.signal
    if isinstance(lfp, nts.time_series.Tsd):
        troughs = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmin(
            lfp.values, order=min_points)[0]],
                          time_units='us')
        peaks = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmax(
            lfp.values, order=min_points)[0]],
                        time_units='us')
        tmp = nts.Tsd(
            troughs.realign(peaks, align='next').as_series().drop_duplicates(
                'first'))  # eliminate double peaks
        peaks = peaks[tmp.index]
        tmp = nts.Tsd(
            peaks.realign(troughs, align='prev').as_series().drop_duplicates(
                'first'))  # eliminate double troughs
        troughs = troughs[tmp.index]
        return (peaks, troughs)
    elif isinstance(lfp, nts.time_series.TsdFrame):
        peaks = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))
        troughs = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))
        for i in lfp.keys():
            peaks[i], troughs[i] = getPeaksandTroughs(lfp[i], min_points)
        return (peaks, troughs)
def hilbert(lfp, deg=False):
    """
    lfp : lfp as an nts.Tsd

    return 
    power : nts.Tsd
    phase : nts.Tsd
    """
    xa = scipy.signal.hilbert(lfp)
    power = nts.Tsd(np.array(lfp.index), np.abs(xa)**2)
    phase = nts.Tsd(np.array(lfp.index), np.angle(xa, deg=deg))
    return power, phase
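
scipy.signal.hilbert returns the analytic signal, whose squared magnitude is the instantaneous power and whose angle is the instantaneous phase, exactly as wrapped above. A self-contained sketch on a synthetic 8 Hz sine, without the neuroseries wrapper:

import numpy as np
import scipy.signal

fs = 1250.0                          # sampling rate in Hz
t = np.arange(0, 2, 1 / fs)          # 2 s of samples
sig = np.sin(2 * np.pi * 8 * t)      # 8 Hz oscillation

xa = scipy.signal.hilbert(sig)       # analytic signal
power = np.abs(xa) ** 2              # instantaneous power
phase = np.angle(xa)                 # instantaneous phase in radians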
Example n. 4
def aucorr_plot(data, nbins, binsize, epochstr, path2save):
    """Plot autocorrelogram"""
    from matplotlib.pyplot import hlines
    plt.figure(figsize=(12, 8))
    #plt.plot(aucorr) # Plot the raw version
    times = np.arange(0, binsize *
                      (nbins + 1), binsize) - (nbins * binsize) / 2
    data = nts.Tsd(t=times, d=data, time_units='ms')
    plt.plot(data.as_units('ms'))  # Plot the smoothed version
    plt.title("Autocorrelogram")
    plt.xlabel("time (ms)")
    #middle horizontal line
    #hlines (meanfiring, 0,  nbins, 'g', label = 'mean firing rate')
    hlines(data.max() / 2,
           0 - nbins * binsize / 2,
           nbins * binsize / 2,
           'r',
           label='half point')
    if path2save == 'a':
        autocorrelogram = './plots/' + 'autocorrelogram_' + str(
            neuro_num) + '_' + epochstr + '.pdf'
    elif path2save == 'b':
        autocorrelogram = r'cd /home/grvite/Dropbox (Peyrache Lab)/Peyrache Lab Team Folder/Projects/DreamSpeed - Gilberto/figs/' + 'autocorrelogram_' + str(
            neuro_num) + '_' + epochstr + '.pdf'
    plt.savefig(autocorrelogram)
Example n. 5
def smooth_corr(aucorr,
                nbins,
                binsize,
                meanfiring,
                window=7,
                stdv=5.0,
                plot=False):
    aucorr = aucorr - meanfiring
    dfa = aucorr[0:int(nbins / 2)]
    dfa = pd.DataFrame(dfa).rolling(window=window,
                                    win_type='gaussian',
                                    center=True,
                                    min_periods=1).mean(std=stdv)
    dfb = np.flipud(aucorr[int(nbins / 2) + 1::])
    dfb = pd.DataFrame(dfb).rolling(window=window,
                                    win_type='gaussian',
                                    center=True,
                                    min_periods=1).mean(std=stdv)
    #array = np.append((dfa.values),0)
    arrayt = np.append(np.append((dfa.values), 0), np.flipud(dfb.values))
    if plot:
        #Make a Tsd
        times = np.arange(0, binsize *
                          (nbins + 1), binsize) - (nbins * binsize) / 2
        ndf = nts.Tsd(t=times, d=arrayt / meanfiring)
        ndf.plot()
    return arrayt
Example n. 6
def lfp(start,
        stop,
        n_channels=90,
        channel=64,
        frequency=1250.0,
        precision='int16',
        verbose=False):

    p = session + ".lfp"
    if verbose:
        print('Load LFP from ' + p)
    # From Guillaume viejo
    import neuroseries as nts
    bytes_size = 2
    start_index = int(start * frequency * n_channels * bytes_size)
    stop_index = int(stop * frequency * n_channels * bytes_size)
    #In order not to read after the file
    if stop_index > os.path.getsize(p): stop_index = os.path.getsize(p)
    fp = np.memmap(p,
                   precision,
                   'r',
                   start_index,
                   shape=(stop_index - start_index) // bytes_size)
    data = np.array(fp).reshape(len(fp) // n_channels, n_channels)

    if type(channel) is not list:
        timestep = np.arange(0, len(data)) / frequency + start
        return nts.Tsd(timestep, data[:, channel], time_units='s')
    elif type(channel) is list:
        timestep = np.arange(0, len(data)) / frequency + start
        return nts.TsdFrame(timestep, data[:, channel], time_units='s')
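
The index arithmetic above maps a time window in seconds to byte offsets in an interleaved int16 binary file: each sample frame holds n_channels values of bytes_size bytes, so offset = time * frequency * n_channels * bytes_size. A minimal sketch that writes a small synthetic interleaved file and memory-maps one second of it; the file name and sizes are made up for illustration:

import numpy as np

n_channels, frequency, bytes_size = 4, 1250.0, 2
# Write 10 s of synthetic interleaved int16 samples
n_frames = int(10 * frequency)
samples = np.random.randint(-1000, 1000, size=n_frames * n_channels).astype(np.int16)
samples.tofile('synthetic.lfp')

start, stop = 2.0, 3.0                                    # seconds
start_index = int(start * frequency * n_channels * bytes_size)
stop_index = int(stop * frequency * n_channels * bytes_size)

fp = np.memmap('synthetic.lfp', np.int16, 'r', start_index,
               shape=(stop_index - start_index) // bytes_size)
data = np.array(fp).reshape(-1, n_channels)               # one column per channel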
Example n. 7
def computeSpeedTuningCurves(spikes,
                             position,
                             ep,
                             bin_size=0.1,
                             nb_bins=20,
                             speed_max=0.4):
    time_bins = np.arange(position.index[0],
                          position.index[-1] + bin_size * 1e6, bin_size * 1e6)
    index = np.digitize(position.index.values, time_bins)
    tmp = position.groupby(index).mean()
    tmp.index = time_bins[np.unique(index) - 1] + (bin_size * 1e6) / 2
    distance = np.sqrt(
        np.power(np.diff(tmp['x']), 2) + np.power(np.diff(tmp['z']), 2))
    speed = nts.Tsd(t=tmp.index.values[0:-1] + bin_size / 2,
                    d=distance / bin_size)
    speed = speed.restrict(ep)
    bins = np.linspace(0, speed_max, nb_bins)
    idx = bins[0:-1] + np.diff(bins) / 2
    speed_curves = pd.DataFrame(index=idx, columns=np.arange(len(spikes)))
    for k in spikes:
        spks = spikes[k]
        spks = spks.restrict(ep)
        speed_spike = speed.realign(spks)
        spike_count, bin_edges = np.histogram(speed_spike, bins)
        occupancy, _ = np.histogram(speed, bins)
        spike_count = spike_count / (occupancy + 1)
        speed_curves[k] = spike_count / bin_size

    return speed_curves
Example n. 8
def loadLFP(path,
            n_channels=90,
            channel=64,
            frequency=1250.0,
            precision='int16'):
    import neuroseries as nts
    if type(channel) is not list:
        f = open(path, 'rb')
        startoffile = f.seek(0, 0)
        endoffile = f.seek(0, 2)
        bytes_size = 2
        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        duration = n_samples / frequency
        interval = 1 / frequency
        f.close()
        with open(path, 'rb') as f:
            data = np.fromfile(f, np.int16).reshape(
                (n_samples, n_channels))[:, channel]
        timestep = np.arange(0, len(data)) / frequency
        return nts.Tsd(timestep, data, time_units='s')
    elif type(channel) is list:
        f = open(path, 'rb')
        startoffile = f.seek(0, 0)
        endoffile = f.seek(0, 2)
        bytes_size = 2

        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        duration = n_samples / frequency
        f.close()
        with open(path, 'rb') as f:
            data = np.fromfile(f, np.int16).reshape(
                (n_samples, n_channels))[:, channel]
        timestep = np.arange(0, len(data)) / frequency
        return nts.TsdFrame(timestep, data, time_units='s')
Example n. 9
def computeAngularVelocity(spikes, angle, ep, nb_bins=20, bin_size=100000):
    tmp = pd.Series(index=angle.index.values, data=np.unwrap(angle.values))
    tmp2 = tmp.rolling(window=100,
                       win_type='gaussian',
                       center=True,
                       min_periods=1).mean(std=30.0)
    time_bins = np.arange(tmp.index[0], tmp.index[-1] + bin_size,
                          bin_size)  # assuming microseconds
    index = np.digitize(tmp2.index.values, time_bins)
    tmp3 = tmp2.groupby(index).mean()
    tmp3.index = time_bins[np.unique(index) - 1] + 50000
    tmp3 = nts.Tsd(tmp3)
    tmp4 = np.diff(tmp3.values) / np.diff(tmp3.as_units('s').index.values)
    velocity = nts.Tsd(t=tmp3.index.values[1:], d=tmp4)
    velocity = velocity.restrict(ep)
    return velocity
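
The velocity above is just the first difference of the unwrapped angle divided by the time step: np.unwrap removes the 2*pi jumps so the derivative is continuous across the 0/2*pi boundary. A short self-contained sketch on synthetic data:

import numpy as np

dt = 0.1                                     # seconds between samples
t = np.arange(0, 10, dt)
angle = (0.5 * t) % (2 * np.pi)              # head direction wrapped to [0, 2*pi)

unwrapped = np.unwrap(angle)                 # remove the 2*pi discontinuities
angular_velocity = np.diff(unwrapped) / dt   # rad/s, ~0.5 everywhere here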
Example n. 10
def computeFrateAng(spikes, angle, ep, nb_bins=180, frequency=120.0):
    '''Computes the angular tuning curves without normalising to occupancy.
    It will essentially give you the total spike count for each angular position.
    '''

    bins = np.linspace(0, 2 * np.pi, nb_bins)
    idx = bins[0:-1] + np.diff(bins) / 2
    tuning_curves = pd.DataFrame(index=idx, columns=np.arange(len(spikes)))
    angle = angle.restrict(ep)
    # Smoothing the angle here
    tmp = pd.Series(index=angle.index.values, data=np.unwrap(angle.values))
    tmp2 = tmp.rolling(window=50,
                       win_type='gaussian',
                       center=True,
                       min_periods=1).mean(std=10.0)
    angle = nts.Tsd(tmp2 % (2 * np.pi))
    for k in spikes:
        spks = spikes[k]
        # true_ep         = nts.IntervalSet(start = np.maximum(angle.index[0], spks.index[0]), end = np.minimum(angle.index[-1], spks.index[-1]))
        spks = spks.restrict(ep)
        angle_spike = angle.restrict(ep).realign(spks)
        spike_count, bin_edges = np.histogram(angle_spike, bins)
        occupancy, _ = np.histogram(angle, bins)
        tuning_curves[k] = spike_count

    return tuning_curves
def nts_smooth(y, m, std):
    g = scipy.signal.gaussian(m, std)
    g = g / g.sum()

    conv = np.convolve(y.values, g, 'same')

    y = nts.Tsd(y.index.values, conv)
    return y
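
nts_smooth convolves the series with a unit-area Gaussian kernel. Note that scipy.signal.gaussian has moved to scipy.signal.windows.gaussian in recent SciPy releases; a minimal kernel-smoothing sketch on a plain NumPy array using that namespace:

import numpy as np
from scipy.signal.windows import gaussian

y = np.random.randn(1000).cumsum()   # synthetic noisy trace

g = gaussian(51, std=5.0)            # 51-point Gaussian kernel
g = g / g.sum()                      # normalise so the overall level is preserved

smoothed = np.convolve(y, g, mode='same')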
Example n. 12
def smoothAngle(tsd, sd):
    tmp = pd.Series(index=tsd.index.values, data=np.unwrap(tsd.values))
    tmp2 = tmp.rolling(window=100,
                       win_type='gaussian',
                       center=True,
                       min_periods=1).mean(std=sd)
    newtsd = nts.Tsd(tmp2 % (2 * np.pi))
    return newtsd
Example n. 13
def computeAngularTuningCurves_dat(spikes,
                                   angle,
                                   ep,
                                   nb_bins=180,
                                   frequency=120.0,
                                   bin_size=100):
    tmp = pd.Series(index=angle.index.values, data=np.unwrap(angle.values))
    tmp2 = tmp.rolling(window=50,
                       win_type='gaussian',
                       center=True,
                       min_periods=1).mean(std=10.0)
    bin_size = bin_size * 1000
    time_bins = np.arange(tmp.index[0], tmp.index[-1] + bin_size,
                          bin_size)  # assuming microseconds
    index = np.digitize(tmp2.index.values, time_bins)
    tmp3 = tmp2.groupby(index).mean()
    tmp3.index = time_bins[np.unique(index) - 1] + bin_size / 2
    tmp3 = nts.Tsd(tmp3)
    tmp4 = np.diff(tmp3.values) / np.diff(tmp3.as_units('s').index.values)
    newangle = nts.Tsd(t=tmp3.index.values, d=tmp3.values % (2 * np.pi))
    velocity = nts.Tsd(t=tmp3.index.values[1:], d=tmp4)
    velocity = velocity.restrict(ep)
    velo_spikes = {}
    #for k in spikes: velo_spikes[k]    = velocity.realign(spikes[k].restrict(ep))
    # bins_velocity must stay defined because it is part of the return value below
    bins_velocity = np.array([
        velocity.min(), -2 * np.pi / 3, -np.pi / 6, np.pi / 6, 2 * np.pi / 3,
        velocity.max() + 0.001
    ])
    #idx_velocity     = {k:np.digitize(velo_spikes[k].values, bins_velocity)-1 for k in spikes}

    bins = np.linspace(0, 2 * np.pi, nb_bins)
    idx = bins[0:-1] + np.diff(bins) / 2
    tuning_curves = {
        i: pd.DataFrame(index=idx, columns=np.arange(len(spikes)))
        for i in range(3)
    }

    for i, j in zip(range(3), range(0, 6, 2)):
        for k in spikes:
            spks = spikes[k].restrict(ep)
            #spks             = spks[idx_velocity[k] == j]
            angle_spike = newangle.restrict(ep).realign(spks)
            spike_count, bin_edges = np.histogram(angle_spike, bins)
            #tmp             = newangle.loc[velocity.index[np.logical_and(velocity.values>bins_velocity[j], velocity.values<bins_velocity[j+1])]]
            # occupancy of the wrapped angle, as in computeAngularTuningCurves
            occupancy, _ = np.histogram(newangle.restrict(ep), bins)
            spike_count = spike_count / occupancy
            tuning_curves[i][k] = spike_count * (1 / (bin_size * 1e-6))

    return tuning_curves, velocity, bins_velocity
Example n. 14
def firetdisco(hd_spikes, neuro_num, epoch):
    """
    # epoch means we need to use the object nts.IntervalSet
    # The interval are contained in the file : Mouse12-120806_ArenaEpoch.txt
    new_data = np.genfromtxt(data_directory+'Mouse12-120806_ArenaEpoch.txt')
    # We can integrate it in nts:
    exploration = nts.IntervalSet(start = new_data[:,0], end = new_data[:,1], time_units = 's')
    """
    # Next step is to compute an average firing rate for one neuron
    # Let's take neuron
    my_neuron = hd_spikes[neuro_num]
    # To speed up computation, we can restrict the time of spikes
    my_neuron = my_neuron.restrict(epoch)
    first_spike = my_neuron.index[0]
    last_spike = my_neuron.index[-1]  # index[-1] gives the last value of the array
    # Determine bin size in us
    bin_size = 1000000  # = 1 s
    # Duration between the first and last spike
    duration = last_spike - first_spike
    # With a bin size of 1 second, the number of points is
    nb_points = int(duration / bin_size)
    #Determine the bins of your data and apply digitize to get a classification index
    bins = np.arange(first_spike, last_spike, bin_size)
    index = np.digitize(my_neuron.index.values, bins, right=False)

    #Create a pd
    df_n = pd.DataFrame(index=index)
    df_n['firing_time'] = my_neuron.index.values
    #count the number of spikes per bin
    df_n_grouped = df_n.groupby(df_n.index).size().reset_index(name='counts')
    df_n_grouped.set_index('index', inplace=True)
    #generate the real index
    df_comp = pd.DataFrame(index=range(1, np.unique(index)[-1] + 1))
    #put that index in your df
    df_cn = df_comp.combine_first(df_n_grouped)
    df_cn.set_index(bins, inplace=True)
    #fill with 0 for the na values
    df_cn.fillna(0, inplace=True)
    #generate a Tsd with the data
    spike_count = nts.Tsd(t=bins + (bin_size / 2.), d=df_cn['counts'].values)
    #change units to spikes per second = firing rate
    firing_rate = nts.Tsd(t=bins + (bin_size / 2.),
                          d=spike_count.values / (bin_size / 1000. / 1000.))
    return first_spike, last_spike, bin_size, bins, firing_rate
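
The digitize/groupby/fillna sequence above can be expressed more compactly with np.histogram, which already returns zero for bins that contain no spikes. A sketch of the same binned firing-rate idea on synthetic spike times in seconds (the original works in microseconds):

import numpy as np

spike_times = np.sort(np.random.uniform(0, 100, 500))   # 500 spikes over 100 s
bin_size = 1.0                                           # seconds
bins = np.arange(spike_times[0], spike_times[-1], bin_size)

counts, edges = np.histogram(spike_times, bins)
firing_rate = counts / bin_size                          # spikes per second
bin_centres = edges[:-1] + bin_size / 2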
def lfp(
    channel,
    start=0,
    stop=1e8,
    fs=1250.0,
    n_channels_local=None,
    precision=np.int16,
    dat=False,
    verbose=False,
    memmap=False,
    p=None,
    volt_step=0.195,
):

    if (np.isnan(channel)) or (channel is None):
        return None

    if p is None:
        p = session + ".lfp"
        if dat:
            p = session + ".dat"

    if n_channels_local is None:
        n_channels = xml()["nChannels"]
    else:
        n_channels = n_channels_local

    if verbose:
        print("Load data from " + p)
        print(f"File contains {n_channels} channels")

    # From Guillaume viejo
    import neuroseries as nts

    bytes_size = 2
    start_index = int(start * fs * n_channels * bytes_size)
    stop_index = int(stop * fs * n_channels * bytes_size)
    # In order not to read after the file
    if stop_index > os.path.getsize(p):
        stop_index = os.path.getsize(p)
    fp = np.memmap(p,
                   precision,
                   "r",
                   start_index,
                   shape=(stop_index - start_index) // bytes_size)
    if memmap:
        print(r"/!\ memmap is not compatible with volt_step /!\ ")
        return fp.reshape(-1, n_channels)[:, channel]
    data = np.array(fp).reshape(len(fp) // n_channels, n_channels) * volt_step

    if type(channel) is not list:
        timestep = np.arange(0, len(data)) / fs + start
        return nts.Tsd(timestep, data[:, channel], time_units="s")
    elif type(channel) is list:
        timestep = np.arange(0, len(data)) / fs + start
        return nts.TsdFrame(timestep, data[:, channel], time_units="s")
Example n. 16
def compute_score(ep_pop, eigen):
    ep_pop = ep_pop - ep_pop.mean(0)
    ep_pop = ep_pop / (ep_pop.std(0) + 1e-8)
    a = ep_pop.values
    score = np.zeros(len(ep_pop))
    for i in range(len(eigen)):
        score += np.dot(a, eigen[i])**2.0 - np.dot(a**2.0, eigen[i]**2.0)
    score = nts.Tsd(t=ep_pop.index.values, d=score)
    return score
Example n. 17
def old_speed(pos, value_gaussian_filter, pixel=0.43):
    x_speed = np.diff(pos.as_units('s')['x']) / np.diff(pos.as_units('s').index)
    y_speed = np.diff(pos.as_units('s')['y']) / np.diff(pos.as_units('s').index)

    v = np.sqrt(x_speed**2 + y_speed**2) * pixel

    v = scipy.ndimage.gaussian_filter1d(v, value_gaussian_filter, axis=0)
    v = nts.Tsd(t=pos.index.values[:-1], d=v)

    return v
Example n. 18
def getPhase(lfp, fmin, fmax, nbins, fsamp, power=False):
    """ Continuous Wavelets Transform
        return phase of lfp in a Tsd array
    """
    import neuroseries as nts
    from Wavelets import MyMorlet as Morlet
    if isinstance(lfp, nts.time_series.TsdFrame):
        allphase = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))
        allpwr = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape))
        for i in lfp.keys():
            allphase[i], allpwr[i] = getPhase(lfp[i],
                                              fmin,
                                              fmax,
                                              nbins,
                                              fsamp,
                                              power=True)
        if power:
            return allphase, allpwr
        else:
            return allphase

    elif isinstance(lfp, nts.time_series.Tsd):
        cw = Morlet(lfp.values, fmin, fmax, nbins, fsamp)
        cwt = cw.getdata()
        cwt = np.flip(cwt, axis=0)
        wave = np.abs(cwt)**2.0
        phases = np.arctan2(np.imag(cwt), np.real(cwt)).transpose()
        cwt = None
        index = np.argmax(wave, 0)
        # memory problem here, need to loop
        phase = np.zeros(len(index))
        for i in range(len(index)):
            phase[i] = phases[i, index[i]]
        phases = None
        if power:
            pwrs = cw.getpower()
            pwr = np.zeros(len(index))
            for i in range(len(index)):
                pwr[i] = pwrs[index[i], i]
            return nts.Tsd(lfp.index.values,
                           phase), nts.Tsd(lfp.index.values, pwr)
        else:
            return nts.Tsd(lfp.index.values, phase)
Example n. 19
def loadTheta(path):
    import scipy.io
    import neuroseries as nts
    thetaInfo = scipy.io.loadmat(path)
    troughs = nts.Tsd(thetaInfo['thetaTrghs'][0][0][2].flatten(),
                      thetaInfo['thetaTrghs'][0][0][3].flatten(),
                      time_units='s')
    peaks = nts.Tsd(thetaInfo['thetaPks'][0][0][2].flatten(),
                    thetaInfo['thetaPks'][0][0][3].flatten(),
                    time_units='s')
    good_ep = nts.IntervalSet(thetaInfo['goodEp'][0][0][1],
                              thetaInfo['goodEp'][0][0][2],
                              time_units='s')
    tmp = (good_ep.as_units('s')['end'].iloc[-1] -
           good_ep.as_units('s')['start'].iloc[0])
    if (tmp / 60.) / 60. > 20.:  # VERY BAD
        good_ep = nts.IntervalSet(good_ep.as_units('s')['start'] * 0.0001,
                                  good_ep.as_units('s')['end'] * 0.0001,
                                  time_units='s')
    return good_ep
Example n. 20
def computeSpeed(position, ep, bin_size=0.1):
    time_bins = np.arange(position.index[0],
                          position.index[-1] + bin_size * 1e6, bin_size * 1e6)
    index = np.digitize(position.index.values, time_bins)
    tmp = position.groupby(index).mean()
    tmp.index = time_bins[np.unique(index) - 1] + (bin_size * 1e6) / 2
    distance = np.sqrt(
        np.power(np.diff(tmp['x']), 2) + np.power(np.diff(tmp['z']), 2))
    speed = nts.Tsd(t=tmp.index.values[0:-1] + bin_size / 2,
                    d=distance / bin_size)
    speed = speed.restrict(ep)
    return speed
Example n. 21
def getFiringRate(tsd_spike, bins):
    """bins shoud be in us
	"""
    import neuroseries as nts
    frate = nts.Tsd(bins, np.zeros(len(bins)))
    bins_size = (bins[1] - bins[0]) * 1.e-6  # convert to s for Hz
    if type(tsd_spike) is dict:
        for n in tsd_spike.keys():
            index = np.digitize(tsd_spike[n].index.values, bins)
            for i in index:
                frate[bins[i]] += 1.0
        frate = nts.Tsd(bins + (bins[1] - bins[0]) / 2,
                        frate.values / len(tsd_spike) / bins_size)
        return frate
    else:
        index = np.digitize(tsd_spike.index.values, bins)
        for i in index:
            frate[bins[i]] += 1.0
        frate = nts.Tsd(bins + (bins[1] - bins[0]) / 2,
                        frate.values / bins_size)
        return frate
Example n. 22
def downsample(tsd, up, down):
    import scipy.signal
    import neuroseries as nts
    dtsd = scipy.signal.resample_poly(tsd.values, up, down)
    dt = tsd.as_units('s').index.values[np.arange(0, tsd.shape[0], down)]
    if len(tsd.shape) == 1:
        return nts.Tsd(dt, dtsd, time_units='s')
    elif len(tsd.shape) == 2:
        return nts.TsdFrame(dt,
                            dtsd,
                            time_units='s',
                            columns=list(tsd.columns))
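
scipy.signal.resample_poly filters and resamples by the rational factor up/down; the helper above then pairs the result with every down-th original timestamp. A self-contained sketch of the resampling step alone:

import numpy as np
import scipy.signal

fs = 1250.0
t = np.arange(0, 2, 1 / fs)
sig = np.sin(2 * np.pi * 8 * t)

# Downsample by a factor of 5 (1250 Hz -> 250 Hz)
down = scipy.signal.resample_poly(sig, up=1, down=5)
t_down = t[::5]                      # matching timestamps for the resampled trace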
Example n. 23
    def test_times_data(self):
        """
        tests the times and data properties
        """
        a = np.random.randint(0, 10000000, 100)
        a.sort()
        b = np.random.randn(100)
        t = nts.Tsd(a, b)
        np.testing.assert_array_almost_equal_nulp(b, t.data())
        np.testing.assert_array_almost_equal_nulp(a, t.times())
        np.testing.assert_array_almost_equal_nulp(
            a / 1000., t.times(units=nts.milliseconds))
        np.testing.assert_array_almost_equal_nulp(
            a / 1.0e6, t.times(units=nts.seconds))
        with self.assertRaises(ValueError):
            t.times(units=nts.TimeUnits('banana'))
Example n. 24
def decodeHD(tuning_curves, spikes, ep, bin_size=200, px=None):
    """
        See : Zhang, 1998, Interpreting Neuronal Population Activity by Reconstruction: Unified Framework With Application to Hippocampal Place Cells
        tuning_curves: pd.DataFrame with angular position as index and one column per neuron
        spikes : dictionary of spike times
        ep : nts.IntervalSet, the epochs for decoding
        bin_size : in ms (default:200ms)
        px : Occupancy. If None, px is uniform
    """
    if len(ep) == 1:
        bins = np.arange(
            ep.as_units('ms').start.iloc[0],
            ep.as_units('ms').end.iloc[-1], bin_size)
    else:
        print("TODO, more than one epoch")
        sys.exit()

    spike_counts = pd.DataFrame(index=bins[0:-1] + np.diff(bins) / 2,
                                columns=spikes.keys())
    for k in spikes:
        spks = spikes[k].restrict(ep).as_units('ms').index.values
        spike_counts[k], _ = np.histogram(spks, bins)

    print(spike_counts.columns.values)
    print(tuning_curves.columns.values)

    tcurves_array = tuning_curves.values
    spike_counts_array = spike_counts.values
    proba_angle = np.zeros((spike_counts.shape[0], tuning_curves.shape[0]))

    part1 = np.exp(-(bin_size / 1000) * tcurves_array.sum(1))
    if px is not None:
        part2 = px
    else:
        part2 = np.ones(tuning_curves.shape[0])
        # part2 = np.histogram(position['ry'], np.linspace(0, 2*np.pi, 61), weights = np.ones_like(position['ry'])/float(len(position['ry'])))[0]

    for i in range(len(proba_angle)):
        part3 = np.prod(tcurves_array**spike_counts_array[i], 1)
        p = part1 * part2 * part3
        proba_angle[i] = p / p.sum()  # Normalization process here

    proba_angle = pd.DataFrame(index=spike_counts.index.values,
                               columns=tuning_curves.index.values,
                               data=proba_angle)
    proba_angle = proba_angle.astype('float')
    decoded = nts.Tsd(t=proba_angle.index.values,
                      d=proba_angle.idxmax(1).values,
                      time_units='ms')
    return decoded, proba_angle
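
The decoder above is the Bayesian reconstruction of Zhang (1998): for each time bin the posterior over angles is proportional to exp(-tau * sum_j f_j(theta)) * prod_j f_j(theta)^n_j * px(theta), where tau is the bin size in seconds, f_j the tuning curves and n_j the spike counts. A tiny numeric sketch of that inner step with made-up numbers:

import numpy as np

tau = 0.2                                 # bin size in seconds
tcurves = np.array([[2.0, 10.0],          # firing rates (Hz) of 2 neurons
                    [8.0, 1.0],           # at 3 candidate angles
                    [4.0, 4.0]])
counts = np.array([3, 0])                 # spikes observed in this bin
px = np.ones(3) / 3                       # uniform prior over angles

part1 = np.exp(-tau * tcurves.sum(1))     # exp(-tau * summed rate) per angle
part3 = np.prod(tcurves ** counts, 1)     # product of rate^count per angle
posterior = part1 * px * part3
posterior = posterior / posterior.sum()   # normalise; argmax gives the decoded angle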
Example n. 25
    def setUp(self):
        from scipy.io import loadmat
        self.mat_data1 = loadmat(
            os.path.join(nts.get_test_data_dir(), 'interval_set_data_1.mat'))

        self.a1 = self.mat_data1['a1'].ravel()
        self.b1 = self.mat_data1['b1'].ravel()
        self.int1 = nts.IntervalSet(self.a1, self.b1, expect_fix=True)

        self.a2 = self.mat_data1['a2'].ravel()
        self.b2 = self.mat_data1['b2'].ravel()
        self.int2 = nts.IntervalSet(self.a2, self.b2, expect_fix=True)

        self.tsd_t = self.mat_data1['t'].ravel()
        self.tsd_d = self.mat_data1['d'].ravel()
        self.tsd = nts.Tsd(self.tsd_t, self.tsd_d)
def lfp_in_intervals(channel, intervals):
    t = np.array([])
    lfps = np.array([])

    for start, stop in zip(
            intervals.as_units("s").start,
            intervals.as_units("s").end):
        start = np.round(start, decimals=1)
        stop = np.round(stop, decimals=1)
        lfp = bk.load.lfp(channel, start, stop)
        t = np.append(t, lfp.index)
        lfps = np.append(lfps, lfp.values)

    lfps = nts.Tsd(t, lfps)

    return lfps
Example n. 27
def refineSleepFromAccel(acceleration, sleep_ep):
    vl = acceleration[0].restrict(sleep_ep)
    vl = vl.as_series().diff().abs().dropna()
    a, _ = scipy.signal.find_peaks(vl, 0.025)
    peaks = nts.Tsd(vl.iloc[a])
    duration = np.diff(peaks.as_units('s').index.values)
    interval = nts.IntervalSet(start=peaks.index.values[0:-1],
                               end=peaks.index.values[1:])

    newsleep_ep = interval.iloc[duration > 15.0]
    newsleep_ep = newsleep_ep.reset_index(drop=True)
    newsleep_ep = newsleep_ep.merge_close_intervals(100000, time_units='us')

    #newsleep_ep	= sleep_ep.intersect(newsleep_ep)

    return newsleep_ep
Example n. 28
def computeAngularTuningCurves(spikes,
                               angle,
                               ep,
                               nb_bins=180,
                               frequency=120.0):

    bins = np.linspace(0, 2 * np.pi, nb_bins)
    idx = bins[0:-1] + np.diff(bins) / 2
    tuning_curves = pd.DataFrame(index=idx, columns=np.arange(len(spikes)))
    angle = angle.restrict(ep)
    # Smoothing the angle here
    tmp = pd.Series(index=angle.index.values, data=np.unwrap(angle.values))
    tmp2 = tmp.rolling(window=50,
                       win_type='gaussian',
                       center=True,
                       min_periods=1).mean(std=10.0)
    angle = nts.Tsd(tmp2 % (2 * np.pi))
    for k in spikes:
        spks = spikes[k]
        # true_ep         = nts.IntervalSet(start = np.maximum(angle.index[0], spks.index[0]), end = np.minimum(angle.index[-1], spks.index[-1]))
        spks = spks.restrict(ep)
        angle_spike = angle.restrict(ep).realign(spks)
        spike_count, bin_edges = np.histogram(angle_spike, bins)
        occupancy, _ = np.histogram(angle, bins)
        spike_count = spike_count / occupancy
        tuning_curves[k] = spike_count * frequency

        tcurves = tuning_curves[k]
        padded = pd.Series(index=np.hstack(
            (tcurves.index.values - (2 * np.pi), tcurves.index.values,
             tcurves.index.values + (2 * np.pi))),
                           data=np.hstack((tcurves.values, tcurves.values,
                                           tcurves.values)))
        smoothed = padded.rolling(window=20,
                                  win_type='gaussian',
                                  center=True,
                                  min_periods=1).mean(std=3.0)
        tuning_curves[k] = smoothed[tcurves.index]

    return tuning_curves
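
The padding step above is what lets the Gaussian smoothing wrap around the circle: the curve is tiled three times (shifted by -2*pi and +2*pi), smoothed, and only the central copy is kept, so the values near 0 and 2*pi borrow from each other. A minimal sketch of that trick on a synthetic tuning curve:

import numpy as np
import pandas as pd

bins = np.linspace(0, 2 * np.pi, 60)
idx = bins[:-1] + np.diff(bins) / 2
tcurve = pd.Series(index=idx, data=np.cos(idx) + 1.0)   # synthetic tuning curve

# Tile over [-2*pi, 4*pi) so the rolling window sees the curve as circular
padded = pd.Series(index=np.hstack((idx - 2 * np.pi, idx, idx + 2 * np.pi)),
                   data=np.tile(tcurve.values, 3))
smoothed = padded.rolling(window=20, win_type='gaussian',
                          center=True, min_periods=1).mean(std=3.0)
wrapped_smooth = smoothed.loc[idx]                       # keep the central copy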
Example n. 29
def decoding_overlap(tuning_curves, bin_size, spikes, neuron_order, t, px):
	#My way of defining overlapping bins
	#bin_size = 40 # ms
	bins = np.arange(0, 2000+2*bin_size, bin_size) - 1000 - bin_size/2
	obins = np.vstack((bins-bin_size/2,bins)).T.flatten()
	obins = np.vstack((obins,obins+bin_size)).T
	times = obins[:,0]+(np.diff(obins)/2).flatten()
	
	
	# My function to compute the histogram 
	def histo(spk, obins):
		n = len(obins)
		count = np.zeros(n)
		for i in range(n):
			count[i] = np.sum((spk>obins[i,0]) * (spk < obins[i,1]))
		return count
	# When I do the binning for one SWR time t

	spike_counts = pd.DataFrame(index = times, columns = neuron_order)
	tbins = t + obins
	for k in neuron_order:
		spike_counts[k] = histo(spikes[k].as_units('ms').index.values, tbins)

	tcurves_array = tuning_curves.values
	spike_counts_array = spike_counts.values
	proba_angle = np.zeros((spike_counts.shape[0], tuning_curves.shape[0]))
	
	part1 = np.exp(-(bin_size/1000)*tcurves_array.sum(1))	
	part2 = px
	
	for i in range(len(proba_angle)):
		part3 = np.prod(tcurves_array**spike_counts_array[i], 1)
		p = part1 * part2 * part3
		proba_angle[i] = p/p.sum() #Normalization process here
	
	#print(spike_counts)
	proba_angle  = pd.DataFrame(index = spike_counts.index.values, columns = tuning_curves.index.values, data= proba_angle)	
	# proba_angle = proba_angle.astype('float')		
	decoded = nts.Tsd(t = proba_angle.index.values, d = proba_angle.idxmax(1).values, time_units = 'ms')
	return decoded, proba_angle	
def loadLFP(path,
            n_channels=90,
            channel=64,
            frequency=1250.0,
            precision="int16"):
    """
    LEGACY
    """
    # From Guillaume Viejo
    import neuroseries as nts

    if type(channel) is not list:
        f = open(path, "rb")
        startoffile = f.seek(0, 0)
        endoffile = f.seek(0, 2)
        bytes_size = 2
        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        duration = n_samples / frequency
        interval = 1 / frequency
        f.close()
        with open(path, "rb") as f:
            print("opening")
            data = np.fromfile(f, np.int16).reshape(
                (n_samples, n_channels))[:, channel]
            timestep = np.arange(0, len(data)) / frequency
        return nts.Tsd(timestep, data, time_units="s")
    elif type(channel) is list:
        f = open(path, "rb")
        startoffile = f.seek(0, 0)
        endoffile = f.seek(0, 2)
        bytes_size = 2

        n_samples = int((endoffile - startoffile) / n_channels / bytes_size)
        duration = n_samples / frequency
        f.close()
        with open(path, "rb") as f:
            data = np.fromfile(f, np.int16).reshape(
                (n_samples, n_channels))[:, channel]
            timestep = np.arange(0, len(data)) / frequency
        return nts.TsdFrame(timestep, data, time_units="s")