Example #1
    def __init__(self, data_obj, p=1, oversampled=0, t_offset=None):
        # t_offset: if oversampled, this is shape (N,), and gives the offset
        # between stim trigger and frame (nbefore).
        self.data_obj = data_obj
        self.F = data_obj.F
        self.p = p
        # OASIS deconvolution: c = denoised trace, s = deconvolved activity,
        # b = baseline, g = AR(p) coefficients
        c, s, b, g, _ = deconvolve(data_obj.dfof.astype(np.float64),
                                   penalty=1,
                                   g=tuple([None] * self.p))
        self.a = np.percentile(s, 95)
        self.b = b
        fudge_factor = .97
        # estimate_parameters returns (g, sn); est[1] is the noise std sn
        est = estimate_parameters(data_obj.dfof.astype(np.float64),
                                  p=self.p,
                                  fudge_factor=fudge_factor)
        self.sn = est[1]
        if not isinstance(g, tuple):
            g = (g, )
        self.g = np.array(g)
        #self.fn_obj = fn_obj
        #nangle = len(np.unique(data_obj.angle))
        self.noise = self.sn**2 * (1 + (self.g**2).sum())
        self.smax = 10
        #self.fn_obj.compute_helper_vars(data_obj,self)
        self.pFs = [self.p_F_given_s(s) for s in range(self.smax)]
        self.oversampled = oversampled
        if self.oversampled:
            self.sampwt = np.ones((self.oversampled, 1)) / self.oversampled
            self.sampmat = np.zeros(
                (self.oversampled * (self.F.shape[0] - 1), self.F.shape[1]),
                dtype='bool')
            # per-trial offsets digitized onto the oversampled grid
            dig = np.floor(self.oversampled * t_offset).astype('<i2')
            for i in range(self.sampmat.shape[1]):
                # per-column start offset dig[i] (dig is an array)
                self.sampmat[dig[i]::self.oversampled, i] = 1
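
For context, a standalone sketch of the two OASIS calls this constructor relies on; the synthetic dfof trace below is illustrative only:

import numpy as np
from oasis.functions import deconvolve, estimate_parameters

dfof = np.random.randn(1000) * 0.05  # hypothetical dF/F trace
p = 1
c, s, b, g, _ = deconvolve(dfof.astype(np.float64), penalty=1,
                           g=tuple([None] * p))
g_est, sn = estimate_parameters(dfof.astype(np.float64), p=p,
                                fudge_factor=.97)  # returns (g, sn)
print(b, g, sn)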
Example #2
    def __init__(self,
                 data_obj,
                 p=1,
                 oversampled=0,
                 t_offset=None,
                 precomputed=None):
        # Some functions require 'precomputed', a dict with at least two keys:
        # theta_star (output of lbfgsb) and the fn_obj used in the
        # optimization of theta_star.
        # t_offset: if oversampled, this is shape (N,), and gives the offset
        # between stim trigger and frame (nbefore).
        self.data_obj = data_obj
        self.F = data_obj.F
        self.nroi = self.F.shape[0]
        self.p = p
        self.b = np.zeros((self.nroi, 1, 1))
        self.g = np.zeros((self.nroi, self.p, 1))
        self.a = np.zeros((self.nroi, ))
        self.sn = np.zeros((self.nroi, ))
        fudge_factor = .97
        for i in range(self.nroi):
            # per-ROI OASIS deconvolution; gtemp holds the AR(p) coefficients
            _, s, self.b[i, 0, 0], gtemp, _ = deconvolve(
                data_obj.dfof[i].astype(np.float64),
                penalty=1,
                g=tuple([None] * self.p))
            self.g[i, :, 0] = np.array(gtemp)
            self.a[i] = np.percentile(s, 95)
            # estimate_parameters returns (g, sn); est[1] is the noise std sn
            est = estimate_parameters(data_obj.dfof[i].astype(np.float64),
                                      p=self.p,
                                      fudge_factor=fudge_factor)
            self.sn[i] = est[1]
        #        if not type(g) is tuple:
        #            g = (g,)
        #        self.g = np.array(g)
        #self.fn_obj = fn_obj
        #nangle = len(np.unique(data_obj.angle))
        self.noise = (self.sn**2 *
                      (1 + (self.g**2).sum(1)))[:, np.newaxis, np.newaxis]
        self.smax = 10
        #self.fn_obj.compute_helper_vars(data_obj,self)
        self.pFs = [self.p_F_given_s(s) for s in range(self.smax)]
        self.oversampled = oversampled
        if self.oversampled:
            self.sampwt = np.ones((self.oversampled, 1)) / self.oversampled
            self.sampmat = np.zeros(
                (self.oversampled * (self.F.shape[0] - 1), self.F.shape[1]),
                dtype='bool')
            # per-trial offsets digitized onto the oversampled grid
            dig = np.floor(self.oversampled * t_offset).astype('<i2')
            for i in range(self.sampmat.shape[1]):
                # per-column start offset dig[i] (dig is an array)
                self.sampmat[dig[i]::self.oversampled, i] = 1
        if precomputed:
            theta_star = precomputed['theta_star']
            fn_obj = precomputed['fn_obj']
            # one fewer time point required
            self.rpre = np.zeros(np.array(self.F.shape) + np.array((0, -1, 0)))
            for i in range(self.nroi):
                self.rpre[i] = fn_obj.rfunc(theta_star[i][0])
Example #3
def oasis_decon(traces):
    import numpy as np
    from oasis.functions import deconvolve

    decon, spikes = [], []
    count = 0
    for trace in traces:
        # c: denoised trace, s: deconvolved spikes
        c, s, b, g, lam = deconvolve(trace, penalty=1, b_nonneg=False)
        if not np.any(c):
            # replace an all-zero result with low-amplitude noise
            c = np.random.normal(0., 0.1, size=(len(c)))
            count += 1
        decon.append(c)
        spikes.append(s)
    return np.asarray(decon), np.asarray(spikes)
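
A minimal usage sketch; the synthetic traces below are made up for illustration:

import numpy as np
traces = np.random.randn(5, 500) * 0.05  # hypothetical noisy traces
traces[:, 100:110] += 1.0                # add a brief transient
decon, spikes = oasis_decon(traces)
print(decon.shape, spikes.shape)  # (5, 500) (5, 500)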
Example #4
def gen_traces(datafiles,
               blcutoff=blcutoff,
               blspan=blspan):  #nbefore=nbefore,nafter=nafter
    # Assumes module-level imports/globals: numpy as np, h5py,
    # scipy.io as sio, scipy.ndimage as sfi, oasis.functions.deconvolve,
    # and the downsampling factor ds.
    trialwise = np.array(())
    ctrialwise = np.array(())
    strialwise = np.array(())
    dfofall = np.array(())
    baselineall = np.array(())
    for datafile in datafiles:
        # frame trigger indices from the accompanying .mat file
        frm = sio.loadmat(datafile.replace('.rois', '.mat'),
                          squeeze_me=True)['info']['frame'][()][1:]
        with h5py.File(datafile, mode='r') as f:
            to_add = f['corrected'][:].T
            to_add[np.isnan(to_add)] = np.nanmin(to_add)
            # running-percentile baseline on a downsampled trace
            #             baseline = np.percentile(to_add,blcutoff,axis=1)
            baseline = sfi.percentile_filter(to_add[:, ::ds], blcutoff,
                                             (1, int(blspan / ds)))
            baseline = np.repeat(baseline, ds, axis=1)
            for i in range(baseline.shape[0]):
                baseline[i] = sfi.gaussian_filter1d(baseline[i], blspan / 2)
            #             if baseline.shape[1]<to_add.shape[1]:
            #                 baseline = np.hstack((baseline,np.repeat(baseline[:,-1],to_add.shape[1]-baseline.shape[1])))
            if baseline.shape[1] > to_add.shape[1]:
                baseline = baseline[:, :to_add.shape[1]]
            c = np.zeros_like(to_add)
            s = np.zeros_like(to_add)
            dfof = np.zeros_like(to_add)
            for i in range(c.shape[0]):
                #                 dfof = (to_add[i]-baseline[i,np.newaxis])/baseline[i,np.newaxis]
                dfof[i] = (to_add[i] - baseline[i, :]) / (baseline[i, :])
                #try:
                c[i], s[i], _, _, _ = deconvolve(dfof[i].astype(np.float64),
                                                 penalty=1,
                                                 sn=5e-3)
                #except:
                #    print("in "+datafile+" couldn't do "+str(i))
            try:
                trialwise = np.concatenate((trialwise, to_add), axis=0)
                ctrialwise = np.concatenate((ctrialwise, c), axis=0)
                strialwise = np.concatenate((strialwise, s), axis=0)
                dfofall = np.concatenate((dfofall, dfof), axis=0)
                baselineall = np.concatenate((baselineall, baseline), axis=0)
            except ValueError:
                # first file: nothing accumulated yet, so concatenate fails
                trialwise = to_add.copy()
                ctrialwise = c.copy()
                strialwise = s.copy()
                dfofall = dfof.copy()
                baselineall = baseline.copy()
    return trialwise, ctrialwise, strialwise, dfofall, baselineall
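
A call sketch; the filenames are hypothetical, and each .rois file needs a matching .mat with an info.frame field:

datafiles = ['session1.rois', 'session2.rois']  # hypothetical
trialwise, ctrialwise, strialwise, dfofall, baselineall = gen_traces(datafiles)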
Example #5
def get_spiking_data(dff_traces, timestamps, cell_specimen_ids, num_std=3):
    '''
    Deconvolve dff_traces into spikes using OASIS
    (https://github.com/j-friedrich/OASIS).

    Inputs:
        dff_traces: iterable of dF/F traces, one per cell
        timestamps: time vector corresponding to dff_traces
        cell_specimen_ids: ids used as dictionary keys, one per trace
        num_std: number of standard deviations a deconvolved value must
            exceed to be classified as a spike
    Returns:
        spikes - dictionary
            key: cell_specimen_id, value: binary vector indicating where
            spikes were detected
        spike_times - dictionary
            key: cell_specimen_id, value: spike times
        isis - dictionary
            key: cell_specimen_id, value: interspike intervals
    '''

    from oasis.functions import deconvolve

    spike_prob_list = []
    dff = {}

    for i, dff_trace in enumerate(dff_traces):
        # s is the deconvolved activity, used here as a spike probability
        c, s, b, g, lam = deconvolve(np.double(dff_trace), penalty=1)
        spike_prob_list.append(s)
        dff[cell_specimen_ids[i]] = dff_trace

    # pooled standard deviation across all cells' deconvolved activity
    std = np.std(np.asarray(spike_prob_list))

    spike_times = {}
    isis = {}
    spikes = {}

    for i, spike_prob in enumerate(spike_prob_list):
        s_sig = (spike_prob >= num_std * std)
        spikes[cell_specimen_ids[i]] = s_sig * 1
        spike_times[cell_specimen_ids[i]] = timestamps[s_sig]
        isis[cell_specimen_ids[i]] = np.diff(spike_times[cell_specimen_ids[i]])

    return spikes, spike_times, isis
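
A minimal usage sketch with synthetic inputs; all values below are illustrative:

import numpy as np
dff_traces = [np.random.randn(1000) * 0.05 for _ in range(3)]  # hypothetical dF/F
timestamps = np.arange(1000) / 30.  # assuming 30 Hz imaging
cell_specimen_ids = [101, 102, 103]  # made-up ids
spikes, spike_times, isis = get_spiking_data(dff_traces, timestamps,
                                             cell_specimen_ids, num_std=3)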
Example #6
def extract_spikes(roiattrs):
    # Assumes module-level: import numpy as np;
    # from oasis.functions import deconvolve
    frameRate = 25
    if 'corr_traces' in roiattrs.keys():
        trace_type = 'corr_traces'
    else:
        trace_type = 'traces'
    nROIs = len(roiattrs['idxs'])
    spk_traces = []
    print(np.isfinite(roiattrs[trace_type]).all())
    print(roiattrs[trace_type].shape)

    for tr in roiattrs[trace_type]:
        if np.all(np.isfinite(tr)):
            # deconvolve returns (c, s, b, g, lam); keep the spike signal s
            spk_traces.append(deconvolve(tr)[1])
        else:
            print('setting nan')
            spk_traces.append([np.nan] * len(tr))

    roiattrs['spike_inf'] = np.array(spk_traces)
    roiattrs['spike_long'] = np.nan
    return roiattrs
Example #7
    def deconvolve_trace(self, trace, penalty):
        c, s, b, g, lam = deconvolve(trace, penalty=penalty)
        return (c, s)
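
In OASIS's deconvolve, penalty=1 selects the L1 sparsity penalty on the inferred activity and penalty=0 the L0 variant. A quick sketch on a synthetic AR(1) trace (values are illustrative):

import numpy as np
from oasis.functions import deconvolve

T, g_true = 300, 0.95
trace = np.zeros(T)
for t in (50, 150):  # two hypothetical spikes
    trace[t:] += g_true ** np.arange(T - t)
trace += 0.1 * np.random.randn(T)
c, s, b, g, lam = deconvolve(trace, penalty=1)  # L1-penalized
print(np.flatnonzero(s > 0.5))  # indices near 50 and 150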
        f) + "inh.txt"
    positions_loc = "../Data/small/networkPositions_iNet1_Size100_CC0" + str(
        f) + "inh.txt"

    neural_activations = parse_activations(activations_loc, partial=False)
    positions = parse_neuron_positions(positions_loc)

    print("removing light scattering")
    start_time = time.time()
    #---------- Remove light scattering
    neural_activations = unscatter(neural_activations.T, positions)
    n.append(neural_activations[1:3000, 65])
    #---------- Discretize with the threshold-based method proposed in the ChaLearn competition
    print("discretizing using oasis")

    neural_dis = np.apply_along_axis(lambda x: deconvolve(x, penalty=1)[1], 0,
                                     neural_activations)
    neural_dis = pd.DataFrame(neural_dis).apply(discretize, 0)
    n_dis.append(neural_dis.iloc[1:3000, 65])

    #neural_dis.to_csv("../Data/small/discretized_oasis_"+str(f)+".csv",index=False)

    t = time.time() - start_time
    print(t)
    log.write("oasis " + str(t) + " " + str(f))
    log.write("\n")

log.close()

#---------- Plot an example of discretization for the poster
# ori_stim_1 = data[40].reshape(29, 259)
# ori_stim_1 = ori_stim_1.T
# plt.close()

# intuition on calcium
for neuron_id in range(30, 40):
    plt.plot(ori_stim_1[neuron_id], label="original")
    plt.plot(gen_stim_1[neuron_id], label="generated")
    plt.legend()
    plt.savefig("./intuition/intuition-calcium" + str(neuron_id))
    plt.close()

# intuition on spikes
for neuron_id in range(30, 40):
    c, s, b, g, lam = deconvolve(gen_stim_1[neuron_id], penalty=1)
    plt.plot(s, label="gen")
    c, s, b, g, lam = deconvolve(ori_stim_1[neuron_id], penalty=1)
    plt.plot(s, label="ori")
    plt.legend()
    plt.savefig("./intuition/intuition-spike" + str(neuron_id))
    plt.close()

# plot spike counts for all neurons
new_fire_rate = []
old_fire_rate = []
new_spike_train = np.zeros((259, 29))
old_spike_train = np.zeros((259, 29))
for i in trange(259):
    c, s, b, g, lam = deconvolve(gen_stim_1[i], penalty=1)
    count = len(np.flatnonzero(s > 0))
Example #9
    ynew = f(xnew)
    plt.figure()
    plt.title('Nonlinear transformation')
    plt.plot(xnew, ynew)
    plt.xlabel('# AP')
    plt.ylabel('deltaF/F')
    return f


#%% Calcium trace generation
# (assumes numpy as np and matplotlib.pyplot as plt are imported, along with
# deconvolve and a gen_data helper supporting trueSpikes/nonlinearity options)
Y, truth, trueSpikes = gen_data(firerate=2, N=1000)
Y_nl, _, _ = gen_data(trueSpikes=trueSpikes, nonlinearity=True)

#%% Deconvolution using OASIS
index = 0
c, s, b, g, lam = deconvolve(Y[index])
c_nl, s_nl, b_nl, g_nl, lam_nl = deconvolve(Y_nl[index])

#%% Show without nonlinear transformation result
framerate = 30
plt.figure()
tt = np.arange(0, len(c)) / framerate  # time axis in seconds
plt.plot(tt, Y[index], label='trace')
plt.plot(tt, c, label='deconvolve')
plt.plot(tt, truth[index], label='ground truth signal')
plt.plot(tt, trueSpikes[index], label='ground truth spikes')
plt.plot(tt, s, label='deconvolved spikes')
plt.legend()
np.corrcoef(s, trueSpikes[index])

#%% Result with nonlinear transformation
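# A plausible completion of this cell: a sketch mirroring the cell above with
# the nonlinear variables (c_nl, s_nl); not part of the original snippet.
plt.figure()
tt = np.arange(0, len(c_nl)) / framerate
plt.plot(tt, Y_nl[index], label='trace')
plt.plot(tt, c_nl, label='deconvolve')
plt.plot(tt, truth[index], label='ground truth signal')
plt.plot(tt, trueSpikes[index], label='ground truth spikes')
plt.plot(tt, s_nl, label='deconvolved spikes')
plt.legend()
np.corrcoef(s_nl, trueSpikes[index])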