Example #1

import numpy as np
import pylab
import utils   # project-local module providing get_grid_pos()
print "nspikes", nspikes
print "N_RF_X: %d\tN_RF_Y:%d\tn_exc: %d\tn_inh: %d\tn_cells:%d" % (params['N_RF_X'], params['N_RF_Y'], params['n_exc'], params['n_inh'], params['n_cells'])
#particles = np.vstack((tuning_prop.transpose(), nspikes_normalized))

# parametrize the spatial layout
H, x_edges, y_edges = np.histogram2d(tuning_prop[:,0], tuning_prop[:, 1], bins=(n_bins_x, n_bins_y))
print "x_edges", x_edges, x_edges.size
print "y_edges", y_edges, y_edges.size


z_max = 0
for gid in xrange(n_cells):
    binned_spikes, time_bins = np.histogram(spiketrains[gid], time_grid)
    x_pos_cell, y_pos_cell = tuning_prop[gid, 0], tuning_prop[gid, 1] # cell properties
    x_pos_grid, y_pos_grid = utils.get_grid_pos(x_pos_cell, y_pos_cell, x_edges, y_edges) # cell's position in the grid
#    print "%d\t%.3e\t%.3e: x:%d\ty:%d" % (gid, x_pos_cell, y_pos_cell, x_pos_grid, y_pos_grid)
    z_max = max(binned_spikes.max(), z_max)
    for frame in xrange(n_frames): # put activity in right time bin (output figure)
        output_arrays[frame][x_pos_grid, y_pos_grid] = binned_spikes[frame]
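
# NOTE (not from the original code): utils.get_grid_pos() is not shown in this
# snippet. A minimal sketch of what such a helper might do, assuming it maps a
# continuous (x, y) position onto the histogram grid defined by the bin edges:
def _get_grid_pos_sketch(x_pos, y_pos, x_edges, y_edges):
    # np.searchsorted finds where the position falls between the edges;
    # subtract 1 to get a bin index and clip it so it stays inside the grid
    x_idx = min(max(np.searchsorted(x_edges, x_pos) - 1, 0), x_edges.size - 2)
    y_idx = min(max(np.searchsorted(y_edges, y_pos) - 1, 0), y_edges.size - 2)
    return x_idx, y_idx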

for frame in xrange(n_frames):
    output_fn_dat = output_fn_base + 'frame%d.dat' % (frame)
    output_fn_fig = output_fn_base + 'frame%d.png' % (frame)
    print "Saving to file: ", output_fn_dat
    np.savetxt(output_fn_dat, output_arrays[frame])

    print "Plotting frame: ", frame
    fig = pylab.figure()
    ax = fig.add_subplot(111, axisbg=bg_color)
    ax.set_xlabel('$x$')
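    # NOTE: the original snippet breaks off here; the following lines are an assumed
    # continuation (not from the source) that renders the frame with the global color
    # scale z_max computed above and saves it to output_fn_fig.
    ax.set_ylabel('$y$')
    cax = ax.pcolormesh(output_arrays[frame].transpose(), vmin=0, vmax=z_max)
    fig.colorbar(cax)
    pylab.savefig(output_fn_fig)
    pylab.close(fig)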
    def compute_v_estimates(self):
        """
        This function combines activity on the population level to estimate vx, vy

        On which time scale should the prediction work?
        There are (at least) five different ways to do it:
          Very short time-scale:
          1) Compute the prediction for each time bin - based on the activity in the respective time bin
          Short time-scale:
          2) Compute the prediction for each time bin based on all activity in the past
          3) Non-linear 'voting' based on 1)
          Long time-scale:
          4) Compute the prediction based on the activity of the whole run - not time dependent
          5) Non-linear 'voting' based on 4)
        (A small toy example of 1) and 3) is sketched after this method.)
        """
        # momentary result, based on the activity in one time bin
        self.vx_avg = np.zeros(self.n_bins) 
        self.vy_avg = np.zeros(self.n_bins)
        # ---> gives theta_avg 

        # based on the activity in several time bins
        self.vx_moving_avg = np.zeros((self.n_bins, 2))
        self.vy_moving_avg = np.zeros((self.n_bins, 2))

        # non linear transformation of vx_avg
        self.vx_non_linear = np.zeros(self.n_bins)
        self.vy_non_linear = np.zeros(self.n_bins)

        trace_length = 100 # [ms] window length for moving average 
        trace_length_in_bins = int(round(trace_length / self.time_binsize))
        # ---> gives theta_moving_avg

        # # # # # # # # # # # # # # # # # # # # # # 
        # S P E E D    P R E D I C T I O N 
        # # # # # # # # # # # # # # # # # # # # # # 
        self.vx_confidence_binned = self.nspikes_binned_normalized[self.sorted_indices_vx]
        self.vy_confidence_binned = self.nspikes_binned_normalized[self.sorted_indices_vy]
        vx_prediction_trace = np.zeros((self.n_cells, self.n_bins, 2))    # _trace: prediction based on the momentary and past activity (moving average, and std) --> trace_length
        vy_prediction_trace = np.zeros((self.n_cells, self.n_bins, 2))    # _trace: prediction based on the momentary and past activity (moving average, and std) --> trace_length
        for i in xrange(self.n_bins):

            # 1) momentary vote
            # take the weighted average for v_prediction (weight = normalized activity)
            vx_pred = self.vx_confidence_binned[:, i] * self.vx_tuning
            vy_pred = self.vy_confidence_binned[:, i] * self.vy_tuning
            self.vx_avg[i] = np.sum(vx_pred)
            self.vy_avg[i] = np.sum(vy_pred)

            # 2) moving average
            past_bin = max(0, i - trace_length_in_bins)  # first bin of the moving-average window
            for cell in xrange(self.n_cells):
                vx_prediction_trace[cell, i, 0] = self.vx_confidence_binned[cell, past_bin:i].mean()
                vx_prediction_trace[cell, i, 1] = self.vx_confidence_binned[cell, past_bin:i].std()
                vy_prediction_trace[cell, i, 0] = self.vy_confidence_binned[cell, past_bin:i].mean()
                vy_prediction_trace[cell, i, 1] = self.vy_confidence_binned[cell, past_bin:i].std()
            self.vx_moving_avg[i, 0] = np.sum(vx_prediction_trace[:, i, 0] * self.vx_tuning)
            self.vx_moving_avg[i, 1] = np.std(vx_prediction_trace[:, i, 1] * self.vx_tuning)
            self.vy_moving_avg[i, 0] = np.sum(vy_prediction_trace[:, i, 0] * self.vy_tuning)
            self.vy_moving_avg[i, 1] = np.std(vy_prediction_trace[:, i, 1] * self.vy_tuning)

            # 3)
            # rescale activity to negative values
            vx_shifted = self.nspikes_binned[self.sorted_indices_vx, i] - self.nspikes_binned[self.sorted_indices_vx, i].max()
            vy_shifted = self.nspikes_binned[self.sorted_indices_vy, i] - self.nspikes_binned[self.sorted_indices_vy, i].max()
            # exp --> maps the shifted (non-positive) values into (0, 1], softmax-style
            vx_exp = np.exp(vx_shifted)
            vy_exp = np.exp(vy_shifted)
            # normalize and vote
            vx_votes = (vx_exp / vx_exp.sum()) * self.vx_tuning
            vy_votes = (vy_exp / vy_exp.sum()) * self.vy_tuning
            self.vx_non_linear[i] = vx_votes.sum()
            self.vy_non_linear[i] = vy_votes.sum()

        # in the first time bin the trace cannot have a standard deviation --> avoid NaNs
        self.vx_moving_avg[0, 0] = np.sum(self.vx_confidence_binned[self.sorted_indices_vx, 0].mean() * self.vx_tuning)
        self.vy_moving_avg[0, 0] = np.sum(self.vy_confidence_binned[self.sorted_indices_vy, 0].mean() * self.vy_tuning)
        self.vx_moving_avg[0, 1] = 0
        self.vy_moving_avg[0, 1] = 0

        # ---> time INdependent estimates: based on activity of the full run

        # compute the marginalized (over all positions) vx, vy estimates and bin them in a grid
        self.vx_grid = np.linspace(np.min(self.vx_tuning), np.max(self.vx_tuning), self.n_vx_bins, endpoint=True)
        self.vy_grid = np.linspace(np.min(self.vy_tuning), np.max(self.vy_tuning), self.n_vy_bins, endpoint=True)
        self.vx_marginalized_binned = np.zeros(self.n_vx_bins)
        self.vy_marginalized_binned = np.zeros(self.n_vy_bins)
        self.vx_marginalized_binned_nonlinear = np.zeros(self.n_vx_bins)
        self.vy_marginalized_binned_nonlinear = np.zeros(self.n_vy_bins)

        for gid in xrange(self.n_cells):
            vx_cell, vy_cell = self.tuning_prop[gid, 2], self.tuning_prop[gid, 3] # cell properties
            vx_grid_pos, vy_grid_pos = utils.get_grid_pos(vx_cell, vy_cell, self.vx_grid, self.vy_grid)
            self.vx_marginalized_binned[vx_grid_pos] += self.nspikes_normalized[gid]
            self.vy_marginalized_binned[vy_grid_pos] += self.nspikes_normalized[gid]
            self.vx_marginalized_binned_nonlinear[vx_grid_pos] += self.nspikes_normalized_nonlinear[gid]
            self.vy_marginalized_binned_nonlinear[vy_grid_pos] += self.nspikes_normalized_nonlinear[gid]
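
# Toy illustration (not part of the original code) of prediction methods 1) and 3)
# from the docstring above, for a single time bin with three hypothetical cells.
# All numbers are made up for the example.
toy_vx_tuning = np.array([-1.0, 0.0, 1.0])            # preferred x-velocities of 3 cells
toy_nspikes = np.array([1.0, 2.0, 5.0])               # spike counts in one time bin
toy_confidence = toy_nspikes / toy_nspikes.sum()      # normalized activity = voting weight

# 1) momentary vote: activity-weighted average of the preferred velocities
toy_vx_avg = np.sum(toy_confidence * toy_vx_tuning)   # (1*(-1) + 2*0 + 5*1) / 8 = 0.5

# 3) non-linear vote: shift counts to <= 0, exponentiate (softmax-like), then vote
toy_shifted = toy_nspikes - toy_nspikes.max()
toy_weights = np.exp(toy_shifted) / np.exp(toy_shifted).sum()
toy_vx_non_linear = np.sum(toy_weights * toy_vx_tuning)   # emphasizes the most active cell

print "toy vx_avg:", toy_vx_avg
print "toy vx_non_linear:", toy_vx_non_linear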