def responsive_shuffle_xcorr(cell, centre, whole):
    thresh = 2
    rounds = 50
    samples = len(centre)
    xcorr_c = corr_trial_to_trial(centre.T, 0)
    xcorr_w = corr_trial_to_trial(whole.T, 0)
    shuffles = []
    for i in xrange(rounds):
        shift = np.random.randint(-samples, samples, 1)
        shift_xcorr_c = corr_trial_to_trial(centre.T, shift)
        shift_xcorr_w = corr_trial_to_trial(whole.T, shift)
        shuffles.append([shift_xcorr_c, shift_xcorr_w])
    shuffles = np.array(shuffles)
    shift_xcorr_c = average_corrs(shuffles[:, 0])
    shift_xcorr_w = average_corrs(shuffles[:, 1])
    # responsive if the real trial-to-trial correlation beats the shuffled
    # correlation by the threshold factor in either condition
    active = (xcorr_c > (shift_xcorr_c * thresh) or
              xcorr_w > (shift_xcorr_w * thresh))
    vals = [cell, active, xcorr_c, xcorr_w, shift_xcorr_c, shift_xcorr_w]
    return vals
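# Note: corr_trial_to_trial and average_corrs are helpers defined elsewhere in
# this codebase. Judging by the "fisher z transform" comments used throughout,
# average_corrs presumably averages correlation coefficients in Fisher z-space
# and transforms back. A minimal sketch of that idea follows (hypothetical
# name, an assumption rather than the project's actual implementation; assumes
# numpy is imported as np, as elsewhere in this file):
def _average_corrs_sketch(corrs):
    # clip away from +/-1 so arctanh stays finite, average in z-space,
    # then map the mean back to correlation space with tanh
    corrs = np.clip(np.asarray(corrs, dtype=float), -0.999999, 0.999999)
    return np.tanh(np.nanmean(np.arctanh(corrs)))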
def do_spot_scatter_plot(data, xval, c='k', width=0.2, text=True,
                         mean_adjust=True):
    plt.hold(True)
    dd = data[data != 0]
    r = width / 2.
    for d in data:
        plt.scatter(xval + (np.random.rand(1) - 0.5) * r, d, c=c, marker='x')
    if len(data) > 0:
        # Fisher's z-transform for the mean, then transform back
        if mean_adjust:
            mn = average_corrs(data)
        else:
            mn = data.mean()
        plt.plot([xval - r, xval + r], [mn, mn], 'k', lw=3)
    plt.grid(True, axis='y')
    if text:
        plt.text(xval - r, 0.9, '#%d' % len(dd))
def do_point_line_plot(data, xvals, c=['r', 'b'], width=0.2, mean_adjust=True,
                       text=True, alpha=0.3):
    plt.hold(True)
    dd = data[data.sum(1) != 0]
    r = width / 2.
    ind = np.argsort(data[:, 0])
    data = data[ind]
    for d in data:
        offset = (np.random.rand(1) - 0.5) * r
        x = xvals + offset
        if len(x) > 1:
            plt.plot(x, d, '-', c='0.7', lw=0.5, alpha=alpha)
        for i in range(len(x)):
            plt.scatter(x[i], d[i], c=c[i], marker='x', alpha=alpha)
    if len(data) > 0:
        # Fisher's z-transform for the mean, then transform back
        for i in range(len(xvals)):
            if mean_adjust:
                mn = average_corrs(data[:, i])
            else:
                mn = data[:, i].mean()
            plt.plot([xvals[i] - r, xvals[i] + r], [mn, mn], c[i], lw=4)
            plt.plot([xvals[i] - r, xvals[i] + r], [mn, mn], 'k', lw=1.5)
        # pairwise significance stars: t-test between columns, on
        # arctanh-transformed values when mean_adjust is set
        for j in range(data.shape[1]):
            if data[:, j].sum() == 0:
                continue
            p_offset = -0.2
            for ind in range(j + 1, data.shape[1]):
                if data[:, ind].sum() == 0:
                    continue
                if mean_adjust:
                    _, p = scipy.stats.ttest_ind(np.arctanh(data[:, j]),
                                                 np.arctanh(data[:, ind]))
                else:
                    _, p = scipy.stats.ttest_ind(data[:, j], data[:, ind])
                print 'p', p, j, ind
                if p < 0.05:
                    plt.scatter(xvals[j] + p_offset, 0.95, c=c[ind],
                                edgecolor=c[ind], marker='*')
                    p_offset *= -1
    plt.grid(True, axis='y')
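# Example usage of the two plotting helpers above with hypothetical data
# (illustrative only; the real call sites appear further down in this file):
# 20 paired centre/whole correlations plotted at x = 1 and x = 2, plus a
# single column of correlations scattered at x = 3.
#
#     paired = np.random.rand(20, 2)
#     do_point_line_plot(paired, np.array([1, 2]), c=['r', 'b'], width=0.2)
#     do_spot_scatter_plot(np.random.rand(20), 3, c='k', width=0.2)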
y = plt.ylim()[1]
plt.text(0.7, y * 0.9, 'Avg Act: %.2f' % avg_w)
hist_mx = np.maximum(cnts.max(), hist_mx)
plt.xticks(tks)
plt.title('Mean Activation Histogram')
adjust_spines(hst_ax2, ['bottom', 'left'])
hst_ax1.set_ylim(0, hist_mx)
hst_ax2.set_ylim(0, hist_mx)

ax = plt.subplot(425)
plt.hold(True)
crrs = []
for c, w in zip(dat_c.mean(2), dat_w.mean(2)):
    crrs.append(do_thresh_corr(c, w))
cr = average_corrs(crrs)
vals.append(cr)
csv_vals[4] += crrs
plt.plot(mn_w, label='Whole', color=clr_w, linewidth=1.5)
#plt.fill_between(range(psth_w.shape[1]), mn_c, mn_w,
#                 facecolor=clr2)
plt.plot(mn_c, color=clr_c, label='Center', linewidth=2)
plt.legend(bbox_to_anchor=(0.55, 0.95, 0.4, 0.1), frameon=False, ncol=2)
plt.text(10, 0.9, 'corr: %.2f' % (cr))
adjust_spines(ax, ['bottom', 'left'])
ylim = plt.ylim(0, 1)
plt.ylim([-0.01, ylim[1]])
plt.title('Mean Comparison')
trls = np.arange(X.shape[0])
pred_time, coefs = CV_time(clf, X, y, folds=folds, clf_args=clf_args,
                           edges=[0, 0])
pred_time = pred_time.ravel()
y = y.ravel()
crr = do_thresh_corr(pred_time, y)  # , corr_type='pearsonr')
crrs.append(crr)
res = 'Predict: %s, Using: %s, Dimension: %d, Crr: %.2f' % (
    d, m, dim, crr)
pred_time = (pred_time - pred_time.mean()) / np.std(pred_time)
y = (y - y.mean()) / np.std(y)
# print res
if crr > 0.6:
    plt.figure(figsize=(14, 8))
    plt.hold(True)
    plt.plot(pred_time, label='pred')
    plt.plot(y, label='movie')
    plt.legend()
    plt.title(res)
    plt.show()

crrs = np.array(crrs)
av_crrs = average_corrs(crrs)
print alpha, av_crrs
plt.hist(crrs, bins=30)
plt.title('%.3f %.3f' % (alpha, av_crrs))
plt.show()
# print pred.shape, con_mask.shape
cnt_axs = []
crr_axs = []
cnt_lims = [99999, 0]
crr_lims = [99999, 0]
if exp_type not in shift_max_mn:
    shift_max_mn[exp_type] = {}
for i, k in enumerate(sorted(shift_max.keys())):
    if k not in shift_max_mn[exp_type]:
        shift_max_mn[exp_type][k] = []
    ax = plt.subplot(1, 2, i + 1)
    plt.title(k)
    for s, t in zip(shifts, xaxis_time):
        vals = np.array(shift_max[k][s])
        # fisher z transform
        shift_max_mn[exp_type][k].append(average_corrs(vals))
        do_spot_scatter_plot(vals, t, 'k', 25, True, mean_adjust=True)
        #do_box_plot(crrs, shifts, 'k', np.ones_like(shifts) * 0.5)
    if i == 0:
        plt.ylabel('mean corr of responders')
    plt.xlabel('Shift in Frames')
    adjuster = np.array([-10, 10])
    ax.set_ylim(-0.01, 1)
    ax.set_xlim(np.array([xaxis_time.min(), xaxis_time.max()]) + adjuster)
plt.subplots_adjust(left=0.06, bottom=0.05, right=0.97, top=0.95,
                    wspace=0.23, hspace=0.1)
fig4.savefig(fig_path + '%s_%s_shift_max.eps' % (str(filt), exp_type))
fig4.savefig(fig_path + '%s_%s_shift_max.png' % (str(filt), exp_type))
plt.close(fig4)

ps = []
def plot_corrs(c_vals, w_vals, mn_c_crr, mn_w_crr, n_cells, header, fname):
    fig = plt.figure(figsize=(14, 8))
    fig.set_facecolor('white')
    c_vals = average_corrs(c_vals)
    w_vals = average_corrs(w_vals)
    c_vals_mn = average_corrs(c_vals)
    w_vals_mn = average_corrs(w_vals)
    mn_c_vals_mn = average_corrs(mn_c_crr)
    mn_w_vals_mn = average_corrs(mn_w_crr)
    print mn_c_crr.min(), mn_c_crr.max()

#    ax = plt.subplot(411)
#    plt.hold(True)
#    plt.plot(c_vals.T, '0.8')
#    plt.plot(c_vals_mn, '0.4', linewidth=2)
#    plt.xlim(0, c_vals.shape[1])
#    adjust_spines(ax, ['bottom', 'left'])
#    plt.title('Masked')
#    plt.ylabel('Mean R')
#
#    ax = plt.subplot(412)
#    plt.hold(True)
#    plt.plot(w_vals.T, '0.8')
#    plt.plot(w_vals_mn, '0.4', linewidth=2)
#    plt.xlim(0, w_vals.shape[1])
#    adjust_spines(ax, ['bottom', 'left'])
#    plt.title('Whole Field')
#    plt.ylabel('Mean R')
#
#    ax = plt.subplot(413)
#    plt.hold(True)
#    plt.plot(w_vals_mn, 'g', linewidth=2, label='Whole')
#    plt.plot(c_vals_mn, 'k', linewidth=2, label='Centre')
#    crr = do_thresh_corr(w_vals_mn, c_vals_mn)
#    leg = plt.legend(ncol=2)
#    leg.draw_frame(False)
#    plt.xlim(0, c_vals.shape[1])
#    adjust_spines(ax, ['bottom', 'left'])
#    plt.ylabel('Mean R')
#    plt.xlabel('Sample')
#    plt.title('Whole vs Masked: Crr: {0:.2f}'.format(crr))

    ax = plt.subplot(211)
    plt.hold(True)
    plt.plot(mn_w_vals_mn, 'g', linewidth=2, label='Whole')
    plt.plot(mn_c_vals_mn, 'k', linewidth=2, label='Centre')
    crr = do_thresh_corr(mn_w_vals_mn, mn_c_vals_mn)
    leg = plt.legend(ncol=2)
    leg.draw_frame(False)
    plt.xlim(0, c_vals.shape[1])
    adjust_spines(ax, ['bottom', 'left'])
    plt.title('Mean Whole vs Masked: R^2: {0:.2f}'.format(crr))
    plt.ylabel('Mean R')
    plt.xlabel('Sample')

    ax = plt.subplot(212)
    plt.hold(True)
    plt.plot(mn_c_vals_mn - mn_w_vals_mn, '0.3', linewidth=2)
    leg.draw_frame(False)
    plt.xlim(0, c_vals.shape[1])
    adjust_spines(ax, ['bottom', 'left'])
    plt.title('Difference - Mean Whole vs Masked')
    plt.ylabel('Mean R')
    plt.xlabel('Sample')

    plt.suptitle('%s - Intercell R^2 over Trials - #Cells: %d'
                 % (header, n_cells))
    plt.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.9,
                        wspace=0.2, hspace=0.25)
    fig.savefig(fname + '.eps')
    fig.savefig(fname + '.png')
    #plt.show()
    plt.close(fig)
    print fname
    print n_cells
    print '\tMean\tMax'
    print 'Centre:\t%.3f\t%.3f' % (c_vals_mn.mean(), c_vals.max())
    print 'Whole:\t%.3f\t%.3f' % (w_vals.mean(), w_vals.max())
    print
    continue
mean_corr_c = dat["mean_corr_c"]
mean_corr_w = dat["mean_corr_w"]
win = dat['win']
c_crr = []
w_crr = []
mn_c_crr = []
mn_w_crr = []
print 'try to predict correlation from movie features, try to predict one from another, try to predict movie features from population'
for i in range(mean_corr_c.shape[1]):
    for j in range(mean_corr_c.shape[1]):
        if i < j:
            mn_c_crr.append(mean_corr_c[i, j, win / 2: -(win / 2)])
            mn_w_crr.append(mean_corr_w[i, j, win / 2: -(win / 2)])
dat_c = average_corrs(np.array(mn_c_crr))
dat_w = average_corrs(np.array(mn_w_crr))
lum_mask, con_mask, flow_mask, four_mask, four_mask_shape,\
    freq_mask, orient_mask,\
    lum_surr, con_surr, flow_surr, four_surr, four_surr_shape,\
    freq_surr, orient_surr,\
    lum_whole, con_whole, flow_whole, four_whole, four_whole_shape,\
    freq_whole, orient_whole = load_parsed_movie_dat(exp, 'POP', None)
all_dat = {'Data': {'Centre': dat_c,
                    'Whole': dat_w,
                    'Diff': dat_c - dat_w},
           'Movie': {}}
all_dat['Movie']['Contrast'] = con_mask
all_dat['Movie']['Luminence'] = lum_mask
all_dat['Movie']['Fourier'] = np.append(four_mask.real, four_mask.imag,
                                        axis=2).astype(np.float)
all_dat['Movie']['Frequency'] = freq_mask