def _plot_power_statistics(res_, figure_path, excitatory, valence):
    """Scatter per-mouse half-lick vs. half-power with a linear-fit score.

    Reduces ``res_`` per (mouse, odor_valence), scatters 'half_lick'
    against 'half_power', overlays the identity line, annotates the fit
    score, and saves the figure. The filename suffix encodes
    excitatory ('_E') vs. inhibitory ('_I') plus the valence string.
    """
    name_str = '_E' if excitatory else '_I'
    name_str += '_' + valence
    # Reduce licks and power separately, then align licks onto the power result.
    _ = reduce.new_filter_reduce(res_, filter_keys=['mouse', 'odor_valence'],
                                 reduce_key='half_lick')
    res = reduce.new_filter_reduce(res_, filter_keys=['mouse', 'odor_valence'],
                                   reduce_key='half_power')
    res['half_lick'] = _['half_lick']

    # Local import keeps sklearn optional for callers that never plot this.
    from sklearn import linear_model
    x = res['half_lick'].reshape(-1, 1)
    y = res['half_power'].reshape(-1, 1)
    regr = linear_model.LinearRegression()
    regr.fit(x, y)
    # NOTE(review): LinearRegression.score returns R^2 (coefficient of
    # determination), but the annotation below labels it 'R' -- confirm
    # which statistic is intended.
    score = regr.score(x, y)

    lim = [10, 50]
    ax_lim = {'xlim': lim, 'ylim': lim}
    a, b = plot.plot_results(res, x_key='half_lick', y_key='half_power',
                             plot_args=scatter_args, plot_function=plt.scatter,
                             path=figure_path, save=False, ax_args=ax_lim)
    plt.plot(lim, lim, '--', color='red', alpha=.5, linewidth=1)  # identity line
    plt.text(25, lim[1], 'R = {:.2f}'.format(score), fontsize=5)
    plot._easy_save(a, b + name_str, pdf=True)
def plot_overlap_water(res, start_days, end_days, figure_path):
    """Plot per-mouse US/CS+ and CS+/US overlap on the end days.

    Scatters individual-mouse overlap values per type, then overlays the
    across-mouse mean with SEM error bars and prints the mean overlaps.
    """
    ax_args_copy = overlap_ax_args.copy()
    res = copy.copy(res)
    mice = np.unique(res['mouse'])
    # Only the end (learned) days are analyzed here.
    res = filter.filter_days_per_mouse(res, days_per_mouse=end_days)
    add_naive_learned(res, start_days, end_days)
    ax_args_copy.update({'xlim': [-1, 2]})
    y_keys = ['US/CS+', 'CS+/US']
    summary_res = defaultdict(list)
    for arg in y_keys:
        # _get_overlap_water mutates res in place, writing 'Overlap' for
        # the requested direction before the reduce step.
        _get_overlap_water(res, arg=arg)
        new_res = reduce.new_filter_reduce(
            res, filter_keys=['mouse', 'day', 'odor_valence'],
            reduce_key='Overlap')
        new_res['Type'] = np.array([arg] * len(new_res['training_day']))
        reduce.chain_defaultdicts(summary_res, new_res)
    summary_res.pop('Overlap_sem')
    summary_res.pop('Overlap_std')
    summary_res = filter.filter(summary_res, {'odor_valence': 'CS+'})
    mean_std_res = reduce.new_filter_reduce(summary_res, filter_keys='Type',
                                            reduce_key='Overlap')
    types = np.unique(summary_res['Type'])
    scatter_args_copy = scatter_args.copy()
    scatter_args_copy.update({'s': 2, 'alpha': .6})
    for i, type in enumerate(types):
        # First type starts a fresh figure; subsequent types draw on it.
        reuse_arg = True
        if i == 0:
            reuse_arg = False
        temp = filter.filter(summary_res, {'Type': type})
        plot.plot_results(temp, x_key='Type', y_key='Overlap',
                          loop_keys='mouse', colors=['Black'] * len(mice),
                          plot_function=plt.scatter, path=figure_path,
                          plot_args=scatter_args_copy, ax_args=ax_args_copy,
                          save=False, reuse=reuse_arg, fig_size=(1.5, 1.5),
                          rect=(.25, .25, .6, .6), legend=False)
    # Mean +/- SEM overlay; this call saves the figure.
    plot.plot_results(mean_std_res, x_key='Type', y_key='Overlap',
                      error_key='Overlap_sem', path=figure_path,
                      plot_function=plt.errorbar, plot_args=error_args,
                      ax_args=ax_args, save=True, reuse=True,
                      fig_size=(1.5, 1.5), legend=False)
    print(mean_std_res['Overlap'])
def get_compare_responsive_sig(res):
    """Classify cells as CS+ or CS- responsive per (mouse, day).

    A cell counts as CS+ responsive when it is significant ('ssig' == 1)
    for CS+ AND its mean (CS+ minus CS-) dF/F in the odor-to-water frame
    window is positive; CS- responsive is the mirror case.

    Returns a defaultdict with two rows per (mouse, day) -- one 'CS+',
    one 'CS-' -- carrying the boolean cell masks under 'ssig' and the
    fraction of cells in each class under 'Fraction'.
    """
    key = 'ssig'

    def _helper(res):
        # Expects exactly one CS+ row followed by one CS- row.
        assert res['odor_valence'][0] == 'CS+', 'wrong odor'
        assert res['odor_valence'][1] == 'CS-', 'wrong odor'
        on = res['DAQ_O_ON_F'][0]   # odor-on frame
        off = res['DAQ_W_ON_F'][0]  # water-on frame
        sig_p = res[key][0]
        sig_m = res[key][1]
        dff_p = res['dff'][0]
        dff_m = res['dff'][1]
        sig_p_mask = sig_p == 1
        sig_m_mask = sig_m == 1
        # Sign of the CS+ minus CS- response in the odor window decides
        # which valence a significant cell is assigned to.
        dff_diff = np.mean((dff_p - dff_m)[:, on:off], axis=1)
        p = [a and b for a, b in zip(sig_p_mask, dff_diff > 0)]
        m = [a and b for a, b in zip(sig_m_mask, dff_diff < 0)]
        return np.array(p), np.array(m)

    mice = np.unique(res['mouse'])
    res = filter.filter(res, filter_dict={'odor_valence': ['CS+', 'CS-']})
    sig_res = reduce.new_filter_reduce(
        res, reduce_key=key, filter_keys=['mouse', 'day', 'odor_valence'])
    dff_res = reduce.new_filter_reduce(
        res, reduce_key='dff', filter_keys=['mouse', 'day', 'odor_valence'])
    sig_res['dff'] = dff_res['dff']

    new_res = defaultdict(list)
    for mouse in mice:
        mouse_res = filter.filter(sig_res, filter_dict={'mouse': mouse})
        for day in np.unique(mouse_res['day']):
            mouse_day_res = filter.filter(mouse_res, filter_dict={'day': day})
            p, m = _helper(mouse_day_res)
            # Two rows per (mouse, day): CS+ then CS-.
            new_res['mouse'] += [mouse, mouse]
            new_res['day'] += [day, day]
            new_res['odor_valence'] += ['CS+', 'CS-']
            new_res[key] += [p, m]
            new_res['Fraction'] += [np.mean(p), np.mean(m)]
    # Use 'k' here: rebinding 'key' would shadow the 'ssig' key variable.
    for k, val in new_res.items():
        new_res[k] = np.array(val)
    return new_res
def plot_bar(res, days_per_mouse, odor_valence, day_pad, save, reuse,
             figure_path, color='black', normalize=False):
    """Plot summary max dF/F across the given day groups as a line.

    Each entry of ``days_per_mouse`` (paired with ``odor_valence``) becomes
    one x position (offset by ``day_pad``). Values are reduced per
    (valence, mouse, day) and then summarized per (day, valence).
    """
    res = copy.copy(res)
    res['day_'] = np.zeros_like(res['day'])
    res_ = defaultdict(list)
    for i, days in enumerate(days_per_mouse):
        temp = filter.filter_days_per_mouse(res, days_per_mouse=days)
        temp = filter.filter(temp, {'odor_valence': odor_valence[i]})
        # Collapse this day group onto a single x coordinate.
        temp['day_'] = np.array([i + day_pad] * len(temp['day_']))
        reduce.chain_defaultdicts(res_, temp)
    _max_dff(res_)
    res_ = reduce.new_filter_reduce(
        res_, filter_keys=['odor_valence', 'mouse', 'day_'],
        reduce_key='max_dff')
    res_.pop('max_dff_sem')
    summary = reduce.new_filter_reduce(res_, filter_keys=['day_', 'odor_valence'],
                                       reduce_key='max_dff')
    if normalize:
        _normalize_across_days(summary)
    line_args_copy = line_args.copy()
    line_args_copy.update({'alpha': .75, 'linewidth': 1})
    # name_str uses the last valence so repeated calls can share a file.
    plot.plot_results(summary, x_key='day_', y_key='max_dff',
                      path=figure_path, colors=color, legend=False,
                      plot_args=line_args_copy, fig_size=(2, 1.5),
                      save=save, reuse=reuse, name_str=odor_valence[-1])
def plot_max_dff_valence(res, start_days, end_days, figure_path): res = copy.copy(res) # list_of_days = list(zip(start_days, end_days)) list_of_days = end_days start_end_day_res = filter.filter_days_per_mouse( res, days_per_mouse=list_of_days) start_end_day_res = filter.filter(start_end_day_res, {'odor_valence': ['CS+', 'CS-']}) _max_dff(start_end_day_res) start_end_day_res = reduce.new_filter_reduce( start_end_day_res, filter_keys=['odor_valence', 'mouse'], reduce_key='max_dff') add_naive_learned(start_end_day_res, start_days, end_days) ax_args_copy = ax_args.copy() # ax_args_copy.update({'xticks':[res['DAQ_O_ON_F'][-1], res['DAQ_W_ON_F'][-1]], 'xticklabels':['ON', 'US'], # 'ylim':[0, .2]}) nMice = len(np.unique(res['mouse'])) # colors = ['Green'] * nMice + ['Red'] * nMice # trace_args_copy = trace_args.copy() # trace_args_copy.update({'linestyle':'--','alpha':.5, 'linewidth':.75}) plot.plot_results(start_end_day_res, loop_keys='mouse', x_key='odor_valence', y_key='max_dff', path=figure_path, colors=['gray'] * 10, legend=False, fig_size=(2, 1.5))
def plot_correlation_matrix(res, days, loop_keys, shuffle, figure_path,
                            odor_end=True, direction=0):
    """Plot the odor-by-odor correlation matrix for the given days.

    Filters to the requested days, computes pairwise correlations, averages
    them per odor pair, and renders a masked heatmap. Returns the
    unreduced correlation result.
    """
    res = filter.filter_days_per_mouse(res, days)
    corr_res = _correlation(res, loop_keys, shuffle, odor_end,
                            direction=direction)
    mean_res = reduce.new_filter_reduce(corr_res,
                                        filter_keys=['Odor_A', 'Odor_B'],
                                        reduce_key='corrcoef')
    suffix = '_shuffled' if shuffle else ''
    tick_labels = ['CS+1', 'CS+2', 'CS-1', 'CS-2']
    # Filename annotation: days, optional shuffle marker, and direction.
    annotation = ','.join(str(x) for x in days) + suffix + \
        '_direction_' + str(direction)
    plot.plot_weight(mean_res, x_key='Odor_A', y_key='Odor_B',
                     val_key='corrcoef', title='Correlation',
                     label='Correlation', vmin=0, vmax=1, mask=True,
                     xticklabel=tick_labels, yticklabel=tick_labels,
                     save_path=figure_path, text=annotation)
    return corr_res
def get_licks_per_day(data_path, condition, return_raw=False):
    """Load behavior and return lick occurrence per (odor, day, mouse).

    Loads all sessions from ``data_path``, annotates indices and times,
    converts to lick records, and flags trials with any licking under
    'lick_boolean'. With ``return_raw`` True, returns the trial-level
    records (with odor values added) instead of the reduced summary.
    """
    cons = analysis.load_all_cons(data_path)
    analysis.add_indices(cons)
    analysis.add_time(cons)
    raw = convert(cons, condition)
    # Boolean per trial: did the animal lick at all?
    raw['lick_boolean'] = np.array([count > 0 for count in raw['lick']])
    reduced = reduce.new_filter_reduce(raw, ['odor', 'day', 'mouse'],
                                       'lick_boolean')
    if return_raw:
        add_odor_value(raw, condition)
        return raw
    return reduced
def get_roc(res):
    """Compute a sliding-window ROC-AUC separating CS+ from CS- licking.

    For each mouse, lick traces are reduced per valence, cut into rolling
    windows of 10 trials, and the ROC-AUC between the CS+ and CS- window
    distributions is stored under 'roc' (with its x-axis under
    'roc_trial') for both valence rows. Returns the reduced result.
    """
    def _roc(a, b):
        # AUC of a classifier labeling samples drawn from `a` as positive.
        import sklearn.metrics
        data = np.concatenate((a, b))
        labels = np.concatenate(
            (np.ones_like(a), np.zeros_like(b))).astype('bool')
        return sklearn.metrics.roc_auc_score(labels, data)

    def _rolling_window(a, window):
        # Stride-trick view of all length-`window` windows of `a` (no copy).
        shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
        strides = a.strides + (a.strides[-1], )
        return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)

    key = 'lick'
    print_key = 'roc'
    x_key = 'roc_trial'
    window = 10
    res[print_key] = np.copy(res[key])
    res[x_key] = np.copy(res['trial'])
    res = filter.exclude(res, {'odor_valence': 'US'})
    res_ = reduce.new_filter_reduce(res, filter_keys=['mouse', 'odor_valence'],
                                    reduce_key=key)
    combinations, list_of_ixs = filter.retrieve_unique_entries(res_, ['mouse'])
    for ixs in list_of_ixs:
        # Each mouse must contribute exactly one CS+ and one CS- row.
        assert len(ixs) == 2
        assert res_['odor_valence'][ixs[0]] == 'CS+'
        assert res_['odor_valence'][ixs[1]] == 'CS-'
        a = _rolling_window(res_[key][ixs[0]], window)
        b = _rolling_window(res_[key][ixs[1]], window)
        # These are ROC-AUC values (the former name 'dprimes' was a
        # leftover from a removed, unused d-prime helper).
        rocs = np.array([_roc(x, y) for x, y in zip(a, b)])
        res_[print_key][ixs[0]] = rocs
        res_[print_key][ixs[1]] = rocs
        res_[x_key][ixs[0]] = np.arange(len(rocs))
        res_[x_key][ixs[1]] = np.arange(len(rocs))
    return res_
def _helper(plot_res, color='green'):
    # Plot per-mouse PCA distance by title, then overlay the across-mouse
    # mean (line) and SEM (error bars) via three stacked plot_results calls.
    # NOTE(review): relies on enclosing-scope names (mice, figure_path,
    # summary_ax_args, ax_args, line_args, summary_line_args, error_args)
    # -- this looks like a nested helper; confirm against its outer def.
    mean_std_res = reduce.new_filter_reduce(
        plot_res, reduce_key='PCA Distance', filter_keys='title')
    titles = np.unique(plot_res['title'])
    summary_ax_args_ = copy.copy(summary_ax_args)
    summary_ax_args_.update({'xlim': [-1, len(titles)]})
    # One line per mouse.
    plot.plot_results(plot_res, x_key='title', y_key='PCA Distance',
                      loop_keys='mouse', select_dict={'title': titles},
                      colors=[color] * mice.size, ax_args=summary_ax_args_,
                      plot_function=plt.plot, plot_args=summary_line_args,
                      path=figure_path, save=False)
    # Mean across mice, drawn on the same figure.
    plot.plot_results(mean_std_res, x_key='title', y_key='PCA Distance',
                      select_dict={'title': titles}, colors=color,
                      plot_function=plt.plot, ax_args=ax_args,
                      plot_args=line_args, path=figure_path,
                      reuse=True, save=False)
    # SEM error bars; the final call saves the figure.
    plot.plot_results(mean_std_res, x_key='title', y_key='PCA Distance',
                      select_dict={'title': titles},
                      error_key='PCA Distance_sem', colors=color,
                      plot_function=plt.errorbar, ax_args=summary_ax_args_,
                      plot_args=error_args, legend=False,
                      path=figure_path, reuse=True, save=True)
def plot_individual(res, lick_res, figure_path):
    """Per-mouse figure: CS+ responsive-cell fraction across days, with
    the CS+ lick behavior overlaid on a twin axis."""
    ax_args_copy = ax_args.copy()
    ax_args_copy.update({'ylim': [0, .65], 'yticks': [0, .3, .6]})
    overlap_ax_args_copy = overlap_ax_args.copy()
    res = copy.copy(res)
    get_responsive_cells(res)
    summary_res = reduce.new_filter_reduce(
        res, reduce_key='Fraction Responsive',
        filter_keys=['odor_valence', 'mouse', 'day'])
    summary_res = filter.filter(summary_res, {'odor_valence': 'CS+'})
    lick_res = filter.filter(lick_res, {'odor_valence': 'CS+'})
    for mouse in np.unique(summary_res['mouse']):
        select_dict = {'mouse': mouse}
        # Neural fraction first; the figure stays open for the lick overlay.
        plot.plot_results(summary_res, x_key='day',
                          y_key='Fraction Responsive',
                          loop_keys='odor_valence',
                          colors=['green', 'red', 'turquoise'],
                          select_dict=select_dict, path=figure_path,
                          ax_args=ax_args_copy, plot_args=line_args,
                          save=False, sort=True)
        # Behavior on a twin y-axis; this call saves the per-mouse figure.
        plot.plot_results(lick_res, x_key='day', y_key='lick_boolean',
                          loop_keys='odor_valence',
                          select_dict={'mouse': mouse},
                          colors=['green', 'red'],
                          ax_args=overlap_ax_args_copy,
                          plot_args=behavior_line_args, path=figure_path,
                          reuse=True, save=True, twinax=True)
def _average_velocity(res, save_path):
    """Plot trial-averaged running velocity per odor-valence grouping.

    One figure per grouping in ``valences``; mean and SEM traces are
    smoothed with a window-21, polyorder-0 Savitzky-Golay filter, which
    is equivalent to a moving average.
    """
    xkey = 'trial'
    ykey = 'velocity'
    error_key = ykey + '_sem'
    color_dict = {'CS+': 'green', 'CS-': 'red', 'PT CS+': 'C1'}
    valences = [['CS+', 'CS-', 'PT CS+'], ['CS+', 'CS-'], ['PT CS+']]
    for valence in valences:
        colors = [color_dict[x] for x in valence]
        temp = filter.filter(res, {'odor_valence': valence})
        # Event frames come from the first row; assumes they are shared
        # across all filtered rows -- TODO confirm.
        start = temp['on'][0]
        off = temp['off'][0]
        end = temp['end'][0]
        # NOTE: this local ax_args shadows any module-level ax_args here.
        ax_args = {
            'xticks': [start, off, end],
            'xticklabels': ['ON', 'OFF', 'US'],
            'ylim': [-5, 50]
        }
        mean_res = reduce.new_filter_reduce(temp,
                                            filter_keys=['odor_valence'],
                                            reduce_key=ykey)
        # polyorder=0 -> plain moving average over 21 trials.
        for i, v in enumerate(mean_res[ykey]):
            v_ = savgol_filter(v, window_length=21, polyorder=0)
            mean_res[ykey][i] = v_
        for i, v in enumerate(mean_res[error_key]):
            v_ = savgol_filter(v, window_length=21, polyorder=0)
            mean_res[error_key][i] = v_
        plot.plot_results(mean_res, x_key=xkey, y_key=ykey,
                          loop_keys='odor_valence',
                          select_dict={'odor_valence': valence},
                          error_key=error_key,
                          plot_function=plt.fill_between, colors=colors,
                          plot_args=fill_args, ax_args=ax_args,
                          path=save_path)
def plot_summary_odor(res, start_days, end_days, use_colors=True,
                      figure_path=None, reuse=False, excitatory=True):
    """Plot per-mouse fraction of responsive cells before vs. after training.

    Draws one line per mouse for CS+ and CS- (naive day 'a' vs. learned
    day 'b'), then prints paired Wilcoxon statistics comparing the days.
    """
    ax_args_copy = ax_args.copy()
    res = copy.copy(res)
    get_responsive_cells(res)
    list_of_days = list(zip(start_days, end_days))
    mice = np.unique(res['mouse'])
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    add_naive_learned(start_end_day_res, start_days, end_days, 'a', 'b')
    filter.assign_composite(start_end_day_res,
                            loop_keys=['odor_valence', 'training_day'])
    start_end_day_res = reduce.new_filter_reduce(
        start_end_day_res,
        filter_keys=['training_day', 'mouse', 'odor_valence'],
        reduce_key='Fraction Responsive')
    odor_list = ['CS+', 'CS-']
    if use_colors:
        colors = ['Green', 'Red']
    else:
        colors = ['Black'] * 2
    ax_args_copy = ax_args_copy.copy()
    ax_args_copy.update({
        'xlim': [-1, 8],
        'ylim': [0, .4],
        'yticks': [0, .1, .2, .3, .4]
    })
    name_str = '_E' if excitatory else '_I'
    for i, odor in enumerate(odor_list):
        # Only the last valence saves the figure; only the first may start
        # a fresh one (unless the caller asked to reuse an existing figure).
        save_arg = i == len(odor_list) - 1
        reuse_arg = not (i == 0 and not reuse)
        plot.plot_results(start_end_day_res,
                          select_dict={'odor_valence': odor},
                          x_key='odor_valence_training_day',
                          y_key='Fraction Responsive', loop_keys='mouse',
                          colors=[colors[i]] * len(mice), path=figure_path,
                          plot_args=line_args, ax_args=ax_args_copy,
                          save=save_arg, reuse=reuse_arg,
                          fig_size=(2.5, 1.5), legend=False,
                          name_str=','.join([str(x) for x in start_days]) + name_str)

    # Re-derive per-odor-standard fractions for the statistics below.
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    add_naive_learned(start_end_day_res, start_days, end_days, 'a', 'b')
    filter.assign_composite(start_end_day_res,
                            loop_keys=['odor_valence', 'training_day'])
    start_end_day_res = reduce.new_filter_reduce(
        start_end_day_res,
        filter_keys=['training_day', 'mouse', 'odor_standard'],
        reduce_key='Fraction Responsive')
    before_csp = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'a', 'odor_valence': 'CS+'})
    after_csp = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'b', 'odor_valence': 'CS+'})
    before_csm = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'a', 'odor_valence': 'CS-'})
    after_csm = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'b', 'odor_valence': 'CS-'})
    try:
        from scipy.stats import wilcoxon
        print('Before CS+: {}'.format(
            np.mean(before_csp['Fraction Responsive'])))
        print('After CS+: {}'.format(np.mean(
            after_csp['Fraction Responsive'])))
        print('Wilcoxin:{}'.format(
            wilcoxon(before_csp['Fraction Responsive'],
                     after_csp['Fraction Responsive'])))
        print('Before CS-: {}'.format(
            np.mean(before_csm['Fraction Responsive'])))
        print('After CS-: {}'.format(np.mean(
            after_csm['Fraction Responsive'])))
        print('Wilcoxin:{}'.format(
            wilcoxon(before_csm['Fraction Responsive'],
                     after_csm['Fraction Responsive'])))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate; stats remain best-effort.
        print('stats didnt work')
def plot_responsive_difference_odor_and_water(res, odor_start_days,
                                              water_start_days, end_days,
                                              use_colors=True,
                                              figure_path=None,
                                              include_water=True,
                                              normalize=False,
                                              pt_start=None, pt_learned=None,
                                              average=True, ylim=.22,
                                              reuse_arg=False, save_arg=True):
    """Plot the change (or normalized ratio) in responsive-cell fraction
    from naive to learned days, per valence, with a summary errorbar.

    NOTE(review): a function with this same name is defined again later
    in this module with a different signature; only the later definition
    survives import -- confirm which one callers intend.
    """
    key = 'Change in Fraction Responsive'
    if normalize:
        key = 'Norm. Fraction Responsive'

    def _helper(start_end_day_res):
        # For every (mouse, odor_standard) pair of rows, compute learned
        # minus naive (or learned / naive when normalizing) in place.
        combs, list_of_ixs = filter.retrieve_unique_entries(
            start_end_day_res, ['mouse', 'odor_standard'])
        for i, comb in enumerate(combs):
            ixs = list_of_ixs[i]
            assert len(ixs) == 2
            if start_end_day_res['training_day'][0] == 'Naive':
                ref = ixs[0]
                test = ixs[1]
            elif start_end_day_res['training_day'][0] == 'Learned':
                ref = ixs[1]
                test = ixs[0]
            else:
                raise ValueError('cannot find ref day')
            if normalize:
                start_end_day_res[key][test] = \
                    start_end_day_res['Fraction Responsive'][test] / \
                    start_end_day_res['Fraction Responsive'][ref]
                start_end_day_res[key][ref] = 1
            else:
                start_end_day_res[key][test] = \
                    start_end_day_res['Fraction Responsive'][test] - \
                    start_end_day_res['Fraction Responsive'][ref]
                start_end_day_res[key][ref] = 0

    ax_args_copy = ax_args.copy()
    res = copy.copy(res)
    get_responsive_cells(res)
    list_of_days = list(zip(odor_start_days, end_days))
    mice = np.unique(res['mouse'])
    res[key] = np.zeros_like(res['Fraction Responsive'])
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    start_end_day_res = filter.filter(
        start_end_day_res, {'odor_valence': ['CS+', 'CS-', 'Naive']})
    add_naive_learned(start_end_day_res, odor_start_days, end_days)
    odors = ['CS+', 'CS-', 'Naive']
    if 'PT CS+' in np.unique(res['odor_valence']):
        # Pre-training CS+ uses its own start/learned day pairing.
        odors = ['PT CS+'] + odors
        list_of_days = list(zip(pt_start, pt_learned))
        start_end_day_res_pt = filter.filter_days_per_mouse(
            res, days_per_mouse=list_of_days)
        start_end_day_res_pt = filter.filter(start_end_day_res_pt,
                                             {'odor_valence': 'PT CS+'})
        add_naive_learned(start_end_day_res_pt, pt_start, pt_learned)
        reduce.chain_defaultdicts(start_end_day_res, start_end_day_res_pt)
    if include_water:
        odors += ['US']
        list_of_days = list(zip(water_start_days, end_days))
        start_end_day_res_water = filter.filter_days_per_mouse(
            res, days_per_mouse=list_of_days)
        start_end_day_res_water = filter.filter(start_end_day_res_water,
                                                {'odor_valence': 'US'})
        add_naive_learned(start_end_day_res_water, water_start_days,
                          end_days)
        reduce.chain_defaultdicts(start_end_day_res,
                                  start_end_day_res_water)
    filter.assign_composite(start_end_day_res,
                            loop_keys=['odor_standard', 'training_day'])
    if average:
        start_end_day_res = reduce.new_filter_reduce(
            start_end_day_res,
            filter_keys=['odor_valence', 'mouse', 'training_day'],
            reduce_key=key)
        start_end_day_res.pop(key + '_sem')
    _helper(start_end_day_res)
    start_end_day_res = filter.filter(start_end_day_res,
                                      {'training_day': 'Learned'})
    summary_res = reduce.new_filter_reduce(start_end_day_res,
                                           filter_keys='odor_valence',
                                           reduce_key=key)
    # Renamed from 'dict': the old name shadowed the builtin.
    color_dict = {
        'CS+': 'Green',
        'CS-': 'Red',
        'US': 'Turquoise',
        'PT CS+': 'Orange',
        'Naive': 'Gray'
    }
    if use_colors:
        colors = [
            color_dict[valence]
            for valence in np.unique(start_end_day_res['odor_valence'])
        ]
    else:
        colors = ['Black'] * 6
    ax_args_copy = ax_args_copy.copy()
    ax_args_copy.update({
        'xlim': [-.5, 3.5],
        'ylim': [-ylim, ylim],
        'yticks': [-.3, -.2, -.1, 0, .1, .2, .3]
    })
    if normalize:
        ax_args_copy.update({
            'xlim': [-.5, 3.5],
            'ylim': [-.1, 1.5],
            'yticks': [0, .5, 1, 1.5]
        })
    error_args_ = {
        'fmt': '.',
        'capsize': 2,
        'elinewidth': 1,
        'markersize': 2,
        'alpha': .75
    }
    scatter_args_copy = scatter_args.copy()
    scatter_args_copy.update({'s': 3})
    for i, odor in enumerate(odors):
        # First valence honors the caller's reuse flag; the rest overlay.
        reuse = True
        if i == 0:
            reuse = reuse_arg
        plot.plot_results(start_end_day_res, loop_keys='odor_valence',
                          select_dict={'odor_valence': odor},
                          x_key='odor_valence', y_key=key,
                          colors=[color_dict[odor]] * len(mice),
                          path=figure_path, plot_args=scatter_args_copy,
                          plot_function=plt.scatter, ax_args=ax_args_copy,
                          save=False, reuse=reuse, fig_size=(2, 1.5),
                          rect=(.25, .2, .6, .6), legend=False,
                          name_str=','.join(
                              [str(x) for x in odor_start_days]))
    if not normalize:
        # Zero-change reference line.
        plt.plot(plt.xlim(), [0, 0], '--', color='gray', linewidth=1,
                 alpha=.5)
    plot.plot_results(summary_res, x_key='odor_valence', y_key=key,
                      error_key=key + '_sem', colors='black',
                      path=figure_path, plot_args=error_args_,
                      plot_function=plt.errorbar, ax_args=ax_args_copy,
                      save=save_arg, reuse=True, fig_size=(2, 1.5),
                      legend=False)
def plot_summary_odor_and_water(res, odor_start_days, water_start_days,
                                end_days, use_colors=True, excitatory=True,
                                arg='odor_valence', figure_path=None):
    """Plot fraction of responsive cells (odors and water) before vs.
    after learning, one line per mouse, then print Wilcoxon statistics.

    ``arg`` selects the grouping: 'odor_valence' (CS+/CS-), 'naive'
    (CS+ only, subsequently treated as odor_valence), or anything else
    to plot the four odor_standard identities.
    """
    include_water = True
    ax_args_copy = ax_args.copy()
    res = copy.copy(res)
    get_responsive_cells(res)
    mice = np.unique(res['mouse'])
    list_of_days = list(zip(odor_start_days, end_days))
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    start_end_day_res = filter.exclude(start_end_day_res,
                                       {'odor_valence': 'US'})
    add_naive_learned(start_end_day_res, odor_start_days, end_days, 'a', 'b')
    if include_water:
        # Water (US) rows use their own start days before merging in.
        list_of_days = list(zip(water_start_days, end_days))
        start_end_day_res_water = filter.filter_days_per_mouse(
            res, days_per_mouse=list_of_days)
        start_end_day_res_water = filter.filter(start_end_day_res_water,
                                                {'odor_valence': 'US'})
        add_naive_learned(start_end_day_res_water, water_start_days,
                          end_days, 'a', 'b')
        reduce.chain_defaultdicts(start_end_day_res,
                                  start_end_day_res_water)
    ax_args_copy = ax_args_copy.copy()
    if arg == 'odor_valence':
        start_end_day_res = reduce.new_filter_reduce(
            start_end_day_res,
            filter_keys=['training_day', 'mouse', 'odor_valence'],
            reduce_key='Fraction Responsive')
        odor_list = ['CS+', 'CS-']
        ax_args_copy.update({
            'xlim': [-1, 6],
            'ylim': [0, .6],
            'yticks': [0, .1, .2, .3, .4, .5]
        })
        colors = ['Green', 'Red']
    elif arg == 'naive':
        # Naive mode plots CS+ only but reuses the odor_valence keys.
        arg = 'odor_valence'
        start_end_day_res = reduce.new_filter_reduce(
            start_end_day_res,
            filter_keys=['training_day', 'mouse', 'odor_valence'],
            reduce_key='Fraction Responsive')
        odor_list = ['CS+']
        ax_args_copy.update({
            'xlim': [-1, 4],
            'ylim': [0, .6],
            'yticks': [0, .1, .2, .3, .4, .5]
        })
        colors = ['GoldenRod']
    else:
        odor_list = ['CS+1', 'CS+2', 'CS-1', 'CS-2']
        colors = ['Green', 'Green', 'Red', 'Red']
        ax_args_copy.update({
            'xlim': [-1, 10],
            'ylim': [0, .6],
            'yticks': [0, .1, .2, .3, .4, .5]
        })
    filter.assign_composite(start_end_day_res,
                            loop_keys=[arg, 'training_day'])
    if not use_colors:
        colors = ['Black'] * 4
    name_str = '_E' if excitatory else '_I'
    for i, odor in enumerate(odor_list):
        # First odor opens a fresh figure; the rest overlay on it.
        reuse_arg = i != 0
        plot.plot_results(start_end_day_res, select_dict={arg: odor},
                          x_key=arg + '_training_day',
                          y_key='Fraction Responsive', loop_keys='mouse',
                          colors=[colors[i]] * len(mice),
                          path=figure_path, plot_args=line_args,
                          ax_args=ax_args_copy, save=False,
                          reuse=reuse_arg, fig_size=(2.5, 1.5),
                          legend=False,
                          name_str=','.join(
                              [str(x) for x in odor_start_days]))
    # Water overlay; this call saves the figure.
    plot.plot_results(start_end_day_res,
                      select_dict={'odor_standard': 'US'},
                      x_key='training_day', y_key='Fraction Responsive',
                      loop_keys='mouse', colors=['Turquoise'] * len(mice),
                      path=figure_path, plot_args=line_args,
                      ax_args=ax_args_copy, fig_size=(1.6, 1.5),
                      legend=False, reuse=True, save=True,
                      name_str=name_str)
    before_odor = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'a', 'odor_valence': ['CS+', 'CS-']})
    after_odor = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'b', 'odor_valence': ['CS+', 'CS-']})
    before_csp = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'a', 'odor_valence': 'CS+'})
    after_csp = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'b', 'odor_valence': 'CS+'})
    before_csm = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'a', 'odor_valence': 'CS-'})
    after_csm = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'b', 'odor_valence': 'CS-'})
    before_water = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'a', 'odor_valence': 'US'})
    after_water = filter.filter(start_end_day_res, filter_dict={
        'training_day': 'b', 'odor_valence': 'US'})
    try:
        from scipy.stats import wilcoxon
        print('Before Odor: {}'.format(
            np.mean(before_odor['Fraction Responsive'])))
        print('After Odor: {}'.format(
            np.mean(after_odor['Fraction Responsive'])))
        print('Wilcoxin:{}'.format(
            wilcoxon(before_odor['Fraction Responsive'],
                     after_odor['Fraction Responsive'])))
        print('Before CS+: {}'.format(
            np.mean(before_csp['Fraction Responsive'])))
        print('After CS+: {}'.format(np.mean(
            after_csp['Fraction Responsive'])))
        print('Wilcoxin:{}'.format(
            wilcoxon(before_csp['Fraction Responsive'],
                     after_csp['Fraction Responsive'])))
        print('Before CS-: {}'.format(
            np.mean(before_csm['Fraction Responsive'])))
        print('After CS-: {}'.format(np.mean(
            after_csm['Fraction Responsive'])))
        print('Wilcoxin:{}'.format(
            wilcoxon(before_csm['Fraction Responsive'],
                     after_csm['Fraction Responsive'])))
        print('Before US: {}'.format(
            np.mean(before_water['Fraction Responsive'])))
        print('After US: {}'.format(np.mean(
            after_water['Fraction Responsive'])))
        print('Wilcoxin:{}'.format(
            wilcoxon(before_water['Fraction Responsive'],
                     after_water['Fraction Responsive'])))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate; stats remain best-effort.
        print('stats didnt work')
def plot_responsive_difference_odor_and_water(res, odor_start_days, end_days,
                                              use_colors=True,
                                              figure_path=None,
                                              normalize=False, ylim=.6):
    """Plot the change (or normalized ratio) in the fraction of
    significantly responsive cells from naive to learned days.

    NOTE(review): this redefines a same-named function declared earlier
    in this module with a different signature; only this later
    definition survives import -- confirm which one callers intend.
    """
    key = 'Change in Fraction'
    if normalize:
        key = 'Norm. Fraction'

    def _helper(start_end_day_res):
        # For every (mouse, odor_valence) pair of rows, compute learned
        # minus naive (or learned / naive when normalizing) in place.
        combs, list_of_ixs = filter.retrieve_unique_entries(
            start_end_day_res, ['mouse', 'odor_valence'])
        for i, comb in enumerate(combs):
            ixs = list_of_ixs[i]
            assert len(ixs) == 2
            if start_end_day_res['training_day'][0] == 'Naive':
                ref = ixs[0]
                test = ixs[1]
            elif start_end_day_res['training_day'][0] == 'Learned':
                ref = ixs[1]
                test = ixs[0]
            else:
                raise ValueError('cannot find ref day')
            if normalize:
                start_end_day_res[key][test] = \
                    start_end_day_res['Fraction'][test] / \
                    start_end_day_res['Fraction'][ref]
                start_end_day_res[key][ref] = 1
            else:
                start_end_day_res[key][test] = \
                    start_end_day_res['Fraction'][test] - \
                    start_end_day_res['Fraction'][ref]
                start_end_day_res[key][ref] = 0

    ax_args_copy = ax_args.copy()
    res = get_compare_responsive_sig(res)
    list_of_days = list(zip(odor_start_days, end_days))
    mice = np.unique(res['mouse'])
    res[key] = np.zeros_like(res['Fraction'])
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    start_end_day_res = filter.filter(start_end_day_res,
                                      {'odor_valence': ['CS+', 'CS-']})
    add_naive_learned(start_end_day_res, odor_start_days, end_days)
    _helper(start_end_day_res)
    start_end_day_res = filter.filter(start_end_day_res,
                                      {'training_day': 'Learned'})
    summary_res = reduce.new_filter_reduce(start_end_day_res,
                                           filter_keys='odor_valence',
                                           reduce_key=key)
    # Renamed from 'dict': the old name shadowed the builtin.
    color_dict = {'CS+': 'Green', 'CS-': 'Red'}
    if use_colors:
        colors = [
            color_dict[valence]
            for valence in np.unique(start_end_day_res['odor_valence'])
        ]
    else:
        colors = ['Black'] * 6
    ax_args_copy = ax_args_copy.copy()
    n_valence = len(np.unique(summary_res['odor_valence']))
    ax_args_copy.update({
        'xlim': [-1, n_valence],
        'ylim': [-ylim, ylim],
        'yticks': np.arange(-1, 1, .2)
    })
    if normalize:
        ax_args_copy.update({
            'xlim': [-1, n_valence],
            'ylim': [-.1, 1.5],
            'yticks': [0, .5, 1, 1.5]
        })
    scatter_args_copy = scatter_args.copy()
    scatter_args_copy.update({'s': 8})
    odors = ['CS+', 'CS-']
    for i, odor in enumerate(odors):
        # First valence opens a fresh figure; the second overlays it.
        reuse = True
        if i == 0:
            reuse = False
        plot.plot_results(start_end_day_res, loop_keys='odor_valence',
                          select_dict={'odor_valence': odor},
                          x_key='odor_valence', y_key=key,
                          colors=[color_dict[odor]] * len(mice),
                          path=figure_path, plot_args=scatter_args_copy,
                          plot_function=plt.scatter, ax_args=ax_args_copy,
                          save=False, reuse=reuse, fig_size=(2, 1.5),
                          legend=False,
                          name_str=','.join(
                              [str(x) for x in odor_start_days]))
    if not normalize:
        # Zero-change reference line.
        plt.plot(plt.xlim(), [0, 0], '--', color='gray', linewidth=1,
                 alpha=.5)
    plot.plot_results(summary_res, x_key='odor_valence', y_key=key,
                      error_key=key + '_sem', colors='black',
                      path=figure_path, plot_args=error_args,
                      plot_function=plt.errorbar, ax_args=ax_args_copy,
                      save=True, reuse=True, fig_size=(2, 1.5),
                      legend=False)
def plot_correlation_across_days(res, days, loop_keys, shuffle, figure_path,
                                 reuse, save, analyze, plot_bool,
                                 odor_end=True):
    """Analyze and/or plot odor-representation correlations across days.

    With ``analyze`` True: computes per-mouse correlations between the
    paired days, keeps the (Odor_A=0, Odor_B=1) entries, reduces per
    (mouse, odor_standard), and returns the result. With ``plot_bool``
    True: treats ``res`` as an already-analyzed result and plots a
    per-point swarm plus a mean/SEM errorbar summary.
    """
    if analyze:
        res_ = defaultdict(list)
        for day_list in days:
            d = list(zip(day_list[0], day_list[1]))
            res_temp = filter.filter_days_per_mouse(res, d)
            corr_res = _correlation(res_temp, loop_keys, shuffle,
                                    odor_end=odor_end)
            reduce.chain_defaultdicts(res_, corr_res)
        # Keep only the cross-day (first vs. second odor) entries.
        res_ = filter.filter(res_, {'Odor_A': 0, 'Odor_B': 1})
        res_ = reduce.new_filter_reduce(
            res_, filter_keys=['mouse', 'odor_standard'],
            reduce_key='corrcoef')
        res_.pop('corrcoef_sem')
        return res_

    if plot_bool:
        res_ = res
        s = '_shuffled' if shuffle else ''
        ax_args_copy = ax_args.copy()
        ax_args_copy.update({
            'xlim': [-.5, 2.5],
            'ylim': [0, 1.05],
            'yticks': np.arange(0, 1.1, .2)
        })
        swarm_args_copy = swarm_args.copy()
        swarm_args_copy.update({'palette': ['green', 'red', 'gray']})
        plot.plot_results(res_, x_key='odor_valence', y_key='corrcoef',
                          path=figure_path, plot_args=swarm_args_copy,
                          plot_function=sns.stripplot,
                          ax_args=ax_args_copy, reuse=reuse, save=False,
                          sort=True, name_str=s)
        summary = reduce.new_filter_reduce(res_,
                                           filter_keys=['odor_valence'],
                                           reduce_key='corrcoef')
        plot.plot_results(summary, x_key='odor_valence', y_key='corrcoef',
                          error_key='corrcoef_sem', colors='black',
                          path=figure_path, plot_args=error_args,
                          plot_function=plt.errorbar, save=save,
                          reuse=True, legend=False, name_str=s)
        # (Removed an unused `from scipy.stats import ranksums` here.)
        print(summary['corrcoef'])
def plot_overlap_odor(res, start_days, end_days, delete_non_selective=False,
                      figure_path=None, excitatory=True):
    """Plot odor-pair overlap (+:+, -:-, +:-) before vs. after learning.

    Draws per-mouse lines for each condition, then a bar + errorbar
    summary collapsed over conditions, and prints Wilcoxon statistics.
    """
    ax_args_copy = overlap_ax_args.copy()
    res = copy.copy(res)
    res = _get_overlap_odor(res, delete_non_selective)
    list_of_days = list(zip(start_days, end_days))
    mice = np.unique(res['mouse'])
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    start_end_day_res = reduce.new_filter_reduce(
        start_end_day_res, filter_keys=['mouse', 'day', 'condition'],
        reduce_key='Overlap')
    add_naive_learned(start_end_day_res, start_days, end_days)
    filter.assign_composite(start_end_day_res,
                            loop_keys=['condition', 'training_day'])
    odor_list = ['+:+', '-:-', '+:-']
    colors = ['Green', 'Red', 'Gray']
    name_str = '_E' if excitatory else '_I'
    ax_args_copy.update({'xlim': [-1, 6]})
    for i, odor in enumerate(odor_list):
        # First condition opens the figure; only the last one saves it.
        save_arg = False
        reuse_arg = True
        if i == 0:
            reuse_arg = False
        if i == len(odor_list) - 1:
            save_arg = True
        temp = filter.filter(start_end_day_res, {'condition': odor})
        name = ','.join([str(x) for x in start_days]) + '_' + ','.join(
            [str(x) for x in end_days])
        name += name_str
        plot.plot_results(temp, x_key='condition_training_day',
                          y_key='Overlap', loop_keys='mouse',
                          colors=[colors[i]] * len(mice),
                          path=figure_path, plot_args=line_args,
                          ax_args=ax_args_copy, save=save_arg,
                          reuse=reuse_arg, name_str=name,
                          fig_size=(2, 1.5), legend=False)
        b = filter.filter(temp, {'training_day': 'Learned'})
        print(odor)
        print(np.mean(b['Overlap']))

    # Second pass: summary over all conditions, days relabeled '0'/'1'.
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    add_naive_learned(start_end_day_res, start_days, end_days,
                      str1='0', str2='1')
    start_end_day_res.pop('Overlap_sem', None)
    summary_res = reduce.new_filter_reduce(start_end_day_res,
                                           filter_keys='training_day',
                                           reduce_key='Overlap')
    ax_args_copy.update({
        'xlim': [-1, 2],
        'ylim': [0, .5],
        'yticks': [0, .1, .2, .3, .4, .5]
    })
    plot.plot_results(summary_res, x_key='training_day', y_key='Overlap',
                      path=figure_path, plot_args=bar_args,
                      ax_args=ax_args_copy, plot_function=plt.bar,
                      fig_size=(2, 1.5), legend=False, reuse=False,
                      save=False)
    plot.plot_results(summary_res, x_key='training_day', y_key='Overlap',
                      error_key='Overlap_sem', path=figure_path,
                      plot_function=plt.errorbar, plot_args=error_args,
                      ax_args=ax_args, save=True, reuse=True,
                      name_str=name_str)
    before_odor = filter.filter(start_end_day_res, filter_dict={
        'training_day': '0', 'condition': '+:+'})
    after_odor = filter.filter(start_end_day_res, filter_dict={
        'training_day': '1', 'condition': '+:+'})
    # NOTE(review): the csp/csm names below actually hold the '+:-' and
    # '-:-' conditions respectively -- naming looks copy-pasted; confirm.
    before_csp = filter.filter(start_end_day_res, filter_dict={
        'training_day': '0', 'condition': '+:-'})
    after_csp = filter.filter(start_end_day_res, filter_dict={
        'training_day': '1', 'condition': '+:-'})
    before_csm = filter.filter(start_end_day_res, filter_dict={
        'training_day': '0', 'condition': '-:-'})
    after_csm = filter.filter(start_end_day_res, filter_dict={
        'training_day': '1', 'condition': '-:-'})
    from scipy.stats import ranksums, wilcoxon, kruskal
    print('Before ++: {}'.format(np.mean(before_odor['Overlap'])))
    print('After ++: {}'.format(np.mean(after_odor['Overlap'])))
    print('Wilcoxin:{}'.format(
        wilcoxon(before_odor['Overlap'], after_odor['Overlap'])))
    print('Before +-: {}'.format(np.mean(before_csp['Overlap'])))
    print('After +-: {}'.format(np.mean(after_csp['Overlap'])))
    print('Wilcoxin:{}'.format(
        wilcoxon(before_csp['Overlap'], after_csp['Overlap'])))
    print('Before --: {}'.format(np.mean(before_csm['Overlap'])))
    print('After --: {}'.format(np.mean(after_csm['Overlap'])))
    print('Wilcoxin:{}'.format(
        wilcoxon(before_csm['Overlap'], after_csm['Overlap'])))
def get_reversal_sig(res):
    """Classify cells as CS+/CS- responsive before and after reversal.

    Per mouse and day, a cell is CS+-responsive if it is significant
    (``ssig == 1``) and its mean CS+ minus CS- dF/F in the odor-to-water
    window is positive; CS--responsive if significant with a negative
    difference.

    Returns:
        reversal_res: defaultdict with, per mouse/day/valence, the boolean
            cell mask ('ssig') and its mean ('Fraction'). Days are labeled
            'Lrn' (first) and 'Rev' (second).
        stats_res: defaultdict with, per mouse, the fraction of cells that
            move between classes {p, m, none} across the two days;
            'condition' is e.g. 'p-m' for CS+ -> CS-.
    """
    key = 'ssig'

    def _helper(pair_res):
        # Expects exactly one CS+ entry followed by one CS- entry.
        assert pair_res['odor_valence'][0] == 'CS+', 'wrong odor'
        assert pair_res['odor_valence'][1] == 'CS-', 'wrong odor'
        on = pair_res['DAQ_O_ON_F'][0]
        off = pair_res['DAQ_W_ON_F'][0]
        sig_p = pair_res[key][0]
        sig_m = pair_res[key][1]
        dff_p = pair_res['dff'][0]
        dff_m = pair_res['dff'][1]
        sig_p_mask = sig_p == 1
        sig_m_mask = sig_m == 1
        # mean CS+ minus CS- response over the odor-on to water-on window
        dff_mask = dff_p - dff_m
        dff_mask = np.mean(dff_mask[:, on:off], axis=1)
        p = [a and b for a, b in zip(sig_p_mask, dff_mask > 0)]
        m = [a and b for a, b in zip(sig_m_mask, dff_mask < 0)]
        return np.array(p), np.array(m)

    mice = np.unique(res['mouse'])
    res = filter.filter(res, filter_dict={'odor_valence': ['CS+', 'CS-']})
    # collapse significance and dF/F to one entry per mouse/day/valence
    sig_res = reduce.new_filter_reduce(
        res, reduce_key=key, filter_keys=['mouse', 'day', 'odor_valence'])
    dff_res = reduce.new_filter_reduce(
        res, reduce_key='dff', filter_keys=['mouse', 'day', 'odor_valence'])
    sig_res['dff'] = dff_res['dff']

    reversal_res = defaultdict(list)
    day_strs = ['Lrn', 'Rev']  # first day = learning, second day = reversal
    for mouse in mice:
        mouse_res = filter.filter(sig_res, filter_dict={'mouse': mouse})
        days = np.unique(mouse_res['day'])
        for i, day in enumerate(days):
            mouse_day_res = filter.filter(mouse_res, filter_dict={'day': day})
            p, m = _helper(mouse_day_res)
            # two entries per day: CS+ mask first, then CS- mask
            reversal_res['mouse'].append(mouse)
            reversal_res['mouse'].append(mouse)
            reversal_res['day'].append(day_strs[i])
            reversal_res['day'].append(day_strs[i])
            reversal_res['odor_valence'].append('CS+')
            reversal_res['odor_valence'].append('CS-')
            reversal_res[key].append(p)
            reversal_res[key].append(m)
            reversal_res['Fraction'].append(np.mean(p))
            reversal_res['Fraction'].append(np.mean(m))
    for k, val in reversal_res.items():
        reversal_res[k] = np.array(val)

    stats_res = defaultdict(list)
    for mouse in mice:
        mouse_res = filter.filter(reversal_res, filter_dict={'mouse': mouse})
        combinations, list_of_ixs = filter.retrieve_unique_entries(
            mouse_res, ['day', 'odor_valence'])
        # entries must be ordered (Lrn CS+, Lrn CS-, Rev CS+, Rev CS-)
        assert len(combinations) == 4, 'not equal to 4'
        assert combinations[0][-1] == 'CS+'
        assert combinations[1][-1] == 'CS-'
        assert combinations[2][-1] == 'CS+'
        assert combinations[3][-1] == 'CS-'
        assert combinations[0][0] == day_strs[0]
        assert combinations[1][0] == day_strs[0]
        assert combinations[2][0] == day_strs[1]
        assert combinations[3][0] == day_strs[1]
        p_before = mouse_res[key][0]
        m_before = mouse_res[key][1]
        n_before = np.invert([a or b for a, b in zip(p_before, m_before)])
        p_after = mouse_res[key][2]
        m_after = mouse_res[key][3]
        n_after = np.invert([a or b for a, b in zip(p_after, m_after)])
        list_before = [p_before, m_before, n_before]
        list_after = [p_after, m_after, n_after]
        # renamed from `str`, which shadowed the builtin
        cell_classes = ['p', 'm', 'none']
        for i, before in enumerate(list_before):
            for j, after in enumerate(list_after):
                # fraction of `before`-class cells that end in `after` class
                ix_intersect = np.intersect1d(
                    np.where(before)[0], np.where(after)[0])
                fraction = len(ix_intersect) / np.sum(before)
                stats_res['mouse'].append(mouse)
                stats_res['condition'].append(
                    cell_classes[i] + '-' + cell_classes[j])
                stats_res['Fraction'].append(fraction)
    # loop variable renamed from `key`, which shadowed the 'ssig' key above
    for k, val in stats_res.items():
        stats_res[k] = np.array(val)
    return reversal_res, stats_res
if 'trials_to_criterion' in plotting: scatter_args_copy = scatter_args.copy() scatter_args_copy.update({'marker': '.', 'alpha': .5, 's': 10}) error_args_copy = error_args.copy() error_args_copy.update({'elinewidth': .5, 'markeredgewidth': .5, 'markersize': 0}) ax_args_cur = ax_args.copy() keyword = 'bin_ant_23_trials_to_criterion' # keyword = 'bin_ant_23_trials_to_half_max' res_ = res.copy() if collapse_arg == 'OFC_PT': res_ = filter.exclude(res_, {'mouse':['H01','H02','H04'],'experiment':'OFC_PT'}) filter.assign_composite(res_, ['odor_valence','condition']) phase_odor_valence = np.unique(res_['phase_odor_valence']) summary_res = reduce.new_filter_reduce(res_, filter_keys=['condition','phase_odor_valence'], reduce_key=keyword) for phase in np.unique(res_['phase_odor_valence']): # for phase in phase_odor_valence: if 'Pretraining' in phase: ax_args_cur.update({'xlim':[-1, 1],'ylim':[-20, 600], 'yticks':[0, 200, 400, 600]}) else: ax_args_cur.update({'xlim': [-1, 1], 'ylim': [-10, 225], 'yticks': [0, 100, 200]}) swarm_args_copy = swarm_args.copy() swarm_args_copy.update({'palette':['red','black'], 'size':5}) path, name = plot.plot_results(res_, x_key='odor_valence_condition', y_key= keyword, select_dict={'phase_odor_valence':phase}, ax_args=ax_args_cur, plot_function= sns.stripplot, plot_args= swarm_args_copy,
'size': 5, 'facecolors': 'none', 'alpha': .5, 'palette': ['black'], 'jitter': .1 } error_args = { 'fmt': '.', 'capsize': 2, 'elinewidth': 1, 'markersize': 0, 'alpha': .6 } mean_std = reduce.new_filter_reduce(out, filter_keys=['condition'], reduce_key=ykey) path, name = plot.plot_results(out, x_key='condition', y_key=ykey, ax_args=ax_args, plot_function=sns.stripplot, plot_args=swarm_args, save=False, fig_size=(3, 2), path=save_path) names_list = [x.name for x in conditions] save_name_str = '_PSTH' if psth else '' for i, name in enumerate(names_list):
def _plot_power_mean_sem(res, figure_path, excitatory, valence): color = 'red' if valence == 'CS-' else 'green' res.pop('power_sem') ax_lim = { 'yticks': [0, .5, 1], 'ylim': [0, 1.05], 'xticks': np.arange(0, 100, 25), 'xlim': [0, 85] } name_str = '_E' if excitatory else '_I' name_str += '_' + valence mean_std_power = reduce.new_filter_reduce(res, filter_keys='odor_valence', reduce_key='power', regularize='max') mean_std_bhv = reduce.new_filter_reduce(res, filter_keys='odor_valence', reduce_key=ykey_b, regularize='max') plot.plot_results(mean_std_bhv, x_key=xkey_b, y_key=ykey_b, plot_args=trace_args, path=figure_path, rect=(.2, .25, .6, .6), save=False, ax_args=ax_lim) plot.plot_results(mean_std_power, x_key='trials', y_key='power', colors=color, plot_args=trace_args, path=figure_path, reuse=True, save=False, ax_args=ax_lim) plot.plot_results(mean_std_bhv, x_key=xkey_b, y_key=ykey_b, error_key=ykey_b + '_sem', plot_function=plt.fill_between, plot_args=fill_args, path=figure_path, reuse=True, save=False, ax_args=ax_lim) plt.legend(['behavior', 'neural'], frameon=False) plot.plot_results(mean_std_power, x_key='trials', y_key='power', error_key='power_sem', plot_function=plt.fill_between, plot_args=fill_args, colors=color, path=figure_path, reuse=True, save=True, twinax=True, ax_args=ax_lim, name_str=name_str)
def plot_max_dff_days(res, days_per_mouse, odor_valence, save, reuse, day_pad, ylim=.115, colors=None, normalize=False, figure_path=None): res = copy.copy(res) res['day_'] = np.zeros_like(res['day']) res_ = defaultdict(list) for i, days in enumerate(days_per_mouse): temp = filter.filter_days_per_mouse(res, days_per_mouse=days) temp = filter.filter(temp, {'odor_valence': odor_valence[i]}) temp['day_'] = np.array([i + day_pad] * len(temp['day_'])) reduce.chain_defaultdicts(res_, temp) _max_dff(res_) res_ = reduce.new_filter_reduce( res_, filter_keys=['odor_valence', 'mouse', 'day_'], reduce_key='max_dff') if normalize: _normalize_across_days(res_) yticks = [0, 1, 2, 3, 4, 5, 6] else: yticks = np.arange(0, ylim, .05) dict = { 'CS+': 'Green', 'CS-': 'Red', 'US': 'Turquoise', 'PT CS+': 'Orange' } n_mice = len(np.unique(res['mouse'])) ax_args_copy = ax_args.copy() ax_args_copy.update({ 'ylim': [0, ylim], 'yticks': yticks, 'xticks': list(range(20)) }) line_args_copy = line_args.copy() line_args_copy.update({ 'marker': '.', 'linestyle': '--', 'linewidth': .5, 'alpha': .5, 'markersize': 2 }) if colors is None: colors = [dict[x] for x in odor_valence] else: line_args_copy.update({'marker': None}) plot.plot_results(res_, loop_keys='mouse', x_key='day_', y_key='max_dff', path=figure_path, colors=colors * n_mice, legend=False, plot_args=line_args_copy, ax_args=ax_args_copy, fig_size=(2, 2), save=save, reuse=reuse)
def _windowed_analysis(neural_res, behavior_res, window=13, smooth_length=3,
                       excitatory=True, valence='CS+'):
    """Compute trial-windowed neural power alongside smoothed lick behavior.

    For every mouse/odor, power is computed in a sliding window of trials,
    smoothed, min-max normalized, and the trial at which power (and licking)
    first crosses half-maximum is recorded. Results are then averaged per
    mouse/valence.

    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    confirm against version control.
    """
    def _moving_window(catdata, window):
        # NOTE(review): closes over `s`, `e`, `excitatory` from the enclosing
        # scope; `s`/`e` are assigned in the loop below before this is called.
        n_trials = catdata.shape[1]
        x = []
        for i in range(n_trials - window):
            temp = _power(catdata[:, i:i + window, :], s, e, excitatory)
            if i == 0:
                # left-pad with the first window's value to center the window
                for _ in range(1 + window // 2):
                    x.append(temp)
            else:
                x.append(temp)
        # right-pad with the last value until one entry per trial
        while len(x) != n_trials:
            x.append(temp)
        return x

    neural_res = copy.copy(neural_res)
    neural_res = filter.filter(
        neural_res, {'odor_valence': [valence]})  # filter only CS+ responses
    names, list_of_ixs = filter.retrieve_unique_entries(
        neural_res, ['mouse', 'odor_standard'])
    out = defaultdict(list)
    for i, ixs in enumerate(list_of_ixs):
        mouse = names[i][0]
        odor_standard = names[i][1]
        out['mouse'].append(mouse)
        out['odor_standard'].append(odor_standard)
        # valence label = odor_standard minus its trailing character
        out['odor_valence'].append(odor_standard[:-1])

        # neural analysis
        if len(np.unique(neural_res['DAQ_O_ON_F'])) > 1:
            print('Odor times not the same')
        if len(np.unique(neural_res['DAQ_W_ON_F'])) > 1:
            print('Water times not the same')
        s = np.min(neural_res['DAQ_O_ON_F'][ixs])
        e = np.min(neural_res['DAQ_W_ON_F'][ixs])
        d = neural_res['data'][ixs]
        # concatenate trials across days
        catdata = np.concatenate(d, axis=1)
        n_trials = catdata.shape[1]
        x = _moving_window(catdata, window)
        x = savgol_filter(x, smooth_length, 0)
        if excitatory:
            out['power'].append(np.array(x))
        else:
            # inhibitory power is sign-flipped so higher = stronger
            out['power'].append(np.array(-1 * x))
        out['trials'].append(np.arange(n_trials))

        # behavior analysis
        ix_lick = np.logical_and(
            behavior_res['odor_standard'] == odor_standard,
            behavior_res['mouse'] == mouse)
        assert np.sum(ix_lick) == 1, '{},{},{}'.format(odor_standard, mouse,
                                                       ix_lick)
        y = behavior_res['boolean_smoothed'][ix_lick][0]
        out[ykey_b].append(y)

        # both: first trial where normalized power / licking crosses half-max
        temp = (x - np.min(x)) / (np.max(x) - np.min(x))
        temp[:10] = 0  # ignore the earliest trials when finding the crossing
        half_pow = np.argwhere(temp > 0.5)[0][0]
        if np.any(y > 50):
            half_lick = np.argwhere(y > 50)[0][0]
        else:
            half_lick = -1  # sentinel: licking never crossed 50%
        out['half_power'].append(half_pow)
        out['half_lick'].append(half_lick)
    for k, v in out.items():
        out[k] = np.array(v)

    #average by odor
    out = reduce.new_filter_reduce(out,
                                   filter_keys=['mouse', 'odor_valence'],
                                   reduce_key='power')
    for i, power in enumerate(out['power']):
        # NOTE(review): `min`/`max` shadow the builtins here (local scope only)
        min = np.min(power)
        max = np.max(power)
        out['power'][i] = (power - min) / (max - min)
    temp = reduce.new_filter_reduce(out,
                                    filter_keys=['mouse', 'odor_valence'],
                                    reduce_key=ykey_b)
    for i in range(len(out['power'])):
        bhv = temp[ykey_b][i]
        neural = out['power'][i]
        if len(neural) > len(
                bhv
        ):  # when there is naive day but no training / behavioral data
            bhv_trials = np.arange(len(neural) - len(bhv), len(neural))
        else:
            bhv_trials = np.arange(len(neural))
        out[xkey_b].append(bhv_trials)
    out[xkey_b] = np.array(out[xkey_b])
    # resample
    # f = lambda a: ((resample_trials - a[0]) / (a[-1] - a[0])) * (a - a[0]) + a[0]
    # for i in range(len(out[xkey_b])):
    #     out[xkey_b][i] = f(out[xkey_b][i])
    #     out['trials'][i] = f(out['trials'][i])
    return out
def plot_power(res, start_days, end_days, figure_path, excitatory=True,
               odor_valence=('CS+',), naive=False,
               colors_before=None, colors_after=None,
               ylim=[0, .1], align=True, pad=True):
    """Plot trial-averaged neural power traces, Naive vs Learned.

    Optionally pads/aligns traces so odor onset and frame counts match across
    recordings, draws mean +/- sem fill/trace overlays per valence for the
    Naive and Learned days, then prints Wilcoxon statistics on the peak
    odor-window power change per valence.

    Fixes vs the previous revision:
    - default ``odor_valence=('CS+')`` was a plain string (missing comma);
      iterating it yielded 'C','S','+' and crashed the color lookups. It is
      now a real 1-tuple, which is what explicit callers already pass.
    - mutable dict defaults for colors_before/colors_after replaced with
      None sentinels (same effective defaults).

    Returns:
        (before_stat, after_stat) arrays for the LAST valence processed.
    """
    if colors_before is None:
        colors_before = {'CS+': 'Green', 'CS-': 'Red'}
    if colors_after is None:
        colors_after = {'CS+': 'Green', 'CS-': 'Red'}
    res = copy.copy(res)
    _power(res, excitatory)
    if pad:
        # shift each power trace so odor onset lines up with the median onset
        right_on = np.median(res['DAQ_O_ON_F'])
        for i, odor_on in enumerate(res['DAQ_O_ON_F']):
            if np.abs(odor_on - right_on) > 2:
                diff = (right_on - odor_on).astype(int)
                if diff > 0:
                    # odor came early: shift right, repeat first frame
                    p = res['Power'][i]
                    newp = np.zeros_like(p)
                    newp[:diff] = p[0]
                    newp[diff:] = p[:-diff]
                    res['Power'][i] = newp
                    print('early odor time. mouse: {}, day: {}'.format(
                        res['mouse'][i], res['day'][i]))
                else:
                    # odor came late: shift left, repeat last frame
                    p = res['Power'][i]
                    newp = np.zeros_like(p)
                    newp[:diff] = p[-diff:]
                    newp[diff:] = p[-1]
                    res['Power'][i] = newp
                    print('late odor time. mouse: {}, day: {}'.format(
                        res['mouse'][i], res['day'][i]))
    if align:
        # pad shorter traces with their final value to a common frame count
        nF = [len(x) for x in res['Power']]
        max_frame = np.max(nF)
        for i, p in enumerate(res['Power']):
            if len(p) < max_frame:
                newp = np.zeros(max_frame)
                newp[:len(p)] = p
                newp[len(p):] = p[-1]
                res['Power'][i] = newp
                res['Time'][i] = np.arange(0, max_frame)
                print('pad frames. mouse: {}, day: {}'.format(
                    res['mouse'][i], res['day'][i]))
    list_of_days = list(zip(start_days, end_days))
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    add_naive_learned(start_end_day_res, start_days, end_days)
    if naive:
        # drop pretraining odor on the naive day and relabel its valence
        start_end_day_res = filter.exclude(start_end_day_res, {
            'odor_standard': 'PT CS+',
            'training_day': 'Naive'
        })
        ix = start_end_day_res['odor_valence'] == 'PT Naive'
        start_end_day_res['odor_valence'][ix] = 'PT CS+'
    start_end_day_sum_res = reduce.new_filter_reduce(
        start_end_day_res, filter_keys=['training_day', 'odor_valence'],
        reduce_key='Power')
    ax_args_copy = trace_ax_args.copy()
    if excitatory:
        yticks = np.arange(0, .2, .05)
    else:
        yticks = -1 * np.arange(0, .2, .025)
    ax_args_copy.update({
        'xticks': [res['DAQ_O_ON_F'][-1], res['DAQ_W_ON_F'][-1]],
        'xticklabels': ['ON', 'US'],
        'ylim': ylim,
        'yticks': yticks
    })
    colors_b = [colors_before[x] for x in odor_valence]
    colors = [colors_after[x] for x in odor_valence]
    strr = ','.join([str(x) for x in start_days]) + '_' + ','.join(
        [str(x) for x in end_days])
    if excitatory:
        strr += '_E'
    else:
        strr += '_I'
    # Naive: sem fill then mean trace
    plot.plot_results(start_end_day_sum_res,
                      select_dict={
                          'odor_valence': odor_valence,
                          'training_day': 'Naive'
                      },
                      x_key='Time', y_key='Power', loop_keys='odor_valence',
                      error_key='Power_sem', path=figure_path,
                      plot_function=plt.fill_between, plot_args=fill_args,
                      ax_args=ax_args_copy, colors=colors_b,
                      fig_size=(2, 1.5), rect=(.3, .2, .6, .6), save=False)
    plot.plot_results(start_end_day_sum_res,
                      select_dict={
                          'odor_valence': odor_valence,
                          'training_day': 'Naive'
                      },
                      x_key='Time', y_key='Power', loop_keys='odor_valence',
                      path=figure_path, plot_args=trace_args,
                      ax_args=ax_args_copy, colors=colors_b,
                      fig_size=(2, 1.5), reuse=True, save=False)
    # Learned: sem fill then mean trace; final call saves via name_str
    plot.plot_results(start_end_day_sum_res,
                      select_dict={
                          'odor_valence': odor_valence,
                          'training_day': 'Learned'
                      },
                      x_key='Time', y_key='Power', loop_keys='odor_valence',
                      error_key='Power_sem', path=figure_path,
                      plot_function=plt.fill_between, plot_args=fill_args,
                      ax_args=ax_args_copy, colors=colors,
                      fig_size=(2, 1.5), reuse=True, save=False)
    plot.plot_results(start_end_day_sum_res,
                      select_dict={
                          'odor_valence': odor_valence,
                          'training_day': 'Learned'
                      },
                      x_key='Time', y_key='Power', loop_keys='odor_valence',
                      path=figure_path, plot_args=trace_args,
                      ax_args=ax_args_copy, colors=colors,
                      fig_size=(2, 1.5), reuse=True, name_str=strr)
    # statistic: peak power in the odor window minus the trace minimum
    for i, x in enumerate(start_end_day_res['Power']):
        on, off = [
            start_end_day_res['DAQ_O_ON_F'][i],
            start_end_day_res['DAQ_W_ON_F'][i]
        ]
        y = np.max(x[on:off]) - np.min(x)
        start_end_day_res['stat'].append(y)
    start_end_day_res['stat'] = np.array(start_end_day_res['stat'])
    for valence in odor_valence:
        before_odor = filter.filter(start_end_day_res,
                                    filter_dict={
                                        'training_day': 'Naive',
                                        'odor_valence': valence
                                    })
        after_odor = filter.filter(start_end_day_res,
                                   filter_dict={
                                       'training_day': 'Learned',
                                       'odor_valence': valence
                                   })
        try:
            from scipy.stats import ranksums, wilcoxon, kruskal
            print(before_odor['odor_valence'])
            # print('Before: {}'.format(before_odor['stat']))
            print('Before: {}'.format(np.mean(before_odor['stat'])))
            # print('After: {}'.format(after_odor['stat']))
            print('After: {}'.format(np.mean(after_odor['stat'])))
            print('Wilcoxin:{}'.format(
                wilcoxon(before_odor['stat'], after_odor['stat'])))
        except Exception:  # narrowed from a bare except
            print('stats didnt work')
    # NOTE: returns the stats of the last valence iterated above
    return before_odor['stat'], after_odor['stat']
def plot_reversal(res, start_days, end_days, figure_path):
    """Plot fractions of CS+/CS- responsive cells before vs after reversal.

    Uses get_reversal_sig to classify cells, plots the per-day fractions
    (errorbar summary + per-mouse strip plot), prints Wilcoxon statistics,
    then plots how cells transition between response classes.

    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    confirm against version control (in particular the placement of the final
    print relative to the loop).
    """
    ax_args_copy = ax_args.copy()
    ax_args_copy.update({'ylim': [0, .6]})
    res = copy.copy(res)
    list_of_days = list(zip(start_days, end_days))
    start_end_day_res = filter.filter_days_per_mouse(
        res, days_per_mouse=list_of_days)
    reversal_res, stats_res = get_reversal_sig(start_end_day_res)
    filter.assign_composite(reversal_res, loop_keys=['day', 'odor_valence'])
    import seaborn as sns
    swarm_args = {
        'marker': '.',
        'size': 8,
        'facecolors': 'none',
        'alpha': .5,
        'palette': ['green', 'red'],
        'jitter': .1
    }
    # mean +/- sem fraction per day/valence
    mean_res = reduce.new_filter_reduce(reversal_res,
                                        filter_keys=['day', 'odor_valence'],
                                        reduce_key='Fraction')
    plot.plot_results(mean_res, x_key='day_odor_valence', y_key='Fraction',
                      error_key='Fraction_sem', path=figure_path,
                      plot_function=plt.errorbar, plot_args=error_args,
                      ax_args=ax_args_copy, fig_size=(2, 1.5), save=False)
    # vertical divider between learning and reversal groups
    plt.plot([1.5, 1.5], plt.ylim(), '--', color='gray', linewidth=2)
    plot.plot_results(
        reversal_res,
        x_key='day_odor_valence',
        y_key='Fraction',
        path=figure_path,
        colors=['Green', 'Red', 'Green', 'Red'],
        # plot_function=plt.scatter, plot_args=scatter_args,
        plot_function=sns.stripplot,
        plot_args=swarm_args,
        ax_args=ax_args_copy,
        fig_size=(2, 1.5),
        reuse=True,
        save=True,
        legend=False)
    print(mean_res['day_odor_valence'])
    print(mean_res['Fraction'])
    # paired per-mouse statistics, learning vs reversal
    from scipy.stats import wilcoxon
    ix_before_p = reversal_res['day_odor_valence'] == 'Lrn_CS+'
    ix_after_p = reversal_res['day_odor_valence'] == 'Rev_CS+'
    ix_before_m = reversal_res['day_odor_valence'] == 'Lrn_CS-'
    ix_after_m = reversal_res['day_odor_valence'] == 'Rev_CS-'
    stat_csp = wilcoxon(reversal_res['Fraction'][ix_before_p],
                        reversal_res['Fraction'][ix_after_p])
    stat_csm = wilcoxon(reversal_res['Fraction'][ix_before_m],
                        reversal_res['Fraction'][ix_after_m])
    print('CS+ to CS-: {}'.format(stat_csp))
    print('CS- to CS+: {}'.format(stat_csm))
    # class-transition plots: one figure per starting class
    titles = ['', 'CS+', 'CS-', 'None']
    conditions = [['none-p', 'p-m', 'p-none', 'p-p'],
                  ['p-m', 'p-none', 'p-p'],
                  ['m-m', 'm-none', 'm-p'],
                  ['none-m', 'none-none', 'none-p']]
    labels = [['Added', 'Reversed', 'Lost', 'Retained'],
              ['Reversed', 'Lost', 'Retained'],
              ['Retained', 'Lost', 'Reversed'],
              ['to CS-', 'Retained', 'to CS+']]
    for i, title in enumerate(titles):
        mean_stats = reduce.new_filter_reduce(stats_res,
                                              filter_keys='condition',
                                              reduce_key='Fraction')
        ax_args_copy.update({
            'ylim': [-.1, 1],
            'yticks': [0, .5, 1],
            'xticks': [0, 1, 2, 3],
            'xticklabels': labels[i]
        })
        plot.plot_results(mean_stats,
                          select_dict={'condition': conditions[i]},
                          x_key='condition', y_key='Fraction',
                          loop_keys='mouse', error_key='Fraction_sem',
                          sort=True, path=figure_path, colors=['Black'] * 10,
                          plot_function=plt.errorbar, plot_args=error_args,
                          ax_args=ax_args_copy, fig_size=(2, 1.5), save=False)
        plt.title(title)
        plot.plot_results(stats_res,
                          select_dict={'condition': conditions[i]},
                          x_key='condition', y_key='Fraction',
                          loop_keys='mouse', sort=True, path=figure_path,
                          colors=['Black'] * 10, plot_function=plt.scatter,
                          plot_args=scatter_args, ax_args=ax_args_copy,
                          fig_size=(2, 1.5), legend=False, save=True,
                          reuse=True)
        print(mean_stats['Fraction'])
    print(mean_res['Fraction'])
# retrieving relevant days learned_day_per_mouse, last_day_per_mouse = get_days_per_mouse( data_path, condition) if condition_config.start_at_training and hasattr(condition, 'training_start_day'): start_days_per_mouse = condition.training_start_day else: start_days_per_mouse = [0] * len(condition_config.condition.paths) training_start_day_per_mouse = condition.training_start_day #behavior lick_res = behavior.behavior_analysis.get_licks_per_day(data_path, condition) analysis.add_odor_value(lick_res, condition) lick_res = filter.filter(lick_res, {'odor_valence': ['CS+', 'CS-', 'PT CS+']}) lick_res = reduce.new_filter_reduce(lick_res, ['odor_valence', 'day', 'mouse'], reduce_key='lick_boolean') temp_res = behavior.behavior_analysis.analyze_behavior(data_path, condition) if condition.name == 'OFC' or condition.name == 'BLA': if condition.name == 'OFC': last_day_per_mouse = [5, 5, 3, 4, 4] res = statistics.analyze.analyze_data(save_path, condition_config, m_threshold=0.04) # naive_config = statistics.analyze.OFC_LONGTERM_Config() # data_path_ = os.path.join(Config.LOCAL_DATA_PATH, Config.LOCAL_DATA_TIMEPOINT_FOLDER, naive_config.condition.name) # save_path_ = os.path.join(Config.LOCAL_EXPERIMENT_PATH, 'COUNTING', naive_config.condition.name) # res_naive = statistics.analyze.analyze_data(save_path_, condition_config, m_threshold=0.04) # res_naive = filter.exclude(res_naive, {'mouse': 3})
lick_smoothed = 'lick_smoothed' boolean_smoothed = 'boolean_smoothed' boolean_sem = 'boolean_smoothed_sem' lick_sem = 'lick_smoothed_sem' if 'summary' in experiments: full = defaultdict(list) list_of_res = [] for time in np.arange(.5, 5, .5): start_time = time end_time = 0 all_res = custom_get_res(start_time=start_time, end_time=end_time) all_res = filter.filter(all_res, {'odor_valence': ['CS+', 'CS-', 'PT CS+']}) all_res_lick = reduce.new_filter_reduce( all_res, filter_keys=['condition', 'odor_valence', 'mouse'], reduce_key=lick) for i, x in enumerate(all_res_lick[lick]): all_res_lick['training_end_licks'].append(np.mean(x[-20:])) all_res_lick['start_time'].append(start_time) all_res_lick['end_time'].append(end_time) for k, v in all_res_lick.items(): all_res_lick[k] = np.array(v) list_of_res.append(all_res_lick) for res in list_of_res: reduce.chain_defaultdicts(full, res) line_args_copy = line_args.copy() line_args_copy.update({'marker': '.', 'linewidth': .5, 'markersize': .5})
ax_args_pt = {'yticks': [0, 5, 10], 'ylim': [-1, 12], 'xticks': [0, 100, 200, 300], 'xlim': [0, 300]} bool_ax_args_pt = {'yticks': [0, 50, 100], 'ylim': [-5, 105], 'xticks': [0, 100, 200, 300], 'xlim': [0, 300]} bar_args = {'alpha': .6, 'fill': False} scatter_args = {'marker': 'o', 's': 10, 'alpha': .6} lick = 'lick' lick_smoothed = 'lick_smoothed' boolean_smoothed = 'boolean_smoothed' boolean_sem = 'boolean_smoothed_sem' lick_sem = 'lick_smoothed_sem' if 'trials_to_criterion' in experiments: reduce_key = 'criterion' collapse_arg = 'condition' mean_std_res = reduce.new_filter_reduce(all_res, filter_keys=[collapse_arg, 'odor_valence'],reduce_key=reduce_key) x = all_res[reduce_key] scatter_args_copy = scatter_args.copy() scatter_args_copy.update({'marker': '.', 'alpha': .5, 's': 10}) error_args_copy = error_args.copy() error_args_copy.update({'elinewidth': .5, 'markeredgewidth': .5, 'markersize': 0}) xlim_1 = np.unique(all_res[collapse_arg]).size ax_args_pt_ = {'yticks': [0, 50, 100, 150, 200, 250], 'ylim': [-10, 260], 'xlim':[-1, xlim_1]} ax_args_dt_ = {'yticks': [0, 25, 50], 'ylim': [-5, 55], 'xlim':[-1, xlim_1]} ax_args_mush_ = {'yticks': [0, 50, 100], 'ylim': [-5, 125], 'xlim':[-1, xlim_1]} x_key = collapse_arg for valence in np.unique(all_res['odor_valence']): swarm_args_copy = swarm_args.copy() swarm_args_copy.update({'palette':['red','black'], 'size':5, 'jitter':0.01})
def plot_consistency_within_day(res, start, end, shuffle, pretraining, figure_path):
    """Plot within-day consistency (trial-to-trial correlation) of responses.

    Draws per-entry consistency as a strip plot grouped by training day and
    valence, overlays the mean +/- sem, then prints Kruskal-Wallis and Dunn
    post-hoc statistics across the four day/valence groups.

    NOTE(review): indentation reconstructed from a whitespace-mangled source —
    confirm against version control.
    """
    d = list(zip(start, end))
    res_temp = filter.filter_days_per_mouse(res, d)
    if pretraining:
        res_temp = filter.filter(res_temp, {'odor_valence': ['PT CS+']})
    else:
        res_temp = filter.filter(res_temp, {'odor_valence': ['CS+', 'CS-']})
    corr_res = _correlation(res_temp)
    corr_res.pop('data')
    # label days '0' (naive) and '1' (learned)
    analysis.add_naive_learned(corr_res, start, end, '0', '1')
    res_ = reduce.new_filter_reduce(
        corr_res, filter_keys=['mouse', 'odor_standard', 'training_day'],
        reduce_key='consistency_corrcoef')
    res_.pop('consistency_corrcoef_sem')
    filter.assign_composite(res_, loop_keys=['training_day', 'odor_valence'])
    # file-name suffix distinguishing shuffled controls
    if shuffle:
        s = '_shuffled'
    else:
        s = ''
    ax_args_copy = ax_args.copy()
    ax_args_copy.update({
        'xlim': [-.5, 2.5],
        'ylim': [0, .55],
        'yticks': np.arange(0, 1.1, .1)
    })
    swarm_args_copy = swarm_args.copy()
    if pretraining:
        swarm_args_copy.update({'palette': ['gray', 'orange', 'green', 'red']})
    else:
        swarm_args_copy.update({'palette': ['gray', 'gray', 'green', 'red']})
    # relabel so learned pretraining odor sorts into its own category
    ix = res_['training_day_odor_valence'] == '1_PT CS+'
    res_['training_day_odor_valence'][ix] = '1_APT CS+'
    plot.plot_results(res_, x_key='training_day_odor_valence',
                      y_key='consistency_corrcoef', path=figure_path,
                      plot_args=swarm_args_copy, plot_function=sns.stripplot,
                      ax_args=ax_args_copy, reuse=False, save=False,
                      sort=True, name_str=s)
    summary = reduce.new_filter_reduce(res_,
                                       filter_keys='training_day_odor_valence',
                                       reduce_key='consistency_corrcoef')
    # overlay mean +/- sem on the strip plot and save
    plot.plot_results(summary, x_key='training_day_odor_valence',
                      y_key='consistency_corrcoef',
                      error_key='consistency_corrcoef_sem', colors='black',
                      path=figure_path, plot_args=error_args,
                      plot_function=plt.errorbar, save=True, reuse=True,
                      legend=False, name_str=s)
    print(summary['consistency_corrcoef'])
    # group-wise statistics across day/valence categories
    ix_a = res_['training_day_odor_valence'] == '0_CS+'
    ix_b = res_['training_day_odor_valence'] == '0_CS-'
    ix_c = res_['training_day_odor_valence'] == '1_CS+'
    ix_d = res_['training_day_odor_valence'] == '1_CS-'
    a = res_['consistency_corrcoef'][ix_a]
    b = res_['consistency_corrcoef'][ix_b]
    c = res_['consistency_corrcoef'][ix_c]
    d = res_['consistency_corrcoef'][ix_d]
    from scipy.stats import ranksums, wilcoxon, kruskal
    import scikit_posthocs
    print(kruskal(a, b, c))
    x = scikit_posthocs.posthoc_dunn(a=[a, b, c, d], p_adjust=None)
    print(x)
def plot_correlation(res, start_days, end_days, figure_path, odor_end=True,
                     linestyle='-', direction=0, arg=False, save=False,
                     reuse=False, color='black'):
    """Plot mean across-odor correlation before ('A') vs after ('B') training.

    Correlations are computed per mouse on start_days and end_days, same-odor
    pairs are dropped, pairs are optionally restricted by ``arg``
    ('opposing', 'CS+' or 'CS-'), and the per-day mean +/- sem is plotted as
    an errorbar. Wilcoxon statistics on the pair-level values are printed.

    Args:
        arg: False for all cross-odor pairs, or one of 'opposing'
            (CS+ vs CS- pairs), 'CS+' (odors 0 vs 1), 'CS-' (odors 2 vs 3).
    """
    def _get_ixs(r, arg):
        # Indices of odor pairs matching `arg`; odors are numbered 0,1 = CS+
        # and 2,3 = CS-.
        A = r['Odor_A']
        B = r['Odor_B']
        keep = []  # renamed from `l` (ambiguous, shadows-like builtin-lookalike)
        for i, a in enumerate(A):
            b = B[i]
            if arg == 'opposing':
                if a < 2 and b > 1:
                    keep.append(i)
            elif arg == 'CS+':
                if a == 0 and b == 1:
                    keep.append(i)
            elif arg == 'CS-':
                if a == 2 and b == 3:
                    keep.append(i)
        return np.array(keep)

    res = filter.filter(res, {'odor_valence': ['CS+', 'CS-']})
    res_before = filter.filter_days_per_mouse(res, start_days)
    corr_before = _correlation(res_before, ['mouse'], shuffle=False,
                               odor_end=odor_end, direction=direction)
    corr_before['day'] = np.array(['A'] * len(corr_before['Odor_A']))
    res_after = filter.filter_days_per_mouse(res, end_days)
    corr_after = _correlation(res_after, ['mouse'], shuffle=False,
                              odor_end=odor_end, direction=direction)
    corr_after['day'] = np.array(['B'] * len(corr_after['Odor_A']))
    corr = defaultdict(list)
    reduce.chain_defaultdicts(corr, corr_before)
    reduce.chain_defaultdicts(corr, corr_after)
    # keep only cross-odor pairs
    ix_same = np.equal(corr['Odor_A'], corr['Odor_B'])
    ix_different = np.invert(ix_same)
    for k, v in corr.items():
        corr[k] = v[ix_different]
    if arg is not False:
        ixs = _get_ixs(corr, arg)
        for k, v in corr.items():
            corr[k] = v[ixs]
    # mean per mouse/day, then per day
    mouse_corr = reduce.new_filter_reduce(corr, filter_keys=['mouse', 'day'],
                                          reduce_key='corrcoef')
    mouse_corr.pop('corrcoef_sem')
    mouse_corr.pop('corrcoef_std')
    average_corr = reduce.new_filter_reduce(mouse_corr, filter_keys=['day'],
                                            reduce_key='corrcoef')
    # local styling; intentionally overrides any module-level error_args
    error_args = {'capsize': 2, 'elinewidth': 1, 'markersize': 2, 'alpha': .5}
    error_args.update({'linestyle': linestyle})
    plot.plot_results(average_corr, x_key='day', y_key='corrcoef',
                      error_key='corrcoef_sem', plot_args=error_args,
                      plot_function=plt.errorbar, colors=color,
                      ax_args={
                          'ylim': [0, 1],
                          'xlim': [-.5, 1.5]
                      },
                      save=save, reuse=reuse, path=figure_path,
                      name_str=str(direction) + '_' + str(arg))

    #stats
    before_odor = filter.filter(corr, filter_dict={'day': 'A'})
    after_odor = filter.filter(corr, filter_dict={'day': 'B'})
    from scipy.stats import ranksums, wilcoxon, kruskal
    print('direction: {}'.format(direction))
    # print('Data Before: {}'.format(before_odor['corrcoef']))
    # print('Data After: {}'.format(after_odor['corrcoef']))
    print('Before: {}'.format(np.mean(before_odor['corrcoef'])))
    print('After: {}'.format(np.mean(after_odor['corrcoef'])))
    print('Wilcoxin:{}'.format(
        wilcoxon(before_odor['corrcoef'], after_odor['corrcoef'])))