def run_draw_prediction():
    score_raw = unflatten(np.load(join(res_folder, "bis_prediction_raw.npz")))
    score_corr = unflatten(np.load(join(res_folder, "bis_prediction_corr.npz")))
    print("mean (raw vs. corr): {} vs. {}".format(
        map_tree(np.mean, score_raw), map_tree(np.mean, score_corr)))

    def _draw_pred(raw, corr, info):
        name = f"pred-perf-bis-{info['group']}-{info['idx']}.png"
        with Figure(join(img_folder, "prediction", name), (6, 4)) as (ax, ):
            ax.hist(raw, 50, alpha=0.5, color=COLORS[2])
            ax.hist(corr, 50, alpha=0.5, color=COLORS[1])

    map_tree(lambda x: _draw_pred(x[0], x[1], x[2]), zip_tree(score_raw, score_corr, mice))
    draw_perm_comp([score_raw, score_corr], ['raw', 'corr'])
    ## show comparison of scoring between raw and corr
    score_raw = unflatten(np.load(join(res_folder, 'k_prediction_raw.npz')))
    score_corr = unflatten(np.load(join(res_folder, 'k_prediction_corr.npz')))
    score_ids = [("wt", [1]), ("glt1", [1]), ("dredd", [0])]
    score = list()
    for group, indices in score_ids:
        raw_group = np.hstack([score_raw[group][idx] for idx in indices])
        corr_group = np.hstack([score_corr[group][idx] for idx in indices])
        score.append([raw_group, corr_group])
    draw_twoway_comp(score, ['raw', 'corr'], ['wt', 'glt1', 'dredd'])
def run_k_means_precision():
    def _get_k_means(data_file: File):
        return k_means(get_trials(data_file, motion_params).values, 2)[1]

    cluster_labels = map_tree(_get_k_means, files)
    score_raw = map_tree(get_score, zip_tree(files, cluster_labels))
    score_corr = map_tree(lambda x: get_score(x, corr=True), zip_tree(files, cluster_labels))
    np.savez_compressed(join(res_folder, "k_prediction_raw.npz"), **flatten(score_raw))
    np.savez_compressed(join(res_folder, "k_prediction_corr.npz"), **flatten(score_corr))
    print('done')
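# The flatten/unflatten pair used here and in run_draw_prediction are project
# helpers; the sketch below only illustrates the assumed behaviour (a nested
# {group: [array, ...]} tree mapped to flat "group-index" keys and back) so the
# scores can round-trip through np.savez_compressed / np.load. The _sketch
# names are hypothetical and not part of the project API.
def _flatten_sketch(tree):
    # one archive entry per (group, session index)
    return {f"{group}-{idx}": arr for group, arrays in tree.items()
            for idx, arr in enumerate(arrays)}

def _unflatten_sketch(flat):
    # invert the key scheme, sorting numerically so "...-10" does not precede "...-2"
    tree = dict()
    for key in sorted(flat, key=lambda k: (k.rsplit("-", 1)[0], int(k.rsplit("-", 1)[1]))):
        tree.setdefault(key.rsplit("-", 1)[0], []).append(flat[key])
    return tree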
def run_pc_bisec_precision():
    def _get_bisect(data):
        mask, filtered = devibrate_trials(
            get_trials(data[0], motion_params)[0], motion_params["pre_time"])
        return pca_bisect(filtered), mask

    bisect_labels = map_tree(_get_bisect, files)
    score_raw = map_tree(get_score_mask, zip_tree(files, bisect_labels))
    score_corr = map_tree(lambda x: get_score_mask(x, corr=True), zip_tree(files, bisect_labels))
    # pass the path directly (as in run_k_means_precision) instead of an open
    # handle that is never closed
    np.savez_compressed(join(res_folder, "bis_prediction_raw.npz"), **flatten(score_raw))
    np.savez_compressed(join(res_folder, "bis_prediction_corr.npz"), **flatten(score_corr))
    print('done')
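# A minimal driver sketch (assumed, not part of the original source): the two
# precision runs write the npz archives that run_draw_prediction reads back,
# so they have to run first.
if __name__ == '__main__':
    run_k_means_precision()    # writes k_prediction_{raw,corr}.npz
    run_pc_bisec_precision()   # writes bis_prediction_{raw,corr}.npz
    run_draw_prediction()      # reads both sets and draws the comparisons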
def main():
    result = map_tree(lambda x: decoder_power(x, particle.decoder_factory), files)
    # local name chosen so it does not shadow the flatten() helper used elsewhere
    flattened = {group_str: np.array(group) for group_str, group in result.items()}
    np.savez_compressed(join(res_folder, "decoding.npz"), **flattened)
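# Usage sketch (not in the original script): main() stores one array per group,
# so the group names come back as archive keys when the result is reloaded.
def load_decoding():
    decoding = np.load(join(res_folder, "decoding.npz"))
    return {group: decoding[group] for group in decoding.files}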
def find_corr():
    corr_pairs = map_tree(lambda x: pairwise_corr(x, motion_params), files)
    corr_pairs['glt1'] = (corr_pairs['glt1'][0: 2] + corr_pairs['glt1'][4: 6]
                          + corr_pairs['glt1'][7: 8])
    results = dict()
    for group in ('wt', 'glt1', 'dredd'):
        result = list()
        for idx, (lever, neuron) in enumerate(corr_pairs[group]):
            # mask = (0.75 > lever) & (lever > 0.5)
            mask = lever > 0.75
            result.append(neuron[:, mask].ravel())
        results[group] = result
    fig, axes = plt.subplots(nrows=3, sharex=True)
    for ax, group in zip(axes, ('wt', 'glt1', 'dredd')):
        ax.hist(results[group], 50, density=True)
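# Plotting note: `results[group]` is a list of per-session arrays, and Axes.hist
# draws a list of arrays as side-by-side bar sets, one per session. To show the
# pooled distribution instead, the sessions would have to be stacked first, e.g.
# ax.hist(np.hstack(results[group]), 50, density=True).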
def find_slope_dist(params: MotionParams):
    def _get_best_corr(x):
        lever_corr, neural_corr = pairwise_corr(x[0], params)
        return np.vstack([lever_corr, np.quantile(neural_corr, 0.8, axis=0)]).T

    corr_cmp = map_tree(_get_best_corr, files)
    result, raw_result = dict(), dict()
    for group_str, group in corr_cmp.items():
        points = np.vstack(group)
        result[group_str] = slope_dist(points, 200)
        raw_result[group_str] = points
    with Figure(join(img_folder, "corr_dist_08.svg"), (3, 8)) as (ax, ):
        ax.scatter(raw_result['wt'][:, 0], raw_result['wt'][:, 1], color="#268bd2", s=1)
        ax.scatter(raw_result['glt1'][:, 0], raw_result['glt1'][:, 1], color="#d33682", s=1)
        ax.scatter(raw_result['dredd'][:, 0], raw_result['dredd'][:, 1], color="#859900", s=1)
    with Figure(join(img_folder, "slope_dist_06.svg"), (6, 4)) as (ax, ):
        ax.hist(result['wt'], 20, color="#268bd2", alpha=0.75)
        ax.hist(result['glt1'], 20, color="#d33682", alpha=0.75)
        ax.hist(result['dredd'], 20, color="#859900", alpha=0.75)
def run_draw_template():
    cluster_labels = unflatten(np.load(join(res_folder, "clustering.npz")))
    map_tree(lambda x: draw_cluster_3d(*x), zip_tree(files, cluster_labels, mice))
    map_tree(lambda x: draw_template(*x), zip_tree(files, cluster_labels, mice))
def run_label_clusters():
    results = map_tree(lambda x: get_thresholds(x, True), files)
    with open(join(res_folder, "clustering.pkl"), 'wb') as fpb:
        pkl.dump(results, fpb)
"quiet_var": 0.001, "window_size": 1000, "event_thres": 0.3, "pre_time": 0.1, "post_time": 1.4 } with open(join(project_folder, 'data', 'recording.toml')) as fp: mice = { group_str: [{ 'group': group_str, **x } for x in group] for group_str, group in toml.load(fp).items() } files = map_tree(lambda x: (File(join(project_folder, "data", x["path"]))), mice) COLORS = [ "#dc322fff", "#268bd2ff", "#d33682ff", "#2aa198ff", "#859900ff", "#b58900ff", "#50D0B8FF" ] # Interactively set threshold and save in file attrs def get_thresholds(data_file: File, overwrite: bool = False): linkage, mask = get_linkage(data_file, motion_params) if overwrite or ('hierarchy_threshold' not in data_file.attrs): threshold = get_threshold(linkage) data_file.attrs['hierarchy_threshold'] = threshold data_file.attrs.flush() threshold = data_file.attrs['hierarchy_threshold'] clusters = get_cluster_labels(linkage, threshold)
def find_lever_corr():
    corrs = map_tree(lambda x: lever_corr(x[0], x[1], motion_params), zip_tree(files, mice))
    print("wt vs. dredd: ", perm_test(np.hstack(corrs['wt']), np.hstack(corrs['dredd'])))
    print([np.median(case) for case in corrs['dredd']])
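# perm_test comes from algorithm.stats; in case its exact behaviour is not at
# hand, the hypothetical _perm_test_sketch below shows the convention assumed
# here: a two-sided permutation test on the difference of group means.
def _perm_test_sketch(a, b, n_perm=10000, seed=0):
    rng = np.random.default_rng(seed)
    observed = np.mean(a) - np.mean(b)
    pooled = np.concatenate([a, b])
    count = 0
    for _ in range(n_perm):
        shuffled = rng.permutation(pooled)
        diff = np.mean(shuffled[:len(a)]) - np.mean(shuffled[len(a):])
        if abs(diff) >= abs(observed):
            count += 1
    return count / n_perm  # estimated two-sided p-value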
]
project_folder = expanduser("~/Sync/project/2018-leverpush-chloe")
img_folder = join(project_folder, 'report', 'img')
res_folder = join(project_folder, 'report', 'measure')
##
with open(join(res_folder, "svr_power.pkl"), 'rb') as fp:
    result = pkl.load(fp)
ind_scores = {x: [a[1] for a in y] for x, y in result.items()}
wt_size = [len(x) for x in ind_scores['wt']]
glt_size = [len(x) for x in ind_scores['glt1']]
dredd_size = [len(x) for x in ind_scores['dredd']]
print(f"wt: {np.mean(wt_size)}, glt: {np.mean(glt_size)}, dredd: {np.mean(dredd_size)}")
##
score_no = map_tree(lambda x: len(x), ind_scores)
plt.hist(score_no.values(), 50)
## Test: does number of neurons affect slope?
pool = np.exp(-np.arange(250) / 25)

def take_sample(cell_no: int, pool: np.ndarray, fn):
    res = list()
    for _ in range(500):
        samples = list()
        x_axis = list()
        for _ in range(10):
            sample = np.flip(np.sort(np.random.choice(pool, cell_no, replace=False)))
            sample /= np.sum(sample)
            samples.append(sample)
from os.path import expanduser, join
import toml
import numpy as np
from scipy.stats import ttest_ind, ks_2samp
from noformat import File
from algorithm.utils import map_tree
from algorithm.stats import combine_test, perm_test
from lever.utils import MotionParams, get_trials
from lever.plot import plot_scatter
from lever.filter import devibrate_rec

motion_params = {"quiet_var": 0.001, "window_size": 1000, "event_thres": 0.3,
                 "pre_time": 0.3, "post_time": 0.7}
proj_folder = expanduser("~/Sync/project/2018-leverpush-chloe")
res_folder = join(proj_folder, "report", "measure")
mice = toml.load(join(proj_folder, 'data', 'recording.toml'))
files = map_tree(lambda x: File(join(proj_folder, 'data', x['path'])), mice)
COLORS = ["#dc322fff", "#268bd2ff", "#d33682ff", "#2aa198ff", "#859900ff", "#b58900ff"]

#
def reliability(data: np.ndarray) -> float:
    """Mean pairwise trial-to-trial correlation: 2 / (t^2 - t) equals 1 / C(t, 2),
    the number of unique trial pairs in the upper triangle."""
    t = data.shape[0]
    coef = 2 / (t ** 2 - t)
    return np.corrcoef(data, rowvar=True)[np.triu_indices(t, 1)].sum() * coef

def get_initial(data_file: File, params: MotionParams):
    lever = devibrate_rec(get_trials(data_file, params))
    pre_value = lever.values[:, :lever._pre // 2].mean(axis=1, keepdims=True)
    lever_off = int(np.median(np.argmax(lever.values[:, lever._pre:] <= pre_value, axis=1))) + lever._pre
    return reliability(lever.values[:, lever._pre // 2: lever_off])

def get_rise(data_file: File, params: MotionParams):
    lever = devibrate_rec(get_trials(data_file, params))
from mplplot import Figure

project_folder = expanduser("~/Sync/project/2018-leverpush-chloe")
# project_folder = expanduser("~/Sync/project/2017-leverpush")
img_folder = join(project_folder, 'report', 'img')
res_folder = join(project_folder, 'report', 'measure')
motion_params = {
    "quiet_var": 0.001, "window_size": 1000, "event_thres": 0.3,
    "pre_time": 0.1, "post_time": 1.4
}
mice = toml.load(join(project_folder, 'data', 'recording.toml'))
files = map_tree(lambda x: File(join(project_folder, "data", x["path"])), mice)
COLORS = ["#dc322fff", "#268bd2ff", "#d33682ff", "#2aa198ff", "#859900ff", "#b58900ff"]

## Amplitude
def get_amp(data_file: File) -> float:
    mask, filtered = devibrate_trials(
        get_trials(data_file, motion_params).values, motion_params['pre_time'])
    return np.quantile(filtered[mask, 25:64].max(axis=1) - filtered[mask, 0:15].mean(axis=1), 0.75)
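# Usage sketch (assumed, mirroring how other per-session measures are collected
# in this project): map get_amp over the recording tree and save one array per
# group. The "amplitude.npz" file name is hypothetical.
def run_amp():
    amps = map_tree(get_amp, files)
    np.savez_compressed(join(res_folder, "amplitude.npz"),
                        **{group: np.array(values) for group, values in amps.items()})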
from algorithm.array import DataFrame
from algorithm.stats import combine_test, perm_test
from algorithm.time_series import take_segment
from lever.filter import devibrate_trials
from lever.utils import get_trials
from lever.decoding.validate import cross_predict
from lever.plot import plot_scatter

project_folder = expanduser("~/Sync/project/2018-leverpush-chloe")
res_folder = join(project_folder, "report", "measure")
COLORS = ["#dc322fff", "#268bd2ff", "#d33682ff", "#2aa198ff", "#859900ff", "#b58900ff"]
mice = toml.load(join(project_folder, "data/recording.toml"))
files = map_tree(lambda x: File(join(project_folder, 'data', x['path'])), mice)
motion_params = {
    "quiet_var": 0.001, "window_size": 1000, "event_thres": 0.3,
    "pre_time": 0.1, "post_time": 0.9
}

## actual running
def run_amp_power(data_file: File) -> Tuple[float, float, float, float]:
    """Try to decode the max lever trajectory amplitude of each trial.

    Returns:
        pre_amp_power: mutual info between predicted (from neuron activity before motor
            onset) and real amplitude of trials in one session