def get_clusters():
    """Pimp cluster defs: extend the default Glasser clusters with
    one single-area "NSWFRONT_<area>" cluster per frontal area.

    Returns:
        dict mapping cluster name -> list of ROI label names. Contains
        the NSWFRONT single-area clusters (left- and right-hemisphere
        labels) plus those clusters from
        ``pymeg.atlas_glasser.get_clusters()`` whose names appear in
        ``atlas_glasser.areas.values()``.
    """
    from pymeg import atlas_glasser as ag

    # fmt: off
    areas = ["47s", "47m", "a47r", "11l", "13l", "a10p", "p10p", "10pp",
             "10d", "OFC", "pOFC", "44", "45", "IFJp", "IFJa", "IFSp",
             "IFSa", "47l", "p47r", "8C", "8Av", "i6-8", "s6-8", "SFL",
             "8BL", "9p", "9a", "8Ad", "p9-46v", "a9-46v", "46", "9-46d",
             "SCEF", "p32pr", "a24pr", "a32pr", "p24", "p32", "s32",
             "a24", "10v", "10r", "25", "d32", "8BM", "9m"]
    # fmt: on
    # BUG FIX: the original listed the left-hemisphere label twice
    # ("L_{}_ROI-lh" / "L_{}_ROI-lh"), so NSWFRONT clusters had no
    # right-hemisphere ROI and were dropped by aggregation steps that
    # require both hemispheres. Pair each area's LH label with its RH
    # label instead.
    areas = {
        "NSWFRONT_" + area: [
            "L_{}_ROI-lh".format(area),
            "R_{}_ROI-rh".format(area),
        ]
        for area in areas
    }
    all_clusters, _, _, _ = ag.get_clusters()
    all_clusters.update(areas)
    # Keep only the NSWFRONT clusters plus the canonical named clusters.
    all_clusters = {
        k: v
        for k, v in all_clusters.items()
        if (k.startswith("NSWFRONT")) or (k in ag.areas.values())
    }
    return all_clusters
def aggregate(tfr_data, hemis, all_clusters=None):
    """Aggregate individual areas into clusters.

    NOTE(review): this definition is shadowed by a later ``def aggregate``
    in this file, which redefines the name; only the later version is
    reachable at import time. This one additionally supports the
    'Pair'/'Single' hemisphere modes.

    Args:
        tfr_data: pandas DataFrame indexed by (at least) the MultiIndex
            levels 'area', 'freq' and 'trial'.
        hemis: iterable of hemisphere modes; each element is one of
            'Pair', 'Single', 'Lateralized' or 'Averaged'.
        all_clusters: dict mapping cluster name -> list of area label
            strings, or None to load the default Glasser clusters.

    Returns:
        DataFrame of per-cluster means with index levels
        ('hemi', 'cluster', 'trial', 'freq').
    """
    from itertools import product
    from pymeg import atlas_glasser
    if all_clusters is None:
        all_clusters, _, _, _ = atlas_glasser.get_clusters()
    clusters = []
    # Area names actually present in the data; cluster definitions are
    # matched against these by case-insensitive substring below.
    tfr_areas = np.unique(tfr_data.index.get_level_values('area'))
    for hemi, cluster in product(hemis, all_clusters.keys()):
        print('Working on %s, %s' % (hemi, cluster))
        # Split this cluster's area labels by hemisphere suffix.
        tfrs_rh = [area for area in all_clusters[cluster] if 'rh' in area]
        tfrs_lh = [area for area in all_clusters[cluster] if 'lh' in area]
        # Resolve cluster labels to the area names present in tfr_data
        # (case-insensitive substring match).
        tfrs_rh = [
            t for t in tfr_areas
            if any([a.lower() in t.lower() for a in tfrs_rh])
        ]
        tfrs_lh = [
            t for t in tfr_areas
            if any([a.lower() in t.lower() for a in tfrs_lh])
        ]
        lh_idx = tfr_data.index.isin(tfrs_lh, level='area')
        rh_idx = tfr_data.index.isin(tfrs_rh, level='area')
        # Mean over areas within the hemisphere, keeping (freq, trial).
        left = tfr_data.loc[lh_idx, :].groupby(['freq', 'trial']).mean()
        right = tfr_data.loc[rh_idx, :].groupby(['freq', 'trial']).mean()
        if hemi == 'Pair':
            # Keep both hemispheres as separate '<cluster>_LH/_RH' rows.
            left.loc[:, 'cluster'] = cluster + '_LH'
            left.loc[:, 'hemi'] = 'Pair'
            right.loc[:, 'cluster'] = cluster + '_RH'
            right.loc[:, 'hemi'] = 'Pair'
            clusters.append(left)
            clusters.append(right)
        elif hemi == 'Single':
            # NOTE(review): because of the elif, when BOTH hemispheres
            # have data only the left one is appended — presumably
            # 'Single' is meant for clusters defined in one hemisphere
            # only; confirm against callers.
            if len(left) > 0:
                left.loc[:, 'cluster'] = cluster + '_LH'
                left.loc[:, 'hemi'] = 'Single'
                clusters.append(left)
            elif len(right) > 0:
                right.loc[:, 'cluster'] = cluster + '_RH'
                right.loc[:, 'hemi'] = 'Single'
                clusters.append(right)
            else:
                # Neither hemisphere matched any area in the data.
                print('RH skipping', hemi, cluster, ' -> Seems to be empty')
                print(left, right, lh_idx, rh_idx, tfrs_lh, tfrs_rh)
                continue
        else:
            # Combine hemispheres into a single frame per cluster.
            if hemi == 'Lateralized':
                tfrs = left - right
            elif hemi == 'Averaged':
                tfrs = (right + left) / 2
            tfrs.loc[:, 'cluster'] = cluster
            tfrs.loc[:, 'hemi'] = hemi
            clusters.append(tfrs)
    df = pd.concat(clusters)
    df.set_index(['cluster', 'hemi'], append=True, inplace=True)
    return df.reorder_levels(['hemi', 'cluster', 'trial', 'freq'])
def plot_cluster(names, view):
    """Plot the ROI labels of the named clusters on the left hemisphere.

    NOTE(review): the ``view`` parameter is currently unused — the
    hemisphere is hard-coded to "lh"; confirm whether ``view`` should be
    forwarded to ``plot_roi``.

    Args:
        names: iterable of keys into ``atlas_glasser.areas``; each maps
            to a cluster name whose labels are plotted.
        view: unused.
    """
    from pymeg import atlas_glasser

    all_clusters, _, _, _ = atlas_glasser.get_clusters()
    label_names = [
        label
        for name in names
        for label in all_clusters[atlas_glasser.areas[name]]
    ]
    plot_roi("lh", label_names, "r")
def aggregate(tfr_data, hemis, all_clusters=None): """Aggregate individual areas into clusters. """ from itertools import product from pymeg import atlas_glasser if all_clusters is None: all_clusters, _, _, _ = atlas_glasser.get_clusters() clusters = [] tfr_areas = np.unique(tfr_data.index.get_level_values("area")) for hemi, cluster in product(hemis, all_clusters.keys()): print("Working on %s, %s" % (hemi, cluster)) tfrs_rh = [area for area in all_clusters[cluster] if "rh" in area] tfrs_lh = [area for area in all_clusters[cluster] if "lh" in area] tfrs_rh = [ t for t in tfr_areas if any([a.lower() in t.lower() for a in tfrs_rh]) ] tfrs_lh = [ t for t in tfr_areas if any([a.lower() in t.lower() for a in tfrs_lh]) ] lh_idx = tfr_data.index.isin(tfrs_lh, level="area") rh_idx = tfr_data.index.isin(tfrs_rh, level="area") left = tfr_data.loc[lh_idx, :].groupby(["freq", "trial"]).mean() right = tfr_data.loc[rh_idx, :].groupby(["freq", "trial"]).mean() if (len(left) == 0) or (len(right) == 0): print("%s: Left or right Hemi is empty, skipping for aggregation" % (cluster)) continue if hemi == "Pair": left.loc[:, "cluster"] = cluster + "_LH" left.loc[:, "hemi"] = "Pair" right.loc[:, "cluster"] = cluster + "_RH" right.loc[:, "hemi"] = "Pair" clusters.append(left) clusters.append(right) else: if hemi == "Lateralized": tfrs = left - right elif hemi == "Averaged": tfrs = (right + left) / 2 tfrs.loc[:, "cluster"] = cluster tfrs.loc[:, "hemi"] = hemi clusters.append(tfrs) df = pd.concat(clusters) df.set_index(["cluster", "hemi"], append=True, inplace=True) return df.reorder_levels(["hemi", "cluster", "trial", "freq"])
def compute_contrast(
    contrasts,
    data_globstring,
    base_globstring,
    meta_data,
    baseline_time,
    baseline_per_condition=True,
    n_jobs=1,
    cache=Cache(cache=False),
    all_clusters=None,
):
    """Compute a single contrast from tfr data

    Args:
        contrast: dict
            Contains contrast names as keys and len==3 tuples as values.
            The tuples contain a list of condition names first, then a
            set of weights for each condition, then the hemispheres to
            compute the contrast across. Condition names identify
            columns in the meta data that are one for each trial that
            belongs to this condition. Hemispheres can be:
                'lh_is_ipsi' if contrast is ipsi-contra hemi and left
                    hemi is ipsi.
                'rh_is_ipsi' if contrast is ipsi-contra and right hemi
                    is ipsi
                'avg' if contrast should be averaged across hemispheres
        data_globstring: list
            Each string in data_globstring selects a set of filenames if
            passed through glob. Condition averages and baselines are
            then computed for each group of filenames identified by one
            entry in data_globstring. This is useful for, e.g. computing
            conditions per session first, then averaging them and then
            computing contrasts across sessions.
        base_globstring: string or list
            Same as data_globstring but selects data to use for
            baselining
        meta_data: data frame
            Meta data DataFrame with as many rows as trials.
        baseline_time: tuple
        all_clusters : dict with cluster definitions, default None
            If None it is loaded from atlas_glasser get_contrasts

    NOTE(review): the default ``cache=Cache(cache=False)`` is evaluated
    once at definition time, so a single Cache instance is shared by all
    calls that rely on the default — confirm Cache is stateless enough
    for that.
    """
    from itertools import product  # NOTE(review): imported but unused here.
    # load for all subjects:
    tfr_condition = []
    from functools import reduce

    # Union of all condition names referenced by any contrast.
    conditions = set(
        reduce(lambda x, y: x + y, [x[0] for x in contrasts.values()]))
    print("computing mean tfr for all areas and conditions...")
    # Per-area, per-condition mean TFRs (baselined); heavy lifting and
    # caching happen inside pool_conditions.
    tfr_condition = pool_conditions(
        conditions=conditions,
        data_globs=data_globstring,
        base_globs=base_globstring,
        meta_data=meta_data,
        baseline_time=baseline_time,
        baseline_per_condition=baseline_per_condition,
        n_jobs=n_jobs,
        cache=cache,
    )
    print("computing contrasts for all clusters...")
    # Lower case all area names
    # FIXME: Set all area names to lower case!
    if all_clusters is None:
        all_clusters, _, _, _ = atlas_glasser.get_clusters()
    # Pull the 'area' index level values out of the pooled TFR frame.
    tfr_areas = np.array([
        a for a in tfr_condition.index.levels[np.where(
            np.array(tfr_condition.index.names) == "area")[0][0]]
    ])
    tfr_areas_lower = np.array([area.lower() for area in tfr_areas])
    # Rewrite each cluster's area list to the exact (case-preserving)
    # area names present in the data; areas without exactly one match
    # are silently dropped from the cluster.
    for cluster, areas in all_clusters.items():
        new_areas = []
        for area in areas:
            idx = np.where(tfr_areas_lower == area.lower())[0]
            if len(idx) == 1:
                new_areas.append(tfr_areas[idx[0]])
        all_clusters[cluster] = new_areas
    # mean across sessions:
    tfr_condition = tfr_condition.groupby(["area", "condition", "freq"]).mean()
    cluster_contrasts = []
    for cur_contrast in contrasts.items():
        print(cur_contrast)
        for cluster in all_clusters.keys():
            print(cluster)
            # NOTE(review): this rebinds ``conditions`` (the set built
            # above), shadowing it for the rest of the loop — harmless
            # because the set is no longer needed, but fragile.
            contrast, (conditions, weights, hemi) = cur_contrast
            logging.info("Start computing contrast %s for cluster %s -> %s" %
                         (contrast, cluster, hemi))
            right = []
            left = []
            for condition in conditions:
                # Collect per-area mean TFRs for this condition, split
                # by hemisphere ('rh' substring in the area name).
                tfrs_rh = []
                tfrs_lh = []
                for area in all_clusters[cluster]:
                    area_idx = tfr_condition.index.isin([area], level="area")
                    condition_idx = tfr_condition.index.isin([condition], level="condition")
                    subset = (tfr_condition.loc[area_idx & condition_idx].groupby(
                        ["freq"]).mean())
                    if "rh" in area:
                        tfrs_rh.append(subset)
                    else:
                        tfrs_lh.append(subset)
                # What happens when an area is not defined for both hemis?
                if (len(tfrs_lh) == 0) and (len(tfrs_rh) == 0):
                    # NOTE(review): logging.warn is deprecated in favor
                    # of logging.warning.
                    logging.warn("Skipping condition %s in cluster %s" % (condition, cluster))
                    continue
                # pd.concat raises ValueError on an empty list; a
                # one-hemisphere cluster is tolerated here and handled
                # by the length checks below.
                try:
                    left.append(pd.concat(tfrs_lh))
                except ValueError:
                    print("Exception 327")
                    pass
                try:
                    right.append(pd.concat(tfrs_rh))
                except ValueError:
                    print("Exception 332")
                    pass
            if (len(left) == 0) and (len(right) == 0):
                logging.warn("Skipping cluster %s" % (cluster))
                continue
            # For ipsi-contra contrasts, normalize so that ``left`` is
            # always the ipsilateral hemisphere.
            if hemi == "rh_is_ipsi":
                left, right = right, left
            if "is_ipsi" in hemi:
                if not len(left) == len(right):
                    logging.warn(
                        "Skipping cluster %s: does not have the same number of lh/rh rois"
                        % (cluster))
                    continue
                # Ipsi minus contra, per condition.
                tfrs = [left[i] - right[i] for i in range(len(left))]
            else:
                # 'avg' mode: use whichever hemisphere(s) are present.
                if (len(right) == 0) and (len(left) == len(weights)):
                    tfrs = left
                elif (len(left) == 0) and (len(right) == len(weights)):
                    tfrs = right
                else:
                    tfrs = [(right[i] + left[i]) / 2 for i in range(len(left))]
            assert len(tfrs) == len(weights)
            # Weighted sum across conditions = the contrast.
            tfrs = [tfr * weight for tfr, weight in zip(tfrs, weights)]
            tfrs = reduce(lambda x, y: x + y, tfrs)
            tfrs = tfrs.groupby("freq").mean()
            if tfrs.shape[0] == 0:
                continue
            tfrs.loc[:, "cluster"] = cluster
            tfrs.loc[:, "contrast"] = contrast
            tfrs.loc[:, "hemi"] = hemi
            cluster_contrasts.append(tfrs)
    logging.info("Done compute contrast")
    return pd.concat(cluster_contrasts)
if __name__ == '__main__': compute = True plot = True # subjects: subjects = [ 'jw01', 'jw02', 'jw03', 'jw05', 'jw07', 'jw08', 'jw09', 'jw10', 'jw11', 'jw12', 'jw13', 'jw14', 'jw15', 'jw16', 'jw17', 'jw18', 'jw19', 'jw20', 'jw21', 'jw22', 'jw23', 'jw24', 'jw30' ] # subjects = ['jw01', 'jw02',] # get clusters: all_clusters, visual_field_clusters, glasser_clusters, jwg_clusters = atlas_glasser.get_clusters( ) # define contrasts: contrasts = { 'all': (['all'], [1]), 'choice': (['hit', 'fa', 'miss', 'cr'], (1, 1, -1, -1)), 'stimulus': (['hit', 'fa', 'miss', 'cr'], (1, -1, 1, -1)), 'hand': (['left', 'right'], (1, -1)), 'pupil': (['pupil_h', 'pupil_l'], (1, -1)), } # hemis: hemis = ['avg', 'avg', 'avg', 'rh_is_ipsi', 'avg'] # compute contrasts: if compute: