def __init__(
    self,
    base_dir=None,
    add_root=True,
    use_cache=True,
    scene_kwargs=None,
    **kwargs,
):
    """
    Initialise the class instance to get a few useful paths and variables.

    :param base_dir: str, path to the base directory in which all brainrender
        data are stored. Pass it only if you want to use a different one
        from the default.
    :param add_root: bool, if True the root mesh is added to the rendered scene
    :param use_cache: bool, if True data are loaded from a cache to speed
        things up. Set it to False to help with debugging.
    :param scene_kwargs: dict, params passed to the instance of Scene
        associated with this class (None stands in for an empty dict to
        avoid a mutable default argument)
    """
    Paths.__init__(self, base_dir=base_dir, **kwargs)

    # Get MCM cache
    cache_path = (
        Path(self.mouse_connectivity_volumetric) / "voxel_model_manifest.json"
    )
    if not cache_path.exists():
        if not connected_to_internet():
            raise ValueError(
                "The first time you use this class it will need to download "
                "some data, but it seems that you're not connected to the "
                "internet."
            )
        print(
            "Downloading volumetric data. This will take several minutes "
            "but it only needs to be done once."
        )

    self.cache = VoxelModelCache(manifest_file=str(cache_path))
    self.voxel_array = None
    self.target_coords, self.source_coords = None, None

    # Get projection cache paths
    self.data_cache = self.mouse_connectivity_volumetric_cache
    self.data_cache_projections = os.path.join(self.data_cache, "projections")
    self.data_cache_targets = os.path.join(self.data_cache, "targets")
    self.data_cache_sources = os.path.join(self.data_cache, "sources")

    for fold in [
        self.data_cache_projections,
        self.data_cache_targets,
        self.data_cache_sources,
    ]:
        if not os.path.isdir(fold):
            os.mkdir(fold)

    # Get structures tree
    self.structure_tree = self.cache.get_structure_tree()

    # Get scene
    self.scene = Scene(add_root=add_root, **(scene_kwargs or {}))

    # Other vars
    self.use_cache = use_cache
def main(): input_data = ju.read(INPUT_JSON) manifest_file = input_data.get('manifest_file') manifest_file = os.path.join(TOP_DIR, manifest_file) log_level = input_data.get('log_level', logging.DEBUG) logging.getLogger().setLevel(log_level) # get cache, metric logging.debug("loading regional matrix") cache = VoxelModelCache(manifest_file=manifest_file) df_metric = cache.get_normalized_connection_density(dataframe=True) # plot fig = plot(df_metric, STRUCTURES, cache, GRID_KWS, CBAR_KWS, HEATMAP_KWS, figsize=FIGSIZE) fig.savefig(OUTPUT_FILE, **SAVEFIG_KWARGS) plt.close(fig)
def voxel_model_cache(fn_temp_dir, mcc): manifest_path = os.path.join(fn_temp_dir, 'voxel_model_manifest.json') cache = VoxelModelCache(manifest_file=manifest_path) cache.get_reference_space = mock.Mock() cache.get_reference_space.return_value = mcc.get_reference_space() return cache
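# A hypothetical test consuming the fixture above, assuming it is registered
# with @pytest.fixture (the decorator is not shown here). Because
# get_reference_space is replaced by a mock, the test never touches the
# network or the on-disk manifest.
def test_reference_space_is_mocked(voxel_model_cache):
    rs = voxel_model_cache.get_reference_space()

    # mock.Mock hands back the same return_value on every call
    assert rs is voxel_model_cache.get_reference_space()
    assert voxel_model_cache.get_reference_space.call_count == 2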
def main(): input_data = ju.read(INPUT_JSON) manifest_file = input_data.get('manifest_file') manifest_file = os.path.join(TOP_DIR, manifest_file) log_level = input_data.get('log_level', logging.DEBUG) logging.getLogger().setLevel(log_level) # get cache, metric logging.debug("loading regional matrix") cache = VoxelModelCache(manifest_file=manifest_file) df_metric = cache.get_normalized_connection_density(dataframe=True) logging.debug("getting cortical network") df_cortex = get_cortical_df(df_metric, cache) # get projection types full_ipsi, cortex_ipsi = get_pt((df_metric, df_cortex)) full_contra, cortex_contra = get_pt((df_metric, df_cortex), pt="contra") logging.debug("Computing gaussian mixture model fits for max: %s" % MAX_COMPONENTS) dfs = (full_ipsi, full_contra, cortex_ipsi, cortex_contra) labels = ("full-ipsi", "full-contra", "cortex-ipsi", "cortex-contra") frames = [] for d, l in zip(dfs, labels): # log transform d = np.log10(d[d > 0]).reshape(-1, 1) # normality test _, p_value = stats.shapiro(d) # gmm gmm, bic = fit_gmm(d, MAX_COMPONENTS, **GMM_PARAMS) columns = ('mean', 'var', 'weight') print("", l, "-" * 40, sep="\n") print("shapiro-wilk p_value : %.5g" % p_value) print("optimal n components : %d" % gmm.n_components) print("bic : %.5g" % bic) print('\t'.join(columns)) print("----\t---\t------") attrs = tuple( map(np.ravel, (gmm.means_, gmm.covariances_, gmm.weights_))) for x in zip(*attrs): print("%.2f\t%.2f\t%.3f" % x) df = pd.DataFrame(dict(zip(columns, attrs))) df.index.name = 'n_components' frames.append(df) df = pd.concat(frames, keys=labels).unstack() df.to_csv(OUTPUT_FILE)
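# `fit_gmm` above is a repo helper; purely as a hedged sketch of what a
# BIC-selected Gaussian mixture fit could look like with scikit-learn (an
# assumption, not the repo's actual implementation). The returned pair
# matches how the script unpacks `gmm, bic = fit_gmm(...)`.
import numpy as np
from sklearn.mixture import GaussianMixture


def fit_gmm_sketch(X, max_components, **gmm_params):
    """Fit mixtures with 1..max_components components; keep the lowest BIC."""
    best_gmm, best_bic = None, np.inf
    for n in range(1, max_components + 1):
        gmm = GaussianMixture(n_components=n, **gmm_params).fit(X)
        bic = gmm.bic(X)
        if bic < best_bic:
            best_gmm, best_bic = gmm, bic
    return best_gmm, best_bic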
def test_to_json(fn_temp_dir): # ------------------------------------------------------------------------ # tests JSON serialization manifest_file = 'manifest.json' resolution = 100 path = os.path.join(fn_temp_dir, 'output.json') cache = VoxelModelCache(manifest_file=manifest_file, resolution=resolution) cache.to_json(path) input_data = json_utilities.read(path) assert input_data['manifest_file'] == manifest_file assert input_data['resolution'] == resolution
def main(injection_region, filtered=False): input_data = ju.read(INPUT_JSON) manifest_file = input_data.get('manifest_file') manifest_file = os.path.join(TOP_DIR, manifest_file) log_level = input_data.get('log_level', logging.DEBUG) logging.getLogger().setLevel(log_level) # get voxel_model_cache cache = VoxelModelCache(manifest_file=manifest_file) # get region id region_id = get_region_id(cache, injection_region) logging.debug("performing virtual injection into %s (%s)" % (injection_region, region_id)) projection = get_projection( cache, region_id, full=FULL_INJECTION, filtered=filtered) # get projection (row) logging.debug("upscaling projection to 10 micron") projection = upscale_projection(projection, SCALE, **UPSCALE_KWARGS) # file name suffix = injection_region + "full" if FULL_INJECTION else injection_region vol_file = os.path.join(VOLUME_DIR, "projection_density_%s.nrrd" % suffix) logging.debug("saving projection volume : %s" % vol_file) nrrd.write(vol_file, projection, options=dict(encoding='raw')) return vol_file
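# The saved volume can be read straight back with pynrrd; the file name
# below is illustrative (it depends on the injection region and the
# FULL_INJECTION flag above).
import nrrd

projection, header = nrrd.read('projection_density_VISp.nrrd')
print(projection.shape, header['encoding'])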
def main(): input_data = ju.read(INPUT_JSON) structures = input_data.get('structures') manifest_file = input_data.get('manifest_file') manifest_file = os.path.join(TOP_DIR, manifest_file) log_level = input_data.get('log_level', logging.DEBUG) logging.getLogger().setLevel(log_level) # experiments to exclude experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON) # get caching object cache = VoxelModelCache(manifest_file=manifest_file) output_file = os.path.join(OUTPUT_DIR, 'hyperparameters-%s.json' % OPTION) results = dict() for structure in structures: logging.debug("Running cross validation for structure: %s", structure) structure_id = get_structure_id(cache, structure) results[structure] = fit_structure(cache, structure_id, experiments_exclude, kernel=KERNEL, model_option=OPTION) # write results ju.write(output_file, results)
def main():
    input_data = ju.read(INPUT_JSON)

    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # experiments to exclude
    experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON)

    # suffix for the output file name
    suffix = 'high_res' if HIGH_RES else 'standard'

    # get caching object
    cache = VoxelModelCache(manifest_file=manifest_file)

    fit_kwargs = dict(high_res=HIGH_RES,
                      threshold_injection=THRESHOLD_INJECTION,
                      experiments_exclude=experiments_exclude)
    model = fit(cache, **fit_kwargs)

    # write results
    logging.debug('saving')
    output_file = os.path.join(OUTPUT_DIR, 'homogeneous-%s-model.csv' % suffix)
    model.to_csv(output_file)
def main(): input_data = ju.read(INPUT_JSON) structures = input_data.get('structures') manifest_file = input_data.get('manifest_file') manifest_file = os.path.join(TOP_DIR, manifest_file) log_level = input_data.get('log_level', logging.DEBUG) logging.getLogger().setLevel(log_level) # experiments to exclude experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON) # get caching object cache = VoxelModelCache(manifest_file=manifest_file) # get full map full_map = get_full_map(cache, structures, experiments_exclude) # convert to df df = get_df(full_map, structures) # save df.to_csv(OUTPUT_FILE)
def main(): # set log level logging.getLogger().setLevel(args.log_level) # initialize cache object logging.info('initializing VoxelModelCache with manifest_file: %s', args.manifest_file) cache = VoxelModelCache(manifest_file=args.manifest_file) structure_ids = get_ordered_summary_structures(cache) # load in voxel model logging.info('loading array') voxel_array, source_mask, target_mask = cache.get_voxel_connectivity_array() source_key = source_mask.get_key(structure_ids=structure_ids) ipsi_key = target_mask.get_key(structure_ids=structure_ids, hemisphere_id=2) contra_key = target_mask.get_key(structure_ids=structure_ids, hemisphere_id=1) ipsi_model = RegionalizedModel.from_voxel_array( voxel_array, source_key, ipsi_key, ordering=structure_ids, dataframe=True) contra_model = RegionalizedModel.from_voxel_array( voxel_array, source_key, contra_key, ordering=structure_ids, dataframe=True) # get each metric get_metric = lambda s: pd.concat((getattr(ipsi_model, s), getattr(contra_model, s)), keys=('ipsi', 'contra'), axis=1) # write results if not os.path.exists(OUTPUT_DIR): os.makedirs(OUTPUT_DIR) # regionalized logging.info('saving metrics to directory: %s', OUTPUT_DIR) get_metric('connection_density').to_csv( os.path.join(OUTPUT_DIR, 'connection_density.csv')) get_metric('connection_strength').to_csv( os.path.join(OUTPUT_DIR, 'connection_strength.csv')) get_metric('normalized_connection_density').to_csv( os.path.join(OUTPUT_DIR, 'normalized_connection_density.csv')) get_metric('normalized_connection_strength').to_csv( os.path.join(OUTPUT_DIR, 'normalized_connection_strength.csv'))
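# The metric CSVs written above carry a two-level column header
# ('ipsi'/'contra' over target acronyms, from pd.concat with keys);
# a sketch of reading one back:
import os

import pandas as pd

df = pd.read_csv(os.path.join(OUTPUT_DIR, 'connection_density.csv'),
                 header=[0, 1], index_col=0)
ipsi, contra = df['ipsi'], df['contra']  # per-hemisphere blocks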
def main():
    # get cache object for loading annotation/model objects
    cache = VoxelModelCache(manifest_file=MANIFEST_FILE)

    # get ids for visual areas
    visual_ids = get_structure_ids_from_acronyms(cache, VISUAL_AREA_ACRONYMS)

    # get voxel-scale connectivity of visual network (ipsilateral)
    visual_network = get_voxel_subgraph(cache, visual_ids, hemisphere_id=2)

    # save visual_network (an empty delimiter would run the values together)
    logger.debug('Saving the visual network to %s', OUTPUT_FILE)
    np.savetxt(OUTPUT_FILE, visual_network, delimiter=',')
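# Loading the saved matrix back; the delimiter must match the one used in
# np.savetxt above.
import numpy as np

visual_network = np.loadtxt(OUTPUT_FILE, delimiter=',')
assert visual_network.ndim == 2  # voxel-by-voxel adjacency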
def main(): input_data = ju.read(INPUT_JSON) manifest_file = input_data.get('manifest_file') manifest_file = os.path.join(TOP_DIR, manifest_file) log_level = input_data.get('log_level', logging.DEBUG) logging.getLogger().setLevel(log_level) # get cache, metric logging.debug("loading regional matrix") cache = VoxelModelCache(manifest_file=manifest_file) df_metric = cache.get_normalized_connection_density(dataframe=True) logging.debug("getting cortical network") df_cortex = get_cortical_df(df_metric, cache) # get projection types full_ipsi, cortex_ipsi = get_pt((df_metric, df_cortex)) full_contra, cortex_contra = get_pt((df_metric, df_cortex), pt="contra") logging.debug("Computing distribution fits for") logging.debug("%s" % DISTRIBUTIONS) fitter = DistFit(DISTRIBUTIONS) dfs = (full_ipsi, full_contra, cortex_ipsi, cortex_contra) labels = ("full-ipsi", "full-contra", "cortex-ipsi", "cortex-contra") frames = [] for d, l in zip(dfs, labels): fitter.fit(d[d > 0]) logging.debug(l) logging.debug(str(fitter)) frames.append(results_to_df(fitter)) df = pd.concat(frames, keys=labels).unstack() df.to_csv(OUTPUT_FILE)
def test_from_json(fn_temp_dir): # ------------------------------------------------------------------------ # tests alternative constructor manifest_file = 'manifest.json' resolution = 100 path = os.path.join(fn_temp_dir, 'input.json') input_data = dict(manifest_file=manifest_file, resolution=resolution) json_utilities.write(path, input_data) cache = VoxelModelCache.from_json(path) assert cache.manifest_file == manifest_file assert cache.resolution == resolution
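# The two tests above imply a simple round trip; a sketch (paths and the
# resolution value are illustrative):
cache = VoxelModelCache(manifest_file='manifest.json', resolution=100)
cache.to_json('cache_config.json')

restored = VoxelModelCache.from_json('cache_config.json')
assert restored.manifest_file == cache.manifest_file
assert restored.resolution == cache.resolution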
def main():
    input_data = ju.read(INPUT_JSON)

    structures = input_data.get('structures')

    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # experiments to exclude
    experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON)
    eid_set = ju.read(EXPERIMENTS_PTP_JSON)
    hyperparameters = ju.read(HYPERPARAMETER_JSON)

    # get caching object
    cache = VoxelModelCache(manifest_file=manifest_file)

    output_dir = os.path.join(OUTPUT_DIR, 'voxel-%s' % ERROR_OPTION)
    run_kwargs = dict(experiments_exclude=experiments_exclude,
                      error_option=ERROR_OPTION)

    logging.debug("structures: %s", structures)
    for structure in reversed(structures):
        # get structure id
        logging.debug("Running nested cross validation for structure: %s",
                      structure)
        structure_id = get_structure_id(cache, structure)

        run_kwargs.update(hyperparameters[structure])
        scores = run_structure(cache, structure_id, eid_set=None, **run_kwargs)

        logging.debug("voxel score : %.2f", scores['test_voxel'].mean())
        logging.debug("regional score : %.2f", scores['test_regional'].mean())

        write_output(output_dir, structure, structure_id, scores,
                     'scores_full')

        logging.debug("Scoring only where power to predict")
        try:
            scores = run_structure(cache, structure_id, eid_set=eid_set,
                                   **run_kwargs)
            logging.debug("voxel score : %.2f", scores['test_voxel'].mean())
            logging.debug("regional score : %.2f",
                          scores['test_regional'].mean())
        except Exception:
            # a bare except here would also swallow KeyboardInterrupt
            logging.debug("Not enough experiments to score")
        else:
            write_output(output_dir, structure, structure_id, scores,
                         'scores_ptp')
def main(): input_data = ju.read(INPUT_JSON) # experiments to exclude experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON) manifest_file = input_data.get('manifest_file') manifest_file = os.path.join(TOP_DIR, manifest_file) log_level = input_data.get('log_level', logging.DEBUG) logging.getLogger().setLevel(log_level) # get cache, metric logging.debug("loading experiments") cache = VoxelModelCache(manifest_file=manifest_file) weights = get_nnz_weights(cache, exp_exclude=experiments_exclude) # get all weights logging.debug("plotting") plot_weights(weights)
def main():
    input_data = ju.read(INPUT_JSON)

    structures = input_data.get('structures')

    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # experiments to exclude
    experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON)
    eid_set = ju.read(EXPERIMENTS_PTP_JSON)

    # get caching object
    cache = VoxelModelCache(manifest_file=manifest_file)

    suffix = 'high_res' if HIGH_RES else 'standard'
    output_dir = os.path.join(OUTPUT_DIR, 'homogeneous-%s' % suffix)
    run_kwargs = dict(high_res=HIGH_RES,
                      threshold_injection=THRESHOLD_INJECTION,
                      experiments_exclude=experiments_exclude,
                      cv=CV)

    for structure in structures:
        # get structure id
        logging.debug("Running nested cross validation for structure: %s",
                      structure)
        structure_id = get_structure_id(cache, structure)

        scores = run_structure(cache, structure_id, eid_set=None, **run_kwargs)
        logging.debug("regional score : %.2f", scores['test_regional'].mean())

        write_output(output_dir, structure, structure_id, scores,
                     'scores_full')

        logging.debug("Scoring only where power to predict")
        try:
            scores = run_structure(cache, structure_id, eid_set=eid_set,
                                   **run_kwargs)
            logging.debug("regional score : %.2f",
                          scores['test_regional'].mean())
        except Exception:
            # a bare except here would also swallow KeyboardInterrupt
            logging.debug("Not enough experiments to score")
        else:
            write_output(output_dir, structure, structure_id, scores,
                         'scores_ptp')
def main(experiment_ids):
    input_data = ju.read(INPUT_JSON)

    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # get voxel_model_cache
    cache = VoxelModelCache(manifest_file=manifest_file)

    # get model weights
    logging.debug('loading model')
    voxel_array, source_mask, target_mask = cache.get_voxel_connectivity_array()

    # file save suffix
    if SMOOTHED:
        suffix = 'smoothed'
        if LOG:
            suffix += '-log'
    elif LOG:
        suffix = 'log'
    else:
        suffix = 'standard'

    # top down viewer
    tdv = TopDownView(cache, CMAP_FILE, blend_factor=BLEND_FACTOR)

    if SMOOTHED:
        osm = OptimizedSmoothedModel(cache, voxel_array, source_mask,
                                     target_mask)

    # `eids` avoids shadowing the `experiment_ids` argument
    for ss, eids in experiment_ids.items():
        for eid in eids:
            # get experiment projection/centroid
            logging.debug('loading experiment %d', eid)
            experiment = cache.get_projection_density(eid)[0]

            logging.debug('getting model weights')
            # ---------
            # get model weights
            centroid = tdv.get_experiment_centroid(eid)

            if SMOOTHED:
                logging.debug("Filtering target")
                volume = osm.fit_voxel(centroid)
            else:
                row = source_mask.get_flattened_voxel_index(centroid)
                volume = voxel_array[row]

            if LOG:
                logging.debug("inverse log transforming target")
                volume = np.power(10.0, volume)
                volume[volume < EPSILON] = 0  # instead of x-EPS for numerical reasons

            model = target_mask.map_masked_to_annotation(volume)
            # ---------

            # get image
            logging.debug('creating images')
            exp = tdv.get_top_view(experiment)
            mod = tdv.get_top_view(model)

            # save
            output_dir = os.path.join(OUTPUT_DIR, ss, str(eid))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            logging.debug('saving')
            exp.save(os.path.join(output_dir, 'data_%d.png' % eid))
            mod.save(os.path.join(output_dir,
                                  '%s_model_%d.png' % (suffix, eid)))

            out_data = dict(experiment_id=eid, centroid=centroid)
            with open(os.path.join(output_dir, 'out_%d.json' % eid), 'w') as f:
                json.dump(out_data, f, indent=2)
def main():
    input_data = ju.read(INPUT_JSON)

    structures = input_data.get('structures')

    manifest_file = input_data.get('manifest_file')
    manifest_file = os.path.join(TOP_DIR, manifest_file)

    log_level = input_data.get('log_level', logging.DEBUG)
    logging.getLogger().setLevel(log_level)

    # experiments to exclude
    experiments_exclude = ju.read(EXPERIMENTS_EXCLUDE_JSON)

    # load hyperparameter dict
    suffix = 'log' if LOG else 'standard'
    hyperparameter_json = os.path.join(OUTPUT_DIR,
                                       'hyperparameters-%s.json' % suffix)
    hyperparameters = ju.read(hyperparameter_json)

    # get caching object
    cache = VoxelModelCache(manifest_file=manifest_file)

    # get structure ids
    structure_ids = [get_structure_id(cache, s) for s in structures]

    # mask for reordering source
    annotation = cache.get_annotation_volume()[0]
    cumm_source_mask = np.zeros(annotation.shape, dtype=int)

    offset = 1  # start @ 1 so that nonzero can be used
    weights, nodes = [], []
    for sid, sac in zip(structure_ids, structures):
        logging.debug("Building model for structure: %s", sac)
        data, reg = fit_structure(cache, sid, experiments_exclude,
                                  hyperparameters[sac], model_option=suffix)
        w = reg.get_weights(data.injection_mask.coordinates)

        # assign ordering to full source
        ordering = np.arange(offset, w.shape[0] + offset, dtype=int)
        offset += w.shape[0]

        # get source mask
        data.injection_mask.fill_volume_where_masked(cumm_source_mask,
                                                     ordering)

        # append to list
        weights.append(w)
        nodes.append(reg.nodes)

    # stack
    weights = padded_diagonal_fill(weights)
    nodes = np.vstack(nodes)

    # need to reorder weights (subtract 1 to get proper index)
    permutation = cumm_source_mask[cumm_source_mask.nonzero()] - 1
    weights = weights[permutation, :]

    # regionalized
    logging.debug('regionalizing voxel weights')
    ontological_order = get_ordered_summary_structures(cache)
    source_mask = Mask.from_cache(cache,
                                  structure_ids=structure_ids,
                                  hemisphere_id=2)
    source_key = source_mask.get_key(structure_ids=ontological_order)
    # NOTE: `data` still holds the masks from the last structure in the
    # loop; the projection mask is shared across structures
    ipsi_key = data.projection_mask.get_key(structure_ids=ontological_order,
                                            hemisphere_id=2)
    contra_key = data.projection_mask.get_key(structure_ids=ontological_order,
                                              hemisphere_id=1)

    ipsi_model = RegionalizedModel(weights, nodes, source_key, ipsi_key,
                                   ordering=ontological_order, dataframe=True)
    contra_model = RegionalizedModel(weights, nodes, source_key, contra_key,
                                     ordering=ontological_order,
                                     dataframe=True)

    get_metric = lambda s: pd.concat(
        (getattr(ipsi_model, s), getattr(contra_model, s)),
        keys=('ipsi', 'contra'), axis=1)

    # write results
    output_dir = os.path.join(TOP_DIR, 'connectivity',
                              'voxel-%s-model' % suffix)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # regionalized
    logging.debug('saving to directory: %s', output_dir)
    get_metric('connection_density').to_csv(
        os.path.join(output_dir, 'connection_density.csv'))
    get_metric('connection_strength').to_csv(
        os.path.join(output_dir, 'connection_strength.csv'))
    get_metric('normalized_connection_density').to_csv(
        os.path.join(output_dir, 'normalized_connection_density.csv'))
    get_metric('normalized_connection_strength').to_csv(
        os.path.join(output_dir, 'normalized_connection_strength.csv'))

    # voxel
    ju.write(os.path.join(output_dir, 'target_mask_params.json'),
             dict(structure_ids=structure_ids, hemisphere_id=3))
    ju.write(os.path.join(output_dir, 'source_mask_params.json'),
             dict(structure_ids=structure_ids, hemisphere_id=2))
    np.savetxt(os.path.join(output_dir, 'weights.csv.gz'),
               weights.astype(np.float32), delimiter=',')
    np.savetxt(os.path.join(output_dir, 'nodes.csv.gz'),
               nodes.astype(np.float32), delimiter=',')
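# `padded_diagonal_fill` used above is a repo helper; a plausible sketch of
# the idea (an assumption, not the repo's code): place each per-structure
# weight block on the diagonal of a dense zero matrix, padding the rest.
import numpy as np


def padded_diagonal_fill_sketch(arrays):
    out = np.zeros((sum(a.shape[0] for a in arrays),
                    sum(a.shape[1] for a in arrays)),
                   dtype=arrays[0].dtype)
    row = col = 0
    for a in arrays:
        out[row:row + a.shape[0], col:col + a.shape[1]] = a
        row += a.shape[0]
        col += a.shape[1]
    return out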
def main(): # caching object for downloading/loading connectivity/model data cache = VoxelModelCache(manifest_file=MANIFEST_FILE) # load voxel model logging.info('loading voxel array') voxel_array, source_mask, target_mask = cache.get_voxel_connectivity_array( ) reference_shape = source_mask.reference_space.annotation.shape vmax = 1.2 * np.percentile(voxel_array.nodes, 99) # 2D Cortical Surface Mapper mapper = CorticalMap(projection='top_view') # colormaps cmap_view = matplotlib.cm.viridis cmap_pixel = matplotlib.cm.spring cmap_view.set_bad(alpha=0) cmap_pixel.set_bad(alpha=0) # only want R hemisphere lookup = mapper.view_lookup.copy().T # transpose for vertical pixel query lookup[:lookup.shape[0] // 2, :] = -1 # dict(2D lookup_value -> avg(path)) logging.info('beginning image creation') for i, val in enumerate(lookup[lookup > -1]): # get the mean path voxel path = mapper.paths[val][mapper.paths[val].nonzero()] path = np.vstack([np.unravel_index(x, reference_shape) for x in path]) voxel = tuple(map(int, path.mean(axis=0))) try: row_idx = source_mask.get_flattened_voxel_index(voxel) except ValueError: logging.warning('voxel %s not in mask', voxel) else: # get voxel expression volume = target_mask.fill_volume_where_masked( np.zeros(reference_shape), voxel_array[row_idx]) # map to cortical surface flat_view = mapper.transform(volume, fill_value=np.nan) plt.pcolormesh(flat_view, zorder=1, cmap=cmap_view, vmin=0, vmax=vmax) # injection location pixel = np.ma.masked_where(mapper.view_lookup != val, flat_view, copy=False) plt.pcolormesh(pixel, zorder=2, cmap=cmap_pixel, vmin=0, vmax=1) # plot params plt.gca().invert_yaxis() plt.axis('off') plt.savefig(os.path.join(OUTPUT_DIR, '%05d.png' % i), bbox_inches='tight', facecolor=None, edgecolor=None, transparent=True, dpi=50) plt.close() logging.info('converting images to gif') subprocess.run(GIF_CONVERT_COMMAND, stdout=subprocess.PIPE, cwd=OUTPUT_DIR, shell=True)
def main(): # caching object for downloading/loading connectivity/model data cache = VoxelModelCache(manifest_file=MANIFEST_FILE) # load voxel model logging.info('loading voxel array') voxel_array, source_mask, target_mask = cache.get_voxel_connectivity_array( ) reference_shape = source_mask.reference_space.annotation.shape vmax = 1.2 * np.percentile(voxel_array.nodes, 99) # 2D Cortical Surface Mapper # projection: can change to "flatmap" if desired mapper = CorticalMap(projection='top_view') # quick hack to fix bug mapper.view_lookup[51, 69] = mapper.view_lookup[51, 68] mapper.view_lookup[51, 44] = mapper.view_lookup[51, 43] # colormaps cmap_view = matplotlib.cm.inferno cmap_pixel = matplotlib.cm.cool cmap_view.set_bad(alpha=0) cmap_pixel.set_bad(alpha=0) # only want R hemisphere lookup = mapper.view_lookup.copy().T # transpose for vertical pixel query lookup[:lookup.shape[0] // 2, :] = -1 # dict(2D lookup_value -> avg(path)) logging.info('beginning image creation') for i, val in enumerate(lookup[lookup > -1]): # get the mean path voxel print("Evaluating pixel %d" % i) path = mapper.paths[val][mapper.paths[val].nonzero()] path = np.vstack([np.unravel_index(x, reference_shape) for x in path]) voxel = tuple(map(int, path.mean(axis=0))) try: row_idx = source_mask.get_flattened_voxel_index(voxel) except ValueError: logging.warning('voxel %s not in mask', voxel) else: # get voxel expression volume = target_mask.fill_volume_where_masked( np.zeros(reference_shape), voxel_array[row_idx]) # map to cortical surface flat_view = mapper.transform(volume, fill_value=np.nan) # injection location pixel = np.ma.masked_where(mapper.view_lookup != val, flat_view, copy=False) # plot & params fig, ax = plt.subplots(figsize=(6, 6)) # plot connectivity im = plt.pcolormesh(flat_view, zorder=1, cmap=cmap_view, vmin=0, vmax=vmax) # plot source voxel plt.pcolormesh(pixel, zorder=2, cmap=cmap_pixel, vmin=0, vmax=1) plt.gca().invert_yaxis() # flips yaxis plt.axis('off') # plot overlay extent = plt.gca().get_xlim() + plt.gca().get_ylim() plt.imshow(top_down_overlay, interpolation="nearest", extent=extent, zorder=3) # add colorbar cbar = plt.colorbar(im, shrink=0.3, use_gridspec=True, format="%1.1e") cbar.set_ticks([0, 0.0004, 0.0008, 0.0012]) cbar.ax.tick_params(labelsize=6) plt.tight_layout() plt.savefig(os.path.join(OUTPUT_DIR, '%05d.png' % i), bbox_inches=None, facecolor=None, edgecolor=None, transparent=True, dpi=240) plt.close()
class VolumetricAPI(Paths):
    """
    This class takes care of downloading, analysing and rendering data from:
    "High-resolution data-driven model of the mouse connectome", Knox et al. 2018
    [https://www.mitpressjournals.org/doi/full/10.1162/netn_a_00066].

    These data can be used to look at spatialised projection strength with
    sub-region (100um) resolution, e.g. to see where in region B the
    projections from region A terminate.

    To download the data, this class uses code from:
    https://github.com/AllenInstitute/mouse_connectivity_models.
    """

    voxel_size = 100
    projections = {}
    mapped_projections = {}
    hemispheres = dict(left=1, right=2, both=3)

    def __init__(self,
                 base_dir=None,
                 add_root=True,
                 use_cache=True,
                 scene_kwargs=None,
                 **kwargs):
        """
        Initialise the class instance to get a few useful paths and variables.

        :param base_dir: str, path to the base directory in which all
            brainrender data are stored. Pass it only if you want to use a
            different one from the default.
        :param add_root: bool, if True the root mesh is added to the rendered scene
        :param use_cache: bool, if True data are loaded from a cache to speed
            things up. Set it to False to help with debugging.
        :param scene_kwargs: dict, params passed to the instance of Scene
            associated with this class
        """
        Paths.__init__(self, base_dir=base_dir, **kwargs)

        # Get MCM cache
        cache_path = os.path.join(self.mouse_connectivity_volumetric,
                                  'voxel_model_manifest.json')
        if not os.path.isfile(cache_path):
            if not connected_to_internet():
                raise ValueError(
                    "The first time you use this class it will need to "
                    "download some data, but it seems that you're not "
                    "connected to the internet.")
            print("Downloading volumetric data. This will take several "
                  "minutes but it only needs to be done once.")

        self.cache = VoxelModelCache(manifest_file=cache_path)
        self.voxel_array = None
        self.target_coords, self.source_coords = None, None

        # Get projection cache paths
        self.data_cache = self.mouse_connectivity_volumetric_cache
        self.data_cache_projections = os.path.join(self.data_cache,
                                                   "projections")
        self.data_cache_targets = os.path.join(self.data_cache, "targets")
        self.data_cache_sources = os.path.join(self.data_cache, "sources")

        for fold in [
                self.data_cache_projections, self.data_cache_targets,
                self.data_cache_sources
        ]:
            if not os.path.isdir(fold):
                os.mkdir(fold)

        # Get structures tree
        self.structure_tree = self.cache.get_structure_tree()

        # Get scene
        self.scene = Scene(add_root=add_root, **(scene_kwargs or {}))

        # Other vars
        self.use_cache = use_cache

    def __getattr__(self, attr):
        __dict__ = super(VolumetricAPI, self).__getattribute__('__dict__')
        try:
            return __dict__['scene'].__getattribute__(attr)
        except AttributeError as e:
            raise AttributeError(
                f"Could not get attribute {attr} from class VolumetricAPI:\n{e}")

    # ---------------------------------------------------------------------------- #
    #                                     UTILS                                    #
    # ---------------------------------------------------------------------------- #
    # ------------------------- Interaction with mcmodels ------------------------ #
    def _get_structure_id(self, struct):
        "Get the ID of a structure (or list of structures) given its acronym"
        if not isinstance(struct, (list, tuple)):
            struct = [struct]
        return [
            self.structure_tree.get_structures_by_acronym([s])[0]["id"]
            for s in struct
        ]

    def _load_voxel_data(self):
        "Load the VoxelData array from Knox et al 2018"
        if self.voxel_array is None:
            # Get VoxelArray
            weights_file = os.path.join(self.mouse_connectivity_volumetric,
                                        'voxel_model', 'weights')
            nodes_file = os.path.join(self.mouse_connectivity_volumetric,
                                      'voxel_model', 'nodes')
            # Try to load from numpy
            if os.path.isfile(weights_file + '.npy.gz'):
                weights = load_npy_from_gz(weights_file + '.npy.gz')
                nodes = load_npy_from_gz(nodes_file + '.npy.gz')

                # Create array
                self.voxel_array = VoxelConnectivityArray(weights, nodes)

                # Get target and source masks
                self.source_mask = self.cache.get_source_mask()
                self.target_mask = self.cache.get_target_mask()
            else:
                print("Loading voxel data, might take a few minutes.")
                # load from standard cache
                self.voxel_array, self.source_mask, self.target_mask = \
                    self.cache.get_voxel_connectivity_array()

                # save to npy
                save_npy_to_gz(weights_file + '.npy.gz',
                               self.voxel_array.weights)
                save_npy_to_gz(nodes_file + '.npy.gz', self.voxel_array.nodes)

    def _get_coordinates_from_voxel_id(self, p0, as_source=True):
        """
        Takes the index of a voxel and returns the 3D coordinates in
        reference space. The index number should be extracted with either
        a source_mask or a target_mask. If a target_mask was used, set
        as_source to False.

        :param p0: int
        """
        if self.voxel_array is None:
            self._load_voxel_data()

        if as_source:
            return self.source_mask.coordinates[p0] * self.voxel_size
        else:
            return self.target_mask.coordinates[p0] * self.voxel_size

    def _get_mask_coords(self, as_source):
        if as_source:
            if self.source_coords is None:
                coordinates = self.source_mask.coordinates * self.voxel_size
                self.source_coords = coordinates
            else:
                coordinates = self.source_coords
        else:
            if self.target_coords is None:
                coordinates = self.target_mask.coordinates * self.voxel_size
                self.target_coords = coordinates
            else:
                coordinates = self.target_coords
        return coordinates

    def _get_voxel_id_from_coordinates(self, p0, as_source=True):
        if self.voxel_array is None:
            self._load_voxel_data()

        # Get the mask coordinates
        coordinates = self._get_mask_coords(as_source)

        # Get the index of the mask voxel closest to p0
        p0 = np.int64([round(p, -2) for p in p0])
        try:
            p0_idx = int(np.linalg.norm(coordinates - p0, axis=1).argmin())
        except Exception:
            raise ValueError(
                f"Could not find the voxel corresponding to the given point: {p0}"
            )
        return p0_idx

    # ----------------------------------- Cache ---------------------------------- #
    def _get_cache_filename(self, tgt, what):
        """Data are cached according to a naming convention, this function gets
        the name for an object according to the convention"""
        if what == 'projection':
            fld = self.data_cache_projections
        elif what == 'source':
            fld = self.data_cache_sources
        elif what == 'target':
            fld = self.data_cache_targets
        else:
            raise ValueError(
                'Error while getting cached data file name.\n' +
                f'What was {what} but should be projection/source/target/actor.'
            )
        name = ''.join([str(i) for i in tgt])
        path = os.path.join(fld, name + '.npy.gz')
        return name, path, os.path.isfile(path)

    def _get_from_cache(self, tgt, what):
        """Tries to load objects from cached data, if they exist."""
        if not self.use_cache:
            return None

        name, cache_path, cache_exists = self._get_cache_filename(tgt, what)
        if not cache_exists:
            return None
        else:
            return load_npy_from_gz(cache_path)

    def save_to_cache(self, tgt, what, obj):
        """Saves data to cache to avoid loading them again in the future."""
        name, cache_path, _ = self._get_cache_filename(tgt, what)
        save_npy_to_gz(cache_path, obj)

    # ---------------------------------------------------------------------------- #
    #                                 PREPROCESSING                                #
    # ---------------------------------------------------------------------------- #
    # ------------------------- Sources and targets masks ------------------------ #
    def get_source(self, source, hemisphere='both'):
        """
        Loads the mask for a source structure.

        :param source: str or list of str with acronym of source regions
        :param hemisphere: str, ['both', 'left', 'right'].
            Which hemisphere to consider.
        """
        if not isinstance(source, (list, tuple)):
            source = [source]

        self.source = self._get_from_cache(source, 'source')
        if self.source is None:
            self._load_voxel_data()
            source_ids = self._get_structure_id(source)

            self.source = self.source_mask.get_structure_indices(
                structure_ids=source_ids,
                hemisphere_id=self.hemispheres[hemisphere])
            self.save_to_cache(source, 'source', self.source)
        return self.source

    def get_target_mask(self, target, hemisphere):
        """Builds a target mask (stored as self.tgt_mask) used to transform
        projection data from linear arrays to 3D volumes.
        """
        target_ids = self._get_structure_id(target)
        self.tgt_mask = Mask.from_cache(
            self.cache,
            structure_ids=target_ids,
            hemisphere_id=self.hemispheres[hemisphere])

    def get_target(self, target, hemisphere='both'):
        """
        Loads the mask for a target structure.

        :param target: str or list of str with acronym of target regions
        :param hemisphere: str, ['both', 'left', 'right'].
            Which hemisphere to consider.
        """
        if not isinstance(target, (list, tuple)):
            target = [target]

        if hemisphere != 'both':
            cache_name = target + [hemisphere]
        else:
            cache_name = target

        self.target = self._get_from_cache(cache_name, 'target')
        if self.target is None:
            self._load_voxel_data()
            target_ids = self._get_structure_id(target)
            self.target = self.target_mask.get_structure_indices(
                structure_ids=target_ids,
                hemisphere_id=self.hemispheres[hemisphere])
            self.save_to_cache(cache_name, 'target', self.target)
        return self.target

    # -------------------------------- Projections ------------------------------- #
    def get_projection(self, source, target, name,
                       hemisphere='both',
                       projection_mode='mean',
                       mode='target'):
        """
        Gets the spatialised projection intensity from a source to a target.

        :param source: str or list of str with acronym of source regions
        :param target: str or list of str with acronym of target regions
        :param name: str, name of the projection
        :param projection_mode: str, if 'mean' the data from different
            experiments are averaged, if 'max' the highest value is taken.
        :param mode: str. If 'target' the spatialised projection strength in
            the target structures is returned, useful to see where source
            projects to in target. If 'source' the spatialised projection
            strength in the source structure is returned, useful to see
            which part of source projects to target.
        :return: 1D numpy array with mean projection from source to target voxels
        """
        if mode == 'target':
            self.get_target_mask(target, hemisphere)
        elif mode == 'source':
            self.get_target_mask(source, 'right')
        else:
            raise ValueError(
                f'Invalid mode: {mode}. Should be either source or target.')

        cache_name = sorted(source) + ['_'] + sorted(target) + [
            f'_{projection_mode}_{mode}'
        ]
        if hemisphere != 'both':
            cache_name += [hemisphere]

        proj = self._get_from_cache(cache_name, 'projection')
        if proj is None:
            source_idx = self.get_source(source, hemisphere)
            target_idx = self.get_target(target, hemisphere)

            self._load_voxel_data()
            projection = self.voxel_array[source_idx, target_idx]

            if mode == 'target':
                axis = 0
            elif mode == 'source':
                axis = 1
            else:
                raise ValueError(
                    f'Invalid mode: {mode}. Should be either source or target.'
                )

            if projection_mode == 'mean':
                proj = np.mean(projection, axis=axis)
            elif projection_mode == 'max':
                proj = np.max(projection, axis=axis)
            else:
                raise ValueError(
                    f'Projection mode {projection_mode} not recognized.\n' +
                    'Should be one of: ["mean", "max"].')

            # Save to cache
            self.save_to_cache(cache_name, 'projection', proj)

        self.projections[name] = proj
        return proj

    def get_mapped_projection(self, source, target, name, **kwargs):
        """
        Gets the spatialised projection intensity from a source to a target,
        but as a mapped volume instead of a linear array.

        :param source: str or list of str with acronym of source regions
        :param target: str or list of str with acronym of target regions
        :param name: str, name of the projection

        :return: 3D numpy array with projection intensity
        """
        projection = self.get_projection(source, target, name, **kwargs)
        mapped_projection = self.tgt_mask.map_masked_to_annotation(projection)
        self.mapped_projections[name] = mapped_projection
        return mapped_projection

    def get_mapped_projection_to_point(self,
                                       p0,
                                       restrict_to=None,
                                       restrict_to_hemisphere='both'):
        """
        Gets projection intensity from all voxels to the voxel corresponding
        to a point of interest.
        """
        cache_name = f'proj_to_{p0[0]}_{p0[1]}_{p0[2]}'
        if restrict_to is not None:
            cache_name += f'_{restrict_to}'

        proj = self._get_from_cache(cache_name, 'projection')
        if proj is None:
            p0idx = self._get_voxel_id_from_coordinates(p0, as_source=False)

            if restrict_to is not None:
                source_idx = self.get_source(restrict_to,
                                             restrict_to_hemisphere)
                proj = self.voxel_array[source_idx, p0idx]

                self.get_target_mask(restrict_to, restrict_to_hemisphere)
                mapped_projection = self.tgt_mask.map_masked_to_annotation(
                    proj)
            else:
                proj = self.voxel_array[:, p0idx]
                mapped_projection = self.source_mask.map_masked_to_annotation(
                    proj)

            self.save_to_cache(cache_name, 'projection', mapped_projection)
            return mapped_projection
        else:
            return proj

    def get_mapped_projection_from_point(self,
                                         p0,
                                         restrict_to=None,
                                         restrict_to_hemisphere='both'):
        """
        Gets projection intensity from the voxel corresponding to a point of
        interest to all other voxels.
        """
        if self.get_hemispere_from_point(p0) == 'left':
            raise ValueError(
                f'The point passed [{p0}] is in the left hemisphere,' +
                ' but "projection from point" only works from the right hemisphere.'
            )
        cache_name = f'proj_from_{p0[0]}_{p0[1]}_{p0[2]}'
        if restrict_to is not None:
            cache_name += f'_{restrict_to}'

        proj = self._get_from_cache(cache_name, 'projection')
        if proj is None:
            p0idx = self._get_voxel_id_from_coordinates(p0, as_source=True)

            if restrict_to is not None:
                target_idx = self.get_target(restrict_to,
                                             restrict_to_hemisphere)
                proj = self.voxel_array[p0idx, target_idx]

                self.get_target_mask(restrict_to, restrict_to_hemisphere)
                mapped_projection = self.tgt_mask.map_masked_to_annotation(
                    proj)
            else:
                proj = self.voxel_array[p0idx, :]
                mapped_projection = self.target_mask.map_masked_to_annotation(
                    proj)

            self.save_to_cache(cache_name, 'projection', mapped_projection)
            return mapped_projection
        else:
            return proj

    # ---------------------------------------------------------------------------- #
    #                                   RENDERING                                  #
    # ---------------------------------------------------------------------------- #
    def add_mapped_projection(self,
                              source,
                              target,
                              actor_kwargs={},
                              render_source_region=False,
                              render_target_region=False,
                              regions_kwargs={},
                              **kwargs):
        """
        Gets the spatialised projection intensity from a source to a target
        and renders it as a vtkplotter lego visualisation.

        :param source: str or list of str with acronym of source regions
        :param target: str or list of str with acronym of target regions
        :param render_source_region: bool, if True a wireframe mesh of source
            regions is rendered
        :param render_target_region: bool, if True a wireframe mesh of target
            regions is rendered
        :param regions_kwargs: options to specify how brain regions should look
        :param kwargs: used to control how the rendered object looks.
            See the arguments of 'add_volume' for what is available.
        """
        # Get projection data
        if not isinstance(source, list):
            source = [source]
        if not isinstance(target, list):
            target = [target]
        name = ''.join(source) + '_'.join(target)
        mapped_projection = self.get_mapped_projection(source, target, name,
                                                       **kwargs)
        lego_actor = self.add_volume(mapped_projection, **actor_kwargs)

        # Render relevant regions meshes
        if render_source_region or render_target_region:
            wireframe = regions_kwargs.pop('wireframe', True)
            use_original_color = regions_kwargs.pop('use_original_color', True)

            if render_source_region:
                self.scene.add_brain_regions(
                    source,
                    use_original_color=use_original_color,
                    wireframe=wireframe,
                    **regions_kwargs)

            if render_target_region:
                self.scene.add_brain_regions(
                    target,
                    use_original_color=use_original_color,
                    wireframe=wireframe,
                    **regions_kwargs)
        return lego_actor

    def add_mapped_projection_to_point(self,
                                       p0,
                                       show_point=True,
                                       show_point_region=False,
                                       show_crosshair=True,
                                       crosshair_kwargs={},
                                       point_region_kwargs={},
                                       point_kwargs={},
                                       from_point=False,
                                       **kwargs):
        if not isinstance(p0, (list, tuple, np.ndarray)):
            raise ValueError(
                f"point passed should be a list or a 1d array, not: {p0}")

        restrict_to = kwargs.pop('restrict_to', None)
        restrict_to_hemisphere = kwargs.pop('restrict_to_hemisphere', 'both')

        if not from_point:
            projection = self.get_mapped_projection_to_point(
                p0,
                restrict_to=restrict_to,
                restrict_to_hemisphere=restrict_to_hemisphere)
        else:
            projection = self.get_mapped_projection_from_point(
                p0,
                restrict_to=restrict_to,
                restrict_to_hemisphere=restrict_to_hemisphere)
        lego_actor = self.add_volume(projection, **kwargs)

        if show_point:
            color = point_kwargs.pop('color', 'salmon')
            radius = point_kwargs.pop('radius', 50)
            alpha = point_kwargs.pop('alpha', 1)
            if not show_crosshair:
                self.scene.add_sphere_at_point(p0,
                                               color=color,
                                               radius=radius,
                                               alpha=alpha,
                                               **point_kwargs)
            else:
                ml = crosshair_kwargs.pop('ml', True)
                dv = crosshair_kwargs.pop('dv', True)
                ap = crosshair_kwargs.pop('ap', True)
                self.scene.add_crosshair_at_point(
                    p0,
                    ml=ml, dv=dv, ap=ap,
                    line_kwargs=crosshair_kwargs,
                    point_kwargs={
                        'color': color,
                        'radius': radius,
                        'alpha': alpha
                    })

        if show_point_region:
            use_original_color = point_region_kwargs.pop(
                'use_original_color', False)
            alpha = point_region_kwargs.pop('alpha', 0.3)
            region = self.scene.get_structure_from_coordinates(p0)
            self.scene.add_brain_regions([region],
                                         use_original_color=use_original_color,
                                         alpha=alpha,
                                         **point_region_kwargs)
        return lego_actor

    def add_mapped_projection_from_point(self, *args, **kwargs):
        return self.add_mapped_projection_to_point(*args,
                                                   **kwargs,
                                                   from_point=True)

    def add_volume(self,
                   volume,
                   cmap='afmhot_r',
                   alpha=1,
                   add_colorbar=True,
                   **kwargs):
        """
        Renders intensity data from a 3D numpy array as a lego volumetric actor.

        :param volume: np 3D array with number of dimensions = those of the
            100um reference space.
        :param cmap: str with name of colormap to use
        :param alpha: float, transparency
        :param add_colorbar: if True a colorbar is added to show the values
            of the colormap
        """
        # Parse kwargs
        line_width = kwargs.pop('line_width', 1)
        if cmap == 'random' or not cmap:
            cmap = get_random_colormap()

        # Get vmin and vmax threshold for visualisation
        vmin = kwargs.pop('vmin', 0.000001)
        vmax = kwargs.pop('vmax', np.nanmax(volume))

        # Check values
        if np.max(volume) > vmax:
            print("While rendering mapped projection some of the values are "
                  "above the vmax threshold. They will not be displayed. "
                  f"vmax was {vmax} but found value {round(np.max(volume), 5)}.")

        if vmin > vmax:
            raise ValueError(
                f'The vmin threshold [{vmin}] cannot be larger than the '
                f'vmax threshold [{vmax}]')
        if vmin < 0:
            vmin = 0

        # Get 'lego' actor
        vol = Volume(volume)
        lego = vol.legosurface(vmin=vmin, vmax=vmax, cmap=cmap)

        # Scale and color actor
        lego.alpha(alpha).lw(line_width).scale(self.voxel_size)
        lego.cmap = cmap

        # Add colorbar
        if add_colorbar:
            lego.addScalarBar(vmin=vmin,
                              vmax=vmax,
                              horizontal=1,
                              c='k',
                              pos=(0.05, 0.05),
                              titleFontSize=40)

        # Add to scene
        actor = self.scene.add_vtkactor(lego)
        return actor
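# A short usage sketch for the class above; the region acronyms are
# illustrative and rendering assumes a working brainrender display.
vapi = VolumetricAPI(add_root=True)

# spatialised projection strength from VISp to VISl, with both regions
# shown as wireframe meshes
vapi.add_mapped_projection('VISp', 'VISl',
                           render_source_region=True,
                           render_target_region=True)
vapi.render()  # delegated to the underlying Scene via __getattr__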
def main():
    # initialize cache object
    cache = VoxelModelCache(manifest_file=MANIFEST_FILE)
    rs = cache.get_reference_space()
    rs.remove_unassigned(update_self=True)

    # major id children only
    structures = []
    for s in rs.structure_tree.get_structures_by_set_id([MAJOR_BRAIN_SET_ID]):
        structures.extend(rs.structure_tree.descendants([s['id']])[0])

    # load in voxel model
    print('loading array')
    voxel_array, source_mask, target_mask = cache.get_voxel_connectivity_array()

    print('getting keys')
    source_keys, target_keys = [], []
    source_counts, target_counts = [], []
    for s in structures:
        s_mask = source_mask.reference_space.make_structure_mask(
            [s['id']], direct_only=False)
        t_mask = target_mask.reference_space.make_structure_mask(
            [s['id']], direct_only=False)

        # NOTE: ipsi
        t_mask[..., :t_mask.shape[-1] // 2] = 0

        # keys
        s_key = source_mask.mask_volume(s_mask).nonzero()[0]
        t_key = target_mask.mask_volume(t_mask).nonzero()[0]

        source_keys.append(s_key)
        target_keys.append(t_key)
        source_counts.append(s_key.size)
        target_counts.append(t_key.size)

    del source_mask
    del target_mask

    # arrays
    source_counts = np.asarray(source_counts)
    target_counts = np.asarray(target_counts)

    # compute
    print('computing regional')
    connection_strength = np.empty(2 * [len(structures)])
    for i, j in itertools.product(range(len(structures)), repeat=2):
        print(i, j)
        connection_strength[i, j] = voxel_array[source_keys[i],
                                                target_keys[j]].sum()

    del voxel_array
    del source_keys
    del target_keys

    structure_acronyms = [s['acronym'] for s in structures]
    connection_strength = pd.DataFrame(connection_strength,
                                       index=structure_acronyms,
                                       columns=structure_acronyms)

    # other metrics: density is strength per target voxel, normalized
    # strength is strength per source voxel, normalized density divides
    # by both source and target sizes
    connection_density = np.divide(connection_strength, target_counts)
    normalized_connection_strength = np.divide(connection_strength,
                                               source_counts[:, np.newaxis])
    normalized_connection_density = np.divide(
        connection_strength, np.outer(source_counts, target_counts))

    # save
    connection_strength.to_csv(
        os.path.join(OUTPUT_DIR, 'connection_strength.csv'))
    connection_density.to_csv(
        os.path.join(OUTPUT_DIR, 'connection_density.csv'))
    normalized_connection_strength.to_csv(
        os.path.join(OUTPUT_DIR, 'normalized_connection_strength.csv'))
    normalized_connection_density.to_csv(
        os.path.join(OUTPUT_DIR, 'normalized_connection_density.csv'))
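# Toy check of the regional normalisations used above (rows = sources,
# columns = targets):
import numpy as np

strength = np.array([[4., 2.],
                     [1., 3.]])
src_counts = np.array([2, 1])  # voxels per source region
tgt_counts = np.array([4, 2])  # voxels per target region

density = strength / tgt_counts                        # per target voxel
norm_strength = strength / src_counts[:, np.newaxis]   # per source voxel
norm_density = strength / np.outer(src_counts, tgt_counts)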
if __name__ == "__main__": input_data = ju.read(INPUT_JSON) manifest_file = input_data.get('manifest_file') manifest_file = os.path.join(TOP_DIR, manifest_file) log_level = input_data.get('log_level', logging.DEBUG) logging.getLogger().setLevel(log_level) # configure colors = sns.color_palette(n_colors=2) # get cache, metric cache = VoxelModelCache(manifest_file=manifest_file) df_metric = cache.get_normalized_connection_density(dataframe=True) logging.debug("getting cortical network") df_cortex = get_cortical_df(df_metric, cache) # region acs region_acs = df_metric.index.values logging.debug("computing distances") d = get_distances(region_acs, cache) d = to_dataframe(d, df_metric.index, df_metric.columns) d_cortex = get_cortical_df(d, cache) # get projection types full_ipsi = get_pt((d, df_metric), thresh=0)