def __init__(self):
    """Precompute, for every structure in the Allen mouse ontology, the chain
    of ancestor region ids up to (and including) its major brain region.

    Populates ``self.onto_id2parent_regions``: structure id -> list of
    ancestor structure ids (as strings, innermost first), truncated at the
    first ancestor whose acronym is one of the major-region acronyms below.
    """
    self.onto_id2parent_regions = {}
    # Only walk parents up to these major brain regions.
    # Fix: the original list contained 'PAL' twice; a set removes the
    # duplicate and gives O(1) membership tests inside the loop.
    big_reg_acros = {'Isocortex', 'OLF', 'STR', 'PAL', 'TH', 'HY',
                     'MB', 'MY', 'CB', 'HPF', 'CTXsp'}
    mcc = MouseConnectivityCache()
    onto = mcc.get_ontology()
    df = onto.df
    for struct_id in df.id:
        # structure_id_path is a '/'-separated chain of ancestor ids,
        # e.g. '/997/8/.../<struct_id>/'
        structure_path_str = onto[struct_id].structure_id_path.item()
        region_path = structure_path_str.split('/')
        parent_list = []
        # Walk from the structure itself outward toward the root.
        for r in reversed(region_path):
            if not r:
                # skip empty fragments produced by leading/trailing '/'
                continue
            parent_list.append(r)
            acronym = df[df.id == int(r)].acronym.item()
            if acronym in big_reg_acros:
                break  # reached a major region; stop climbing
        self.onto_id2parent_regions[struct_id] = parent_list
def load_ontologies():
    """Load all of the ontologies into a single dictionary.

    Parses every ``.robo`` and ``.obo`` file under ``onto_root`` with
    ``oboparser`` and merges the terms into one dict keyed by ontology term
    id.  Terms with a "BROAD ACRONYM" synonym get an ``'acronym'`` field
    extracted from the synonym text; ABA terms instead get their acronym
    filled in from the Allen ontology.

    :return: dict mapping ontology term id -> term dictionary
    """
    big_onto = {}
    mcc = MouseConnectivityCache()
    aba_onto = mcc.get_ontology()
    # .robo files first, then .obo files; later files win on duplicate ids.
    # (glob.glob already returns a list -- no comprehension needed)
    file_name_list = glob.glob(onto_root + "*.robo") + glob.glob(onto_root + "*.obo")
    for fn in file_name_list:
        for o in oboparser.parse(fn):
            if 'synonym' in o:
                for s in o['synonym']:
                    if "BROAD ACRONYM" in s:
                        # first word of the synonym text is the acronym;
                        # raw string avoids the deprecated '\w' escape, and
                        # guarding the match avoids AttributeError on a
                        # synonym with no word characters
                        match = re.search(r"\w+", s)
                        if match:
                            o['acronym'] = match.group()
            if 'id' in o:
                big_onto[o['id']] = o
    # Overwrite acronyms for Allen Brain Atlas terms using the ABA ontology.
    for k in big_onto:
        if 'ABA' in k:
            # assumes ids look like '<11-char prefix><numeric ABA id>' -- TODO confirm
            aba_id = int(k[11:])
            big_onto[k]['acronym'] = aba_onto[aba_id]['acronym'].item()
    return big_onto
def load_ontologies():
    """Loads all of the ontologies into a nice dictionary data structure.

    Returns a dict keyed by ontology term id (HBP ontology ids mapped to
    parsed .obo terms); ABA terms additionally have their 'acronym' field
    taken from the Allen ontology.
    """
    mcc = MouseConnectivityCache()
    aba_onto = mcc.get_ontology()
    # gather the .robo files first, then the .obo files
    paths = list(glob.glob(onto_root + "*.robo"))
    paths.extend(glob.glob(onto_root + "*.obo"))
    big_onto = {}
    for path in paths:
        for term in oboparser.parse(path):
            # synonyms flagged "BROAD ACRONYM" carry the acronym as their first word
            for syn in term.get('synonym', ()):
                if "BROAD ACRONYM" in syn:
                    term['acronym'] = re.search("\w+", syn).group()
            if 'id' in term:
                big_onto[term['id']] = term
    # patch ABA terms with the acronym from the Allen ontology
    for term_id, term in big_onto.items():
        if 'ABA' in term_id:
            aba_id = int(term_id[11:])
            term['acronym'] = aba_onto[aba_id]['acronym'].item()
    return big_onto
def get_neuron_region(neuron_ob):
    """
    Given a neuron object, return a dictionary describing its major brain
    region from the Allen Mouse Atlas.

    :param neuron_ob: A neuroelectro.models Neuron object
    :return: a dictionary of region attributes, or None when the neuron has
        no associated regions
    """
    associated_regions = neuron_ob.regions.all()
    # cache/ontology are created before the branch, matching the original
    # call order
    mcc = MouseConnectivityCache()
    onto = mcc.get_ontology()
    if not associated_regions:
        return None
    first_region = associated_regions[0]
    path_str = onto[first_region.allenid].structure_id_path.item()
    return get_major_brain_region(path_str, onto)
def __init__(self):
    """Build a lookup from every Allen ontology structure id to the list of
    its parent region ids, stopping at the enclosing major brain region.

    ``self.onto_id2parent_regions``: structure id -> ancestor ids (strings,
    closest ancestor first), ending at the first major-region ancestor.
    """
    self.onto_id2parent_regions = {}
    # Only find parents up to these major brain regions.  Defect fixed:
    # the original list listed 'PAL' twice; a set drops the duplicate and
    # makes the per-ancestor membership check O(1).
    big_reg_acros = {'Isocortex', 'OLF', 'STR', 'PAL', 'TH', 'HY',
                     'MB', 'MY', 'CB', 'HPF', 'CTXsp'}
    mcc = MouseConnectivityCache()
    onto = mcc.get_ontology()
    df = onto.df
    struct_ids = df.id
    for struct_id in struct_ids:
        # '/'-separated ancestor chain, e.g. '/997/8/.../<struct_id>/'
        structure_path_str = onto[struct_id].structure_id_path.item()
        region_path = structure_path_str.split('/')
        parent_list = []
        # climb from the structure itself toward the root
        for r in reversed(region_path):
            if not r:
                continue  # empty pieces from the leading/trailing '/'
            parent_list.append(r)
            tdf = df[df.id == int(r)]
            if tdf.acronym.item() in big_reg_acros:
                break  # stop at the first major brain region
        self.onto_id2parent_regions[struct_id] = parent_list
# NOTE(review): this chunk begins mid-script -- the usage message and
# sys.exit(1) below read like the body of an argument-validation `if`
# whose header lies before this view; confirm against the full file.
# (Python 2 syntax: `print` statements throughout.)
print ""
print " build_projection_datasets.py data/ pms.pickle"
print ""
sys.exit(1)

import os

# all cached data is stored relative to the directory given on the command line
os.chdir(sys.argv[1])

from allensdk.core.mouse_connectivity_cache import MouseConnectivityCache
from allensdk.api.queries.ontologies_api import OntologiesApi

# Allen mouse connectivity cache; manifest lives in the data directory
mcc = MouseConnectivityCache(manifest_file="mcc_manifest.json")
all_experiments = mcc.get_experiments(dataframe=True)
ontology = mcc.get_ontology()

# ids of the "summary structure" set used to bucket injection sites
summary_structures = OntologiesApi().get_structures(structure_set_names="Mouse Connectivity - Summary")
summary_structure_ids = [s["id"] for s in summary_structures]

print "build dict of injection structure id to experiment list"
# ist2e: injection structure id -> list of experiment ids injected there
ist2e = {}
for eid in all_experiments.index:
    # NOTE(review): DataFrame.ix is long deprecated; .loc is the modern
    # spelling -- left as-is since this is Python 2 / old pandas code
    for ist in all_experiments.ix[eid]["injection-structures"]:
        isti = ist["id"]
        if isti not in ist2e:
            ist2e[isti] = []
        ist2e[isti].append(eid)

# this may take hours to days to run depending on bandwidth
print "obtain projection maps per injection site"
def launch(self, resolution, weighting, inf_vox_thresh, vol_thresh):
    """Build a TVB mouse-brain dataset from the Allen connectivity data.

    :param resolution: voxel resolution in microns for the Allen cache
    :param weighting: selects how projection strengths are weighted
        (passed through to DownloadAndConstructMatrix)
    :param inf_vox_thresh: minimum number of infected voxels at least one
        injection experiment must reach for a region to be kept
    :param vol_thresh: minimum region volume for a region to be kept
    :return: [Connectivity, Volume, RegionVolumeMapping, StructuralMRI]
    """
    resolution = int(resolution)
    weighting = int(weighting)
    inf_vox_thresh = float(inf_vox_thresh)
    vol_thresh = float(vol_thresh)
    project = dao.get_project_by_id(self.current_project_id)
    manifest_file = self.file_handler.get_allen_mouse_cache_folder(project.name)
    manifest_file = os.path.join(manifest_file, 'mouse_connectivity_manifest.json')
    cache = MouseConnectivityCache(resolution=resolution, manifest_file=manifest_file)
    # dictionary describing which experiments need to be downloaded
    ist2e = DictionaireBuilder(cache, False)
    # download the experiments needed to build the connectivity
    projmaps = DownloadAndConstructMatrix(cache, weighting, ist2e, False)
    # clean the projmaps structure (4 steps)
    projmaps = pmsCleaner(projmaps)
    # download the annotation volume, the ontology and the template volume
    Vol, annot_info = cache.get_annotation_volume()
    ontology = cache.get_ontology()
    template, template_info = cache.get_template_volume()
    # rotate the template into the TVB 3D reference frame
    template = RotateReference(template)
    # keep only brain regions whose volume is greater than vol_thresh
    projmaps = AreasVolumeTreshold(cache, projmaps, vol_thresh, resolution, Vol, ontology)
    # keep only regions where at least one injection experiment infected
    # more than inf_vox_thresh voxels
    projmaps = AreasVoxelTreshold(cache, projmaps, inf_vox_thresh, Vol, ontology)
    # order/key_ord link the SC ordering to the id keys in the Allen database
    [order, key_ord] = CreateFileOrder(projmaps, ontology)
    # build the Structural Connectivity (SC) matrix
    SC = ConstructingSC(projmaps, order, key_ord)
    # centre coordinates and names of the brain areas in the parcellation
    [centres, names] = Construct_centres(cache, ontology, order, key_ord)
    # tract lengths between the brain areas (from the centres)
    tract_lengths = ConstructTractLengths(centres)
    # associate parent and grandparent (of biggest volume) to each child area
    [unique_parents, unique_grandparents] = ParentsAndGrandParentsFinder(cache, order, key_ord, ontology)
    # volume indexed 0..N-1 (N = total areas in the parcellation);
    # -1 = background and areas not in the parcellation
    Vol_parcel = MouseBrainVisualizer(Vol, order, key_ord, unique_parents, unique_grandparents, ontology, projmaps)
    # results: Connectivity, Volume & RegionVolumeMapping
    # Connectivity
    result_connectivity = Connectivity(storage_path=self.storage_path)
    result_connectivity.centres = centres
    result_connectivity.region_labels = names
    result_connectivity.weights = SC
    result_connectivity.tract_lengths = tract_lengths
    # Volume
    result_volume = Volume(storage_path=self.storage_path)
    result_volume.origin = [[0.0, 0.0, 0.0]]
    result_volume.voxel_size = [resolution, resolution, resolution]
    # result_volume.voxel_unit= micron
    # Region Volume Mapping
    result_rvm = RegionVolumeMapping(storage_path=self.storage_path)
    result_rvm.volume = result_volume
    result_rvm.array_data = Vol_parcel
    result_rvm.connectivity = result_connectivity
    result_rvm.title = "Volume mouse brain "
    result_rvm.dimensions_labels = ["X", "Y", "Z"]
    # Volume template
    result_template = StructuralMRI(storage_path=self.storage_path)
    result_template.array_data = template
    result_template.weighting = 'T1'
    result_template.volume = result_volume
    return [result_connectivity, result_volume, result_rvm, result_template]
import os #import nrrd from voxnet.matrices import generate_voxel_matrices from voxnet.utilities import * from scipy.io import savemat # setup the run param_fn='run_setup.py' with open(param_fn) as f: code = compile(f.read(), param_fn, 'exec') exec(code) manifest_file=os.path.join(data_dir,'manifest.json') mcc = MouseConnectivityCache(manifest_file=manifest_file, resolution=resolution) ontology = mcc.get_ontology() sources = ontology[source_acronyms] targets = ontology[target_acronyms] if experiments_fn is not None: LIMS_id_list=unpickle(experiments_fn) else: LIMS_id_list=None try: if max_injection_volume: pass except NameError: max_injection_volume=np.inf try:
def launch(self, resolution, weighting, inf_vox_thresh, vol_thresh):
    """Build a TVB mouse-brain dataset from the Allen connectivity data.

    :param resolution: voxel resolution in microns for the Allen cache
    :param weighting: selects how projection strengths are weighted
        (passed through to DownloadAndConstructMatrix)
    :param inf_vox_thresh: minimum number of infected voxels at least one
        injection experiment must reach for a region to be kept
    :param vol_thresh: minimum region volume for a region to be kept
    :return: [Connectivity, RegionVolumeMapping, Volume]
    """
    resolution = int(resolution)
    weighting = int(weighting)
    inf_vox_thresh = float(inf_vox_thresh)
    vol_thresh = float(vol_thresh)
    project = dao.get_project_by_id(self.current_project_id)
    manifest_file = self.file_handler.get_allen_mouse_cache_folder(project.name)
    manifest_file = os.path.join(manifest_file, "mouse_connectivity_manifest.json")
    cache = MouseConnectivityCache(resolution=resolution, manifest_file=manifest_file)
    # dictionary describing which experiments need to be downloaded
    ist2e = DictionaireBuilder(cache, False)
    # download the experiments needed to build the connectivity
    projmaps = DownloadAndConstructMatrix(cache, weighting, ist2e, False)
    # clean the projmaps structure (4 steps)
    projmaps = pmsCleaner(projmaps)
    Vol, annot_info = cache.get_annotation_volume()
    ontology = cache.get_ontology()
    # keep only brain regions whose volume is greater than vol_thresh
    projmaps = AreasVolumeTreshold(projmaps, vol_thresh, resolution, Vol, ontology)
    # keep only regions where at least one injection experiment infected
    # more than inf_vox_thresh voxels
    projmaps = AreasVoxelTreshold(cache, projmaps, inf_vox_thresh, Vol, ontology)
    # order/key_ord link the SC ordering to the id keys in the Allen database
    [order, key_ord] = CreateFileOrder(projmaps, ontology)
    # build the Structural Connectivity (SC) matrix
    SC = ConstructingSC(projmaps, order, key_ord)
    # centre coordinates and names of the brain areas in the parcellation
    [centres, names] = Construct_centres(ontology, order, key_ord, Vol)
    # tract lengths between the brain areas (from the centres)
    tract_lengths = ConstructTractLengths(centres)
    # associate parent and grandparent (of biggest volume) to each child area
    [unique_parents, unique_grandparents] = ParentsAndGrandParentsFinder(order, key_ord, Vol, ontology)
    # volume indexed 0..N-1 (N = total areas in the parcellation);
    # -1 = background and areas not in the parcellation
    Vol_parcel = MouseBrainVisualizer(Vol, order, key_ord, unique_parents, unique_grandparents, ontology, projmaps)
    # results: Connectivity, Volume & RegionVolumeMapping
    # Connectivity
    result_connectivity = Connectivity(storage_path=self.storage_path)
    result_connectivity.centres = centres
    result_connectivity.region_labels = names
    result_connectivity.weights = SC
    result_connectivity.tract_lengths = tract_lengths
    # Volume
    result_volume = Volume(storage_path=self.storage_path)
    result_volume.origin = [[0.0, 0.0, 0.0]]
    result_volume.voxel_size = [resolution, resolution, resolution]
    # result_volume.voxel_unit= micron
    # Region Volume Mapping
    result_rvm = RegionVolumeMapping(storage_path=self.storage_path)
    result_rvm.volume = result_volume
    result_rvm.array_data = Vol_parcel
    result_rvm.connectivity = result_connectivity
    result_rvm.title = "Volume mouse brain "
    result_rvm.dimensions_labels = ["X", "Y", "Z"]
    return [result_connectivity, result_rvm, result_volume]
from allensdk.core.mouse_connectivity_cache import MouseConnectivityCache

# ask the cache for the 25-micron-resolution data products
mcc = MouseConnectivityCache(resolution=25)

# fetch the structure ontology (cached to ontology.csv) and pull out isocortex
ontology = mcc.get_ontology('ontology.csv')
isocortex = ontology['Isocortex']

# metadata for the wild-type (non-Cre) experiments injected into isocortex
isocortex_ids = isocortex['id']
experiments = mcc.get_experiments(file_name='non_cre.json',
                                  injection_structure_ids=isocortex_ids)

# projection density volume for the first of those experiments
first_experiment_id = experiments[0]['id']
pd = mcc.get_projection_density(first_experiment_id)