def load_config(config):
    """Coerce *config* into a ConfigDict.

    :param config: either a json file path / json string, or an
        already-parsed dict of configuration values.
    :return: ConfigDict built from the input.
    :raises Exception: if *config* is neither a string nor a dict.
    """
    # Dict first: a dict can never also be a string, so order is irrelevant
    # to behavior.
    if isinstance(config, dict):
        return ConfigDict.from_dict(config)
    if isinstance(config, string_types):
        return ConfigDict.from_json(config)
    raise Exception('Could not convert {} (type "{}") to json.'.format(config, type(config)))
def load_spikes_file(config_file=None, spikes_file=None):
    """Load a SpikeTrains object from a spikes file or a simulation config.

    :param config_file: path to a SONATA simulation config json; its
        "spikes_file" entry is used when *spikes_file* is not given.
    :param spikes_file: explicit path to a spikes file, takes precedence
        over *config_file*.
    :return: SpikeTrains object.
    :raises ValueError: if neither parameter is provided.
    """
    if spikes_file is not None:
        return SpikeTrains.load(spikes_file)

    if config_file is not None:
        config = ConfigDict.from_json(config_file)
        return SpikeTrains.load(config.spikes_file)

    # BUG FIX: previously fell through and silently returned None when both
    # arguments were missing, deferring the failure to the caller.
    raise ValueError('Please specify either a spikes_file or a config_file.')
def plot_potential(cell_vars_h5=None, config_file=None, gids=None, show_plot=True, save=False):
    """Plot membrane potential traces from a cell-vars hdf5 file or a sim config.

    :param cell_vars_h5: path to a cell_vars hdf5 file; takes precedence.
    :param config_file: path to SONATA config json, or an already-parsed dict.
    :param gids: node ids to plot; defaults to the config's save_cell_vars list.
    :param show_plot: display the figure(s). Default True.
    :param save: save plot(s) as jpg files. Default False.
    :raises Exception: if neither cell_vars_h5 nor config_file is given, or if
        config_file is neither a string nor a dict.
    """
    if (cell_vars_h5 or config_file) is None:
        raise Exception('Please specify a cell_vars hdf5 file or a simulation config.')

    # Explicit hdf5 file: a single plot, nothing to resolve from the config.
    if cell_vars_h5 is not None:
        plot_potential_hdf5(cell_vars_h5, gids=gids, show_plot=show_plot,
                            save_as='sim_potential.jpg' if save else None)
        return

    # Otherwise resolve the config from a json path or a pre-loaded dict.
    if isinstance(config_file, string_types):
        config = cfg.from_json(config_file)
    elif isinstance(config_file, dict):
        config = config_file
    else:
        raise Exception('Could not convert {} (type "{}") to json.'.format(config_file, type(config_file)))

    gid_list = gids or config['node_id_selections']['save_cell_vars']
    for gid in gid_list:
        save_as = '{}_v.jpg'.format(gid) if save else None
        title = 'cell gid {}'.format(gid)
        var_h5 = os.path.join(config['output']['cell_vars_dir'], '{}.h5'.format(gid))
        plot_potential_hdf5(var_h5, title, show_plot, save_as)
def load_reports(config_file):
    """Open a CompartmentReport reader for every membrane/multimeter report in a config.

    :param config_file: path to a SONATA simulation config json.
    :return: list of CompartmentReport objects opened read-only.
    """
    cfg = ConfigDict.from_json(config_file)
    reports = []
    for report_name, report in cfg.reports.items():
        # Only compartment-variable reports can be opened as CompartmentReport.
        if report['module'] not in ['membrane_report', 'multimeter_report']:
            continue

        # "file_name" is optional; fall back to "<report_name>.h5".
        if 'file_name' in report:
            report_file = report['file_name']
        else:
            report_file = '{}.h5'.format(report_name)

        # Relative paths are resolved against the config's output directory.
        if not os.path.isabs(report_file):
            report_file = os.path.join(cfg.output_dir, report_file)

        reports.append(CompartmentReport(report_file, 'r'))

    return reports
def plot_ecp(config_file=None, report_name=None, ecp_path=None, title=None, show=True):
    """Plot extracellular potential (ECP) traces, one stacked subplot per channel.

    :param config_file: path to SONATA simulation config used to locate the ecp file.
    :param report_name: name of the ecp report in the config's "reports" section.
    :param ecp_path: explicit path to the ecp hdf5 file; overrides config lookup.
    :param title: optional figure title.
    :param show: display the plot. Default True.
    """
    sonata_config = SonataConfig.from_json(config_file) if config_file else None
    _, ecp_path = _get_ecp_path(ecp_path=ecp_path, config=sonata_config, report_name=report_name)

    ecp_h5 = h5py.File(ecp_path, 'r')
    # /ecp/time stores (start, stop, step); expand into explicit timestamps.
    time_traces = np.arange(
        start=ecp_h5['/ecp/time'][0],
        stop=ecp_h5['/ecp/time'][1],
        step=ecp_h5['/ecp/time'][2]
    )

    channels = ecp_h5['/ecp/channel_id'][()]
    fig, axes = plt.subplots(len(channels), 1)
    fig.text(0.04, 0.5, 'channel id', va='center', rotation='vertical')
    for idx, channel in enumerate(channels):
        data = ecp_h5['/ecp/data'][:, idx]
        axes[idx].plot(time_traces, data)
        axes[idx].spines["top"].set_visible(False)
        axes[idx].spines["right"].set_visible(False)
        axes[idx].set_yticks([])
        axes[idx].set_ylabel(channel)
        if idx + 1 != len(channels):
            # Interior subplot: hide the x-axis so traces appear contiguous.
            axes[idx].spines["bottom"].set_visible(False)
            axes[idx].set_xticks([])
        else:
            axes[idx].set_xlabel('timestamps (ms)')

    if title:
        # BUG FIX: matplotlib Figure has no set_title() method; the original
        # fig.set_title(title) raised AttributeError whenever a title was given.
        fig.suptitle(title)

    if show:
        plt.show()
def _plot_helper(plot_fnc, config_file=None, population=None, times=None, title=None, show=True, save_as=None,
                 group_by=None, group_excludes=None, spikes_file=None, nodes_file=None, node_types_file=None):
    """Shared driver for spike-train plotting functions.

    Resolves spike-trains, simulation times and (optionally) node groupings
    from the SONATA config / explicit files, then delegates drawing to
    *plot_fnc*.

    :param plot_fnc: plotting callable taking spike_trains/node_groups/etc.
    :param config_file: path to SONATA simulation config json.
    :param population: node population name; guessed if None.
    :param group_by: node attribute used to split nodes into labeled groups.
    :param group_excludes: attribute value(s) to exclude from the groups.
    :return: whatever *plot_fnc* returns (typically a matplotlib figure).
    :raises ValueError: if group_by matches no node attribute.
    """
    # Local import: DataFrame.append() was removed in pandas 2.0, we need concat.
    import pandas as pd

    sonata_config = SonataConfig.from_json(config_file) if config_file else None
    pop, spike_trains = _find_spikes(config_file=config_file, spikes_file=spikes_file, population=population)

    # Create the title
    title = title if title is not None else '{} Nodes'.format(pop)

    # Get start and stop times from config if needed
    if sonata_config and times is None:
        times = (sonata_config.tstart, sonata_config.tstop)

    # Create node-groups
    if group_by is not None:
        node_groups = []
        nodes = _find_nodes(population=pop, config=sonata_config, nodes_file=nodes_file,
                            node_types_file=node_types_file)
        # Collect node_id -> group_by attribute across every node group that
        # actually has the column.
        frames = [grp.to_dataframe()[['node_id', group_by]]
                  for grp in nodes.groups if group_by in grp.all_columns]
        if not frames:
            raise ValueError('Could not find any nodes with group_by attribute "{}"'.format(group_by))
        # BUG FIX: replaced DataFrame.append (removed in pandas 2.0) with pd.concat.
        grouped_df = pd.concat(frames, ignore_index=True)

        # Convert from string to list so we can always use the isin() method for filtering
        if isinstance(group_excludes, string_types):
            group_excludes = [group_excludes]
        elif group_excludes is None:
            group_excludes = []

        for grp_key, grp in grouped_df.groupby(group_by):
            if grp_key in group_excludes:
                continue
            node_groups.append({'node_ids': np.array(grp['node_id']), 'label': grp_key})
    else:
        node_groups = None

    return plot_fnc(
        spike_trains=spike_trains, node_groups=node_groups, population=pop,
        times=times, title=title, show=show, save_as=save_as
    )
def test_json():
    """SonataConfig.from_json, .load and the deprecated from_json all resolve manifest variables."""
    config_file = tempfile.NamedTemporaryFile(suffix='.json')
    sonata_cfg = {
        'manifest': {
            '$BASE': '${configdir}',
            '$TMP_ATTR': 'mytest'
        },
        'myvar': '$TMP_ATTR/myvar',
        'cwd': '${workingdir}',
        'cdir': '${configdir}',
        'cfname': '${configfname}'
    }
    json.dump(sonata_cfg, open(config_file.name, 'w'))

    def check_resolved(config_dict):
        # All three loaders must produce the same fully-resolved mapping.
        assert (isinstance(config_dict, SonataConfig))
        assert (isinstance(config_dict, dict))
        assert (config_dict['myvar'] == 'mytest/myvar')
        assert (config_dict['cwd'] == os.getcwd())
        assert (config_dict['cdir'] == os.path.dirname(config_file.name))
        assert (config_dict['cfname'] == config_file.name)

    check_resolved(SonataConfig.from_json(config_file.name))
    check_resolved(SonataConfig.load(config_file.name))

    # The module-level from_json() is deprecated but must still work.
    with pytest.warns(DeprecationWarning):
        config_dict = from_json(config_file.name)
    check_resolved(config_dict)
def save_nodes_csv(circuit_config, population):
    """Dump position/rotation/morphology data for *population* to network_cells.csv.

    :param circuit_config: path to SONATA circuit config json.
    :param population: name of the node population to export.
    """
    config = SonataConfig.from_json(circuit_config)
    morphology_dir = config['components']['morphologies_dir']
    config_dir = config['manifest']['configdir']

    network_nodes = config['networks']['nodes']
    nodes_h5 = [n['nodes_file'] for n in network_nodes]
    node_types_csv = [n['node_types_file'] for n in network_nodes]
    l4_net = sonata.File(data_files=nodes_h5, data_type_files=node_types_csv)
    net_df = l4_net.nodes[population].to_dataframe()

    # Make sure every rotation column exists and has no missing values.
    rotation_cols = ('rotation_angle_xaxis', 'rotation_angle_yaxis', 'rotation_angle_zaxis')
    for rot_axis in rotation_cols:
        if rot_axis not in net_df.columns:
            net_df[rot_axis] = 0.0
        net_df[rot_axis] = net_df[rot_axis].fillna(0.0)

    keep_cols = ['x', 'y', 'z'] + list(rotation_cols) + [
        'morphology', 'model_processing', 'model_name', 'model_type'
    ]
    net_df = net_df[keep_cols]

    # Store morphology file paths relative to the config directory.
    morp_rel_path = PurePath(morphology_dir).relative_to(config_dir)
    net_df['morphology'] = net_df.apply(
        lambda r: os.path.join(morp_rel_path, r['morphology'])
        if isinstance(r['morphology'], string_types) else None,
        axis=1
    )
    net_df.to_csv(os.path.join(config_dir, 'network_cells.csv'), sep=' ', na_rep="None")
def plot_traces(report_path=None, config_file=None, report_name=None, population=None, group_by=None,
                group_excludes=None, nodes_file=None, node_types_file=None, node_ids=None, sections='origin',
                average=False, times=None, title=None, show_legend=None, show=True):
    """Plot compartment-report traces (eg membrane voltage) from a simulation.

    :param report_path: explicit path to SONATA compartment report file.
    :param config_file: path to SONATA simulation config used to find the report.
    :param report_name: name of the report in the config's "reports" section.
    :param population: node population to plot; auto-detected if the report has one.
    :param group_by: node attribute used to group/average subsets of nodes.
    :param group_excludes: attribute value(s) to exclude from the groupings.
    :param node_ids: int or list of node ids to display.
    :param sections: 'origin', 'all', or list of compartment ids. Default soma only.
    :param average: average the traces of the selected nodes. Default False.
    :param times: (start, stop) in ms.
    :param title: plot title; generated from report/population if None.
    :param show_legend: force legend on/off; None lets the plotter decide.
    :param show: display the plot. Default True.
    :return: matplotlib figure.Figure object.
    :raises ValueError: on ambiguous population or empty/unknown groupings.
    """
    # Local import: DataFrame.append() was removed in pandas 2.0, we need concat.
    import pandas as pd

    sonata_config = SonataConfig.from_json(config_file) if config_file else None
    report_name, cr = _get_report(report_path=report_path, config=sonata_config, report_name=report_name)

    if population is None:
        pops = cr.populations
        if len(pops) > 1:
            raise ValueError(
                'Report {} contains more than population of nodes ({}). Use population parameter'
                .format(report_name, pops))
        population = pops[0]

    if title is None:
        title = '{} ({})'.format(report_name, population)

    # Create node-groups
    if group_by is not None:
        node_groups = []
        nodes = _find_nodes(population=population, config=sonata_config, nodes_file=nodes_file,
                            node_types_file=node_types_file)
        # Collect node_id -> group_by attribute across every node group that
        # actually has the column.
        frames = [grp.to_dataframe()[['node_id', group_by]]
                  for grp in nodes.groups if group_by in grp.all_columns]
        if not frames:
            raise ValueError('Could not find any nodes with group_by attribute "{}"'.format(group_by))
        # BUG FIX: replaced DataFrame.append (removed in pandas 2.0) with pd.concat.
        grouped_df = pd.concat(frames, ignore_index=True)

        # Convert from string to list so we can always use the isin() method for filtering
        if isinstance(group_excludes, string_types):
            group_excludes = [group_excludes]
        elif group_excludes is None:
            group_excludes = []

        for grp_key, grp in grouped_df.groupby(group_by):
            if grp_key in group_excludes:
                continue
            node_groups.append({'node_ids': np.array(grp['node_id']), 'label': grp_key})

        if len(node_groups) == 0:
            exclude_str = ' excluding values {}'.format(', '.join(group_excludes)) if len(group_excludes) > 0 else ''
            raise ValueError('Could not find any node-groups using group_by="{}"{}.'.format(group_by, exclude_str))
    else:
        node_groups = None

    return plotting.plot_traces(report=cr, population=population, node_ids=node_ids,
                                sections=sections, average=average, node_groups=node_groups,
                                times=times, title=title, show_legend=show_legend, show=show)
def _find_spikes(spikes_file=None, config_file=None, population=None): candidate_spikes = [] # Get spikes file(s) if spikes_file: # User has explicity set the location of the spike files candidate_spikes.append(spikes_file) elif config_file is not None: # Otherwise search the config.json for all possible output spikes_files. We can use the simulation_reports # module to find any spikes output file specified in config's "output" or "reports" section. config = SonataConfig.from_json(config_file) sim_reports = simulation_reports.from_config(config) for report in sim_reports: if report.module == 'spikes_report': # BMTK can end up output the same spikes file in SONATA, CSV, and NWB format. Try fetching the SONATA # version first, then CSV, and finally NWB if it exists. spikes_sonata = report.params.get('spikes_file', None) spikes_csv = report.params.get('spikes_file_csv', None) spikes_nwb = report.params.get('spikes_file_nwb', None) if spikes_sonata is not None: candidate_spikes.append(spikes_sonata) elif spikes_csv is not None: candidate_spikes.append(spikes_csv) elif spikes_csv is not None: candidate_spikes.append(spikes_nwb) # TODO: Should we also look in the "inputs" for displaying input spike statistics? if not candidate_spikes: raise ValueError( 'Could not find an output spikes-file. Use "spikes_file" parameter option.' ) # Find file that contains spikes for the specified "population" of nodes. If "population" parameter is not # specified try to guess that spikes that the user wants to visualize. 
if population is not None: spikes_obj = None for spikes_f in candidate_spikes: st = SpikeTrains.load(spikes_f) if population in st.populations: if spikes_obj is None: spikes_obj = st else: spikes_obj.merge(st) if spikes_obj is None: raise ValueError( 'Could not fine spikes file with node population "{}".'.format( population)) else: return population, spikes_obj else: if len(candidate_spikes) > 1: raise ValueError('Found more than one spike-trains file') spikes_f = candidate_spikes[0] if not os.path.exists(spikes_f): raise ValueError( 'Did not find spike-trains file {}. Make sure the simulation has completed.' .format(spikes_f)) spikes_obj = SpikeTrains.load(spikes_f) if len(spikes_obj.populations) > 1: raise ValueError( 'Spikes file {} contains more than one node population.'. format(spikes_f)) else: return spikes_obj.populations[0], spikes_obj
def plot_traces(config_file=None, report_name=None, population=None, report_path=None, group_by=None,
                group_excludes=None, nodes_file=None, node_types_file=None, node_ids=None, sections='origin',
                average=False, times=None, title=None, show_legend=None, show=True, save_as=None):
    """Plot compartment variables (eg Membrane Voltage, Calcium conc.) traces from the output of simulation.

    Will attempt to look in the SONATA simulation configuration json "reports" sections for any matching
    "membrane_report" outputs with a matching report_name::

        plot_traces(config_file='config.json', report_name='membrane_potential')

    If the path to the report is different (or missing) than what's in the SONATA config then use the "report_path"
    option instead::

        plot_traces(report_path='/my/path/to/membrane_potential.h5')

    To display the traces of only a select number of nodes you can filter using the node_ids options::

        plot_traces(config_file='config.json', node_ids=[10, 20, 30, 40, 50])

    The average option will find the mean value of all the traces to display::

        plot_traces(config_file='config.json', node_ids=range(50, 100), average=True)

    You may also group together different subsets of nodes and display multiple averages based on certain attributes
    of the network, which can be done using the group_by key. The group_excludes option will exclude certain groups.
    For example if you want to plot the averaged membrane potential across each cortical "layer", exclude L1::

        plot_traces(config_file='config.json', report='membrane_potential', group_by='layer', group_excludes='L1')

    :param config_file: path to SONATA simulation configuration.
    :param report_name: name of the membrane_report "report" which will be plotted. If only one compartment report
        in the simulation config then function will find it automatically.
    :param population: string. If the report more than one population of nodes, use this to determine which nodes to
        plot. If only one population exists and population=None then the function will find it by default.
    :param report_path: Path to SONATA compartment report file. Do not use with "config_file" and "report_name"
        options.
    :param group_by: Attribute of the "nodes" file used to group and average subsets of nodes.
    :param group_excludes: list of strings or None. When using the "group_by", allows users to exclude certain
        groupings based on the attribute value.
    :param nodes_file: path to nodes hdf5 file containing "population". By default this will be resolved using the
        config.
    :param node_types_file: path to node-types csv file containing "population". By default this will be resolved
        using the config.
    :param node_ids: int or list of integers. Individual node to display the variable.
    :param sections: 'origin', 'all', or list of ids, Compartments/elements to display, By default will only show
        values at the soma.
    :param average: If true will display average of "node_ids". Default: False
    :param times: (float, float), start and stop times of simulation. By default will get values from simulation
        configs "run" section.
    :param title: str, adds a title to the plot. If None (default) then name will be automatically generated using
        the report_name.
    :param show_legend: Set True or False to determine if legend should be displayed on the plot. The default (None)
        function itself will guess if legend should be shown.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
        save plot.
    :return: matplotlib figure.Figure object
    """
    # Local import: DataFrame.append() was removed in pandas 2.0, we need concat.
    import pandas as pd

    sonata_config = SonataConfig.from_json(config_file) if config_file else None
    report_name, cr = _get_report(report_path=report_path, config=sonata_config, report_name=report_name)

    if population is None:
        pops = cr.populations
        if len(pops) > 1:
            raise ValueError(
                'Report {} contains more than population of nodes ({}). Use population parameter'.format(
                    report_name, pops
                ))
        population = pops[0]

    if title is None:
        title = '{} ({})'.format(report_name, population)

    # Create node-groups
    if group_by is not None:
        node_groups = []
        nodes = _find_nodes(population=population, config=sonata_config, nodes_file=nodes_file,
                            node_types_file=node_types_file)
        # Collect node_id -> group_by attribute across every node group that
        # actually has the column.
        frames = [grp.to_dataframe()[['node_id', group_by]]
                  for grp in nodes.groups if group_by in grp.all_columns]
        if not frames:
            raise ValueError('Could not find any nodes with group_by attribute "{}"'.format(group_by))
        # BUG FIX: replaced DataFrame.append (removed in pandas 2.0) with pd.concat.
        grouped_df = pd.concat(frames, ignore_index=True)

        # Convert from string to list so we can always use the isin() method for filtering
        if isinstance(group_excludes, string_types):
            group_excludes = [group_excludes]
        elif group_excludes is None:
            group_excludes = []

        for grp_key, grp in grouped_df.groupby(group_by):
            if grp_key in group_excludes:
                continue
            node_groups.append({'node_ids': np.array(grp['node_id']), 'label': grp_key})

        if len(node_groups) == 0:
            exclude_str = ' excluding values {}'.format(', '.join(group_excludes)) if len(group_excludes) > 0 else ''
            raise ValueError('Could not find any node-groups using group_by="{}"{}.'.format(group_by, exclude_str))
    else:
        node_groups = None

    return plotting.plot_traces(
        report=cr,
        population=population,
        node_ids=node_ids,
        sections=sections,
        average=average,
        node_groups=node_groups,
        times=times,
        title=title,
        show_legend=show_legend,
        show=show,
        save_as=save_as
    )
def save_synapses_csv(circuit_config, population):
    """Dump synapse locations for the '<population>_to_<population>' edges to network_synapses.csv.

    Relies on network_cells.csv (produced by save_nodes_csv) being present in
    the config directory, since cell sections are rebuilt from it.

    :param circuit_config: path to SONATA circuit config json.
    :param population: name of the node population whose recurrent edges to export.
    """
    config = SonataConfig.from_json(circuit_config)
    config_dir = config['manifest']['configdir']

    network_edges = config['networks']['edges']
    edges_h5 = [n['edges_file'] for n in network_edges]
    edge_types_csv = [n['edge_types_file'] for n in network_edges]
    l4_net = sonata.File(data_files=edges_h5, data_type_files=edge_types_csv)
    l4_edges = l4_net.edges['{0}_to_{0}'.format(population)]

    # Rebuild cell section geometry from the previously-saved cells csv.
    l4_nodes_df = pd.read_csv(os.path.join(config_dir, 'network_cells.csv'), sep=' ')
    cells = {}
    for nid, r in l4_nodes_df.iterrows():
        cells[nid] = CellSections.load_row(node_id=nid, row=r, config_dir=config_dir)

    src_node_ids = []
    trg_node_ids = []
    sec_ids = []
    sec_xs = []
    syn_x = []
    syn_y = []
    syn_z = []
    for e in l4_edges:
        src_node_ids.append(e.source_node_id)
        trg_node_ids.append(e.target_node_id)
        cell = cells[e.target_node_id]
        # BUG FIX: removed leftover per-edge debug print and the redundant
        # second cells[e.target_node_id] lookup.
        if cell.is_biophysical:
            sec_id = e['sec_id']
            sec_x = e['sec_x']
            syn_coords = cell.get_coords(sec_id, sec_x)
            sec_ids.append(sec_id)
            sec_xs.append(sec_x)
            syn_x.append(syn_coords[0])
            syn_y.append(syn_coords[1])
            syn_z.append(syn_coords[2])
        else:
            # Point neurons have no morphology: mark section/coordinates missing.
            sec_ids.append(-1)
            sec_xs.append(-1)
            syn_x.append(np.nan)
            syn_y.append(np.nan)
            syn_z.append(np.nan)

    edges_df = pd.DataFrame({
        'target_node_id': trg_node_ids,
        'source_node_id': src_node_ids,
        'section_id': sec_ids,
        'section_x': sec_xs,
        'afferent_x': syn_x,
        'afferent_y': syn_y,
        'afferent_z': syn_z
    })
    edges_df.to_csv(os.path.join(config_dir, 'network_synapses.csv'), sep=' ', index=False, na_rep=np.nan)
def test_json_split():
    """Can independently load a circuit_config.json and simulation_config.json into a single dict"""
    circuit_cfg = {
        'manifest': {
            '$NETWORK_DIR': 'network_tst'
        },
        'networks': {
            'node_files': {
                'nodes': '$NETWORK_DIR/nodes.h5',
                'node_types': '${NETWORK_DIR}/node_types.csv'
            }
        }
    }
    simulation_cfg = {
        'manifest': {
            '$OUTPUT_DIR': 'output_tst'
        },
        'output': {
            'output_dir': '$OUTPUT_DIR',
            'spikes_file': 'spikes.h5'
        }
    }

    def dump_json(obj, path):
        # BUG FIX: json.dump(obj, open(path, 'w')) never closed the handle, so
        # the data was only flushed when the GC happened to collect it. Write
        # through a context manager so the file is complete before re-reading.
        with open(path, 'w') as f:
            json.dump(obj, f)

    circuit_file = tempfile.NamedTemporaryFile(suffix='.json')
    dump_json(circuit_cfg, circuit_file.name)

    # Case: circuit_cfg and simulation_cfg have been merged into a single json
    sim_file = tempfile.NamedTemporaryFile(suffix='.json')
    dump_json(simulation_cfg, sim_file.name)
    config_file = tempfile.NamedTemporaryFile(suffix='.json')
    dump_json({'network': circuit_file.name, 'simulation': sim_file.name}, config_file.name)

    config_dict = SonataConfig.from_json(config_file.name)
    assert (isinstance(config_dict, SonataConfig))
    assert (isinstance(config_dict, dict))
    assert (config_dict['output']['output_dir'] == 'output_tst')
    assert (config_dict['output']['spikes_file'] == 'output_tst/spikes.h5')
    assert (config_dict['networks']['node_files']['nodes'] == 'network_tst/nodes.h5')
    assert (config_dict['networks']['node_files']['node_types'] == 'network_tst/node_types.csv')

    # Case: one of the config files is missing
    sim_file = tempfile.NamedTemporaryFile(suffix='.json')
    dump_json(simulation_cfg, sim_file.name)
    config_file = tempfile.NamedTemporaryFile(suffix='.json')
    dump_json({'simulation': circuit_file.name}, config_file.name)
    config_dict = SonataConfig.from_json(config_file.name)
    assert ('output' not in config_dict)
    assert (config_dict['networks']['node_files']['nodes'] == 'network_tst/nodes.h5')
    assert (config_dict['networks']['node_files']['node_types'] == 'network_tst/node_types.csv')

    # Case: one config contains a link to another
    sim_file = tempfile.NamedTemporaryFile(suffix='.json')
    dump_json(simulation_cfg, sim_file.name)
    config_file = tempfile.NamedTemporaryFile(suffix='.json')
    simulation_cfg.update({'network': circuit_file.name})
    dump_json(simulation_cfg, config_file.name)
    config_dict = SonataConfig.from_json(config_file.name)
    assert (config_dict['output']['output_dir'] == 'output_tst')
    assert (config_dict['output']['spikes_file'] == 'output_tst/spikes.h5')
    assert (config_dict['networks']['node_files']['nodes'] == 'network_tst/nodes.h5')
    assert (config_dict['networks']['node_files']['node_types'] == 'network_tst/node_types.csv')