Example #1
def load_config(config):
    if isinstance(config, string_types):
        return ConfigDict.from_json(config)
    elif isinstance(config, dict):
        return ConfigDict.from_dict(config)
    else:
        raise Exception('Could not convert {} (type "{}") to json.'.format(config, type(config)))
Example #2
def test_node_set_file():
    tmp_ns_file = tempfile.NamedTemporaryFile(suffix='.json')
    json.dump(
        {'bio_cells': {
            'model': 'biophysical',
            'locations': ['L4', 'L2/3']
        }}, open(tmp_ns_file.name, 'w'))

    cfg = SonataConfig.from_dict({
        'target_simulator': 'NEURON',
        'node_sets_file': tmp_ns_file.name
    })

    assert ('node_sets' in cfg)
    assert ('node_sets_file' in cfg)
    assert (set(cfg['node_sets'].keys()) == {'bio_cells'})
    assert (set(
        cfg['node_sets']['bio_cells'].keys()) == {'model', 'locations'})
    assert (cfg['node_sets']['bio_cells']['model'] == 'biophysical')
    assert (cfg['node_sets']['bio_cells']['locations'] == ['L4', 'L2/3'])

    cfg = SonataConfig.from_dict({
        'target_simulator': 'NEURON',
        'node_sets_file': tmp_ns_file.name,
        'node_sets': {
            'point_cells': {
                'key': 'val'
            }
        }
    })
    assert ('node_sets' in cfg)
    assert ('node_sets_file' in cfg)
    assert (set(cfg['node_sets']['point_cells'].keys()) == {'key'})
Example #3
def test_build_manifest_fail2():
    """Test recursive definition"""
    config_file = {
        'manifest': {
            '$BASE': '$TMP/share',
            '$TMP': '$BASE/share',
        }
    }
    with pytest.raises(Exception):
        SonataConfig.from_dict(config_file)
Example #4
def test_build_manifest_fail1():
    """Test exception occurs when variable is missing"""
    config_file = {
        'manifest': {
            '$BASE': '/base',
            '$TMP': '$VAR/Smat',
        }
    }
    with pytest.raises(Exception):
        SonataConfig.from_dict(config_file)
Example #5
def load_spikes_file(config_file=None, spikes_file=None):
    if spikes_file is not None:
        return SpikeTrains.load(spikes_file)

    elif config_file is not None:
        config = ConfigDict.from_json(config_file)
        return SpikeTrains.load(config.spikes_file)
Example #6
def plot_potential(cell_vars_h5=None,
                   config_file=None,
                   gids=None,
                   show_plot=True,
                   save=False):
    if cell_vars_h5 is None and config_file is None:
        raise Exception(
            'Please specify a cell_vars hdf5 file or a simulation config.')

    if cell_vars_h5 is not None:
        plot_potential_hdf5(cell_vars_h5,
                            gids=gids,
                            show_plot=show_plot,
                            save_as='sim_potential.jpg' if save else None)

    else:
        # load the json file or object
        if isinstance(config_file, string_types):
            config = cfg.from_json(config_file)
        elif isinstance(config_file, dict):
            config = config_file
        else:
            raise Exception('Could not convert {} (type "{}") to json.'.format(
                config_file, type(config_file)))

        gid_list = gids or config['node_id_selections']['save_cell_vars']
        for gid in gid_list:
            save_as = '{}_v.jpg'.format(gid) if save else None
            title = 'cell gid {}'.format(gid)
            var_h5 = os.path.join(config['output']['cell_vars_dir'],
                                  '{}.h5'.format(gid))
            plot_potential_hdf5(var_h5, title, show_plot, save_as)
Example #7
def test_inputs():
    _ = pytest.importorskip('jsonschema')
    # valid inputs section
    cfg = SonataConfig.from_dict({
        "inputs": {
            "input1": {
                'input_type': 'str1',
                'input_file': 'str2',
                'trial': 'str2',
                'module': 'str',
                'electrode_file': 'str',
                'node_set': 'str',
                'random_seed': 100
            }
        }
    })
    assert (cfg.validate())

    # Base inputs
    cfg = SonataConfig.from_dict(
        {"inputs": [{
            'input_type': 'spikes',
            'input_file': 'myspikes.csv'
        }]})
    with pytest.raises(Exception):
        cfg.validate()

    # missing input_type
    cfg = SonataConfig.from_dict({
        "inputs": {
            "input1": {
                'input_file': 'str2',
                'trial': 'str2',
                'module': 'myspikes.csv',
                'electrode_file': 'myspikes.csv',
                'node_set': 'myspikes.csv',
                'random_seed': 100
            }
        }
    })
    with pytest.raises(Exception):
        cfg.validate()
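
Aside: cfg.validate() in these tests depends on the optional jsonschema package. The toy schema below is not the real SONATA schema shipped with bmtk, only an assumed stand-in to illustrate the two failure modes exercised above: "inputs" given as a list instead of an object, and an input entry missing "input_type".

# Illustrative sketch only -- a toy jsonschema, not bmtk's actual validation schema.
import jsonschema

toy_inputs_schema = {
    'type': 'object',
    'properties': {
        'inputs': {
            'type': 'object',                # a list of inputs is rejected
            'additionalProperties': {
                'type': 'object',
                'required': ['input_type'],  # entries without input_type are rejected
            },
        },
    },
}

jsonschema.validate({'inputs': {'input1': {'input_type': 'spikes'}}}, toy_inputs_schema)   # passes
# jsonschema.validate({'inputs': [{'input_type': 'spikes'}]}, toy_inputs_schema)           # raises ValidationError
# jsonschema.validate({'inputs': {'input1': {'module': 'x'}}}, toy_inputs_schema)          # raises ValidationError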
Example #8
def test_build_manifest2():
    config_file = {
        'manifest': {
            '$DIR_DATA': 'data',
            '$DIR_MAT': 'mat',
            '$APPS': '/${DIR_DATA}/$DIR_MAT/apps'
        }
    }

    manifest = SonataConfig.from_dict(config_file)['manifest']
    assert (manifest['APPS'] == '/data/mat/apps')
Example #9
def test_missing_nodes_file():
    _ = pytest.importorskip('jsonschema')
    cfg = SonataConfig.from_dict(
        {"networks": {
            "nodes": [
                {
                    "node_types_file": "node_types.csv"
                },
            ]
        }})

    with pytest.raises(Exception):
        cfg.validate()
Example #10
def test_json():
    config_file = tempfile.NamedTemporaryFile(suffix='.json')
    sonata_cfg = {
        'manifest': {
            '$BASE': '${configdir}',
            '$TMP_ATTR': 'mytest'
        },
        'myvar': '$TMP_ATTR/myvar',
        'cwd': '${workingdir}',
        'cdir': '${configdir}',
        'cfname': '${configfname}'
    }
    json.dump(sonata_cfg, open(config_file.name, 'w'))

    config_dict = SonataConfig.from_json(config_file.name)
    assert (isinstance(config_dict, SonataConfig))
    assert (isinstance(config_dict, dict))
    assert (config_dict['myvar'] == 'mytest/myvar')
    assert (config_dict['cwd'] == os.getcwd())
    assert (config_dict['cdir'] == os.path.dirname(config_file.name))
    assert (config_dict['cfname'] == config_file.name)

    config_dict = SonataConfig.load(config_file.name)
    assert (isinstance(config_dict, SonataConfig))
    assert (isinstance(config_dict, dict))
    assert (config_dict['myvar'] == 'mytest/myvar')
    assert (config_dict['cwd'] == os.getcwd())
    assert (config_dict['cdir'] == os.path.dirname(config_file.name))
    assert (config_dict['cfname'] == config_file.name)

    with pytest.warns(DeprecationWarning):
        config_dict = from_json(config_file.name)
        assert (isinstance(config_dict, SonataConfig))
        assert (isinstance(config_dict, dict))
        assert (config_dict['myvar'] == 'mytest/myvar')
        assert (config_dict['cwd'] == os.getcwd())
        assert (config_dict['cdir'] == os.path.dirname(config_file.name))
        assert (config_dict['cfname'] == config_file.name)
Example #11
def test_build_manifest1():
    """Test simple manifest"""
    config_file = {
        'manifest': {
            '$BASE_DIR': '/base',
            '$TMP_DIR': '$BASE_DIR/tmp',
            '$SHARE_DIR': '${TMP_DIR}_1/share'
        }
    }

    manifest = SonataConfig.from_dict(config_file)['manifest']
    assert (manifest['BASE_DIR'] == '/base')
    assert (manifest['TMP_DIR'] == '/base/tmp')
    assert (manifest['SHARE_DIR'] == '/base/tmp_1/share')
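
Aside: Examples #3, #4, #8, and #11 all exercise manifest variable expansion ($VAR and ${VAR}). The standalone sketch below is not bmtk's implementation, only an illustration of the repeated-substitution behaviour these tests assert; special variables such as ${configdir} or ${datetime} used in other examples are not handled here.

# Illustrative sketch only -- resolves $VAR / ${VAR} references within a manifest dict.
import re

_VAR = re.compile(r'\$\{(\w+)\}|\$(\w+)')

def expand_manifest(manifest, max_passes=10):
    # Keys are stored without the leading '$', since the tests index manifest['BASE_DIR'].
    resolved = {key.lstrip('$'): val for key, val in manifest.items()}

    def substitute(match):
        name = match.group(1) or match.group(2)
        if name not in resolved:
            raise KeyError('Unknown manifest variable: {}'.format(name))
        return resolved[name]

    for _ in range(max_passes):
        changed = False
        for key, val in resolved.items():
            new_val = _VAR.sub(substitute, val)
            if new_val != val:
                resolved[key] = new_val
                changed = True
        if not changed:
            return resolved
    raise ValueError('Could not resolve manifest (recursive definition?)')

print(expand_manifest({
    '$BASE_DIR': '/base',
    '$TMP_DIR': '$BASE_DIR/tmp',
    '$SHARE_DIR': '${TMP_DIR}_1/share',
}))
# -> {'BASE_DIR': '/base', 'TMP_DIR': '/base/tmp', 'SHARE_DIR': '/base/tmp_1/share'}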
Example #12
def test_dict():
    sonata_dict = {
        'manifest': {
            '$BASE': '${configdir}',
            '$TMP_ATTR': 'mytest'
        },
        'myvar': '$TMP_ATTR/myvar'
    }

    config_dict = SonataConfig.from_dict(sonata_dict)
    assert (isinstance(config_dict, SonataConfig))
    assert (isinstance(config_dict, dict))
    assert (config_dict['myvar'] == 'mytest/myvar')

    config_dict = SonataConfig.load(sonata_dict)
    assert (isinstance(config_dict, SonataConfig))
    assert (isinstance(config_dict, dict))
    assert (config_dict['myvar'] == 'mytest/myvar')

    config_dict = from_dict(sonata_dict)
    assert (isinstance(config_dict, SonataConfig))
    assert (isinstance(config_dict, dict))
    assert (config_dict['myvar'] == 'mytest/myvar')
Example #13
def load_reports(config_file):
    cfg = ConfigDict.from_json(config_file)
    reports = []
    for report_name, report in cfg.reports.items():
        if report['module'] not in ['membrane_report', 'multimeter_report']:
            continue
        report_file = report[
            'file_name'] if 'file_name' in report else '{}.h5'.format(
                report_name)
        report_file = report_file if os.path.isabs(
            report_file) else os.path.join(cfg.output_dir, report_file)
        reports.append(CompartmentReport(report_file, 'r'))

    return reports
Example #14
File: ecp.py  Project: shixnya/bmtk
def plot_ecp(config_file=None,
             report_name=None,
             ecp_path=None,
             title=None,
             show=True):
    sonata_config = SonataConfig.from_json(
        config_file) if config_file else None

    _, ecp_path = _get_ecp_path(ecp_path=ecp_path,
                                config=sonata_config,
                                report_name=report_name)
    ecp_h5 = h5py.File(ecp_path, 'r')

    time_traces = np.arange(start=ecp_h5['/ecp/time'][0],
                            stop=ecp_h5['/ecp/time'][1],
                            step=ecp_h5['/ecp/time'][2])

    channels = ecp_h5['/ecp/channel_id'][()]
    fig, axes = plt.subplots(len(channels), 1)
    fig.text(0.04, 0.5, 'channel id', va='center', rotation='vertical')
    for idx, channel in enumerate(channels):
        data = ecp_h5['/ecp/data'][:, idx]
        axes[idx].plot(time_traces, data)
        axes[idx].spines["top"].set_visible(False)
        axes[idx].spines["right"].set_visible(False)
        axes[idx].set_yticks([])
        axes[idx].set_ylabel(channel)

        if idx + 1 != len(channels):
            axes[idx].spines["bottom"].set_visible(False)
            axes[idx].set_xticks([])
        else:
            axes[idx].set_xlabel('timestamps (ms)')
            # scalebar = AnchoredSizeBar(axes[idx].transData,
            #                            2.0, '1 mV', 1,
            #                            pad=0,
            #                            borderpad=0,
            #                            # color='b',
            #                            frameon=True,
            #                            # size_vertical=1.001,
            #                            # fontproperties=fontprops
            #                            )
            #
            # axes[idx].add_artist(scalebar)

    if title:
        fig.suptitle(title)

    if show:
        plt.show()
Example #15
def test_special_vars():
    cfg = SonataConfig.from_dict({
        'manifest': {
            '$VAR_DATETIME': '${datetime}'
        },
        'datetime': '${VAR_DATETIME}',
        'time': '${time}',
        'date': '${date}',
        'combined': 'myfile_${date}.csv'
    })

    assert (isinstance(datetime.strptime(cfg['datetime'], '%Y-%m-%d_%H-%M-%S'),
                       datetime))
    assert (isinstance(datetime.strptime(cfg['time'], '%H-%M-%S'), datetime))
    assert (isinstance(datetime.strptime(cfg['date'], '%Y-%m-%d'), datetime))
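
Aside: the assertions above pin down the formats of the special ${datetime}, ${date}, and ${time} variables. A minimal sketch of producing such strings with the standard library follows (the variable names here are illustrative only, not bmtk internals):

# Illustrative sketch only -- formats matching the strptime patterns asserted above.
from datetime import datetime

now = datetime.now()
special_vars = {
    'datetime': now.strftime('%Y-%m-%d_%H-%M-%S'),
    'date': now.strftime('%Y-%m-%d'),
    'time': now.strftime('%H-%M-%S'),
}
print('myfile_{}.csv'.format(special_vars['date']))  # e.g. myfile_2024-06-01.csv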
Example #16
def _plot_helper(plot_fnc, config_file=None, population=None, times=None, title=None, show=True, save_as=None,
                 group_by=None, group_excludes=None,
                 spikes_file=None, nodes_file=None, node_types_file=None):
    sonata_config = SonataConfig.from_json(config_file) if config_file else None
    pop, spike_trains = _find_spikes(config_file=config_file, spikes_file=spikes_file, population=population)

    # Create the title
    title = title if title is not None else '{} Nodes'.format(pop)

    # Get start and stop times from config if needed
    if sonata_config and times is None:
        times = (sonata_config.tstart, sonata_config.tstop)

    # Create node-groups
    if group_by is not None:
        node_groups = []
        nodes = _find_nodes(population=pop, config=sonata_config, nodes_file=nodes_file,
                            node_types_file=node_types_file)
        grouped_df = None
        for grp in nodes.groups:
            if group_by in grp.all_columns:
                grp_df = grp.to_dataframe()
                grp_df = grp_df[['node_id', group_by]]
                grouped_df = grp_df if grouped_df is None else grouped_df.append(grp_df, ignore_index=True)

        if grouped_df is None:
            raise ValueError('Could not find any nodes with group_by attribute "{}"'.format(group_by))

        # Convert from string to list so we can always use the isin() method for filtering
        if isinstance(group_excludes, string_types):
            group_excludes = [group_excludes]
        elif group_excludes is None:
            group_excludes = []

        for grp_key, grp in grouped_df.groupby(group_by):
            if grp_key in group_excludes:
                continue
            node_groups.append({'node_ids': np.array(grp['node_id']), 'label': grp_key})

    else:
        node_groups = None

    return plot_fnc(
        spike_trains=spike_trains, node_groups=node_groups, population=pop, times=times, title=title, show=show,
        save_as=save_as
    )
Example #17
def test_output_dir():
    cfg = SonataConfig.from_dict({
        'manifest': {
            '$OUTPUT_DIR': 'my/output'
        },
        'output': {
            'output_dir': '$OUTPUT_DIR',
            'log_file': 'log.txt',
            'spikes_file': 'tmp/spikes.h5',
            'spikes_file_csv':
            '/abs/path/to/spikes.csv',  # do not prepend to absolute paths
            'spikes_file_nwb': '$OUTPUT_DIR/spikes.nwb'  # already anchored to $OUTPUT_DIR, do not prepend again
        }
    })

    assert (cfg['output']['log_file'] == 'my/output/log.txt')
    assert (cfg['output']['spikes_file'] == 'my/output/tmp/spikes.h5')
    assert (cfg['output']['spikes_file_csv'] == '/abs/path/to/spikes.csv')
    assert (cfg['output']['spikes_file_nwb'] == 'my/output/spikes.nwb')
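
Aside: the assertions above imply a path-resolution rule for the "output" section: manifest variables are substituted first, and output_dir is prepended only to paths that are neither absolute nor already anchored to a manifest variable. A rough standalone sketch of that rule (an assumption drawn from the asserts, not bmtk's actual code):

# Illustrative sketch only -- mirrors the four assertions in test_output_dir above.
import os

def resolve_output_path(raw_path, manifest):
    # Substitute manifest variables, then prepend output_dir unless the path is absolute
    # or already starts with a manifest variable such as $OUTPUT_DIR.
    substituted = raw_path
    for var, val in manifest.items():
        substituted = substituted.replace(var, val)
    if os.path.isabs(raw_path) or any(raw_path.startswith(var) for var in manifest):
        return substituted
    return os.path.join(manifest['$OUTPUT_DIR'], substituted)

manifest = {'$OUTPUT_DIR': 'my/output'}
for path in ['log.txt', 'tmp/spikes.h5', '/abs/path/to/spikes.csv', '$OUTPUT_DIR/spikes.nwb']:
    print(resolve_output_path(path, manifest))
# my/output/log.txt, my/output/tmp/spikes.h5, /abs/path/to/spikes.csv, my/output/spikes.nwb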
Example #18
def test_valid_config():
    _ = pytest.importorskip('jsonschema')
    cfg = SonataConfig.from_dict({
        "manifest": {
            "$BASE": "${configdir}"
        },
        "target_simulator": "NEURON",
        "target_simulator_version": ">=7.4",
        'run': {
            'tstop': 3000.0,
            'dt': 0.001
        },
        "networks": {
            "nodes": [{
                "nodes_file": "nodes.h5",
                "node_types_file": "node_types.csv"
            }, {
                "nodes_file": "nodes2.h5",
                "node_types_file": "node_types2.csv"
            }]
        },
        "output": {
            'output_dir': 'output',
            'spikes_file': "null"
        },
        "inputs": {
            "input1": {
                'input_type': 'spikes',
                'input_file': 'myspikes.csv'
            },
            "input2": {
                'input_type': 'voltage_clamp'
            }
        }
    })

    assert (cfg.validate())
Example #19
def save_nodes_csv(circuit_config, population):
    config = SonataConfig.from_json(circuit_config)
    morphology_dir = config['components']['morphologies_dir']
    config_dir = config['manifest']['configdir']

    nodes_h5 = [n['nodes_file'] for n in config['networks']['nodes']]
    node_types_csv = [
        n['node_types_file'] for n in config['networks']['nodes']
    ]
    l4_net = sonata.File(data_files=nodes_h5, data_type_files=node_types_csv)

    net_df = l4_net.nodes[population].to_dataframe()
    for rot_axis in [
            'rotation_angle_xaxis', 'rotation_angle_yaxis',
            'rotation_angle_zaxis'
    ]:
        if rot_axis not in net_df.columns:
            net_df[rot_axis] = 0.0

        net_df[rot_axis] = net_df[rot_axis].fillna(0.0)

    net_df = net_df[[
        'x', 'y', 'z', 'rotation_angle_xaxis', 'rotation_angle_yaxis',
        'rotation_angle_zaxis', 'morphology', 'model_processing', 'model_name',
        'model_type'
    ]]

    p = PurePath(morphology_dir)
    morp_rel_path = p.relative_to(config_dir)
    net_df['morphology'] = net_df.apply(
        lambda r: os.path.join(morp_rel_path, r['morphology'])
        if isinstance(r['morphology'], string_types) else None,
        axis=1)

    net_df.to_csv(os.path.join(config_dir, 'network_cells.csv'),
                  sep=' ',
                  na_rep="None")
Example #20
def test_user_vars():
    cfg = SonataConfig.from_dict(
        {
            'my_int': '${my_int}',
            'my_bool': '${my_bool}',
            'my_float': '${my_float}',
            'my_list': '${my_list}',
            'my_str': '${my_str}',
            'combined_strs': '${my_str}bar',
            'combined_int': 'file.${my_int}.txt'
        },
        my_int=100,
        my_bool=True,
        my_float=0.001,
        my_list=['a', 'b'],
        my_str='foo')

    assert (cfg['my_int'] == 100)
    assert (cfg['my_bool'] is True)
    assert (cfg['my_float'] == 0.001)
    assert (cfg['my_list'] == ['a', 'b'])
    assert (cfg['my_str'] == 'foo')
    assert (cfg['combined_strs'] == 'foobar')
    assert (cfg['combined_int'] == 'file.100.txt')
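
Aside: this test shows that a value consisting solely of ${var} keeps the user-supplied Python type, while ${var} embedded in a longer string is interpolated as text. A small illustrative sketch of that distinction (not bmtk's implementation):

# Illustrative sketch only -- type-preserving vs. string-interpolating substitution.
import re

def substitute_user_vars(value, **user_vars):
    # If the whole value is exactly one ${var}, return the typed value (int, bool, list, ...);
    # otherwise interpolate each variable into the surrounding string.
    whole = re.fullmatch(r'\$\{(\w+)\}', value)
    if whole:
        return user_vars[whole.group(1)]
    return re.sub(r'\$\{(\w+)\}', lambda m: str(user_vars[m.group(1)]), value)

print(substitute_user_vars('${my_int}', my_int=100))           # 100 (an int, not '100')
print(substitute_user_vars('file.${my_int}.txt', my_int=100))  # file.100.txt
print(substitute_user_vars('${my_str}bar', my_str='foo'))      # foobar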
Example #21
def plot_traces(report_path=None,
                config_file=None,
                report_name=None,
                population=None,
                group_by=None,
                group_excludes=None,
                nodes_file=None,
                node_types_file=None,
                node_ids=None,
                sections='origin',
                average=False,
                times=None,
                title=None,
                show_legend=None,
                show=True):
    sonata_config = SonataConfig.from_json(
        config_file) if config_file else None
    report_name, cr = _get_report(report_path=report_path,
                                  config=sonata_config,
                                  report_name=report_name)

    if population is None:
        pops = cr.populations
        if len(pops) > 1:
            raise ValueError(
                'Report {} contains more than one population of nodes ({}). Use the population parameter'
                .format(report_name, pops))
        population = pops[0]

    if title is None:
        title = '{} ({})'.format(report_name, population)

    # Create node-groups
    if group_by is not None:
        node_groups = []
        nodes = _find_nodes(population=population,
                            config=sonata_config,
                            nodes_file=nodes_file,
                            node_types_file=node_types_file)

        grouped_df = None
        for grp in nodes.groups:
            if group_by in grp.all_columns:
                grp_df = grp.to_dataframe()
                grp_df = grp_df[['node_id', group_by]]
                grouped_df = grp_df if grouped_df is None else grouped_df.append(
                    grp_df, ignore_index=True)

        if grouped_df is None:
            raise ValueError(
                'Could not find any nodes with group_by attribute "{}"'.format(
                    group_by))

        # Convert from string to list so we can always use the isin() method for filtering
        if isinstance(group_excludes, string_types):
            group_excludes = [group_excludes]
        elif group_excludes is None:
            group_excludes = []

        for grp_key, grp in grouped_df.groupby(group_by):
            if grp_key in group_excludes:
                continue
            node_groups.append({
                'node_ids': np.array(grp['node_id']),
                'label': grp_key
            })

        if len(node_groups) == 0:
            exclude_str = ' excluding values {}'.format(
                ', '.join(group_excludes)) if len(group_excludes) > 0 else ''
            raise ValueError(
                'Could not find any node-groups using group_by="{}"{}.'.format(
                    group_by, exclude_str))

    else:
        node_groups = None

    return plotting.plot_traces(report=cr,
                                population=population,
                                node_ids=node_ids,
                                sections=sections,
                                average=average,
                                node_groups=node_groups,
                                times=times,
                                title=title,
                                show_legend=show_legend,
                                show=show)
Example #22
def _find_spikes(spikes_file=None, config_file=None, population=None):
    candidate_spikes = []

    # Get spikes file(s)
    if spikes_file:
        # User has explicitly set the location of the spike files
        candidate_spikes.append(spikes_file)

    elif config_file is not None:
        # Otherwise search the config.json for all possible output spikes_files. We can use the simulation_reports
        # module to find any spikes output file specified in config's "output" or "reports" section.
        config = SonataConfig.from_json(config_file)
        sim_reports = simulation_reports.from_config(config)
        for report in sim_reports:
            if report.module == 'spikes_report':
                # BMTK can end up writing the same spikes to SONATA, CSV, and NWB formats. Try fetching the SONATA
                # version first, then CSV, and finally NWB if it exists.
                spikes_sonata = report.params.get('spikes_file', None)
                spikes_csv = report.params.get('spikes_file_csv', None)
                spikes_nwb = report.params.get('spikes_file_nwb', None)

                if spikes_sonata is not None:
                    candidate_spikes.append(spikes_sonata)
                elif spikes_csv is not None:
                    candidate_spikes.append(spikes_csv)
                elif spikes_nwb is not None:
                    candidate_spikes.append(spikes_nwb)

        # TODO: Should we also look in the "inputs" for displaying input spike statistics?

    if not candidate_spikes:
        raise ValueError(
            'Could not find an output spikes-file. Use "spikes_file" parameter option.'
        )

    # Find the file that contains spikes for the specified "population" of nodes. If the "population" parameter is
    # not specified, try to guess which spikes the user wants to visualize.
    if population is not None:
        spikes_obj = None
        for spikes_f in candidate_spikes:
            st = SpikeTrains.load(spikes_f)
            if population in st.populations:
                if spikes_obj is None:
                    spikes_obj = st
                else:
                    spikes_obj.merge(st)

        if spikes_obj is None:
            raise ValueError(
                'Could not find spikes file with node population "{}".'.format(
                    population))
        else:
            return population, spikes_obj

    else:
        if len(candidate_spikes) > 1:
            raise ValueError('Found more than one spike-trains file')

        spikes_f = candidate_spikes[0]
        if not os.path.exists(spikes_f):
            raise ValueError(
                'Did not find spike-trains file {}. Make sure the simulation has completed.'
                .format(spikes_f))

        spikes_obj = SpikeTrains.load(spikes_f)

        if len(spikes_obj.populations) > 1:
            raise ValueError(
                'Spikes file {} contains more than one node population.'.
                format(spikes_f))
        else:
            return spikes_obj.populations[0], spikes_obj
Example #23
def plot_traces(config_file=None, report_name=None, population=None, report_path=None, group_by=None,
                group_excludes=None, nodes_file=None, node_types_file=None,
                node_ids=None, sections='origin', average=False, times=None, title=None,
                show_legend=None, show=True, save_as=None):
    """Plot compartment variables (eg Membrane Voltage, Calcium conc.) traces from the output of simulation. Will
    attempt to look in the SONATA simulation configuration json "reports" sections for any matching "membrane_report"
    outputs with a matching report_name::

        plot_traces(config_file='config.json', report_name='membrane_potential')

    If the path to the report is different from (or missing in) what's in the SONATA config then use the "report_path"
    option instead::

        plot_traces(report_path='/my/path/to/membrane_potential.h5')

    To display the traces of only a select number of nodes you can filter using the node_ids options::

        plot_traces(config_file='config.json', node_ids=[10, 20, 30, 40, 50])

    The average option will find the mean value of all the traces to display::

        plot_traces(config_file='config.json', node_ids=range(50, 100), average=True)

    You may also group together different subsets of nodes and display multiple averages based on certain attributes
    of the network, which can be done using the group_by key. The group_excludes option will exclude certain groups.
    For example, to plot the averaged membrane potential across each cortical "layer" while excluding L1::

        plot_traces(config_file='config.json', report_name='membrane_potential', group_by='layer', group_excludes='L1')

    :param config_file: path to SONATA simulation configuration.
    :param report_name: name of the membrane_report "report" which will be plotted. If there is only one compartment
        report in the simulation config then the function will find it automatically.
    :param population: string. If the report contains more than one population of nodes, use this to determine which
           nodes to plot. If only one population exists and population=None then the function will find it by default.
    :param report_path: Path to SONATA compartment report file. Do not use with "config_file" and "report_name" options.
    :param group_by: Attribute of the "nodes" file used to group and average subsets of nodes.
    :param group_excludes: list of strings or None. When using the "group_by", allows users to exclude certain groupings
        based on the attribute value.
    :param nodes_file: path to nodes hdf5 file containing "population". By default this will be resolved using the
        config.
    :param node_types_file: path to node-types csv file containing "population". By default this will be resolved using
        the config.
    :param node_ids: int or list of integers. Individual node(s) for which to display the variable.
    :param sections: 'origin', 'all', or list of ids. Compartments/elements to display; by default will only show
           values at the soma.
    :param average: If true will display average of "node_ids". Default: False
    :param times: (float, float), start and stop times of simulation. By default will get values from simulation
        configs "run" section.
    :param title: str, adds a title to the plot. If None (default) then name will be automatically generated using the
        report_name.
    :param show_legend: Set True or False to determine if the legend should be displayed on the plot. With the default
           (None) the function itself will guess whether the legend should be shown.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
        save plot.
    :return: matplotlib figure.Figure object
    """

    sonata_config = SonataConfig.from_json(config_file) if config_file else None
    report_name, cr = _get_report(report_path=report_path, config=sonata_config, report_name=report_name)

    if population is None:
        pops = cr.populations
        if len(pops) > 1:
            raise ValueError('Report {} contains more than one population of nodes ({}). Use the population parameter'.format(
                report_name, pops
            ))
        population = pops[0]

    if title is None:
        title = '{} ({})'.format(report_name, population)

    # Create node-groups
    if group_by is not None:
        node_groups = []
        nodes = _find_nodes(population=population, config=sonata_config, nodes_file=nodes_file,
                            node_types_file=node_types_file)

        grouped_df = None
        for grp in nodes.groups:
            if group_by in grp.all_columns:
                grp_df = grp.to_dataframe()
                grp_df = grp_df[['node_id', group_by]]
                grouped_df = grp_df if grouped_df is None else grouped_df.append(grp_df, ignore_index=True)

        if grouped_df is None:
            raise ValueError('Could not find any nodes with group_by attribute "{}"'.format(group_by))

        # Convert from string to list so we can always use the isin() method for filtering
        if isinstance(group_excludes, string_types):
            group_excludes = [group_excludes]
        elif group_excludes is None:
            group_excludes = []

        for grp_key, grp in grouped_df.groupby(group_by):
            if grp_key in group_excludes:
                continue
            node_groups.append({'node_ids': np.array(grp['node_id']), 'label': grp_key})

        if len(node_groups) == 0:
            exclude_str = ' excluding values {}'.format(', '.join(group_excludes)) if len(group_excludes) > 0 else ''
            raise ValueError('Could not find any node-groups using group_by="{}"{}.'.format(group_by, exclude_str))

    else:
        node_groups = None

    return plotting.plot_traces(
        report=cr,
        population=population,
        node_ids=node_ids,
        sections=sections,
        average=average,
        node_groups=node_groups,
        times=times,
        title=title,
        show_legend=show_legend,
        show=show,
        save_as=save_as
    )
Example #24
def save_synapses_csv(circuit_config, population):
    config = SonataConfig.from_json(circuit_config)
    config_dir = config['manifest']['configdir']

    edges_h5 = [n['edges_file'] for n in config['networks']['edges']]
    edge_types_csv = [
        n['edge_types_file'] for n in config['networks']['edges']
    ]

    l4_net = sonata.File(data_files=edges_h5, data_type_files=edge_types_csv)
    l4_edges = l4_net.edges['{0}_to_{0}'.format(population)]
    l4_nodes_df = pd.read_csv(os.path.join(config_dir, 'network_cells.csv'),
                              sep=' ')
    cells = {}
    for nid, r in l4_nodes_df.iterrows():
        cells[nid] = CellSections.load_row(node_id=nid,
                                           row=r,
                                           config_dir=config_dir)

    src_node_ids = []
    trg_node_ids = []
    sec_ids = []
    sec_xs = []
    syn_x = []
    syn_y = []
    syn_z = []
    for e in l4_edges:
        src_node_ids.append(e.source_node_id)
        trg_node_ids.append(e.target_node_id)
        cell = cells[e.target_node_id]
        print(cell.node_id, cell.model_type, cell.is_biophysical)
        if cell.is_biophysical:
            cell_secs = cells[e.target_node_id]
            sec_id = e['sec_id']
            sec_x = e['sec_x']
            syn_coords = cell_secs.get_coords(sec_id, sec_x)

            sec_ids.append(sec_id)
            sec_xs.append(sec_x)
            syn_x.append(syn_coords[0])
            syn_y.append(syn_coords[1])
            syn_z.append(syn_coords[2])
        else:
            sec_ids.append(-1)
            sec_xs.append(-1)
            syn_x.append(np.nan)
            syn_y.append(np.nan)
            syn_z.append(np.nan)

    edges_df = pd.DataFrame({
        'target_node_id': trg_node_ids,
        'source_node_id': src_node_ids,
        'section_id': sec_ids,
        'section_x': sec_xs,
        'afferent_x': syn_x,
        'afferent_y': syn_y,
        'afferent_z': syn_z
    })
    edges_df.to_csv(os.path.join(config_dir, 'network_synapses.csv'),
                    sep=' ',
                    index=False,
                    na_rep=np.nan)
Example #25
def test_from_config():
    config = {
        'reports': {
            "membrane_potential": {
                "cells": 'some',
                "variable_name": "v",
                "module": "membrane_report",
                "sections": "soma",
                "enabled": True
            },
            "syn_report": {
                "cells": [0, 1],
                "variable_name": "tau1",
                "module": "netcon_report",
                "sections": "soma",
                "syn_type": "Exp2Syn"
            },
            "ecp": {
                "cells": 'all',
                "variable_name": "v",
                "module": "extracellular",
                "electrode_positions": "linear_electrode.csv",
                "file_name": "ecp.h5",
                "electrode_channels": "all",
                "contributions_dir": "ecp_contributions"
            },
            "spikes": {
                'cells': 'all',
                'module': 'spikes_report',
                'spikes_file': 'my_spikes.h5',
                'cache_to_disk': False
            }
        }
    }

    config_dict = SonataConfig.from_dict(config)
    reports = from_config(config_dict)

    assert (len(reports) == 4)
    assert ({r.report_name
             for r in reports
             } == {'spikes', 'ecp', 'membrane_potential', 'syn_report'})
    for report in reports:
        if report.report_name == 'spikes':
            assert (isinstance(report, SpikesReport))
            assert (report.params == {
                'cells': 'all',
                'spikes_file': 'my_spikes.h5',
                'cache_to_disk': False
            })

        elif report.report_name == 'ecp':
            assert (isinstance(report, ECPReport))
            assert (report.params == {
                'cells': 'all',
                'variable_name': 'v',
                'electrode_positions': 'linear_electrode.csv',
                'file_name': 'ecp.h5',
                'electrode_channels': 'all',
                'contributions_dir': 'ecp_contributions',
                'tmp_dir': '.'
            })

        elif report.report_name == 'membrane_potential':
            assert (isinstance(report, MembraneReport))
            assert (report.params == {
                'cells': 'some',
                'variable_name': ['v'],
                'sections': 'soma',
                'tmp_dir': '.',
                'file_name': 'membrane_potential.h5',
                'transform': {},
                'buffer_data': True
            })

        elif report.report_name == 'syn_report':
            assert (isinstance(report, MembraneReport))
            assert (report.params == {
                'cells': [0, 1],
                'variable_name': ['tau1'],
                'sections': 'soma',
                'syn_type': 'Exp2Syn',
                'tmp_dir': '.',
                'file_name': 'syn_report.h5',
                'transform': {},
                'buffer_data': True
            })
Example #26
def test_negative_tstop():
    _ = pytest.importorskip('jsonschema')
    cfg = SonataConfig.from_dict({'run': {'tstop': -1.0}})

    with pytest.raises(Exception):
        cfg.validate()
Example #27
def test_json_split():
    """Can independently load a circuit_config.json and simulation_config.json into a single dict"""
    circuit_cfg = {
        'manifest': {
            '$NETWORK_DIR': 'network_tst'
        },
        'networks': {
            'node_files': {
                'nodes': '$NETWORK_DIR/nodes.h5',
                'node_types': '${NETWORK_DIR}/node_types.csv'
            }
        }
    }

    simulation_cfg = {
        'manifest': {
            '$OUTPUT_DIR': 'output_tst'
        },
        'output': {
            'output_dir': '$OUTPUT_DIR',
            'spikes_file': 'spikes.h5'
        }
    }

    circuit_file = tempfile.NamedTemporaryFile(suffix='.json')
    json.dump(circuit_cfg, open(circuit_file.name, 'w'))

    # Case: circuit_cfg and simulation_cfg have been merged into a single json
    sim_file = tempfile.NamedTemporaryFile(suffix='.json')
    json.dump(simulation_cfg, open(sim_file.name, 'w'))
    config_file = tempfile.NamedTemporaryFile(suffix='.json')
    json.dump({
        'network': circuit_file.name,
        'simulation': sim_file.name
    }, open(config_file.name, 'w'))
    config_dict = SonataConfig.from_json(config_file.name)
    assert (isinstance(config_dict, SonataConfig))
    assert (isinstance(config_dict, dict))
    assert (config_dict['output']['output_dir'] == 'output_tst')
    assert (config_dict['output']['spikes_file'] == 'output_tst/spikes.h5')
    assert (config_dict['networks']['node_files']['nodes'] ==
            'network_tst/nodes.h5')
    assert (config_dict['networks']['node_files']['node_types'] ==
            'network_tst/node_types.csv')

    # Case: one of the config files is missing
    sim_file = tempfile.NamedTemporaryFile(suffix='.json')
    json.dump(simulation_cfg, open(sim_file.name, 'w'))
    config_file = tempfile.NamedTemporaryFile(suffix='.json')
    json.dump({'simulation': circuit_file.name}, open(config_file.name, 'w'))
    config_dict = SonataConfig.from_json(config_file.name)
    assert ('output' not in config_dict)
    assert (config_dict['networks']['node_files']['nodes'] ==
            'network_tst/nodes.h5')
    assert (config_dict['networks']['node_files']['node_types'] ==
            'network_tst/node_types.csv')

    # Case: one config contains a link to another
    sim_file = tempfile.NamedTemporaryFile(suffix='.json')
    json.dump(simulation_cfg, open(sim_file.name, 'w'))
    config_file = tempfile.NamedTemporaryFile(suffix='.json')
    simulation_cfg.update({'network': circuit_file.name})
    json.dump(simulation_cfg, open(config_file.name, 'w'))
    config_dict = SonataConfig.from_json(config_file.name)
    assert (config_dict['output']['output_dir'] == 'output_tst')
    assert (config_dict['output']['spikes_file'] == 'output_tst/spikes.h5')
    assert (config_dict['networks']['node_files']['nodes'] ==
            'network_tst/nodes.h5')
    assert (config_dict['networks']['node_files']['node_types'] ==
            'network_tst/node_types.csv')