Example #1
def _check_syn_spec(syn_spec, group_names, groups):
    gsize = len(groups)
    # test whether all (pre, post) type pairs are present in syn_spec
    alltypes = set(
        ((1, 1), (1, -1), (-1, 1), (-1, -1))).issubset(syn_spec.keys())
    # is there more than 1 type?
    types = list(set(g.neuron_type for g in groups))
    mt_type = len(types) > 1
    # check that only allowed entries are present
    edge_keys = []
    for k in syn_spec.keys():
        if isinstance(k, tuple):
            edge_keys.extend(k)
    edge_keys = set(edge_keys)
    allkeys = group_names + types
    assert edge_keys.issubset(allkeys), \
        '`syn_spec` edge entries can only be made from {}.'.format(allkeys)
    # warn if connections might be missing
    nspec = len(edge_keys)
    has_default = len(syn_spec) > nspec
    if mt_type and nspec < gsize**2 and not alltypes and not has_default:
        _log_message(
            logger, "WARNING",
            'There is not one synaptic specifier per inter-group '
            'connection in `syn_spec` and no default model was provided. '
            'Therefore, {} or 4 entries were expected but only {} were '
            'provided. It might be right, but make sure all cases are '
            'covered. Missing connections will be set as "static_'
            'synapse".'.format(gsize**2, nspec))
    for val in syn_spec.values():
        assert 'weight' not in val, '`weight` cannot be set here.'
        assert 'delay' not in val, '`delay` cannot be set here.'
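
For reference, a minimal sketch (not taken from the library) of a `syn_spec` dictionary that would satisfy this check for a two-group excitatory/inhibitory population; the inner keys are illustrative, since the snippet only forbids `weight` and `delay`:

# illustrative only: keys are (pre, post) neuron-type pairs (1 = excitatory,
# -1 = inhibitory) or group names; values must not contain "weight"/"delay"
syn_spec = {
    (1, 1): {"model": "tsodyks2_synapse"},
    (1, -1): {"model": "tsodyks2_synapse"},
    (-1, 1): {"model": "static_synapse"},
    (-1, -1): {"model": "static_synapse"},
}
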
Example #2
    def ids(self, value):
        if self._desired_size != len(value):
            _log_message(
                logger, "WARNING",
                'The length of the `ids` passed is not the same as '
                'the initial size that was declared: {} before '
                'vs {} now. Setting `ids` anyway, but check your '
                'code!'.format(self._desired_size, len(value)))
        self._ids = value
        self._desired_size = None
Example #3
    def _validity_check(self, name, group):
        if self._has_models and not group.has_model:
            raise AttributeError(
                "This NeuralPop requires group to have a model attribute that "
                "is not `None`; to disable this, use `set_model(None)` "
                "method on this NeuralPop instance.")
        elif group.has_model and not self._has_models:
            _log_message(
                logger, "WARNING",
                "This NeuralPop is not set to take models into "
                "account; use the `set_model` method to change its "
                "behaviour.")
Example #4
    def ids(self, value):
        data = set(value)

        if self._desired_size is not None and self._desired_size != len(data):
            _log_message(
                logger, "WARNING",
                'The number of unique `ids` passed is not the same '
                'as the initial size that was declared: {} before '
                'vs {} now. Setting `ids` anyway, but check your '
                'code!'.format(self._desired_size, len(value)))
        self._ids = data
        self._desired_size = None
Example #5
    def wrapper(*args, **kwargs):
        if kwargs.get("_warn", True):
            _log_message(
                _logger, "WARNING", "This function could interfere "
                "with NNGT, making your Network obsolete compared to "
                "the one in NEST... make sure to check what is "
                "modified!")

        if "_warn" in kwargs:
            del kwargs["_warn"]

        return old_nest_func(*args, **kwargs)
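
This `wrapper` is the inner function of a decorator applied to NEST calls. A self-contained sketch of the full pattern, with hypothetical names (`_warn_wrapper`, plain `print` instead of `_log_message`):

import functools

def _warn_wrapper(old_nest_func):
    # hypothetical standalone version of the pattern above: warn unless the
    # caller passes _warn=False, then delegate to the wrapped NEST function
    @functools.wraps(old_nest_func)
    def wrapper(*args, **kwargs):
        if kwargs.pop("_warn", True):  # pop combines the get + del above
            print("WARNING: this call may make the NNGT Network obsolete "
                  "compared to the one in NEST.")
        return old_nest_func(*args, **kwargs)
    return wrapper
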
Example #6
    def wrapper(*args, **kwargs):
        self = args[0]
        partial_backend = nngt.get_config("backend") in ("networkx", "nngt")
        for graph_name in self.graphs:
            if partial_backend and "corr" in graph_name:
                _log_message(logger, "DEBUG",
                             "Skipping correlated attributes with "
                             "networkx and nngt backends.")
            else:
                generated = self.gen_graph(graph_name)
                # check for None when using MPI
                if generated is not None:
                    g, di = generated
                    func(self, g, instructions=di, **kwargs)
Example #7
def _b2_from_data(ids, data):
    b2 = np.full(len(ids), np.NaN)
    if len(data[:, 0]) > 0:
        for i, neuron in enumerate(ids):
            # indices of the spikes emitted by `neuron` (avoid shadowing `ids`)
            idx = np.where(data[:, 0] == neuron)[0]
            dt1 = np.diff(data[idx, 1])
            dt2 = dt1[1:] + dt1[:-1]
            avg_isi = np.mean(dt1)
            if avg_isi != 0.:
                b2[i] = (2 * np.var(dt1) - np.var(dt2)) / (2 * avg_isi**2)
            else:
                b2[i] = np.inf
    else:
        _log_message(logger, "WARNING", 'No spikes in the data.')
    return b2
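
The loop above computes the B2 coefficient, B2 = (2*Var(ISI) - Var(ISI2)) / (2*mean(ISI)^2), where ISI2 sums consecutive interspike intervals. A self-contained numpy check on a perfectly regular spike train, which should give B2 of exactly zero:

import numpy as np

# regular 10-ms spike train for a single neuron: ISIs are constant, so both
# variances vanish and the B2 coefficient is zero
times = np.arange(0., 1000., 10.)
isi = np.diff(times)
isi2 = isi[1:] + isi[:-1]
b2 = (2 * np.var(isi) - np.var(isi2)) / (2 * np.mean(isi) ** 2)
print(b2)  # 0.0
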
Example #8
    def _validity_check(self, name, group):
        if self._has_models and not group.has_model:
            raise AttributeError(
                "This NeuralPop requires group to have a model attribute that "
                "is not `None`; to disable this, use `set_model(None)` "
                "method on this NeuralPop instance or set `with_models` to "
                "False when creating it.")
        elif group.has_model and not self._has_models:
            _log_message(logger, "WARNING",
                         "This NeuralPop is not set to take models into "
                         "account; use the `set_model` method to change its "
                         "behaviour.")

        if group.neuron_type not in (-1, 1):
            raise AttributeError("Valid neuron type must be -1 or 1.")

        # check pairwise disjoint
        super()._validity_check(name, group)
Example #9
    def new_edge(self, source, target, attributes=None, ignore=False,
                 self_loop=False):
        '''
        Adding a connection to the graph, with optional properties.

        .. versionchanged:: 2.0
            Added `self_loop` argument to enable adding self-loops.

        Parameters
        ----------
        source : :class:`int/node`
            Source node.
        target : :class:`int/node`
            Target node.
        attributes : :class:`dict`, optional (default: ``{}``)
            Dictionary containing optional edge properties. If the graph is
            weighted, defaults to ``{"weight": 1.}``, the unit weight for the
            connection (synaptic strength in NEST).
        ignore : bool, optional (default: False)
            If set to True, ignore attempts to add an existing edge and accept
            self-loops; otherwise an error is raised.
        self_loop : bool, optional (default: False)
            Whether to allow self-loops or not.

        Returns
        -------
        The new connection or None if nothing was added.
        '''
        attributes = {} if attributes is None \
                     else {k: [v] for k, v in attributes.items()}

        if source == target:
            if not ignore and not self_loop:
                raise InvalidArgument("Trying to add a self-loop.")
            elif ignore:
                _log_message(logger, "INFO",
                             "Self-loop on {} ignored.".format(source))

                return None

        return self.new_edges(((source, target),), attributes,
                              check_self_loops=(not ignore and not self_loop),
                              ignore_invalid=ignore)
Example #10
with_seaborn = False

if nngt._config["color_lib"] == "seaborn":
    try:
        import seaborn as sns
        with_seaborn = True
        sns.set_style("whitegrid")

        def sns_palette(c):
            if isinstance(c, float):
                pal = sns.color_palette(nngt._config["palette"], 100)
                return pal[int(c*100)]
            else:
                return sns.color_palette(nngt._config["palette"], len(c))

        palette_continuous = sns_palette
    except ImportError as e:
        _log_message(logger, "WARNING",
                     "`seaborn` requested but could not set it: {}.".format(e))


if not with_seaborn:
    try:
        mpl.rcParams['font.size'] = 12
        mpl.rcParams['font.family'] = 'serif'
        if nngt._config['use_tex']:
            mpl.rc('text', usetex=True)
        mpl.rcParams['axes.labelsize'] = mpl.rcParams['font.size']
        mpl.rcParams['axes.titlesize'] = 1.2*mpl.rcParams['font.size']
        mpl.rcParams['legend.fontsize'] = mpl.rcParams['font.size']
        mpl.rcParams['xtick.labelsize'] = mpl.rcParams['font.size']
        mpl.rcParams['ytick.labelsize'] = mpl.rcParams['font.size']
        mpl.rcParams['savefig.dpi'] = 300
        mpl.rcParams['savefig.format'] = 'pdf'
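
The same fallback defaults can also be applied in a single call; a minimal standalone sketch (plain matplotlib, independent of the NNGT configuration flags above, and omitting the optional `usetex` switch):

import matplotlib as mpl

# one-shot version of the fallback rc settings above
base_size = 12
mpl.rcParams.update({
    'font.size': base_size,
    'font.family': 'serif',
    'axes.labelsize': base_size,
    'axes.titlesize': 1.2 * base_size,
    'legend.fontsize': base_size,
    'xtick.labelsize': base_size,
    'ytick.labelsize': base_size,
    'savefig.dpi': 300,
    'savefig.format': 'pdf',
})
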
Example #11
def _xml(graph, attributes=None, additional_notif=None, **kwargs):
    try:
        from lxml import etree as ET
        lxml = True
    except ImportError:
        lxml = False
        import xml.etree.ElementTree as ET
        _log_message(logger, "WARNING",
                     "LXML is not installed, using Python XML for export. "
                     "Some apps like Gephi <= 0.9.2 will not read attributes "
                     "from the generated GraphML file due to elements' order.")

    NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
    NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
    NS_Y = "http://www.yworks.com/xml/graphml"
    NSMAP = {
        None: NS_GRAPHML,
        "xsi": NS_XSI
    }
    SCHEMALOCATION = " ".join(
        [
            "http://graphml.graphdrawing.org/xmlns",
            "http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd",
        ]
    )

    if lxml:
        # lxml: the default and xsi namespaces are declared through `nsmap`
        doc = ET.Element("graphml", nsmap=NSMAP)
    else:
        # plain ElementTree does not accept `nsmap`: declare the namespaces
        # as literal attributes instead
        doc = ET.Element("graphml",
                         {"xmlns": NS_GRAPHML, "xmlns:xsi": NS_XSI})

    doc.set("{{{}}}schemaLocation".format(NS_XSI), SCHEMALOCATION)

    # make graph element
    directedness = "directed" if graph.is_directed() else "undirected"

    eg = ET.SubElement(doc, "graph", edgedefault=directedness, id=graph.name)

    # prepare graph data
    del additional_notif["directed"]
    del additional_notif["name"]

    nattrs = additional_notif.pop("node_attributes")
    ntypes = additional_notif.pop("node_attr_types")

    for attr, atype in zip(nattrs, ntypes):
        kw = {"for": "node", "attr.name": attr, "attr.type": atype}
        if lxml:
            key = ET.Element("key", id=attr, **kw)
            eg.addprevious(key)
        else:
            ET.SubElement(doc, "key", id=attr, **kw)

    eattrs = additional_notif.pop("edge_attributes")
    etypes = additional_notif.pop("edge_attr_types")

    for attr, atype in zip(eattrs, etypes):
        kw = {"for": "edge", "attr.name": attr, "attr.type": atype}
        if lxml:
            key = ET.Element("key", id=attr, **kw)
            eg.addprevious(key)
        else:
            ET.SubElement(doc, "key", id=attr, **kw)

    # add remaining information as data to the graph
    for k, v in additional_notif.items():
        elt = ET.SubElement(doc, "data", key=k)
        elt.text = str(v)

    # add node information
    nattr = graph.get_node_attributes()

    for n in graph.get_nodes():
        nelt = ET.SubElement(eg, "node", id=str(n))

        for k, v in nattr.items():
            elt = ET.SubElement(nelt, "data", key=k)
            elt.text = str(v[n])

    # add edge information
    for e in graph.get_edges():
        nelt = ET.SubElement(eg, "edge", id="e{}".format(graph.edge_id(e)),
                             source=str(e[0]), target=str(e[1]))
        for k in eattrs:
            elt = ET.SubElement(nelt, "data", key=k)
            elt.text = str(graph.get_edge_attributes(e, name=k))

    kw = {"pretty_print": True} if lxml else {}

    return ET.tostring(doc, encoding="unicode", **kw)
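
A stripped-down, stdlib-only sketch of the GraphML skeleton this function builds (one <key> declaration per attribute, then <node>/<edge> elements whose <data> children reference those keys); the attribute names and values here are invented for illustration:

import xml.etree.ElementTree as ET

doc = ET.Element("graphml", {"xmlns": "http://graphml.graphdrawing.org/xmlns"})
ET.SubElement(doc, "key", {"id": "weight", "for": "edge",
                           "attr.name": "weight", "attr.type": "double"})
eg = ET.SubElement(doc, "graph", {"edgedefault": "directed", "id": "demo"})
for n in (0, 1):
    ET.SubElement(eg, "node", {"id": str(n)})
edge = ET.SubElement(eg, "edge", {"id": "e0", "source": "0", "target": "1"})
data = ET.SubElement(edge, "data", {"key": "weight"})
data.text = "1.0"
print(ET.tostring(doc, encoding="unicode"))
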
Example #12
def _load_from_file(filename,
                    fmt="auto",
                    separator=" ",
                    secondary=";",
                    attributes=None,
                    attributes_types=None,
                    notifier="@",
                    ignore="#",
                    cleanup=False):
    '''
    Load the main properties (edges, attributes...) from a file.

    Parameters
    ----------
    filename : str
        The path to the file.
    fmt : str, optional (default: "auto")
        The format used to save the graph. Supported formats are: "neighbour"
        (neighbour list, default if format cannot be deduced automatically),
        "ssp" (scipy.sparse), "edge_list" (list of all the edges in the graph,
        one edge per line, represented by a ``source target``-pair), "gml"
        (gml format, default if `filename` ends with '.gml'), "graphml"
        (graphml format, default if `filename` ends with '.graphml' or '.xml'),
        "dot" (dot format, default if `filename` ends with '.dot'), "gt" (only
        when using `graph_tool <http://graph-tool.skewed.de/>`_ as library,
        detected if `filename` ends with '.gt').
    separator : str, optional (default " ")
        separator used to separate inputs in the case of custom formats (namely
        "neighbour" and "edge_list")
    secondary : str, optional (default: ";")
        Secondary separator used to separate attributes in the case of custom
        formats.
    attributes : list, optional (default: [])
        List of names for the edge attributes present in the file. If a
        `notifier` is present in the file, names will be deduced from it;
        otherwise the attributes will be numbered.
    attributes_types : dict, optional (default: str)
        Backup information if the type of the attributes is not specified
        in the file. Values must be callables (types or functions) that will
        take the argument value as a string input and convert it to the proper
        type.
    notifier : str, optional (default: "@")
        Symbol specifying the following as meaningful information. Relevant
        information is formatted ``@info_name=info_value``, where
        ``info_name`` is in ("attributes", "directed", "name", "size") and
        associated ``info_value`` are of type (``list``, ``bool``, ``str``,
        ``int``).
        Additional notifiers are ``@type=SpatialGraph/Network/SpatialNetwork``,
        which must be followed by the relevant notifiers among ``@shape``,
        ``@structure``, and ``@graph``.
    ignore : str, optional (default: "#")
        Ignore lines starting with the `ignore` string.
    cleanup : bool, optional (default: False)
        If True, removes the nodes whose indices lie before the first and
        after the last node appearing in the edges, and renumbers the
        remaining nodes from 0.

    Returns
    -------
    di_notif : dict
        Dictionary containing the main graph arguments.
    edges : list of 2-tuples
        Edges of the graph.
    di_nattributes : dict
        Dictionary containing the node attributes.
    di_eattributes : dict
        Dictionary containing the edge attributes (name as key, value as a
        list sorted in the same order as `edges`).
    struct : :class:`~nngt.NeuralPop`
        Population (``None`` if not present in the file).
    shape : :class:`~nngt.geometry.Shape`
        Shape of the graph (``None`` if not present in the file).
    positions : array-like of shape (N, d)
        The positions of the neurons (``None`` if not present in the file).
    '''
    # check for mpi
    if nngt.get_config("mpi"):
        raise NotImplementedError("This function is not ready for MPI yet.")

    # load
    lst_lines, struct, shape, positions = None, None, None, None
    fmt = _get_format(fmt, filename)

    if fmt not in di_get_edges:
        raise ValueError("Unsupported format: '{}'".format(fmt))

    with open(filename, "r") as filegraph:
        lst_lines = _process_file(filegraph, fmt, separator)

    # notifier lines
    di_notif = _get_notif(filename,
                          lst_lines,
                          notifier,
                          attributes,
                          fmt=fmt,
                          atypes=attributes_types)

    # get nodes attributes
    nattr_convertor = _gen_convert(di_notif["node_attributes"],
                                   di_notif["node_attr_types"],
                                   attributes_types=attributes_types)
    di_nattributes = _get_node_attr(di_notif,
                                    separator,
                                    fmt=fmt,
                                    lines=lst_lines,
                                    convertor=nattr_convertor)

    # make edges and attributes
    eattributes = di_notif["edge_attributes"]
    di_eattributes = {name: [] for name in eattributes}
    eattr_convertor = _gen_convert(di_notif["edge_attributes"],
                                   di_notif["edge_attr_types"],
                                   attributes_types=attributes_types)

    # process file
    edges = di_get_edges[fmt](lst_lines,
                              eattributes,
                              ignore,
                              notifier,
                              separator,
                              secondary,
                              di_attributes=di_eattributes,
                              convertor=eattr_convertor,
                              di_notif=di_notif)

    if cleanup:
        edges = np.array(edges) - np.min(edges)

    # add missing size information if necessary
    if "size" not in di_notif:
        di_notif["size"] = int(np.max(edges)) + 1

    # check whether a shape is present
    if 'shape' in di_notif:
        if _shapely_support:
            min_x, max_x = float(di_notif['min_x']), float(di_notif['max_x'])
            unit = di_notif['unit']
            shape = Shape.from_wkt(di_notif['shape'],
                                   min_x=min_x,
                                   max_x=max_x,
                                   unit=unit)
            # load areas
            try:
                def_areas = ast.literal_eval(di_notif['default_areas'])
                def_areas_prop = ast.literal_eval(
                    di_notif['default_areas_prop'])

                for k in def_areas:
                    p = {key: float(v) for key, v in def_areas_prop[k].items()}
                    if "default_area" in k:
                        shape._areas["default_area"]._prop.update(p)
                        shape._areas["default_area"].height = p["height"]
                    else:
                        a = Shape.from_wkt(def_areas[k], unit=unit)
                        shape.add_area(a,
                                       height=p["height"],
                                       name=k,
                                       properties=p)

                ndef_areas = ast.literal_eval(di_notif['non_default_areas'])
                ndef_areas_prop = ast.literal_eval(
                    di_notif['non_default_areas_prop'])
                for i in ndef_areas:
                    p = {k: float(v) for k, v in ndef_areas_prop[i].items()}
                    a = Shape.from_wkt(ndef_areas[i], unit=unit)
                    shape.add_area(a, height=p["height"], name=i, properties=p)
            except KeyError:
                # backup compatibility with older versions
                pass
        else:
            _log_message(
                logger, "WARNING",
                'A Shape object was present in the file but could '
                'not be loaded because Shapely is not installed.')

    # check whether a structure is present
    if 'structure' in di_notif:
        str_enc = di_notif['structure'].replace('~', '\n').encode()
        str_dec = codecs.decode(str_enc, "base64")
        try:
            struct = pickle.loads(str_dec)
        except UnicodeError:
            struct = pickle.loads(str_dec, encoding="latin1")

    if 'x' in di_notif:
        x = np.fromstring(di_notif['x'], sep=separator)
        y = np.fromstring(di_notif['y'], sep=separator)
        if 'z' in di_notif:
            z = np.fromstring(di_notif['z'], sep=separator)
            positions = np.array((x, y, z)).T
        else:
            positions = np.array((x, y)).T

    return (di_notif, edges, di_nattributes, di_eattributes, struct, shape,
            positions)
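
To make the notifier mechanism concrete, here is an illustrative (not normative) edge-list file such a loader could be pointed at: `@` lines carry the graph metadata, `#` lines are ignored, and each remaining line holds a `source target` pair with attributes after the secondary separator. The exact layout is assumed for the example and may differ from NNGT's real output:

# layout assumed for illustration; attribute placement follows the
# `separator`/`secondary` arguments
demo = """\
@directed=True
@name=demo
@size=3
@attributes=['weight']
# comment lines starting with '#' are skipped
0 1;0.5
1 2;1.5
"""
with open("demo_edge_list.txt", "w") as f:
    f.write(demo)
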
Example #13
def plot_activity(gid_recorder=None,
                  record=None,
                  network=None,
                  gids=None,
                  axis=None,
                  show=False,
                  limits=None,
                  histogram=False,
                  title=None,
                  fignum=None,
                  label=None,
                  sort=None,
                  average=False,
                  normalize=1.,
                  decimate=None,
                  transparent=True,
                  kernel_center=0.,
                  kernel_std=None,
                  resolution=None,
                  cut_gaussian=5.,
                  **kwargs):
    '''
    Plot the monitored activity.

    .. versionchanged:: 1.2
        Switched `hist` to `histogram` and default value to False.

    .. versionchanged:: 1.0.1
        Added `axis` parameter, restored missing `fignum` parameter.

    Parameters
    ----------
    gid_recorder : tuple or list of tuples, optional (default: None)
        The gids of the recording devices. If None, then all existing
        spike_recs are used.
    record : tuple or list, optional (default: None)
        List of the monitored variables for each device. If `gid_recorder` is
        None, record can also be None and only spikes are considered.
    network : :class:`~nngt.Network` or subclass, optional (default: None)
        Network whose activity will be monitored.
    gids : tuple, optional (default: None)
        NEST gids of the neurons which should be monitored.
    axis : matplotlib axis object, optional (default: new one)
        Axis that should be used to plot the activity. This takes precedence
        over `fignum`.
    show : bool, optional (default: False)
        Whether to show the plot right away or to wait for the next plt.show().
    histogram : bool, optional (default: False)
        Whether to display the histogram when plotting spikes rasters.
    limits : tuple, optional (default: None)
        Time limits of the plot (if not specified, times of first and last
        spike for raster plots).
    title : str, optional (default: None)
        Title of the plot.
    fignum : int, or dict, optional (default: None)
        Plot the activity on an existing figure (from ``figure.number``). This
        parameter is ignored if `axis` is provided.
    label : str or list, optional (default: None)
        Add labels to the plot (one per recorder).
    sort : str or list, optional (default: None)
        Sort neurons using a topological property ("in-degree", "out-degree",
        "total-degree" or "betweenness"), an activity-related property
        ("firing_rate" or neuronal property) or a user-defined list of sorted
        neuron ids. Sorting is performed by increasing value of the `sort`
        property from bottom to top inside each group.
    normalize : float or list, optional (default: 1.)
        Normalize the recorded results by a given float. If a list is provided,
        there should be one entry per voltmeter or multimeter in the recorders.
        If the recording was done through `monitor_groups`, the population can
        be passed to normalize the data by the number of nodes in each group.
    decimate : int or list of ints, optional (default: None)
        Represent only a fraction of the spiking neurons; only one neuron in
        `decimate` will be represented (e.g. setting `decimate` to 5 will lead
        to only 20% of the neurons being represented). If a list is provided,
        it must have one entry per NeuralGroup in the population.
    kernel_center : float, optional (default: 0.)
        Temporal shift of the Gaussian kernel, in ms (for the histogram).
    kernel_std : float, optional (default: 0.5% of simulation time)
        Characteristic width of the Gaussian kernel (standard deviation) in ms
        (for the histogram).
    resolution : float or array, optional (default: `0.5*kernel_std`)
        The resolution at which the firing rate values will be computed.
        Choosing a value smaller than `kernel_std` is strongly advised.
        If `resolution` is an array, it will be considered as the times at
        which the firing rate should be computed (for the histogram).
    cut_gaussian : float, optional (default: 5.)
        Range over which the Gaussian will be computed (for the histogram).
        By default, we consider the 5-sigma range. Decreasing this value will
        increase speed at the cost of lower fidelity; increasing it will
        increase the fidelity at the cost of speed.
    **kwargs : dict
        "color" and "alpha" values can be overriden here.

    Warning
    -------
    Sorting with "firing_rate" only works if NEST gids form a continuous
    integer range.

    Returns
    -------
    lines : list of lists of :class:`matplotlib.lines.Line2D`
        Lines containing the data that was plotted, grouped by figure.
    '''
    import matplotlib.pyplot as plt
    recorders = _get_nest_gids([])
    lst_labels, lines, axes, labels = [], {}, {}, {}

    # normalize recorders and recordables
    if gid_recorder is not None:
        assert record is not None, "`record` must also be provided."
        if len(record) != len(gid_recorder):
            raise InvalidArgument('`record` must either be the same for all '
                                  'recorders, or contain one entry per '
                                  'recorder in `gid_recorder`')
        for rec in gid_recorder:
            if nest_version == 3:
                recorders = _get_nest_gids(gid_recorder)
            else:
                if isinstance(gid_recorder[0], tuple):
                    recorders.append(rec)
                else:
                    recorders.append((rec, ))
    else:
        prop = {'model': spike_rec}
        if nest_version == 3:
            recorders = nest.GetNodes(properties=prop)
        else:
            recorders = [(gid, )
                         for gid in nest.GetNodes((0, ), properties=prop)[0]]

        record = tuple("spikes" for _ in range(len(recorders)))

    # get gids and groups
    gids = network.nest_gids if (gids is None and network is not None) \
           else gids

    if gids is None:
        gids = []

        for rec in recorders:
            gids.extend(nest.GetStatus(rec)[0]["events"]["senders"])

        gids = np.unique(gids)

    num_group = 1 if network is None else len(network.population)
    num_lines = max(num_group, len(recorders))

    # sorting
    sorted_neurons = np.array([])

    if len(gids):
        sorted_neurons = np.arange(np.max(gids) +
                                   1).astype(int) - np.min(gids) + 1

    attr = None

    if sort is not None:
        assert network is not None, "`network` is required for sorting."
        if nonstring_container(sort):
            attr = sort
            sorted_neurons = _sort_neurons(attr, gids, network)
            sort = "user defined sort"
        else:
            data = None
            if sort.lower() in ("firing_rate", "b2"):  # get senders
                data = [[], []]
                for rec in recorders:
                    info = nest.GetStatus(rec)[0]
                    if str(info["model"]) == spike_rec:
                        data[0].extend(info["events"]["senders"])
                        data[1].extend(info["events"]["times"])
                data = np.array(data).T
            sorted_neurons, attr = _sort_neurons(sort,
                                                 gids,
                                                 network,
                                                 data=data,
                                                 return_attr=True)
    elif network is not None and network.is_spatial():
        sorted_neurons, attr = _sort_neurons("space",
                                             gids,
                                             network,
                                             data=None,
                                             return_attr=True)

    # spikes plotting
    colors = palette_discrete(np.linspace(0, 1, num_lines))
    num_raster, num_detec, num_meter = 0, 0, 0
    fignums = fignum if isinstance(fignum, dict) else {}
    decim = []
    if decimate is None:
        decim = [None for _ in range(num_lines)]
    elif is_integer(decimate):
        decim = [decimate for _ in range(num_lines)]
    elif nonstring_container(decimate):
        assert len(decimate) == num_lines, "`decimate` should have one " +\
                                           "entry per plot."
        decim = decimate
    else:
        raise AttributeError(
            "`decimate` must be either an int or a list of `int`.")

    # set labels
    if label is None:
        lst_labels = [None for _ in range(len(recorders))]
    else:
        if isinstance(label, str):
            lst_labels = [label]
        else:
            lst_labels = label
        if len(lst_labels) != len(recorders):
            _log_message(
                logger, "WARNING",
                'Incorrect length for `label`: expecting {} but got '
                '{}.\nIgnoring.'.format(len(recorders), len(lst_labels)))
            lst_labels = [None for _ in range(len(recorders))]

    datasets = []
    max_time = 0.

    for rec in recorders:
        info = nest.GetStatus(rec)[0]

        if len(info["events"]["times"]):
            max_time = max(max_time, np.max(info["events"]["times"]))

        datasets.append(info)

    if kernel_std is None:
        kernel_std = max_time * 0.005

    if resolution is None:
        resolution = 0.5 * kernel_std

    # plot
    for info, var, lbl in zip(datasets, record, lst_labels):
        fnum = fignums.get(info["model"], fignum)
        if info["model"] not in labels:
            labels[info["model"]] = []
            lines[info["model"]] = []

        if str(info["model"]) == spike_rec:
            if spike_rec in axes:
                axis = axes[spike_rec]
            c = colors[num_raster]
            times, senders = info["events"]["times"], info["events"]["senders"]
            sorted_ids = sorted_neurons[senders]
            l = raster_plot(times,
                            sorted_ids,
                            color=c,
                            show=False,
                            limits=limits,
                            sort=sort,
                            fignum=fnum,
                            axis=axis,
                            decimate=decim[num_raster],
                            sort_attribute=attr,
                            network=network,
                            histogram=histogram,
                            transparent=transparent,
                            hist_ax=axes.get('histogram', None),
                            kernel_center=kernel_center,
                            kernel_std=kernel_std,
                            resolution=resolution,
                            cut_gaussian=cut_gaussian)
            num_raster += 1
            if l:
                fig_raster = l[0].figure.number
                fignums[spike_rec] = fig_raster
                axes[spike_rec] = l[0].axes
                labels[spike_rec].append(lbl)
                lines[spike_rec].extend(l)
                if histogram:
                    axes['histogram'] = l[1].axes
        elif "detector" in str(info["model"]):
            c = colors[num_detec]
            times, senders = info["events"]["times"], info["events"]["senders"]
            sorted_ids = sorted_neurons[senders]
            l = raster_plot(times,
                            sorted_ids,
                            fignum=fnum,
                            color=c,
                            axis=axis,
                            show=False,
                            histogram=histogram,
                            limits=limits,
                            kernel_center=kernel_center,
                            kernel_std=kernel_std,
                            resolution=resolution,
                            cut_gaussian=cut_gaussian)
            if l:
                fig_detect = l[0].figure.number
                num_detec += 1
                fignums[info["model"]] = fig_detect
                labels[info["model"]].append(lbl)
                lines[info["model"]].extend(l)
                if histogram:
                    axes['histogram'] = l[1].axes
        else:
            da_time = info["events"]["times"]
            # prepare axis setup
            fig = None
            if axis is None:
                fig = plt.figure(fnum)
                fignums[info["model"]] = fig.number
            else:
                fig = axis.get_figure()
            lines_tmp, labels_tmp = [], []
            if nonstring_container(var):
                m_colors = palette_discrete(np.linspace(0, 1, len(var)))
                axes = fig.axes
                if axis is not None:
                    # multiple y axes on a single subplot, adapted from
                    # https://matplotlib.org/examples/pylab_examples/
                    # multiple_yaxis_with_spines.html
                    axes = [axis]
                    axis.name = var[0]
                    if len(var) > 1:
                        axes.append(axis.twinx())
                        axes[-1].name = var[1]
                    if len(var) > 2:
                        fig.subplots_adjust(right=0.75)
                        for i, name in zip(range(len(var) - 2), var[2:]):
                            new_ax = axis.twinx()
                            new_ax.spines["right"].set_position(
                                ("axes", 1.2 * (i + 1)))
                            axes.append(new_ax)
                            _make_patch_spines_invisible(new_ax)
                            new_ax.spines["right"].set_visible(True)
                            axes[-1].name = name
                if not axes:
                    axes = _set_new_plot(fig.number, names=var)[1]
                labels_tmp = [lbl for _ in range(len(var))]
                for subvar, c in zip(var, m_colors):
                    c = kwargs.get('color', c)
                    alpha = kwargs.get('alpha', 1)
                    for ax in axes:
                        if ax.name == subvar:
                            da_subvar = info["events"][subvar]
                            if isinstance(normalize, nngt.NeuralPop):
                                da_subvar /= normalize[num_meter].size
                            elif nonstring_container(normalize):
                                da_subvar /= normalize[num_meter]
                            elif normalize is not None:
                                da_subvar /= normalize
                            lines_tmp.extend(
                                ax.plot(da_time,
                                        da_subvar,
                                        color=c,
                                        alpha=alpha))
                            ax.set_ylabel(subvar)
                            ax.set_xlabel("time")
                            if limits is not None:
                                ax.set_xlim(limits[0], limits[1])
            else:
                num_axes, ax = len(fig.axes), axis
                if axis is None:
                    ax = fig.add_subplot(num_axes + 1, 1, num_axes + 1)
                da_var = info["events"][var]
                c = kwargs.get('color', None)
                alpha = kwargs.get('alpha', 1)
                lines_tmp.extend(
                    ax.plot(da_time, da_var / normalize, color=c, alpha=alpha))
                labels_tmp.append(lbl)
                ax.set_ylabel(var)
                ax.set_xlabel("time")
            labels[info["model"]].extend(labels_tmp)
            lines[info["model"]].extend(lines_tmp)
            num_meter += 1

    if spike_rec in axes:
        ax = axes[spike_rec]

        if limits is not None:
            ax.set_xlim(limits[0], limits[1])
        else:
            t_min, t_max, idx_min, idx_max = np.inf, -np.inf, np.inf, -np.inf

            for l in ax.lines:
                t_max = max(np.max(l.get_xdata()), t_max)
                t_min = min(np.min(l.get_xdata()), t_min)
                idx_min = min(np.min(l.get_ydata()), idx_min)
                idx_max = max(np.max(l.get_ydata()), idx_max)

            dt = t_max - t_min
            didx = idx_max - idx_min
            pc = 0.02

            if not np.any(np.isinf((t_max, t_min))):
                ax.set_xlim([t_min - pc * dt, t_max + pc * dt])

            if not np.any(np.isinf((idx_min, idx_max))):
                ax.set_ylim([idx_min - pc * didx, idx_max + pc * didx])

    for recorder in fignums:
        fig = plt.figure(fignums[recorder])
        if title is not None:
            fig.suptitle(title)
        if label is not None:
            fig.legend(lines[recorder], labels[recorder])

    if show:
        plt.show()

    return lines
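
A heavily abridged usage sketch; it assumes a working NEST installation and that the NNGT helpers keep the names used here (none of them is defined in the snippet above):

import nest
import nngt

# hedged sketch: apart from plot_activity (defined above), the helpers used
# here are assumptions about the NNGT/NEST API and may differ between versions
pop = nngt.NeuralPop.uniform(100)
net = nngt.generation.erdos_renyi(nodes=100, avg_deg=10, population=pop)
net.to_nest()
recorders, records = nngt.simulation.monitor_nodes(net.nest_gids)
nest.Simulate(1000.)  # ms
plot_activity(recorders, records, network=net, histogram=True, show=True)
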
Example #14
def raster_plot(times,
                senders,
                limits=None,
                title="Spike raster",
                histogram=False,
                num_bins=1000,
                color="b",
                decimate=None,
                axis=None,
                fignum=None,
                label=None,
                show=True,
                sort=None,
                sort_attribute=None,
                network=None,
                transparent=True,
                kernel_center=0.,
                kernel_std=30.,
                resolution=None,
                cut_gaussian=5.,
                **kwargs):
    """
    Plotting routine that constructs a raster plot along with
    an optional histogram.

    .. versionchanged:: 1.2
        Switched `hist` to `histogram`.

    .. versionchanged:: 1.0.1
        Added `axis` parameter.

    Parameters
    ----------
    times : list or :class:`numpy.ndarray`
        Spike times.
    senders : list or :class:`numpy.ndarray`
        Index for the spiking neuron for each time in `times`.
    limits : tuple, optional (default: None)
        Time limits of the plot (if not specified, times of first and last
        spike).
    title : string, optional (default: 'Spike raster')
        Title of the raster plot.
    histogram : bool, optional (default: False)
        Whether to plot the raster's histogram.
    num_bins : int, optional (default: 1000)
        Number of bins for the histogram.
    color : string or float, optional (default: 'b')
        Color of the plot lines and markers.
    decimate : int, optional (default: None)
        Represent only a fraction of the spiking neurons; only one neuron in
        `decimate` will be represented (e.g. setting `decimate` to 10 will lead
        to only 10% of the neurons being represented).
    axis : matplotlib axis object, optional (default: new one)
        Axis that should be used to plot the activity.
    fignum : int, optional (default: None)
        Id of another raster plot to which the new data should be added.
    label : str, optional (default: None)
        Label the current data.
    show : bool, optional (default: True)
        Whether to show the plot right away or to wait for the next plt.show().
    kernel_center : float, optional (default: 0.)
        Temporal shift of the Gaussian kernel, in ms.
    kernel_std : float, optional (default: 30.)
        Characteristic width of the Gaussian kernel (standard deviation) in ms.
    resolution : float or array, optional (default: `0.1*kernel_std`)
        The resolution at which the firing rate values will be computed.
        Choosing a value smaller than `kernel_std` is strongly advised.
        If `resolution` is an array, it will be considered as the times at
        which the firing rate should be computed.
    cut_gaussian : float, optional (default: 5.)
        Range over which the Gaussian will be computed (for the histogram).
        By default, we consider the 5-sigma range. Decreasing this value will
        increase speed at the cost of lower fidelity; increasing it will
        increase the fidelity at the cost of speed.

    Returns
    -------
    lines : list of :class:`matplotlib.lines.Line2D`
        Lines containing the data that was plotted.
    """
    import matplotlib.pyplot as plt

    lines = []

    mpl_kwargs = {k: v for k, v in kwargs.items() if k != 'hist_ax'}

    if label is not None:
        mpl_kwargs['label'] = label

    # decimate if necessary
    if decimate is not None:
        idx_keep = np.where(np.mod(senders, decimate) == 0)[0]
        senders = senders[idx_keep]
        times = times[idx_keep]

    if len(times):
        if axis is not None:
            fig = axis.get_figure()
        else:
            fig = plt.figure(fignum)
        if transparent:
            fig.patch.set_visible(False)
        ylabel = "Neuron ID"
        xlabel = "Time (ms)"

        delta_t = 0.01 * (times[-1] - times[0])

        if histogram:
            ax1, ax2 = None, None
            if kwargs.get("hist_ax", None) is None:
                num_axes = len(fig.axes)
                for i, old_ax in enumerate(fig.axes):
                    old_ax.change_geometry(num_axes + 2, 1, i + 1)
                ax1 = fig.add_subplot(num_axes + 2, 1, num_axes + 1)
                ax2 = fig.add_subplot(num_axes + 2,
                                      1,
                                      num_axes + 2,
                                      sharex=ax1)
            else:
                ax1 = axis
                ax2 = kwargs["hist_ax"]

            if limits is not None:
                start, stop = limits

                keep = (times >= start) & (times <= stop)
                times = times[keep]
                senders = senders[keep]

            lines.extend(
                ax1.plot(times,
                         senders,
                         c=color,
                         marker="o",
                         linestyle='None',
                         mec="k",
                         mew=0.5,
                         ms=4,
                         **mpl_kwargs))

            ax1_lines = ax1.lines

            if len(ax1_lines) > 1:
                t_max = max(ax1_lines[0].get_xdata().max(), times[-1])
                ax1.set_xlim([-delta_t, t_max + delta_t])

            ax1.set_ylabel(ylabel)

            if limits is not None:
                ax1.set_xlim(*limits)

            fr, fr_times = total_firing_rate(data=np.array([senders, times]).T,
                                             kernel_center=kernel_center,
                                             kernel_std=kernel_std,
                                             resolution=resolution,
                                             cut_gaussian=cut_gaussian)

            hist_lines = ax2.get_lines()

            if hist_lines:
                data = hist_lines[-1].get_data()
                bottom = data[1]
                if limits is None:
                    dt = fr_times[1] - fr_times[0]
                    old_times = data[0]
                    old_start = int(old_times[0] / dt)
                    new_start = int(fr_times[0] / dt)
                    old_end = int(old_times[-1] / dt)
                    new_end = int(fr_times[-1] / dt)
                    diff_start = new_start - old_start
                    diff_end = new_end - old_end
                    if diff_start > 0:
                        bottom = bottom[diff_start:]
                    else:
                        bottom = np.concatenate(
                            (np.zeros(-diff_start), bottom))
                    if diff_end > 0:
                        bottom = np.concatenate((bottom, np.zeros(diff_end)))
                    else:
                        bottom = bottom[:diff_end - 1]
                    b_len, h_len = len(bottom), len(fr)
                    if b_len > h_len:
                        bottom = bottom[:h_len]
                    elif b_len < h_len:
                        bottom = np.concatenate(
                            (bottom, np.zeros(h_len - b_len)))
                else:
                    bottom = bottom[:-1]

                ax2.fill_between(fr_times, fr + bottom, bottom, color=color)
                lines.extend(ax2.plot(fr_times, fr + bottom, ls="", marker=""))
            else:
                ax2.fill_between(fr_times, fr, 0., color=color)
                lines.extend(ax2.plot(fr_times, fr, ls="", marker=""))

            ax2.set_ylabel("Rate (Hz)")
            ax2.set_xlabel(xlabel)
            ax2.set_xlim(ax1.get_xlim())
            _second_axis(sort, sort_attribute, ax1)
        else:
            if axis is not None:
                ax = axis
            else:
                num_axes = len(fig.axes)
                for i, old_ax in enumerate(fig.axes):
                    old_ax.change_geometry(num_axes + 1, 1, i + 1)
                ax = fig.add_subplot(num_axes + 1, 1, num_axes + 1)

            if limits is not None:
                start, stop = limits

                keep = (times >= start) & (times <= stop)
                times = times[keep]
                senders = senders[keep]

            if network is not None:
                pop = network.population
                colors = palette_discrete(np.linspace(0, 1, len(pop)))
                mm = itertools.cycle(markers)
                for m, (k, v), c in zip(mm, pop.items(), colors):
                    keep = np.where(np.in1d(senders,
                                            network.nest_gids[v.ids]))[0]
                    if len(keep):
                        if label is None:
                            mpl_kwargs['label'] = k
                        lines.extend(
                            ax.plot(times[keep],
                                    senders[keep],
                                    c=c,
                                    marker=m,
                                    ls='None',
                                    mec='k',
                                    mew=0.5,
                                    ms=4,
                                    **mpl_kwargs))
            else:
                lines.extend(
                    ax.plot(times,
                            senders,
                            c=color,
                            marker="o",
                            linestyle='None',
                            mec="k",
                            mew=0.5,
                            ms=4,
                            **mpl_kwargs))

            ax.set_ylabel(ylabel)
            ax.set_xlabel(xlabel)

            if limits is not None:
                ax.set_xlim(limits)
            else:
                _set_ax_lims(ax, np.max(times), np.min(times), np.max(senders),
                             np.min(senders))

            if label is not None:
                ax.legend(bbox_to_anchor=(1.1, 1.2))
            _second_axis(sort, sort_attribute, ax)

        fig.suptitle(title)

        if show:
            plt.show()
    else:
        _log_message(logger, "WARNING",
                     "No activity was detected during the simulation.")

    return lines
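
The histogram panel shows a kernel-smoothed firing rate rather than a plain binned count (the real computation lives in `total_firing_rate`, which is not part of this snippet). A self-contained numpy sketch of that kind of estimate:

import numpy as np

def smoothed_rate(spike_times, kernel_std=30., resolution=3., num_neurons=1):
    '''Gaussian-kernel estimate of the population firing rate, in Hz.'''
    t = np.arange(spike_times.min(), spike_times.max() + resolution,
                  resolution)
    rate = np.zeros_like(t)
    for s in spike_times:
        rate += np.exp(-0.5 * ((t - s) / kernel_std) ** 2)
    # each Gaussian integrates to kernel_std*sqrt(2*pi) ms, i.e. one spike
    rate /= kernel_std * np.sqrt(2 * np.pi)
    return t, 1000. * rate / num_neurons  # convert ms^-1 to Hz, per neuron

t, fr = smoothed_rate(np.sort(np.random.uniform(0., 1000., 500)),
                      num_neurons=10)
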
Example #15
    def new_edge(self,
                 source,
                 target,
                 attributes=None,
                 ignore=False,
                 self_loop=False):
        '''
        Adding a connection to the graph, with optional properties.

        .. versionchanged:: 2.0
            Added `self_loop` argument to enable adding self-loops.

        Parameters
        ----------
        source : :class:`int/node`
            Source node.
        target : :class:`int/node`
            Target node.
        attributes : :class:`dict`, optional (default: ``{}``)
            Dictionary containing optional edge properties. If the graph is
            weighted, defaults to ``{"weight": 1.}``, the unit weight for the
            connection (synaptic strength in NEST).
        ignore : bool, optional (default: False)
            If set to True, ignore attempts to add an existing edge and accept
            self-loops; otherwise an error is raised.
        self_loop : bool, optional (default: False)
            Whether to allow self-loops or not.

        Returns
        -------
        The new connection or None if nothing was added.
        '''
        g = self._graph

        attributes = {} if attributes is None else deepcopy(attributes)

        # set default values for attributes that were not passed
        _set_default_edge_attributes(self, attributes, num_edges=1)

        # check that the edge does not already exist
        edge = (source, target)

        if source not in g._nodes:
            raise InvalidArgument("There is no node {}.".format(source))

        if target not in g._nodes:
            raise InvalidArgument("There is no node {}.".format(target))

        if source == target:
            if not ignore and not self_loop:
                raise InvalidArgument("Trying to add a self-loop.")
            elif ignore:
                _log_message(logger, "INFO",
                             "Self-loop on {} ignored.".format(source))

                return None

        if (g._directed and edge not in g._unique) or edge not in g._edges:
            edge_id = self._max_eid
            # ~ edge_id             = len(g._unique)
            g._unique[edge] = edge_id
            g._out_deg[source] += 1
            g._in_deg[target] += 1

            self._max_eid += 1

            # check distance
            _set_dist_new_edges(attributes, self, [edge])

            # attributes
            self._attr_new_edges([(source, target)], attributes=attributes)

            if not g._directed:
                # edges and unique are different objects, so update _edges
                g._edges[edge] = edge_id
                # add reciprocal
                e_recip = (target, source)
                g._edges[e_recip] = edge_id
                g._out_deg[target] += 1
                g._in_deg[source] += 1
        else:
            if not ignore:
                raise InvalidArgument("Trying to add existing edge.")

            _log_message(logger, "INFO", "Existing edge {} ignored.".format(
                (source, target)))

        return edge
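
A short usage sketch of the behaviour encoded above, assuming the standard `nngt.Graph` constructor (which is not part of this snippet):

import nngt

g = nngt.Graph(nodes=3)
g.new_edge(0, 1, attributes={"weight": 2.})
g.new_edge(0, 1, ignore=True)     # duplicate edge: logged and skipped
g.new_edge(2, 2, self_loop=True)  # self-loop explicitly allowed
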
Example #16
def _as_string(graph, fmt="neighbour", separator=" ", secondary=";",
              attributes=None, notifier="@", return_info=False):
    '''
    Full string representation of the graph.

    .. versionchanged:: 0.7
        Added support to write position and Shape when saving
        :class:`~nngt.SpatialGraph`. Note that saving Shape requires shapely.

    Parameters
    ----------
    graph : :class:`~nngt.Graph` or subclass
        Graph to save.
    fmt : str, optional (default: "neighbour")
        The format used to save the graph. Supported formats are: "neighbour"
        (neighbour list, default if format cannot be deduced automatically),
        "ssp" (:mod:`scipy.sparse`), "edge_list" (list of all the edges in the 
        graph, one edge per line, represented by a ``source target``-pair), 
        "gml" (gml format, default if `filename` ends with '.gml'), "graphml"
        (graphml format, default if `filename` ends with '.graphml' or '.xml'),
        "dot" (dot format, default if `filename` ends with '.dot'), "gt" (only
        when using `graph_tool <http://graph-tool.skewed.de/>`_ as library,
        detected if `filename` ends with '.gt').
    separator : str, optional (default " ")
        separator used to separate inputs in the case of custom formats (namely
        "neighbour" and "edge_list")
    secondary : str, optional (default: ";")
        Secondary separator used to separate attributes in the case of custom
        formats.
    attributes : list, optional (default: all)
        List of names for the edge attributes present in the graph that will be
        saved to disk; by default, all attributes will be saved.
    notifier : str, optional (default: "@")
        Symbol specifying the following as meaningful information. Relevant
        information is formatted ``@info_name=info_value``, with
        ``info_name`` in ("attributes", "attr_types", "directed", "name",
        "size").
        Additional notifiers are ``@type=SpatialGraph/Network/SpatialNetwork``,
        which are followed by the relevant notifiers among ``@shape``, ``@x``,
        ``@y``, ``@z``, ``@population``, and ``@graph`` to separate the
        sections.

    Returns
    -------
    str_graph : string
        The full graph representation as a string.
    '''
    # checks
    if separator == secondary:
        raise InvalidArgument("`separator` and `secondary` strings must be "
                              "different.")
    if notifier == separator or notifier == secondary:
        raise InvalidArgument("`notifier` string should differ from "
                              "`separator` and `secondary`.")
    # temporarily disable numpy cut threshold to save string
    old_threshold = np.get_printoptions()['threshold']
    np.set_printoptions(threshold=np.NaN)
    # data
    if attributes is None:
        attributes = [a for a in graph.edges_attributes if a != "bweight"]
    nattributes = [a for a in graph.nodes_attributes]
    additional_notif = {
        "directed": graph._directed,
        "node_attributes": nattributes,
        "node_attr_types": [
            graph.get_attribute_type(nattr, "node") for nattr in nattributes
        ],
        "edge_attributes": attributes,
        "edge_attr_types": [
            graph.get_attribute_type(attr, "edge") for attr in attributes
        ],
        "name": graph.get_name(),
        "size": graph.node_nb()
    }
    # add node attributes to the notifications
    for nattr in additional_notif["node_attributes"]:
        key                   = "na_" + nattr
        additional_notif[key] = np.array2string(
                graph.get_node_attributes(name=nattr), max_line_width=np.NaN,
                separator=separator)[1:-1]
        # ~ additional_notif[key] = codecs.encode(
            # ~ graph.get_node_attributes(name=nattr).tobytes(),
            # ~ "base64").decode().replace('\n', '~')
    # save positions for SpatialGraph (and shape if Shapely is available)
    if graph.is_spatial():
        if _shapely_support:
            additional_notif['shape'] = graph.shape.wkt
            additional_notif['unit'] = graph.shape.unit
            min_x, min_y, max_x, max_y = graph.shape.bounds
            additional_notif['min_x'] = min_x
            additional_notif['max_x'] = max_x
        else:
            _log_message(logger, "WARNING",
                         'The `shape` attribute of the graph could not be '
                         'saved to file because Shapely is not installed.')
        pos = graph.get_positions()
        additional_notif['x'] = np.array2string(
            pos[:, 0], max_line_width=np.NaN, separator=separator)[1:-1]
        additional_notif['y'] = np.array2string(
            pos[:, 1], max_line_width=np.NaN, separator=separator)[1:-1]
        if pos.shape[1] == 3:
            additional_notif['z'] = np.array2string(
                pos[:, 2], max_line_width=np.NaN, separator=separator)[1:-1]

    if graph.is_network():
        if nngt.get_config("mpi"):
            if nngt.get_config("mpi_comm").Get_rank() == 0:
                additional_notif["population"] = codecs.encode(
                    pickle.dumps(graph.population, protocol=2),
                                 "base64").decode().replace('\n', '~')
        else:
            additional_notif["population"] = codecs.encode(
                pickle.dumps(graph.population, protocol=2),
                             "base64").decode().replace('\n', '~')

    str_graph = di_format[fmt](graph, separator=separator,
                               secondary=secondary, attributes=attributes)

    # set numpy cut threshold back on
    np.set_printoptions(threshold=old_threshold)

    if return_info:
        return str_graph, additional_notif
    else:
        return str_graph
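
The `additional_notif` entries end up as an `@info_name=info_value` header block in the saved string; an illustrative sketch of that translation (values invented for the example):

additional_notif = {"directed": True, "name": "demo", "size": 3,
                    "node_attributes": ["x", "y"]}
header = "\n".join("@{}={}".format(k, v) for k, v in additional_notif.items())
print(header)
# @directed=True
# @name=demo
# @size=3
# @node_attributes=['x', 'y']
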
Example #17
def _as_string(graph,
               fmt="neighbour",
               separator=" ",
               secondary=";",
               attributes=None,
               notifier="@",
               return_info=False):
    '''
    Full string representation of the graph.

    Parameters
    ----------
    graph : :class:`~nngt.Graph` or subclass
        Graph to save.
    fmt : str, optional (default: "neighbour")
        The format used to save the graph. Supported formats are: "neighbour"
        (neighbour list, default if format cannot be deduced automatically),
        "ssp" (:mod:`scipy.sparse`), "edge_list" (list of all the edges in the
        graph, one edge per line, represented by a ``source target``-pair),
        "gml" (gml format, default if `filename` ends with '.gml'), "graphml"
        (graphml format, default if `filename` ends with '.graphml' or '.xml'),
        "dot" (dot format, default if `filename` ends with '.dot'), "gt" (only
        when using `graph_tool`<http://graph-tool.skewed.de/>_ as library,
        detected if `filename` ends with '.gt').
    separator : str, optional (default " ")
        separator used to separate inputs in the case of custom formats (namely
        "neighbour" and "edge_list")
    secondary : str, optional (default: ";")
        Secondary separator used to separate attributes in the case of custom
        formats.
    attributes : list, optional (default: all)
        List of names for the edge attributes present in the graph that will be
        saved to disk; by default, all attributes will be saved.
    notifier : str, optional (default: "@")
        Symbol marking the lines that carry meaningful information. Such
        information is formatted ``@info_name=info_value``, with
        ``info_name`` in ("attributes", "attr_types", "directed", "name",
        "size").
        Additional notifiers are ``@type=SpatialGraph/Network/SpatialNetwork``,
        which are followed by the relevant notifiers among ``@shape``, ``@x``,
        ``@y``, ``@z``, ``@structure``, and ``@graph`` to separate the
        sections.

    Returns
    -------
    str_graph : string
        The full graph representation as a string. If `return_info` is True,
        the dictionary of notifier entries is also returned alongside the
        graph string.
    '''
    # checks
    if separator == secondary and fmt != "edge_list":
        raise InvalidArgument("`separator` and `secondary` strings must be "
                              "different.")

    if notifier == separator or notifier == secondary:
        raise InvalidArgument("`notifier` string should differ from "
                              "`separator` and `secondary`.")

    # temporarily disable numpy cut threshold to save string
    old_threshold = np.get_printoptions()['threshold']
    np.set_printoptions(threshold=sys.maxsize)

    # data
    if attributes is None:
        attributes = [a for a in graph.edge_attributes if a != "bweight"]

    nattributes = [a for a in graph.node_attributes]

    additional_notif = {
        "directed":
        graph.is_directed(),
        "node_attributes":
        nattributes,
        "node_attr_types":
        [graph.get_attribute_type(nattr, "node") for nattr in nattributes],
        "edge_attributes":
        attributes,
        "edge_attr_types":
        [graph.get_attribute_type(attr, "edge") for attr in attributes],
        "name":
        graph.name,
        "size":
        graph.node_nb()
    }

    # add node attributes to the notifications
    if fmt != "graphml":
        for nattr in additional_notif["node_attributes"]:
            key = "na_" + nattr

            tmp = np.array2string(graph.get_node_attributes(name=nattr),
                                  max_line_width=np.NaN,
                                  separator=separator)[1:-1].replace(
                                      "'" + separator + "'",
                                      '"' + separator + '"')

            # replace possible variants
            tmp = tmp.replace("'" + separator + '"', '"' + separator + '"')
            tmp = tmp.replace('"' + separator + "'", '"' + separator + '"')

            if tmp.startswith("'"):
                tmp = '"' + tmp[1:]

            if tmp.endswith("'"):
                tmp = tmp[:-1] + '"'

            # make and store final string
            additional_notif[key] = tmp

    # save positions for SpatialGraph (and shape if Shapely is available)
    if graph.is_spatial():
        if _shapely_support:
            additional_notif['shape'] = graph.shape.wkt
            additional_notif['default_areas'] = \
                {k: v.wkt for k, v in graph.shape.default_areas.items()}
            additional_notif['default_areas_prop'] = \
                {k: v.properties for k, v in graph.shape.default_areas.items()}
            additional_notif['non_default_areas'] = \
                {k: v.wkt for k, v in graph.shape.non_default_areas.items()}
            additional_notif['non_default_areas_prop'] = \
                {k: v.properties
                 for k, v in graph.shape.non_default_areas.items()}
            additional_notif['unit'] = graph.shape.unit
            min_x, min_y, max_x, max_y = graph.shape.bounds
            additional_notif['min_x'] = min_x
            additional_notif['max_x'] = max_x
        else:
            _log_message(
                logger, "WARNING",
                'The `shape` attribute of the graph could not be '
                'saved to file because Shapely is not installed.')

        pos = graph.get_positions()
        additional_notif['x'] = np.array2string(pos[:, 0],
                                                max_line_width=np.NaN,
                                                separator=separator)[1:-1]
        additional_notif['y'] = np.array2string(pos[:, 1],
                                                max_line_width=np.NaN,
                                                separator=separator)[1:-1]
        if pos.shape[1] == 3:
            additional_notif['z'] = np.array2string(pos[:, 2],
                                                    max_line_width=np.NaN,
                                                    separator=separator)[1:-1]

    if graph.structure is not None:
        # temporarily remove weakrefs
        graph.structure._parent = None
        for g in graph.structure.values():
            g._struct = None
            g._net = None
        # save as string
        if nngt.get_config("mpi"):
            if nngt.get_config("mpi_comm").Get_rank() == 0:
                additional_notif["structure"] = codecs.encode(
                    pickle.dumps(graph.structure, protocol=2),
                    "base64").decode().replace('\n', '~')
        else:
            additional_notif["structure"] = codecs.encode(
                pickle.dumps(graph.structure, protocol=2),
                "base64").decode().replace('\n', '~')
        # restore weakrefs
        graph.structure._parent = weakref.ref(graph)
        for g in graph.structure.values():
            g._struct = weakref.ref(graph.structure)
            g._net = weakref.ref(graph)

    str_graph = di_format[fmt](graph,
                               separator=separator,
                               secondary=secondary,
                               attributes=attributes,
                               additional_notif=additional_notif)

    # set numpy cut threshold back on
    np.set_printoptions(threshold=old_threshold)

    if return_info:
        return str_graph, additional_notif

    # format the info into the string
    info_str = format_graph_info[fmt](additional_notif, notifier, graph=graph)

    return info_str + str_graph
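For the custom formats, the entries gathered in `additional_notif` end up as ``@info_name=info_value`` header lines placed before the graph data. A minimal sketch of that layout (the exact rendering is done by `format_graph_info`; the values below are illustrative only):

notif = {"directed": True, "name": "Graph", "size": 100}
header = "".join("@{}={}\n".format(k, v) for k, v in notif.items())
# header reads:
# @directed=True
# @name=Graph
# @size=100
# ...and is followed by the edge data produced by di_format[fmt]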
Example No. 18
0
def small_world_propensity(g, directed=None, use_global_clustering=False,
                           use_diameter=False, weights=None,
                           combine_weights="mean", clustering="continuous",
                           lattice=None, random=None, return_deviations=False):
    r'''
    Returns the small-world propensity of the graph as first defined in
    [Muldoon2016]_.

    .. versionadded:: 2.0

    .. math::

        \phi = 1 - \sqrt{\frac{\Pi_{[0, 1]}(\Delta_C^2) + \Pi_{[0, 1]}(\Delta_L^2)}{2}}

    with :math:`\Delta_C` the clustering deviation, i.e. the relative global or
    average clustering of `g` compared to two reference graphs

    .. math::

        \Delta_C = \frac{C_{latt} - C_g}{C_{latt} - C_{rand}}

    and :math:`\Delta_L` the deviation of the average path length or diameter,
    i.e. the relative average path length of `g` compared to that of the
    reference graphs

    .. math::

        \Delta_L = \frac{L_g - L_{rand}}{L_{latt} - L_{rand}}.

    In both cases, *latt* and *rand* refer to the equivalent lattice and
    Erdos-Renyi (ER) graphs obtained by rewiring `g` to obtain respectively the
    highest and lowest combination of clustering and average path length.

    Both deviations are clipped to the [0, 1] range in case some graphs have a
    higher clustering than the lattice or a lower average path length than the
    ER graph.

    Parameters
    ----------
    g : :class:`~nngt.Graph` object
        Graph to analyze.
    directed : bool, optional (default: True)
        Whether to compute the directed clustering if the graph is directed.
        If False, then the graph is treated as undirected. The option switches
        to False automatically if `g` is undirected.
    use_global_clustering : bool, optional (default: False)
        If True, the global clustering coefficient is used instead of the
        average local clustering (the default).
    use_diameter : bool, optional (default: False)
        Use the diameter instead of the average path length to get more global
        information. This can also be much faster in some cases, especially
        when using graph-tool as the backend.
    weights : bool or str, optional (default: binary edges)
        Whether edge weights should be considered; if ``None`` or ``False``
        then use binary edges; if ``True``, uses the 'weight' edge attribute,
        otherwise uses any valid edge attribute required.
    combine_weights : str, optional (default: 'mean')
        How to combine the weights of reciprocal edges if the graph is directed
        but `directed` is set to False. It can be:

        * "sum": the sum of the edge attribute values will be used for the new
          edge.
        * "mean": the mean of the edge attribute values will be used for the
          new edge.
        * "min": the minimum of the edge attribute values will be used for the
          new edge.
        * "max": the maximum of the edge attribute values will be used for the
          new edge.
    clustering : str, optional (default: 'continuous')
        Method used to compute the weighted clustering coefficients, either
        'barrat' [Barrat2004]_, 'continuous' (recommended), or 'onnela'
        [Onnela2005]_.
    lattice : :class:`nngt.Graph`, optional (default: generated from `g`)
        Lattice to use as reference (since its generation is deterministic,
        passing it avoids regenerating it when running the algorithm several
        times with the same graph).
    random : :class:`nngt.Graph`, optional (default: generated from `g`)
        Random graph to use as reference. Can be useful for reproducibility or
        for very sparse graphs where ER algorithm would statistically lead to
        a disconnected graph.
    return_deviations : bool, optional (default: False)
        If True, the deviations are also returned, in addition to the
        small-world propensity.

    Note
    ----
    If `weights` are provided, the distance calculation uses the inverse of
    the weights.
    This implementation differs slightly from the `original implementation
    <https://github.com/KordingLab/nctpy>`_ as it can also use the global
    instead of the average clustering coefficient, the diameter instead of
    the average path length, and it is generalized to directed networks.

    References
    ----------
    .. [Muldoon2016] Muldoon, Bridgeford, Bassett. Small-World Propensity and
        Weighted Brain Networks. Sci Rep 2016, 6 (1), 22057.
        :doi:`10.1038/srep22057`, :arxiv:`1505.02194`.
    .. [Barrat2004] Barrat, Barthelemy, Pastor-Satorras, Vespignani. The
        Architecture of Complex Weighted Networks. PNAS 2004, 101 (11).
        :doi:`10.1073/pnas.0400087101`.
    .. [Onnela2005] Onnela, Saramäki, Kertész, Kaski. Intensity and Coherence
        of Motifs in Weighted Complex Networks. Phys. Rev. E 2005, 71 (6),
        065103. :doi:`10.1103/physreve.71.065103`, :arxiv:`cond-mat/0408629`.

    Returns
    -------
    phi : float in [0, 1]
        The small-world propensity.
    delta_l : float
        The average path-length deviation (if `return_deviations` is True).
    delta_c : float
        The clustering deviation (if `return_deviations` is True).

    See also
    --------
    :func:`nngt.analysis.average_path_length`
    :func:`nngt.analysis.diameter`
    :func:`nngt.analysis.global_clustering`
    :func:`nngt.analysis.local_clustering`
    :func:`nngt.generation.lattice_rewire`
    :func:`nngt.generation.random_rewire`
    '''
    # special case for too sparse (unconnected) graphs
    if g.edge_nb() < g.node_nb():
        if return_deviations:
            return np.NaN, np.NaN, np.NaN

        return np.NaN

    # check graph directedness
    directed = g.is_directed() if directed is None else directed

    if g.is_directed() and not directed:
        g = g.to_undirected(combine_weights)

    # rewired graph
    latt = ng.lattice_rewire(g, weight=weights) if lattice is None else lattice
    rand = ng.random_rewire(g) if random is None else random

    # compute average path-length using the inverse of the weights
    inv_w, inv_wl, inv_wr = None, None, None

    if weights not in (None, False):
        inv_w = 1 / g.edge_attributes[weights]
        inv_wl = 1 / latt.edge_attributes[weights]
        inv_wr = 1 / rand.edge_attributes[weights]

    l_latt, l_rand, l_g = None, None, None

    if use_diameter:
        l_latt = diameter(latt, directed=directed, weights=inv_wl)
        l_rand = diameter(rand, directed=directed, weights=inv_wr)
        l_g    = diameter(g, directed=directed, weights=inv_w)
    else:
        l_latt = average_path_length(latt, directed=directed, weights=inv_wl)
        l_rand = average_path_length(rand, directed=directed, weights=inv_wr)
        l_g    = average_path_length(g, directed=directed, weights=inv_w)

    # compute clustering
    c_latt, c_rand, c_g = None, None, None

    if use_global_clustering:
        c_latt = global_clustering(
            latt, directed=directed, weights=weights, method=clustering)

        c_rand = global_clustering(
            rand, directed=directed, weights=weights, method=clustering)

        c_g = global_clustering(
            g, directed=directed, weights=weights, method=clustering)
    else:
        c_latt = np.average(local_clustering(
            latt, directed=directed, weights=weights, method=clustering))

        c_rand = np.average(local_clustering(
            rand, directed=directed, weights=weights, method=clustering))

        c_g = np.average(local_clustering(
            g, directed=directed, weights=weights, method=clustering))

    # compute deltas
    delta_l = (l_g - l_rand) / (l_latt - l_rand) if l_latt != l_rand \
              else float(l_g > l_rand)
    delta_c = (c_latt - c_g) / (c_latt - c_rand)

    if np.isinf(l_rand):
        _log_message(logger, "WARNING", 'Randomized graph was unconnected.')

    if return_deviations:
        return 1 - np.sqrt(
            0.5*(np.clip(delta_l, 0, 1)**2 + np.clip(delta_c, 0, 1)**2)), \
            delta_l, delta_c
    else:
        return 1 - np.sqrt(
            0.5*(np.clip(delta_l, 0, 1)**2 + np.clip(delta_c, 0, 1)**2))
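To make the final combination step concrete, here is a small worked example of the clipping and averaging of the deviations (the values are illustrative, not computed from a real graph):

import numpy as np

delta_l, delta_c = 0.2, 0.5   # illustrative deviations
phi = 1 - np.sqrt(0.5 * (np.clip(delta_l, 0, 1)**2 + np.clip(delta_c, 0, 1)**2))
# phi is about 0.62: values close to 1 indicate a strongly small-world graph,
# values close to 0 a graph dominated by lattice-like or random-like traits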
Example No. 19
0
def plot_activity(gid_recorder=None,
                  record=None,
                  network=None,
                  gids=None,
                  show=False,
                  limits=None,
                  hist=True,
                  title=None,
                  label=None,
                  sort=None,
                  average=False,
                  normalize=1.,
                  decimate=None,
                  transparent=True):
    '''
    Plot the monitored activity.
    
    Parameters
    ----------
    gid_recorder : tuple or list of tuples, optional (default: None)
        The gids of the recording devices. If None, then all existing
        "spike_detector"s are used.
    record : tuple or list, optional (default: None)
        List of the monitored variables for each device. If `gid_recorder` is
        None, record can also be None and only spikes are considered.
    network : :class:`~nngt.Network` or subclass, optional (default: None)
        Network which activity will be monitored.
    gids : tuple, optional (default: None)
        NEST gids of the neurons which should be monitored.
    show : bool, optional (default: False)
        Whether to show the plot right away or to wait for the next plt.show().
    hist : bool, optional (default: True)
        Whether to display the histogram when plotting spikes rasters.
    limits : tuple, optional (default: None)
        Time limits of the plot (if not specified, times of first and last
        spike for raster plots).
    title : str, optional (default: None)
        Title of the plot.
    label : str or list, optional (default: None)
        Add labels to the plot (one per recorder).
    sort : str or list, optional (default: None)
        Sort neurons using a topological property ("in-degree", "out-degree",
        "total-degree" or "betweenness"), an activity-related property
        ("firing_rate" or neuronal property) or a user-defined list of sorted
        neuron ids. Sorting is performed by increasing value of the `sort`
        property from bottom to top inside each group.
    normalize : float or list, optional (default: 1.)
        Normalize the recorded results by a given float. If a list is provided,
        there should be one entry per voltmeter or multimeter in the recorders.
        If the recording was done through `monitor_groups`, the population can
        be passed to normalize the data by the number of nodes in each group.
    decimate : int or list of ints, optional (default: None)
        Represent only a fraction of the spiking neurons; only one neuron in
        `decimate` will be represented (e.g. setting `decimate` to 5 will lead
        to only 20% of the neurons being represented). If a list is provided,
        it must have one entry per NeuralGroup in the population.

    Warning
    -------
    Sorting with "firing_rate" only works if NEST gids form a continuous
    integer range.

    Returns
    -------
    lines : list of lists of :class:`matplotlib.lines.Line2D`
        Lines containing the data that was plotted, grouped by figure.
    '''
    lst_rec, lst_labels, lines, labels = [], [], {}, {}
    num_fig = np.max(plt.get_fignums()) if plt.get_fignums() else 0
    # normalize recorders and recordables
    if gid_recorder is not None:
        if len(record) != len(gid_recorder):
            raise InvalidArgument('`record` must either be the same for all '
                                  'recorders, or contain one entry per '
                                  'recorder in `gid_recorder`')
        for rec in gid_recorder:
            if isinstance(gid_recorder[0], tuple):
                lst_rec.append(rec[0])
            else:
                lst_rec.append(rec)
    else:
        lst_rec = nest.GetNodes((0, ), properties={'model':
                                                   'spike_detector'})[0]
        record = tuple("spikes" for _ in range(len(lst_rec)))
    # get gids and groups
    gids = network.nest_gid if (gids is None and network is not None) else gids
    if gids is None:
        gids = []
        for rec in lst_rec:
            gids.extend(nest.GetStatus([rec])[0]["events"]["senders"])
        gids = np.unique(gids)
    num_group = len(network.population) if network is not None else 1
    # sorting
    sorted_neurons = np.array([])
    if len(gids):
        sorted_neurons = np.arange(np.max(gids) +
                                   1).astype(int) - np.min(gids) + 1
    attr = None
    if sort is not None:
        assert network is not None, "`network` is required for sorting."
        if nonstring_container(sort):
            attr = sort
            sorted_neurons = _sort_neurons(attr, gids, network)
            sort = "user defined sort"
        else:
            data = None
            if sort.lower() in ("firing_rate", "b2"):  # get senders
                data = [[], []]
                for rec in lst_rec:
                    info = nest.GetStatus([rec])[0]
                    if str(info["model"]) == "spike_detector":
                        data[0].extend(info["events"]["senders"])
                        data[1].extend(info["events"]["times"])
                data = np.array(data).T
            sorted_neurons, attr = _sort_neurons(sort,
                                                 gids,
                                                 network,
                                                 data=data,
                                                 return_attr=True)
    # spikes plotting
    colors = palette(np.linspace(0, 1, num_group))
    num_raster, num_detec, num_meter = 0, 0, 0
    fignums = {}
    decim = []
    if decimate is None:
        decim = [None for _ in range(num_group)]
    elif is_integer(decimate):
        decim = [decimate for _ in range(num_group)]
    elif nonstring_container(decimate):
        assert len(decimate) == num_group, "`decimate` should have one " +\
                                           "entry per group in the population."
        decim = decimate
    else:
        raise AttributeError(
            "`decimate` must be either an int or a list of `int`.")

    # set labels
    if label is None:
        lst_labels = [None for _ in range(len(lst_rec))]
    else:
        if isinstance(label, str):
            lst_labels = [label]
        else:
            lst_labels = label
        if len(lst_labels) != len(lst_rec):
            _log_message(
                logger, "WARNING",
                'Incorrect length for `label`: expecting {} but got '
                '{}.\nIgnoring.'.format(len(lst_rec), len(label)))
            lst_labels = [None for _ in range(len(lst_rec))]

    # plot
    for rec, var, lbl in zip(lst_rec, record, lst_labels):
        info = nest.GetStatus([rec])[0]
        fnum = fignums[info["model"]] if info["model"] in fignums else None
        if info["model"] not in labels:
            labels[info["model"]] = []
            lines[info["model"]] = []
        if str(info["model"]) == "spike_detector":
            c = colors[num_raster]
            times, senders = info["events"]["times"], info["events"]["senders"]
            sorted_ids = sorted_neurons[senders]
            l = raster_plot(times,
                            sorted_ids,
                            color=c,
                            show=False,
                            limits=limits,
                            sort=sort,
                            fignum=fnum,
                            decimate=decim[num_raster],
                            sort_attribute=attr,
                            network=network,
                            transparent=transparent)
            num_raster += 1
            if l:
                fig_raster = l[0].figure.number
                fignums['spike_detector'] = fig_raster
                labels["spike_detector"].append(lbl)
                lines["spike_detector"].extend(l)
        elif "detector" in str(info["model"]):
            c = colors[num_detec]
            times, senders = info["events"]["times"], info["events"]["senders"]
            sorted_ids = sorted_neurons[senders]
            l = raster_plot(times,
                            sorted_ids,
                            fignum=fnum,
                            color=c,
                            show=False,
                            hist=hist,
                            limits=limits)
            if l:
                fig_detect = l[0].figure.number
                num_detec += 1
                fignums[info["model"]] = fig_detect
                labels[info["model"]].append(lbl)
                lines[info["model"]].extend(l)
        else:
            da_time = info["events"]["times"]
            fig = plt.figure(fnum)
            fignums[info["model"]] = fig.number
            lines_tmp, labels_tmp = [], []
            if nonstring_container(var):
                axes = fig.axes
                if not axes:
                    axes = _set_new_plot(fig.number, names=var)[1]
                labels_tmp = [lbl for _ in range(len(var))]
                for subvar in var:
                    for ax in axes:
                        if ax.name == subvar:
                            da_subvar = info["events"][subvar]
                            if isinstance(normalize, nngt.NeuralPop):
                                da_subvar /= normalize[num_meter].size
                            elif nonstring_container(normalize):
                                da_subvar /= normalize[num_meter]
                            elif normalize is not None:
                                da_subvar /= normalize
                            lines_tmp.extend(ax.plot(da_time, da_subvar))
                            ax.set_ylabel(subvar)
                            ax.set_xlabel("time")
                            if limits is not None:
                                ax.set_xlim(limits[0], limits[1])
            else:
                ax = fig.add_subplot(111)
                da_var = info["events"][var]
                lines_tmp.extend(ax.plot(da_time, da_var / normalize))
                labels_tmp.append(lbl)
                ax.set_ylabel(var)
                ax.set_xlabel("time")
            labels[info["model"]].extend(labels_tmp)
            lines[info["model"]].extend(lines_tmp)
            num_meter += 1
    for recorder in fignums:
        fig = plt.figure(fignums[recorder])
        if title is not None:
            fig.suptitle(title)
        if label is not None:
            fig.legend(lines[recorder], labels[recorder])
    if show:
        plt.show()
    return lines
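A hedged usage sketch for this routine: it assumes a NEST simulation prepared with the `monitor_groups` helper mentioned in the docstring (the names `net`, `recorders` and `recordables`, as well as the exact return values of `monitor_groups`, are assumptions), so it is not runnable on its own:

# recorders, recordables = monitor_groups(net.population.keys(), network=net)
# nest.Simulate(1000.)
lines = plot_activity(gid_recorder=recorders, record=recordables,
                      network=net, hist=True, title="Population activity",
                      show=True)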
Example No. 20
0
def raster_plot(times,
                senders,
                limits=None,
                title="Spike raster",
                hist=False,
                num_bins=1000,
                color="b",
                decimate=None,
                fignum=None,
                label=None,
                show=True,
                sort=None,
                sort_attribute=None,
                network=None,
                transparent=True):
    """
    Plotting routine that constructs a raster plot along with
    an optional histogram.
    
    Parameters
    ----------
    times : list or :class:`numpy.ndarray`
        Spike times.
    senders : list or :class:`numpy.ndarray`
        Index for the spiking neuron for each time in `times`.
    limits : tuple, optional (default: None)
        Time limits of the plot (if not specified, times of first and last
        spike).
    title : string, optional (default: 'Spike raster')
        Title of the raster plot.
    hist : bool, optional (default: False)
        Whether to plot the raster's histogram.
    num_bins : int, optional (default: 1000)
        Number of bins for the histogram.
    color : string or float, optional (default: 'b')
        Color of the plot lines and markers.
    decimate : int, optional (default: None)
        Represent only a fraction of the spiking neurons; only one neuron in
        `decimate` will be represented (e.g. setting `decimate` to 10 will lead
        to only 10% of the neurons being represented).
    fignum : int, optional (default: None)
        Id of another raster plot to which the new data should be added.
    label : str, optional (default: None)
        Label the current data.
    show : bool, optional (default: True)
        Whether to show the plot right away or to wait for the next plt.show().
    
    Returns
    -------
    lines : list of :class:`matplotlib.lines.Line2D`
        Lines containing the data that was plotted.
    """
    num_neurons = len(np.unique(senders))
    lines = []
    kwargs = {} if label is None else {'label': label}

    # decimate if necessary
    if decimate is not None:
        idx_keep = np.where(np.mod(senders, decimate) == 0)[0]
        senders = senders[idx_keep]
        times = times[idx_keep]

    if len(times):
        fig = plt.figure(fignum)
        if transparent:
            fig.patch.set_visible(False)
        ylabel = "Neuron ID"
        xlabel = "Time (ms)"

        delta_t = 0.01 * (times[-1] - times[0])

        if hist:
            ax1, ax2 = None, None
            if len(fig.axes) == 2:
                ax1 = fig.axes[0]
                ax2 = fig.axes[1]
            else:
                ax1 = fig.add_axes([0.1, 0.3, 0.85, 0.6])
                ax2 = fig.add_axes([0.1, 0.08, 0.85, 0.17], sharex=ax1)
            lines.extend(
                ax1.plot(times,
                         senders,
                         c=color,
                         marker="o",
                         linestyle='None',
                         mec="k",
                         mew=0.5,
                         ms=4,
                         **kwargs))
            ax1_lines = ax1.lines
            if len(ax1_lines) > 1:
                t_max = max(ax1_lines[0].get_xdata().max(), times[-1])
                ax1.set_xlim([-delta_t, t_max + delta_t])
            ax1.set_ylabel(ylabel)
            if limits is not None:
                ax1.set_xlim(*limits)
            #~ ax1.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=3)

            bin_width = (np.amax(times) - np.amin(times)) / float(num_bins)
            t_bins = np.linspace(np.amin(times), np.amax(times), num_bins)
            if limits is not None:
                t_bins = np.linspace(limits[0], limits[1], num_bins)
            n, bins = np.histogram(times, bins=t_bins)
            #~ n = _moving_average(n,5)
            t_bins = np.concatenate(([t_bins[0]], t_bins))
            #~ heights = 1000 * n / (hist_binwidth * num_neurons)
            # height = rate in Hz, knowing that t is in ms
            heights = 1000 * np.concatenate(
                ([0], n, [0])) / (num_neurons * bin_width)
            # guard against zero-width bins (reassign `heights`, the variable
            # actually used below, instead of an unused `height`)
            heights = np.zeros(len(heights)) if bin_width == 0. else heights
            # use a separate name so the `lines` list returned by the function
            # is not clobbered by the histogram patches
            patches = ax2.patches
            if patches:
                data = patches[-1].get_xy()
                bottom = data[:, 1]
                if limits is None:
                    old_bins = data[:, 0]
                    old_start = int(old_bins[0] / (old_bins[2] - old_bins[0]))
                    new_start = int(t_bins[0] / (t_bins[2] - t_bins[0]))
                    old_end = int(old_bins[-2] / (old_bins[-2] - old_bins[-3]))
                    new_end = int(t_bins[-1] / (t_bins[-1] - t_bins[-2]))
                    diff_start = new_start - old_start
                    diff_end = new_end - old_end
                    if diff_start > 0:
                        bottom = bottom[diff_start:]
                    else:
                        bottom = np.concatenate(
                            (np.zeros(-diff_start), bottom))
                    if diff_end > 0:
                        bottom = np.concatenate((bottom, np.zeros(diff_end)))
                    else:
                        bottom = bottom[:diff_end - 1]
                    b_len, h_len = len(bottom), len(heights)
                    if b_len > h_len:
                        bottom = bottom[:h_len]
                    elif b_len < h_len:
                        bottom = np.concatenate(
                            (bottom, np.zeros(h_len - b_len)))
                else:
                    bottom = bottom[:-1]
                #~ x,y1,y2 = _fill_between_steps(t_bins,heights,bottom[::2], h_align='left')
                #~ x,y1,y2 = _fill_between_steps(t_bins[:-1],heights+bottom[::2], bottom[::2], h_align='left')
                ax2.fill_between(t_bins, heights + bottom, bottom, color=color)
            else:
                #~ x,y1,_ = _fill_between_steps(t_bins,heights, h_align='left')
                #~ x,y1,_ = _fill_between_steps(t_bins[:-1],heights)
                ax2.fill(t_bins, heights, color=color)
            yticks = [
                int(x) for x in np.linspace(0,
                                            int(max(heights) * 1.1) + 5, 4)
            ]
            ax2.set_yticks(yticks)
            ax2.set_ylabel("Rate (Hz)")
            ax2.set_xlabel(xlabel)
            ax2.set_xlim(ax1.get_xlim())
            _second_axis(sort, sort_attribute, ax1)
        else:
            ax = fig.axes[0] if fig.axes else fig.add_subplot(111)
            if network is not None:
                for m, (k, v) in zip(markers, network.population.items()):
                    keep = np.where(np.in1d(senders,
                                            network.nest_gid[v.ids]))[0]
                    if len(keep):
                        if label is None:
                            kwargs['label'] = k
                        lines.extend(
                            ax.plot(times[keep],
                                    senders[keep],
                                    c=color,
                                    marker=m,
                                    ls='None',
                                    mec='k',
                                    mew=0.5,
                                    ms=4,
                                    **kwargs))
                        if 'inh' in k:
                            c_rgba = ColorConverter().to_rgba(color, alpha=0.5)
                            lines[-1].set_markerfacecolor(c_rgba)
            else:
                lines.extend(
                    ax.plot(times,
                            senders,
                            c=color,
                            marker="o",
                            linestyle='None',
                            mec="k",
                            mew=0.5,
                            ms=4,
                            **kwargs))
            ax.set_ylabel(ylabel)
            ax.set_xlabel(xlabel)
            if limits is not None:
                ax.set_xlim(limits)
            else:
                _set_ax_lims(ax, np.max(times), np.min(times), np.max(senders),
                             np.min(senders))
            if label is not None:
                ax.legend(bbox_to_anchor=(1.1, 1.2))
            _second_axis(sort, sort_attribute, ax)
        fig.suptitle(title)
        if show:
            plt.show()
    else:
        _log_message(logger, "WARNING",
                     "No activity was detected during the simulation.")
    return lines
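Because `raster_plot` only needs spike times and sender ids, it can be tried with synthetic data; a self-contained sketch (assuming this function and matplotlib are available, data purely illustrative):

import numpy as np

rng = np.random.default_rng(0)
times = np.sort(rng.uniform(0., 1000., 2000))    # spike times in ms
senders = rng.integers(1, 51, size=times.size)   # 50 neurons with ids 1 to 50

lines = raster_plot(times, senders, hist=True, num_bins=100,
                    title="Synthetic raster", show=True)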
Example No. 21
0
    def new_edge(self,
                 source,
                 target,
                 attributes=None,
                 ignore=False,
                 self_loop=False):
        '''
        Add a connection to the graph, with optional properties.

        .. versionchanged:: 2.0
            Added `self_loop` argument to enable adding self-loops.

        Parameters
        ----------
        source : :class:`int/node`
            Source node.
        target : :class:`int/node`
            Target node.
        attributes : :class:`dict`, optional (default: ``{}``)
            Dictionary containing optional edge properties. If the graph is
            weighted, defaults to ``{"weight": 1.}``, the unit weight for the
            connection (synaptic strength in NEST).
        ignore : bool, optional (default: False)
            If set to True, attempts to add an existing edge or a disallowed
            self-loop are silently ignored (``None`` is returned); otherwise
            an error is raised.
        self_loop : bool, optional (default: False)
            Whether to allow self-loops or not.

        Returns
        -------
        The new connection or None if nothing was added.
        '''
        g = self._graph

        attributes = {} if attributes is None else deepcopy(attributes)

        _set_default_edge_attributes(self, attributes, num_edges=1)

        # check that the edge does not already exist and that nodes are valid
        try:
            edge = g.edge(source, target)
        except ValueError:
            raise InvalidArgument("`source` or `target` does not exist.")

        if edge is None:
            if source == target:
                if not ignore and not self_loop:
                    raise InvalidArgument("Trying to add a self-loop.")
                elif ignore:
                    _log_message(logger, "INFO",
                                 "Self-loop on {} ignored.".format(source))

                    return None

            g.add_edge(source, target, add_missing=False)

            # check distance
            _set_dist_new_edges(attributes, self, [(source, target)])

            # set the attributes
            self._attr_new_edges([(source, target)], attributes=attributes)

            # set edge id
            g.edge_properties["eid"][g.edge(source, target)] = self._max_eid

            self._max_eid += 1
        else:
            if not ignore:
                raise InvalidArgument("Trying to add existing edge.")

            _log_message(logger, "INFO", "Existing edge {} ignored.".format(
                (source, target)))

            return None

        return (source, target)
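A short behavioural sketch of `new_edge` (assuming a small `nngt.Graph`; the constructor call is an assumption about the public API, not taken from this file):

import nngt

g = nngt.Graph(nodes=3)
g.new_edge(0, 1)                  # returns (0, 1)
g.new_edge(0, 1, ignore=True)     # logs "Existing edge (0, 1) ignored.", returns None
g.new_edge(2, 2, self_loop=True)  # self-loop explicitly allowed
# g.new_edge(0, 5)                # would raise InvalidArgument: node 5 does not exist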
Example No. 22
0
def load_from_file(filename, fmt="auto", separator=" ", secondary=";",
                   attributes=None, notifier="@", ignore="#"):
    '''
    Load the main properties (edges, attributes...) from a file.

    .. warning::
        To import a graph directly from a file, use the
        :func:`~nngt.Graph.from_file` classmethod.

    Parameters
    ----------
    filename: str
        The path to the file.
    fmt : str, optional (default: "auto")
        The format used to save the graph. Supported formats are: "neighbour"
        (neighbour list, default if format cannot be deduced automatically),
        "ssp" (scipy.sparse), "edge_list" (list of all the edges in the graph,
        one edge per line, represented by a ``source target``-pair), "gml"
        (gml format, default if `filename` ends with '.gml'), "graphml"
        (graphml format, default if `filename` ends with '.graphml' or '.xml'),
        "dot" (dot format, default if `filename` ends with '.dot'), "gt" (only
        when using `graph_tool <http://graph-tool.skewed.de/>`_ as library,
        detected if `filename` ends with '.gt').
    separator : str, optional (default " ")
        separator used to separate inputs in the case of custom formats (namely
        "neighbour" and "edge_list")
    secondary : str, optional (default: ";")
        Secondary separator used to separate attributes in the case of custom
        formats.
    attributes : list, optional (default: [])
        List of names for the attributes present in the file. If a `notifier`
        is present in the file, names will be deduced from it; otherwise the
        attributes will be numbered.
    notifier : str, optional (default: "@")
        Symbol marking the lines that carry meaningful information. Such
        information is formatted ``@info_name=info_value``, where
        ``info_name`` is in ("attributes", "directed", "name", "size") and the
        associated ``info_value``s are of type (``list``, ``bool``, ``str``,
        ``int``).
        Additional notifiers are ``@type=SpatialGraph/Network/SpatialNetwork``,
        which must be followed by the relevant notifiers among ``@shape``,
        ``@population``, and ``@graph``.
    ignore : str, optional (default: "#")
        Ignore lines starting with the `ignore` string.

    Returns
    -------
    edges : list of 2-tuples
        Edges of the graph.
    di_attributes : dict
        Dictionary containing the attribute name as key and its value as a
        list sorted in the same order as `edges`.
    pop : :class:`~nngt.NeuralPop`
        Population (``None`` if not present in the file).
    shape : :class:`~nngt.geometry.Shape`
        Shape of the graph (``None`` if not present in the file).
    positions : array-like of shape (N, d)
        The positions of the neurons (``None`` if not present in the file).
    '''
    # check for mpi
    if nngt.get_config("mpi"):
        raise NotImplementedError("This function is not ready for MPI yet.")
    # load
    lst_lines, di_notif, pop, shape, positions = None, None, None, None, None
    fmt = _get_format(fmt, filename)
    with open(filename, "r") as filegraph:
        lst_lines = [line.strip() for line in filegraph.readlines()]
    # notifier lines
    di_notif = _get_notif(lst_lines, notifier)
    # data
    lst_lines = lst_lines[::-1][:-len(di_notif)]
    while not lst_lines[-1] or lst_lines[-1].startswith(ignore):
        lst_lines.pop()
    # get nodes attributes
    di_nattributes  = _get_node_attr(di_notif, separator)
    # make edges and attributes
    edges           = []
    eattributes     = (di_notif["edge_attributes"] if attributes is None
                       else attributes)
    di_eattributes  = {name: [] for name in di_notif["edge_attributes"]}
    di_edge_convert = _gen_convert(di_notif["edge_attributes"],
                                   di_notif["edge_attr_types"])
    line            = None

    while lst_lines:
        line = lst_lines.pop()
        if line and not line.startswith(notifier):
            di_get_edges[fmt](
                line, eattributes, separator, secondary, edges, di_eattributes,
                di_edge_convert)
        else:
            break
    # check whether a shape is present
    if 'shape' in di_notif:
        if _shapely_support:
            min_x, max_x = float(di_notif['min_x']), float(di_notif['max_x'])
            unit = di_notif['unit']
            shape = Shape.from_wtk(
                di_notif['shape'], min_x=min_x, max_x=max_x, unit=unit)
        else:
            _log_message(logger, "WARNING",
                         'A Shape object was present in the file but could '
                         'not be loaded because Shapely is not installed.')
    # check whether a population is present
    if 'population' in di_notif:
        str_enc = di_notif['population'].replace('~', '\n').encode()
        str_dec = codecs.decode(str_enc, "base64")
        try:
            pop = pickle.loads(str_dec)
        except UnicodeError:
            pop = pickle.loads(str_dec, encoding="latin1")
    if 'x' in di_notif:
        x = np.fromstring(di_notif['x'], sep=separator)
        y = np.fromstring(di_notif['y'], sep=separator)
        if 'z' in di_notif:
            z = np.fromstring(di_notif['z'], sep=separator)
            positions = np.array((x, y, z)).T
        else:
            positions = np.array((x, y)).T
    return (di_notif, edges, di_nattributes, di_eattributes, pop, shape,
            positions)
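In practice this loader is reached through the classmethod mentioned in the warning above; a hedged round-trip sketch (the generator call and `to_file` usage are assumptions about the public API, not taken from this file):

import nngt
import nngt.generation as ng

g = ng.erdos_renyi(nodes=100, avg_deg=5)
g.to_file("graph.el", fmt="edge_list")   # writes the edges plus the "@" notifier lines
g2 = nngt.Graph.from_file("graph.el", fmt="edge_list")
assert g2.node_nb() == g.node_nb()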
Example No. 23
0
# imports needed by this snippet (the top of the original module, where
# `logging` and `nngt` are imported, is not included in this excerpt)
import logging

import nngt

from nngt.geometry.geom_utils import conversion_magnitude
from nngt.lib.connect_tools import _set_options
from nngt.lib.logger import _log_message
from nngt.lib.test_functions import mpi_checker, mpi_random

# try to import multithreaded or mpi algorithms

using_mt_algorithms = False

if nngt.get_config("multithreading"):
    logger = logging.getLogger(__name__)
    try:
        from .cconnect import *
        from .connect_algorithms import price_network
        using_mt_algorithms = True
        _log_message(logger, "DEBUG",
                     "Using multithreaded algorithms compiled on install.")
        nngt.set_config('multithreading', True, silent=True)
    except Exception as e:
        try:
            import cython
            import pyximport
            pyximport.install()
            from .cconnect import *
            from .connect_algorithms import price_network
            using_mt_algorithms = True
            _log_message(
                logger, "DEBUG",
                str(e) + "\n\tCompiled "
                "multithreaded algorithms on-the-run.")
            nngt.set_config('multithreading', True, silent=True)
        except Exception as e2:
            # likely fallback (the original snippet is truncated here): warn
            # and keep the default single-threaded algorithms
            _log_message(logger, "WARNING",
                         str(e) + "\n\t" + str(e2) + "\n\tMultithreaded "
                         "algorithms could not be loaded; falling back to "
                         "the default implementation.")
            nngt.set_config('multithreading', False, silent=True)
Example No. 24
0
    def new_edge(self,
                 source,
                 target,
                 attributes=None,
                 ignore=False,
                 self_loop=False):
        '''
        Add a connection to the graph, with optional properties.

        .. versionchanged:: 2.0
            Added `self_loop` argument to enable adding self-loops.

        Parameters
        ----------
        source : :class:`int/node`
            Source node.
        target : :class:`int/node`
            Target node.
        attributes : :class:`dict`, optional (default: ``{}``)
            Dictionary containing optional edge properties. If the graph is
            weighted, defaults to ``{"weight": 1.}``, the unit weight for the
            connection (synaptic strength in NEST).
        ignore : bool, optional (default: False)
            If set to True, attempts to add an existing edge or a disallowed
            self-loop are silently ignored (``None`` is returned); otherwise
            an error is raised.
        self_loop : bool, optional (default: False)
            Whether to allow self-loops or not.

        Returns
        -------
        The new connection or None if nothing was added.
        '''
        g = self._graph

        attributes = {} if attributes is None else deepcopy(attributes)

        # check that nodes exist
        num_nodes = g.number_of_nodes()

        if source >= num_nodes or target >= num_nodes:
            raise InvalidArgument("`source` or `target` does not exist.")

        # set default values for attributes that were not passed
        _set_default_edge_attributes(self, attributes, num_edges=1)

        if g.has_edge(source, target):
            if not ignore:
                raise InvalidArgument("Trying to add existing edge.")

            _log_message(logger, "INFO", "Existing edge {} ignored.".format(
                (source, target)))

            # nothing was added, so return None as documented
            return None
        else:
            if source == target:
                if not ignore and not self_loop:
                    raise InvalidArgument("Trying to add a self-loop.")
                elif ignore:
                    _log_message(logger, "INFO",
                                 "Self-loop on {} ignored.".format(source))

                    return None

            for attr in attributes:
                if "_corr" in attr:
                    raise NotImplementedError("Correlated attributes are not "
                                              "available with networkx.")

            if self.is_weighted() and "weight" not in attributes:
                attributes["weight"] = 1.

            # check distance
            _set_dist_new_edges(attributes, self, [(source, target)])

            g.add_edge(source, target)

            g[source][target]["eid"] = self._max_eid

            self._max_eid += 1

            # call parent function to set the attributes
            self._attr_new_edges([(source, target)], attributes=attributes)

        return (source, target)