Example #1
    def add_include(self, includefile):
        """
        Adds file to include *includefile* to model.
        *includefile* -- str

        Parameters
        ----------
        includefile : str
            name of the file to include
        """
        self._model.add(lems.Include(includefile))
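
A minimal usage sketch, assuming `exporter` is an instance of the class defining add_include and that the referenced LEMS/NeuroML definition files are available on the include path:

# Hypothetical usage; each call appends a lems.Include element
# (an <Include file="..."/> tag) to exporter._model.
exporter.add_include("NeuroMLCoreDimensions.xml")
exporter.add_include("Simulation.xml")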
Example #2
    def create_lems_model(self,
                          run_dict,
                          constants_file=None,
                          includes=[],
                          recordingsname='recording'):
        """
        Create a LEMS model from a standard dictionary.

        Parameters
        ----------
        run_dict : dict
            standard dictionary representation of the information required
            for creating the LEMS model
        constants_file : str, optional
            file with units as constants definitions; if None,
            LEMS_CONSTANTS_XML is added automatically
        includes : list of str
            additional XML files to include in the preamble
        recordingsname : str, optional
            base name of the LEMS simulation recordings output; values are
            written with extension .dat and spikes with .spikes,
            default 'recording'
        """

        # if no constants_file is specified, use LEMS_CONSTANTS_XML
        if not constants_file:
            self._model.add(lems.Include(LEMS_CONSTANTS_XML))
        else:
            self._model.add(lems.Include(constants_file))
        # add include files
        includes = set(includes)
        for include in INCLUDES:
            includes.add(include)

        # TODO: only a single run is handled for now
        single_run = run_dict[0]
        # check whether initializers are defined
        initializers = []
        if 'initializers_connectors' in single_run:
            for item in single_run['initializers_connectors']:
                if item['type'] == 'initializer':
                    initializers.append(item)

        netinputs = []
        if 'poissoninput' in single_run['components']:
            netinputs = single_run['components']['poissoninput']

        if netinputs:
            includes.add(LEMS_INPUTS)
        for include in includes:
            self.add_include(include)

        if 'neurongroup' in single_run['components']:
            for neuron_count, neurongroup in enumerate(
                    single_run['components']['neurongroup']):
                self.add_neurongroup(neurongroup, neuron_count, initializers)

        # DOM structure of the whole model is constructed below
        self._dommodel = self._model.export_to_dom()
        # add inputs
        for input_counter, poisson_inp in enumerate(netinputs):
            self.add_input(poisson_inp, input_counter)
        # A population should be created in `make_multiinstantiate`
        # so we can add it to our DOM structure.
        if self._population:
            self._extend_dommodel(self._population)

        self._model_namespace['simulname'] = "sim1"
        self._simulation = NeuroMLSimulation(
            self._model_namespace['simulname'],
            self._model_namespace['networkname'])

        # loop over the components field of single_run
        for (obj_name, obj_list) in single_run['components'].items():

            # check for unsupported objects
            if obj_name == 'synapses':
                raise NotImplementedError("Synapses are not supported yet")

            # check whether StateMonitor
            if obj_name == 'statemonitor':
                # loop over the statemonitors defined
                for statemonitor in obj_list:
                    self.add_statemonitor(statemonitor,
                                          filename=recordingsname,
                                          outputfile=True)

            # check whether SpikeMonitor
            if obj_name == 'spikemonitor':
                for spikemonitor in obj_list:
                    self.add_spikemonitor(spikemonitor,
                                          filename=recordingsname)

            # check whether EventMonitor
            # TODO: is this valid in NML/LEMS?
            if obj_name == 'eventmonitor':
                for eventmonitor in obj_list:
                    self.add_eventmonitor(eventmonitor,
                                          filename=recordingsname)

        # build the simulation
        simulation = self._simulation.build()
        self._extend_dommodel(simulation)
        target = NeuroMLTarget(self._model_namespace['simulname'])
        target = target.build()
        self._extend_dommodel(target)
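
A hedged usage sketch for the dictionary-driven variant above; the exact layout of run_dict is an assumption, showing only the keys that create_lems_model reads ('components' with its per-type lists and the optional 'initializers_connectors' list):

# Hypothetical run_dict; entries such as neurongroup_desc and the
# initializer fields besides 'type' are placeholders, not a documented format.
run_dict = [{
    'components': {
        'neurongroup': [neurongroup_desc],
        'statemonitor': [statemonitor_desc],
        'poissoninput': [],
    },
    'initializers_connectors': [
        {'type': 'initializer', 'variable': 'v', 'value': '-70*mV'},
    ],
}]
exporter.create_lems_model(run_dict, recordingsname='output')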
Example #3
    def create_lems_model(self, network=None, namespace={}, initializers={},
                          constants_file=None, includes=[],
                          recordingsname='recording'):
        """
        Build a LEMS model object from the given *network*.

        Parameters
        ----------
        network : Network, optional
            a Network with all Brian objects collected into it, or None;
            in the latter case Brian 2 objects are collected automatically
            from the surrounding scope
        namespace : dict
            namespace variables defining extra model parameters
        initializers : dict
            all values which need to be initialized before the simulation
            is run
        constants_file : str, optional
            file with units as constants definitions; if None,
            LEMS_CONSTANTS_XML is added automatically
        includes : list of str
            additional XML files to include in the preamble
        recordingsname : str, optional
            base name of the LEMS simulation recordings output; values are
            written with extension .dat and spikes with .spikes,
            default 'recording'
        """
        if network is None:
            net = Network(collect(level=1))
        else:
            net = network

        if not constants_file:
            self._model.add(lems.Include(LEMS_CONSTANTS_XML))
        else:
            self._model.add(lems.Include(constants_file))
        includes = set(includes)
        for incl in INCLUDES:
            includes.add(incl)
        neuron_groups  = [o for o in net.objects if type(o) is NeuronGroup]
        state_monitors = [o for o in net.objects if type(o) is StateMonitor]
        spike_monitors = [o for o in net.objects if type(o) is SpikeMonitor]
        
        for o in net.objects:
            if type(o) not in [NeuronGroup, StateMonitor, SpikeMonitor,
                               Thresholder, Resetter, StateUpdater]:
                logger.warn("""{} export functionality
                               is not implemented yet.""".format(type(o).__name__))
        # --- not fully implemented
        synapses       = [o for o in net.objects if type(o) is Synapses]
        netinputs      = [o for o in net.objects if type(o) is PoissonInput]
        # ---
        #if len(synapses) > 0:
        #    logger.warn("Synpases export functionality is not implemented yet.")
        #if len(netinputs) > 0:
        #    logger.warn("Network Input export functionality is not implemented yet.")
        if len(netinputs) > 0:
            includes.add(LEMS_INPUTS)
        for incl in includes:
            self.add_include(incl)
        # First step is to add individual neuron definitions and initialize
        # them by MultiInstantiate
        for e, obj in enumerate(neuron_groups):
            self.add_neurongroup(obj, e, namespace, initializers)
        # DOM structure of the whole model is constructed below
        self._dommodel = self._model.export_to_dom()
        # input support - currently only Poisson Inputs
        for e, obj in enumerate(netinputs):
            self.add_input(obj, counter=e)
        # A population should be created in *make_multiinstantiate*
        # so we can add it to our DOM structure.
        if self._population:
            self._extend_dommodel(self._population)
        # if any State or Spike Monitors are present, they are supported via
        # the Simulation tag
        self._model_namespace['simulname'] = "sim1"
        self._simulation = NeuroMLSimulation(self._model_namespace['simulname'],
                                             self._model_namespace['networkname'])

        for obj in state_monitors:
            self.add_statemonitor(obj, filename=recordingsname, outputfile=True)
        for obj in spike_monitors:
            self.add_spikemonitor(obj, filename=recordingsname)
        simulation = self._simulation.build()
        self._extend_dommodel(simulation)
        target = NeuroMLTarget(self._model_namespace['simulname'])
        target = target.build()
        self._extend_dommodel(target)
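
A minimal usage sketch for the Brian 2 variant; the exporter class name (NMLExporter here) and its export_to_file call are assumptions, while the Brian 2 objects themselves are standard:

from brian2 import NeuronGroup, StateMonitor, SpikeMonitor, Network, mV, ms

# Hypothetical driver script for the exporter defined above.
eqs = 'dv/dt = (v_rest - v) / tau : volt'
group = NeuronGroup(5, eqs, threshold='v > -50*mV', reset='v = v_rest',
                    namespace={'v_rest': -70*mV, 'tau': 10*ms})
statemon = StateMonitor(group, 'v', record=True)
spikemon = SpikeMonitor(group)
net = Network(group, statemon, spikemon)

exporter = NMLExporter()                              # assumed class name
exporter.create_lems_model(network=net, initializers={'v': -70*mV})
exporter.export_to_file('model.xml')                  # assumed writer method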
Example #4
#! /usr/bin/python

import lems.api as lems

model = lems.Model()

model.add(lems.Include("test.xml"))

model.add(lems.Dimension('voltage', m=1, l=3, t=-3, i=-1))
model.add(lems.Dimension('time', t=1))
model.add(lems.Dimension('capacitance', m=-1, l=-2, t=4, i=2))

model.add(lems.Unit('milliVolt', 'mV', 'voltage', -3))
model.add(lems.Unit('milliSecond', 'ms', 'time', -3))
model.add(lems.Unit('microFarad', 'uF', 'capacitance', -12))

iaf1 = lems.ComponentType('iaf1')
model.add(iaf1)

iaf1.add(lems.Parameter('threshold', 'voltage'))
iaf1.add(lems.Parameter('reset', 'voltage'))
iaf1.add(lems.Parameter('refractoryPeriod', 'time'))
iaf1.add(lems.Parameter('capacitance', 'capacitance'))
iaf1.add(lems.Exposure('vexp', 'voltage'))
dp = lems.DerivedParameter('range', 'threshold - reset', 'voltage')
iaf1.add(dp)

iaf1.dynamics.add(lems.StateVariable('v', 'voltage', 'vexp'))
iaf1.dynamics.add(lems.DerivedVariable('v2', dimension='voltage', value='v*2'))
cdv = lems.ConditionalDerivedVariable('v_abs', 'voltage')
cdv.add(lems.Case('v .geq. 0', 'v'))
cdv.add(lems.Case('v .lt. 0', '-v'))   # second case assumed, completing |v|
iaf1.dynamics.add(cdv)
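
To turn the script above into a complete round-trip, the assembled model can be written to disk with export_to_file, the same pylems call used in the next example:

# Write the assembled model out as LEMS XML.
model.export_to_file('apitest.xml')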
Example #5
import os

import lems.api as lems
import neuromllite

# mdf_functions, substitute_args and evaluate_expr are expected to be
# provided by the surrounding MDF package; their import lines are omitted here.


def mdf_to_neuroml(graph, save_to=None, format=None, run_duration_sec=2):

    print("Converting graph: %s to NeuroML" % (graph.id))

    net = neuromllite.Network(id=graph.id)
    net.notes = "NeuroMLlite export of {} graph: {}".format(
        format if format else "MDF",
        graph.id,
    )

    model = lems.Model()
    lems_definitions = "%s_lems_definitions.xml" % graph.id

    for node in graph.nodes:
        print("    Node: %s" % node.id)

        node_comp_type = "%s__definition" % node.id
        node_comp = "%s__instance" % node.id

        # Create the ComponentType which defines behaviour of the general class
        ct = lems.ComponentType(node_comp_type, extends="baseCellMembPotDL")
        ct.add(lems.Attachments("only_input_port", "basePointCurrentDL"))
        ct.dynamics.add(
            lems.DerivedVariable(name="V",
                                 dimension="none",
                                 value="0",
                                 exposure="V"))
        model.add(ct)

        # Define the Component - an instance of the ComponentType
        comp = lems.Component(node_comp, node_comp_type)
        model.add(comp)

        cell = neuromllite.Cell(id=node_comp,
                                lems_source_file=lems_definitions)
        net.cells.append(cell)

        pop = neuromllite.Population(
            id=node.id,
            size=1,
            component=cell.id,
            properties={
                "color": "0.2 0.2 0.2",
                "radius": 3
            },
        )
        net.populations.append(pop)

        if len(node.input_ports) > 1:
            raise Exception(
                "Currently only max 1 input port supported in NeuroML...")

        for ip in node.input_ports:
            ct.add(lems.Exposure(ip.id, "none"))
            ct.dynamics.add(
                lems.DerivedVariable(
                    name=ip.id,
                    dimension="none",
                    select="only_input_port[*]/I",
                    reduce="add",
                    exposure=ip.id,
                ))

        on_start = None

        for p in node.parameters:
            print("Converting %s" % p)
            if p.value is not None:
                try:
                    v_num = float(p.value)
                    ct.add(lems.Parameter(p.id, "none"))
                    comp.parameters[p.id] = v_num
                    print(comp.parameters[p.id])
                except Exception as e:

                    ct.add(lems.Exposure(p.id, "none"))
                    dv = lems.DerivedVariable(
                        name=p.id,
                        dimension="none",
                        value="%s" % (p.value),
                        exposure=p.id,
                    )
                    ct.dynamics.add(dv)

            elif p.function is not None:
                ct.add(lems.Exposure(p.id, "none"))
                func_info = mdf_functions[p.function]
                expr = func_info["expression_string"]
                expr2 = substitute_args(expr, p.args)
                for arg in p.args:
                    expr += ";{}={}".format(arg, p.args[arg])
                dv = lems.DerivedVariable(name=p.id,
                                          dimension="none",
                                          value="%s" % (expr2),
                                          exposure=p.id)
                ct.dynamics.add(dv)
            else:
                ct.add(lems.Exposure(p.id, "none"))
                ct.dynamics.add(
                    lems.StateVariable(name=p.id,
                                       dimension="none",
                                       exposure=p.id))
                if p.default_initial_value:
                    if on_start is None:
                        on_start = lems.OnStart()
                        ct.dynamics.add(on_start)
                    sa = lems.StateAssignment(
                        variable=p.id,
                        value=str(evaluate_expr(p.default_initial_value)))
                    on_start.actions.append(sa)

                if p.time_derivative:
                    td = lems.TimeDerivative(variable=p.id,
                                             value=p.time_derivative)
                    ct.dynamics.add(td)

        if len(node.output_ports) > 1:
            raise Exception(
                "Currently only max 1 output port supported in NeuroML...")

        for op in node.output_ports:
            ct.add(lems.Exposure(op.id, "none"))
            ct.dynamics.add(
                lems.DerivedVariable(name=op.id,
                                     dimension="none",
                                     value=op.value,
                                     exposure=op.id))
            only_output_port = "only_output_port"
            ct.add(lems.Exposure(only_output_port, "none"))
            ct.dynamics.add(
                lems.DerivedVariable(
                    name=only_output_port,
                    dimension="none",
                    value=op.id,
                    exposure=only_output_port,
                ))

    if len(graph.edges) > 0:

        model.add(
            lems.Include(
                os.path.join(os.path.dirname(__file__),
                             "syn_definitions.xml")))
        rsDL = neuromllite.Synapse(id="rsDL",
                                   lems_source_file=lems_definitions)
        net.synapses.append(rsDL)
        # syn_id = 'silentSyn'
        # silentSynDL = neuromllite.Synapse(id=syn_id, lems_source_file=lems_definitions)

    for edge in graph.edges:
        print(f"    Edge: {edge.id} connects {edge.sender} to {edge.receiver}")

        ssyn_id = "silentSyn_proj_%s" % edge.id
        ssyn_id = "silentSyn_proj_%s" % edge.id
        # ssyn_id = 'silentSynX'
        silentDLin = neuromllite.Synapse(id=ssyn_id,
                                         lems_source_file=lems_definitions)

        model.add(lems.Component(ssyn_id, "silentRateSynapseDL"))

        net.synapses.append(silentDLin)

        net.projections.append(
            neuromllite.Projection(
                id="proj_%s" % edge.id,
                presynaptic=edge.sender,
                postsynaptic=edge.receiver,
                synapse=rsDL.id,
                pre_synapse=silentDLin.id,
                type="continuousProjection",
                weight=1,
                random_connectivity=neuromllite.RandomConnectivity(
                    probability=1),
            ))

    # Much more todo...
    model.export_to_file(lems_definitions)

    print("Nml net: %s" % net)
    if save_to:
        new_file = net.to_json_file(save_to)
        print("Saved NML to: %s" % save_to)

    ################################################################################
    ###   Build Simulation object & save as JSON

    simtime = 1000 * run_duration_sec
    dt = 0.1
    sim = neuromllite.Simulation(
        id="Sim%s" % net.id,
        network=new_file,
        duration=simtime,
        dt=dt,
        seed=123,
        recordVariables={"OUTPUT": {
            "all": "*"
        }},
    )

    recordVariables = {}
    for node in graph.nodes:
        for ip in node.input_ports:
            if ip.id not in recordVariables:
                recordVariables[ip.id] = {}
            recordVariables[ip.id][node.id] = 0

        for p in node.parameters:
            if p.is_stateful():
                if p.id not in recordVariables:
                    recordVariables[p.id] = {}
                recordVariables[p.id][node.id] = 0

        for op in node.output_ports:
            if op.id not in recordVariables:
                recordVariables[op.id] = {}
            recordVariables[op.id][node.id] = 0

    sim.recordVariables = recordVariables
    if save_to:
        sf = sim.to_json_file()

        print("Saved Simulation to: %s" % sf)

    return net, sim
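
A hedged driver for the converter above; load_mdf and the input file name are assumptions about the surrounding MDF package, and the rest simply forwards one graph to mdf_to_neuroml:

# Hypothetical usage; "my_model.json" is a placeholder MDF model file.
from modeci_mdf.utils import load_mdf

mdf_model = load_mdf("my_model.json")
graph = mdf_model.graphs[0]
net, sim = mdf_to_neuroml(graph,
                          save_to="%s.nmllite.json" % graph.id,
                          run_duration_sec=2)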