Example #1
def test_custom_events_neurongroup():
    """
    Test dictionary representation of a NeuronGroup with custom events
    """
    start_scope()
    grp = NeuronGroup(10,
                      'dvar/dt = (100 - var) / tau_n : 1',
                      events={'test_event': 'var > 70'},
                      method='exact')
    tau_n = 10 * ms
    grp.thresholder['test_event'].clock.dt = 10 * ms
    neuron_dict = collect_NeuronGroup(grp, get_local_namespace(0))

    custom_event = neuron_dict['events']['test_event']
    thresholder = custom_event['threshold']

    assert thresholder['code'] == 'var > 70'
    assert thresholder['when'] == grp.thresholder['test_event'].when
    assert thresholder['order'] == grp.thresholder['test_event'].order
    assert thresholder['dt'] == 10 * ms

    with pytest.raises(KeyError):
        neuron_dict['events']['spike']
    with pytest.raises(KeyError):
        custom_event['reset']
    with pytest.raises(KeyError):
        custom_event['refractory']

    # check with reset
    grp.run_on_event('test_event', 'var = -10')
    neuron_dict = collect_NeuronGroup(grp, get_local_namespace(0))
    custom_event = neuron_dict['events']['test_event']
    resetter = custom_event['reset']

    assert resetter['code'] == 'var = -10'
    assert resetter['when'] == grp.resetter['test_event'].when
    assert resetter['order'] == grp.resetter['test_event'].order
    assert resetter['dt'] == thresholder['dt']
Example #2
def test_poissoninput():
    """
    Test collect_PoissonInput()
    """
    # test 1
    start_scope()
    v_th = 1 * volt
    grp = NeuronGroup(10, 'dv/dt = (v_th - v)/(10*ms) :volt', method='euler',
                      threshold='v>100*mV', reset='v=0*mV')
    poi = PoissonInput(grp, 'v', 10, 1*Hz, 'v_th * rand() + 1*mV')
    poi_dict = collect_PoissonInput(poi, get_local_namespace(0))
    assert poi_dict['target'] == grp.name
    assert poi_dict['rate'] == 1*Hz
    assert poi_dict['N'] == 10
    assert poi_dict['target_var'] == 'v'
    assert poi_dict['when'] == poi.when
    assert poi_dict['order'] == poi.order
    assert poi_dict['clock'] == poi.clock.dt
    assert poi_dict['identifiers']['v_th'] == v_th
    # test 2
    grp2 = NeuronGroup(10, 'dv_1_2_3/dt = (v_th - v_1_2_3)/(10*ms) :volt',
                       method='euler', threshold='v_1_2_3>v_th',
                       reset='v_1_2_3=-v_th')
    poi2 = PoissonInput(grp2, 'v_1_2_3', 0, 0*Hz, v_th)
    poi_dict = collect_PoissonInput(poi2, get_local_namespace(0))
    assert poi_dict['target'] == grp2.name
    assert poi_dict['rate'] == 0*Hz
    assert poi_dict['N'] == 0
    assert poi_dict['target_var'] == 'v_1_2_3'
    with pytest.raises(KeyError):
        poi_dict['identifiers']
Example #3
    def __getitem__(self, index):
        '''
        Returns indices for `index` an array, integer or slice, or a string
        (that might refer to ``i`` as the group element index).

        '''
        if isinstance(index, tuple):
            raise IndexError(('Can only interpret 1-d indices, '
                              'got %d dimensions.') % len(index))
        if isinstance(index, basestring):
            # interpret the string expression
            namespace = get_local_namespace(1)
            additional_namespace = ('implicit-namespace', namespace)
            abstract_code = '_cond = ' + index
            check_code_units(abstract_code, self.group,
                             additional_variables=self.variables,
                             additional_namespace=additional_namespace)
            codeobj = create_runner_codeobj(self.group,
                                            abstract_code,
                                            'state_variable_indexing',
                                            additional_variables=self.variables,
                                            additional_namespace=additional_namespace,
                                            )
            return codeobj()
        else:
            if isinstance(index, slice):
                start, stop, step = index.indices(self.N)
                index = slice(start + self.offset, stop + self.offset, step)
                return self._indices[index]
            else:
                index_array = np.asarray(index)
                if not np.issubdtype(index_array.dtype, np.int):
                    raise TypeError('Indexing is only supported for integer arrays')
                return self._indices[index_array + self.offset]
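
A minimal usage sketch of the string-indexing path described in the docstring above, assuming a standard Brian2 runtime where the group exposes an indices wrapper (as in the __getitem__ implementations shown in later examples); v_cut is a hypothetical variable resolved from the calling frame via get_local_namespace:

from brian2 import NeuronGroup, mV

grp = NeuronGroup(10, 'v : volt')
grp.v = '(i + 1) * mV'          # v = 1 mV, 2 mV, ..., 10 mV
v_cut = 5 * mV                  # hypothetical name, picked up from the local namespace
selected = grp.indices['v > v_cut']   # indices where the condition holds: 5..9
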
Example #4
 def network_run(self, net, duration, report=None, report_period=60*second,
                 namespace=None, level=0):
     
     if namespace is not None:
         net.before_run(('explicit-run-namespace', namespace))
     else:
         namespace = get_local_namespace(2 + level)
         net.before_run(('implicit-run-namespace', namespace))
         
     self.clocks.update(net._clocks)
     
     # TODO: remove this horrible hack
     for clock in self.clocks:
         if clock.name=='clock':
             clock._name = 'clock_'
         
     # Extract all the CodeObjects
     # Note that since we ran the Network object, these CodeObjects will be sorted into the right
     # running order, assuming that there is only one clock
     code_objects = []
     for obj in net.objects:
         for codeobj in obj._code_objects:
             code_objects.append((obj.clock, codeobj))
     
     # Generate the updaters
     run_lines = ['{net.name}.clear();'.format(net=net)]
     for clock, codeobj in code_objects:
         run_lines.append('{net.name}.add(&{clock.name}, _run_{codeobj.name});'.format(clock=clock, net=net,
                                                                                            codeobj=codeobj))
     run_lines.append('{net.name}.run({duration});'.format(net=net, duration=float(duration)))
     self.main_queue.append(('run_network', (net, run_lines)))
Example #5
    def network_run(self, network, duration, report=None, report_period=10*second,
                    namespace=None, profile=True, level=0):
        network._clocks = {obj.clock for obj in network.objects}
        # Get the local namespace
        if namespace is None:
            namespace = get_local_namespace(level=level+2)
        network.before_run(namespace)

        # Extract all the objects present in the network
        descriptions = []
        merged_namespace = {}
        monitors = []
        for obj in network.objects:
            one_description, one_namespace = description(obj, namespace)
            descriptions.append((obj.name, one_description))
            if type(obj) in [StateMonitor, SpikeMonitor]:
                monitors.append(obj)
            for key, value in one_namespace.iteritems():
                if key in merged_namespace and value != merged_namespace[key]:
                    raise ValueError('name "%s" is used inconsistently' % key)
                merged_namespace[key] = value

        self.network = network
        assignments = list(self.assignments)
        self.assignments[:] = []
        self.runs.append((descriptions, duration, merged_namespace, assignments))
        if self.build_on_run:
            if self.has_been_run:
                raise RuntimeError('The network has already been built and run '
                                   'before. Use set_device with '
                                   'build_on_run=False and an explicit '
                                   'device.build call to use multiple run '
                                   'statements with this device.')
            self.build(direct_call=False, **self.build_options)
        self.has_been_run = True
Example #6
    def resolve_all(self,
                    identifiers,
                    user_identifiers=None,
                    additional_variables=None,
                    run_namespace=None,
                    level=0):
        '''
        Resolve a list of identifiers. Calls `Group._resolve` for each
        identifier.

        Parameters
        ----------
        identifiers : iterable of str
            The names to look up.
        user_identifiers : iterable of str, optional
            The names in ``identifiers`` that were provided by the user (i.e.
            are part of user-specified equations, abstract code, etc.). Will
            be used to determine when to issue namespace conflict warnings. If
            not specified, will be assumed to be identical to ``identifiers``.
        additional_variables : dict-like, optional
            An additional mapping of names to `Variable` objects that will be
            checked before `Group.variables`.
        run_namespace : dict-like, optional
            An additional namespace, provided as an argument to the
            `Network.run` method.
        level : int, optional
            How far to go up in the stack to find the original call frame.

        Returns
        -------
        variables : dict of `Variable` or `Function`
            A mapping from name to `Variable`/`Function` object for each of the
            names given in `identifiers`

        Raises
        ------
        KeyError
            If one of the names in ``identifiers`` cannot be resolved
        '''
        if user_identifiers is None:
            user_identifiers = identifiers
        if run_namespace is None:
            run_namespace = get_local_namespace(level=level + 1)
        resolved = {}
        for identifier in identifiers:
            resolved[identifier] = self._resolve(
                identifier,
                user_identifier=identifier in user_identifiers,
                additional_variables=additional_variables,
                run_namespace=run_namespace)
        return resolved
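
A short sketch of the fallback documented above, written against the keyword signature shown in this example; SimpleGroup is the helper stub from Brian2's own namespace tests (see the test_resolution examples below), not public API:

from brian2 import ms

tau = 10 * ms                                     # lives in the calling frame
group = SimpleGroup(namespace=None, variables={})
# With run_namespace=None, resolve_all falls back to
# get_local_namespace(level + 1), i.e. the locals of this frame.
resolved = group.resolve_all(['tau'], user_identifiers=['tau'])
assert resolved['tau'].get_value_with_unit() == 10 * ms
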
Example #7
def test_timedarray_customfunc():
    """
    Test TimedArray and Custom Functions
    """
    # simple timedarray test
    ta = TimedArray([1, 2, 3, 4] * mV, dt=0.1 * ms)
    eqn = 'v = ta(t) :volt'
    G = NeuronGroup(1, eqn, method='euler')
    neuro_dict = collect_NeuronGroup(G, get_local_namespace(0))
    ta_dict = neuro_dict['identifiers']['ta']
    assert ta_dict['name'] == ta.name
    assert (ta_dict['values'] == [1, 2, 3, 4] * mV).all()
    assert float(ta_dict['dt']) == float(ta.dt)
    assert ta_dict['ndim'] == 1
    assert ta_dict['type'] == 'timedarray'

    # test 2
    ta2d = TimedArray([[1, 2], [3, 4], [5, 6]] * mV, dt=1 * ms)
    G2 = NeuronGroup(4, 'v = ta2d(t, i%2) : volt')
    neuro_dict = collect_NeuronGroup(G2, get_local_namespace(0))
    ta_dict = neuro_dict['identifiers']['ta2d']
    assert ta_dict['name'] == ta2d.name
    assert (ta_dict['values'] == [[1, 2], [3, 4], [5, 6]] * mV).all()
    assert float(ta_dict['dt']) == float(ta2d.dt)
    assert ta_dict['ndim'] == 2
    assert ta_dict['type'] == 'timedarray'

    # test 3
    def da(x1, x2):
        return (x1 - x2)

    a = 1 * mV
    b = 1 * mV
    da = Function(da, arg_units=[volt, volt], return_unit=volt)
    grp = NeuronGroup(1, 'v = da(a, b) :volt', method='euler')
    neuro_dict = collect_NeuronGroup(grp, get_local_namespace(0))
    identi = neuro_dict['identifiers']['da']
    assert identi['type'] == 'custom_func'
    assert identi['arg_units'] == da._arg_units
    assert identi['arg_types'] == da._arg_types
    assert identi['return_unit'] == da._return_unit
    assert identi['return_type'] == da._return_type
Example #8
def test_poissongroup():
    """
    Test standard dictionary representation of PoissonGroup
    """

    # example1
    N = 10
    rates = numpy.arange(1, 11, step=1) * Hz

    poisongrp = PoissonGroup(N, rates)
    poisson_dict = collect_PoissonGroup(poisongrp, get_local_namespace(0))

    assert poisson_dict['N'] == N

    assert (poisson_dict['rates'] == rates).all()
    assert poisson_dict['rates'].has_same_dimensions(5 * Hz)
    assert poisson_dict['rates'].dtype == float

    with pytest.raises(KeyError):
        assert poisson_dict['run_regularly']

    # example2
    F = 10 * Hz
    three = 3 * Hz
    two = 2 * Hz
    poisongrp = PoissonGroup(N, rates='F + two')
    poisongrp.run_regularly('F = F + three', dt=10 * ms,
                            name="Run_at_0_01")
    poisson_dict = collect_PoissonGroup(poisongrp, get_local_namespace(0))

    assert poisson_dict['rates'] == 'F + two'
    assert poisson_dict['run_regularly'][0]['name'] == 'Run_at_0_01'
    assert poisson_dict['run_regularly'][0]['code'] == 'F = F + three'
    assert poisson_dict['run_regularly'][0]['dt'] == 10 * ms
    assert poisson_dict['run_regularly'][0]['when'] == 'start'
    assert poisson_dict['run_regularly'][0]['order'] == 0

    assert poisson_dict['identifiers']['three'] == three
    assert poisson_dict['identifiers']['two'] == two

    with pytest.raises(IndexError):
        poisson_dict['run_regularly'][1]
Example #9
def test_spikegenerator():
    """
    Test dictionary representation of SpikeGenerator
    """

    # example 1
    size = 1
    index = [0]
    time = [10] * ms

    spike_gen = SpikeGeneratorGroup(size, index, time)
    spike_gen_dict = collect_SpikeGenerator(spike_gen, get_local_namespace(0))

    assert spike_gen_dict['N'] == size
    assert spike_gen_dict['indices'] == [0]
    assert spike_gen_dict['indices'].dtype == int

    assert spike_gen_dict['times'] == time
    assert spike_gen_dict['times'].has_same_dimensions(10 * ms)
    assert spike_gen_dict['times'].dtype == float

    # example 2
    spike_gen2 = SpikeGeneratorGroup(10, index, time, period=20 * ms)
    var = 0.00002
    spike_gen2.run_regularly('var = var + 1', dt=10 * ms, name='spikerr')
    spike_gen_dict = collect_SpikeGenerator(spike_gen2,
                                            get_local_namespace(0))

    assert spike_gen_dict['N'] == 10
    assert spike_gen_dict['period'] == [20] * ms
    assert spike_gen_dict['period'].has_same_dimensions(20 * ms)
    assert spike_gen_dict['period'].dtype == float

    # (check run_regularly)
    assert spike_gen_dict['run_regularly'][0]['name'] == 'spikerr'
    assert spike_gen_dict['run_regularly'][0]['code'] == 'var = var + 1'
    assert spike_gen_dict['run_regularly'][0]['dt'] == 10 * ms
    assert spike_gen_dict['run_regularly'][0]['when'] == 'start'
    assert spike_gen_dict['run_regularly'][0]['order'] == 0
    assert spike_gen_dict['identifiers']['var'] == var
    with pytest.raises(IndexError):
        spike_gen_dict['run_regularly'][1]
Example #10
def test_resolution():
    # implicit namespace
    tau = 10 * ms
    group = SimpleGroup(namespace=None, variables={})
    namespace = get_local_namespace(level=0)
    resolved = group.resolve_all(['tau', 'ms'],
                                 namespace,
                                 user_identifiers=['tau', 'ms'])
    assert len(resolved) == 2
    assert isinstance(resolved, dict)
    assert resolved['tau'].get_value_with_unit() == tau
    assert resolved['ms'].get_value_with_unit() == ms
    del tau

    # explicit namespace
    group = SimpleGroup(namespace={'tau': 20 * ms}, variables={})
    namespace = get_local_namespace(level=0)
    resolved = group.resolve_all(['tau', 'ms'], namespace, ['tau', 'ms'])
    assert len(resolved) == 2
    assert resolved['tau'].get_value_with_unit() == 20 * ms
Example #11
def test_resolution():
    # implicit namespace
    tau = 10*ms
    group = SimpleGroup(namespace=None, variables={})
    namespace = get_local_namespace(level=0)
    resolved = group.resolve_all(['tau', 'ms'], namespace,
                                 user_identifiers=['tau', 'ms'])
    assert len(resolved) == 2
    assert type(resolved) == type(dict())
    assert resolved['tau'].get_value_with_unit() == tau
    assert resolved['ms'].get_value_with_unit() == ms
    del tau

    # explicit namespace
    group = SimpleGroup(namespace={'tau': 20 * ms}, variables={})
    namespace = get_local_namespace(level=0)
    resolved = group.resolve_all(['tau', 'ms'], namespace,
                                 ['tau', 'ms'])
    assert len(resolved) == 2
    assert resolved['tau'].get_value_with_unit() == 20 * ms
Example #12
    def resolve_all(self, identifiers, user_identifiers=None,
                    additional_variables=None, run_namespace=None, level=0):
        '''
        Resolve a list of identifiers. Calls `Group._resolve` for each
        identifier.

        Parameters
        ----------
        identifiers : iterable of str
            The names to look up.
        user_identifiers : iterable of str, optional
            The names in ``identifiers`` that were provided by the user (i.e.
            are part of user-specified equations, abstract code, etc.). Will
            be used to determine when to issue namespace conflict warnings. If
            not specified, will be assumed to be identical to ``identifiers``.
        additional_variables : dict-like, optional
            An additional mapping of names to `Variable` objects that will be
            checked before `Group.variables`.
        run_namespace : dict-like, optional
            An additional namespace, provided as an argument to the
            `Network.run` method.
        level : int, optional
            How far to go up in the stack to find the original call frame.

        Returns
        -------
        variables : dict of `Variable` or `Function`
            A mapping from name to `Variable`/`Function` object for each of the
            names given in `identifiers`

        Raises
        ------
        KeyError
            If one of the names in ``identifiers`` cannot be resolved
        '''
        if user_identifiers is None:
            user_identifiers = identifiers
        if run_namespace is None:
            run_namespace = get_local_namespace(level=level+1)
        resolved = {}
        for identifier in identifiers:
            resolved[identifier] = self._resolve(identifier,
                                                 user_identifier=identifier in user_identifiers,
                                                 additional_variables=additional_variables,
                                                 run_namespace=run_namespace)
        return resolved
Example #13
    def get_with_expression(self, group, variable_name, variable, code, level=0):
        '''
        Gets a variable using a string expression. Is called by
        `VariableView.get_item` for statements such as
        ``print G.v['g_syn > 0']``.

        Parameters
        ----------
        group : `Group`
            The group providing the context for the indexing.
        variable_name : str
            The name of the variable in its context (e.g. ``'g_post'`` for a
            variable with name ``'g'``)
        variable : `ArrayVariable`
            The `ArrayVariable` object for the variable to be set
        code : str
            An expression that states a condition for elements that should be
            selected. Can contain references to indices, such as ``i`` or ``j``
            and to state variables. For example: ``'i>3 and v>0*mV'``.
        level : int, optional
            How much farther to go up in the stack to find the namespace.
        '''
        # interpret the string expression
        namespace = get_local_namespace(level+1)
        additional_namespace = ('implicit-namespace', namespace)
        # Add the recorded variable under a known name to the variables
        # dictionary. Important to deal correctly with
        # the type of the variable in C++
        variables = Variables(None)
        variables.add_auxiliary_variable('_variable', unit=variable.unit,
                                         dtype=variable.dtype,
                                         scalar=variable.scalar,
                                         is_bool=variable.is_bool)
        variables.add_auxiliary_variable('_cond', unit=Unit(1), dtype=np.bool,
                                         is_bool=True)

        abstract_code = '_variable = ' + variable_name + '\n'
        abstract_code += '_cond = ' + code
        check_code_units(abstract_code, group,
                         additional_namespace=additional_namespace,
                         additional_variables=variables)
        codeobj = create_runner_codeobj(group,
                                        abstract_code,
                                        'group_variable_get_conditional',
                                        additional_variables=variables,
                                        additional_namespace=additional_namespace,
                                        )
        return codeobj()
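
The docstring's G.v['g_syn > 0'] pattern in a minimal, self-contained form (a sketch assuming the default runtime device; cutoff is a hypothetical name resolved from the surrounding namespace):

from brian2 import NeuronGroup, mV

G = NeuronGroup(5, 'v : volt')
G.v = '(i - 2) * mV'            # -2, -1, 0, 1, 2 mV
cutoff = 0 * mV                 # hypothetical external name
print(G.v['v > cutoff'])        # only the values where the condition holds
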
Example #14
    def set_with_expression_conditional(self, group, varname, variable, cond,
                                        code, check_units=True, level=0):
        '''
        Sets a variable using a string expression and string condition. Is
        called by `VariableView.set_item` for statements such as
        ``S.var['i!=j'] = 'exp(-abs(i-j)/space_constant)*nS'``

        Parameters
        ----------
        group : `Group`
            The group providing the context for the indexing.
        varname : str
            The name of the variable to be set.
        variable : `ArrayVariable`
            The `ArrayVariable` object for the variable to be set.
        cond : str
            The string condition for which the variables should be set.
        code : str
            The code that should be executed to set the variable values.
        check_units : bool, optional
            Whether to check the units of the expression.
        level : int, optional
            How much farther to go up in the stack to find the namespace.
        '''

        abstract_code_cond = '_cond = '+cond
        abstract_code = varname + ' = ' + code
        namespace = get_local_namespace(level + 1)
        additional_namespace = ('implicit-namespace', namespace)
        variables = Variables(None)
        variables.add_auxiliary_variable('_cond', unit=Unit(1), dtype=np.bool,
                                         is_bool=True)
        check_code_units(abstract_code_cond, group,
                         additional_variables=variables,
                         additional_namespace=additional_namespace)
        # TODO: Have an additional argument to avoid going through the index
        # array for situations where iterate_all could be used
        codeobj = create_runner_codeobj(group,
                                 {'condition': abstract_code_cond,
                                  'statement': abstract_code},
                                 'group_variable_set_conditional',
                                 additional_variables=variables,
                                 additional_namespace=additional_namespace,
                                 check_units=check_units)
        codeobj()
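
A minimal sketch of the conditional set handled above (the S.var['i!=j'] = '...' pattern from the docstring), shown on a NeuronGroup for brevity; scale is a hypothetical name found via the implicit namespace:

from brian2 import NeuronGroup, mV

G = NeuronGroup(10, 'v : volt')
scale = 2 * mV                  # hypothetical external name
# Only elements satisfying the condition string are assigned; the
# right-hand side is itself evaluated as abstract code per element.
G.v['i > 4'] = 'scale * i'
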
Example #15
def test_warning():
    from brian2.core.functions import DEFAULT_FUNCTIONS
    from brian2.units.stdunits import cm as brian_cm
    # Name in external namespace clashes with unit/function name
    exp = 23
    cm = 42
    group = SimpleGroup(namespace=None, variables={})
    namespace = get_local_namespace(level=0)
    with catch_logs() as l:
        resolved = group.resolve_all(['exp'], namespace)['exp']
        assert resolved == DEFAULT_FUNCTIONS['exp']
        assert len(l) == 1, 'got warnings: %s' % str(l)
        assert l[0][1].endswith('.resolution_conflict')
    with catch_logs() as l:
        resolved = group.resolve_all(['cm'], namespace)['cm']
        assert resolved.get_value_with_unit() == brian_cm
        assert len(l) == 1, 'got warnings: %s' % str(l)
        assert l[0][1].endswith('.resolution_conflict')
Example #16
def test_warning():
    from brian2.core.functions import DEFAULT_FUNCTIONS
    from brian2.units.stdunits import cm as brian_cm
    # Name in external namespace clashes with unit/function name
    exp = 23
    cm = 42
    group = SimpleGroup(namespace=None, variables={})
    namespace = get_local_namespace(level=0)
    with catch_logs() as l:
        resolved = group.resolve_all(['exp'], namespace)['exp']
        assert resolved == DEFAULT_FUNCTIONS['exp']
        assert len(l) == 1, 'got warnings: %s' % str(l)
        assert l[0][1].endswith('.resolution_conflict')
    with catch_logs() as l:
        resolved = group.resolve_all(['cm'], namespace)['cm']
        assert resolved.get_value_with_unit() == brian_cm
        assert len(l) == 1, 'got warnings: %s' % str(l)
        assert l[0][1].endswith('.resolution_conflict')
Example #17
def test_warning():
    from brian2.core.functions import DEFAULT_FUNCTIONS
    from brian2.units.stdunits import cm as brian_cm
    # Name in external namespace clashes with unit/function name
    exp = 23
    cm = 42
    namespace = create_namespace()
    local_ns = get_local_namespace(level=0)
    with catch_logs() as l:
        resolved = namespace.resolve('exp', ('implicit namespace', local_ns))
        assert resolved == DEFAULT_FUNCTIONS['exp']
        assert len(l) == 1
        assert l[0][1].endswith('.resolution_conflict')
    with catch_logs() as l:
        resolved = namespace.resolve('cm', ('implicit namespace', local_ns))
        assert resolved == brian_cm
        assert len(l) == 1
        assert l[0][1].endswith('.resolution_conflict')
Example #18
    def set_with_expression(self, group, varname, variable, item, code,
                            check_units=True, level=0):
        '''
        Sets a variable using a string expression. Is called by
        `VariableView.set_item` for statements such as
        ``S.var[:, :] = 'exp(-abs(i-j)/space_constant)*nS'``

        Parameters
        ----------
        group : `Group`
            The group providing the context for the indexing.
        varname : str
            The name of the variable to be set
        variable : `ArrayVariable`
            The `ArrayVariable` object for the variable to be set.
        item : `ndarray`
            The indices for the variable (in the context of this `group`).
        code : str
            The code that should be executed to set the variable values.
            Can contain references to indices, such as `i` or `j`
        check_units : bool, optional
            Whether to check the units of the expression.
        level : int, optional
            How much farther to go up in the stack to find the namespace.
        '''
        indices = group.calc_indices(item)
        abstract_code = varname + ' = ' + code
        namespace = get_local_namespace(level + 1)
        additional_namespace = ('implicit-namespace', namespace)
        variables = Variables(None)
        variables.add_array('_group_idx', unit=Unit(1),
                            size=len(indices), dtype=np.int32)
        variables['_group_idx'].set_value(indices)

        # TODO: Have an additional argument to avoid going through the index
        # array for situations where iterate_all could be used
        codeobj = create_runner_codeobj(group,
                                 abstract_code,
                                 'group_variable_set',
                                 additional_variables=variables,
                                 additional_namespace=additional_namespace,
                                 check_units=check_units)
        codeobj()
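
The unconditional variant handled by set_with_expression above: a slice (or index array) selects the elements first, then the string expression is evaluated for exactly those elements (a sketch under the default runtime device):

from brian2 import NeuronGroup, mV

G = NeuronGroup(10, 'v : volt')
G.v[2:5] = '(i + 1) * mV'       # i is the absolute element index, so 3, 4, 5 mV
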
Example #19
    def _set_with_code(self, variable, group_indices, code,
                       check_units=True, level=0):
        '''
        Sets a variable using a string expression. Is called by
        `VariableView.__setitem__` for statements such as
        `S.var[:, :] = 'exp(-abs(i-j)/space_constant)*nS'`

        Parameters
        ----------
        variable : `ArrayVariable`
            The `Variable` for the variable to be set
        group_indices : ndarray of int
            The indices of the elements that are to be set.
        code : str
            The code that should be executed to set the variable values.
            Can contain references to indices, such as `i` or `j`
        check_units : bool, optional
            Whether to check the units of the expression.
        level : int, optional
            How much farther to go down in the stack to find the namespace.
            Necessary so that both `X.var = ` and `X.var[:] = ` have access
            to the surrounding namespace.
        '''
        abstract_code = variable.name + ' = ' + code
        namespace = get_local_namespace(level + 1)
        additional_namespace = ('implicit-namespace', namespace)
        # TODO: Find a name that makes sense for reset and variable setting
        # with code
        additional_variables = self.item_mapping.variables
        additional_variables['_spikes'] = ArrayVariable('_spikes',
                                                         Unit(1),
                                                         value=group_indices.astype(np.int32),
                                                         group_name=self.name)
        # TODO: Have an additional argument to avoid going through the index
        # array for situations where iterate_all could be used
        codeobj = create_runner_codeobj(self,
                                 abstract_code,
                                 'reset',
                                 additional_variables=additional_variables,
                                 additional_namespace=additional_namespace,
                                 check_units=check_units)
        codeobj()
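
The level bookkeeping noted in the docstring exists so that both spellings below find names in the same user frame; a sketch of the bracket-less form (this older internal _set_with_code was later superseded by set_with_expression, shown in the previous example). offset is a hypothetical user variable:

from brian2 import NeuronGroup, mV

G = NeuronGroup(10, 'v : volt')
offset = 3 * mV                 # resolved from the implicit namespace
G.v = 'offset + i * mV'         # same machinery as G.v[:] = '...'
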
Example #20
    def __getitem__(self, item):
        if isinstance(item, basestring):
            variables = Variables(None)
            variables.add_auxiliary_variable('_indices', dtype=np.int32)
            variables.add_auxiliary_variable('_cond', dtype=np.bool)

            abstract_code = '_cond = ' + item
            namespace = get_local_namespace(level=1)
            from brian2.devices.device import get_device
            device = get_device()
            codeobj = create_runner_codeobj(self.group,
                                            abstract_code,
                                            'group_get_indices',
                                            run_namespace=namespace,
                                            additional_variables=variables,
                                            codeobj_class=device.code_object_class(fallback_pref='codegen.string_expression_target')
                                            )
            return codeobj()
        else:
            return self.indices(item)
Example #21
    def __getitem__(self, item):
        if isinstance(item, str):
            variables = Variables(None)
            variables.add_auxiliary_variable('_indices', dtype=np.int32)
            variables.add_auxiliary_variable('_cond', dtype=bool)

            abstract_code = '_cond = ' + item
            namespace = get_local_namespace(level=1)
            from brian2.devices.device import get_device
            device = get_device()
            codeobj = create_runner_codeobj(self.group,
                                            abstract_code,
                                            'group_get_indices',
                                            run_namespace=namespace,
                                            additional_variables=variables,
                                            codeobj_class=device.code_object_class(fallback_pref='codegen.string_expression_target')
                                            )
            return codeobj()
        else:
            return self.indices(item)
Example #22
File: group.py Project: yger/brian2
    def __getitem__(self, item):
        if isinstance(item, basestring):
            namespace = get_local_namespace(1)
            additional_namespace = ('implicit-namespace', namespace)
            variables = Variables(None)
            variables.add_auxiliary_variable('_indices', unit=Unit(1),
                                             dtype=np.int32)
            variables.add_auxiliary_variable('_cond', unit=Unit(1),
                                             dtype=np.bool,
                                             is_bool=True)

            abstract_code = '_cond = ' + item
            check_code_units(abstract_code, self.group,
                             additional_namespace=additional_namespace,
                             additional_variables=variables)
            codeobj = create_runner_codeobj(self.group,
                                            abstract_code,
                                            'group_get_indices',
                                            additional_variables=variables,
                                            additional_namespace=additional_namespace,
                                            )
            return codeobj()
        else:
            return self.group.calc_indices(item)
Example #23
def test_Subgroup():
    """
    Test Subgroup
    """
    eqn = '''
    dv/dt = (1 - v) / tau :1
    '''
    tau = 10 * ms
    group = NeuronGroup(10, eqn, threshold='v>10', reset='v=0', method='euler')
    sub_a = group[0:5]
    sub_b = group[0:10]
    syn = Synapses(sub_a, sub_b)
    syn.connect(p=0.8)
    mon = StateMonitor(group[0:7], 'v', record=1)
    sub_a.v = 1
    sub_b.v = -1
    mon_dict = collect_StateMonitor(mon)
    assert mon_dict['source']['group'] == group.name
    assert mon_dict['source']['start'] == 0
    assert mon_dict['source']['stop'] == 6
    syn_dict = collect_Synapses(syn, get_local_namespace(0))
    assert syn_dict['source']['group'] == group.name
    assert syn_dict['target']['start'] == 0
    assert syn_dict['source']['stop'] == 4
Example #24
    def network_run(
        self, net, duration, report=None, report_period=10 * second, namespace=None, profile=True, level=0, **kwds
    ):
        if kwds:
            logger.warn(("Unsupported keyword argument(s) provided for run: " "%s") % ", ".join(kwds.keys()))
        net._clocks = {obj.clock for obj in net.objects}
        t_end = net.t + duration
        for clock in net._clocks:
            clock.set_interval(net.t, t_end)

        # Get the local namespace
        if namespace is None:
            namespace = get_local_namespace(level=level + 2)

        net.before_run(namespace)

        self.clocks.update(net._clocks)
        net.t_ = float(t_end)

        # TODO: remove this horrible hack
        for clock in self.clocks:
            if clock.name == "clock":
                clock._name = "_clock"

        # Extract all the CodeObjects
        # Note that since we ran the Network object, these CodeObjects will be sorted into the right
        # running order, assuming that there is only one clock
        code_objects = []
        for obj in net.objects:
            for codeobj in obj._code_objects:
                code_objects.append((obj.clock, codeobj))

        # Code for a progress reporting function
        standard_code = """
        void report_progress(const double elapsed, const double completed, const double duration)
        {
            if (completed == 0.0)
            {
                %STREAMNAME% << "Starting simulation for duration " << duration << " s";
            } else
            {
                %STREAMNAME% << completed*duration << " s (" << (int)(completed*100.) << "%) simulated in " << elapsed << " s";
                if (completed < 1.0)
                {
                    const int remaining = (int)((1-completed)/completed*elapsed+0.5);
                    %STREAMNAME% << ", estimated " << remaining << " s remaining.";
                }
            }

            %STREAMNAME% << std::endl << std::flush;
        }
        """
        if report is None:
            self.report_func = ""
        elif report == "text" or report == "stdout":
            self.report_func = standard_code.replace("%STREAMNAME%", "std::cout")
        elif report == "stderr":
            self.report_func = standard_code.replace("%STREAMNAME%", "std::cerr")
        elif isinstance(report, basestring):
            self.report_func = """
            void report_progress(const double elapsed, const double completed, const double duration)
            {
            %REPORT%
            }
            """.replace(
                "%REPORT%", report
            )
        else:
            raise TypeError(
                ('report argument has to be either "text", ' '"stdout", "stderr", or the code for a report ' "function")
            )

        if report is not None:
            report_call = "report_progress"
        else:
            report_call = "NULL"

        # Generate the updaters
        run_lines = ["{net.name}.clear();".format(net=net)]
        for clock, codeobj in code_objects:
            run_lines.append(
                "{net.name}.add(&{clock.name}, _run_{codeobj.name});".format(clock=clock, net=net, codeobj=codeobj)
            )
        run_lines.append(
            "{net.name}.run({duration}, {report_call}, {report_period});".format(
                net=net, duration=float(duration), report_call=report_call, report_period=float(report_period)
            )
        )
        self.main_queue.append(("run_network", (net, run_lines)))

        # Manually set the cache for the clocks, simulation scripts might
        # want to access the time (which has been set in code and is therefore
        # not accessible by the normal means until the code has been built and
        # run)
        for clock in net._clocks:
            self.array_cache[clock.variables["timestep"]] = np.array([clock._i_end])
            self.array_cache[clock.variables["t"]] = np.array([clock._i_end * clock.dt_])
Example #25
    def _resolve_external(self,
                          identifier,
                          run_namespace=None,
                          level=0,
                          do_warn=True):
        '''
        Resolve an external identifier in the context of a `Group`. If the `Group`
        declares an explicit namespace, this namespace is used in addition to the
        standard namespace for units and functions. Additionally, the namespace in
        the `run_namespace` argument (i.e. the namespace provided to `Network.run`)
        or, if this argument is unspecified, the implicit namespace of
        surrounding variables in the stack frame where the original call was made
        is used (to determine this stack frame, the `level` argument has to be set
        correctly).

        Parameters
        ----------
        identifier : str
            The name to resolve.
        run_namespace : dict, optional
            A namespace (mapping from strings to objects), as provided as an
            argument to the `Network.run` function.
        level : int, optional
            How far to go up in the stack to find the calling frame.
        do_warn : bool, optional
            Whether to display a warning if an identifier resolves to different
            objects in different namespaces. Defaults to ``True``.
        '''
        # We save tuples of (namespace description, referred object) to
        # give meaningful warnings in case of duplicate definitions
        matches = []

        namespaces = OrderedDict()
        # Default namespaces (units and functions)
        namespaces['constants'] = DEFAULT_CONSTANTS
        namespaces['units'] = DEFAULT_UNITS
        namespaces['functions'] = DEFAULT_FUNCTIONS
        if getattr(self, 'namespace', None) is not None:
            namespaces['group-specific'] = self.namespace

        # explicit or implicit run namespace
        if run_namespace is not None:
            namespaces['run'] = run_namespace
        else:
            namespaces['implicit'] = get_local_namespace(level + 1)

        for description, namespace in namespaces.iteritems():
            if identifier in namespace:
                matches.append((description, namespace[identifier]))

        if len(matches) == 0:
            # No match at all
            raise KeyError(
                ('The identifier "%s" could not be resolved.') % (identifier))
        elif len(matches) > 1:
            # Possibly, all matches refer to the same object
            first_obj = matches[0][1]
            found_mismatch = False
            for m in matches:
                if _same_value(m[1], first_obj):
                    continue
                if _same_function(m[1], first_obj):
                    continue
                try:
                    proxy = weakref.proxy(first_obj)
                    if m[1] is proxy:
                        continue
                except TypeError:
                    pass

                # Found a mismatch
                found_mismatch = True
                break

            if found_mismatch and do_warn:
                _conflict_warning(
                    ('The name "%s" refers to different objects '
                     'in different namespaces used for resolving '
                     'names in the context of group "%s". '
                     'Will use the object from the %s namespace '
                     'with the value %r') %
                    (identifier, getattr(
                        self, 'name', '<unknown>'), matches[0][0], first_obj),
                    matches[1:])

        # use the first match (according to resolution order)
        resolved = matches[0][1]

        # Replace pure Python functions by a Functions object
        if callable(resolved) and not isinstance(resolved, Function):
            resolved = Function(resolved)

        if not isinstance(resolved, (Function, Variable)):
            # Wrap the value in a Constant object
            unit = get_unit(resolved)
            value = np.asarray(resolved)
            if value.shape != ():
                raise KeyError('Variable %s was found in the namespace, but is'
                               ' not a scalar value' % identifier)
            resolved = Constant(identifier, unit=unit, value=value)

        return resolved
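
A hedged illustration of the resolution order described in the docstring: an explicit run namespace replaces the implicit lookup in the calling frame, so a clashing local value is ignored (the warning path for genuinely ambiguous names is exercised by the test_warning examples above):

from brian2 import NeuronGroup, Network, ms

tau = 10 * ms                                  # implicit (frame) value, ignored below
G = NeuronGroup(1, 'dv/dt = -v / tau : 1')
net = Network(G)
# 'tau' resolves to 20 ms: the explicit run namespace is consulted
# instead of the surrounding frame.
net.run(1 * ms, namespace={'tau': 20 * ms})
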
Example #26
    def run(self,
            duration,
            report=None,
            report_period=10 * second,
            namespace=None,
            profile=True,
            level=0):
        '''
        run(duration, report=None, report_period=10*second, namespace=None, profile=True, level=0)
        
        Runs the simulation for the given duration.
        
        Parameters
        ----------
        duration : `Quantity`
            The amount of simulation time to run for.
        report : {None, 'text', 'stdout', 'stderr', function}, optional
            How to report the progress of the simulation. If ``None``, do not
            report progress. If ``'text'`` or ``'stdout'`` is specified, print
            the progress to stdout. If ``'stderr'`` is specified, print the
            progress to stderr. Alternatively, you can specify a callback
            ``callable(elapsed, complete, start, duration)`` which will be
            passed the amount of real time elapsed as a `Quantity`, the
            fraction complete from 0.0 to 1.0, the start time of the
            simulation and its total duration (both in biological time).
            The function will always be called at the beginning and the end
            (i.e. for fractions 0.0 and 1.0), regardless of the `report_period`.
        report_period : `Quantity`
            How frequently (in real time) to report progress.
        namespace : dict-like, optional
            A namespace that will be used in addition to the group-specific
            namespaces (if defined). If not specified, the locals
            and globals around the run function will be used.
        profile : bool, optional
            Whether to record profiling information (see
            `Network.profiling_info`). Defaults to ``True``.
        level : int, optional
            How far to go up in the stack frame to look for the locals/globals
            (see `namespace` argument). Only used by run functions that call
            this run function, e.g. `MagicNetwork.run` to adjust for the
            additional nesting.

        Notes
        -----
        The simulation can be stopped by calling `Network.stop` or the
        global `stop` function.
        '''
        self._clocks = set([obj.clock for obj in self.objects])
        # We get direct references to the underlying variables for all clocks
        # to avoid expensive access during the run loop
        self._clock_variables = {
            c: (c.variables['timestep'].get_value(),
                c.variables['t'].get_value(), c.variables['dt'].get_value())
            for c in self._clocks
        }
        t_start = self.t
        t_end = self.t + duration
        for clock in self._clocks:
            clock.set_interval(self.t, t_end)

        # Get the local namespace
        if namespace is None:
            namespace = get_local_namespace(level=level + 3)

        self.before_run(namespace)

        if len(self.objects) == 0:
            return  # TODO: raise an error? warning?

        # Find the first clock to be updated (see note below)
        clock, curclocks = self._nextclocks()
        start_time = time.time()

        logger.debug(
            "Simulating network '%s' from time %s to %s." %
            (self.name, t_start, t_end), 'run')

        if report is not None:
            report_period = float(report_period)
            next_report_time = start_time + report_period
            if report == 'text' or report == 'stdout':
                report_callback = TextReport(sys.stdout)
            elif report == 'stderr':
                report_callback = TextReport(sys.stderr)
            elif isinstance(report, basestring):
                raise ValueError(('Do not know how to handle report argument '
                                  '"%s".' % report))
            elif callable(report):
                report_callback = report
            else:
                raise TypeError(('Do not know how to handle report argument, '
                                 'it has to be one of "text", "stdout", '
                                 '"stderr", or a callable function/object, '
                                 'but it is of type %s') % type(report))
            report_callback(0 * second, 0.0, t_start, duration)

        profiling_info = defaultdict(float)

        timestep, _, _ = self._clock_variables[clock]
        running = timestep[0] < clock._i_end
        while running and not self._stopped and not Network._globally_stopped:
            timestep, t, dt = self._clock_variables[clock]
            # update the network time to this clock's time
            self.t_ = t[0]
            if report is not None:
                current = time.time()
                if current > next_report_time:
                    report_callback((current - start_time) * second,
                                    (self.t_ - float(t_start)) / float(t_end - t_start),
                                    t_start, duration)
                    next_report_time = current + report_period
                # update the objects with this clock
            for obj in self.objects:
                if obj._clock in curclocks and obj.active:
                    if profile:
                        obj_time = time.time()
                        obj.run()
                        profiling_info[obj.name] += (time.time() - obj_time)
                    else:
                        obj.run()

            # tick the clock forward one time step
            for c in curclocks:
                timestep, t, dt = self._clock_variables[c]
                timestep[0] += 1
                t[0] = timestep[0] * dt[0]

            # find the next clocks to be updated. The < operator for Clock
            # determines that the first clock to be updated should be the one
            # with the smallest t value, unless there are several with the
            # same t value in which case we update all of them
            clock, curclocks = self._nextclocks()

            if device._maximum_run_time is not None and time.time(
            ) - start_time > float(device._maximum_run_time):
                self._stopped = True
            else:
                timestep, _, _ = self._clock_variables[clock]
                running = timestep[0] < clock._i_end

        end_time = time.time()
        if self._stopped or Network._globally_stopped:
            self.t_ = clock.t_
        else:
            self.t_ = float(t_end)

        device._last_run_time = end_time - start_time
        if duration > 0:
            device._last_run_completed_fraction = (self.t - t_start) / duration
        else:
            device._last_run_completed_fraction = 1.0

        # check for nans
        for obj in self.objects:
            if isinstance(obj, Group):
                obj._check_for_invalid_states()

        if report is not None:
            report_callback((end_time - start_time) * second, 1.0, t_start,
                            duration)
        self.after_run()

        logger.debug(("Finished simulating network '%s' "
                      "(took %.2fs)") % (self.name, end_time - start_time),
                     'run')
        # Store profiling info (or erase old info to avoid confusion)
        if profile:
            self._profiling_info = [(name, t * second)
                                    for name, t in profiling_info.iteritems()]
            # Dump a profiling summary to the log
            logger.debug('\n' + str(profiling_summary(self)))
        else:
            self._profiling_info = None
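
A sketch of a custom progress reporter for run(); the callback signature follows the call sites in the implementation above (elapsed wall-clock time, completed fraction, simulation start time, total duration), and my_report is a hypothetical user function:

from brian2 import NeuronGroup, Network, ms, second

def my_report(elapsed, completed, start, duration):
    # elapsed is a Quantity of real time, completed is a float in [0, 1]
    print('%3.0f%% done (elapsed %s)' % (completed * 100, elapsed))

G = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
net = Network(G)
net.run(100 * ms, report=my_report, report_period=1 * second)
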
Example #27
def test_Synapses():
    """
    Test cases to verify standard export on Synapses
    """
    # check simple Synapses
    eqn = 'dv/dt = (1 - v)/tau :1'
    tau = 1 * ms
    P = NeuronGroup(1, eqn, method='euler', threshold='v>0.7')
    Q = NeuronGroup(1, eqn, method='euler')
    w = 1
    S = Synapses(P, Q, on_pre='v += w')
    syn_dict = collect_Synapses(S, get_local_namespace(0))

    assert syn_dict['name'] == S.name

    pathways = syn_dict['pathways'][0]
    assert pathways['dt'] == S._pathways[0].clock.dt
    assert pathways['prepost'] == 'pre'
    assert pathways['source'] == P.name
    assert pathways['target'] == Q.name
    assert pathways['order'] == -1
    assert pathways['when'] == 'synapses'
    assert pathways['code'] == 'v += w'
    assert pathways['event'] == 'spike'
    with pytest.raises(KeyError):
        syn_dict['equations']
    with pytest.raises(KeyError):
        syn_dict['user_method']
    with pytest.raises(KeyError):
        syn_dict['summed_variables']
    with pytest.raises(KeyError):
        syn_dict['identifiers']
    with pytest.raises(KeyError):
        pathways['delay']

    # test 2: check pre, post, eqns, identifiers and summed variables
    start_scope()
    eqn = '''
    dv/dt = (1 - v)/tau :1
    summ_v :1
    '''
    tau = 1 * ms
    P = NeuronGroup(1, eqn, method='euler', threshold='v>0.7')
    Q = NeuronGroup(1, eqn, method='euler', threshold='v>0.9')
    eqn = '''
    dvar/dt = -var/tau :1 (event-driven)
    dvarr/dt = -varr/tau :1 (clock-driven)
    w = 1 :1
    summ_v_pre = kiki :1 (summed)
    '''
    kiki = 0.01
    preki = 0
    postki = -0.01
    S = Synapses(P,
                 Q,
                 eqn,
                 on_pre='v += preki',
                 on_post='v -= w + postki',
                 delay=2 * ms,
                 method='euler')
    syn_dict = collect_Synapses(S, get_local_namespace(0))

    var = syn_dict['equations']['var']
    assert var['type'] == 'differential equation'
    assert var['var_type'] == 'float'
    assert var['expr'] == '-var/tau'
    assert var['flags'][0] == 'event-driven'
    varr = syn_dict['equations']['varr']
    assert varr['type'] == 'differential equation'
    assert varr['var_type'] == 'float'
    assert varr['expr'] == '-varr/tau'
    assert varr['flags'][0] == 'clock-driven'
    assert syn_dict['equations']['w']['type'] == 'subexpression'
    assert syn_dict['equations']['w']['expr'] == '1'
    assert syn_dict['equations']['w']['var_type'] == 'float'
    assert syn_dict['summed_variables'][0]['target'] == P.name
    pre_path = syn_dict['pathways'][0]
    post_path = syn_dict['pathways'][1]
    assert pre_path['delay'] == 2 * ms
    assert pre_path['prepost'] == 'pre'
    assert pre_path['code'] == 'v += preki'
    assert post_path['prepost'] == 'post'
    assert post_path['code'] == 'v -= w + postki'
    with pytest.raises(KeyError):
        post_path['delay']
    assert syn_dict['user_method'] == 'euler'
    assert syn_dict['identifiers']['preki'] == 0
    assert syn_dict['identifiers']['postki'] == -0.01
Example #28
    def network_run(self,
                    net,
                    duration,
                    report=None,
                    report_period=10 * second,
                    namespace=None,
                    profile=None,
                    level=0,
                    **kwds):

        if not prefs.devices.cuda_standalone.profile is None:
            self.profile = prefs.devices.cuda_standalone.profile
            if not profile is None:
                logger.warn(
                    "Got `profile` argumtent in `network_run` and `prefs.devices.cuda_standalone.profile` set. "
                    "Ignoring the `network_run` argument.")
        elif not profile is None:
            if not isinstance(profile, (bool, basestring)) or (isinstance(
                    profile, basestring) and profile != 'blocking'):
                raise ValueError(
                    "network_run got an unexpected value for `profile`. It must be a bool or "
                    "'blocking'. Got {} ({}) instead.".format(
                        profile, type(profile)))
            self.profile = profile
        else:
            self.profile = False  # the default

        ###################################################
        ### This part is copied from CPPStandaloneDevice ###
        ###################################################
        if kwds:
            logger.warn(('Unsupported keyword argument(s) provided for run: '
                         '%s') % ', '.join(kwds.keys()))
        net._clocks = {obj.clock for obj in net.objects}
        t_end = net.t + duration
        for clock in net._clocks:
            clock.set_interval(net.t, t_end)

        # Get the local namespace
        if namespace is None:
            namespace = get_local_namespace(level=level + 2)

        net.before_run(namespace)

        self.clocks.update(net._clocks)
        net.t_ = float(t_end)

        # TODO: remove this horrible hack
        for clock in self.clocks:
            if clock.name == 'clock':
                clock._name = '_clock'

        # Extract all the CodeObjects
        # Note that since we ran the Network object, these CodeObjects will be sorted into the right
        # running order, assuming that there is only one clock
        code_objects = []
        for obj in net.objects:
            if obj.active:
                for codeobj in obj._code_objects:
                    code_objects.append((obj.clock, codeobj))

        # Code for a progress reporting function
        standard_code = '''
        void report_progress(const double elapsed, const double completed, const double start, const double duration)
        {
            if (completed == 0.0)
            {
                %STREAMNAME% << "Starting simulation at t=" << start << " s for duration " << duration << " s";
            } else
            {
                %STREAMNAME% << completed*duration << " s (" << (int)(completed*100.) << "%) simulated in " << elapsed << " s";
                if (completed < 1.0)
                {
                    const int remaining = (int)((1-completed)/completed*elapsed+0.5);
                    %STREAMNAME% << ", estimated " << remaining << " s remaining.";
                }
            }

            %STREAMNAME% << std::endl << std::flush;
        }
        '''
        if report is None:
            report_func = ''
        elif report == 'text' or report == 'stdout':
            report_func = standard_code.replace('%STREAMNAME%', 'std::cout')
        elif report == 'stderr':
            report_func = standard_code.replace('%STREAMNAME%', 'std::cerr')
        elif isinstance(report, basestring):
            report_func = '''
            void report_progress(const double elapsed, const double completed, const double start, const double duration)
            {
            %REPORT%
            }
            '''.replace('%REPORT%', report)
        else:
            raise TypeError(('report argument has to be either "text", '
                             '"stdout", "stderr", or the code for a report '
                             'function'))

        if report_func != '':
            if self.report_func != '' and report_func != self.report_func:
                raise NotImplementedError('The C++ standalone device does not '
                                          'support multiple report functions, '
                                          'each run has to use the same (or '
                                          'none).')
            self.report_func = report_func

        if report is not None:
            report_call = 'report_progress'
        else:
            report_call = 'NULL'

        ##############################################################
        ### From here on the code differs from CPPStandaloneDevice ###
        ##############################################################

        # For profiling variables we need a unique set of all active objects in the simulation over possibly multiple runs
        self.active_objects.update([obj[1].name for obj in code_objects])

        # Generate the updaters
        run_lines = ['{net.name}.clear();'.format(net=net)]

        # create all random numbers needed for the next clock cycle
        for clock in net._clocks:
            run_lines.append(
                '{net.name}.add(&{clock.name}, _run_random_number_generation, &random_number_generation_timer_start, '
                '&random_number_generation_timer_stop, &random_number_generation_profiling_info);'
                .format(clock=clock, net=net))

        all_clocks = set()
        for clock, codeobj in code_objects:
            run_lines.append(
                '{net.name}.add(&{clock.name}, _run_{codeobj.name}, &{codeobj.name}_timer_start, '
                '&{codeobj.name}_timer_stop, &{codeobj.name}_profiling_info);'.
                format(clock=clock, net=net, codeobj=codeobj))
            all_clocks.add(clock)

        # Under some rare circumstances (e.g. a NeuronGroup only defining a
        # subexpression that is used by other groups (via linking, or recorded
        # by a StateMonitor) *and* not calculating anything itself *and* using a
        # different clock than all other objects) a clock that is not used by
        # any code object should nevertheless advance during the run. We include
        # such clocks without a code function in the network.
        for clock in net._clocks:
            if clock not in all_clocks:
                run_lines.append(
                    '{net.name}.add(&{clock.name}, NULL, NULL, NULL, NULL);'.
                    format(clock=clock, net=net))

        run_lines.append(
            '{net.name}.run({duration!r}, {report_call}, {report_period!r});'.
            format(net=net,
                   duration=float(duration),
                   report_call=report_call,
                   report_period=float(report_period)))
        self.main_queue.append(('run_network', (net, run_lines)))

        # Manually set the cache for the clocks, simulation scripts might
        # want to access the time (which has been set in code and is therefore
        # not accessible by the normal means until the code has been built and
        # run)
        for clock in net._clocks:
            self.array_cache[clock.variables['timestep']] = np.array(
                [clock._i_end])
            self.array_cache[clock.variables['t']] = np.array(
                [clock._i_end * clock.dt_])

        # Initialize eventspaces with -1 before the network runs
        for codeobj in self.code_objects.values():
            if codeobj.template_name == "threshold" or codeobj.template_name == "spikegenerator":
                for key in codeobj.variables.iterkeys():
                    if key.endswith(
                            'space'):  # get the correct eventspace name
                        # In case of custom scheduling, the thresholder might come after synapses or monitors
                        # and needs to be initialized in the beginning of the simulation
                        self.main_queue.insert(
                            0, ('set_by_constant', (self.get_array_name(
                                codeobj.variables[key], False), -1, False)))

        if self.build_on_run:
            if self.has_been_run:
                raise RuntimeError(
                    'The network has already been built and run '
                    'before. Use set_device with '
                    'build_on_run=False and an explicit '
                    'device.build call to use multiple run '
                    'statements with this device.')
            self.build(direct_call=False, **self.build_options)
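A minimal sketch of how the profile handling above might be driven from a user script, assuming Brian2CUDA is installed and registers the 'cuda_standalone' device; the group equation and durations are purely illustrative:

from brian2 import NeuronGroup, run, prefs, set_device, ms
import brian2cuda                                  # assumed to be installed

set_device('cuda_standalone')

# Either set the preference globally (it takes precedence, see above) ...
prefs.devices.cuda_standalone.profile = True       # or 'blocking'

grp = NeuronGroup(100, 'dv/dt = -v / (10*ms) : 1', method='exact')

# ... or pass `profile` to run(); per the code above it must be a bool or
# 'blocking', and it is ignored with a warning when the preference is set.
run(10*ms, profile='blocking')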
Exemplo n.º 29
0
    def run(self, duration, report=None, report_period=10 * second, namespace=None, profile=True, level=0):
        """
        run(duration, report=None, report_period=10*second, namespace=None, profile=True, level=0)
        
        Runs the simulation for the given duration.
        
        Parameters
        ----------
        duration : `Quantity`
            The amount of simulation time to run for.
        report : {None, 'text', 'stdout', 'stderr', function}, optional
            How to report the progress of the simulation. If ``None``, do not
            report progress. If ``'text'`` or ``'stdout'`` is specified, print
            the progress to stdout. If ``'stderr'`` is specified, print the
            progress to stderr. Alternatively, you can specify a callback
            ``callable(elapsed, complete, start, duration)`` which will be
            passed the amount of wall-clock time elapsed as a `Quantity`, the
            fraction complete from 0.0 to 1.0, the start time of the run and
            the total duration of the simulation (in biological time).
            The function will always be called at the beginning and the end
            (i.e. for fractions 0.0 and 1.0), regardless of the `report_period`.
        report_period : `Quantity`
            How frequently (in real time) to report progress.
        namespace : dict-like, optional
            A namespace that will be used in addition to the group-specific
            namespaces (if defined). If not specified, the locals
            and globals around the run function will be used.
        profile : bool, optional
            Whether to record profiling information (see
            `Network.profiling_info`). Defaults to ``True``.
        level : int, optional
            How deep to go up the stack frame to look for the locals/globals
            (see `namespace` argument). Only used by run functions that call
            this run function, e.g. `MagicNetwork.run` to adjust for the
            additional nesting.

        Notes
        -----
        The simulation can be stopped by calling `Network.stop` or the
        global `stop` function.
        """
        self._clocks = set([obj.clock for obj in self.objects])
        # We get direct references to the underlying variables for all clocks
        # to avoid expensive access during the run loop
        self._clock_variables = {
            c: (c.variables["timestep"].get_value(), c.variables["t"].get_value(), c.variables["dt"].get_value())
            for c in self._clocks
        }
        t_start = self.t
        t_end = self.t + duration
        for clock in self._clocks:
            clock.set_interval(self.t, t_end)

        # Get the local namespace
        if namespace is None:
            namespace = get_local_namespace(level=level + 3)

        self.before_run(namespace)

        if len(self.objects) == 0:
            return  # TODO: raise an error? warning?

        # Find the first clock to be updated (see note below)
        clock, curclocks = self._nextclocks()
        start_time = time.time()

        logger.debug("Simulating network '%s' from time %s to %s." % (self.name, t_start, t_end), "run")

        if report is not None:
            report_period = float(report_period)
            next_report_time = start_time + report_period
            if report == "text" or report == "stdout":
                report_callback = TextReport(sys.stdout)
            elif report == "stderr":
                report_callback = TextReport(sys.stderr)
            elif isinstance(report, basestring):
                raise ValueError(("Do not know how to handle report argument " '"%s".' % report))
            elif callable(report):
                report_callback = report
            else:
                raise TypeError(
                    (
                        "Do not know how to handle report argument, "
                        'it has to be one of "text", "stdout", '
                        '"stderr", or a callable function/object, '
                        "but it is of type %s"
                    )
                    % type(report)
                )
            report_callback(0 * second, 0.0, t_start, duration)

        profiling_info = defaultdict(float)

        timestep, _, _ = self._clock_variables[clock]
        running = timestep[0] < clock._i_end
        while running and not self._stopped and not Network._globally_stopped:
            timestep, t, dt = self._clock_variables[clock]
            # update the network time to this clock's time
            self.t_ = t[0]
            if report is not None:
                current = time.time()
                if current > next_report_time:
                    report_callback(
                        (current - start_time) * second, (self.t_ - float(t_start)) / float(duration), t_start, duration
                    )
                    next_report_time = current + report_period
                # update the objects with this clock
            for obj in self.objects:
                if obj._clock in curclocks and obj.active:
                    if profile:
                        obj_time = time.time()
                        obj.run()
                        profiling_info[obj.name] += time.time() - obj_time
                    else:
                        obj.run()

            # tick the clock forward one time step
            for c in curclocks:
                timestep, t, dt = self._clock_variables[c]
                timestep[0] += 1
                t[0] = timestep[0] * dt[0]

            # find the next clocks to be updated. The < operator for Clock
            # determines that the first clock to be updated should be the one
            # with the smallest t value, unless there are several with the
            # same t value in which case we update all of them
            clock, curclocks = self._nextclocks()

            if device._maximum_run_time is not None and time.time() - start_time > float(device._maximum_run_time):
                self._stopped = True
            else:
                timestep, _, _ = self._clock_variables[clock]
                running = timestep[0] < clock._i_end

        end_time = time.time()
        if self._stopped or Network._globally_stopped:
            self.t_ = clock.t_
        else:
            self.t_ = float(t_end)

        device._last_run_time = end_time - start_time
        if duration > 0:
            device._last_run_completed_fraction = (self.t - t_start) / duration
        else:
            device._last_run_completed_fraction = 1.0

        # check for nans
        for obj in self.objects:
            if isinstance(obj, Group):
                obj._check_for_invalid_states()

        if report is not None:
            report_callback((end_time - start_time) * second, 1.0, t_start, duration)
        self.after_run()

        logger.debug(("Finished simulating network '%s' " "(took %.2fs)") % (self.name, end_time - start_time), "run")
        # Store profiling info (or erase old info to avoid confusion)
        if profile:
            self._profiling_info = [(name, t * second) for name, t in profiling_info.iteritems()]
            # Dump a profiling summary to the log
            logger.debug("\n" + str(profiling_summary(self)))
        else:
            self._profiling_info = None
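A minimal sketch of a custom progress reporter matching the callback signature used above, report_callback(elapsed, completed, start, duration); the group equation and durations are illustrative:

from brian2 import NeuronGroup, Network, ms, second

def my_report(elapsed, completed, start, duration):
    # elapsed: wall-clock time as a Quantity, completed: fraction 0.0-1.0,
    # start/duration: biological start time and total duration of the run
    print("%3.0f%% done after %s of wall-clock time" % (completed * 100, elapsed))

grp = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1', method='exact')
net = Network(grp)
net.run(100*ms, report=my_report, report_period=1*second, profile=True)
print(net.profiling_info)   # per-object timings collected when profile=True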
Exemplo n.º 30
0
def test_spike_neurongroup():
    """
    Test dictionary representation of spiking neuron
    """
    eqn = ''' dv/dt = (v_th - v) / tau : volt
              v_th = 900 * mV :volt
              v_rest = -70 * mV :volt
              tau :second (constant)'''

    tau = 10 * ms
    size = 10

    grp = NeuronGroup(size,
                      eqn,
                      threshold='v > v_th',
                      reset='v = v_rest',
                      refractory=2 * ms)

    neuron_dict = collect_NeuronGroup(grp, get_local_namespace(0))

    assert neuron_dict['N'] == size
    assert neuron_dict['user_method'] is None

    eqns = Equations(eqn)
    assert neuron_dict['equations']['v']['type'] == DIFFERENTIAL_EQUATION
    assert neuron_dict['equations']['v']['unit'] == volt
    assert neuron_dict['equations']['v']['var_type'] == FLOAT
    assert neuron_dict['equations']['v']['expr'] == eqns['v'].expr.code

    assert neuron_dict['equations']['v_th']['type'] == SUBEXPRESSION
    assert neuron_dict['equations']['v_th']['unit'] == volt
    assert neuron_dict['equations']['v_th']['var_type'] == FLOAT
    assert neuron_dict['equations']['v_th']['expr'] == eqns['v_th'].expr.code

    assert neuron_dict['equations']['v_rest']['type'] == SUBEXPRESSION
    assert neuron_dict['equations']['v_rest']['unit'] == volt
    assert neuron_dict['equations']['v_rest']['var_type'] == FLOAT

    assert neuron_dict['equations']['tau']['type'] == PARAMETER
    assert neuron_dict['equations']['tau']['unit'] == second
    assert neuron_dict['equations']['tau']['var_type'] == FLOAT
    assert neuron_dict['equations']['tau']['flags'][0] == 'constant'

    thresholder = grp.thresholder['spike']
    neuron_events = neuron_dict['events']['spike']
    assert neuron_events['threshold']['code'] == 'v > v_th'
    assert neuron_events['threshold']['when'] == thresholder.when
    assert neuron_events['threshold']['order'] == thresholder.order
    assert neuron_events['threshold']['dt'] == grp.clock.dt

    resetter = grp.resetter['spike']
    assert neuron_events['reset']['code'] == 'v = v_rest'
    assert neuron_events['reset']['when'] == resetter.when
    assert neuron_events['reset']['order'] == resetter.order
    assert neuron_events['reset']['dt'] == resetter.clock.dt

    assert neuron_dict['events']['spike']['refractory'] == Quantity(2 * ms)

    # example 2 with threshold but no reset

    start_scope()
    grp2 = NeuronGroup(size,
                       '''dv/dt = (100 * mV - v) / tau_n : volt''',
                       threshold='v > 800 * mV',
                       method='euler')
    tau_n = 10 * ms

    neuron_dict2 = collect_NeuronGroup(grp2, get_local_namespace(0))
    thresholder = grp2.thresholder['spike']
    neuron_events = neuron_dict2['events']['spike']
    assert neuron_events['threshold']['code'] == 'v > 800 * mV'
    assert neuron_events['threshold']['when'] == thresholder.when
    assert neuron_events['threshold']['order'] == thresholder.order
    assert neuron_events['threshold']['dt'] == grp2.clock.dt

    with pytest.raises(KeyError):
        neuron_dict2['events']['spike']['reset']
        neuron_dict2['events']['spike']['refractory']
Exemplo n.º 31
0
    def __getitem__(self, index):
        '''
        Returns synaptic indices for `index`, which can be a tuple of indices
        (including arrays and slices), a single index or a string.

        '''
        if (not isinstance(index, (tuple, basestring)) and isinstance(
                index, (int, np.ndarray, slice, collections.Sequence))):
            index = (index, slice(None), slice(None))
        if isinstance(index, tuple):
            if len(index) == 2:  # two indices (pre- and postsynaptic cell)
                index = (index[0], index[1], slice(None))
            elif len(index) > 3:
                raise IndexError('Need 1, 2 or 3 indices, got %d.' %
                                 len(index))

            I, J, K = index

            pre_synapses = find_synapses(I, self.pre_synaptic,
                                         self.synaptic_pre)
            post_synapses = find_synapses(J, self.post_synaptic,
                                          self.synaptic_post)
            matching_synapses = np.intersect1d(pre_synapses,
                                               post_synapses,
                                               assume_unique=True)

            if K == slice(None):
                return matching_synapses
            elif isinstance(K, (int, slice)):
                test_k = slice_to_test(K)
            else:
                raise NotImplementedError(('Indexing synapses with arrays not '
                                           'implemented yet'))

            pre_neurons = self.synaptic_pre[pre_synapses]
            post_neurons = self.synaptic_post[post_synapses]
            synapse_numbers = _synapse_numbers(pre_neurons, post_neurons)
            return np.intersect1d(matching_synapses,
                                  np.flatnonzero(test_k(synapse_numbers)),
                                  assume_unique=True)

        elif isinstance(index, basestring):
            # interpret the string expression
            identifiers = get_identifiers(index)
            variables = dict(self.variables)
            if 'k' in identifiers:
                synapse_numbers = _synapse_numbers(self.synaptic_pre[:],
                                                   self.synaptic_post[:])
                variables['k'] = ArrayVariable('k', Unit(1), synapse_numbers)
            namespace = get_local_namespace(1)
            additional_namespace = ('implicit-namespace', namespace)
            abstract_code = '_cond = ' + index
            codeobj = create_runner_codeobj(
                self.synapses,
                abstract_code,
                'state_variable_indexing',
                additional_variables=variables,
                additional_namespace=additional_namespace,
            )

            result = codeobj()
            return result
        else:
            raise IndexError(
                'Unsupported index type {itype}'.format(itype=type(index)))
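A sketch of the user-facing indexing that eventually reaches this __getitem__: synaptic state variables accept (pre, post[, k]) tuples, slices, or string conditions. The weight variable w and the group size are illustrative:

from brian2 import NeuronGroup, Synapses, ms

G = NeuronGroup(4, 'dv/dt = -v / (10*ms) : 1', method='exact')
S = Synapses(G, G, model='w : 1')
S.connect()                 # all-to-all

S.w['i != j'] = 0.5         # string condition -> the basestring branch above
print(S.w[2, 3])            # (pre, post) tuple -> the tuple branch
print(S.w[2, :])            # slices are accepted for each index as well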
Exemplo n.º 32
0
    def run(self, duration, report=None, report_period=60*second,
            namespace=None, level=0):
        '''
        run(duration, report=None, report_period=60*second, namespace=None, level=0)
        
        Runs the simulation for the given duration.
        
        Parameters
        ----------
        
        duration : `Quantity`
            The amount of simulation time to run for.
        report : {None, 'stdout', 'stderr', 'graphical', function}, optional
            How to report the progress of the simulation. If None, do not
            report progress. If stdout or stderr is specified, print the
            progress to stdout or stderr. If graphical, Tkinter is used to
            show a graphical progress bar. Alternatively, you can specify
            a callback ``function(elapsed, complete)`` which will be passed
            the amount of time elapsed (in seconds) and the fraction complete
            from 0 to 1.
        report_period : `Quantity`
            How frequently (in real time) to report progress.
        namespace : dict-like, optional
            A namespace in which objects which do not define their own
            namespace will be run. If no namespace is given at all, the locals
            and globals around the run function will be used.
        level : int, optional
            How deep to go down the stack frame to look for the locals/global
            (see `namespace` argument). Only used by run functions that call
            this run function, e.g. `MagicNetwork.run` to adjust for the
            additional nesting.
        Notes
        -----
        
        The simulation can be stopped by calling `Network.stop` or the
        global `stop` function.
        '''
        
        if namespace is not None:
            self.before_run(('explicit-run-namespace', namespace))
        else:
            namespace = get_local_namespace(3 + level)
            self.before_run(('implicit-run-namespace', namespace))

        if len(self.objects)==0:
            return # TODO: raise an error? warning?

        t_end = self.t+duration
        for clock in self._clocks:
            clock.set_interval(self.t, t_end)
            
        # TODO: progress reporting stuff
        
        # Find the first clock to be updated (see note below)
        clock, curclocks = self._nextclocks()
        if report is not None:
            start = current = time.time()
            next_report_time = start + 10

        while clock.running and not self._stopped and not Network._globally_stopped:
            # update the network time to this clocks time
            self.t_ = clock.t_
            if report is not None:
                current = time.time()
                if current > next_report_time:
                    report_msg = '{t} simulated ({percent}%), estimated {remaining} s remaining.'
                    remaining = int(round((current - start)/self.t*(duration-self.t)))
                    print report_msg.format(t=self.t, percent=int(round(100*self.t/duration)),
                                            remaining=remaining)
                    next_report_time = current + 10
                # update the objects with this clock
            for obj in self.objects:
                if obj.clock in curclocks and obj.active:
                    obj.run()
            # tick the clock forward one time step
            for c in curclocks:
                c.tick()
            # find the next clocks to be updated. The < operator for Clock
            # determines that the first clock to be updated should be the one
            # with the smallest t value, unless there are several with the 
            # same t value in which case we update all of them
            clock, curclocks = self._nextclocks()

        self.t = t_end

        if report is not None:
            print 'Took ', current-start, 's in total.'
        self.after_run()
Exemplo n.º 33
0
    def _add_synapses(self, sources, targets, n, p, condition=None,
                      level=0):

        if condition is None:
            sources = np.atleast_1d(sources).astype(np.int32)
            targets = np.atleast_1d(targets).astype(np.int32)
            n = np.atleast_1d(n)
            p = np.atleast_1d(p)
            if not len(p) == 1 or p != 1:
                use_connections = np.random.rand(len(sources)) < p
                sources = sources[use_connections]
                targets = targets[use_connections]
                n = n[use_connections]
            sources = sources.repeat(n)
            targets = targets.repeat(n)
            new_synapses = len(sources)

            old_N = len(self)
            new_N = old_N + new_synapses
            self._resize(new_N)

            # Deal with subgroups
            if '_sub_idx' in self.source.variables:
                real_sources = self.source.variables['_sub_idx'].get_value()[sources]
            else:
                real_sources = sources
            if '_sub_idx' in self.target.variables:
                real_targets = self.target.variables['_sub_idx'].get_value()[targets]
            else:
                real_targets = targets
            self.variables['_synaptic_pre'].get_value()[old_N:new_N] = real_sources
            self.variables['_synaptic_post'].get_value()[old_N:new_N] = real_targets
        else:
            abstract_code = '_pre_idx = _all_pre \n'
            abstract_code += '_post_idx = _all_post \n'
            abstract_code += '_cond = ' + condition + '\n'
            abstract_code += '_n = ' + str(n) + '\n'
            abstract_code += '_p = ' + str(p)
            namespace = get_local_namespace(level + 1)
            additional_namespace = ('implicit-namespace', namespace)
            # This overwrites 'i' and 'j' in the synapses' variables dictionary
            # This is necessary because in the context of synapse creation, i
            # and j do not correspond to the sources/targets of the existing
            # synapses but to all the possible sources/targets
            variables = Variables(None)
            # Will be set in the template
            variables.add_auxiliary_variable('i', unit=Unit(1))
            variables.add_auxiliary_variable('j', unit=Unit(1))

            if '_sub_idx' in self.source.variables:
                variables.add_reference('_all_pre', self.source.variables['_sub_idx'])
            else:
                variables.add_reference('_all_pre', self.source.variables['i'])

            if '_sub_idx' in self.target.variables:
                variables.add_reference('_all_post', self.target.variables['_sub_idx'])
            else:
                variables.add_reference('_all_post', self.target.variables['i'])

            variable_indices = defaultdict(lambda: '_idx')
            for varname in self.variables:
                if self.variables.indices[varname] == '_presynaptic_idx':
                    variable_indices[varname] = '_all_pre'
                elif self.variables.indices[varname] == '_postsynaptic_idx':
                    variable_indices[varname] = '_all_post'
            variable_indices['_all_pre'] = 'i'
            variable_indices['_all_post'] = 'j'
            codeobj = create_runner_codeobj(self,
                                            abstract_code,
                                            'synapses_create',
                                            variable_indices=variable_indices,
                                            additional_variables=variables,
                                            additional_namespace=additional_namespace,
                                            check_units=False
                                            )
            codeobj()
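_add_synapses is an internal helper; a sketch of the public calls that reach it (keyword names may differ slightly between Brian2 versions, so this is illustrative rather than definitive):

from brian2 import NeuronGroup, Synapses, ms

G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1', method='exact')
S = Synapses(G, G, model='w : 1')

# Explicit source/target indices -> the `condition is None` branch above
S.connect(i=[0, 1, 2], j=[3, 4, 5])

# String condition with probability and multiplicity -> the code-object branch
S.connect(condition='i != j', p=0.2, n=2)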
Exemplo n.º 34
0
def test_simple_neurongroup():
    """
    Test dictionary representation of simple NeuronGroup
    """
    # example 1
    eqn = '''dv/dt = (1 - v) / tau : volt'''
    tau = 10 * ms
    size = 1

    grp = NeuronGroup(size, eqn, method='exact')
    neuron_dict = collect_NeuronGroup(grp, get_local_namespace(0))

    assert neuron_dict['N'] == size
    assert neuron_dict['user_method'] == 'exact'
    assert neuron_dict['equations']['v']['type'] == DIFFERENTIAL_EQUATION
    assert neuron_dict['equations']['v']['unit'] == volt
    assert neuron_dict['equations']['v']['var_type'] == FLOAT

    with pytest.raises(KeyError):
        neuron_dict['equations']['tau']

    with pytest.raises(KeyError):
        neuron_dict['run_regularly']

    eqn_obj = Equations(eqn)
    assert neuron_dict['equations']['v']['expr'] == eqn_obj['v'].expr.code
    assert neuron_dict['identifiers']['tau'] == 10 * ms
    with pytest.raises(KeyError):
        neuron_dict['identifiers']['size']
    assert neuron_dict['when'] == 'groups'
    assert neuron_dict['order'] == 0

    # example 2
    start_scope()
    area = 100 * umetre**2
    g_L = 1e-2 * siemens * cm**-2 * area
    E_L = 1000
    div_2 = 2
    dim_2 = 0.02 * amp
    Cm = 1 * ufarad * cm**-2 * area
    grp = NeuronGroup(
        10, '''dv/dt = I_leak / Cm : volt
                        I_leak = g_L*(E_L - v) : amp''')
    grp.run_regularly('v = v / div_2', dt=20 * ms, name='i_am_run_reg_senior')
    grp.run_regularly('I_leak = I_leak + dim_2',
                      dt=10 * ms,
                      name='i_am_run_reg_junior')

    neuron_dict = collect_NeuronGroup(grp, get_local_namespace(0))

    assert neuron_dict['N'] == 10
    assert neuron_dict['user_method'] is None
    assert neuron_dict['when'] == 'groups'
    assert neuron_dict['order'] == 0
    eqn_str = '''
    dv/dt = I_leak / Cm : volt
    I_leak = g_L*(E_L - v) : amp
    '''
    assert neuron_dict['equations']['v']['type'] == DIFFERENTIAL_EQUATION
    assert neuron_dict['equations']['v']['unit'] == volt
    assert neuron_dict['equations']['v']['var_type'] == FLOAT

    parsed = parse_string_equations(eqn_str)
    assert neuron_dict['equations']['v']['expr'] == parsed['v'].expr.code

    assert neuron_dict['equations']['I_leak']['type'] == SUBEXPRESSION
    assert neuron_dict['equations']['I_leak']['unit'] == amp
    assert neuron_dict['equations']['I_leak']['var_type'] == FLOAT
    assert neuron_dict['equations']['I_leak']['expr'] == 'g_L*(E_L - v)'
    assert neuron_dict['identifiers']['g_L'] == g_L
    assert neuron_dict['identifiers']['Cm'] == Cm
    assert neuron_dict['identifiers']['div_2'] == div_2
    assert neuron_dict['identifiers']['dim_2'] == dim_2

    with pytest.raises(KeyError):
        neuron_dict['events']
    with pytest.raises(KeyError):
        neuron_dict['identifiers']['area']

    assert neuron_dict['run_regularly'][0]['name'] == 'i_am_run_reg_senior'
    assert neuron_dict['run_regularly'][1]['name'] == 'i_am_run_reg_junior'
    assert neuron_dict['run_regularly'][0]['code'] == 'v = v / div_2'
    assert (
        neuron_dict['run_regularly'][1]['code'] == 'I_leak = I_leak + dim_2')
    assert neuron_dict['run_regularly'][0]['dt'] == 20 * ms
    assert neuron_dict['run_regularly'][1]['dt'] == 10 * ms
    assert neuron_dict['run_regularly'][0]['when'] == 'start'
    assert neuron_dict['run_regularly'][1]['when'] == 'start'
    assert neuron_dict['run_regularly'][0]['order'] == 0
    assert neuron_dict['run_regularly'][1]['order'] == 0

    with pytest.raises(IndexError):
        neuron_dict['run_regularly'][2]
Exemplo n.º 35
0
    def run(self, duration, report=None, report_period=60*second,
            namespace=None, level=0):
        '''
        run(duration, report=None, report_period=60*second)
        
        Runs the simulation for the given duration.
        
        Parameters
        ----------
        
        duration : `Quantity`
            The amount of simulation time to run for.
        report : {None, 'stdout', 'stderr', 'graphical', function}, optional
            How to report the progress of the simulation. If None, do not
            report progress. If stdout or stderr is specified, print the
            progress to stdout or stderr. If graphical, Tkinter is used to
            show a graphical progress bar. Alternatively, you can specify
            a callback ``function(elapsed, complete)`` which will be passed
            the amount of time elapsed (in seconds) and the fraction complete
            from 0 to 1.
        report_period : `Quantity`
            How frequently (in real time) to report progress.
        namespace : dict-like, optional
            A namespace in which objects which do not define their own
            namespace will be run. If no namespace is given, the locals and
            globals around the run function will be used.
        level : int, optional
            How deep to go down the stack frame to look for the locals/global
            (see `namespace` argument). Only used by run functions that call
            this run function, e.g. `MagicNetwork.run` to adjust for the
            additional nesting.
        Notes
        -----
        
        The simulation can be stopped by calling `Network.stop` or the
        global `stop` function.
        '''
        
        if namespace is not None:
            self.pre_run(('explicit-run-namespace', namespace))
        else:
            namespace = get_local_namespace(2 + level)
            self.pre_run(('implicit-run-namespace', namespace))

        if len(self.objects)==0:
            return # TODO: raise an error? warning?

        t_end = self.t+duration
        for clock in self._clocks:
            clock.set_interval(self.t, t_end)
            
        # TODO: progress reporting stuff
        
        # Find the first clock to be updated (see note below)
        clock, curclocks = self._nextclocks()
        if report is not None:
            start = current = time.time()
            next_report_time = start + 10
        while clock.running and not self._stopped and not Network._globally_stopped:
            # update the network time to this clocks time
            self.t_ = clock.t_
            if report is not None:
                current = time.time()
                if current > next_report_time:
                    report_msg = '{t} simulated ({percent}%), estimated {remaining} s remaining.'
                    remaining = int(round((current - start)/self.t*(duration-self.t)))
                    print report_msg.format(t=self.t, percent=int(round(100*self.t/duration)),
                                            remaining=remaining)
                    next_report_time = current + 10
                # update the objects with this clock
            for obj in self.objects:
                if obj.clock in curclocks and obj.active:
                    obj.update()
            # tick the clock forward one time step
            for c in curclocks:
                c.tick()
            # find the next clocks to be updated. The < operator for Clock
            # determines that the first clock to be updated should be the one
            # with the smallest t value, unless there are several with the 
            # same t value in which case we update all of them
            clock, curclocks = self._nextclocks()
            
        self.t = t_end

        if report is not None:
            print 'Took ', current-start, 's in total.'
        self.post_run()
Exemplo n.º 36
0
    def _add_synapses(self, sources, targets, n, p, condition=None,
                      level=0):

        if condition is None:
            sources = np.atleast_1d(sources)
            targets = np.atleast_1d(targets)
            n = np.atleast_1d(n)
            p = np.atleast_1d(p)
            if not len(p) == 1 or p != 1:
                use_connections = np.random.rand(len(sources)) < p
                sources = sources[use_connections]
                targets = targets[use_connections]
                n = n[use_connections]
            sources = sources.repeat(n)
            targets = targets.repeat(n)
            new_synapses = len(sources)

            old_N = self.N
            new_N = old_N + new_synapses
            self._resize(new_N)

            self.synaptic_pre[old_N:new_N] = sources
            self.synaptic_post[old_N:new_N] = targets
            synapse_idx = old_N
            for source, target in zip(sources, targets):
                synapses = self.pre_synaptic[source]
                synapses.resize(len(synapses) + 1)
                synapses[-1] = synapse_idx
                synapses = self.post_synaptic[target]
                synapses.resize(len(synapses) + 1)
                synapses[-1] = synapse_idx
                synapse_idx += 1
        else:
            abstract_code = '_cond = ' + condition + '\n'
            abstract_code += '_n = ' + str(n) + '\n'
            abstract_code += '_p = ' + str(p)
            namespace = get_local_namespace(level + 1)
            additional_namespace = ('implicit-namespace', namespace)
            variables = {
                '_source_neurons': ArrayVariable('_source_neurons', Unit(1),
                                                 self.source.item_mapping[:] -
                                                 self.source.offset,
                                                 constant=True),
                '_target_neurons': ArrayVariable('_target_neurons', Unit(1),
                                                 self.target.item_mapping[:] -
                                                 self.target.offset,
                                                 constant=True),
                # The template needs to have access to the DynamicArray here,
                # having access to the underlying array (which would be much
                # faster), is not enough
                '_synaptic_pre': Variable(Unit(1),
                                          self.synaptic_pre, constant=True),
                '_synaptic_post': Variable(Unit(1),
                                           self.synaptic_post, constant=True),
                '_pre_synaptic': Variable(Unit(1),
                                          self.pre_synaptic, constant=True),
                '_post_synaptic': Variable(Unit(1),
                                           self.post_synaptic, constant=True),
                # Will be set in the template
                'i': Variable(unit=Unit(1), constant=True),
                'j': Variable(unit=Unit(1), constant=True)
            }
            codeobj = create_runner_codeobj(self.synapses,
                                            abstract_code,
                                            'synapses_create',
                                            additional_variables=variables,
                                            additional_namespace=additional_namespace,
                                            check_units=False
                                            )
            codeobj()
            number = len(self.synaptic_pre)
            for variable in self._registered_variables:
                variable.resize(number)
Exemplo n.º 37
0
    def _add_synapses(self, sources, targets, n, p, condition=None, level=0):

        if condition is None:
            sources = np.atleast_1d(sources)
            targets = np.atleast_1d(targets)
            n = np.atleast_1d(n)
            p = np.atleast_1d(p)
            if not len(p) == 1 or p != 1:
                use_connections = np.random.rand(len(sources)) < p
                sources = sources[use_connections]
                targets = targets[use_connections]
                n = n[use_connections]
            sources = sources.repeat(n)
            targets = targets.repeat(n)
            new_synapses = len(sources)

            old_N = self.N
            new_N = old_N + new_synapses
            self._resize(new_N)

            self.synaptic_pre[old_N:new_N] = sources
            self.synaptic_post[old_N:new_N] = targets
            synapse_idx = old_N
            for source, target in zip(sources, targets):
                synapses = self.pre_synaptic[source]
                synapses.resize(len(synapses) + 1)
                synapses[-1] = synapse_idx
                synapses = self.post_synaptic[target]
                synapses.resize(len(synapses) + 1)
                synapses[-1] = synapse_idx
                synapse_idx += 1
        else:
            abstract_code = '_cond = ' + condition + '\n'
            abstract_code += '_n = ' + str(n) + '\n'
            abstract_code += '_p = ' + str(p)
            namespace = get_local_namespace(level + 1)
            additional_namespace = ('implicit-namespace', namespace)
            variables = {
                '_source_neurons':
                ArrayVariable('_source_neurons',
                              Unit(1),
                              self.source.item_mapping[:] - self.source.offset,
                              constant=True),
                '_target_neurons':
                ArrayVariable('_target_neurons',
                              Unit(1),
                              self.target.item_mapping[:] - self.target.offset,
                              constant=True),
                # The template needs to have access to the DynamicArray here,
                # having access to the underlying array (which would be much
                # faster), is not enough
                '_synaptic_pre':
                Variable(Unit(1), self.synaptic_pre, constant=True),
                '_synaptic_post':
                Variable(Unit(1), self.synaptic_post, constant=True),
                '_pre_synaptic':
                Variable(Unit(1), self.pre_synaptic, constant=True),
                '_post_synaptic':
                Variable(Unit(1), self.post_synaptic, constant=True),
                # Will be set in the template
                'i':
                Variable(unit=Unit(1), constant=True),
                'j':
                Variable(unit=Unit(1), constant=True)
            }
            codeobj = create_runner_codeobj(
                self.synapses,
                abstract_code,
                'synapses_create',
                additional_variables=variables,
                additional_namespace=additional_namespace,
                check_units=False)
            codeobj()
            number = len(self.synaptic_pre)
            for variable in self._registered_variables:
                variable.resize(number)
Exemplo n.º 38
0
    def _resolve_external(self, identifier, user_identifier=True, run_namespace=None, level=0):
        """
        Resolve an external identifier in the context of a `Group`. If the `Group`
        declares an explicit namespace, this namespace is used in addition to the
        standard namespace for units and functions. Additionally, the namespace in
        the `run_namespace` argument (i.e. the namespace provided to `Network.run`)
        or, if this argument is unspecified, the implicit namespace of
        surrounding variables in the stack frame where the original call was made
        is used (to determine this stack frame, the `level` argument has to be set
        correctly).

        Parameters
        ----------
        identifier : str
            The name to resolve.
        user_identifier : bool, optional
            Whether this is an identifier that was used by the user (and not
            something automatically generated that the user might not even
            know about). Will be used to determine whether to display a
            warning in the case of namespace clashes. Defaults to ``True``.
        run_namespace : dict, optional
            A namespace (mapping from strings to objects), as provided as an
            argument to the `Network.run` function.
        level : int, optional
            How far to go up in the stack to find the calling frame.
        """
        # We save tuples of (namespace description, referred object) to
        # give meaningful warnings in case of duplicate definitions
        matches = []

        namespaces = OrderedDict()
        # Default namespaces (units and functions)
        namespaces["constants"] = DEFAULT_CONSTANTS
        namespaces["units"] = DEFAULT_UNITS
        namespaces["functions"] = DEFAULT_FUNCTIONS
        if getattr(self, "namespace", None) is not None:
            namespaces["group-specific"] = self.namespace

        # explicit or implicit run namespace
        if run_namespace is not None:
            namespaces["run"] = run_namespace
        else:
            namespaces["implicit"] = get_local_namespace(level + 1)

        for description, namespace in namespaces.iteritems():
            if identifier in namespace:
                matches.append((description, namespace[identifier]))

        if len(matches) == 0:
            # No match at all
            raise KeyError(('The identifier "%s" could not be resolved.') % (identifier))
        elif len(matches) > 1:
            # Possibly, all matches refer to the same object
            first_obj = matches[0][1]
            found_mismatch = False
            for m in matches:
                if _same_value(m[1], first_obj):
                    continue
                if _same_function(m[1], first_obj):
                    continue
                try:
                    proxy = weakref.proxy(first_obj)
                    if m[1] is proxy:
                        continue
                except TypeError:
                    pass

                # Found a mismatch
                found_mismatch = True
                break

            if found_mismatch and user_identifier:
                _conflict_warning(
                    (
                        'The name "%s" refers to different objects '
                        "in different namespaces used for resolving "
                        'names in the context of group "%s". '
                        "Will use the object from the %s namespace "
                        "with the value %r"
                    )
                    % (identifier, getattr(self, "name", "<unknown>"), matches[0][0], first_obj),
                    matches[1:],
                )

        # use the first match (according to resolution order)
        resolved = matches[0][1]

        # Replace pure Python functions by a Functions object
        if callable(resolved) and not isinstance(resolved, Function):
            resolved = Function(resolved, stateless=False)

        if not isinstance(resolved, (Function, Variable)):
            # Wrap the value in a Constant object
            unit = get_unit(resolved)
            value = np.asarray(resolved)
            if value.shape != ():
                raise KeyError("Variable %s was found in the namespace, but is" " not a scalar value" % identifier)
            resolved = Constant(identifier, unit=unit, value=value)

        return resolved
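A sketch of the resolution order implemented above from the user's point of view: an explicit run namespace takes the place of the implicit locals/globals, which otherwise serve as the fallback (values are illustrative):

from brian2 import NeuronGroup, Network, ms

tau = 10 * ms                     # would be found in the implicit namespace
G = NeuronGroup(1, 'dv/dt = -v / tau : 1', method='exact')
net = Network(G)

# Explicit namespace: 'tau' is resolved from this dict; the local `tau`
# defined above is not consulted for this run.
net.run(1 * ms, namespace={'tau': 20 * ms})

# No namespace argument: 'tau' falls back to the implicit namespace,
# i.e. the locals/globals around the run call (the 10*ms above).
net.run(1 * ms)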
Exemplo n.º 39
0
    def __getitem__(self, index):
        '''
        Returns synaptic indices for `index`, which can be a tuple of indices
        (including arrays and slices), a single index or a string.

        '''
        if (not isinstance(index, (tuple, basestring)) and
                isinstance(index, (int, np.ndarray, slice,
                                   collections.Sequence))):
            index = (index, slice(None), slice(None))
        if isinstance(index, tuple):
            if len(index) == 2:  # two indices (pre- and postsynaptic cell)
                index = (index[0], index[1], slice(None))
            elif len(index) > 3:
                raise IndexError('Need 1, 2 or 3 indices, got %d.' % len(index))

            I, J, K = index

            pre_synapses = find_synapses(I, self.pre_synaptic,
                                         self.synaptic_pre)
            post_synapses = find_synapses(J, self.post_synaptic,
                                          self.synaptic_post)
            matching_synapses = np.intersect1d(pre_synapses, post_synapses,
                                               assume_unique=True)

            if K == slice(None):
                return matching_synapses
            elif isinstance(K, (int, slice)):
                test_k = slice_to_test(K)
            else:
                raise NotImplementedError(('Indexing synapses with arrays not '
                                           'implemented yet'))

            pre_neurons = self.synaptic_pre[pre_synapses]
            post_neurons = self.synaptic_post[post_synapses]
            synapse_numbers = _synapse_numbers(pre_neurons,
                                               post_neurons)
            return np.intersect1d(matching_synapses,
                                  np.flatnonzero(test_k(synapse_numbers)),
                                  assume_unique=True)

        elif isinstance(index, basestring):
            # interpret the string expression
            identifiers = get_identifiers(index)
            variables = dict(self.variables)
            if 'k' in identifiers:
                synapse_numbers = _synapse_numbers(self.synaptic_pre[:],
                                                   self.synaptic_post[:])
                variables['k'] = ArrayVariable('k', Unit(1),
                                                synapse_numbers)
            namespace = get_local_namespace(1)
            additional_namespace = ('implicit-namespace', namespace)
            abstract_code = '_cond = ' + index
            codeobj = create_runner_codeobj(self.synapses,
                                            abstract_code,
                                            'state_variable_indexing',
                                            additional_variables=variables,
                                            additional_namespace=additional_namespace,
                                            )

            result = codeobj()
            return result
        else:
            raise IndexError('Unsupported index type {itype}'.format(itype=type(index)))