Example #1
def test_cpp_standalone():
    set_device('cpp_standalone', build_on_run=False)
    ##### Define the model
    tau = 1 * ms
    eqs = '''
    dV/dt = (-40*mV-V)/tau : volt (unless refractory)
    '''
    threshold = 'V>-50*mV'
    reset = 'V=-60*mV'
    refractory = 5 * ms
    N = 1000

    G = NeuronGroup(N,
                    eqs,
                    reset=reset,
                    threshold=threshold,
                    refractory=refractory,
                    name='gp')
    G.V = '-i*mV'
    M = SpikeMonitor(G)
    S = Synapses(G, G, 'w : volt', on_pre='V += w')
    S.connect('abs(i-j)<5 and i!=j')
    S.w = 0.5 * mV
    S.delay = '0*ms'

    net = Network(G, M, S)
    net.run(100 * ms)
    device.build(directory=None, with_output=False)
    # We only check an approximate range here because, depending on minor
    # details of the compilation (e.g. whether -ffast-math is enabled), the
    # results may differ slightly.
    assert 17000 <= len(M.i) <= 18000
    assert len(M.t) == len(M.i)
    assert M.t[0] == 0.
    reset_device()
Example #2
def test_time_after_run():
    set_device('cpp_standalone', build_on_run=False)
    # Check that the clock and network times after a run are correct, even if
    # we have not actually run the code yet (via build)
    G = NeuronGroup(10, 'dv/dt = -v/(10*ms) : 1')
    net = Network(G)
    assert_allclose(defaultclock.dt, 0.1 * ms)
    assert_allclose(defaultclock.t, 0. * ms)
    assert_allclose(G.t, 0. * ms)
    assert_allclose(net.t, 0. * ms)
    net.run(10 * ms)
    assert_allclose(defaultclock.t, 10. * ms)
    assert_allclose(G.t, 10. * ms)
    assert_allclose(net.t, 10. * ms)
    net.run(10 * ms)
    assert_allclose(defaultclock.t, 20. * ms)
    assert_allclose(G.t, 20. * ms)
    assert_allclose(net.t, 20. * ms)
    device.build(directory=None, with_output=False)
    # Everything should of course still be accessible
    assert_allclose(defaultclock.t, 20. * ms)
    assert_allclose(G.t, 20. * ms)
    assert_allclose(net.t, 20. * ms)

    reset_device()
Example #3
def test_set_reset_device_implicit():
    from angela2.devices import device_module
    old_prev_devices = list(device_module.previous_devices)
    device_module.previous_devices = []
    test_device1 = ATestDevice()
    all_devices['test1'] = test_device1
    test_device2 = ATestDevice()
    all_devices['test2'] = test_device2

    set_device('test1', build_on_run=False, my_opt=1)
    set_device('test2', build_on_run=True, my_opt=2)
    assert get_device() is test_device2
    assert get_device()._options['my_opt'] == 2
    assert get_device().build_on_run

    reset_device()
    assert get_device() is test_device1
    assert get_device()._options['my_opt'] == 1
    assert not get_device().build_on_run

    reset_device()
    assert get_device() is runtime_device

    # If there is no previous device, this resets to the runtime device
    reset_device()
    assert get_device() is runtime_device
    del all_devices['test1']
    del all_devices['test2']
    device_module.previous_devices = old_prev_devices
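A compact sketch of the stack behaviour verified above: each set_device call pushes the previously active device, reset_device() without arguments pops back to it, and with an empty stack it falls back to the runtime device. The flow below is illustrative, not part of the test suite.

# Illustrative sketch of the set_device/reset_device stack (not a test).
set_device('cpp_standalone', build_on_run=False)  # pushes the previously active device
# ... define the model, run, and call device.build(...) ...
reset_device()  # pops back to whatever device was active before set_device()
reset_device()  # with an empty stack, this falls back to the runtime device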
Example #4
def test_run_with_debug():
    # For now we just want to make sure that this runs (i.e. does not fail with
    # a compilation or runtime error); actually capturing the output would be a
    # bit involved to get right.
    set_device('cpp_standalone', build_on_run=True, debug=True, directory=None)
    group = NeuronGroup(1, 'v: 1', threshold='False')
    syn = Synapses(group, group, on_pre='v += 1')
    syn.connect()
    mon = SpikeMonitor(group)
    run(defaultclock.dt)
Example #5
def test_multiple_connects():
    set_device('cpp_standalone', build_on_run=False)
    G = NeuronGroup(10, 'v:1')
    S = Synapses(G, G, 'w:1')
    S.connect(i=[0], j=[0])
    S.connect(i=[1], j=[1])
    run(0 * ms)
    device.build(directory=None, with_output=False)
    assert len(S) == 2 and len(S.w[:]) == 2
    reset_device()
Example #6
def test_openmp_scalar_writes():
    # Test that a write to a scalar (shared) variable is only performed once in
    # an OpenMP setting (see github issue #551)
    set_device('cpp_standalone', build_on_run=False)
    prefs.devices.cpp_standalone.openmp_threads = 4
    G = NeuronGroup(10, 's : 1 (shared)')
    G.run_regularly('s += 1')
    run(defaultclock.dt)
    device.build(directory=None, with_output=False)
    assert_equal(G.s[:], 1.0)

    reset_device()
Example #7
def test_changing_profile_arg():
    set_device('cpp_standalone', build_on_run=False)
    G = NeuronGroup(10000, 'v : 1')
    op1 = G.run_regularly('v = exp(-v)', name='op1')
    op2 = G.run_regularly('v = exp(-v)', name='op2')
    op3 = G.run_regularly('v = exp(-v)', name='op3')
    op4 = G.run_regularly('v = exp(-v)', name='op4')
    # Op 1 is active only during the first profiled run
    # Op 2 is active during both profiled runs
    # Op 3 is active only during the second profiled run
    # Op 4 is never active (only during the unprofiled run)
    op1.active = True
    op2.active = True
    op3.active = False
    op4.active = False
    run(1000 * defaultclock.dt, profile=True)
    op1.active = True
    op2.active = True
    op3.active = True
    op4.active = True
    run(1000 * defaultclock.dt, profile=False)
    op1.active = False
    op2.active = True
    op3.active = True
    op4.active = False
    run(1000 * defaultclock.dt, profile=True)
    device.build(directory=None, with_output=False)
    profiling_dict = dict(magic_network.profiling_info)
    # Note that for now, C++ standalone creates a new CodeObject for every run,
    # which is most of the time unnecessary (this is partly due to the way we
    # handle constants: they are included as literals in the code but they can
    # change between runs). Therefore, the profiling info is potentially
    # difficult to interpret
    assert len(profiling_dict) == 4  # 2 during first run, 2 during last run
    # The two code objects that were executed during the first run
    assert ('op1_codeobject' in profiling_dict
            and profiling_dict['op1_codeobject'] > 0 * second)
    assert ('op2_codeobject' in profiling_dict
            and profiling_dict['op2_codeobject'] > 0 * second)
    # Four code objects were executed during the second run, but no profiling
    # information was saved
    for name in [
            'op1_codeobject_1', 'op2_codeobject_1', 'op3_codeobject',
            'op4_codeobject'
    ]:
        assert name not in profiling_dict
    # Two code objects were executed during the third run
    assert ('op2_codeobject_2' in profiling_dict
            and profiling_dict['op2_codeobject_2'] > 0 * second)
    assert ('op3_codeobject_1' in profiling_dict
            and profiling_dict['op3_codeobject_1'] > 0 * second)
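The profiling data asserted above comes from magic_network.profiling_info, an iterable of (name, time) pairs for the profiled code objects. A minimal sketch of how it could be summarized after device.build(); the sorting and printing below are illustrative, not part of the test:

# Illustrative sketch: print profiled code objects, slowest first.
for name, time_spent in sorted(magic_network.profiling_info,
                               key=lambda entry: entry[1], reverse=True):
    print(f'{name}: {time_spent}')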
Example #8
def test_continued_standalone_runs():
    # see github issue #1237
    set_device('cpp_standalone', build_on_run=False)

    source = SpikeGeneratorGroup(1, [0], [0] * ms)
    target = NeuronGroup(1, 'v : 1')
    C_ee = Synapses(source, target, on_pre='v += 1', delay=2 * ms)
    C_ee.connect()
    run(1 * ms)
    # Spike has not been delivered yet
    run(2 * ms)

    device.build(directory=None)
    assert target.v[0] == 1  # Make sure the spike got delivered
Example #9
def test_duplicate_names_across_nets():
    set_device('cpp_standalone', build_on_run=False)
    # In standalone mode, names have to be globally unique, not just unique
    # per network
    obj1 = angelaObject(name='name1')
    obj2 = angelaObject(name='name2')
    obj3 = angelaObject(name='name3')
    obj4 = angelaObject(name='name1')
    net1 = Network(obj1, obj2)
    net2 = Network(obj3, obj4)
    net1.run(0 * ms)
    net2.run(0 * ms)
    with pytest.raises(ValueError):
        device.build()

    reset_device()
Example #10
def test_delete_directory():
    set_device('cpp_standalone', build_on_run=True, directory=None)
    group = NeuronGroup(10, 'dv/dt = -v / (10*ms) : volt', method='exact')
    group.v = np.arange(10) * mV  # uses the static array mechanism
    run(defaultclock.dt)
    # Add a new file
    dummy_file = os.path.join(device.project_dir, 'results', 'dummy.txt')
    open(dummy_file, 'w').close()
    assert os.path.isfile(dummy_file)
    with catch_logs() as logs:
        device.delete(directory=True)
    assert len(logs) == 1
    assert os.path.isfile(dummy_file)
    with catch_logs() as logs:
        device.delete(directory=True, force=True)
    assert len(logs) == 0
    # everything should be deleted
    assert not os.path.exists(device.project_dir)
Example #11
def test_multiple_standalone_runs():
    # see github issue #1189
    set_device('cpp_standalone', directory=None)

    network = Network()
    Pe = NeuronGroup(1, 'v : 1', threshold='False')
    C_ee = Synapses(Pe, Pe, on_pre='v += 1')
    C_ee.connect()
    network.add(Pe, C_ee)
    network.run(defaultclock.dt)

    device.reinit()
    device.activate(directory=None)

    network2 = Network()
    Pe = NeuronGroup(1, 'v : 1', threshold='False')
    C_ee = Synapses(Pe, Pe, on_pre='v += 1')
    C_ee.connect()
    network2.add(Pe, C_ee)
    network2.run(defaultclock.dt)
Example #12
def setup_and_teardown(request):
    # Set preferences before each test
    import angela2
    if hasattr(request.config, 'workerinput'):
        config = request.config.workerinput
        for key, value in config['angela_prefs'].items():
            if isinstance(value, tuple) and value[0] == 'TYPE':
                matches = re.match(r"<(type|class) 'numpy\.(.+)'>", value[1])
                if matches is None or len(matches.groups()) != 2:
                    raise TypeError('Do not know how to handle {} in '
                                    'preferences'.format(value[1]))
                t = matches.groups()[1]
                if t == 'float64':
                    value = np.float64
                elif t == 'float32':
                    value = np.float32
                elif t == 'int64':
                    value = np.int64
                elif t == 'int32':
                    value = np.int32

            angela2.prefs[key] = value
        set_device(config['device'], **config['device_options'])
    else:
        for k, v in request.config.angela_prefs.items():
            angela2.prefs[k] = v
        set_device(request.config.device, **request.config.device_options)
    angela2.prefs._backup()
    # Print output changed in numpy 1.14; stick with the old format to
    # avoid doctest failures
    try:
        np.set_printoptions(legacy='1.13')
    except TypeError:
        pass  # using a numpy version < 1.14

    yield  # run test

    # Reset defaultclock.dt to be sure
    defaultclock.dt = 0.1 * ms
Example #13
def test_delete_code_data():
    set_device('cpp_standalone', build_on_run=True, directory=None)
    group = NeuronGroup(10, 'dv/dt = -v / (10*ms) : volt', method='exact')
    group.v = np.arange(10) * mV  # uses the static array mechanism
    run(defaultclock.dt)
    results_dir = os.path.join(device.project_dir, 'results')
    assert os.path.exists(results_dir) and os.path.isdir(results_dir)
    # There should be 3 files for the clock, 2 for the neurongroup (index + v),
    # and the "last_run_info.txt" file
    assert len(os.listdir(results_dir)) == 6
    device.delete(data=True, code=False, directory=False)
    assert os.path.exists(results_dir) and os.path.isdir(results_dir)
    assert len(os.listdir(results_dir)) == 0
    assert len(os.listdir(os.path.join(device.project_dir,
                                       'static_arrays'))) > 0
    assert len(os.listdir(os.path.join(device.project_dir,
                                       'code_objects'))) > 0
    device.delete(data=False, code=True, directory=False)
    assert len(os.listdir(os.path.join(device.project_dir,
                                       'static_arrays'))) == 0
    assert len(os.listdir(os.path.join(device.project_dir,
                                       'code_objects'))) == 0
Example #14
def test_storing_loading():
    set_device('cpp_standalone', build_on_run=False)
    G = NeuronGroup(10, '''v : volt
                           x : 1
                           n : integer
                           b : boolean''')
    v = np.arange(10) * volt
    x = np.arange(10, 20)
    n = np.arange(20, 30)
    b = np.array([True, False]).repeat(5)
    G.v = v
    G.x = x
    G.n = n
    G.b = b
    S = Synapses(G, G, '''v_syn : volt
                          x_syn : 1
                          n_syn : integer
                          b_syn : boolean''')
    S.connect(j='i')
    S.v_syn = v
    S.x_syn = x
    S.n_syn = n
    S.b_syn = b
    run(0 * ms)
    device.build(directory=None, with_output=False)
    assert_allclose(G.v[:], v)
    assert_allclose(S.v_syn[:], v)
    assert_allclose(G.x[:], x)
    assert_allclose(S.x_syn[:], x)
    assert_allclose(G.n[:], n)
    assert_allclose(S.n_syn[:], n)
    assert_allclose(G.b[:], b)
    assert_allclose(S.b_syn[:], b)
    reset_device()
Example #15
def test_set_reset_device_explicit():
    original_device = get_device()
    test_device1 = ATestDevice()
    all_devices['test1'] = test_device1
    test_device2 = ATestDevice()
    all_devices['test2'] = test_device2
    test_device3 = ATestDevice()
    all_devices['test3'] = test_device3

    set_device('test1', build_on_run=False, my_opt=1)
    set_device('test2', build_on_run=True, my_opt=2)
    set_device('test3', build_on_run=False, my_opt=3)

    reset_device('test1')  # Directly jump back to the first device
    assert get_device() is test_device1
    assert get_device()._options['my_opt'] == 1
    assert not get_device().build_on_run

    del all_devices['test1']
    del all_devices['test2']
    del all_devices['test3']
    reset_device(original_device)
Example #16
def test_array_cache():
    # Check that variables are only accessible from Python when they should be
    set_device('cpp_standalone', build_on_run=False)
    G = NeuronGroup(10,
                    '''dv/dt = -v / (10*ms) : 1
                       w : 1
                       x : 1
                       y : 1
                       z : 1 (shared)''',
                    threshold='v>1')
    S = Synapses(G, G, 'weight: 1', on_pre='w += weight')
    S.connect(p=0.2)
    S.weight = 7
    # All neurongroup values should be known
    assert_allclose(G.v, 0)
    assert_allclose(G.w, 0)
    assert_allclose(G.x, 0)
    assert_allclose(G.y, 0)
    assert_allclose(G.z, 0)
    assert_allclose(G.i, np.arange(10))

    # But the synaptic variable is not -- we don't know the number of synapses
    with pytest.raises(NotImplementedError):
        S.weight[:]

    # Setting variables with explicit values should not change anything
    G.v = np.arange(10) + 1
    G.w = 2
    G.y = 5
    G.z = 7
    assert_allclose(G.v, np.arange(10) + 1)
    assert_allclose(G.w, 2)
    assert_allclose(G.y, 5)
    assert_allclose(G.z, 7)

    # But setting with code should invalidate them
    G.x = 'i*2'
    with pytest.raises(NotImplementedError):
        G.x[:]

    # Make sure that the array cache still rejects values of an incorrect size
    with pytest.raises(ValueError):
        setattr(G, 'w', [0, 2])
    with pytest.raises(ValueError):
        G.w.__setitem__(slice(0, 4), [0, 2])

    run(10 * ms)
    # v is now no longer known without running the network
    with pytest.raises(NotImplementedError):
        G.v[:]
    # Neither is w; it is updated in the synapse
    with pytest.raises(NotImplementedError):
        G.w[:]
    # However, no code touches y or z
    assert_allclose(G.y, 5)
    assert_allclose(G.z, 7)
    # i is read-only anyway
    assert_allclose(G.i, np.arange(10))

    # After actually running the network, everything should be accessible
    device.build(directory=None, with_output=False)
    assert all(G.v > 0)
    assert all(G.w > 0)
    assert_allclose(G.x, np.arange(10) * 2)
    assert_allclose(G.y, 5)
    assert_allclose(G.z, 7)
    assert_allclose(G.i, np.arange(10))
    assert_allclose(S.weight, 7)
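As the asserts above show, reading a standalone variable that the array cache cannot know (before device.build() has actually run the simulation) raises NotImplementedError. A small defensive-access sketch; the helper name is hypothetical and not part of the test suite:

# Illustrative helper: return cached values if the array cache knows them.
def cached_or_none(variable_view):
    try:
        return variable_view[:]
    except NotImplementedError:
        # The real values are only available after device.build() has run.
        return None

values = cached_or_none(G.v)  # None before the build, an array afterwards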
Example #17
def test_openmp_consistency():
    previous_device = get_device()
    n_cells = 100
    n_recorded = 10
    numpy.random.seed(42)
    taum = 20 * ms
    taus = 5 * ms
    Vt = -50 * mV
    Vr = -60 * mV
    El = -49 * mV
    fac = (60 * 0.27 / 10)
    gmax = 20 * fac
    dApre = .01
    taupre = 20 * ms
    taupost = taupre
    dApost = -dApre * taupre / taupost * 1.05
    dApost *= 0.1 * gmax
    dApre *= 0.1 * gmax

    connectivity = numpy.random.randn(n_cells, n_cells)
    sources = numpy.random.randint(0, n_cells - 1, 10 * n_cells)
    # Only use one spike per time step (to rule out that a single source neuron
    # has more than one spike in a time step)
    times = numpy.random.choice(
        numpy.arange(10 * n_cells), 10 * n_cells, replace=False) * ms
    v_init = Vr + numpy.random.rand(n_cells) * (Vt - Vr)

    eqs = Equations('''
    dv/dt = (g-(v-El))/taum : volt
    dg/dt = -g/taus         : volt
    ''')

    results = {}

    for (n_threads,
         devicename) in [(0, 'runtime'), (0, 'cpp_standalone'),
                         (1, 'cpp_standalone'), (2, 'cpp_standalone'),
                         (3, 'cpp_standalone'), (4, 'cpp_standalone')]:
        set_device(devicename, build_on_run=False, with_output=False)
        Synapses.__instances__().clear()
        if devicename == 'cpp_standalone':
            reinit_and_delete()
        prefs.devices.cpp_standalone.openmp_threads = n_threads
        P = NeuronGroup(n_cells,
                        model=eqs,
                        threshold='v>Vt',
                        reset='v=Vr',
                        refractory=5 * ms)
        Q = SpikeGeneratorGroup(n_cells, sources, times)
        P.v = v_init
        P.g = 0 * mV
        S = Synapses(P, P,
                     model='''dApre/dt=-Apre/taupre    : 1 (event-driven)
                              dApost/dt=-Apost/taupost : 1 (event-driven)
                              w                        : 1''',
                     pre='''g     += w*mV
                            Apre  += dApre
                            w      = w + Apost''',
                     post='''Apost += dApost
                             w      = w + Apre''')
        S.connect()

        S.w = fac * connectivity.flatten()

        T = Synapses(Q, P, model="w : 1", on_pre="g += w*mV")
        T.connect(j='i')
        T.w = 10 * fac

        spike_mon = SpikeMonitor(P)
        rate_mon = PopulationRateMonitor(P)
        state_mon = StateMonitor(S,
                                 'w',
                                 record=np.arange(n_recorded),
                                 dt=0.1 * second)
        v_mon = StateMonitor(P, 'v', record=np.arange(n_recorded))

        run(0.2 * second, report='text')

        if devicename == 'cpp_standalone':
            device.build(directory=None, with_output=False)

        results[n_threads, devicename] = {}
        results[n_threads, devicename]['w'] = state_mon.w
        results[n_threads, devicename]['v'] = v_mon.v
        results[n_threads, devicename]['s'] = spike_mon.num_spikes
        results[n_threads, devicename]['r'] = rate_mon.rate[:]

    for key1, key2 in [((0, 'runtime'), (0, 'cpp_standalone')),
                       ((1, 'cpp_standalone'), (0, 'cpp_standalone')),
                       ((2, 'cpp_standalone'), (0, 'cpp_standalone')),
                       ((3, 'cpp_standalone'), (0, 'cpp_standalone')),
                       ((4, 'cpp_standalone'), (0, 'cpp_standalone'))]:
        assert_allclose(results[key1]['w'], results[key2]['w'])
        assert_allclose(results[key1]['v'], results[key2]['v'])
        assert_allclose(results[key1]['r'], results[key2]['r'])
        assert_allclose(results[key1]['s'], results[key2]['s'])
    reset_device(previous_device)