Example #1
    def test_domain_decomposition_exceptions(self):
        nranks = 1
        rank = 0
        if (mpi_enabled):
            comm = arb.mpi_comm()
            context = arb.context(threads=1, gpu_id=None, mpi=comm)
            nranks = context.ranks
            rank = context.rank
        else:
            context = arb.context(threads=1, gpu_id=None)

        recipe = gj_symmetric(nranks)

        hint1 = arb.partition_hint()
        hint1.prefer_gpu = False
        hint1.cpu_group_size = 0
        hints1 = dict([(arb.cell_kind.cable, hint1)])

        with self.assertRaisesRegex(RuntimeError,
            "unable to perform load balancing because cell_kind::cable has invalid suggested cpu_cell_group size of 0"):
            decomp1 = arb.partition_load_balance(recipe, context, hints1)

        hint2 = arb.partition_hint()
        hint2.prefer_gpu = True
        hint2.gpu_group_size = 0
        hints2 = dict([(arb.cell_kind.cable, hint2)])

        with self.assertRaisesRegex(RuntimeError,
            "unable to perform load balancing because cell_kind::cable has invalid suggested gpu_cell_group size of 0"):
            decomp2 = arb.partition_load_balance(recipe, context, hints2)
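For contrast, a hedged sketch showing that a nonzero group size is accepted (reusing the recipe and context above; hint_ok is a hypothetical name):

        # Sketch: a valid, nonzero cpu_group_size load-balances without raising.
        hint_ok = arb.partition_hint()
        hint_ok.prefer_gpu = False
        hint_ok.cpu_group_size = 1
        decomp = arb.partition_load_balance(recipe, context,
                                            {arb.cell_kind.cable: hint_ok})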
Example #2
    def test_exceptions_context_arbmpi(self):
        alloc = arb.proc_allocation()

        with self.assertRaisesRegex(
                RuntimeError, "mpi must be None, or an MPI communicator"):
            arb.context(mpi='MPI_COMM_WORLD')
        with self.assertRaisesRegex(
                RuntimeError, "mpi must be None, or an MPI communicator"):
            arb.context(alloc, mpi=0)
Example #3
    def test_domain_decomposition_heterogenous_MC(self):
        if (mpi_enabled):
            comm = arb.mpi_comm()
            context = arb.context(threads=1, gpu_id=None, mpi=comm)
        else:
            context = arb.context(threads=1, gpu_id=None)

        N = context.ranks
        I = context.rank

        # 10 cells per domain
        n_local = 10
        n_global = n_local * N
        n_local_groups = n_local # 1 cell per group

        recipe = hetero_recipe(n_global)
        decomp = arb.partition_load_balance(recipe, context)

        self.assertEqual(decomp.num_local_cells, n_local)
        self.assertEqual(decomp.num_global_cells, n_global)
        self.assertEqual(len(decomp.groups), n_local)

        b = I * n_local
        e = (I + 1) * n_local
        gids = list(range(b,e))

        for gid in gids:
            self.assertEqual(I, decomp.gid_domain(gid))

        # Each cell group contains 1 cell, of kind cable or spike_source
        # Each group should also be tagged for cpu execution
        grps = list(range(n_local_groups))
        kind_lists = dict()
        for i in grps:
            grp = decomp.groups[i]
            self.assertEqual(len(grp.gids), 1)
            k = grp.kind
            if k not in kind_lists:
                kind_lists[k] = []
            kind_lists[k].append(grp.gids[0])

            self.assertEqual(grp.backend, arb.backend.multicore)

        kinds = [arb.cell_kind.cable, arb.cell_kind.spike_source]
        for k in kinds:
            gids = kind_lists[k]
            self.assertEqual(len(gids), int(n_local/2))
            for gid in gids:
                self.assertEqual(k, recipe.cell_kind(gid))
Example #4
    def test_domain_decomposition_hints(self):
        n_cells = 20
        recipe = hetero_recipe(n_cells)
        context = arb.context()
        # The hints prefer the multicore backend, so the decomposition is expected
        # to never have cell groups on the GPU, regardless of whether a GPU is
        # available or not.
        cable_hint = arb.partition_hint()
        cable_hint.prefer_gpu = False
        cable_hint.cpu_group_size = 3
        spike_hint = arb.partition_hint()
        spike_hint.prefer_gpu = False
        spike_hint.cpu_group_size = 4
        hints = dict([(arb.cell_kind.cable, cable_hint),
                      (arb.cell_kind.spike_source, spike_hint)])

        decomp = arb.partition_load_balance(recipe, context, hints)

        exp_cable_groups = [[0, 2, 4], [6, 8, 10], [12, 14, 16], [18]]
        exp_spike_groups = [[1, 3, 5, 7], [9, 11, 13, 15], [17, 19]]

        cable_groups = []
        spike_groups = []

        for g in decomp.groups:
            self.assertTrue(g.kind == arb.cell_kind.cable
                            or g.kind == arb.cell_kind.spike_source)

            if (g.kind == arb.cell_kind.cable):
                cable_groups.append(g.gids)
            elif (g.kind == arb.cell_kind.spike_source):
                spike_groups.append(g.gids)

        self.assertEqual(exp_cable_groups, cable_groups)
        self.assertEqual(exp_spike_groups, spike_groups)
Example #5
    def test_domain_decomposition_heterogenous_GPU(self):
        n_cells = 10
        recipe = hetero_recipe(n_cells)
        context = arb.context(threads=1, gpu_id=0)
        decomp = arb.partition_load_balance(recipe, context)

        self.assertEqual(decomp.num_local_cells, n_cells)
        self.assertEqual(decomp.num_global_cells, n_cells)

        # one cable cell group with n_cells/2 cells on the gpu, and
        # n_cells/2 single-cell spike_source groups on the cpu
        expected_groups = int(n_cells / 2) + 1
        self.assertEqual(len(decomp.groups), expected_groups)

        grps = range(expected_groups)
        n = 0
        # iterate over each group and test its properties
        for i in grps:
            grp = decomp.groups[i]
            k = grp.kind
            if (k == arb.cell_kind.cable):
                self.assertEqual(grp.backend, arb.backend.gpu)
                self.assertEqual(len(grp.gids), int(n_cells / 2))
                for gid in grp.gids:
                    self.assertTrue(gid % 2 == 0)
                    n += 1
            elif (k == arb.cell_kind.spike_source):
                self.assertEqual(grp.backend, arb.backend.multicore)
                self.assertEqual(len(grp.gids), 1)
                self.assertTrue(grp.gids[0] % 2)
                n += 1
        self.assertEqual(n_cells, n)
Example #6
    def test_domain_decomposition_heterogenous_CPU(self):
        n_cells = 10
        recipe = hetero_recipe(n_cells)
        context = arb.context()
        decomp = arb.partition_load_balance(recipe, context)

        self.assertEqual(decomp.num_local_cells, n_cells)
        self.assertEqual(decomp.num_global_cells, n_cells)
        self.assertEqual(len(decomp.groups), n_cells)

        gids = list(range(n_cells))
        for gid in gids:
            self.assertEqual(0, decomp.gid_domain(gid))

        # Each cell group contains 1 cell, of kind cable or spike_source
        # Each group should also be tagged for cpu execution
        grps = list(range(n_cells))
        kind_lists = dict()
        for i in grps:
            grp = decomp.groups[i]
            self.assertEqual(len(grp.gids), 1)
            k = grp.kind
            if k not in kind_lists:
                kind_lists[k] = []
            kind_lists[k].append(grp.gids[0])

            self.assertEqual(grp.backend, arb.backend.multicore)

        kinds = [arb.cell_kind.cable, arb.cell_kind.spike_source]
        for k in kinds:
            gids = kind_lists[k]
            self.assertEqual(len(gids), int(n_cells / 2))
            for gid in gids:
                self.assertEqual(k, recipe.cell_kind(gid))
Example #7
def run(dT, n_pairs=1, do_plots=False):
    recipe = single_recipe(dT, n_pairs)

    context = arbor.context()
    domains = arbor.partition_load_balance(recipe, context)
    sim = arbor.simulation(recipe, domains, context)

    sim.record(arbor.spike_recording.all)

    reg_sched = arbor.regular_schedule(0.1)
    handle_mem = sim.sample((0, 0), reg_sched)
    handle_g = sim.sample((0, 1), reg_sched)
    handle_apost = sim.sample((0, 2), reg_sched)
    handle_apre = sim.sample((0, 3), reg_sched)
    handle_weight_plastic = sim.sample((0, 4), reg_sched)

    sim.run(tfinal=600)

    if do_plots:
        print("Plotting detailed results ...")

        for (handle, var) in [(handle_mem, 'U'), (handle_g, "g"),
                              (handle_apost, "apost"), (handle_apre, "apre"),
                              (handle_weight_plastic, "weight_plastic")]:

            data, meta = sim.samples(handle)[0]

            df = pandas.DataFrame({'t/ms': data[:, 0], var: data[:, 1]})
            seaborn.relplot(data=df, kind="line", x="t/ms", y=var,
                            ci=None).savefig(
                                'single_cell_stdp_result_{}.svg'.format(var))

    weight_plastic, meta = sim.samples(handle_weight_plastic)[0]

    return weight_plastic[:, 1][-1]
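A brief usage sketch for the runner above; the dT offsets below are illustrative only, and plotting is left off:

if __name__ == '__main__':
    # Illustrative sweep of spike-pair offsets; run() returns the final
    # plastic weight for a single offset dT.
    for dT in [-10.0, -5.0, 5.0, 10.0]:
        print('dT = {:+.1f} ms -> final plastic weight = {:.6f}'
              .format(dT, run(dT)))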
Example #8
    def test_context(self):
        ctx = arb.context(threads=42, gpu_id=None)

        self.assertFalse(ctx.has_mpi)
        self.assertFalse(ctx.has_gpu)
        self.assertEqual(ctx.threads, 42)
        self.assertEqual(ctx.ranks, 1)
        self.assertEqual(ctx.rank, 0)
Example #9
    def test_context_allocation_mpi4py(self):
        comm = arb.mpi_comm(mpi.COMM_WORLD)

        # test context with alloc and mpi
        alloc = arb.proc_allocation()
        ctx = arb.context(alloc, comm)

        self.assertEqual(ctx.threads, alloc.threads)
        self.assertTrue(ctx.has_mpi)
Example #10
    def test_default_context(self):
        ctx = arb.context()

        # test that by default context has 1 thread and no GPU, no MPI
        self.assertFalse(ctx.has_mpi)
        self.assertFalse(ctx.has_gpu)
        self.assertEqual(ctx.threads, 1)
        self.assertEqual(ctx.ranks, 1)
        self.assertEqual(ctx.rank, 0)
Example #11
    def test_context_allocation(self):
        alloc = arb.proc_allocation()

        # test context construction with proc_allocation()
        ctx = arb.context(alloc)
        self.assertEqual(ctx.threads, alloc.threads)
        self.assertEqual(ctx.has_gpu, alloc.has_gpu)
        self.assertEqual(ctx.ranks, 1)
        self.assertEqual(ctx.rank, 0)
Example #12
    def test_context_avail_threads(self):
        # test that threads='avail_threads' yields at least one thread.
        ctx = arb.context(threads='avail_threads', gpu_id=None)

        self.assertFalse(ctx.has_mpi)
        self.assertFalse(ctx.has_gpu)
        self.assertTrue(ctx.threads >= 1)
        self.assertEqual(ctx.ranks, 1)
        self.assertEqual(ctx.rank, 0)
Example #13
    def test_domain_decomposition_nonsymmetric(self):
        nranks = 1
        rank = 0
        if (mpi_enabled):
            comm = arb.mpi_comm()
            context = arb.context(threads=1, gpu_id=None, mpi=comm)
            nranks = context.ranks
            rank = context.rank
        else:
            context = arb.context(threads=1, gpu_id=None)

        recipe = gj_non_symmetric(nranks)
        decomp = arb.partition_load_balance(recipe, context)

        cells_per_rank = nranks

        # check groups
        i = 0
        for gid in range(rank * cells_per_rank, (rank + 1) * cells_per_rank):
            if (gid % nranks == rank - 1):
                continue
            elif (gid % nranks == rank and rank != nranks - 1):
                cg = [gid, gid + cells_per_rank]
                self.assertEqual(cg,
                                 decomp.groups[len(decomp.groups) - 1].gids)
            else:
                cg = [gid]
                self.assertEqual(cg, decomp.groups[i].gids)
                i += 1

        # check gid_domains
        for gid in range(recipe.num_cells()):
            group = int(gid / cells_per_rank)
            idx = gid % cells_per_rank
            ngroups = nranks
            if (idx == group - 1):
                self.assertEqual(group - 1, decomp.gid_domain(gid))
            elif (idx == group and group != ngroups - 1):
                self.assertEqual(group, decomp.gid_domain(gid))
            else:
                self.assertEqual(group, decomp.gid_domain(gid))
Example #14
    def test_domain_decomposition_homogenous_GPU(self):

        if (mpi_enabled):
            comm = arb.mpi_comm()
            context = arb.context(threads=1, gpu_id=0, mpi=comm)
        else:
            context = arb.context(threads=1, gpu_id=0)

        N = context.ranks
        I = context.rank

        # 10 cells per domain
        n_local = 10
        n_global = n_local * N

        recipe = homo_recipe(n_global)
        decomp = arb.partition_load_balance(recipe, context)

        self.assertEqual(decomp.num_local_cells, n_local)
        self.assertEqual(decomp.num_global_cells, n_global)
        self.assertEqual(len(decomp.groups), 1)

        b = I * n_local
        e = (I + 1) * n_local
        gids = list(range(b,e))

        for gid in gids:
            self.assertEqual(I, decomp.gid_domain(gid))

        # The single cell group should contain all n_local cells of kind cable,
        # and should be tagged for gpu execution

        grp = decomp.groups[0]

        self.assertEqual(len(grp.gids), n_local)
        self.assertEqual(grp.gids[0], b)
        self.assertEqual(grp.gids[-1], e-1)
        self.assertEqual(grp.backend, arb.backend.gpu)
        self.assertEqual(grp.kind, arb.cell_kind.cable)
Example #15
def run():
    v = options.parse_arguments().verbosity

    comm = arb.mpi_comm(mpi.COMM_WORLD)
    alloc = arb.proc_allocation()
    ctx = arb.context(alloc, comm)
    rank = ctx.rank

    if rank == 0:
        runner = unittest.TextTestRunner(verbosity=v)
    else:
        sys.stdout = open(os.devnull, 'w')
        runner = unittest.TextTestRunner(stream=sys.stdout)

    runner.run(suite())
Example #16
    def init_sim(self):
        comm = A.mpi_comm()
        context = A.context(threads=1, gpu_id=None, mpi=comm)
        self.rank = context.rank
        self.ranks = context.ranks

        recipe = lifN_recipe(context.ranks)
        dd = A.partition_load_balance(recipe, context)

        # Confirm decomposition has gid 0 on rank 0, ..., gid N-1 on rank N-1.
        self.assertEqual(1, dd.num_local_cells)
        local_groups = dd.groups
        self.assertEqual(1, len(local_groups))
        self.assertEqual([self.rank], local_groups[0].gids)

        return A.simulation(recipe, dd, context)
Example #17
def context():
    """
    Fixture that produces an MPI-sensitive `arbor.context`.
    """
    args = [arbor.proc_allocation()]
    if _mpi_enabled:
        if not arbor.mpi_is_initialized():
            print("Context fixture initializing mpi", flush=True)
            arbor.mpi_init()
            atexit.register(_finalize_mpi)
        if _mpi4py_enabled:
            from mpi4py.MPI import COMM_WORLD as comm
        else:
            comm = arbor.mpi_comm()
        args.append(comm)
    return arbor.context(*args)
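A short usage sketch, assuming the function above is registered as a pytest fixture (e.g. with a @pytest.fixture decorator or via conftest.py); pytest then injects it by parameter name:

# Hypothetical test consuming the context fixture above.
def test_context_ranks(context):
    assert context.ranks >= 1
    assert 0 <= context.rank < context.ranks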
Example #18
def run():
    v = options.parse_arguments().verbosity

    if not arb.mpi_is_initialized():
        arb.mpi_init()

    comm = arb.mpi_comm()
    alloc = arb.proc_allocation()
    ctx = arb.context(alloc, comm)
    rank = ctx.rank

    if rank == 0:
        runner = unittest.TextTestRunner(verbosity=v)
    else:
        sys.stdout = open(os.devnull, 'w')
        runner = unittest.TextTestRunner(stream=sys.stdout)

    runner.run(suite())

    if not arb.mpi_is_finalized():
        arb.mpi_finalize()
Example #19
    def test_domain_decomposition_homogenous_CPU(self):
        n_cells = 10
        recipe = homo_recipe(n_cells)
        context = arb.context()
        decomp = arb.partition_load_balance(recipe, context)

        self.assertEqual(decomp.num_local_cells, n_cells)
        self.assertEqual(decomp.num_global_cells, n_cells)
        self.assertEqual(len(decomp.groups), n_cells)

        gids = list(range(n_cells))
        for gid in gids:
            self.assertEqual(0, decomp.gid_domain(gid))

        # Each cell group contains 1 cell of kind cable
        # Each group should also be tagged for cpu execution
        for i in gids:
            grp = decomp.groups[i]
            self.assertEqual(len(grp.gids), 1)
            self.assertEqual(grp.gids[0], i)
            self.assertEqual(grp.backend, arb.backend.multicore)
            self.assertEqual(grp.kind, arb.cell_kind.cable)
Example #20
    def test_domain_decomposition_homogenous_GPU(self):
        n_cells = 10
        recipe = homo_recipe(n_cells)
        context = arb.context(threads=1, gpu_id=0)
        decomp = arb.partition_load_balance(recipe, context)

        self.assertEqual(decomp.num_local_cells, n_cells)
        self.assertEqual(decomp.num_global_cells, n_cells)
        self.assertEqual(len(decomp.groups), 1)

        gids = range(n_cells)
        for gid in gids:
            self.assertEqual(0, decomp.gid_domain(gid))

        # The single cell group should contain all n_cells cells of kind cable,
        # and should be tagged for gpu execution

        grp = decomp.groups[0]

        self.assertEqual(len(grp.gids), n_cells)
        self.assertEqual(grp.gids[0], 0)
        self.assertEqual(grp.gids[-1], n_cells - 1)
        self.assertEqual(grp.backend, arb.backend.gpu)
        self.assertEqual(grp.kind, arb.cell_kind.cable)
Example #21
    def test_domain_decomposition_exceptions(self):
        n_cells = 20
        recipe = hetero_recipe(n_cells)
        context = arb.context()
        # Hint group sizes of 0 are invalid: load balancing must fail with a
        # RuntimeError that names the offending cell kind and backend.
        cable_hint = arb.partition_hint()
        cable_hint.prefer_gpu = False
        cable_hint.cpu_group_size = 0
        spike_hint = arb.partition_hint()
        spike_hint.prefer_gpu = False
        spike_hint.gpu_group_size = 1
        hints = dict([(arb.cell_kind.cable, cable_hint),
                      (arb.cell_kind.spike_source, spike_hint)])

        with self.assertRaisesRegex(
                RuntimeError,
                "unable to perform load balancing because cell_kind::cable has invalid suggested cpu_cell_group size of 0"
        ):
            decomp = arb.partition_load_balance(recipe, context, hints)

        cable_hint = arb.partition_hint()
        cable_hint.prefer_gpu = False
        cable_hint.cpu_group_size = 1
        spike_hint = arb.partition_hint()
        spike_hint.prefer_gpu = True
        spike_hint.gpu_group_size = 0
        hints = dict([(arb.cell_kind.cable, cable_hint),
                      (arb.cell_kind.spike_source, spike_hint)])

        with self.assertRaisesRegex(
                RuntimeError,
                "unable to perform load balancing because cell_kind::spike_source has invalid suggested gpu_cell_group size of 0"
        ):
            decomp = arb.partition_load_balance(recipe, context, hints)
Example #22
    def test_simulation(self):
        rcp = recipe()
        ctx = arb.context()
        dom = arb.partition_load_balance(rcp, ctx)
        sim = arb.simulation(rcp, dom, ctx)
        sim.run(tfinal=30)
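A hedged variant of the test above that also exercises spike recording, following the sim.record usage shown in Example #7; the (source, time) record layout of sim.spikes() is an assumption here:

    def test_simulation_spikes(self):
        # Hedged variant: same setup, but record and inspect spikes.
        rcp = recipe()
        ctx = arb.context()
        dom = arb.partition_load_balance(rcp, ctx)
        sim = arb.simulation(rcp, dom, ctx)
        sim.record(arb.spike_recording.all)
        sim.run(tfinal=30)
        # sim.spikes() is assumed to return (source, time) records.
        for s in sim.spikes():
            self.assertTrue(s[1] <= 30)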
Example #23
        if gid == 0:
            sched = arbor.explicit_schedule([1])
            return [arbor.event_generator(arbor.cell_member(0, 0), 0.1, sched)]
        return []

    def probes(self, gid):
        return [arbor.cable_probe_membrane_voltage('"root"')]

    def global_properties(self, kind):
        return self.props

comm = arbor.mpi_comm()
print(comm)

# (10) Set up the hardware context
context = arbor.context(threads=20, gpu_id=None, mpi=comm)
print(context)

# (11) Set up and start the meter manager
meters = arbor.meter_manager()
meters.start(context)

# (12) Instantiate recipe
ncells = 50
recipe = ring_recipe(ncells)
meters.checkpoint('recipe-create', context)

# (13) Define a hint for the execution.
hint = arbor.partition_hint()
hint.prefer_gpu = True
hint.gpu_group_size = 1000
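The hint only takes effect once it is passed to the load balancer. A possible continuation sketch, following the hints pattern used in the decomposition tests above (recipe, context and meters as defined in this example):

# Sketch: pass the hint to the load balancer, keyed by cell kind.
hints = {arbor.cell_kind.cable: hint}
decomp = arbor.partition_load_balance(recipe, context, hints)
meters.checkpoint('load-balance', context)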
Example #24
        d = 10
        return [
            arbor.connection(arbor.cell_member(src, 0),
                             arbor.cell_member(gid, 0), w, d)
        ]

    # Attach a generator to the first cell in the ring.
    def event_generators(self, gid):
        if gid == 0:
            sched = arbor.explicit_schedule([1])
            return [arbor.event_generator(arbor.cell_member(0, 0), 0.1, sched)]
        return []


nthreads = 1
context = arbor.context(threads=nthreads, gpu_id=None)

meters = arbor.meter_manager()
meters.start(context)

# Create a recipe
recipe = ring_recipe(1000)
meters.checkpoint('recipe-create', context)  # checkpoint

# Perform decomposition
decomp = arbor.partition_load_balance(recipe, context)
meters.checkpoint('load-balance', context)  # checkpoint

# Create the simulation
sim = arbor.simulation(recipe, decomp, context)
# Attach a spike recorder
Example #25
    def test_probe_addr_metadata(self):
        recipe = cc_recipe()
        context = A.context()
        dd = A.partition_load_balance(recipe, context)
        sim = A.simulation(recipe, dd, context)

        all_cv_cables = [A.cable(0, 0, 1)]

        m = sim.probe_metadata((0, 0))
        self.assertEqual(1, len(m))
        self.assertEqual(A.location(0, 0.0), m[0])

        m = sim.probe_metadata((0, 1))
        self.assertEqual(1, len(m))
        self.assertEqual(all_cv_cables, m[0])

        m = sim.probe_metadata((0, 2))
        self.assertEqual(1, len(m))
        self.assertEqual(A.location(0, 0.02), m[0])

        m = sim.probe_metadata((0, 3))
        self.assertEqual(1, len(m))
        self.assertEqual(A.location(0, 0.03), m[0])

        m = sim.probe_metadata((0, 4))
        self.assertEqual(1, len(m))
        self.assertEqual(all_cv_cables, m[0])

        m = sim.probe_metadata((0, 5))
        self.assertEqual(1, len(m))
        self.assertEqual(all_cv_cables, m[0])

        m = sim.probe_metadata((0, 6))
        self.assertEqual(1, len(m))
        self.assertEqual(A.location(0, 0.06), m[0])

        m = sim.probe_metadata((0, 7))
        self.assertEqual(1, len(m))
        self.assertEqual(all_cv_cables, m[0])

        m = sim.probe_metadata((0, 8))
        self.assertEqual(1, len(m))
        self.assertEqual(A.location(0, 0.08), m[0].location)
        self.assertEqual(1, m[0].multiplicity)
        self.assertEqual(0, m[0].target)

        m = sim.probe_metadata((0, 9))
        self.assertEqual(1, len(m))
        self.assertEqual(1, len(m[0]))
        self.assertEqual(A.location(0, 0.09), m[0][0].location)
        self.assertEqual(1, m[0][0].multiplicity)
        self.assertEqual(1, m[0][0].target)

        m = sim.probe_metadata((0, 10))
        self.assertEqual(1, len(m))
        self.assertEqual(A.location(0, 0.10), m[0])

        m = sim.probe_metadata((0, 11))
        self.assertEqual(1, len(m))
        self.assertEqual(all_cv_cables, m[0])

        m = sim.probe_metadata((0, 12))
        self.assertEqual(1, len(m))
        self.assertEqual(A.location(0, 0.12), m[0])

        m = sim.probe_metadata((0, 13))
        self.assertEqual(1, len(m))
        self.assertEqual(all_cv_cables, m[0])

        m = sim.probe_metadata((0, 14))
        self.assertEqual(1, len(m))
        self.assertEqual(A.location(0, 0.14), m[0])

        m = sim.probe_metadata((0, 15))
        self.assertEqual(1, len(m))
        self.assertEqual(all_cv_cables, m[0])

        m = sim.probe_metadata((0, 16))
        self.assertEqual(1, len(m))
        self.assertEqual(all_cv_cables, m[0])
Example #26
        self.props.catalogue = arb.load_catalogue(cat)
        d = arb.decor()
        d.paint('(all)', 'dummy')
        d.set_property(Vm=0.0)
        self.cell = arb.cable_cell(self.tree, arb.label_dict(), d)

    def global_properties(self, _):
        return self.props

    def num_cells(self):
        return 1

    def cell_kind(self, gid):
        return arb.cell_kind.cable

    def cell_description(self, gid):
        return self.cell

if not Path(cat).is_file():
    print("""Catalogue not found in this directory.
Please ensure it has been compiled by calling")
  <arbor>/scripts/build-catalogue cat <arbor>/python/examples/cat
where <arbor> is the location of the arbor source tree.""")
    exit(1)

rcp = recipe()
ctx = arb.context()
dom = arb.partition_load_balance(rcp, ctx)
sim = arb.simulation(rcp, dom, ctx)
sim.run(tfinal=30)
Example #27
    def gap_junction_on(self, gid):
        return []

    def event_generators(self, gid):
        return []

    def global_properties(self, gid):
        return self.the_props


recipe = single_recipe(cell, [probe])

# (4) Create an execution context

context = arbor.context()

# (5) Create a domain decomposition

domains = arbor.partition_load_balance(recipe, context)

# (6) Create a simulation

sim = arbor.simulation(recipe, domains, context)

# Instruct the simulation to record the spikes and sample the probe

sim.record(arbor.spike_recording.all)

probe_id = arbor.cell_member(0, 0)
handle = sim.sample(probe_id, arbor.regular_schedule(0.02))
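Once the simulation has run, the handle yields the recorded trace through sim.samples, as in Example #7; a minimal continuation sketch (the duration is illustrative):

# Sketch: run the simulation, then fetch the sampled trace.
sim.run(tfinal=100)  # illustrative duration
data, meta = sim.samples(handle)[0]
print('sampled', len(data), 'points (columns: time, value)')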
Example #28
    # Attach a generator to the first cell in the ring.
    def event_generators(self, gid):
        if gid == 0:
            sched = arbor.explicit_schedule([1])
            return [arbor.event_generator(arbor.cell_member(0, 0), 0.1, sched)]
        return []

    # Define one probe (for measuring voltage at the soma) on each cell.
    def num_probes(self, gid):
        return 1

    def get_probe(self, id):
        loc = arbor.location(0, 0) # at the soma
        return arbor.cable_probe('voltage', id, loc)

context = arbor.context(threads=12, gpu_id=None)
print(context)

meters = arbor.meter_manager()
meters.start(context)

ncells = 4
recipe = ring_recipe(ncells)
print(f'{recipe}')

meters.checkpoint('recipe-create', context)

decomp = arbor.partition_load_balance(recipe, context)
print(f'{decomp}')

hint = arbor.partition_hint()
Example #29
    def test_context_arbmpi(self):
        comm = arb.mpi_comm()

        # test context with mpi
        ctx = arb.context(mpi=comm)
        self.assertTrue(ctx.has_mpi)
Example #30
    def test_context_mpi4py(self):
        comm = arb.mpi_comm(mpi.COMM_WORLD)

        # test context with mpi
        ctx = arb.context(mpi=comm)
        self.assertTrue(ctx.has_mpi)
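A natural hedged extension of this check: the context should agree with mpi4py about job size and rank (Get_size/Get_rank are standard mpi4py communicator methods).

        # Sketch: context and mpi4py views of the job should match.
        self.assertEqual(ctx.ranks, mpi.COMM_WORLD.Get_size())
        self.assertEqual(ctx.rank, mpi.COMM_WORLD.Get_rank())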