def test_domain_decomposition_exceptions(self):
    """Load balancing with a zero-sized group hint must raise RuntimeError."""
    nranks = 1
    rank = 0
    if mpi_enabled:
        comm = arb.mpi_comm()
        context = arb.context(threads=1, gpu_id=None, mpi=comm)
        nranks = context.ranks
        rank = context.rank
    else:
        context = arb.context(threads=1, gpu_id=None)

    recipe = gj_symmetric(nranks)

    # A suggested cpu_cell_group size of 0 is invalid.
    cpu_hint = arb.partition_hint()
    cpu_hint.prefer_gpu = False
    cpu_hint.cpu_group_size = 0
    with self.assertRaisesRegex(
            RuntimeError,
            "unable to perform load balancing because cell_kind::cable has invalid suggested cpu_cell_group size of 0"):
        arb.partition_load_balance(recipe, context, {arb.cell_kind.cable: cpu_hint})

    # A suggested gpu_cell_group size of 0 is equally invalid.
    gpu_hint = arb.partition_hint()
    gpu_hint.prefer_gpu = True
    gpu_hint.gpu_group_size = 0
    with self.assertRaisesRegex(
            RuntimeError,
            "unable to perform load balancing because cell_kind::cable has invalid suggested gpu_cell_group size of 0"):
        arb.partition_load_balance(recipe, context, {arb.cell_kind.cable: gpu_hint})
def test_domain_decomposition_heterogenous_GPU(self):
    """Heterogeneous recipe on a GPU context: the cable cells share one GPU
    group while each spike source gets its own multicore group."""
    n_cells = 10
    recipe = hetero_recipe(n_cells)
    context = arb.context(threads=1, gpu_id=0)
    decomp = arb.partition_load_balance(recipe, context)

    self.assertEqual(decomp.num_local_cells, n_cells)
    self.assertEqual(decomp.num_global_cells, n_cells)

    # One GPU group with n_cells/2 cable cells, plus n_cells/2 single-cell
    # multicore groups for the spike sources.
    expected_groups = n_cells // 2 + 1
    self.assertEqual(len(decomp.groups), expected_groups)

    # Count every cell we encounter; all n_cells must be accounted for.
    seen = 0
    for idx in range(expected_groups):
        grp = decomp.groups[idx]
        if grp.kind == arb.cell_kind.cable:
            self.assertEqual(grp.backend, arb.backend.gpu)
            self.assertEqual(len(grp.gids), n_cells // 2)
            for gid in grp.gids:
                # Even gids are cable cells in the heterogeneous recipe.
                self.assertTrue(gid % 2 == 0)
                seen += 1
        elif grp.kind == arb.cell_kind.spike_source:
            self.assertEqual(grp.backend, arb.backend.multicore)
            self.assertEqual(len(grp.gids), 1)
            # Odd gids are spike sources.
            self.assertTrue(grp.gids[0] % 2)
            seen += 1
    self.assertEqual(n_cells, seen)
def test_domain_decomposition_hints(self):
    """Multicore-preferring hints must keep all groups off the GPU and honor
    the requested group sizes."""
    n_cells = 20
    recipe = hetero_recipe(n_cells)
    context = arb.context()

    # Both hints prefer the multicore backend, so the decomposition must
    # never place a cell group on the GPU, whether or not one is available.
    cable_hint = arb.partition_hint()
    cable_hint.prefer_gpu = False
    cable_hint.cpu_group_size = 3
    spike_hint = arb.partition_hint()
    spike_hint.prefer_gpu = False
    spike_hint.cpu_group_size = 4
    hints = {
        arb.cell_kind.cable: cable_hint,
        arb.cell_kind.spike_source: spike_hint,
    }

    decomp = arb.partition_load_balance(recipe, context, hints)

    # Even gids are cable cells, odd gids spike sources; groups fill in gid
    # order up to the hinted size.
    exp_cable_groups = [[0, 2, 4], [6, 8, 10], [12, 14, 16], [18]]
    exp_spike_groups = [[1, 3, 5, 7], [9, 11, 13, 15], [17, 19]]

    cable_groups = []
    spike_groups = []
    for g in decomp.groups:
        self.assertIn(g.kind, (arb.cell_kind.cable, arb.cell_kind.spike_source))
        if g.kind == arb.cell_kind.cable:
            cable_groups.append(g.gids)
        else:
            spike_groups.append(g.gids)

    self.assertEqual(exp_cable_groups, cable_groups)
    self.assertEqual(exp_spike_groups, spike_groups)
def run(dT, n_pairs=1, do_plots=False):
    """Run the single-cell STDP simulation and return the final plastic weight.

    dT: pre/post spike-time offset forwarded to the recipe.
    n_pairs: number of spike pairs.
    do_plots: when True, save an SVG trace per sampled variable.
    """
    recipe = single_recipe(dT, n_pairs)
    context = arbor.context()
    domains = arbor.partition_load_balance(recipe, context)
    sim = arbor.simulation(recipe, domains, context)

    sim.record(arbor.spike_recording.all)

    # Sample voltage, conductance, the two STDP traces and the plastic
    # weight every 0.1 ms.
    sched = arbor.regular_schedule(0.1)
    handles = {
        'U': sim.sample((0, 0), sched),
        'g': sim.sample((0, 1), sched),
        'apost': sim.sample((0, 2), sched),
        'apre': sim.sample((0, 3), sched),
        'weight_plastic': sim.sample((0, 4), sched),
    }

    sim.run(tfinal=600)

    if do_plots:
        print("Plotting detailed results ...")
        for var, handle in handles.items():
            data, meta = sim.samples(handle)[0]
            df = pandas.DataFrame({'t/ms': data[:, 0], var: data[:, 1]})
            seaborn.relplot(data=df, kind="line", x="t/ms", y=var,
                            ci=None).savefig(
                'single_cell_stdp_result_{}.svg'.format(var))

    # Final value of the plastic weight trace.
    weight_plastic, _ = sim.samples(handles['weight_plastic'])[0]
    return weight_plastic[:, 1][-1]
def test_domain_decomposition_heterogenous_CPU(self):
    """Heterogeneous recipe on a CPU context: one multicore group per cell,
    half cable cells and half spike sources."""
    n_cells = 10
    recipe = hetero_recipe(n_cells)
    context = arb.context()
    decomp = arb.partition_load_balance(recipe, context)

    self.assertEqual(decomp.num_local_cells, n_cells)
    self.assertEqual(decomp.num_global_cells, n_cells)
    self.assertEqual(len(decomp.groups), n_cells)

    # Single rank: every gid lives on domain 0.
    for gid in range(n_cells):
        self.assertEqual(0, decomp.gid_domain(gid))

    # Every group holds exactly one cell on the multicore backend; collect
    # the gids per cell kind as we go.
    by_kind = {}
    for idx in range(n_cells):
        grp = decomp.groups[idx]
        self.assertEqual(len(grp.gids), 1)
        by_kind.setdefault(grp.kind, []).append(grp.gids[0])
        self.assertEqual(grp.backend, arb.backend.multicore)

    # The recipe alternates kinds, so each kind covers half the cells.
    for kind in (arb.cell_kind.cable, arb.cell_kind.spike_source):
        gids = by_kind[kind]
        self.assertEqual(len(gids), n_cells // 2)
        for gid in gids:
            self.assertEqual(kind, recipe.cell_kind(gid))
def test_domain_decomposition_heterogenous_MC(self):
    """Heterogeneous recipe across MPI ranks: single-cell multicore groups
    over a contiguous gid block per rank."""
    if mpi_enabled:
        comm = arb.mpi_comm()
        context = arb.context(threads=1, gpu_id=None, mpi=comm)
    else:
        context = arb.context(threads=1, gpu_id=None)
    nranks = context.ranks
    rank = context.rank

    # 10 cells per domain, one cell per group.
    n_local = 10
    n_global = n_local * nranks

    recipe = hetero_recipe(n_global)
    decomp = arb.partition_load_balance(recipe, context)

    self.assertEqual(decomp.num_local_cells, n_local)
    self.assertEqual(decomp.num_global_cells, n_global)
    self.assertEqual(len(decomp.groups), n_local)

    # This rank owns the contiguous gid block [first, last).
    first = rank * n_local
    last = (rank + 1) * n_local
    for gid in range(first, last):
        self.assertEqual(rank, decomp.gid_domain(gid))

    # Every group holds exactly one cell, tagged for multicore execution;
    # collect the gids per cell kind as we go.
    by_kind = {}
    for idx in range(n_local):
        grp = decomp.groups[idx]
        self.assertEqual(len(grp.gids), 1)
        by_kind.setdefault(grp.kind, []).append(grp.gids[0])
        self.assertEqual(grp.backend, arb.backend.multicore)

    # Half the local cells are cable cells, half spike sources.
    for kind in (arb.cell_kind.cable, arb.cell_kind.spike_source):
        gids = by_kind[kind]
        self.assertEqual(len(gids), n_local // 2)
        for gid in gids:
            self.assertEqual(kind, recipe.cell_kind(gid))
def test_domain_decomposition_exceptions(self):
    """A zero-sized group-size hint must make load balancing raise RuntimeError."""
    n_cells = 20
    recipe = hetero_recipe(n_cells)
    context = arb.context()
    # Case 1: the cable kind suggests a cpu_cell_group size of 0, which is
    # invalid and must be rejected during load balancing.
    cable_hint = arb.partition_hint()
    cable_hint.prefer_gpu = False
    cable_hint.cpu_group_size = 0
    spike_hint = arb.partition_hint()
    spike_hint.prefer_gpu = False
    spike_hint.gpu_group_size = 1
    hints = dict([(arb.cell_kind.cable, cable_hint),
                  (arb.cell_kind.spike_source, spike_hint)])
    with self.assertRaisesRegex(
        RuntimeError,
        "unable to perform load balancing because cell_kind::cable has invalid suggested cpu_cell_group size of 0"
    ):
        decomp = arb.partition_load_balance(recipe, context, hints)

    # Case 2: the spike-source kind prefers the GPU but suggests a
    # gpu_cell_group size of 0, which is likewise invalid.
    cable_hint = arb.partition_hint()
    cable_hint.prefer_gpu = False
    cable_hint.cpu_group_size = 1
    spike_hint = arb.partition_hint()
    spike_hint.prefer_gpu = True
    spike_hint.gpu_group_size = 0
    hints = dict([(arb.cell_kind.cable, cable_hint),
                  (arb.cell_kind.spike_source, spike_hint)])
    with self.assertRaisesRegex(
        RuntimeError,
        "unable to perform load balancing because cell_kind::spike_source has invalid suggested gpu_cell_group size of 0"
    ):
        decomp = arb.partition_load_balance(recipe, context, hints)
def init_sim(self):
    """Build a one-LIF-cell-per-rank simulation over MPI.

    Stores this process's rank/ranks on self and returns an A.simulation
    whose decomposition places gid i on rank i.
    """
    comm = A.mpi_comm()
    # Reuse the communicator created above; the original constructed a
    # second, unused A.mpi_comm() inline, leaving `comm` dead.
    context = A.context(threads=1, gpu_id=None, mpi=comm)
    self.rank = context.rank
    self.ranks = context.ranks

    recipe = lifN_recipe(context.ranks)
    dd = A.partition_load_balance(recipe, context)

    # Confirm decomposition has gid 0 on rank 0, ..., gid N-1 on rank N-1.
    self.assertEqual(1, dd.num_local_cells)
    local_groups = dd.groups
    self.assertEqual(1, len(local_groups))
    self.assertEqual([self.rank], local_groups[0].gids)

    return A.simulation(recipe, dd, context)
def test_domain_decomposition_nonsymmetric(self):
    """Check group membership and gid->domain mapping for the non-symmetric
    gap-junction recipe across ranks."""
    nranks = 1
    rank = 0
    if (mpi_enabled):
        comm = arb.mpi_comm()
        context = arb.context(threads=1, gpu_id=None, mpi=comm)
        nranks = context.ranks
        rank = context.rank
    else:
        context = arb.context(threads=1, gpu_id=None)

    recipe = gj_non_symmetric(nranks)
    decomp = arb.partition_load_balance(recipe, context)

    cells_per_rank = nranks

    # check groups
    i = 0
    for gid in range(rank * cells_per_rank, (rank + 1) * cells_per_rank):
        if (gid % nranks == rank - 1):
            # Presumably coupled to a cell on the previous rank and hence
            # not grouped on this rank -- TODO confirm against gj recipe.
            continue
        elif (gid % nranks == rank and rank != nranks - 1):
            # Coupled pair: this cell and its peer on the next rank form
            # the last (two-cell) group of the local decomposition.
            cg = [gid, gid + cells_per_rank]
            self.assertEqual(cg, decomp.groups[len(decomp.groups) - 1].gids)
        else:
            # Unconnected cells get their own single-cell group, in order.
            cg = [gid]
            self.assertEqual(cg, decomp.groups[i].gids)
            i += 1

    # check gid_domains
    for gid in range(recipe.num_cells()):
        group = int(gid / cells_per_rank)
        idx = gid % cells_per_rank
        ngroups = nranks
        if (idx == group - 1):
            # Cell pulled onto the previous domain by its gap junction.
            self.assertEqual(group - 1, decomp.gid_domain(gid))
        elif (idx == group and group != ngroups - 1):
            self.assertEqual(group, decomp.gid_domain(gid))
        else:
            self.assertEqual(group, decomp.gid_domain(gid))
def test_domain_decomposition_homogenous_CPU(self):
    """Homogeneous recipe on a CPU context: one multicore cable group per cell."""
    n_cells = 10
    recipe = homo_recipe(n_cells)
    context = arb.context()
    decomp = arb.partition_load_balance(recipe, context)

    self.assertEqual(decomp.num_local_cells, n_cells)
    self.assertEqual(decomp.num_global_cells, n_cells)
    self.assertEqual(len(decomp.groups), n_cells)

    # Single rank: every gid maps to domain 0.
    for gid in range(n_cells):
        self.assertEqual(0, decomp.gid_domain(gid))

    # Each group holds exactly the one cable cell with matching gid and is
    # tagged for CPU execution.
    for idx in range(n_cells):
        grp = decomp.groups[idx]
        self.assertEqual(len(grp.gids), 1)
        self.assertEqual(grp.gids[0], idx)
        self.assertEqual(grp.backend, arb.backend.multicore)
        self.assertEqual(grp.kind, arb.cell_kind.cable)
def test_domain_decomposition_homogenous_GPU(self):
    """Homogeneous recipe on a GPU context (optionally under MPI): all local
    cells land in a single GPU cell group."""
    if mpi_enabled:
        comm = arb.mpi_comm()
        context = arb.context(threads=1, gpu_id=0, mpi=comm)
    else:
        context = arb.context(threads=1, gpu_id=0)
    nranks = context.ranks
    rank = context.rank

    # 10 cells per domain.
    n_local = 10
    n_global = n_local * nranks

    recipe = homo_recipe(n_global)
    decomp = arb.partition_load_balance(recipe, context)

    self.assertEqual(decomp.num_local_cells, n_local)
    self.assertEqual(decomp.num_global_cells, n_global)
    self.assertEqual(len(decomp.groups), 1)

    # This rank owns the contiguous gid block [first, last).
    first = rank * n_local
    last = (rank + 1) * n_local
    for gid in range(first, last):
        self.assertEqual(rank, decomp.gid_domain(gid))

    # The lone group holds every local cable cell in gid order and is
    # tagged for GPU execution.
    grp = decomp.groups[0]
    self.assertEqual(len(grp.gids), n_local)
    self.assertEqual(grp.gids[0], first)
    self.assertEqual(grp.gids[-1], last - 1)
    self.assertEqual(grp.backend, arb.backend.gpu)
    self.assertEqual(grp.kind, arb.cell_kind.cable)
def test_domain_decomposition_homogenous_GPU(self):
    """Homogeneous recipe on a single-rank GPU context: every cell lands in
    one GPU-backed cable group."""
    n_cells = 10
    recipe = homo_recipe(n_cells)
    context = arb.context(threads=1, gpu_id=0)
    decomp = arb.partition_load_balance(recipe, context)

    self.assertEqual(decomp.num_local_cells, n_cells)
    self.assertEqual(decomp.num_global_cells, n_cells)
    self.assertEqual(len(decomp.groups), 1)

    # Single rank: every gid maps to domain 0.
    for gid in range(n_cells):
        self.assertEqual(0, decomp.gid_domain(gid))

    # The lone group holds all cells in gid order and runs on the GPU.
    grp = decomp.groups[0]
    self.assertEqual(len(grp.gids), n_cells)
    self.assertEqual(grp.gids[0], 0)
    self.assertEqual(grp.gids[-1], n_cells - 1)
    self.assertEqual(grp.backend, arb.backend.gpu)
    self.assertEqual(grp.kind, arb.cell_kind.cable)
        # NOTE(review): this chunk starts mid-method -- the enclosing
        # constructor's `def` line (and the class header) are outside the
        # visible region. `cat`, `self.props` and `self.tree` are presumably
        # set up above -- TODO confirm against the full file.
        self.props.catalogue = arb.load_catalogue(cat)
        d = arb.decor()
        # Paint the custom 'dummy' mechanism (from the loaded catalogue)
        # everywhere and pin the initial membrane potential.
        d.paint('(all)', 'dummy')
        d.set_property(Vm=0.0)
        self.cell = arb.cable_cell(self.tree, arb.label_dict(), d)

    def global_properties(self, _):
        # Same global properties for every cell kind.
        return self.props

    def num_cells(self):
        # Single-cell recipe.
        return 1

    def cell_kind(self, gid):
        return arb.cell_kind.cable

    def cell_description(self, gid):
        return self.cell

# Bail out early with build instructions when the compiled catalogue is
# missing. (NOTE(review): line breaks inside this message are reconstructed;
# the stray `")` appears in the original text.)
if not Path(cat).is_file():
    print("""Catalogue not found in this directory. Please ensure it has been compiled by calling")
 <arbor>/scripts/build-catalogue cat <arbor>/python/examples/cat
where <arbor> is the location of the arbor source tree.""")
    exit(1)

rcp = recipe()
ctx = arb.context()
dom = arb.partition_load_balance(rcp, ctx)
sim = arb.simulation(rcp, dom, ctx)
sim.run(tfinal=30)
def test_domain_decomposition_symmetric(self):
    """Verify load balancing of the symmetric gap-junction recipe: the
    default decomposition, then two explicit cpu_group_size hints."""
    nranks = 1
    rank = 0
    if (mpi_enabled):
        comm = arb.mpi_comm()
        context = arb.context(threads=1, gpu_id=None, mpi=comm)
        nranks = context.ranks
        rank = context.rank
    else:
        context = arb.context(threads=1, gpu_id=None)

    recipe = gj_symmetric(nranks)
    decomp0 = arb.partition_load_balance(recipe, context)
    # Default hints: five singleton groups plus one five-cell group
    # (presumably the gap-junction-connected cells) per rank.
    self.assertEqual(6, len(decomp0.groups))

    # Each rank owns a contiguous gid block offset by `shift`.
    shift = int((rank * recipe.num_cells())/nranks)

    exp_groups0 = [
        [0 + shift],
        [3 + shift],
        [4 + shift],
        [5 + shift],
        [8 + shift],
        [1 + shift, 2 + shift, 6 + shift, 7 + shift, 9 + shift]]

    for i in range(6):
        self.assertEqual(exp_groups0[i], decomp0.groups[i].gids)

    cells_per_rank = int(recipe.num_cells()/nranks)

    for i in range(recipe.num_cells()):
        self.assertEqual(int(i/cells_per_rank), decomp0.gid_domain(i))

    # Test different group_hints
    # Hint 1: a CPU group large enough for every cell -> one group whose
    # gids list singletons first, then the connected cells.
    hint1 = arb.partition_hint()
    hint1.prefer_gpu = False
    hint1.cpu_group_size = recipe.num_cells()
    hints1 = dict([(arb.cell_kind.cable, hint1)])

    decomp1 = arb.partition_load_balance(recipe, context, hints1)
    self.assertEqual(1, len(decomp1.groups))

    exp_groups1 = [0 + shift, 3 + shift, 4 + shift, 5 + shift, 8 + shift,
                   1 + shift, 2 + shift, 6 + shift, 7 + shift, 9 + shift]

    self.assertEqual(exp_groups1, decomp1.groups[0].gids)

    for i in range(recipe.num_cells()):
        self.assertEqual(int(i/cells_per_rank), decomp1.gid_domain(i))

    # Hint 2: half-rank-sized CPU groups -> two groups per rank.
    hint2 = arb.partition_hint()
    hint2.prefer_gpu = False
    hint2.cpu_group_size = int(cells_per_rank/2)
    hints2 = dict([(arb.cell_kind.cable, hint2)])

    decomp2 = arb.partition_load_balance(recipe, context, hints2)
    self.assertEqual(2, len(decomp2.groups))

    exp_groups2 = [
        [0 + shift, 3 + shift, 4 + shift, 5 + shift, 8 + shift],
        [1 + shift, 2 + shift, 6 + shift, 7 + shift, 9 + shift]
    ]

    for i in range(2):
        self.assertEqual(exp_groups2[i], decomp2.groups[i].gids)

    for i in range(recipe.num_cells()):
        self.assertEqual(int(i/cells_per_rank), decomp2.gid_domain(i))
def init_sim(self, recipe):
    """Build a simulation for `recipe` on a default (local) context."""
    ctx = A.context()
    decomposition = A.partition_load_balance(recipe, ctx)
    return A.simulation(recipe, decomposition, ctx)
def test_probe_addr_metadata(self):
    """Metadata for each of the 17 probes on cell 0 matches the probe
    definitions in cc_recipe: locations, whole-cell cable lists, and
    point-mechanism info for probes 8 and 9."""
    recipe = cc_recipe()
    context = A.context()
    dd = A.partition_load_balance(recipe, context)
    sim = A.simulation(recipe, dd, context)

    all_cv_cables = [A.cable(0, 0, 1)]

    # Expected metadata for the probes whose payload is a single value:
    # either a location or the whole-cell cable list.
    simple = {
        0: A.location(0, 0.0),
        1: all_cv_cables,
        2: A.location(0, 0.02),
        3: A.location(0, 0.03),
        4: all_cv_cables,
        5: all_cv_cables,
        6: A.location(0, 0.06),
        7: all_cv_cables,
        10: A.location(0, 0.10),
        11: all_cv_cables,
        12: A.location(0, 0.12),
        13: all_cv_cables,
        14: A.location(0, 0.14),
        15: all_cv_cables,
        16: all_cv_cables,
    }

    for index in range(17):
        m = sim.probe_metadata((0, index))
        self.assertEqual(1, len(m))
        if index == 8:
            # Point-mechanism probe: metadata carries location,
            # multiplicity and target index.
            self.assertEqual(A.location(0, 0.08), m[0].location)
            self.assertEqual(1, m[0].multiplicity)
            self.assertEqual(0, m[0].target)
        elif index == 9:
            # Cell-wide point-mechanism probe: metadata is a list of
            # per-target entries (exactly one here).
            self.assertEqual(1, len(m[0]))
            self.assertEqual(A.location(0, 0.09), m[0][0].location)
            self.assertEqual(1, m[0][0].multiplicity)
            self.assertEqual(1, m[0][0].target)
        else:
            self.assertEqual(simple[index], m[0])
def art_spiking_sim(context, art_spiker_recipe):
    """Build a simulation of the artificial-spiker recipe on `context`."""
    decomposition = arbor.partition_load_balance(art_spiker_recipe, context)
    return arbor.simulation(art_spiker_recipe, decomposition, context)
    # NOTE(review): this chunk starts inside a recipe class whose header is
    # outside the visible region; the two methods below are its tail.
    def event_generators(self, gid):
        # No event generators for any cell.
        return []

    def global_properties(self, gid):
        # Same global properties regardless of gid.
        return self.the_props

# `cell` and `probe` are presumably constructed earlier in the file --
# TODO confirm against the full script.
recipe = single_recipe(cell, [probe])

# (4) Create an execution context
context = arbor.context()

# (5) Create a domain decomposition
domains = arbor.partition_load_balance(recipe, context)

# (6) Create a simulation
sim = arbor.simulation(recipe, domains, context)

# Instruct the simulation to record the spikes and sample the probe
# every 0.02 ms.
sim.record(arbor.spike_recording.all)

probe_id = arbor.cell_member(0, 0)
handle = sim.sample(probe_id, arbor.regular_schedule(0.02))

# (7) Run the simulation
sim.run(tfinal=100, dt=0.025)
def test_simulation(self):
    """A freshly built recipe must simulate for 30 ms without error."""
    rcp = recipe()
    ctx = arb.context()
    decomposition = arb.partition_load_balance(rcp, ctx)
    sim = arb.simulation(rcp, decomposition, ctx)
    sim.run(tfinal=30)
loc = arbor.location(0, 0) # at the soma return arbor.cable_probe('voltage', id, loc) context = arbor.context(threads=12, gpu_id=None) print(context) meters = arbor.meter_manager() meters.start(context) ncells = 4 recipe = ring_recipe(ncells) print(f'{recipe}') meters.checkpoint('recipe-create', context) decomp = arbor.partition_load_balance(recipe, context) print(f'{decomp}') hint = arbor.partition_hint() hint.prefer_gpu = True hint.gpu_group_size = 1000 print(f'{hint}') hints = dict([(arbor.cell_kind.cable, hint)]) decomp = arbor.partition_load_balance(recipe, context, hints) print(f'{decomp}') meters.checkpoint('load-balance', context) sim = arbor.simulation(recipe, decomp, context)