def test_domain_decomposition_hints(self):
    """Per-cell-kind partition hints must control CPU group sizes.

    Uses a 20-cell heterogeneous recipe and checks that cable cells are
    grouped in threes and spike sources in fours, with no GPU groups.
    """
    n_cells = 20
    recipe = hetero_recipe(n_cells)
    context = arb.context()

    # The hints prefer the multicore backend, so the decomposition is expected
    # to never have cell groups on the GPU, regardless of whether a GPU is
    # available or not.
    cable_hint = arb.partition_hint()
    cable_hint.prefer_gpu = False
    cable_hint.cpu_group_size = 3

    spike_hint = arb.partition_hint()
    spike_hint.prefer_gpu = False
    spike_hint.cpu_group_size = 4

    # Dict literal instead of dict([(k, v), ...]) (flake8-comprehensions C406).
    hints = {
        arb.cell_kind.cable: cable_hint,
        arb.cell_kind.spike_source: spike_hint,
    }

    decomp = arb.partition_load_balance(recipe, context, hints)

    # Expected grouping: even gids are cable cells, odd gids spike sources
    # (per the expected gid lists below), packed per the hint sizes.
    exp_cable_groups = [[0, 2, 4], [6, 8, 10], [12, 14, 16], [18]]
    exp_spike_groups = [[1, 3, 5, 7], [9, 11, 13, 15], [17, 19]]

    cable_groups = []
    spike_groups = []

    for g in decomp.groups:
        # assertIn gives a clearer failure message than assertTrue(a or b).
        self.assertIn(g.kind, (arb.cell_kind.cable, arb.cell_kind.spike_source))
        if g.kind == arb.cell_kind.cable:
            cable_groups.append(g.gids)
        elif g.kind == arb.cell_kind.spike_source:
            spike_groups.append(g.gids)

    self.assertEqual(exp_cable_groups, cable_groups)
    self.assertEqual(exp_spike_groups, spike_groups)
def test_domain_decomposition_exceptions(self):
    """A zero-sized group hint must make partition_load_balance raise.

    Checks both cpu_group_size == 0 and gpu_group_size == 0 for cable cells.
    """
    # NOTE(review): another method named test_domain_decomposition_exceptions
    # appears in this file — if both live in the same TestCase, Python keeps
    # only the later definition and the other never runs; confirm and rename.
    nranks = 1
    if mpi_enabled:
        comm = arb.mpi_comm()
        context = arb.context(threads=1, gpu_id=None, mpi=comm)
        nranks = context.ranks
    else:
        context = arb.context(threads=1, gpu_id=None)
    recipe = gj_symmetric(nranks)

    # A suggested cpu_cell_group size of 0 is invalid.
    hint1 = arb.partition_hint()
    hint1.prefer_gpu = False
    hint1.cpu_group_size = 0
    hints1 = {arb.cell_kind.cable: hint1}
    with self.assertRaisesRegex(
        RuntimeError,
        "unable to perform load balancing because cell_kind::cable has invalid suggested cpu_cell_group size of 0",
    ):
        arb.partition_load_balance(recipe, context, hints1)

    # A suggested gpu_cell_group size of 0 is likewise invalid.
    hint2 = arb.partition_hint()
    hint2.prefer_gpu = True
    hint2.gpu_group_size = 0
    hints2 = {arb.cell_kind.cable: hint2}
    with self.assertRaisesRegex(
        RuntimeError,
        "unable to perform load balancing because cell_kind::cable has invalid suggested gpu_cell_group size of 0",
    ):
        arb.partition_load_balance(recipe, context, hints2)
def test_domain_decomposition_exceptions(self):
    """Invalid per-kind hints on a heterogeneous recipe must raise.

    Checks cpu_group_size == 0 for cable cells and gpu_group_size == 0
    for spike sources, each producing a RuntimeError.
    """
    # NOTE(review): another method named test_domain_decomposition_exceptions
    # appears in this file — if both live in the same TestCase, Python keeps
    # only the later definition and the other never runs; confirm and rename.
    n_cells = 20
    recipe = hetero_recipe(n_cells)
    context = arb.context()

    # The hints prefer the multicore backend, so the decomposition is expected
    # to never have cell groups on the GPU, regardless of whether a GPU is
    # available or not.
    cable_hint = arb.partition_hint()
    cable_hint.prefer_gpu = False
    cable_hint.cpu_group_size = 0
    spike_hint = arb.partition_hint()
    spike_hint.prefer_gpu = False
    spike_hint.gpu_group_size = 1
    hints = {
        arb.cell_kind.cable: cable_hint,
        arb.cell_kind.spike_source: spike_hint,
    }
    with self.assertRaisesRegex(
        RuntimeError,
        "unable to perform load balancing because cell_kind::cable has invalid suggested cpu_cell_group size of 0",
    ):
        arb.partition_load_balance(recipe, context, hints)

    cable_hint = arb.partition_hint()
    cable_hint.prefer_gpu = False
    cable_hint.cpu_group_size = 1
    spike_hint = arb.partition_hint()
    spike_hint.prefer_gpu = True
    spike_hint.gpu_group_size = 0
    hints = {
        arb.cell_kind.cable: cable_hint,
        arb.cell_kind.spike_source: spike_hint,
    }
    with self.assertRaisesRegex(
        RuntimeError,
        "unable to perform load balancing because cell_kind::spike_source has invalid suggested gpu_cell_group size of 0",
    ):
        arb.partition_load_balance(recipe, context, hints)
# Build a multicore-only context (12 threads, no GPU) and meter the setup steps.
context = arbor.context(threads=12, gpu_id=None)
print(context)

meters = arbor.meter_manager()
meters.start(context)

ncells = 4
recipe = ring_recipe(ncells)
# print(x) and print(f'{x}') produce identical output; drop the redundant f-string.
print(recipe)
meters.checkpoint('recipe-create', context)

# Default load balance first, for comparison with the hinted one below.
decomp = arbor.partition_load_balance(recipe, context)
print(decomp)

# Hint cable cells toward the GPU in large groups; with gpu_id=None this
# still falls back to multicore groups.
hint = arbor.partition_hint()
hint.prefer_gpu = True
hint.gpu_group_size = 1000
print(hint)

# Dict literal instead of dict([(k, v)]) (flake8-comprehensions C406).
hints = {arbor.cell_kind.cable: hint}
decomp = arbor.partition_load_balance(recipe, context, hints)
print(decomp)
meters.checkpoint('load-balance', context)

sim = arbor.simulation(recipe, decomp, context)
meters.checkpoint('simulation-init', context)

spike_recorder = arbor.attach_spike_recorder(sim)
def test_domain_decomposition_symmetric(self):
    """Symmetric gap-junction recipe: default groups, gid→domain map, hints.

    Verifies the default 6-group decomposition per rank, then two hinted
    variants: one group holding every cell, and two groups of half the
    rank-local cells each.
    """
    nranks = 1
    rank = 0
    if mpi_enabled:
        comm = arb.mpi_comm()
        context = arb.context(threads=1, gpu_id=None, mpi=comm)
        nranks = context.ranks
        rank = context.rank
    else:
        context = arb.context(threads=1, gpu_id=None)
    recipe = gj_symmetric(nranks)

    decomp0 = arb.partition_load_balance(recipe, context)
    self.assertEqual(6, len(decomp0.groups))

    # Gids on this rank are the rank-0 gids shifted by rank * cells_per_rank.
    # Floor division (//) instead of int(a/b): exact integer arithmetic,
    # identical results for these non-negative operands.
    shift = (rank * recipe.num_cells()) // nranks
    exp_groups0 = [
        [0 + shift],
        [3 + shift],
        [4 + shift],
        [5 + shift],
        [8 + shift],
        [1 + shift, 2 + shift, 6 + shift, 7 + shift, 9 + shift],
    ]
    for i in range(6):
        self.assertEqual(exp_groups0[i], decomp0.groups[i].gids)

    cells_per_rank = recipe.num_cells() // nranks
    for i in range(recipe.num_cells()):
        self.assertEqual(i // cells_per_rank, decomp0.gid_domain(i))

    # Hint 1: all cells of this rank in a single group.
    hint1 = arb.partition_hint()
    hint1.prefer_gpu = False
    hint1.cpu_group_size = recipe.num_cells()
    hints1 = {arb.cell_kind.cable: hint1}
    decomp1 = arb.partition_load_balance(recipe, context, hints1)
    self.assertEqual(1, len(decomp1.groups))
    exp_groups1 = [0 + shift, 3 + shift, 4 + shift, 5 + shift, 8 + shift,
                   1 + shift, 2 + shift, 6 + shift, 7 + shift, 9 + shift]
    self.assertEqual(exp_groups1, decomp1.groups[0].gids)
    for i in range(recipe.num_cells()):
        self.assertEqual(i // cells_per_rank, decomp1.gid_domain(i))

    # Hint 2: half of the rank-local cells per group -> two groups.
    hint2 = arb.partition_hint()
    hint2.prefer_gpu = False
    hint2.cpu_group_size = cells_per_rank // 2
    hints2 = {arb.cell_kind.cable: hint2}
    decomp2 = arb.partition_load_balance(recipe, context, hints2)
    self.assertEqual(2, len(decomp2.groups))
    exp_groups2 = [
        [0 + shift, 3 + shift, 4 + shift, 5 + shift, 8 + shift],
        [1 + shift, 2 + shift, 6 + shift, 7 + shift, 9 + shift],
    ]
    for i in range(2):
        self.assertEqual(exp_groups2[i], decomp2.groups[i].gids)
    for i in range(recipe.num_cells()):
        self.assertEqual(i // cells_per_rank, decomp2.gid_domain(i))