Example #1
    def test_domain_decomposition_nonsymmetric(self):
        nranks = 1
        rank = 0
        if (mpi_enabled):
            comm = arb.mpi_comm()
            context = arb.context(threads=1, gpu_id=None, mpi=comm)
            nranks = context.ranks
            rank = context.rank
        else:
            context = arb.context(threads=1, gpu_id=None)

        recipe = gj_non_symmetric(nranks)
        decomp = arb.partition_load_balance(recipe, context)

        cells_per_rank = nranks

        # check groups
        i = 0
        for gid in range(rank * cells_per_rank, (rank + 1) * cells_per_rank):
            if (gid % nranks == rank - 1):
                continue
            elif (gid % nranks == rank and rank != nranks - 1):
                cg = [gid, gid + cells_per_rank]
                self.assertEqual(cg, decomp.groups[-1].gids)
            else:
                cg = [gid]
                self.assertEqual(cg, decomp.groups[i].gids)
                i += 1

        # check gid_domains
        for gid in range(recipe.num_cells()):
            group = gid // cells_per_rank
            idx = gid % cells_per_rank
            if (idx == group - 1):
                # cell pulled onto the previous domain by the gap-junction grouping
                self.assertEqual(group - 1, decomp.gid_domain(gid))
            else:
                self.assertEqual(group, decomp.gid_domain(gid))
Example #2
def run():
    v = options.parse_arguments().verbosity

    if not arb.mpi_is_initialized():
        arb.mpi_init()

    comm = arb.mpi_comm()
    alloc = arb.proc_allocation()
    ctx = arb.context(alloc, comm)
    rank = ctx.rank

    if rank == 0:
        runner = unittest.TextTestRunner(verbosity=v)
    else:
        sys.stdout = open(os.devnull, 'w')
        runner = unittest.TextTestRunner(stream=sys.stdout)

    runner.run(suite())

    if not arb.mpi_is_finalized():
        arb.mpi_finalize()
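
A minimal sketch of how this driver would typically be invoked (a standard Python entry-point guard; it is not part of the excerpt above):

if __name__ == '__main__':
    run()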
Example #3
    def test_domain_decomposition_homogenous_GPU(self):

        if (mpi_enabled):
            comm = arb.mpi_comm()
            context = arb.context(threads=1, gpu_id=0, mpi=comm)
        else:
            context = arb.context(threads=1, gpu_id=0)

        N = context.ranks
        I = context.rank

        # 10 cells per domain
        n_local = 10
        n_global = n_local * N

        recipe = homo_recipe(n_global)
        decomp = arb.partition_load_balance(recipe, context)

        self.assertEqual(decomp.num_local_cells, n_local)
        self.assertEqual(decomp.num_global_cells, n_global)
        self.assertEqual(len(decomp.groups), 1)

        b = I * n_local
        e = (I + 1) * n_local
        gids = list(range(b, e))

        for gid in gids:
            self.assertEqual(I, decomp.gid_domain(gid))

        # The single cell group on this rank holds all n_local cable cells
        # and should be tagged for GPU execution

        grp = decomp.groups[0]

        self.assertEqual(len(grp.gids), n_local)
        self.assertEqual(grp.gids[0], b)
        self.assertEqual(grp.gids[-1], e-1)
        self.assertEqual(grp.backend, arb.backend.gpu)
        self.assertEqual(grp.kind, arb.cell_kind.cable)
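
Example #3 requests gpu_id=0 unconditionally, which fails on Arbor builds without GPU support. A minimal sketch of a guard, assuming arb.config() exposes a 'gpu' entry and the context exposes has_gpu (verify both against your Arbor version):

import unittest
import arbor as arb

# Assumption: arb.config()['gpu'] reports the GPU backend the library was
# built with (falsy when there is no GPU support).
gpu_enabled = bool(arb.config().get('gpu'))

@unittest.skipIf(not gpu_enabled, "Arbor built without GPU support")
class TestGpuContext(unittest.TestCase):
    def test_context_reports_gpu(self):
        # Request the first GPU; the context should then report has_gpu.
        ctx = arb.context(threads=1, gpu_id=0)
        self.assertTrue(ctx.has_gpu)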
Example #4
    def test_context_mpi4py(self):
        comm = arb.mpi_comm(mpi.COMM_WORLD)

        # test context with mpi
        ctx = arb.context(mpi=comm)
        self.assertTrue(ctx.has_mpi)
Example #5
    def test_communicator_mpi4py(self):
        comm = arb.mpi_comm(mpi.COMM_WORLD)

        # test that set communicator is MPI_COMM_WORLD
        self.assertEqual(str(comm), '<arbor.mpi_comm: MPI_COMM_WORLD>')
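
Examples #4 and #5 assume that mpi4py is importable as mpi. A minimal sketch of the import guard implied by the runner in Example #10 (the name mpi4py_enabled mirrors that runner and is otherwise an assumption about the test module):

import arbor as arb

# Fall back to Arbor's own MPI wrapper when mpi4py is not installed.
try:
    from mpi4py import MPI as mpi
    mpi4py_enabled = True
except ImportError:
    mpi4py_enabled = False

if mpi4py_enabled:
    comm = arb.mpi_comm(mpi.COMM_WORLD)  # wrap an existing mpi4py communicator
else:
    comm = arb.mpi_comm()                # defaults to MPI_COMM_WORLD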
Example #6
    def test_context_arbmpi(self):
        comm = arb.mpi_comm()

        # test context with mpi
        ctx = arb.context(mpi=comm)
        self.assertTrue(ctx.has_mpi)
Example #7
    def test_communicator_arbmpi(self):
        comm = arb.mpi_comm()

        # test that by default communicator is MPI_COMM_WORLD
        self.assertEqual(str(comm), '<arbor.mpi_comm: MPI_COMM_WORLD>')
Example #8
        return 1

    # (9) Attach a generator to the first cell in the ring.
    def event_generators(self, gid):
        if gid == 0:
            sched = arbor.explicit_schedule([1])
            return [arbor.event_generator(arbor.cell_member(0, 0), 0.1, sched)]
        return []

    def probes(self, gid):
        return [arbor.cable_probe_membrane_voltage('"root"')]

    def global_properties(self, kind):
        return self.props

comm = arbor.mpi_comm()
print(comm)

# (10) Set up the hardware context
context = arbor.context(threads=20, gpu_id=None, mpi=comm)
print(context)

# (11) Set up and start the meter manager
meters = arbor.meter_manager()
meters.start(context)

# (12) Instantiate recipe
ncells = 50
recipe = ring_recipe(ncells)
meters.checkpoint('recipe-create', context)
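
The excerpt stops at the recipe checkpoint. A minimal sketch of a possible continuation, restricted to calls that already appear elsewhere in this listing (partition_load_balance, meter checkpoints, and the decomposition attributes from Example #3):

# (13) Balance the recipe over the MPI ranks and record another checkpoint.
decomp = arbor.partition_load_balance(recipe, context)
meters.checkpoint('load-balance', context)

print('cells on this rank:', decomp.num_local_cells,
      'of', decomp.num_global_cells, 'total')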
Example #9
    def test_domain_decomposition_symmetric(self):
        nranks = 1
        rank = 0
        if (mpi_enabled):
            comm = arb.mpi_comm()
            context = arb.context(threads=1, gpu_id=None, mpi=comm)
            nranks = context.ranks
            rank = context.rank
        else:
            context = arb.context(threads=1, gpu_id=None)

        recipe = gj_symmetric(nranks)
        decomp0 = arb.partition_load_balance(recipe, context)

        self.assertEqual(6, len(decomp0.groups))

        shift = (rank * recipe.num_cells()) // nranks

        exp_groups0 = [ [0 + shift],
                        [3 + shift],
                        [4 + shift],
                        [5 + shift],
                        [8 + shift],
                        [1 + shift, 2 + shift, 6 + shift, 7 + shift, 9 + shift]]

        for i in range(6):
            self.assertEqual(exp_groups0[i], decomp0.groups[i].gids)

        cells_per_rank = recipe.num_cells() // nranks

        for i in range(recipe.num_cells()):
            self.assertEqual(i // cells_per_rank, decomp0.gid_domain(i))

        # Test different group_hints
        hint1 = arb.partition_hint()
        hint1.prefer_gpu = False
        hint1.cpu_group_size = recipe.num_cells()
        hints1 = {arb.cell_kind.cable: hint1}

        decomp1 = arb.partition_load_balance(recipe, context, hints1)
        self.assertEqual(1, len(decomp1.groups))

        exp_groups1 = [0 + shift, 3 + shift, 4 + shift, 5 + shift, 8 + shift,
                        1 + shift, 2 + shift, 6 + shift, 7 + shift, 9 + shift]

        self.assertEqual(exp_groups1, decomp1.groups[0].gids)

        for i in range(recipe.num_cells()):
            self.assertEqual(i // cells_per_rank, decomp1.gid_domain(i))

        hint2 = arb.partition_hint()
        hint2.prefer_gpu = False
        hint2.cpu_group_size = cells_per_rank // 2
        hints2 = {arb.cell_kind.cable: hint2}

        decomp2 = arb.partition_load_balance(recipe, context, hints2)
        self.assertEqual(2, len(decomp2.groups))

        exp_groups2 = [ [0 + shift, 3 + shift, 4 + shift, 5 + shift, 8 + shift],
                        [1 + shift, 2 + shift, 6 + shift, 7 + shift, 9 + shift] ]

        for i in range(2):
            self.assertEqual(exp_groups2[i], decomp2.groups[i].gids)

        for i in range(recipe.num_cells()):
            self.assertEqual(i // cells_per_rank, decomp2.gid_domain(i))
Example #10
        suites.append(test_module_suite)

    suite = unittest.TestSuite(suites)

    return suite


if __name__ == "__main__":
    v = options.parse_arguments().verbosity

    if not arb.mpi_is_initialized():
        print(" Runner initializing mpi")
        arb.mpi_init()

    if mpi4py_enabled:
        comm = arb.mpi_comm(mpi.COMM_WORLD)
    elif mpi_enabled:
        comm = arb.mpi_comm()

    alloc = arb.proc_allocation()
    ctx = arb.context(alloc, comm)
    rank = ctx.rank

    if rank == 0:
        runner = unittest.TextTestRunner(verbosity=v)
    else:
        sys.stdout = open(os.devnull, 'w')
        runner = unittest.TextTestRunner(stream=sys.stdout)

    result = runner.run(suite())
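
Unlike Example #2, this runner neither finalizes MPI nor propagates the test outcome. A minimal sketch of a possible epilogue, continuing inside the __main__ block, reusing the finalization pattern from Example #2 and the standard unittest result API:

    # Finalize MPI as in Example #2 and report failures through the exit code.
    if not arb.mpi_is_finalized():
        arb.mpi_finalize()

    sys.exit(0 if result.wasSuccessful() else 1)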