Code Example #1
import sys
import time


def main(procs_per_worker):
    """
    Set up a NEURON ParallelContext with subworlds and report MPI rank/size information.

    :param procs_per_worker: int
    """

    try:
        from mpi4py import MPI
        from neuron import h
    except ImportError:
        raise ImportError(
            'nested: ParallelContextInterface: problem with importing neuron')
    try:
        h.nrnmpi_init()
    except Exception:
        print(
            'nested: ParallelContextInterface: h.nrnmpi_init() not executed; may not be defined in this version '
            'of NEURON')
        sys.stdout.flush()
        time.sleep(1.)
    global_comm = MPI.COMM_WORLD
    pc = h.ParallelContext()
    pc.subworlds(procs_per_worker)
    global_rank = int(pc.id_world())
    global_size = int(pc.nhost_world())
    rank = int(pc.id())
    size = int(pc.nhost())
    print(
        'MPI rank: %i, MPI size: %i, pc local rank: %i, pc local size: %i, pc global rank: %i, '
        'pc global size: %i\r' % (global_comm.rank, global_comm.size, rank,
                                  size, global_rank, global_size))
    sys.stdout.flush()
    time.sleep(1.)
Code Example #2
    def __init__(self, procs_per_worker=1):
        """
        Set up NEURON ParallelContext subworlds and matching mpi4py communicators for nested parallel execution.

        :param procs_per_worker: int
        """
        try:
            from mpi4py import MPI
            from neuron import h
        except Exception:
            raise ImportError(
                'nested: ParallelContextInterface: problem with importing neuron'
            )
        try:
            h.nrnmpi_init()
        except Exception:
            print(
                'nested: ParallelContextInterface: h.nrnmpi_init() not executed; may not be defined in this version '
                'of NEURON')
        self.global_comm = MPI.COMM_WORLD
        group = self.global_comm.Get_group()
        sub_group = group.Incl(list(range(1, self.global_comm.size)))
        self.worker_comm = self.global_comm.Create(sub_group)
        self.procs_per_worker = procs_per_worker
        self.h = h
        self.pc = h.ParallelContext()
        self.pc.subworlds(procs_per_worker)
        self.global_rank = int(self.pc.id_world())
        self.global_size = int(self.pc.nhost_world())
        self.rank = int(self.pc.id())
        self.size = int(self.pc.nhost())
        global_ranks = [self.global_rank] * self.size
        global_ranks = self.pc.py_alltoall(global_ranks)
        group = self.global_comm.Get_group()
        sub_group = group.Incl(global_ranks)
        self.comm = self.global_comm.Create(sub_group)
        self.worker_id = self.comm.bcast(int(self.pc.id_bbs()), root=0)
        self.num_workers = self.comm.bcast(int(self.pc.nhost_bbs()), root=0)
        # 'collected' dict acts as a temporary storage container on the master process for results retrieved from
        # the ParallelContext bulletin board.
        self.collected = {}
        assert self.rank == self.comm.rank and self.global_rank == self.global_comm.rank and \
               self.global_comm.size // self.procs_per_worker == self.num_workers, \
            'nested: ParallelContextInterface: pc.ids do not match MPI ranks'
        self._running = False
        self.map = self.map_sync
        self.apply = self.apply_sync
        self.key_counter = 0
        self.maxint = 1e7
        self.controller_is_worker = True
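The class name is not visible in this excerpt; the error strings suggest it is ParallelContextInterface. Assuming that name, a minimal construction sketch (executed on every MPI rank, since the __init__ above is collective) could look like the following; this is an illustration, not part of the original source.

# Hypothetical usage sketch, assuming the surrounding class is named ParallelContextInterface
# and that every MPI rank constructs it collectively.
interface = ParallelContextInterface(procs_per_worker=2)
print('worker %i of %i; local rank %i of %i; global rank %i of %i' %
      (interface.worker_id, interface.num_workers, interface.rank, interface.size,
       interface.global_rank, interface.global_size))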
Code Example #3
from neuron import h
from bmtk.simulator import bionet  # Brain Modeling Toolkit (bmtk) simulator module


def run(config_file='config.json'):
    load()  # assumed to be defined elsewhere in this module (e.g. loads mechanisms/synapse models)
    h.nrnmpi_init()
    pc = h.ParallelContext()  # object to access MPI methods
    MPI_size = int(pc.nhost())
    MPI_rank = int(pc.id())

    conf = bionet.Config.from_json(config_file, validate=True)
    conf.build_env()

    graph = bionet.BioNetwork.from_config(conf)
    sim = bionet.BioSimulator.from_config(conf, network=graph)

    cells = graph.get_local_cells()

    sim.run()

    pc.barrier()
    pc.gid_clear()
    pc.done()
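A minimal sketch of how the run() function above could be invoked from the command line; reading the config path from sys.argv is an assumption for illustration, not part of the original example.

import sys

if __name__ == '__main__':
    # hypothetical entry point: take an optional config path from the command line
    path = sys.argv[1] if len(sys.argv) > 1 else 'config.json'
    run(path)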
Code Example #4
"""
neuron_pc_extension

Classes and methods to provide an interface to extend the NEURON ParallelContext bulletin board for flexible
nested parallel computations.
"""
__author__ = 'Aaron D. Milstein'
import sys
import os
import time
import pprint
from mpi4py import MPI
from neuron import h
try:
    h.nrnmpi_init()
except Exception:
    print(
        'pc_extension: h.nrnmpi_init() not executed; may not be defined in this version of NEURON'
    )


class Context(object):
    """
    A container replacement for global variables to be shared and modified by any function in a module.
    """
    def __init__(self, namespace_dict=None, **kwargs):
        self.update(namespace_dict, **kwargs)

    def update(self, namespace_dict=None, **kwargs):
        """
        Converts items in a dictionary (such as globals() or locals()) into context object internals.
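The body of update() is cut off above. As a rough illustration only (not the author's implementation), a self-contained sketch of the same idea, in which dictionary items and keyword arguments become attributes of the container, could look like this:

class MinimalContext(object):
    """Sketch of the same idea: an attribute container that replaces module-level globals."""

    def __init__(self, namespace_dict=None, **kwargs):
        self.update(namespace_dict, **kwargs)

    def update(self, namespace_dict=None, **kwargs):
        # copy items from a dict (e.g. locals()) and any keyword arguments onto the instance
        if namespace_dict is not None:
            self.__dict__.update(namespace_dict)
        self.__dict__.update(kwargs)


context = MinimalContext(dt=0.025)
context.tstop = 10.
print(context.dt, context.tstop)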
Code Example #5
File: testmpi.py  Project: tapaswenipathak/nrn
from neuron import h

h.nrnmpi_init()  # initialize MPI
pc = h.ParallelContext()
print("I am {} of {}".format(pc.id(), pc.nhost()))
h.quit()
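Since h.nrnmpi_init() only enables parallel execution when NEURON was built with MPI support, this script is normally started through an MPI launcher (for example, something like mpiexec -n 4 python testmpi.py, with the exact launcher depending on the MPI installation); each rank then prints its own id and the total host count.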
Code Example #6
File: test_spikes.py  Project: asiaszmek/nrn
def test_spikes(use_mpi4py=False, use_nrnmpi_init=False, file_mode=False):
    # mpi4py needs to be imported before importing h
    if use_mpi4py:
        from mpi4py import MPI
        from neuron import h, gui
    # without mpi4py we need to call nrnmpi_init explicitly
    elif use_nrnmpi_init:
        from neuron import h, gui
        h.nrnmpi_init()
    # otherwise serial execution
    else:
        from neuron import h, gui

    h('''create soma''')
    h.soma.L = 5.6419
    h.soma.diam = 5.6419
    h.soma.insert("hh")
    h.soma.nseg = 3
    ic = h.IClamp(h.soma(.25))
    ic.delay = .1
    ic.dur = 0.1
    ic.amp = 0.3

    ic2 = h.IClamp(h.soma(.75))
    ic2.delay = 5.5
    ic2.dur = 1
    ic2.amp = 0.3

    h.tstop = 10
    h.cvode.use_fast_imem(1)
    h.cvode.cache_efficient(1)

    pc = h.ParallelContext()

    pc.set_gid2node(pc.id()+1, pc.id())
    myobj = h.NetCon(h.soma(0.5)._ref_v, None, sec=h.soma)
    pc.cell(pc.id()+1, myobj)

    # NEURON run
    nrn_spike_t = h.Vector()
    nrn_spike_gids = h.Vector()

    # rank 0 records spikes for all gids while the other ranks record only
    # their own gid; this gives better test coverage.
    pc.spike_record(-1 if pc.id() == 0 else (pc.id()+1), nrn_spike_t, nrn_spike_gids)

    h.run()

    nrn_spike_t = nrn_spike_t.to_python()
    nrn_spike_gids = nrn_spike_gids.to_python()

    # CORENEURON run
    from neuron import coreneuron
    coreneuron.enable = True
    coreneuron.file_mode = file_mode
    coreneuron.verbose = 0
    h.stdinit()
    corenrn_all_spike_t = h.Vector()
    corenrn_all_spike_gids = h.Vector()

    pc.spike_record(-1, corenrn_all_spike_t, corenrn_all_spike_gids)
    pc.psolve(h.tstop)

    corenrn_all_spike_t = corenrn_all_spike_t.to_python()
    corenrn_all_spike_gids = corenrn_all_spike_gids.to_python()

    # check spikes match
    assert len(nrn_spike_t)  # check we've actually got spikes
    assert len(nrn_spike_t) == len(nrn_spike_gids)  # matching number of gids
    assert nrn_spike_t == corenrn_all_spike_t
    assert nrn_spike_gids == corenrn_all_spike_gids

    h.quit()
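A sketch of how this test could be called directly; the actual entry point of the test file is not shown here, and the MPI variants are normally launched under an MPI launcher with several ranks.

if __name__ == '__main__':
    # hypothetical direct invocation; only one variant per process, since test_spikes() ends with h.quit()
    test_spikes()                        # serial NEURON vs. CoreNEURON comparison
    # test_spikes(use_mpi4py=True)       # MPI initialized by importing mpi4py first
    # test_spikes(use_nrnmpi_init=True)  # MPI initialized via h.nrnmpi_init()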
Code Example #7
import os
import sys
import time


def main(import_mpi4py, run_nrnmpi_init, procs_per_worker, sleep):
    """
    Exercise the order of the mpi4py import and of h.nrnmpi_init() relative to importing neuron, then set up
    ParallelContext subworlds and report ranks.

    :param import_mpi4py: int
    :param run_nrnmpi_init: bool
    :param procs_per_worker: int
    :param sleep: float
    """
    time.sleep(sleep)
    if import_mpi4py == 1:
        order = 'before'
        from mpi4py import MPI
        time.sleep(1.)
        print('test_mpiguard: getting past import mpi4py')
        sys.stdout.flush()
        time.sleep(1.)

    from neuron import h
    time.sleep(1.)
    print('test_mpiguard: getting past from neuron import h')
    sys.stdout.flush()
    time.sleep(1.)

    if run_nrnmpi_init:
        try:
            h.nrnmpi_init()
            time.sleep(1.)
            print('test_mpiguard: getting past h.nrnmpi_init()')
            sys.stdout.flush()
            time.sleep(1.)
        except Exception:
            print(
                'test_mpiguard: problem calling h.nrnmpi_init(); may not be defined in this version of NEURON'
            )
            time.sleep(1.)
            sys.stdout.flush()
            time.sleep(1.)
    else:
        print('test_mpiguard: h.nrnmpi_init() not executed')
        time.sleep(1.)
        sys.stdout.flush()
        time.sleep(1.)
    if import_mpi4py == 2:
        order = 'after'
        from mpi4py import MPI
        print('test_mpiguard: getting past import mpi4py')
        time.sleep(1.)
        sys.stdout.flush()
        time.sleep(1.)

    if import_mpi4py > 0:
        comm = MPI.COMM_WORLD

    pc = h.ParallelContext()
    pc.subworlds(procs_per_worker)
    global_rank = int(pc.id_world())
    global_size = int(pc.nhost_world())
    rank = int(pc.id())
    size = int(pc.nhost())

    if import_mpi4py > 0:
        print(
            'test_mpiguard: mpi4py imported %s neuron: process id: %i; global rank: %i / %i; local rank: %i / %i; '
            'comm.rank: %i; comm.size: %i' %
            (order, os.getpid(), global_rank, global_size, rank, size,
             comm.rank, comm.size))
    else:
        print(
            'test_mpiguard: mpi4py not imported: process id: %i; global rank: %i / %i; local rank: %i / %i'
            % (os.getpid(), global_rank, global_size, rank, size))
    time.sleep(1.)
    sys.stdout.flush()
    time.sleep(1.)

    pc.runworker()
    time.sleep(1.)
    print('test_mpiguard: got past pc.runworker()')
    time.sleep(1.)
    sys.stdout.flush()
    time.sleep(1.)

    # catch workers escaping from runworker loop
    if global_rank != 0:
        print(
            'test_mpiguard: global_rank: %i escaped from the pc.runworker loop' %
            global_rank)
        sys.stdout.flush()
        time.sleep(1.)
        os._exit(1)

    pc.done()
    time.sleep(1.)
    print('test_mpiguard: got past pc.done()')
    time.sleep(1.)
    sys.stdout.flush()
    time.sleep(1.)

    print('calling h_quit')
    sys.stdout.flush()
    time.sleep(1.)
    h.quit()
    time.sleep(1.)
    sys.stdout.flush()
    time.sleep(1.)
Code Example #8
File: test0.py  Project: nrnhines/nrn
from neuron import h
h.nrnmpi_init()
pc = h.ParallelContext()

id = int(pc.id())
nhost = int(pc.nhost())

print('I am %d of %d' % (id, nhost))

pc.barrier()
h.quit()