Example #1
def RandomSize(N_lower, N_upper):
    if COMM_WORLD.rank == 0:
        size = randint(N_lower, N_upper)
    else:
        size = None
    size = COMM_WORLD.bcast(size, root=0)
    assert size is not None
    return size
Example #2
def RandomNumber():
    if COMM_WORLD.rank == 0:
        number = _rand(1)[0]
    else:
        number = None
    number = COMM_WORLD.bcast(number, root=0)
    assert number is not None
    return number
Example #3
    def __init__(self):
        """Obtain configurations and all filelists from the master node
        """
        assert world.rank >= 1

        # synchronize configuration file and file lists with the master node
        self.config = {}; self.mapIn = []; self.reduceIn = []; self.reduceOut = []
        self.config = world.bcast(self.config, root=0)
Example #4
def RandomTuple(Q):
    if COMM_WORLD.rank == 0:
        tuple_ = tuple(float(v) for v in _rand(Q))
    else:
        tuple_ = None
    tuple_ = COMM_WORLD.bcast(tuple_, root=0)
    assert tuple_ is not None
    return tuple_
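Examples #1, #2 and #4 all follow the same pattern: rank 0 draws a random value and bcast makes every rank agree on it. A minimal, self-contained sketch of how such a helper is typically wired up and called (my own illustration; the randint here is numpy's, the originals may import it from elsewhere; run with e.g. "mpirun -n 4 python script.py"):

from mpi4py.MPI import COMM_WORLD
from numpy.random import randint

def RandomSize(N_lower, N_upper):
    # Only the root draws; everyone else starts with a placeholder.
    size = randint(N_lower, N_upper) if COMM_WORLD.rank == 0 else None
    # bcast returns the root's value on every rank, so all ranks agree.
    return COMM_WORLD.bcast(size, root=0)

print("rank", COMM_WORLD.rank, "agreed on size", RandomSize(10, 20))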
Example #5
    def __init__(self):
        """Obtain configurations and all filelists from the master node
        """
        assert world.rank >= 1

        # synchronize configuration file and file lists with the master node
        self.config = {}
        self.mapIn = []
        self.reduceIn = []
        self.reduceOut = []
        self.config = world.bcast(self.config, root=0)
Example #6
File: propagate.py  Project: vincefn/B4PC
def lens(front, mirror, mode):

    rank = multi_process.Get_rank()

    if rank == 0:
        result = _propagate._lens(front, mirror, mode)
    else:
        result = None

    recv_data = multi_process.bcast(result, root=0)
    mirror.lens = recv_data
Example #7
File: propagate.py  Project: vincefn/B4PC
def source_spread(source, back):

    rank = multi_process.Get_rank()

    if rank == 0:
        result = _propagate._source_spread(source, back)
    else:
        result = None

    # Broadcast
    recv_data = multi_process.bcast(result, root=0)
    back.wavefront = recv_data[0]
    back.intensity = recv_data[1]
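Examples #6 and #7 rely on the fact that the lowercase bcast pickles arbitrary Python objects, so a tuple of results computed on rank 0 (here the wavefront and intensity) arrives intact on every rank and can be unpacked. A small stand-alone sketch of that behaviour (illustrative payload, not project code):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
if comm.rank == 0:
    result = (np.zeros(4), 1.25)   # e.g. (wavefront, intensity)
else:
    result = None
wavefront, intensity = comm.bcast(result, root=0)   # every rank gets both items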
Example #8
    def __init__(self, config):
        """Read in user created config module and initialize the master node
        """
        # set default values and update according to user input (config)
        # NOTE: input files should be prepared in the user module
        # (e.g. split the BIG file into smaller chunks using 'split')
        # each file is fed into a mapper, supposing it can fit into mapper's memory

        assert hasattr(config, 'mapfn') and hasattr(config, 'reducefn')
        self.config = {'nReduce':1, 'nMap':1, 'maxLoop':1, 'appendReduce':True,\
                'scratchFolder':'./', 'readPickle':False, 'writePickle':False,\
                'verbosity':6, 'timeout':60, 'delay':0.2, 'jobwait':1,\
                'mapfn':config.mapfn, 'reducefn':config.reducefn, 'ctrlfn':None,\
                'finalfn':None, 'readfn':None, 'hashfn':hash }

        if world.size == 1:
            raise AttributeError('Parallel mode only! At least one worker node is required.')

        # number of mapping tasks by default equals number of initial files
        # it can be overridden by user input
        assert isinstance(config.initFiles, list)
        self.config['nMap'] = len(config.initFiles)
        self.initFiles = config.initFiles

        # read in user defined configurations
        for key, val in self.config.items():
            if hasattr(config, key): self.config[key] = getattr(config, key)

        # sync config with all nodes
        self.config = world.bcast(self.config, root=0)

        # setup workers into a priority queue
        self.workers = [ State(ii) for ii in range(1, world.size) ]
        heapq.heapify(self.workers)
        self.nActive =  world.size - 1

        # assign map / reduce / finalize file list
        tmpList = [ config.__name__+'_'+str(ii).zfill(len(str(self.config['nMap'])))\
                +'.map' for ii in range(1, self.config['nMap']+1) ]
        self.mapIn = [ os.path.join(self.config['scratchFolder'], file) for file in tmpList ]

        tmpList = [ config.__name__+'_'+str(ii).zfill(len(str(self.config['nReduce'])))\
                +'.int' for ii in range(1, self.config['nReduce']+1) ]
        self.reduceIn = [ os.path.join(self.config['scratchFolder'], file) for file in tmpList ]
        self.reduceOut = [ os.path.splitext(file)[0]+'.red' for file in self.reduceIn ]

        # Currently only supports a single output file
        self.finalOut = [ config.__name__+'.out' ]

        # count number of iterations
        self.nLoop = 0; self.init = True
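The single world.bcast above is the master-side half of a handshake: the workers in Examples #3 and #5 call the matching bcast with an empty dict and come back with the master's configuration. A stripped-down sketch of the two sides together (assuming mpi4py and that rank 0 is the master; the values are illustrative only):

from mpi4py import MPI

world = MPI.COMM_WORLD

if world.rank == 0:                      # master owns the real configuration
    config = {'nMap': 4, 'nReduce': 2}   # illustrative values only
else:                                    # workers start with an empty dict
    config = {}

# Both sides make the same collective call; every rank returns with rank 0's dict.
config = world.bcast(config, root=0)
print("rank", world.rank, "config =", config)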
Example #9
File: fixtures.py  Project: Helveg/arbor
def _build_cat_distributed(comm, name, path):
    # Control flow explanation:
    # * `build_err` starts out as `None`
    # * Rank 1 to N wait for a broadcast from rank 0 to receive the new value
    #   for `build_err`
    # * Rank 0 splits off from the others and executes the build.
    #   * If it builds correctly it finishes the collective `build_err`
    #     broadcast with the initial value `None`: all nodes continue.
    #   * If it errors, it finishes the collective broadcast with the caught err
    #
    # All MPI ranks either continue or raise the same err. (prevents stalling)
    build_err = None
    if not comm.Get_rank():
        try:
            _build_cat_local(name, path)
        except Exception as e:
            build_err = e
    build_err = comm.bcast(build_err, root=0)
    if build_err:
        raise build_err
Example #10
# In ``pmc.py`` the following line defines the sequential single process sampler:
# sampler = pypmc.sampler.importance_sampling.ImportanceSampler(log_target, initial_proposal)
#
# We now use the parallel MPISampler instead:
SequentialIS = pypmc.sampler.importance_sampling.ImportanceSampler
parallel_sampler = pypmc.tools.parallel_sampler.MPISampler(
    SequentialIS, target=log_target, proposal=initial_proposal)

# Draw 10,000 samples adapting the proposal every 1,000 samples:

# make sure that every process has a different random number generator seed
if comm.Get_rank() == 0:
    seed = np.random.randint(1e5)
else:
    seed = None
seed = comm.bcast(seed)
np.random.seed(seed + comm.Get_rank())

generating_components = []
for i in range(10):
    # With the invocation "mpirun -n 10 python pmc_mpi.py", there are
    # 10 processes which means in order to draw 1,000 samples
    # ``parallel_sampler.run(1000//comm.Get_size())`` makes each process draw
    # 100 samples.
    # Hereby the generating proposal component for each sample in each process
    # is returned by ``parallel_sampler.run``.
    # In the master process, ``parallel_sampler.run`` is a list containing the
    # return values of the sequential ``run`` method of every process.
    # In all other processes, ``parallel_sampler.run`` returns the generating
    # component for its own samples only.
    last_generating_components = parallel_sampler.run(1000 // comm.Get_size(),
Example #11
    CW.Send(data, dest=1, tag=13)
elif rank == 1:
    data = np.empty(100, dtype=np.float64)
    CW.Recv(data, source=0, tag=13)

print("On rank", rank, "data =", data)

CW.Barrier()

#But what if you want to send something from one rank to everyone else? Well, you can broadcast:

if rank == 0:
    data = {'key1': [7, 2.72, 2 + 3j], 'key2': ('abc', 'xyz')}
else:
    data = None
data = CW.bcast(data, root=0)
# or CW.Bcast(data, root=0) for numpy arrays (see the buffer-based aside below)

print("On rank", rank, "data =", data)

CW.Barrier()
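
#An aside that is not part of the original walkthrough: the capital-B Bcast works on
#preallocated numpy buffers, so every rank has to allocate the array before the call:

if rank == 0:
    buf = np.arange(10, dtype=np.float64)
else:
    buf = np.empty(10, dtype=np.float64)
CW.Bcast(buf, root=0)   # fills buf in place on every rank, no pickling involved

print("On rank", rank, "buf =", buf)

CW.Barrier()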

#You might have to do some testing of doing a task on one process and then sending the result to everyone else, though. If you are sending big objects it can take a while.
#Something that is a little bit different from broadcasting, but still shares information from one rank with the others, is scatter, which distributes the elements of a list or array across all the processes. This is handy for independent tasks, so let's try scattering the pickle files (a caveat about the list length follows the snippet below):

if rank == 0:
    pickle_file = glob.glob("test_data_*.pkl")
else:
    pickle_file = None
pickle_file = CW.scatter(pickle_file, root=0)
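
#A caveat worth noting (my addition, not in the original): scatter expects the list on
#the root to have exactly one entry per rank, so a long file list is usually chunked
#into CW.size pieces first:

if rank == 0:
    all_files = sorted(glob.glob("test_data_*.pkl"))
    chunks = [all_files[i::CW.size] for i in range(CW.size)]   # one chunk per rank
else:
    chunks = None
my_files = CW.scatter(chunks, root=0)
print("On rank", rank, "my files =", my_files)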

Example #12
maf.bcast = bcast

def compare(msg, mpi_func, maf_func, count=100):
    mpi_times = numpy.zeros(count)
    maf_times = numpy.zeros(count)
    for i in range(count):
        start = timeit.default_timer()
        mpi_func()
        mpi_times[i] = timeit.default_timer() - start
        start = timeit.default_timer()
        maf_func()
        maf_times[i] = timeit.default_timer() - start
    maf.log('{:<20} {:.4e} {:.4e} {:>6.2f}'.format(msg, mpi_times.mean(), maf_times.mean(), 
            100*((maf_times - mpi_times) / mpi_times).mean()))

compare('bcast(s)', lambda: COMM_WORLD.bcast('s'), lambda: maf.bcast('s'))
compare('bcast(1)', lambda: COMM_WORLD.bcast(1), lambda: maf.bcast(1))
compare('bcast(s*100)', lambda: COMM_WORLD.bcast('s'*100), lambda: maf.bcast('s'*100))
compare('bcast(s*1000)', lambda: COMM_WORLD.bcast('s'*1000), lambda: maf.bcast('s'*1000))

compare('bcast(range(1000))',
        lambda: COMM_WORLD.bcast(range(1000)),
        lambda: maf.bcast(range(1000)))

compare('bcast(range(10000))',
        lambda: COMM_WORLD.bcast(range(10000)),
        lambda: maf.bcast(range(10000)))

Example #13
def temporal_hook(u_, p_, newfolder, mesh, check_steady, Vv, Pv, tstep,
                  eval_dict, norm_l, nu, z, rho, DG, eval_t, files, T, folder,
                  prev, u_mean, normal, dt, domains, plot_t, checkpoint, uv,
                  hdf5_link, **NS_namespace):
    # Print timestep
    if tstep % eval_t == 0:
        if MPI.rank(mpi_comm_world()) == 0:
            print tstep

    if tstep % check_steady == 0 and eval_dict.has_key("initial_u"):
        [assign(uv.sub(i), u_[i]) for i in range(mesh.geometry().dim())]
        inlet_flux = assemble(dot(uv, normal) * ds(mesh)[domains](1))
        outlet_flux = assemble(dot(uv, normal) * ds(mesh)[domains](2))
        walls_flux = assemble(dot(uv, normal) * ds(mesh)[domains](3))

        if MPI.rank(mpi_comm_world()) == 0:
            print "Flux in: %e out: %e walls:%e" % (inlet_flux, outlet_flux,
                                                    walls_flux)

    # Initial conditions are "washed away"
        if tstep * dt > 0.4:
            if MPI.rank(mpi_comm_world()) == 0:
                print "=" * 25 + "\n DONE WITH FIRST ROUND\n\t%s\n" % tstep + "=" * 25
            eval_dict.pop("initial_u")

    if not eval_dict.has_key("initial_u"):
        # Evaluate points
        [assign(uv.sub(i), u_[i]) for i in range(mesh.geometry().dim())]
        evaluate_points(eval_dict, {"u": u_, "p": p_}, uv)

        if tstep % plot_t == 0:
            uv.rename("u", "velocity")
            p_.rename("p", "pressure")

            # Store vtk files for post process in paraview
            components = {"u": uv, "p": p_}
            for key in components.keys():
                field_name = "velocity" if key == "u" else "pressure"
                save_hdf5(files[key], field_name, components[key], tstep,
                          hdf5_link)

        if tstep % check_steady == 0:
            # Check the max norm of the difference
            num = eval_dict["senterline_u"].number_of_evaluations()
            arr = eval_dict["senterline_u"].array()
            arr = comm.bcast(
                arr, root=0)  # Might be better to do bcast after norm_l
            arr_ = arr[:, :3] / num - prev[0]

            norm = norm_l(arr_, l="max")

            # Update prev
            prev[0] = (arr[:, :3] / num).copy()

            # Print info
            if MPI.rank(mpi_comm_world()) == 0:
                print "Condition:", norm < 0.00001,
                print "On timestep:", tstep,
                print "Norm:", norm

            # Check if stats have stabilized
            if norm < 0.00001:
                dump_stats(eval_dict, newfolder, dt, tstep)

                # Clean kill of program
                if MPI.rank(mpi_comm_world()) == 0:
                    kill = open(folder + '/killoasis', 'w')
                    kill.close()
                MPI.barrier(mpi_comm_world())

        if tstep % checkpoint == 0 and not eval_dict.has_key("initial_u"):
            dump_stats(eval_dict, newfolder, dt, tstep)
Example #14
def get_parameters(name):
    """ Reads the simulation parameters from the input hdf5 file. """

    if comm.rank == 0:
        f = h5py.File(name + '.grid', 'r')
        files_to_delete = [name + '.grid']

        omega = np.complex128(f['omega_r'][0] + 1j * f['omega_i'][0])
        shape = tuple([int(s) for s in f['shape'][:]])

        # bound_conds = f['bound_conds'][:]

        # Function used to read in 1D complex vector fields.
        get_1D_fields = lambda a: [(f[a+'_'+u+'r'][:] + 1j * f[a+'_'+u+'i'][:]).\
                                astype(np.complex128) for u in 'xyz']

        # Read in s and t vectors.
        s = get_1D_fields('sp')
        t = get_1D_fields('sd')

        # Read in max_iters and err_thresh.
        max_iters = int(f['max_iters'][0])
        # max_iters = 100
        err_thresh = float(f['err_thresh'][0])

        f.close()  # Close file.

        # Function used to read in 3D complex vector fields.
        def get_3D_fields(a):
            field = []
            for k in range(3):
                key = name + '.' + a + '_' + 'xyz'[k]
                field.append((h5py.File(key + 'r')['data'][:] + \
                        1j * h5py.File(key + 'i')['data'][:]).astype(np.complex128))
                files_to_delete.append(key + 'r')
                files_to_delete.append(key + 'i')
            return field


#         # Read in m, e, and j fields.
#         for name in 'eJmE':
#             print comm.rank, name
#             params[name] = get_3D_fields(name)

        e = get_3D_fields('e')
        j = get_3D_fields('J')
        m = get_3D_fields('m')
        x = get_3D_fields('E')

        # Delete input files.
        for filename in files_to_delete:
            os.remove(filename)

        # Do some simple pre-computation.
        for k in range(3):
            m[k] = m[k]**-1
            e[k] = omega**2 * e[k]
            j[k] = -1j * omega * j[k]

        params = {'omega': omega, 'shape': shape, \
                'max_iters': max_iters, 'err_thresh': err_thresh, \
                's': s, 't': t}
        # 'e': e, 'm': m, 'j': j, 'x': x}
    else:
        params = None

    params = comm.bcast(params)

    if comm.rank == 0:
        params['e'] = e
        params['m'] = m
        params['j'] = j
        params['x'] = x

    else:
        for field_name in 'emjx':
            params[field_name] = [None] * 3

    return params
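Note the split in Example #14: only the small parameter dict goes through comm.bcast, while the heavy 3D fields stay on rank 0 and the other ranks just get [None] * 3 placeholders. If the large arrays do need to reach every rank, the broadcast metadata can be used to preallocate a receive buffer for the buffer-based Bcast, as in this hedged, self-contained sketch (illustrative shapes, not project code):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD

if comm.rank == 0:
    params = {'shape': (4, 5), 'omega': 1.0 + 0.5j}
    field = np.random.rand(*params['shape'])
else:
    params = None
    field = None

params = comm.bcast(params, root=0)       # small metadata: cheap to pickle
if comm.rank != 0:
    field = np.empty(params['shape'])     # allocate from the broadcast shape
comm.Bcast(field, root=0)                 # large array: filled in place, no pickling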
Example #15
    def __init__(self, config):
        """Read in user created config module and initialize the master node
        """
        # set default values and update according to user input (config)
        # NOTE: input files should be prepared in the user module
        # (e.g. split the BIG file into smaller chunks using 'split')
        # each file is fed into a mapper, supposing it can fit into mapper's memory

        assert hasattr(config, 'mapfn') and hasattr(config, 'reducefn')
        self.config = {'nReduce':1, 'nMap':1, 'maxLoop':1, 'appendReduce':True,\
                'scratchFolder':'./', 'readPickle':False, 'writePickle':False,\
                'verbosity':6, 'timeout':60, 'delay':0.2, 'jobwait':1,\
                'mapfn':config.mapfn, 'reducefn':config.reducefn, 'ctrlfn':None,\
                'finalfn':None, 'readfn':None, 'hashfn':hash }

        if world.size == 1:
            raise AttributeError(
                'Parallel mode only! At least one worker node is required.')

        # number of mapping tasks by default equals number of initial files
        # it can be overridden by user input
        assert isinstance(config.initFiles, list)
        self.config['nMap'] = len(config.initFiles)
        self.initFiles = config.initFiles

        # read in user defined configurations
        for key, val in self.config.items():
            if hasattr(config, key): self.config[key] = getattr(config, key)

        # sync config with all nodes
        self.config = world.bcast(self.config, root=0)

        # setup workers into a priority queue
        self.workers = [State(ii) for ii in range(1, world.size)]
        heapq.heapify(self.workers)
        self.nActive = world.size - 1

        # assign map / reduce / finalize file list
        tmpList = [ config.__name__+'_'+str(ii).zfill(len(str(self.config['nMap'])))\
                +'.map' for ii in range(1, self.config['nMap']+1) ]
        self.mapIn = [
            os.path.join(self.config['scratchFolder'], file)
            for file in tmpList
        ]

        tmpList = [ config.__name__+'_'+str(ii).zfill(len(str(self.config['nReduce'])))\
                +'.int' for ii in range(1, self.config['nReduce']+1) ]
        self.reduceIn = [
            os.path.join(self.config['scratchFolder'], file)
            for file in tmpList
        ]
        self.reduceOut = [
            os.path.splitext(file)[0] + '.red' for file in self.reduceIn
        ]

        # Currently only supports a single output file
        self.finalOut = [config.__name__ + '.out']

        # count number of iterations
        self.nLoop = 0
        self.init = True
Example #16
def get_parameters(name):
    """ Reads the simulation parameters from the input hdf5 file. """

    if comm.rank == 0:
        f = h5py.File(name + '.grid', 'r')
        files_to_delete = [name + '.grid']

        omega = np.complex128(f['omega_r'][0] + 1j * f['omega_i'][0])
        shape = tuple([int(s) for s in f['shape'][:]])

        # bound_conds = f['bound_conds'][:]

        # Function used to read in 1D complex vector fields.
        get_1D_fields = lambda a: [(f[a+'_'+u+'r'][:] + 1j * f[a+'_'+u+'i'][:]).\
                                astype(np.complex128) for u in 'xyz']

        # Read in s and t vectors.
        s = get_1D_fields('sp')
        t = get_1D_fields('sd')

        # Read in max_iters and err_thresh.
        max_iters = int(f['max_iters'][0])
        # max_iters = 100
        err_thresh = float(f['err_thresh'][0])


        f.close() # Close file.

        # Function used to read in 3D complex vector fields.
        def get_3D_fields(a):
            field = []
            for k in range(3):
                key = name + '.' + a + '_' + 'xyz'[k]
                field.append((h5py.File(key + 'r')['data'][:] + \
                        1j * h5py.File(key + 'i')['data'][:]).astype(np.complex128))
                files_to_delete.append(key + 'r')
                files_to_delete.append(key + 'i')
            return field

#         # Read in m, e, and j fields.
#         for name in 'eJmE':
#             print comm.rank, name
#             params[name] = get_3D_fields(name)
        e = get_3D_fields('e')
        j = get_3D_fields('J')
        m = get_3D_fields('m')
        x = get_3D_fields('E')

        # Delete input files.
        for filename in files_to_delete:
            os.remove(filename)

        # Do some simple pre-computation.
        for k in range(3):
            m[k] = m[k]**-1
            e[k] = omega**2 * e[k]
            j[k] = -1j * omega * j[k]

        params = {'omega': omega, 'shape': shape, \
                'max_iters': max_iters, 'err_thresh': err_thresh, \
                's': s, 't': t}
                # 'e': e, 'm': m, 'j': j, 'x': x}
    else:
        params = None

    params = comm.bcast(params)

    if comm.rank == 0:
        params['e'] = e
        params['m'] = m
        params['j'] = j
        params['x'] = x
        
    else:
        for field_name in 'emjx':
            params[field_name] = [None] * 3

    return params

Example #17
def neighb(x, n, ng):
    return np.vstack((x, gen(x - ng, x + ng, n - 1)))


if rank == 0:
    solO = opticalpath(np.loadtxt("dbr.txt")[:, 1])
    sol = neighb(solO, n, Neighb)
    wyn = solO
    cel_wyn = goal(wyn, w)
    mkdir(path)
else:
    sol = np.array([])

sol = mpi.bcast(sol, root=0)
j = 0
start = timeit.default_timer()
stop = start
end = 500
mpi.Barrier()

while (j <= end) & ((stop - start) < time_limit):
    best_sol = np.array([])
    good_sol = np.array([])
    m = 0
    e = 0
    cel = np.array([])

    period = n // size
    full_range = np.linspace(0, size, size + 1) * period
Example #18
File: fdfd.py  Project: shanham/maxwell-b
def get_parameters(name):
    """ Reads the simulation parameters from the input hdf5 file. """

    if comm.rank == 0:
        f = h5py.File(name + '.grid', 'r')
        files_to_delete = [name + '.grid']

        omega = np.complex128(f['omega_r'][0] + 1j * f['omega_i'][0])
        shape = tuple([int(s) for s in f['shape'][:]])
        n_eig = int(f['n_eig'][0])

        # bloch boundary conditions
        bloch_phase = f['bloch_phase'].value

        # PEC or PMC boundary conditions
        pemc = f['pemc'].value.astype('int32')

        # get solver
        EM_solvers = ['CG', 'biCGSTAB', 'lgmres', 'Jacobi-Davidson']
        solver = EM_solvers[f['solver'].value]

        # Function used to read in 1D complex vector fields.
        get_1D_fields = lambda a: [(f[a+'_'+u+'r'][:] + 1j * f[a+'_'+u+'i'][:]).\
                                astype(np.complex128) for u in 'xyz']

        # Read in s and t vectors.
        s = get_1D_fields('sp')
        t = get_1D_fields('sd')

        # Read in max_iters and err_thresh.
        max_iters = int(f['max_iters'][0])
        err_thresh = float(f['err_thresh'][0])

        # Function used to read in 3D complex vector fields.
        def get_3D_fields(a):
            field = []
            # Check if field data all in one HDF5 file.
            if (a + '_xr') in f:
                for k in range(3):
                    key = a + '_' + 'xyz'[k]
                    field.append((f[key + 'r'][:] +
                                  1j * f[key + 'i'][:]).astype(np.complex128))
                return field

            for k in range(3):
                key = name + '.' + a + '_' + 'xyz'[k]
                field.append((h5py.File(key + 'r')['data'][:] + \
                        1j * h5py.File(key + 'i')['data'][:]).astype(np.complex128))
                files_to_delete.append(key + 'r')
                files_to_delete.append(key + 'i')
            return field

        e = get_3D_fields('e')  # Permittivity (eps).
        j = get_3D_fields('J')  # Current source.
        m = get_3D_fields('m')  # Permeability (mu).
        x = get_3D_fields('A')  # Initial fields (E0).

        f.close()  # Close file.

        # Delete input files.
        for filename in files_to_delete:
            os.remove(filename)

        # Do some simple pre-computation.
        for k in range(3):
            m[k] = m[k]**-1
            e[k] = omega**2 * e[k]
            j[k] = -1j * omega * j[k]

        params = {'omega': omega, 'shape': shape, 'n_eig': n_eig,\
                  'max_iters': max_iters, 'err_thresh': err_thresh, \
                  's': s, 't': t, 'bloch_phase': bloch_phase, \
                  'pemc': pemc, 'solver': solver}
    else:
        params = None

    params = comm.bcast(params)

    if comm.rank == 0:
        params['e'] = e
        params['m'] = m
        params['j'] = j
        params['x'] = x
    else:
        for field_name in 'emjx':
            params[field_name] = [None] * 3

    return params
Example #19
from distributed import Client
from mpi4py.MPI import COMM_WORLD as world

from dask_mpi import initialize, send_close_signal

# Split our MPI world into two pieces, one consisting just of
# the old rank 3 process and the other with everything else
new_comm_assignment = 1 if world.rank == 3 else 0
comm = world.Split(new_comm_assignment)

if world.rank != 3:
    # run tests with rest of comm
    is_client = initialize(comm=comm, exit=False)

    if is_client:
        with Client() as c:
            assert c.submit(lambda x: x + 1, 10).result() == 11
            assert c.submit(lambda x: x + 1, 20).result() == 21
        send_close_signal()

# check that our original comm is intact
world.Barrier()
x = 100 if world.rank == 0 else 200
x = world.bcast(x)
assert x == 100
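
The world.Split call above works because ranks passing the same "color" end up in the same new communicator, renumbered from zero. A tiny stand-alone illustration of that behaviour (my own sketch, unrelated to dask_mpi):

from mpi4py import MPI

world = MPI.COMM_WORLD
color = 0 if world.rank % 2 == 0 else 1    # even ranks in one group, odd ranks in the other
sub = world.Split(color)
print("world rank", world.rank, "-> sub rank", sub.Get_rank(), "of", sub.Get_size())
sub.Free()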