Example #1
File: mpi.py  Project: toddrme2178/pyccel
    def reduce(self, x):
        # ierr and the result buffer must be declared before the call,
        # since mpi_allreduce fills them in place
        ierr = -1

        global_x = 0.0

        # sum the local value x over every process of the Cartesian
        # communicator; all ranks receive the result in global_x
        mpi_allreduce(x, global_x, 1, MPI_DOUBLE, MPI_SUM, self.comm_cart,
                      ierr)
        print(global_x, x)
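The snippet above is a method fragment; self.comm_cart is a Cartesian communicator created elsewhere in the class. As a minimal, self-contained sketch of the same call (assuming only pyccel stdlib names that already appear in the other examples on this page), summing one double-precision value across all ranks:

from pyccel.stdlib.parallel.mpi import mpi_init
from pyccel.stdlib.parallel.mpi import mpi_finalize
from pyccel.stdlib.parallel.mpi import mpi_comm_rank
from pyccel.stdlib.parallel.mpi import mpi_comm_world
from pyccel.stdlib.parallel.mpi import mpi_allreduce
from pyccel.stdlib.parallel.mpi import MPI_DOUBLE
from pyccel.stdlib.parallel.mpi import MPI_SUM

ierr = -1
rank = -1

mpi_init(ierr)

comm = mpi_comm_world
mpi_comm_rank(comm, rank, ierr)

x = 1.0 * rank     # local contribution of this process
global_x = 0.0     # result buffer, filled in by mpi_allreduce

# every rank receives global_x = 0 + 1 + ... + (size - 1)
mpi_allreduce(x, global_x, 1, MPI_DOUBLE, MPI_SUM, comm, ierr)
print('rank ', rank, ' has the global sum ', global_x)

mpi_finalize(ierr)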
Example #2
    # ... Computation of u at the n+1 iteration
    for i, j in mesh.indices:
        u_new[i, j] = c0 * (c1*(u[i+1, j] + u[i-1, j]) + c2*(u[i, j+1] + u[i, j-1]) - f[i, j])
    # ...

    # ... Computation of the global error
    u_error = 0.0
    for i, j in mesh.indices:
        u_error += abs(u[i, j] - u_new[i, j])
    local_error = u_error/(ntx*nty)

    # Reduction
    ierr = -1
    global_error = 0.0
    mpi_allreduce(local_error, global_error, 1, MPI_DOUBLE, MPI_SUM,
                  mesh.comm_cart, ierr)
    # ...

    # ...
    if (global_error < tol) or (it == n_iterations - 1):
        if mesh.rank == 0:
            print ("> convergence after ", it, " iterations")
            print ("  local  error = ", local_error)
            print ("  global error = ", global_error)
        break
    # ...

del mesh

mpi_finalize(ierr)
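For context (an inference from the stencil; the definitions of c0, c1, c2 are not shown in this snippet): the first loop above is one sweep of the Jacobi iteration for a 2D Poisson problem Δu = f,

    u^{n+1}_{i,j} = c_0 * ( c_1*(u^n_{i+1,j} + u^n_{i-1,j}) + c_2*(u^n_{i,j+1} + u^n_{i,j-1}) - f_{i,j} ),

where, on a grid with spacings hx and hy, one typically has c_1 = 1/hx^2, c_2 = 1/hy^2 and c_0 = 1/(2*(c_1 + c_2)). The mpi_allreduce then sums the per-process mean errors, so every rank evaluates the same global_error and all processes leave the iteration loop together.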
Example #3
    for i in range(sx, ex + 1):
        for j in range(sy, ey + 1):
            u_new[i, j] = c0 * (c1 * (u[i + 1, j] + u[i - 1, j]) + c2 *
                                (u[i, j + 1] + u[i, j - 1]) - f[i, j])
    # ...

    # ... Computation of the global error
    u_error = 0.0
    for i in range(sx, ex + 1):
        for j in range(sy, ey + 1):
            u_error += abs(u[i, j] - u_new[i, j])
    local_error = u_error / (ntx * nty)

    # Reduction
    global_error = 0.0
    mpi_allreduce(local_error, global_error, 1, MPI_DOUBLE, MPI_SUM, comm_2d,
                  ierr)
    # ...

    # ...
    if (global_error < tol) or (it == n_iterations - 1):
        if rank == 0:
            print("> convergence after ", it, " iterations")
            print("  local  error = ", local_error)
            print("  global error = ", global_error)
        break
    # ...

# Free the datatype
mpi_type_free(type_line, ierr)
mpi_type_free(type_column, ierr)
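Example #3 ends by freeing two derived datatypes, presumably used to exchange grid lines and columns between neighbouring processes. For context, such types are normally built once before the iteration loop and committed before first use. A hedged sketch of that setup (the pyccel imports, the local sizes, and which index is contiguous in memory are all assumptions here; none of this is shown in the snippet):

from pyccel.stdlib.parallel.mpi import mpi_type_contiguous
from pyccel.stdlib.parallel.mpi import mpi_type_vector
from pyccel.stdlib.parallel.mpi import mpi_type_commit
from pyccel.stdlib.parallel.mpi import MPI_DOUBLE

ierr = -1
type_line = -1
type_column = -1

nx = ex - sx + 1    # local block size in x (assumed)
ny = ey - sy + 1    # local block size in y (assumed)

# a line of the local block: ny contiguous doubles
# (assuming the second index varies fastest in memory)
mpi_type_contiguous(ny, MPI_DOUBLE, type_line, ierr)
mpi_type_commit(type_line, ierr)

# a column: nx doubles separated by a stride of ny elements
mpi_type_vector(nx, 1, ny, MPI_DOUBLE, type_column, ierr)
mpi_type_commit(type_column, ierr)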
Example #4
from pyccel.stdlib.parallel.mpi import mpi_init
from pyccel.stdlib.parallel.mpi import mpi_finalize
from pyccel.stdlib.parallel.mpi import mpi_comm_size
from pyccel.stdlib.parallel.mpi import mpi_comm_rank
from pyccel.stdlib.parallel.mpi import mpi_comm_world
from pyccel.stdlib.parallel.mpi import mpi_status_size
from pyccel.stdlib.parallel.mpi import mpi_allreduce
from pyccel.stdlib.parallel.mpi import MPI_INTEGER
from pyccel.stdlib.parallel.mpi import MPI_PROD

# MPI subroutines fill their output arguments in place, so these
# variables must be declared (and typed) before the calls below
ierr = -1
size = -1
rank = -1

mpi_init(ierr)

comm = mpi_comm_world
mpi_comm_size(comm, size, ierr)
mpi_comm_rank(comm, rank, ierr)

if rank == 0:
    value = 1000
else:
    value = rank

product_value = 0
mpi_allreduce(value, product_value, 1, MPI_INTEGER, MPI_PROD, comm, ierr)

print('I, process ', rank, ', have the global product value ', product_value)

mpi_finalize(ierr)
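Sanity check on the output: rank 0 contributes 1000 and every other rank r contributes r, so with P processes each rank prints product_value = 1000 * 1 * 2 * ... * (P-1) = 1000 * (P-1)!. Run on 4 processes, for example, every rank prints 1000 * 1 * 2 * 3 = 6000.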