# Example 1
    def mpi_jit(*args, **kwargs):
        """JIT compile on process 0 first, then on all remaining ranks.

        Rank 0 performs the actual compilation so that the result lands in
        the shared cache; the other ranks then invoke the JIT compiler and
        (typically) just read the cache.

        Accepts an optional ``mpi_comm`` keyword; falls back to a wrapped
        MPI_COMM_WORLD when it is not given.
        """

        # Use the provided communicator, or a wrapped MPI_COMM_WORLD
        mpi_comm = kwargs.get("mpi_comm")
        if mpi_comm is None:
            mpi_comm = cpp.mpi_comm_world()

        # Just call JIT compiler when running in serial
        if MPI.size(mpi_comm) == 1:
            return local_jit(*args, **kwargs)

        # Compile first on process 0. Record failure in a status flag
        # instead of raising immediately: raising on root alone would
        # leave the other ranks blocked in the collective below (deadlock).
        status = 0  # 0 == ok, 1 == fail
        error_msg = "Compilation failed on root node."
        root = MPI.rank(mpi_comm) == 0
        if root:
            try:
                output = local_jit(*args, **kwargs)
            except Exception as e:
                status = 1
                error_msg = str(e)

        # Agree on a global status across all ranks; the reduction also
        # acts as the synchronization point (any failing rank wins)
        global_status = MPI.max(mpi_comm, status)

        if global_status != 0:
            # Fail simultaneously on all processes, to allow catching
            # the error without deadlock
            cpp.dolfin_error("jit.py",
                             "perform just-in-time compilation of form",
                             error_msg)

        # Then compile on all other processes (which may then just
        # read the cache)
        if not root:
            output = local_jit(*args, **kwargs)

        return output
# Example 2
    def mpi_jit(*args, **kwargs):
        """JIT compile on process 0 first, then on all remaining ranks.

        Rank 0 compiles so the result lands in the shared cache; the other
        ranks then call the JIT compiler, which should just read the cache.
        A failure on rank 0 is turned into a status flag and re-raised
        collectively on every rank, so callers can catch the error without
        deadlocking.
        """

        # Create MPI_COMM_WORLD wrapper
        mpi_comm = kwargs.get("mpi_comm")
        if mpi_comm is None:
            mpi_comm = cpp.mpi_comm_world()

        # Just call JIT compiler when running in serial
        if MPI.size(mpi_comm) == 1:
            return local_jit(*args, **kwargs)

        # Default status (0 == ok, 1 == fail)
        status = 0

        # Compile first on process 0.  Catch (rather than raise) the
        # exception here: raising on root alone would leave the other
        # ranks blocked in the collective reduction below.
        root = MPI.rank(mpi_comm) == 0
        if root:
            try:
                output = local_jit(*args, **kwargs)
            except Exception as e:
                status = 1
                error_msg = str(e)

        # TODO: This would have lower overhead if using the dijitso.jit
        # features to inject a waiting callback instead of waiting out here.
        # That approach allows all processes to first look in the cache,
        # introducing a barrier only on cache miss.
        # There's also a sketch in dijitso of how to make only one
        # process per physical cache directory do the compilation.

        # Wait for the compiling process to finish and get status
        # (the max-reduction doubles as the synchronization point)
        # TODO: Would be better to broadcast the status from root but this works.
        global_status = MPI.max(mpi_comm, status)

        if global_status == 0:
            # Success, call jit on all other processes
            # (this should just read the cache)
            if not root:
                output = local_jit(*args, **kwargs)
        else:
            # Fail simultaneously on all processes,
            # to allow catching the error without deadlock
            # (non-root ranks never saw the exception, so they only
            # have a generic message)
            if not root:
                error_msg = "Compilation failed on root node."
            cpp.dolfin_error("jit.py",
                             "perform just-in-time compilation of form",
                             error_msg)
        return output
# Example 3
    def mpi_jit(*args, **kwargs):
        """JIT compile on process 0 first, then on all remaining ranks.

        Rank 0 performs the compilation so the result lands in the cache;
        after a barrier, the other ranks call the JIT compiler and may
        simply read the cache.

        Accepts an optional ``mpi_comm`` keyword; falls back to a wrapped
        MPI_COMM_WORLD when it is not given.
        """

        # Use the communicator supplied by the caller, falling back to a
        # wrapped MPI_COMM_WORLD (previously the kwarg was silently ignored)
        mpi_comm = kwargs.get("mpi_comm")
        if mpi_comm is None:
            mpi_comm = cpp.mpi_comm_world()

        # Just call JIT compiler when running in serial
        if MPI.size(mpi_comm) == 1:
            return local_jit(*args, **kwargs)

        # Compile first on process 0 (compute rank once and reuse it)
        root = MPI.rank(mpi_comm) == 0
        if root:
            output = local_jit(*args, **kwargs)

        # Wait for the first process to finish
        MPI.barrier(mpi_comm)

        # Then compile on all other processes (which may then just
        # read the cache)
        if not root:
            output = local_jit(*args, **kwargs)

        return output
# Example 4
def assemble_multimesh(form,
                       tensor=None,
                       form_compiler_parameters=None,
                       backend=None):
    """Assemble the given multimesh form and return the corresponding tensor.

    *Arguments*
        form
            The multimesh form to assemble.
        tensor
            Optional pre-allocated tensor to assemble into.
        form_compiler_parameters
            Optional form compiler parameters; defaults to the global
            ``parameters['form_compiler']``.
        backend
            Optional linear algebra backend used when creating the tensor.

    Returns the assembled tensor; a plain float when the form has rank 0.
    """

    # The form that comes in is (by construction in function.Argument)
    # defined on the first part of the multimesh. We now need to create
    # the DOLFIN Forms with the proper function spaces for each part.

    # FIXME: This code makes a number of assumptions and will need to
    # be revisited and improved.

    # Make sure that we generate code for evaluate_basis_derivatives
    if not form_compiler_parameters:
        form_compiler_parameters = parameters['form_compiler']
    form_compiler_parameters = form_compiler_parameters.copy()
    form_compiler_parameters["no-evaluate_basis_derivatives"] = False

    # Extract arguments and coefficients
    coefficients = form.coefficients()
    arguments = form.arguments()

    # Extract rank (0 = functional, 1 = linear form, 2 = bilinear form)
    rank = len(arguments)

    # Extract multimesh function spaces for arguments
    V_multi = [v._V_multi for v in arguments]

    # Extract number of parts, the multimesh and create the multimesh form
    multimesh = None
    if rank > 0:
        num_parts = V_multi[0].num_parts()
        multimesh_form = cpp.MultiMeshForm(*V_multi)
        multimesh = V_multi[0].multimesh()
    else:
        # For functionals the multimesh must be recovered from a
        # MultiMeshFunction coefficient; take the first one found
        for coeff in coefficients:
            if isinstance(coeff, MultiMeshFunction):
                multimesh = coeff.function_space().multimesh()
                num_parts = coeff.function_space().num_parts()
                multimesh_form = cpp.MultiMeshForm(multimesh)
                break

    # Previously a form with no arguments and no MultiMeshFunction
    # coefficient (e.g. assemble_multimesh(Constant(1)*dX)) fell through
    # to a NameError below; fail with a clear message instead.
    if multimesh is None:
        raise RuntimeError("Unable to extract multimesh from form; "
                           "assembling forms without arguments or "
                           "MultiMeshFunction coefficients is not supported.")

    # Build multimesh DOLFIN form
    for part in range(num_parts):
        # Extract standard function spaces for all arguments on
        # current part
        function_spaces = [V_multi[i].part(part) for i in range(rank)]

        # Wrap standard form
        dolfin_form = _create_dolfin_form(form, form_compiler_parameters,
                                          function_spaces)

        # Setting coefficients for the multimesh form; multimesh
        # coefficients are restricted to the current part
        for i, coefficient in enumerate(coefficients):
            if isinstance(coefficient, MultiMeshFunction):
                coeff = coefficient.part(part)
            else:
                coeff = coefficient
            # Developer note: This may be done more elegantly by modifying
            # _create_dolfin_form
            dolfin_form.set_coefficient(i, coeff)
            dolfin_form.coefficients[i] = coeff

        # Add standard mesh to the standard form and the
        # standard form to the multimesh form
        dolfin_form.set_mesh(multimesh.part(part))
        multimesh_form.add(dolfin_form)

    # Build multimesh form
    multimesh_form.build()

    # Create tensor
    comm = cpp.mpi_comm_world()
    tensor = _create_tensor(comm, form, rank, backend, tensor)

    # Call C++ assemble function
    assembler = cpp.MultiMeshAssembler()
    assembler.assemble(tensor, multimesh_form)

    # Convert to float for scalars
    if rank == 0:
        tensor = tensor.get_scalar_value()

    # Return value
    return tensor
# Example 5
def assemble_multimesh(form,
                       tensor=None,
                       form_compiler_parameters=None,
                       backend=None):
    """Assemble the given multimesh form and return the corresponding tensor."""

    # The incoming form is (by construction in function.Argument) defined
    # on the first part of the multimesh; build one DOLFIN Form per part,
    # each with the function spaces belonging to that part.

    # FIXME: This code makes a number of assumptions and will need to
    # be revisited and improved.

    # Force code generation for evaluate_basis_derivatives
    if not form_compiler_parameters:
        form_compiler_parameters = parameters['form_compiler']
    form_compiler_parameters = form_compiler_parameters.copy()
    form_compiler_parameters["no-evaluate_basis_derivatives"] = False

    # Collect the multimesh function space of every form argument
    form_arguments = form.arguments()
    spaces = [argument._V_multi for argument in form_arguments]

    # Rank of the form, and the number of parts (checked on the first
    # space only)
    form_rank = len(form_arguments)
    parts = spaces[0].num_parts()  # check only first

    # Assemble the multimesh DOLFIN form part by part
    multimesh_form = cpp.MultiMeshForm(*spaces)
    for part in range(parts):

        # Standard function spaces restricted to the current part
        part_spaces = [space.part(part) for space in spaces]

        # Wrap the form as a standard DOLFIN form on this part
        part_form = _create_dolfin_form(form,
                                        form_compiler_parameters,
                                        part_spaces)

        # Register the standard form with the multimesh form
        multimesh_form.add(part_form)

    # Finalize the multimesh form
    multimesh_form.build()

    # Create the tensor to assemble into
    world = cpp.mpi_comm_world()
    tensor = _create_tensor(world, form, form_rank, backend, tensor)

    # Delegate the actual assembly to C++
    assembler = cpp.MultiMeshAssembler()
    assembler.assemble(tensor, multimesh_form)

    # Scalars are handed back as plain floats
    if form_rank == 0:
        tensor = tensor.get_scalar_value()

    return tensor