Example #1
    def before_first_compute(self, get):
        u = get("Velocity")
        V = u.function_space()

        spaces = SpacePool(V.mesh())
        degree = V.ufl_element().degree()
        
        if degree <= 1:
            Q = spaces.get_grad_space(V, shape=(spaces.d,))
        else:
            if degree > 2:
                cbc_warning("Unable to handle higher order WSS space. Using CG1.")
            Q = spaces.get_space(1,1)

        Q_boundary = spaces.get_space(Q.ufl_element().degree(), 1, boundary=True)

        self.v = TestFunction(Q)
        self.tau = Function(Q, name="WSS_full")
        self.tau_boundary = Function(Q_boundary, name="WSS")

        local_dofmapping = mesh_to_boundarymesh_dofmap(spaces.BoundaryMesh, Q, Q_boundary)
        self._keys = np.array(local_dofmapping.keys(), dtype=np.intc)
        self._values = np.array(local_dofmapping.values(), dtype=np.intc)
        self._temp_array = np.zeros(len(self._keys), dtype=np.float_)

        Mb = assemble(inner(TestFunction(Q_boundary), TrialFunction(Q_boundary))*dx)
        self.solver = create_solver("gmres", "jacobi")
        self.solver.set_operator(Mb)

        self.b = Function(Q_boundary).vector()

        self._n = FacetNormal(V.mesh())
Example #2
def compute_regular_timesteps(problem):
    """Compute fixed timesteps for problem.

    The first timestep will be T0 while the last timestep will be in the interval [T, T+dt).

    Returns (dt, timesteps, start_timestep).
    """
    # Get the time range and timestep from the problem
    T0 = problem.params.T0
    T = problem.params.T
    dt = problem.params.dt
    start_timestep = problem.params.start_timestep

    # Compute regular timesteps, including T0 and T
    timesteps = arange(T0, T+dt, dt)

    if abs(dt - (timesteps[1]-timesteps[0])) > 1e-8:
        error("Computed timestep size does not match specified dt.")

    if timesteps[-1] < T - dt*1e-6:
        error("Computed timesteps range does not include end time.")

    if timesteps[-1] > T + dt*1e-6:
        cbc_warning("End time for simulation does not match end time set for problem (T-T0 not a multiple of dt).")

    if start_timestep < 0 or start_timestep >= len(timesteps):
        error("start_timestep is beyond the computed timesteps.")

    return dt, timesteps, start_timestep
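
A minimal usage sketch for compute_regular_timesteps; the problem stand-in below is hypothetical and only mimics the parameter attributes the function reads:

# Hypothetical stand-in for a problem object; real cbcpost problems carry
# these values in their parameter set.
class _MockProblem(object):
    class params(object):
        T0, T, dt = 0.0, 1.0, 0.1
        start_timestep = 0

dt, timesteps, start_timestep = compute_regular_timesteps(_MockProblem())
# dt == 0.1, timesteps covers 0.0, 0.1, ..., 1.0, start_timestep == 0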
Example #3
    def _clean_pvd(self, fieldname, del_metadata):
        if os.path.isfile(
                os.path.join(self._pp.get_savedir(fieldname),
                             fieldname + '.pvd')):
            cbc_warning(
                "No functionality for cleaning pvd-files for restart. Will overwrite."
            )
Example #4
    def _clean_hdf5(self, fieldname, del_metadata):
        delete_from_hdf5_file = '''
        namespace dolfin {
            #include <hdf5.h>
            void delete_from_hdf5_file(const MPI_Comm comm,
                                       const std::string hdf5_filename,
                                       const std::string dataset,
                                       const bool use_mpiio)
            {
                //const hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
                // Open file existing file for append
                //hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDWR, plist_id);
                hid_t hdf5_file_id = HDF5Interface::open_file(comm, hdf5_filename, "a", use_mpiio);

                H5Ldelete(hdf5_file_id, dataset.c_str(), H5P_DEFAULT);
                HDF5Interface::close_file(hdf5_file_id);
            }
        }
        '''
        cpp_module = compile_extension_module(
            delete_from_hdf5_file,
            additional_system_headers=["dolfin/io/HDF5Interface.h"])

        hdf5filename = os.path.join(self._pp.get_savedir(fieldname),
                                    fieldname + '.hdf5')

        if not os.path.isfile(hdf5filename):
            return

        for k, v in del_metadata.items():
            if 'hdf5' not in v:
                continue
            else:
                cpp_module.delete_from_hdf5_file(
                    mpi_comm_world(), hdf5filename, v['hdf5']['dataset'],
                    MPI.size(mpi_comm_world()) > 1)

        hdf5tmpfilename = os.path.join(self._pp.get_savedir(fieldname),
                                       fieldname + '_tmp.hdf5')
        #import ipdb; ipdb.set_trace()
        MPI.barrier(mpi_comm_world())
        if on_master_process():
            # status, result = getstatusoutput("h5repack -V")
            status, result = -1, -1
            if status != 0:
                cbc_warning(
                    "Unable to run h5repack. Will not repack hdf5-files before replay, which may cause bloated hdf5-files."
                )
            else:
                subprocess.call("h5repack %s %s" %
                                (hdf5filename, hdf5tmpfilename),
                                shell=True)
                os.remove(hdf5filename)
                os.rename(hdf5tmpfilename, hdf5filename)
        MPI.barrier(mpi_comm_world())
Example #5
def disable_plotting():
    "Disable all plotting if we run in parallell."
    if disable_plotting.value == "init":
        if in_serial() and 'DISPLAY' in os.environ:
            disable_plotting.value = False
        elif 'DISPLAY' not in os.environ:
            cbc_warning("Did not find display. Disabling plotting.")
            disable_plotting.value = True
        else:
            cbc_warning("Unable to plot in paralell. Disabling plotting.")
            disable_plotting.value = True

    return disable_plotting.value
Example #6
def import_pylab():
    "Set up pylab if available."
    if import_pylab.value == "init":
        if disable_plotting():
            import_pylab.value = None
        else:
            try:
                import pylab
                pylab.ion()
                import_pylab.value = pylab
            except ImportError:
                cbc_warning("Unable to load pylab. Disabling pylab plotting.")
                import_pylab.value = None
    return import_pylab.value
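
Both disable_plotting and import_pylab cache their result in a function attribute named value; a minimal sketch of how that attribute is presumably seeded at module level (the initialization site is an assumption, it is not shown in these excerpts):

# Seed the per-function caches; the first real call replaces "init" with the
# computed value, and subsequent calls simply return it.
disable_plotting.value = "init"
import_pylab.value = "init"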
Example #7
    def compute(self, get):
        u = get(self.valuename)

        if isinstance(u, Function):
            if not hasattr(self, "use_project"):
                self.before_first_compute(get)

            if LooseVersion(dolfin_version()) > LooseVersion("1.6.0"):
                rank = len(u.ufl_shape)
            else:
                rank = u.rank()

            if rank == 0:
                self.f.vector().zero()
                self.f.vector().axpy(1.0, u.vector())
                self.f.vector().abs()
                return self.f
            elif rank >= 1:
                if self.use_project:
                    b = assemble(sqrt(inner(u, u)) * self.v * dx(None))
                    self.projection.solve(self.f.vector(), b)
                else:
                    self.assigner.assign(self.subfuncs, u)
                    self.f.vector().zero()
                    for i in xrange(u.function_space().num_sub_spaces()):
                        vec = self.subfuncs[i].vector()
                        vec.apply('')
                        self.f.vector().axpy(1.0, vec * vec)

                    try:
                        sqrt_in_place(self.f.vector())
                    except:
                        r = self.f.vector().local_range()
                        self.f.vector()[r[0]:r[1]] = np.sqrt(
                            self.f.vector()[r[0]:r[1]])
                    self.f.vector().apply('')

                return self.f
        elif isinstance(u, Iterable) and all(
                isinstance(_u, Number) for _u in u):
            return np.sqrt(sum(_u**2 for _u in u))
        elif isinstance(u, Number):
            return abs(u)
        else:
            # Don't know how to handle object
            cbc_warning(
                "Don't know how to calculate magnitude of object of type %s. Returning object."
                % type(u))
            return u
Example #8
    def _action_plot(self, t, timestep, field, data):
        "Apply the 'plot' action to computed field data."
        if data is None:
            return

        if disable_plotting():
            return
        if isinstance(data, Function):
            self._plot_dolfin(t, timestep, field, data)
        elif isinstance(data, float):
            self._plot_pylab(t, timestep, field, data)
        else:
            cbc_warning("Unable to plot object %s of type %s." %
                        (field.name, type(data)))
        self._timer.completed("PP: plot %s" % field.name)
Example #9
    def compute(self, get):
        u = get(self.valuename)

        if u is None:
            return None

        if not isinstance(u, Function):
            cbc_warning("Do not understand how to handle datatype %s" %
                        str(type(u)))
            return None

        #if not hasattr(self, "restriction_map"):
        if not hasattr(self, "keys"):
            V = u.function_space()
            element = V.ufl_element()
            family = element.family()
            degree = element.degree()

            if LooseVersion(dolfin_version()) > LooseVersion("1.6.0"):
                rank = len(u.ufl_shape)
            else:
                rank = u.rank()

            if rank == 0:
                FS = FunctionSpace(self.submesh, family, degree)
            elif rank == 1:
                FS = VectorFunctionSpace(self.submesh, family, degree)
            elif rank == 2:
                FS = TensorFunctionSpace(self.submesh,
                                         family,
                                         degree,
                                         symmetry={})

            self.u = Function(FS)

            #self.restriction_map = restriction_map(V, FS)
            rmap = restriction_map(V, FS)
            self.keys = np.array(rmap.keys(), dtype=np.intc)
            self.values = np.array(rmap.values(), dtype=np.intc)
            self.temp_array = np.zeros(len(self.keys), dtype=np.float_)

        # The simple __getitem__ and __setitem__ methods were removed in dolfin 1.5.0.
        # The new cbcpost-method get_set_vector should be compatible with 1.4.0 and 1.5.0.
        #self.u.vector()[self.keys] = u.vector()[self.values]

        get_set_vector(self.u.vector(), self.keys, u.vector(), self.values,
                       self.temp_array)
        return self.u
Example #10
    def before_first_compute(self, get):
        u = get(self.valuename)

        if isinstance(u, Function):

            if LooseVersion(dolfin_version()) > LooseVersion("1.6.0"):
                rank = len(u.ufl_shape)
            else:
                rank = u.rank()

            if rank == 0:
                self.f = Function(u.function_space())
            elif rank >= 1:
                # Assume all subspaces are equal
                V = u.function_space().extract_sub_space([0]).collapse()
                mesh = V.mesh()
                el = V.ufl_element()
                self.f = Function(V)

                # Find out if we can operate directly on vectors, or if we have to use a projection
                # We can operate on vectors if all sub-dofmaps are ordered the same way
                # For simplicity, this is only tested for CG- or DG0-spaces
                # (this might always be true for these spaces, but better to be safe than sorry )
                self.use_project = True
                if el.family() == "Lagrange" or (el.family()
                                                 == "Discontinuous Lagrange"
                                                 and el.degree() == 0):
                    #dm = u.function_space().dofmap()
                    dm0 = V.dofmap()
                    self.use_project = False
                    for i in xrange(u.function_space().num_sub_spaces()):
                        Vi = u.function_space().extract_sub_space(
                            [i]).collapse()
                        dmi = Vi.dofmap()
                        try:
                            # For dolfin versions newer than 1.6.0
                            diff = Vi.tabulate_dof_coordinates(
                            ) - V.tabulate_dof_coordinates()
                        except:
                            # For 1.6.0 and older
                            diff = dmi.tabulate_all_coordinates(
                                mesh) - dm0.tabulate_all_coordinates(mesh)
                        if len(diff) > 0:
                            max_diff = max(abs(diff))
                        else:
                            max_diff = 0.0
                        max_diff = MPI.max(mpi_comm_world(), max_diff)
                        if max_diff > 1e-12:
                            self.use_project = True
                            break
                        self.assigner = FunctionAssigner(
                            [V] * u.function_space().num_sub_spaces(),
                            u.function_space())
                        self.subfuncs = [
                            Function(V)
                            for _ in range(u.function_space().num_sub_spaces())
                        ]

                # IF we have to use a projection, build projection matrix only once
                if self.use_project:
                    self.v = TestFunction(V)
                    M = assemble(inner(self.v, TrialFunction(V)) * dx)
                    self.projection = KrylovSolver("cg", "default")
                    self.projection.set_operator(M)
        elif isinstance(u, Iterable) and all(
                isinstance(_u, Number) for _u in u):
            pass
        elif isinstance(u, Number):
            pass
        else:
            # Don't know how to handle object
            cbc_warning(
                "Don't know how to calculate magnitude of object of type %s." %
                type(u))
Example #11
def create_submesh(mesh, markers, marker):
    "This function allows for a SubMesh-equivalent to be created in parallel"
    # Build mesh
    submesh = Mesh()
    mesh_editor = MeshEditor()
    mesh_editor.open(submesh,
                     mesh.ufl_cell().cellname(),
                     mesh.ufl_cell().topological_dimension(),
                     mesh.ufl_cell().geometric_dimension())

    # Return empty mesh if no matching markers
    if MPI.sum(mpi_comm_world(), int(marker in markers.array())) == 0:
        cbc_warning(
            "Unable to find matching markers in meshfunction. Submesh is empty."
        )
        mesh_editor.close()
        return submesh

    base_cell_indices = np.where(markers.array() == marker)[0]
    base_cells = mesh.cells()[base_cell_indices]
    base_vertex_indices = np.unique(base_cells.flatten())

    base_global_vertex_indices = sorted(
        [mesh.topology().global_indices(0)[vi] for vi in base_vertex_indices])

    gi = mesh.topology().global_indices(0)
    shared_local_indices = set(base_vertex_indices).intersection(
        set(mesh.topology().shared_entities(0).keys()))
    shared_global_indices = [gi[vi] for vi in shared_local_indices]

    unshared_global_indices = list(
        set(base_global_vertex_indices) - set(shared_global_indices))
    unshared_vertices_dist = distribution(len(unshared_global_indices))

    # Number unshared vertices separately on each process
    idx = sum(unshared_vertices_dist[:MPI.rank(mpi_comm_world())])
    base_to_sub_global_indices = {}
    for gi in unshared_global_indices:
        base_to_sub_global_indices[gi] = idx
        idx += 1

    # Gather all shared vertex indices on process 0 and assign global indices
    all_shared_global_indices = gather(shared_global_indices,
                                       on_process=0,
                                       flatten=True)
    all_shared_global_indices = np.unique(all_shared_global_indices)

    shared_base_to_sub_global_indices = {}
    idx = int(
        MPI.max(mpi_comm_world(),
                float(max(base_to_sub_global_indices.values() + [-1e16]))) + 1)
    if MPI.rank(mpi_comm_world()) == 0:
        for gi in all_shared_global_indices:
            shared_base_to_sub_global_indices[int(gi)] = idx
            idx += 1

    # Broadcast global numbering of all shared vertices
    shared_base_to_sub_global_indices = dict(
        zip(broadcast(shared_base_to_sub_global_indices.keys(), 0),
            broadcast(shared_base_to_sub_global_indices.values(), 0)))

    # Join shared and unshared numbering in one dict
    base_to_sub_global_indices = dict(
        base_to_sub_global_indices.items() +
        shared_base_to_sub_global_indices.items())

    # Create mapping of local indices
    base_to_sub_local_indices = dict(
        zip(base_vertex_indices, range(len(base_vertex_indices))))

    # Define sub-cells
    sub_cells = [None] * len(base_cells)
    for i, c in enumerate(base_cells):
        sub_cells[i] = [base_to_sub_local_indices[j] for j in c]

    # Store vertices as sub_vertices[local_index] = (global_index, coordinates)
    sub_vertices = {}
    for base_local, sub_local in base_to_sub_local_indices.items():
        sub_vertices[sub_local] = (base_to_sub_global_indices[
            mesh.topology().global_indices(0)[base_local]],
                                   mesh.coordinates()[base_local])

    ## Done with base mesh

    # Distribute meshdata on (if any) empty processes
    sub_cells, sub_vertices = distribute_meshdata(sub_cells, sub_vertices)
    global_cell_distribution = distribution(len(sub_cells))
    #global_vertex_distribution = distribution(len(sub_vertices))

    global_num_cells = MPI.sum(mpi_comm_world(), len(sub_cells))
    global_num_vertices = sum(unshared_vertices_dist) + MPI.sum(
        mpi_comm_world(), len(all_shared_global_indices))

    mesh_editor.init_vertices(len(sub_vertices))
    #mesh_editor.init_cells(len(sub_cells))
    mesh_editor.init_cells_global(len(sub_cells), global_num_cells)
    global_index_start = sum(
        global_cell_distribution[:MPI.rank(mesh.mpi_comm())])

    for index, cell in enumerate(sub_cells):
        if LooseVersion(dolfin_version()) >= LooseVersion("1.6.0"):
            mesh_editor.add_cell(index, *cell)
        else:
            mesh_editor.add_cell(int(index), global_index_start + index,
                                 np.array(cell, dtype=np.uintp))

    for local_index, (global_index, coordinates) in sub_vertices.items():
        #print coordinates
        mesh_editor.add_vertex_global(int(local_index), int(global_index),
                                      coordinates)

    mesh_editor.close()

    submesh.topology().init(0, len(sub_vertices), global_num_vertices)
    submesh.topology().init(mesh.ufl_cell().topological_dimension(),
                            len(sub_cells), global_num_cells)

    # FIXME: Set up shared entities
    # What damage does this do?
    submesh.topology().shared_entities(0)[0] = []
    # The code below sets up shared vertices, but lacks shared facets.
    # It is considered incomplete, and therefore commented out
    '''
    #submesh.topology().shared_entities(0)[0] = []
    from dolfin import compile_extension_module
    cpp_code = """
    void set_shared_entities(Mesh& mesh, std::size_t idx, const Array<std::size_t>& other_processes)
    {
        std::set<unsigned int> set_other_processes;
        for (std::size_t i=0; i<other_processes.size(); i++)
        {
            set_other_processes.insert(other_processes[i]);
            //std::cout << idx << " --> " << other_processes[i] << std::endl;
        }
        //std::cout << idx << " --> " << set_other_processes[0] << std::endl;
        mesh.topology().shared_entities(0)[idx] = set_other_processes;
    }
    """

    set_shared_entities = compile_extension_module(cpp_code).set_shared_entities
    base_se = mesh.topology().shared_entities(0)
    se = submesh.topology().shared_entities(0)

    for li in shared_local_indices:
        arr = np.array(base_se[li], dtype=np.uintp)
        sub_li = base_to_sub_local_indices[li]
        set_shared_entities(submesh, base_to_sub_local_indices[li], arr)
    '''
    return submesh
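
A minimal usage sketch for create_submesh, assuming legacy DOLFIN (UnitSquareMesh, CellFunction) and marking the cells with midpoint x < 0.5:

from dolfin import UnitSquareMesh, CellFunction, cells

mesh = UnitSquareMesh(16, 16)
markers = CellFunction("size_t", mesh)
markers.set_all(0)
for cell in cells(mesh):
    if cell.midpoint().x() < 0.5:
        markers[cell] = 1

# Extract the marked region as a distributed submesh (works in parallel,
# unlike dolfin.SubMesh).
submesh = create_submesh(mesh, markers, 1)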
Example #12
    def get(self, name, relative_timestep=0, compute=True, finalize=False):
        """Get the value of a named field at a particular relative_timestep.

        The relative_timestep is relative to now.
        Values are computed at first request and cached.
        """
        cbc_log(
            20, "Getting: %s, %d (compute=%s, finalize=%s)" %
            (name, relative_timestep, compute, finalize))

        # Check cache
        c = self._cache[relative_timestep]
        data = c.get(name, "N/A")

        # Check if field has been finalized, and if so,
        # return finalized value
        if name in self._finalized and data == "N/A":
            if compute:
                cbc_warning(
                    "Field %s has already been finalized. Will not call compute on field."
                    % name)
            return self._finalized[name]

        # Are we attempting to get value from before update was started?
        # Use constant extrapolation if allowed.
        if abs(relative_timestep) > self._update_all_count and data == "N/A":
            if self._extrapolate:
                cbc_log(
                    20, "Extrapolating %s from %d to %d" %
                    (name, relative_timestep, -self._update_all_count))
                data = self.get(name, -self._update_all_count, compute,
                                finalize)
                c[name] = data
            else:
                raise RuntimeError(
                    "Unable to get data from before update was started. \
                                   (%s, relative_timestep: %d, update_all_count: %d)"
                    % (name, relative_timestep, self._update_all_count))
        # Cache miss?
        if data == "N/A":
            field = self._fields[name]
            if relative_timestep == 0:
                # Ensure before_first_compute is always called once initially
                if self._compute_counts[field.name] == 0:
                    init_data = field.before_first_compute(self.get)
                    self._timer.completed("PP: before first compute %s" % name)
                    if init_data is not None:
                        cbc_warning("Did not expect a return value from \
                                    %s.before_first_compute." %
                                    field.__class__)

                # Compute value
                if name in self._solution:
                    data = self._solution[name]()
                    self._timer.completed("PP: call solution %s" % name)
                else:
                    if compute:
                        data = field.compute(self.get)
                        self._timer.completed("PP: compute %s" % name)
                    """
                    if finalize:
                        finalized_data = field.after_last_compute(self.get)
                        if finalized_data not in [None, "N/A"]:
                            data = finalized_data
                        self._finalized[name] = data
                        self._timer.completed("PP: finalize %s" %name)
                    """
                self._compute_counts[field.name] += 1

                # Copy functions to avoid storing references to the same function objects at each relative_timestep
                # NB! In other cases we assume that the fields return a new object for every compute!
                # Check first if we actually will cache this object by looking at 'time to keep' in the plan
                if self._plan[0][name] > 0:
                    if isinstance(data, Function):
                        # TODO: Use function pooling to avoid costly allocations?
                        data = data.copy(deepcopy=True)

                # Cache it!
                #c[name] = data
            else:
                # Cannot compute missing value from previous relative_timestep,
                # dependency handling must have failed
                raise DependencyException(name,
                                          relative_timestep=relative_timestep)

        if finalize:
            field = self._fields[name]
            finalized_data = field.after_last_compute(self.get)
            if finalized_data not in [None, "N/A"]:
                data = finalized_data
            self._finalized[name] = data
            self._timer.completed("PP: finalize %s" % name)

        c[name] = data
        return data
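
The get callable passed to a field's compute and before_first_compute methods is presumably this method (or a thin wrapper around it); typical calls inside a field, mirroring Examples #1 and #9 above, look like this sketch:

u = get("Velocity")          # current value; computed and cached on first request
u_prev = get("Velocity", -1) # value one timestep back, if cached or extrapolated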
def restriction_map(V, Vb, _all_coords=None, _all_coordsb=None):
    "Return a map between dofs in Vb to dofs in V. Vb's mesh should be a submesh of V's Mesh."
    if V.ufl_element().family(
    ) == "Discontinuous Lagrange" and V.ufl_element().degree() > 0:
        raise RuntimeError(
            "This function does not work for DG-spaces of degree >0 \
                           (several dofs associated with same point in same subspace)."
        )

    if V.ufl_element().family() != "Lagrange":
        cbc_warning("This function is only tested for CG-spaces.")
    assert V.ufl_element().family() == Vb.ufl_element().family(
    ), "ufl elements differ in the two spaces"
    assert V.ufl_element().degree() == Vb.ufl_element().degree(
    ), "ufl elements differ in the two spaces"
    assert V.ufl_element().cell() == Vb.ufl_element().cell(
    ), "ufl elements differ in the two spaces"

    D = V.mesh().geometry().dim()

    # Recursively call this function if V has sub-spaces
    if V.num_sub_spaces() > 0:
        mapping = {}
        if MPI.size(mpi_comm_world()) == 1:
            if _all_coords is None:
                try:
                    # For dolfin versions newer than 1.6.0
                    all_coords = V.tabulate_dof_coordinates().reshape(
                        V.dim(), D)
                    all_coordsb = Vb.tabulate_dof_coordinates().reshape(
                        Vb.dim(), D)
                except:
                    # For 1.6.0 and older
                    all_coords = V.dofmap().tabulate_all_coordinates(
                        V.mesh()).reshape(V.dim(), D)
                    all_coordsb = Vb.dofmap().tabulate_all_coordinates(
                        Vb.mesh()).reshape(Vb.dim(), D)
            else:
                all_coords = _all_coords
                all_coordsb = _all_coordsb
        else:
            all_coords = None
            all_coordsb = None
        for i in range(V.num_sub_spaces()):
            mapping.update(
                restriction_map(V.sub(i), Vb.sub(i), all_coords, all_coordsb))

        return mapping

    dm = V.dofmap()
    dmb = Vb.dofmap()

    N = len(dm.dofs())
    Nb = len(dmb.dofs())

    dofs = dm.dofs()

    # Extract coordinates of dofs
    if dm.is_view():
        if _all_coords is not None:
            coords = _all_coords[V.dofmap().dofs()]
        else:
            try:
                # For dolfin versions newer than 1.6.0
                coords = V.collapse().tabulate_dof_coordinates().reshape(N, D)
            except:
                # For 1.6.0 and older
                coords = V.collapse().dofmap().tabulate_all_coordinates(
                    V.mesh()).reshape(N, D)

        if _all_coordsb is not None:
            coordsb = _all_coordsb[Vb.dofmap().dofs()]
        else:
            try:
                # For dolfin versions newer than 1.6.0
                coordsb = Vb.collapse().tabulate_dof_coordinates().reshape(
                    Nb, D)
            except:
                # For 1.6.0 and older
                coordsb = Vb.collapse().dofmap().tabulate_all_coordinates(
                    Vb.mesh()).reshape(Nb, D)
    else:
        if LooseVersion(dolfin_version()) > LooseVersion("1.6.0"):
            # For dolfin versions newer than 1.6.0
            coords = V.tabulate_dof_coordinates().reshape(N, D)
            coordsb = Vb.tabulate_dof_coordinates().reshape(Nb, D)
        else:
            # For 1.6.0 and older
            coords = V.dofmap().tabulate_all_coordinates(V.mesh()).reshape(
                N, D)
            coordsb = Vb.dofmap().tabulate_all_coordinates(Vb.mesh()).reshape(
                Nb, D)

    # Build KDTree to compute distances from coordinates in base
    kdtree = KDTree(coords)
    eps = 1e-12

    mapping = {}
    request_dofs = np.array([])

    distances, indices = kdtree.query(coordsb)

    for i, subdof in enumerate(dmb.dofs()):
        # Find closest dof in base
        #d, idx = kdtree.query(coordsb[i])
        d, idx = distances[i], indices[i]
        if d < eps:
            # Dof found on this process, add to map
            dof = dofs[idx]
            assert subdof not in mapping
            mapping[subdof] = dof
        else:
            # Search for this dof on other processes
            add_dofs = np.hstack(([subdof], coordsb[i]))
            request_dofs = np.append(request_dofs, add_dofs)

    del distances
    del indices

    # Scatter all dofs not found on current process to all processes
    all_request_dofs = [None] * MPI.size(mpi_comm_world())
    for j in xrange(MPI.size(mpi_comm_world())):
        all_request_dofs[j] = broadcast(request_dofs, j)

    # Re-order all requested dofs
    # Remove items coming from this process
    all_request_dofs[MPI.rank(mpi_comm_world())] = []
    all_request_dofs = np.hstack(all_request_dofs)

    all_request_dofs = all_request_dofs.reshape(
        len(all_request_dofs) / (D + 1), D + 1)
    all_request_dofs = dict(
        zip(all_request_dofs[:, 0], all_request_dofs[:, 1:]))

    # Search this process for all dofs not found on same process as subdof
    for subdof, coordsbi in all_request_dofs.items():
        subdof = int(subdof)

        # Find closest dof in base
        d, idx = kdtree.query(coordsbi)
        if d < eps:
            # Dof found on this process, add to map
            dof = dofs[idx]
            assert subdof not in mapping
            mapping[subdof] = dof
    return mapping
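
A minimal usage sketch for restriction_map, assuming u lives in a CG space V on the full mesh and ub in the matching space Vb on a submesh of it (get_set_vector is used as in Example #9):

mapping = restriction_map(V, Vb)
keys = np.array(list(mapping.keys()), dtype=np.intc)      # dofs in Vb
values = np.array(list(mapping.values()), dtype=np.intc)  # dofs in V
temp = np.zeros(len(keys), dtype=np.float_)

# Copy the restricted dof values from u (on V) into ub (on Vb).
get_set_vector(ub.vector(), keys, u.vector(), values, temp)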