def get_dom_bdr_choice(self, mesh):
    """Return the selected domains, their boundaries, and internal boundaries.

    Uses ``mesh.extended_connectivity`` (``vol2surf``/``surf2line``/
    ``line2vert`` depending on ``self.dim``) together with
    ``self.sel_index`` to build three unique lists:

    * domain indices chosen by the selection,
    * boundary indices belonging to those domains,
    * boundaries referenced by more than one selected domain
      (i.e. internal boundaries).

    In parallel runs the three selections are merged across all MPI ranks
    before being returned.
    """
    from collections import defaultdict

    def pick_internal_bdr(conn, domains):
        # A boundary counted more than once among the selected domains
        # separates two of them -> internal boundary.
        count = defaultdict(int)
        for dom in conn:
            if dom in domains:
                for bdr in conn[dom]:
                    count[bdr] += 1
        return [bdr for bdr in count if count[bdr] > 1]

    key_by_dim = {3: 'vol2surf', 2: 'surf2line', 1: 'line2vert'}
    assert self.dim in key_by_dim, "not supported"

    conn = mesh.extended_connectivity[key_by_dim[self.dim]]
    if conn is None:
        return [], [], []

    dom_choice = list(conn)
    bdr_choice = []
    for dom in conn:
        bdr_choice.extend(list(conn[dom]))

    if self.sel_index[0] != 'all':
        dom_choice = [int(x) for x in self.sel_index]
        parts = [conn[int(x)] if int(x) in conn else []
                 for x in self.sel_index]
        bdr_choice = list(np.unique(np.hstack(parts)).astype(int))

    internal_bdr = pick_internal_bdr(conn, dom_choice)

    from petram.mfem_config import use_parallel
    if use_parallel:
        from mfem.common.mpi_debug import nicePrint
        from petram.helper.mpi_recipes import allgather
        dom_choice = list(set(sum(allgather(dom_choice), [])))
        bdr_choice = list(set(sum(allgather(bdr_choice), [])))
        internal_bdr = list(set(sum(allgather(internal_bdr), [])))

    # return unique list
    return (list(set(dom_choice)), list(set(bdr_choice)),
            list(set(internal_bdr)))
def sharekeys(self):
    """Make every MPI rank hold the same set of keys.

    Serial: record the unique local keys in ``self._gkey``.  Parallel:
    gather keys from all ranks, insert any key this rank is missing as an
    empty array of ``self.dtype``, and record the global unique key list
    in ``self._gkey``.  Returns ``self`` for chaining.
    """
    local_keys = list(self.keys())
    if not hasMPI:
        self._gkey = np.unique(local_keys)
        return self

    merged = sum(allgather(local_keys), [])
    global_keys = np.unique(merged)

    # preserve the freeze flag across the inserts below
    saved_freeze = self._freezekey
    for key in global_keys:
        if key not in self:
            self[key] = np.atleast_1d([]).astype(self.dtype)
    self._freezekey = saved_freeze

    self._gkey = global_keys
    return self
def _gather_shared_vertex(mesh, u, shared_info, *iverts):
    # u_own, iv1, iv2... = gather_shared_vertex(mesh, u, ld, md, iv1, iv2...)
    #
    #   u_own : unique vertex id ownd by a process
    #   shared_info : shared data infomation
    #   iv1, iv2, ...: array of vertex is after overwriting shadow vertex
    #                  to a real one, which is owned by other process
    #
    # process shared vertex
    #   1) a vertex in sub-volume may be shadow
    #   2) the real one may not a part of sub-volume on the master node
    #   3) we always use the real vertex
    #      1) shadow vertex index is over-written to a real vertex
    #      2) make sure that the real one is added to sub-volume mesh obj.
    #
    # NOTE(review): relies on module-level names allgather, gather_vector,
    # myid and nprc (MPI helpers) being in scope — confirm against the
    # module's imports.

    # Global vertex numbering: rank r owns ids in [offset[r], offset[r+1]).
    offset = np.hstack([0, np.cumsum(allgather(mesh.GetNV()))])
    iverts = [iv + offset[myid] for iv in iverts]
    u = u + offset[myid]  # -> global numbering
    ld, md = shared_info  # local / master shared-entity tables
    # mv_list[r]: master vertex ids (owned by rank r) referenced here.
    mv_list = [[] for i in range(nprc)]
    for key in ld.keys():
        mid, g_in_master = key
        if mid != myid:
            # Group mastered on another rank: rewrite local shadow ids (lv)
            # into the master's real ids (mv) inside every ivert array.
            for lv, mv in zip(ld[key][0], md[key][0]):
                ic = 0
                for iv in iverts:
                    iii = np.where(iv == lv)[0]
                    ic = ic + len(iii)
                    if len(iii) > 0:
                        iv[iii] = mv
                if ic > 0:
                    # The real vertex must exist on its owner rank `mid`.
                    mv_list[mid].append(mv)
            # Shadow vertices are not owned here; drop them from u.
            u = u[np.in1d(u, ld[key][0], invert=True)]
    for i in range(nprc):
        # Send master-vertex ids referenced remotely to their owner rank.
        mvv = gather_vector(np.atleast_1d(mv_list[i]).astype(int), root=i)
        if i == myid:
            # Real vertices referenced elsewhere but missing locally get
            # appended so the sub-volume mesh contains them.
            missing = np.unique(mvv[np.in1d(mvv, u, invert=True)])
            if len(missing) != 0:
                print("adding (vertex)", missing)
                u = np.hstack((u, missing))
    # Back to local numbering; sorted for deterministic ordering.
    u_own = np.sort(u - offset[myid])
    return [u_own] + list(iverts)
## u_own, iv1, iv2 =
def find_corner(mesh):
    '''
    For 2D geometry
        find line (boundary between two bdr_attribute) and
        corner of lines

    Returns (line2vert, line2edge, vert2vert):
        line2vert : line index -> list of corner vertex indices
        line2edge : GlobalNamedList, boundary attribute -> edge ids
        vert2vert : corner index -> local MFEM vertex id
    '''
    use_parallel = hasattr(mesh, "GroupNVertices")
    if use_parallel:
        from mpi4py import MPI
        myid = MPI.COMM_WORLD.rank
        nprc = MPI.COMM_WORLD.size
        comm = MPI.COMM_WORLD
        from mfem.common.mpi_debug import nicePrint, niceCall
        from petram.helper.mpi_recipes import allgather, allgather_vector, gather_vector
        from petram.mesh.mesh_utils import distribute_shared_entity
        if not hasattr(mesh, "shared_info"):
            mesh.shared_info = distribute_shared_entity(mesh)
    else:
        myid = 0
        nprc = 1
    ndim = mesh.Dimension()
    sdim = mesh.SpaceDimension()
    ne = mesh.GetNEdges()
    # NOTE(review): the message mentions 3D but the check requires a 2D
    # mesh — looks copy-pasted from find_edge_corner; confirm.
    assert ndim == 2, "find_edge_corner is for 3D mesh"
    get_edges = mesh.GetElementEdges
    get_attr = mesh.GetAttribute
    iattr = mesh.GetAttributeArray()  # min of this array is 1
    nattr = 0 if iattr.size == 0 else np.max(iattr)
    nb = mesh.GetNE()
    nbe = mesh.GetNBE()
    if use_parallel:
        nbe = sum(allgather(nbe))
    if nbe == 0:
        return {}, {}, {}
    if use_parallel:
        # Global numbering offsets for edges/faces/vertices per rank.
        offset = np.hstack([0, np.cumsum(allgather(mesh.GetNEdges()))])
        offsetf = np.hstack([0, np.cumsum(allgather(mesh.GetNFaces()))])
        offsetv = np.hstack([0, np.cumsum(allgather(mesh.GetNV()))])
        myoffset = offset[myid]
        myoffsetf = offsetf[myid]
        myoffsetv = offsetv[myid]
        nattr = max(allgather(nattr))
        ne = sum(allgather(mesh.GetNEdges()))
    else:
        myoffset = np.array(0, dtype=int)
        myoffsetf = np.array(0, dtype=int)
        myoffsetv = np.array(0, dtype=int)
    if mesh.GetNBE() == 0:
        # some parallel node may have zero boundary
        battrs = []
        iedges = np.array([], dtype=int)
    else:
        battrs = mesh.GetBdrAttributeArray()
        iedges = np.hstack([
            mesh.GetBdrElementEdgeIndex(ibdr) for ibdr in range(mesh.GetNBE())
        ]).astype(int, copy=False)
    line2edge = GlobalNamedList()
    line2edge.setlists(battrs, iedges)
    if use_parallel:
        ld, md = mesh.shared_info
        iedges = iedges + myoffset  # -> global edge numbering
    if use_parallel:
        # Drop shadow (non-owned shared) edges from this rank's list.
        for key2 in ld:
            if key2[0] == myid:
                continue
            iii = np.in1d(iedges, ld[key2][1], invert=True)
            # NOTE(review): iii is a boolean mask of len(iedges), so
            # len(iii) == 0 only when iedges is empty — possibly meant to
            # test whether anything was removed; confirm.
            if len(iii) == 0:
                continue
            iedges = iedges[iii]
            battrs = battrs[iii]
    line2realedge = GlobalNamedList()
    line2realedge.setlists(battrs, iedges)
    line2realvert = GlobalNamedList()
    for key in line2realedge:
        data = np.hstack([
            mesh.GetEdgeVertices(i - myoffset) + myoffsetv
            for i in line2realedge[key]
        ])
        if use_parallel:
            # Replace shadow vertex ids by the master's real ids.
            for key2 in ld:
                if key2[0] == myid:
                    continue
                for lv, mv in zip(ld[key2][0], md[key2][0]):
                    iii = np.where(data == lv)[0]
                    data[iii] = mv
        line2realvert[key] = data
    line2realvert.sharekeys().gather(nprc, distribute=True)
    corners = GlobalNamedList()
    for key in line2realvert:
        # A vertex used exactly once on a line is an endpoint (corner).
        seen = defaultdict(int)
        for iiv in line2realvert[key]:
            seen[iiv] += 1
        corners[key] = [kk for kk in seen if seen[kk] == 1]
    sorted_key = corners.sharekeys().globalkeys
    corners.allgather()
    u_own = np.unique(
        np.hstack([corners[key] for key in corners]).astype(int, copy=False))
    if use_parallel:
        # Keep only vertices owned by this rank.
        idx = np.logical_and(u_own >= offsetv[myid], u_own < offsetv[myid + 1])
        u_own = u_own[idx]
    if len(u_own) > 0:
        vtx = np.hstack([mesh.GetVertexArray(i - myoffsetv) for i in u_own])
    else:
        vtx = np.atleast_1d([])
    if use_parallel:
        vtx = gather_vector(vtx)
        u_own = gather_vector(u_own)
    # sort vertex (by coordinates) so corner numbering is deterministic
    if myid == 0:
        vtx = vtx.reshape(-1, sdim)
        tmp = sorted([(k, tuple(x)) for k, x in enumerate(vtx)],
                     key=lambda x: x[1])
        if len(tmp) > 0:
            vtx = np.vstack([x[1] for x in tmp])
            u_own = np.hstack([[u_own[x[0]] for x in tmp]]).astype(int)
        ivert = np.arange(len(vtx), dtype=int) + 1
    else:
        u_own = np.atleast_1d([]).astype(int)
        ivert = np.atleast_1d([]).astype(int)
    if use_parallel:
        #if myid != 0:
        #    u_own = None; vtx = None
        u_own = comm.bcast(u_own)
        ivert = np.arange(len(u_own), dtype=int) + 1
        # Map master vertex ids back to this rank's local shadow ids.
        for key in ld:
            if key[0] == myid:
                continue
            for lv, mv in zip(ld[key][0], md[key][0]):
                iii = np.where(u_own == mv)[0]
                u_own[iii] = lv
        idx = np.logical_and(u_own >= offsetv[myid], u_own < offsetv[myid + 1])
        u_own = u_own[idx]
        # NOTE(review): vtx is broadcast twice; the first bcast result is
        # immediately overwritten — looks redundant, confirm intent.
        vtx = comm.bcast(vtx)
        vtx = comm.bcast(vtx)[idx.flatten()]
        ivert = ivert[idx]
    vert2vert = {iv: iu - myoffsetv for iv, iu in zip(ivert, u_own)}
    # mapping line index to vertex index (not MFFEM vertex id)
    line2vert = {}
    #nicePrint(corners)
    corners.bcast(nprc, distributed=True)
    for j, key in enumerate(sorted_key):
        data = corners[key]
        if use_parallel:
            # Convert master ids to local ids and keep owned vertices only.
            for key2 in ld:
                if key2[0] == myid:
                    continue
                for lv, mv in zip(ld[key2][0], md[key2][0]):
                    iii = np.where(data == mv)[0]
                    data[iii] = lv
            idx = np.logical_and(data >= offsetv[myid],
                                 data < offsetv[myid + 1])
            data = data[idx]
        data = list(data - myoffsetv)
        line2vert[j + 1] = [k for k in vert2vert if vert2vert[k] in data]
    if debug:
        g = GlobalNamedList(line2vert)
        g.sharekeys()
        gg = g.gather(nprc, overwrite=False).unique()
        if myid == 0:
            print(gg)
        for i in range(nprc):
            if use_parallel:
                comm.barrier()
            if myid == i:
                for k in vert2vert:
                    print(myid, k, mesh.GetVertexArray(vert2vert[k]))
        if use_parallel:
            comm.barrier()
    return line2vert, line2edge, vert2vert
def find_edge_corner(mesh):
    '''
    For 3D geometry
        find line (boundary between two bdr_attribute) and
        corner of lines

    Returns (surf2line, line2vert, line2edge, vert2vert):
        surf2line : surface attribute -> list of line indices
        line2vert : line index -> list of corner vertex indices
        line2edge : line index -> list of local edge ids
        vert2vert : corner index -> local MFEM vertex id
    '''
    use_parallel = hasattr(mesh, "GroupNVertices")
    if use_parallel:
        from mpi4py import MPI
        myid = MPI.COMM_WORLD.rank
        nprc = MPI.COMM_WORLD.size
        comm = MPI.COMM_WORLD
        from mfem.common.mpi_debug import nicePrint, niceCall
        from petram.helper.mpi_recipes import allgather, allgather_vector, gather_vector
        from petram.mesh.mesh_utils import distribute_shared_entity
        if not hasattr(mesh, "shared_info"):
            mesh.shared_info = distribute_shared_entity(mesh)
    else:
        myid = 0
        nprc = 1
    ndim = mesh.Dimension()
    sdim = mesh.SpaceDimension()
    ne = mesh.GetNEdges()
    assert ndim == 3, "find_edge_corner is for 3D mesh"
    # 3D mesh
    get_edges = mesh.GetBdrElementEdges
    get_attr = mesh.GetBdrAttribute
    iattr = mesh.GetBdrAttributeArray()  # min of this array is 1
    nattr = 0 if iattr.size == 0 else np.max(iattr)
    nb = mesh.GetNBE()
    if mesh.GetNBE() == 0 and nprc == 1:
        return {}, {}, {}, {}
    if use_parallel:
        # Global numbering offsets for edges/faces/vertices per rank.
        offset = np.hstack([0, np.cumsum(allgather(mesh.GetNEdges()))])
        offsetf = np.hstack([0, np.cumsum(allgather(mesh.GetNFaces()))])
        offsetv = np.hstack([0, np.cumsum(allgather(mesh.GetNV()))])
        myoffset = offset[myid]
        myoffsetf = offsetf[myid]
        myoffsetv = offsetv[myid]
        nattr = max(allgather(nattr))
        ne = sum(allgather(mesh.GetNEdges()))
    else:
        myoffset = np.array(0, dtype=int)
        myoffsetf = np.array(0, dtype=int)
        myoffsetv = np.array(0, dtype=int)
    edges = defaultdict(list)
    iedges = np.arange(nb, dtype=int)
    if use_parallel:
        # eliminate slave faces from consideration
        iface = np.array([mesh.GetBdrElementEdgeIndex(i) for i in iedges],
                         dtype=int) + myoffsetf
        mask = np.array([True] * len(iface), dtype=bool)
        ld, md = mesh.shared_info
        for key in ld.keys():
            mid, g_in_master = key
            if mid == myid:
                continue
            # NOTE(review): compares local bdr-element indices against
            # ld[key][2] (shared face ids); iface is computed above but
            # unused here — confirm whether iface was intended.
            iii = np.in1d(iedges, ld[key][2], invert=True)
            mask = np.logical_and(mask, iii)
        iedges = iedges[mask]
        # nicePrint(len(iedges)) np 1,2,4 gives 900... ok
    # Collect, per boundary attribute, all edges of its boundary elements.
    for i in iedges:
        ie, io = get_edges(i)
        ie += myoffset
        iattr = get_attr(i)
        edges[iattr].extend(list(ie))
    if use_parallel:
        # collect edges using master edge number
        # and gather it to a node.
        edgesc = {}
        ld, md = mesh.shared_info
        for j in range(1, nattr + 1):
            if j in edges:
                data = np.array(edges[j], dtype=int)
                for key in ld.keys():
                    mid, g_in_master = key
                    if mid == myid:
                        continue
                    for le, me in zip(ld[key][1], md[key][1]):
                        iii = np.where(data == le)[0]
                        data[iii] = me
            else:
                data = np.atleast_1d([]).astype(int)
            # Attribute j is handled on rank j % nprc.
            data = gather_vector(data, root=j % nprc)
            if data is not None:
                edgesc[j] = data
        edges = edgesc
    # for each iattr real edge appears only once
    for key in edges.keys():
        seen = defaultdict(int)
        for x in edges[key]:
            seen[x] += 1
        edges[key] = [k for k in seen if seen[k] == 1]
    #nicePrint('Num edges',
    nedge = sum([len(edges[k]) for k in edges])
    # Build (edge, attribute) pairs: M = edge ids, N = 0-based attributes.
    if nedge != 0:
        N = np.hstack(
            [np.zeros(len(edges[k]), dtype=int) + k - 1 for k in edges.keys()])
        M = np.hstack([np.array(edges[k]) for k in edges.keys()])
    else:
        N = np.atleast_1d([]).astype(int)
        M = np.atleast_1d([]).astype(int)
    M = M.astype(int, copy=False)
    N = N.astype(int, copy=False)
    if use_parallel:
        # send attribute to owner of edges
        for j in range(nprc):
            idx = np.logical_and(M >= offset[j], M < offset[j + 1])
            Mpart = M[idx]
            Npart = N[idx]
            Mpart = gather_vector(Mpart, root=j)
            Npart = gather_vector(Npart, root=j)
            if j == myid:
                M2, N2 = Mpart, Npart
        M, N = M2, N2
    #nicePrint('unique edge', len(np.unique(M)))
    #nicePrint('N', len(N))
    # Sparse table: row = edge, col = attribute, entry = occurrence count.
    data = M * 0 + 1
    table1 = coo_matrix((data, (M, N)), shape=(ne, nattr), dtype=int)
    csr = table1.tocsr()
    #embeded surface only touches to one iattr
    idx = np.where(np.diff(csr.indptr) >= 1)[0]
    csr = csr[idx, :]
    # this is true bdr edges.
    bb_edges = defaultdict(list)
    indptr = csr.indptr
    indices = csr.indices
    # Group edges by the (sorted) tuple of attributes touching them.
    for i in range(csr.shape[0]):
        idxs = tuple(sorted(indices[indptr[i]:indptr[i + 1]] + 1))
        bb_edges[idxs].append(idx[i])
    bb_edges.default_factory = None
    # sort keys (= attribute set)
    keys = list(bb_edges)
    if use_parallel:
        keys = comm.gather(keys)
        if myid == 0:
            keys = sum(keys, [])
    sorted_key = None
    if myid == 0:
        sorted_key = list(set(keys))
        sorted_key.sort(key=lambda x: (len(x), x))
    if use_parallel:
        sorted_key = comm.bcast(sorted_key, root=0)
    bb_edgess = OrderedDict()
    for k in sorted_key:
        if k in bb_edges:
            bb_edgess[k] = bb_edges[k]
        else:
            bb_edgess[k] = []  # in parallel, put empty so that key order is kept
    bb_edges = bb_edgess
    '''
    res = []
    for key in sorted_key:
        tmp = allgather(len(bb_edges[key]))
        if myid == 0:
            res.append((key, sum(tmp)))
    if myid == 0:
        print res
    '''
    # at this point each node has its own edges populated in bb_edges
    # (no shadow)
    ivert = {}
    for k in sorted_key:
        if len(bb_edges[k]) > 0:
            ivert[k] = np.hstack([
                mesh.GetEdgeVertices(i - myoffset) + myoffsetv
                for i in np.unique(bb_edges[k])
            ]).astype(int)
        else:
            ivert[k] = np.atleast_1d([]).astype(int)
    if use_parallel:
        # convert shadow vertex to real
        for k in sorted_key:
            data = ivert[k]
            for key in ld:
                if key[0] == myid:
                    continue
                for le, me in zip(ld[key][0], md[key][0]):
                    iii = np.where(data == le)[0]
                    data[iii] = me
            ivert[k] = data
        ivertc = {}
        for j, k in enumerate(sorted_key):
            data = gather_vector(ivert[k], root=j % nprc)
            if data is not None:
                ivertc[k] = data
        ivert = ivertc
    corners = {}
    for key in ivert:
        # A vertex used exactly once on a line is an endpoint (corner).
        seen = defaultdict(int)
        for iiv in ivert[key]:
            seen[iiv] += 1
        corners[key] = [kk for kk in seen if seen[kk] == 1]
    if len(corners) == 0:
        u = np.atleast_1d([]).astype(int)
    else:
        u = np.unique(np.hstack([corners[key]
                                 for key in corners])).astype(int, copy=False)
    # collect vertex on each node and gather to node 0
    u_own = u
    if use_parallel:
        u = np.unique(allgather_vector(u))
        u_own = u.copy()
        for key in ld:
            if key[0] == myid:
                continue
            for lv, mv in zip(ld[key][0], md[key][0]):
                iii = np.where(u == mv)[0]
                u[iii] = lv
        idx = np.logical_and(u >= offsetv[myid], u < offsetv[myid + 1])
        u = u[idx]  # u include shared vertex
        idx = np.logical_and(u_own >= offsetv[myid], u_own < offsetv[myid + 1])
        u_own = u_own[idx]  # u_own is only owned vertex
    #nicePrint('u_own',mesh.GetNV(),",", u_own)
    if len(u_own) > 0:
        vtx = np.vstack([mesh.GetVertexArray(i - myoffsetv) for i in u_own])
    else:
        vtx = np.atleast_1d([]).reshape(-1, sdim)
    if use_parallel:
        u_own = gather_vector(u_own)
        vtx = gather_vector(vtx.flatten())
    # sort vertex (by coordinates) so corner numbering is deterministic
    if myid == 0:
        vtx = vtx.reshape(-1, sdim)
        #print('vtx shape', vtx.shape)
        tmp = sorted([(k, tuple(x)) for k, x in enumerate(vtx)],
                     key=lambda x: x[1])
        if len(tmp) > 0:
            vtx = np.vstack([x[1] for x in tmp])
            u_own = np.hstack([[u_own[x[0]] for x in tmp]]).astype(int)
        ivert = np.arange(len(vtx), dtype=int) + 1
    else:
        vtx = np.atleast_1d([]).astype(float)
        u_own = np.atleast_1d([]).astype(int)
        # NOTE(review): the line above is duplicated in the original —
        # harmless but likely an editing leftover.
        u_own = np.atleast_1d([]).astype(int)
    if use_parallel:
        #if myid != 0:
        #    u_own = None; vtx = None
        u_own = comm.bcast(u_own)
        ivert = np.arange(len(u_own), dtype=int) + 1
        # Map master vertex ids back to this rank's local shadow ids.
        for key in ld:
            if key[0] == myid:
                continue
            for lv, mv in zip(ld[key][0], md[key][0]):
                iii = np.where(u_own == mv)[0]
                u_own[iii] = lv
        idx = np.logical_and(u_own >= offsetv[myid], u_own < offsetv[myid + 1])
        u_own = u_own[idx]
        ivert = ivert[idx]
        #vtx = comm.bcast(vtx)
        #vtx = comm.bcast(vtx)[idx.flatten()]
    vert2vert = {iv: iu - myoffsetv for iv, iu in zip(ivert, u_own)}
    #nicePrint('vert2vert', vert2vert)
    # mapping line index to vertex index (not MFFEM vertex id)
    line2vert = {}
    #nicePrint(corners)
    for j, key in enumerate(sorted_key):
        data = corners[key] if key in corners else None
        if use_parallel:
            # Owner rank (j % nprc) broadcasts the corner list for line j.
            data = comm.bcast(data, root=j % nprc)
            data = np.array(data, dtype=int)
            for key2 in ld:
                if key2[0] == myid:
                    continue
                for lv, mv in zip(ld[key2][0], md[key2][0]):
                    iii = np.where(data == mv)[0]
                    data[iii] = lv
            idx = np.logical_and(data >= offsetv[myid],
                                 data < offsetv[myid + 1])
            data = data[idx]
        else:
            data = np.array(data, dtype=int)
        data = list(data - myoffsetv)
        line2vert[j + 1] = [k for k in vert2vert if vert2vert[k] in data]
    # finish-up edge data
    if use_parallel:
        # distribute edges, convert (add) from master to local
        # number
        for attr_set in bb_edges:
            data = sum(allgather(bb_edges[attr_set]), [])
            data = np.array(data, dtype=int)
            for key in ld:
                if key[0] == myid:
                    continue
                for le, me in zip(ld[key][1], md[key][1]):
                    iii = np.where(data == me)[0]
                    data[iii] = le
            idx = np.logical_and(data >= offset[myid], data < offset[myid + 1])
            data = data[idx]
            bb_edges[attr_set] = list(data - myoffset)
        attrs = list(edges)
        attrsa = np.unique(sum(allgather(attrs), []))
        for a in attrsa:
            if a in attrs:
                data = np.array(edges[a], dtype=int)
            else:
                data = np.atleast_1d([]).astype(int)
            data = allgather_vector(data)
            for key in ld:
                if key[0] == myid:
                    continue
                for le, me in zip(ld[key][1], md[key][1]):
                    iii = np.where(data == me)[0]
                    data[iii] = le
            idx = np.logical_and(data >= offset[myid], data < offset[myid + 1])
            data = data[idx]
            edges[a] = list(data - myoffset)
    line2edge = {}
    for k, attr_set in enumerate(sorted_key):
        if attr_set in bb_edges:
            line2edge[k + 1] = bb_edges[attr_set]
        else:
            line2edge[k + 1] = []
    '''
    # debug find true (non-shadow) edges
    line2edge_true = {}
    for k, attr_set in enumerate(sorted_key):
        if attr_set in bb_edges:
            data = np.array(bb_edges[attr_set], dtype=int)
            for key in ld:
                if key[0] == myid:
                    continue
                iii = np.in1d(data+myoffset, ld[key][1], invert = True)
                data = data[iii]
            line2edge_true[k+1] = data
        else:
            line2edge_true[k+1] = []
    nicePrint([sum(allgather(len(line2edge_true[key]))) for key in line2edge])
    '''
    surf2line = {k + 1: [] for k in range(nattr)}
    for k, attr_set in enumerate(sorted_key):
        for a in attr_set:
            surf2line[a].append(k + 1)
    if debug:
        g = GlobalNamedList(line2vert)
        g.sharekeys()
        gg = g.gather(nprc, overwrite=False).unique()
        if myid == 0:
            print("debug (gathered line2vert)", gg)
    return surf2line, line2vert, line2edge, vert2vert
def distribute_shared_entity(pmesh):
    '''
    distribute entitiy numbering in master (owner) process

    Returns (local_data, master_data), both keyed by
    (master_rank, master_group):
        local_data[key]  = (vertex ids, edge ids, face ids) in this rank's
                           global numbering
        master_data[key] = the same entities in the master rank's global
                           numbering (broadcast from the owner)
    '''
    from mpi4py import MPI
    myid = MPI.COMM_WORLD.rank
    nprc = MPI.COMM_WORLD.size
    comm = MPI.COMM_WORLD
    from mfem.common.mpi_debug import nicePrint, niceCall
    from petram.helper.mpi_recipes import allgather, allgather_vector, gather_vector
    master_entry = []
    local_data = {}
    master_data = {}
    # Older mfem wrappers expose GroupFace; newer ones split faces into
    # triangles and quadrilaterals.
    MFEM3 = hasattr(pmesh, 'GroupFace')
    # Global numbering offsets per rank for vertices/edges/faces.
    offset_v = np.hstack([0, np.cumsum(allgather(pmesh.GetNV()))])
    offset_e = np.hstack([0, np.cumsum(allgather(pmesh.GetNEdges()))])
    offset_f = np.hstack([0, np.cumsum(allgather(pmesh.GetNFaces()))])
    ng = pmesh.GetNGroups()
    from mfem.par import intp

    # Thin wrappers converting the SWIG output-pointer API to return values.
    def GroupEdge(j, iv):
        edge = intp()
        o = intp()
        pmesh.GroupEdge(j, iv, edge, o)
        return edge.value()

    def GroupFace(j, iv):
        face = intp()
        o = intp()
        pmesh.GroupFace(j, iv, face, o)
        return face.value()

    def GroupTriangle(j, iv):
        face = intp()
        o = intp()
        pmesh.GroupTriangle(j, iv, face, o)
        return face.value()

    def GroupQuadrilateral(j, iv):
        face = intp()
        o = intp()
        pmesh.GroupQuadrilateral(j, iv, face, o)
        return face.value()

    for j in range(ng):
        if j == 0:
            continue  # group 0 is the "local only" group; nothing shared
        nv = pmesh.GroupNVertices(j)
        sv = np.array([pmesh.GroupVertex(j, iv) for iv in range(nv)])
        ne = pmesh.GroupNEdges(j)
        se = np.array([GroupEdge(j, iv) for iv in range(ne)])
        if MFEM3:
            nf = pmesh.GroupNFaces(j)
            sf = np.array([GroupFace(j, iv) for iv in range(nf)])
        else:
            nt = pmesh.GroupNTriangles(j)
            nq = pmesh.GroupNQuadrilaterals(j)
            sf = np.array([GroupTriangle(j, iv) for iv in range(nt)] +
                          [GroupQuadrilateral(j, iv) for iv in range(nq)])
        # Shift to this rank's global numbering.
        data = (sv + offset_v[myid], se + offset_e[myid], sf + offset_f[myid])
        local_data[(pmesh.gtopo.GetGroupMasterRank(j),
                    pmesh.gtopo.GetGroupMasterGroup(j))] = data
        if pmesh.gtopo.IAmMaster(j):
            # This rank owns the group: keep data as the master copy and
            # remember the entry so it can be broadcast below.
            master_entry.append((
                myid,
                j,
            ))
            '''
            mv = sv + offset_v[myid]
            me = se + offset_e[myid]
            mf = sf + offset_f[myid]
            data = (mv, me, mf)
            '''
        else:
            data = None
        master_data[(pmesh.gtopo.GetGroupMasterRank(j),
                     pmesh.gtopo.GetGroupMasterGroup(j))] = data
    # Share the list of (master_rank, group) entries with every rank.
    master_entry = comm.gather(master_entry)
    if myid == 0:
        master_entry = sum(master_entry, [])
    master_entry = comm.bcast(master_entry)
    for entry in master_entry:
        master_id = entry[0]
        if master_id == myid:
            data = master_data[entry]
        else:
            data = None
        # Owner broadcasts its numbering; ranks sharing the group store it.
        data = comm.bcast(data, root=master_id)
        if entry in master_data:
            master_data[entry] = data
    return local_data, master_data
def find_loop_par(pmesh, *face):
    '''
    find_loop_ser(mesh, 1)  # loop around boundary index = 1
    find_loop_ser(mesh, [1, 2, 3])  # loop around boundary made by union
                                    # of index = 1,2,3

    Parallel version: returns (iedges_l, signs_l), the local edge indices
    forming the loop around the selected boundary attribute(s) on this
    rank, together with their orientation signs.  Edges appearing twice
    (interior to the union of faces) are dropped; only edges owned by this
    rank are returned, in local edge numbering.
    '''
    import mfem.par as mfem
    from mpi4py import MPI
    myid = MPI.COMM_WORLD.rank
    nprc = MPI.COMM_WORLD.size
    comm = MPI.COMM_WORLD
    if nprc == 1:
        return find_loop_ser(pmesh, face)
    from mfem.common.mpi_debug import nicePrint
    from petram.helper.mpi_recipes import allgather, allgather_vector, gather_vector

    battrs = pmesh.GetBdrAttributeArray()
    face = face[0]
    faces = np.atleast_1d(face)
    # boundary elements whose attribute is in the requested set
    bidx = np.where(np.in1d(battrs, faces))[0]

    # global edge numbering: rank r owns [offset_e[r], offset_e[r+1])
    offset_e = np.hstack([0, np.cumsum(allgather(pmesh.GetNEdges()))])

    edges = [pmesh.GetBdrElementEdges(i) for i in bidx]
    iedges = np.array(sum([e1[0] for e1 in edges], []),
                      dtype=int) + offset_e[myid]
    dirs = np.array(sum([e1[1] for e1 in edges], []), dtype=int)

    from petram.mesh.mesh_utils import distribute_shared_entity
    shared_info = distribute_shared_entity(pmesh)

    keys = shared_info[0].keys()
    local_edges = np.hstack([shared_info[0][key][1] for key in keys])
    global_edges = np.hstack([shared_info[1][key][1] for key in keys])

    # Rewrite shared edges to master numbering; own_edges keeps the edges
    # this rank is responsible for (not shared, or shared and mastered here).
    own_edges = []
    for k, ie in enumerate(iedges):
        iii = np.where(local_edges == ie)[0]
        if len(iii) != 0:
            if ie == global_edges[iii[0]]:
                own_edges.append(ie)
            iedges[k] = global_edges[iii[0]]
        else:
            own_edges.append(ie)
    # BUGFIX: own_edges was left as a plain Python list; `own_edges == ie`
    # below would then be a scalar False (list vs int comparison), so
    # np.where always returned an empty index array and no local edge was
    # ever selected.  Convert to an ndarray so the comparison is
    # elementwise.
    own_edges = np.asarray(own_edges, dtype=int)
    #nicePrint("iedges", iedges)

    # Count each edge over all ranks; loop edges appear exactly once.
    iedges_all = allgather_vector(iedges)
    dirs = allgather_vector(dirs)
    seens = defaultdict(int)
    seendirs = defaultdict(int)
    for k, ie in enumerate(iedges_all):
        seens[ie] = seens[ie] + 1
        seendirs[ie] = dirs[k]
    seens.default_factory = None
    idx = []
    signs = []
    for k in seens.keys():
        if seens[k] == 1:
            idx.append(k)
            signs.append(seendirs[k])
    iedges_g = np.array(idx)  # here idx is global numbering
    #nicePrint("global_index", idx, signs)
    #nicePrint("own_edges", own_edges)

    # Keep only the loop edges owned by this rank.
    iedges_l = []
    signs_l = []
    for k, ie in enumerate(iedges_g):
        iii = np.where(own_edges == ie)[0]
        if len(iii) != 0:
            iedges_l.append(ie)
            signs_l.append(signs[k])
    iedges_l = np.array(iedges_l) - offset_e[myid]  # back to local numbering
    signs_l = np.array(signs_l)
    #nicePrint("local_index", iedges_l, signs_l)
    return iedges_l, signs_l