def _create_subnetworks(network, subgraph_ids, coarse_graph, mpicomm):
    """
    Create subnetworks for each processor given a pore network and partition information.

    Parameters
    ----------
    network: PoreNetwork
    subgraph_ids: ndarray
        Mapping from vertex id to a subgraph id.
    coarse_graph: igraph.Graph
        Graph consisting of subnetworks as vertices, with edges present between two
        subnetworks if they are adjacent. coarse_graph must have a ["proc_id"] property.
    mpicomm: mpi4py communicator

    Returns
    -------
    subnetworks: dict
        Dictionary with subnetwork ids as keys and references to subnetworks as values.
    """
    my_id = mpicomm.rank
    num_proc = mpicomm.size

    if my_id == 0:
        num_subnetworks = np.max(subgraph_ids) + 1
        proc_ids = coarse_graph.vs['proc_id']
        req = dict()

        # Group vertex indices by the subgraph they belong to.
        indices_of_subgraph = defaultdict(list)
        for i, subgraph_id in enumerate(subgraph_ids):
            indices_of_subgraph[subgraph_id].append(i)
        indices_of_subgraph = dict(indices_of_subgraph)

        for dest_id in xrange(num_proc):
            send_dict = dict()
            for i in xrange(num_subnetworks):
                if proc_ids[i] == dest_id:
                    pi_list = indices_of_subgraph[i]
                    send_dict[i] = SubNetwork(network, pi_list)

                    # Each subnetwork must consist of a single connected component.
                    n_components = len(network_to_igraph(send_dict[i]).components())
                    assert n_components == 1, n_components

            if dest_id == 0:
                my_subnetworks = send_dict
            else:
                req[dest_id] = mpicomm.isend(send_dict, dest=dest_id)

        for dest_id in req:
            req[dest_id].wait()

    if my_id != 0:
        my_subnetworks = mpicomm.recv(source=0)

    return my_subnetworks
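# A minimal, standalone sketch of the master-to-worker scatter pattern used in
# _create_subnetworks above, with plain dicts standing in for SubNetwork objects
# (all names here are hypothetical, not part of pypnm). Run with e.g.
# `mpiexec -n 4 python scatter_sketch.py`.
from mpi4py import MPI

comm = MPI.COMM_WORLD

if comm.rank == 0:
    # Rank 0 builds one payload per rank, keeps its own, and posts
    # non-blocking sends for the rest.
    payloads = dict((dest, {"assigned_to": dest}) for dest in range(comm.size))
    requests = [comm.isend(payloads[dest], dest=dest)
                for dest in range(1, comm.size)]
    my_payload = payloads[0]
    for r in requests:
        r.wait()
else:
    # Every other rank blocks until its payload arrives from the master.
    my_payload = comm.recv(source=0)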
import numpy as np

from pypnm.percolation.invasion_percolation_refactored import site_bond_invasion_percolation
from pypnm.porenetwork.network_factory import structured_network
from pypnm.util.igraph_utils import network_to_igraph

network = structured_network(4, 4, 4)
g = network_to_igraph(network, vertex_attributes=["x", "y"])

sat = 0.0
total_vol = np.sum(network.tubes.vol) + np.sum(network.pores.vol)
network.pores.invaded[0] = 1

for element_type, element_id, weight in site_bond_invasion_percolation(g, 1. / network.tubes.r, [0]):
    if element_type == 0:
        # Bond (tube) invaded.
        sat += network.tubes.vol[element_id] / total_vol
        network.tubes.invaded[element_id] = 1
    else:
        # Site (pore) invaded.
        sat += network.pores.vol[element_id] / total_vol
        network.pores.invaded[element_id] = 1

    if sat >= 1.0 - 1e-10:  # exact float equality with 1.0 is unreliable
        break
def subgraph_conn_igraph(network):
    return network_to_igraph(network, network.pores.connected, network.tubes.connected)


def subgraph_wett_igraph(network):
    return network_to_igraph(network, 1 - network.pores.invaded, 1 - network.tubes.invaded)


def graph_igraph(network):
    return network_to_igraph(network)


def shortest_pore_path_between_two_nodes(network, p1, p2, weights=None):
    G = network_to_igraph(network)
    return G.get_shortest_paths(p1, p2, weights=weights)[0]
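# Example usage (a hedged sketch): weight the path search by tube length,
# assuming tube lengths are stored in network.tubes.l and that edge order in
# the igraph graph follows tube order -- both are assumptions here.
network = structured_network(10, 10, 10)
path = shortest_pore_path_between_two_nodes(network, 0, 42, weights=network.tubes.l)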
def __init__(self, network, fluid_properties, num_subnetworks, comm=None, mpicomm=None,
             subgraph_ids=None, delta_s_max=0.01, delta_pc=0.01,
             ptol_ms=1.e-6, ptol_fs=1.e-6, btol=1.e-2):
    self.network = network

    if comm is None:
        comm = Epetra.PyComm()

    if mpicomm is None:
        mpicomm = MPI.COMM_WORLD

    self.comm, self.mpicomm = comm, mpicomm
    self.fluid_properties = fluid_properties
    self.my_id = comm.MyPID()
    my_id = self.my_id
    self.num_proc = comm.NumProc()
    self.num_subnetworks = num_subnetworks

    # On the master cpu do the following:
    # 1) Create graph corresponding to pore network.
    # 2) Partition the graph into subgraphs.
    # 3) Create a coarse graph with the subgraphs as the nodes.
    # 4) Use the coarse graph to assign each subgraph to a processor.
    # (A standalone sketch of steps 2-4 follows after this constructor.)
    if my_id == 0:
        self.graph = network_to_igraph(network, edge_attributes=["l", "A_tot", "r", "G"])

        # Create global_id attributes before creating subgraphs.
        self.graph.vs["global_id"] = np.arange(self.graph.vcount())
        self.graph.es["global_id"] = np.arange(self.graph.ecount())

        if subgraph_ids is None:
            _, subgraph_ids = pymetis.part_graph(num_subnetworks, self.graph.get_adjlist())

        subgraph_ids = np.asarray(subgraph_ids)
        self.graph.vs["subgraph_id"] = subgraph_ids

        # Assign a processor id to each subgraph.
        coarse_graph = coarse_graph_from_partition(self.graph, subgraph_ids)
        _, proc_ids = pymetis.part_graph(self.num_proc, coarse_graph.get_adjlist())
        coarse_graph.vs['proc_id'] = proc_ids
        coarse_graph.vs["subgraph_id"] = np.arange(coarse_graph.vcount())

        # Assign a processor id to each pore.
        subgraph_id_to_proc_id = {v["subgraph_id"]: v['proc_id'] for v in coarse_graph.vs}
        self.graph.vs["proc_id"] = [subgraph_id_to_proc_id[v["subgraph_id"]] for v in self.graph.vs]

    if my_id != 0:
        coarse_graph = None
        self.graph = None
        network = None
        subgraph_ids = None

    self.coarse_graph = self.mpicomm.bcast(coarse_graph, root=0)
    self.graph = self.distribute_graph(self.graph, self.coarse_graph, self.mpicomm)

    self.my_subnetworks = _create_subnetworks(network, subgraph_ids, self.coarse_graph, self.mpicomm)

    self.my_subgraph_ids = self.my_subnetworks.keys()
    self.my_subgraph_ids_with_ghost = list(set().union(*self.coarse_graph.neighborhood(self.my_subgraph_ids)))

    self.inter_subgraph_edges = _create_inter_subgraph_edgelist(self.graph, self.my_subgraph_ids, self.mpicomm)
    self.inter_processor_edges = self.create_inter_processor_edgelist(self.graph, self.my_subgraph_ids, self.mpicomm)

    self.subgraph_id_to_v_center_id = self.subgraph_central_vertices(self.graph, self.my_subgraph_ids_with_ghost)

    # Epetra maps to facilitate data transfer between processors.
    self.unique_map, self.nonunique_map, self.subgraph_ids_vec = self.create_maps(self.graph, self.comm)
    self.epetra_importer = Epetra.Import(self.nonunique_map, self.unique_map)
    assert self.epetra_importer.NumPermuteIDs() == 0
    assert self.epetra_importer.NumSameIDs() == self.unique_map.NumMyElements()

    # subgraph_id to support-vertices map. Stores the support vertex ids (global ids) of both
    # subnetworks belonging to this processor as well as those belonging to ghost subnetworks.
    # Only support vertex ids belonging to this processor are stored.
    self.my_basis_support = dict()
    self.my_subgraph_support = dict()

    my_global_elements = self.unique_map.MyGlobalElements()

    self.graph["global_to_local"] = dict((v["global_id"], v.index) for v in self.graph.vs)
    self.graph["local_to_global"] = dict((v.index, v["global_id"]) for v in self.graph.vs)

    # Support region for each subgraph owned by this processor.
    for i in self.my_subgraph_ids:
        self.my_subgraph_support[i] = self.my_subnetworks[i].pi_local_to_global

    # Support region for each subgraph, including ghost subgraphs.
    self.my_subgraph_support_with_ghosts = dict()
    for i in self.my_subgraph_ids_with_ghost:
        self.my_subgraph_support_with_ghosts[i] = np.asarray(self.graph.vs.select(subgraph_id=i)["global_id"])

    for i in self.my_subgraph_ids:
        assert np.all(np.sort(self.my_subgraph_support[i]) == np.sort(self.my_subgraph_support_with_ghosts[i]))

    for i in self.my_subgraph_ids:
        if num_subnetworks == 1:
            support_vertices = self.my_subnetworks[0].pi_local_to_global
        else:
            support_vertices = support_of_basis_function(i, self.graph, self.coarse_graph,
                                                         self.subgraph_id_to_v_center_id,
                                                         self.my_subgraph_support_with_ghosts)

        self.my_basis_support[i] = np.intersect1d(support_vertices, my_global_elements).astype(np.int32)

    # Create distributed arrays. Note: memory is wasted here by allocating extra arrays
    # which include ghost cells. This can be optimized, but the Python interface for
    # PyTrilinos is not documented well enough. Better would be to create only the
    # arrays which include ghost cells.
    unique_map = self.unique_map
    nonunique_map = self.nonunique_map
    self.p_c = Epetra.Vector(unique_map)
    self.p_w = Epetra.Vector(unique_map)
    self.sat = Epetra.Vector(unique_map)
    self.global_source_wett = Epetra.Vector(unique_map)
    self.global_source_nonwett = Epetra.Vector(unique_map)
    self.out_flux_w = Epetra.Vector(unique_map)
    self.out_flux_n = Epetra.Vector(unique_map)

    self.p_c_with_ghost = Epetra.Vector(nonunique_map)
    self.p_w_with_ghost = Epetra.Vector(nonunique_map)
    self.sat_with_ghost = Epetra.Vector(nonunique_map)
    self.global_source_nonwett_with_ghost = Epetra.Vector(nonunique_map)
    self.out_flux_n_with_ghost = Epetra.Vector(nonunique_map)

    # Simulation parameters.
    self.delta_s_max = delta_s_max
    self.ptol_ms = ptol_ms
    self.ptol_fs = ptol_fs
    self.delta_pc = delta_pc
    self.btol = btol

    # Create dynamic simulations.
    self.simulations = dict()
    for i in self.my_subgraph_ids:
        self.simulations[i] = DynamicSimulation(self.my_subnetworks[i], self.fluid_properties,
                                                delta_pc=self.delta_pc, ptol=self.ptol_fs)
        self.simulations[i].solver_type = "lu"

        k_comp = ConductanceCalc(self.my_subnetworks[i], self.fluid_properties)
        k_comp.compute()
        pc_comp = DynamicCapillaryPressureComputer(self.my_subnetworks[i])
        pc_comp.compute()

    self.time = 0.0
    self.stop_time = None

    self.pi_list_press_inlet = []
    self.press_inlet_w = None
    self.press_inlet_nw = None

    self.pi_list_press_outlet = []
    self.press_outlet_w = None
    self.press_outlet_nw = None
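# A standalone sketch (hypothetical, not the pypnm implementation) of the
# two-level partitioning in steps 2-4 of the constructor above: pymetis labels
# each vertex with a subgraph id, contracting those labels gives the coarse
# graph, and a second pymetis call assigns each subgraph to a processor.
# coarse_graph_from_partition is not shown here, so igraph's contract_vertices
# stands in for it.
import igraph as ig
import pymetis

graph = ig.Graph.Lattice([6, 6, 6], circular=False)

# Step 2: partition the fine graph into 8 subgraphs.
_, subgraph_ids = pymetis.part_graph(8, adjacency=graph.get_adjlist())
graph.vs["subgraph_id"] = subgraph_ids

# Step 3: contract each subgraph to a single coarse vertex.
coarse_graph = graph.copy()
coarse_graph.contract_vertices(subgraph_ids)
coarse_graph.simplify()  # drop self-loops and multi-edges between subgraphs

# Step 4: assign subgraphs (coarse vertices) to 4 processors, then propagate
# the processor id back down to each fine vertex.
_, proc_ids = pymetis.part_graph(4, adjacency=coarse_graph.get_adjlist())
coarse_graph.vs["proc_id"] = proc_ids
graph.vs["proc_id"] = [proc_ids[s] for s in subgraph_ids]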