def add_forced(self, forced_vertices):
    """
    Add forced vertices - must be selected.
    """
    for u in forced_vertices:
        self.__check_id(u)
        G.set_vertex_property(self, "forced", u, True)
def add_solved(self, solved_vertices):
    """
    Add solved vertices - constraints operating only on solved
    vertices are dropped to avoid impossible situations.
    """
    for u in solved_vertices:
        self.__check_id(int(u))
        G.set_vertex_property(self, "solved", u, True)
def set_partner(self, u, value, vals=None):
    assert (type(value) == int)
    assert (value < G.get_number_of_vertices(self))

    if vals is None:
        G.set_vertex_property(self, "partner", u, value)
    else:
        G.set_vertex_property(self, "partner", 0, 0, vals=vals)
def add_conflict(self, exclusive_vertices):
    """
    Add a tuple of exclusive vertices.
    """
    for u in exclusive_vertices:
        self.__check_id(u)

    conflicts = G.get_graph_property(self, "conflicts")
    conflicts.add(tuple(exclusive_vertices))
    G.set_graph_property(self, "conflicts", conflicts)

    return conflicts
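"""
Usage sketch (hypothetical vertex ids, not part of the class): register a
pair of candidate vertices that must not be selected together. Assumes a
G1 instance with at least two vertices, as produced by __init__ below.

    g1 = G1(3)
    # vertices 0 and 1 explain the same underlying evidence and are
    # therefore mutually exclusive
    g1.add_conflict([0, 1])
    assert (0, 1) in g1.get_conflicts()
"""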
def add_sum_constraint(self, vertices_left, vertices_right):
    """
    Require that the number of selected G2 nodes centered around a
    G1 node u is equal on the left and on the right of u. This
    ensures that we do not get branching; see the sketch below.
    """
    assert (type(vertices_left) == list)
    assert (type(vertices_right) == list)

    for u in vertices_left + vertices_right:
        self.__check_id(u)

    sum_constraints = G.get_graph_property(self, "sum_constraints")
    sum_constraints.append(tuple([vertices_left, vertices_right]))
    G.set_graph_property(self, "sum_constraints", sum_constraints)
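"""
Usage sketch (hypothetical vertex ids): a sum constraint couples the
candidates on the left of a G1 node with those on its right, so the
solver selects equally many on both sides and no branching can occur.

    g1 = G1(4)
    # candidates 0, 1 lie to the left of a G1 node, 2, 3 to its right
    g1.add_sum_constraint([0, 1], [2, 3])
    assert g1.get_sum_constraints() == [([0, 1], [2, 3])]
"""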
def get_edge_costs(self, orientation_factor, start_edge_prior):
    edge_cost_tot = {e: self.get_edge_cost(e) * orientation_factor
                     for e in G.get_edge_iterator(self)}
    edge_cost_tot[self.START_EDGE] = 2.0 * start_edge_prior

    return edge_cost_tot
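"""
Worked example of the cost formula above (illustrative values): if
set_edge_cost stored 0.5 for an edge e, then with orientation_factor=2.0
the entry for e is 0.5 * 2.0 = 1.0, while the artificial START_EDGE always
contributes 2.0 * start_edge_prior, independent of any stored edge cost.
"""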
def get_vertex_cost(self):
    vertex_cost = {}
    for v in G.get_vertex_iterator(self):
        vertex_cost[v] = 0.0

    # Start vertex
    vertex_cost[-1] = 0.0

    return vertex_cost
def get_edge_cost(self, e):
    return G.get_edge_property(self, "edge_cost", e=e)
def __init__(self, N):
    G.__init__(self, N)

    # Initialized with 0.0, see graph.G and tests
    G.new_vertex_property(self, "costs", dtype="double")

    G.new_graph_property(self, "conflicts", dtype="python::object")
    G.set_graph_property(self, "conflicts", set())

    G.new_graph_property(self, "sum_constraints", dtype="python::object")
    G.set_graph_property(self, "sum_constraints", list())

    G.new_vertex_property(self, "forced", dtype="bool", value=False)
    G.new_vertex_property(self, "solved", dtype="bool", value=False)

    G.new_graph_property(self, "must_pick_one", dtype="python::object")
    G.set_graph_property(self, "must_pick_one", list())
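"""
Construction sketch: G1(N) extends the base graph class G with N candidate
vertices and registers the solver-related properties above. Right after
construction the constraint containers are empty (this relies only on the
defaults set in __init__):

    g1 = G1(2)
    assert g1.get_conflicts() == set()
    assert g1.get_sum_constraints() == []
    assert g1.get_must_pick_one() == []
"""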
def get_must_pick_one(self):
    return G.get_graph_property(self, "must_pick_one")
def get_orientation(self, u):
    orientations = G.get_vertex_property(self, "orientation")
    return orientations[u]
def __check_id(self, u):
    assert (type(u) == int)
    assert (u < G.get_number_of_vertices(self))
    assert (u >= 0)
def add_must_pick_one(self, g2_vertex_list):
    must_pick_one = G.get_graph_property(self, "must_pick_one")
    must_pick_one.append(tuple(g2_vertex_list))
    G.set_graph_property(self, "must_pick_one", must_pick_one)
def set_cost(self, u, value):
    G.set_vertex_property(self, "costs", u, value)
def set_edge_cost(self, e, edge_cost):
    G.set_edge_property(self, "edge_cost", None, None, edge_cost, e)
def get_orientation_array(self):
    return G.get_vertex_property(self, "orientation").get_2d_array([0, 1, 2])
def get_forced(self, u):
    return G.get_vertex_property(self, "forced", u)
def set_position(self, u, value):
    assert (type(value) == np.ndarray)
    assert (len(value) == 3)
    assert (value.ndim == 1)
    G.set_vertex_property(self, "position", u, value)
def get_partner_array(self):
    partner = G.get_vertex_property(self, "partner")
    return partner.get_array()
def get_partner(self, u):
    partner = G.get_vertex_property(self, "partner")
    return partner[u]
def get_position(self, u):
    positions = G.get_vertex_property(self, "position")
    return positions[u]
def get_position_array(self):
    return G.get_vertex_property(self, "position").get_2d_array([0, 1, 2])
def get_conflicts(self):
    return G.get_graph_property(self, "conflicts")
def get_edge_combination_cost_angle(self, comb_angle_factor,
                                    comb_angle_prior=0.0,
                                    return_edges_to_middle=False):
    edge_index_map = G.get_edge_index_map(self)

    """
    Only collect the indices for each edge combination in the loop
    and perform the cost calculation later in vectorized form.
    """
    middle_indices = []
    edges_to_middle = {}
    end_indices = []
    edges = []
    cost = []
    prior_cost = {}

    """
    Problem: The indices are derived from the vertices in the graph.
    The graphs are filtered versions of a bigger graph where the
    vertices have not been re-enumerated. Thus we expect vertices to
    have arbitrary indices that do not correspond to the number of
    vertices in the subgraph. If we try to access the position matrix
    with these indices we get out-of-bounds errors because the
    position matrix only has entries for the filtered subgraph.
    We need to map vertex indices to the range
    [0, N_vertices_subgraph - 1].
    """
    index_map = {}

    for n, v in enumerate(G.get_vertex_iterator(self)):
        incident_edges = G.get_incident_edges(self, v)
        index_map[v] = n

        for e1 in incident_edges:
            for e2 in incident_edges + [self.START_EDGE]:
                e1_id = self.get_edge_id(e1, edge_index_map)
                e2_id = self.get_edge_id(e2, edge_index_map)

                if e1_id >= e2_id and e2_id != self.START_EDGE.id():
                    continue

                if e2_id == self.START_EDGE.id():
                    """
                    Always append edges together with cost s.t.
                    zip(edges, cost) is a correct mapping of edges
                    to cost.
                    """
                    prior_cost[(e1, e2)] = comb_angle_prior
                    edges_to_middle[(e1, e2)] = int(v)
                else:
                    """
                    Here we only save indices. How do we secure a proper
                    matching between the cost that we calculate later
                    and the corresponding edges?

                    1. The middle vertex is v -> index_map[v] in [0, N-1]
                    2. Append the middle vertex twice to a list.
                    3. Append the two distinct end vertices to end_indices.
                    4. Append the corresponding edges to a list.

                    -> We end up with lists of the following form:

                    edges = [(e1, e2), (e3, e4), ...]
                    m_ind = [ m1, m1 ,  m2, m2 , ...]
                    e_ind = [ v1, v2 ,  v3, v4 , ...]
                    p_arr = [ p(m1)  ,  p(m2)  , ...]

                    index_map: m1 -> 0, m2 -> 1, m3 -> 2, ...
                    --> p_arr[m1] = p(m1), p_arr[m2] = p(m2)
                    """
                    middle_vertex = int(v)
                    middle_indices.extend([middle_vertex, middle_vertex])

                    end_vertices = set([int(e1.source()),
                                        int(e1.target()),
                                        int(e2.source()),
                                        int(e2.target())])
                    end_vertices.remove(middle_vertex)
                    end_indices.extend(list(end_vertices))

                    edges.append((e1, e2))
                    edges_to_middle[(e1, e2)] = int(v)

    """
    (e1, e2) -> end_indices, middle_indices
    """
    if middle_indices:
        pos_array = self.get_position_array()
        end_indices = np.array([index_map[v] for v in end_indices])
        middle_indices = np.array([index_map[v] for v in middle_indices])

        v = (pos_array[:, end_indices] - pos_array[:, middle_indices]).T
        norm = np.sum(np.abs(v)**2, axis=-1)**(1. / 2.)
        u = v / np.clip(norm[:, None], a_min=10**(-8), a_max=None)

        angles = np.arccos(np.clip(inner1d(u[::2], u[1::2]), -1.0, 1.0))
        angles = np.pi - angles
        cost = cost + list((angles * comb_angle_factor)**2)

    edge_combination_cost = dict(itertools.izip(edges, cost))
    edge_combination_cost.update(prior_cost)

    if return_edges_to_middle:
        return edge_combination_cost, edges_to_middle
    else:
        return edge_combination_cost
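"""
Worked example of the angle cost above (standalone sketch with illustrative
values, not calling the class): the cost of an edge pair is the squared,
scaled deviation of the enclosed angle from a straight continuation (pi).

    import numpy as np

    m = np.array([0., 0., 0.])    # position of the middle vertex
    a = np.array([1., 0., 0.])    # end vertex of e1
    b = np.array([-1., 0.1, 0.])  # end vertex of e2
    u1 = (a - m) / np.linalg.norm(a - m)
    u2 = (b - m) / np.linalg.norm(b - m)
    angle = np.arccos(np.clip(np.dot(u1, u2), -1.0, 1.0))
    deviation = np.pi - angle     # ~0.1 rad, nearly straight continuation
    cost = (deviation * 1.0)**2   # small; a sharp kink at m would give a
                                  # deviation near pi and a large cost
"""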
def get_sum_constraints(self):
    return G.get_graph_property(self, "sum_constraints")
def get_edge_combination_cost_curvature(self, comb_angle_factor,
                                        comb_angle_prior=0.0,
                                        return_edges_to_middle=False):
    edge_index_map = G.get_edge_index_map(self)

    """
    Collect the edge combinations in the loop. Unlike the angle-based
    cost above, the curvature energy is evaluated per edge combination
    directly inside the loop.
    """
    middle_indices = []
    edges_to_middle = {}
    end_indices = []
    edges = []
    cost = []
    prior_cost = {}
    edge_combination_cost = {}

    """
    Problem: The indices are derived from the vertices in the graph.
    The graphs are filtered versions of a bigger graph where the
    vertices have not been re-enumerated. Thus we expect vertices to
    have arbitrary indices that do not correspond to the number of
    vertices in the subgraph. If we try to access the position matrix
    with these indices we get out-of-bounds errors because the
    position matrix only has entries for the filtered subgraph.
    We need to map vertex indices to the range
    [0, N_vertices_subgraph - 1].
    """
    index_map = {}

    for n, v in enumerate(G.get_vertex_iterator(self)):
        incident_edges = G.get_incident_edges(self, v)
        index_map[v] = n

        for e1 in incident_edges:
            for e2 in incident_edges + [self.START_EDGE]:
                e1_id = self.get_edge_id(e1, edge_index_map)
                e2_id = self.get_edge_id(e2, edge_index_map)

                if e1_id >= e2_id and e2_id != self.START_EDGE.id():
                    continue

                if e2_id == self.START_EDGE.id():
                    """
                    Always append edges together with cost s.t.
                    zip(edges, cost) is a correct mapping of edges
                    to cost.
                    """
                    prior_cost[(e1, e2)] = comb_angle_prior
                    edges_to_middle[(e1, e2)] = int(v)
                else:
                    """
                    Bookkeeping as in get_edge_combination_cost_angle:

                    1. The middle vertex is v -> index_map[v] in [0, N-1]
                    2. Append the middle vertex twice to a list.
                    3. Append the two distinct end vertices to end_indices.
                    4. Append the corresponding edges to a list.

                    edges = [(e1, e2), (e3, e4), ...]
                    m_ind = [ m1, m1 ,  m2, m2 , ...]
                    e_ind = [ v1, v2 ,  v3, v4 , ...]
                    p_arr = [ p(m1)  ,  p(m2)  , ...]

                    index_map: m1 -> 0, m2 -> 1, m3 -> 2, ...
                    --> p_arr[m1] = p(m1), p_arr[m2] = p(m2)
                    """
                    middle_vertex = int(v)
                    middle_indices.extend([middle_vertex, middle_vertex])

                    end_vertices = [int(e1.source()),
                                    int(e1.target()),
                                    int(e2.source()),
                                    int(e2.target())]
                    end_vertices.remove(middle_vertex)
                    end_vertices.remove(middle_vertex)

                    ordered_points = [None, None, None]
                    ordered_points[0] = np.array(
                        self.get_position(end_vertices[0]))
                    ordered_points[2] = np.array(
                        self.get_position(end_vertices[1]))
                    ordered_points[1] = np.array(
                        self.get_position(middle_vertex))

                    energy = get_energy_from_ordered_points(ordered_points,
                                                            n_samples=1000)
                    edge_combination_cost[(e1, e2)] =\
                        (energy * comb_angle_factor)**2

                    end_indices.extend(list(end_vertices))
                    edges.append((e1, e2))
                    edges_to_middle[(e1, e2)] = int(v)

    """
    (e1, e2) -> end_indices, middle_indices
    """
    edge_combination_cost.update(prior_cost)
    logger.info("edge_combination_cost: " + str(edge_combination_cost))

    if return_edges_to_middle:
        return edge_combination_cost, edges_to_middle
    else:
        return edge_combination_cost
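"""
Note on the curvature variant (hedged, since get_energy_from_ordered_points
is defined elsewhere in this package): each edge pair (e1, e2) is scored
directly in the loop from the three ordered positions [end_1, middle, end_2].
The returned energy is expected to quantify the bending of a curve sampled
through these points (n_samples=1000), and the final cost
(energy * comb_angle_factor)**2 is the curvature analogue of the squared,
scaled angle deviation used in get_edge_combination_cost_angle.
"""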
def get_solved(self, u):
    return G.get_vertex_property(self, "solved", u)
def get_components(self,
                   min_vertices,
                   output_folder,
                   remove_aps=False,
                   min_k=1,
                   return_graphs=False):
    logger.info("Get components...")

    if remove_aps:
        logger.info("Remove articulation points...")
        naps_vp = G.get_articulation_points(self)
        G.set_vertex_filter(self, naps_vp)

    if min_k > 1:
        logger.info("Find " + str(min_k) + "-cores...")
        kcore_vp = G.get_kcore_mask(self, min_k)
        G.set_vertex_filter(self, kcore_vp)

    logger.info("Find connected components...")
    masks, hist = G.get_component_masks(self, min_vertices)

    if output_folder is not None:
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)

    logger.info("Filter Graphs...")
    cc_path_list = []
    graph_list = []

    n = 0
    len_masks = len(masks)
    for mask in masks:
        logger.info("Filter graph " + str(n) + "/" + str(len_masks))

        if output_folder is not None:
            output_file = output_folder +\
                "cc{}_min{}_phy.gt".format(n, min_vertices)
            cc_path_list.append(output_file)

        G.set_vertex_filter(self, mask)
        g1_masked = G1(0)
        g1_masked.g = self.g.copy()
        graph_list.append(g1_masked)

        if output_folder is not None:
            g1_masked.save(output_file)

        G.set_vertex_filter(self, None)
        n += 1

    if return_graphs:
        return graph_list
    else:
        return cc_path_list
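"""
Usage sketch (parameters are illustrative, g1 is assumed to be a populated
G1 instance): extract all connected components with at least 4 vertices,
remove articulation points first, and keep the filtered graphs in memory
instead of writing .gt files.

    components = g1.get_components(min_vertices=4,
                                   output_folder=None,
                                   remove_aps=True,
                                   return_graphs=True)
    for cc in components:
        print(cc.get_number_of_vertices())
"""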
def get_cost(self, u):
    return G.get_vertex_property(self, "costs", u)
def solve_edge(self, e):
    G.set_edge_property(self, "solved", None, None, True, e)