Example 1
    def canonical_labeling(self):

        self.new_label_name = 'cano_label'
        for k in self.graph_ids:
            df_subset_adj = self.adj_dict_by_graphId[k]
            temp_graph_dict = dfadj_to_dict(df_subset_adj)
            try:
                nauty_graph = nauty.Graph(len(temp_graph_dict),
                                          adjacency_dict=temp_graph_dict)
            except Exception:
                # isolated nodes are absent from the adjacency dict, so the
                # inferred vertex count is too small; retry with the full
                # node count for this graph
                missing = self.node_label_by_graphId[k].shape[0] - len(
                    temp_graph_dict.keys())
                print('missing nodes in graph number {}: {}'.format(
                    k, missing))
                nauty_graph = nauty.Graph(len(temp_graph_dict) + missing,
                                          adjacency_dict=temp_graph_dict)
            canonical_labeling = nauty.canonical_labeling(nauty_graph)
            self.node_label_by_graphId[k].reset_index(inplace=True)
            self.node_label_by_graphId[k] = pd.concat(
                [
                    self.node_label_by_graphId[k],
                    pd.Series(canonical_labeling, dtype=int,
                              name=self.new_label_name),
                ],
                axis=1)
Example 2
 def canonical_labeling(self, adj_dict_by_graphId):
     all_canonical_labels = []
     for l in self.graph_ids:
         df_subset_adj = adj_dict_by_graphId[l]
         df_subset_nodes = self.df_node_label[self.df_node_label.graph_ind
                                              == l]
         # temp_graph_dict = utils.dfadj_to_dict(df_subset_adj)
         temp_graph_dict = dfadj_to_dict(df_subset_adj)
         try:
             nauty_graph = nauty.Graph(len(temp_graph_dict),
                                       adjacency_dict=temp_graph_dict)
         except Exception:
             # count the vertex ids missing from the adjacency dict
             # (isolated nodes) and retry with an enlarged vertex count
             missing = len(
                 set(range(len(temp_graph_dict.keys()) + 1)).difference(
                     set(temp_graph_dict.keys())))
             print('missing nodes in graph number {}: {}'.format(
                 l, missing))
             nauty_graph = nauty.Graph(len(temp_graph_dict) + missing,
                                       adjacency_dict=temp_graph_dict)
         canonical_labeling = nauty.canonical_labeling(nauty_graph)
         # canonical_labeling = [df_subset_nodes.label.values[i] for i in canonical_labeling]  ###
         all_canonical_labels += canonical_labeling
     return all_canonical_labels
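
In both snippets the nauty.Graph call is wrapped in a try/except because an adjacency dict built from edges alone omits isolated nodes, so len(temp_graph_dict) undercounts the vertices the remaining ids require. A minimal sketch of the failure and the retry, assuming plain pynauty (the snippets' nauty module appears to be a wrapper around it, with canon_label in recent pynauty releases as the analogue of canonical_labeling):

import pynauty

# vertex 1 is isolated, so an edge-derived adjacency dict never mentions it;
# only ids 0 and 2 appear, yet id 2 requires at least 3 vertices
adj = {0: [2], 2: [0]}

try:
    g = pynauty.Graph(len(adj), adjacency_dict=adj)  # 2 vertices: id 2 is out of range
except Exception:
    missing = max(adj) + 1 - len(adj)  # recount, mirroring the snippets above
    g = pynauty.Graph(len(adj) + missing, adjacency_dict=adj)

labeling = pynauty.canon_label(g)  # canonical order of the 3 vertices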
Example 3
    def labelling(self, labelling_procedure='bc'):
        """
        bc : betweenness centrality
        :param labelling_procedure:
        :return: adds label according to labelling procedure
        """

        if labelling_procedure == 'bc':
            self.new_label_name = 'bc_label'
            for k in self.graph_ids:
                nx_graph = self.nx_graphs[k]
                sorted_bc_labels = [
                    node for node, _ in sorted(
                        nx.betweenness_centrality(nx_graph).items(),
                        key=lambda x: x[1],
                        reverse=True)
                ]
                self.node_label_by_graphId[k].reset_index(inplace=True)
                self.node_label_by_graphId[k] = pd.concat(
                    [
                        self.node_label_by_graphId[k],
                        pd.Series(sorted_bc_labels, dtype=int,
                                  name=self.new_label_name),
                    ],
                    axis=1)
        elif labelling_procedure == 'cl':
            self.new_label_name = 'cano_label'
            for j, k in enumerate(self.graph_ids):
                df_subset_adj = self.adj_dict_by_graphId[k]
                temp_graph_dict = dfadj_to_dict(df_subset_adj)
                try:
                    nauty_graph = nauty.Graph(len(temp_graph_dict),
                                              adjacency_dict=temp_graph_dict)
                except Exception:
                    # isolated nodes are missing from the adjacency dict;
                    # retry with the full node count for this graph
                    missing = self.node_label_by_graphId[k].shape[0] - len(
                        temp_graph_dict.keys())
                    print('missing nodes in graph number {}: {}'.format(
                        k, missing))
                    nauty_graph = nauty.Graph(len(temp_graph_dict) + missing,
                                              adjacency_dict=temp_graph_dict)
                canonical_labeling = nauty.canonical_labeling(nauty_graph)
                self.node_label_by_graphId[k].reset_index(inplace=True)
                self.node_label_by_graphId[k] = pd.concat(
                    [
                        self.node_label_by_graphId[k],
                        pd.Series(canonical_labeling, dtype=int,
                                  name=self.new_label_name),
                    ],
                    axis=1)
                progress_bar(j, self.num_graphs)
Example 4
def make_bipartite_for_design(pd):
    # we have num_points + num_lines vertices:
    # first come the points, then the lines;
    # the points have color 0 and the lines have color 1
    num_points = pd.num_points
    num_lines = len(pd.lines)
    num_vert = num_points + num_lines

    # two color classes: one for the points, one for the lines
    lineset = set(range(num_points, num_vert))
    coloring = [set(range(num_points)), lineset]

    adj_dict = {}
    for i in range(num_vert):
        adj_dict[i] = []

    # li is the line index, 0, ..., L-1
    # line is the set of vertices in the line, numbers 0 <= x < N
    for li, line in enumerate(pd.lines):
        line_ind = num_points + li  # line index in graph is N + li
        for p in line:
            # add an edge in G between line_ind and p
            adj_dict[line_ind].append(p)
            adj_dict[p].append(line_ind)

    g = pynauty.Graph(number_of_vertices=num_vert,
                      directed=False,
                      adjacency_dict=adj_dict,
                      vertex_coloring=coloring)

    return g
Example 5
def get_graphlet(window, num_graphlets):
    """Compute the Nauty certificate of the graphlet in within a window of the adjacency matrix

    This function takes the upper triangle of a nxn matrix and
    computes its hash certificate using nauty. Given the parameters
    this usually involved computing the graphlet of num_graphlets 
    size

    This is used for comparison with a bank of known certificates
    as loaded in get_maps().

    Parameters
    ----------
    window : numpy ndarray
        submatrix inside the adjacency matrix of a graph
    num_graphlets: int
        the size of the graphlets to extract

    Returns
    -------
    cert : byte str
        certicate of the graphlet produced by finding its canonical representation with Nauty.

    """

    adj_mat = {
        idx: [i for i in list(np.where(edge)[0]) if i != idx]
        for idx, edge in enumerate(window)
    }

    g = pynauty.Graph(number_of_vertices=num_graphlets,
                      directed=False,
                      adjacency_dict=adj_mat)
    cert = pynauty.certificate(g)
    return cert
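
A usage sketch, assuming 3 x 3 windows cut from a 0/1 adjacency matrix; non-isomorphic graphlets such as the triangle and the 3-path get distinct certificates, while relabelings of the same graphlet agree:

import numpy as np
import pynauty

triangle = np.array([[0, 1, 1],
                     [1, 0, 1],
                     [1, 1, 0]])
path = np.array([[0, 1, 0],
                 [1, 0, 1],
                 [0, 1, 0]])
path_relabeled = np.array([[0, 1, 1],   # same 3-path, center moved to vertex 0
                           [1, 0, 0],
                           [1, 0, 0]])

assert get_graphlet(triangle, 3) != get_graphlet(path, 3)
assert get_graphlet(path, 3) == get_graphlet(path_relabeled, 3)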
Example 6
def convert_nx_to_pyn(nx_g, partition=None):
    """
        Takes a NetworkX graph and outputs a PyNauty graph.
        If graph has dimension > 2, converts into layered coloured graph
    """
    # If graph represents nD qudit graph, map to coloured layer graph
    coloring = []
    if nx_g.__dict__.get('dimension', 2) > 2:
        nx_g, coloring = qudit_graph_map(nx_g, partition)
    # Relabels nodes with integers for compatibility with Pynauty
    nodes, neighs = list(zip(*nx_g.adjacency()))
    to_int_node_map = {n: i for i, n in enumerate(nodes)}
    relabel = to_int_node_map.get
    nodes = list(map(relabel, nodes))
    neighs = [list(map(relabel, node_neighs.keys())) for node_neighs in neighs]
    coloring = [set(map(relabel, colour)) for colour in coloring]
    # Creates Pynauty graph
    graph_adj = {node: node_neighs for node, node_neighs in zip(nodes, neighs)}
    n_v = len(graph_adj)
    pyn_g = pyn.Graph(n_v,
                      directed=False,
                      adjacency_dict=graph_adj,
                      vertex_coloring=coloring)
    # Finds inverse node labelling
    from_int_node_map = {i: n for n, i in to_int_node_map.items()}
    return pyn_g, from_int_node_map
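
A usage sketch, assuming an ordinary 2D NetworkX graph (so the qudit mapping is skipped); the inverse map recovers the original node names from pynauty's integer labels (canon_label is available in recent pynauty releases):

import networkx as nx
import pynauty as pyn

nx_g = nx.path_graph(["a", "b", "c"])   # non-integer labels get remapped
pyn_g, inv_map = convert_nx_to_pyn(nx_g)
canon = pyn.canon_label(pyn_g)          # canonical order over integer labels
print([inv_map[i] for i in canon])      # the same order in original names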
Example 7
def make_bipartite_for_design_linelist(num_points, line_list):
    # we have num_points + num_lines vertices:
    # first come the points, then the lines;
    # the points have color 0 and the lines have color 1
    num_lines = len(line_list)
    num_vert = num_points + num_lines
    # two color classes: one for the points, one for the lines
    lineset = set(range(num_points, num_vert))
    coloring = [set(range(num_points)), lineset]

    adj_dict = {}
    for i in range(num_vert):
        adj_dict[i] = []
    for li, line in enumerate(line_list):
        line_ind = num_points + li
        for p in line:
            adj_dict[line_ind].append(p)
            adj_dict[p].append(line_ind)

    g = pynauty.Graph(number_of_vertices=num_vert,
                      directed=False,
                      adjacency_dict=adj_dict,
                      vertex_coloring=coloring)

    return g
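
A usage sketch with the seven lines of the Fano plane as a hypothetical input; since its automorphism group is transitive on points and on lines, pynauty.autgrp reports exactly two orbits (one per color class):

import pynauty

fano_lines = [{0, 1, 2}, {0, 3, 4}, {0, 5, 6},
              {1, 3, 5}, {1, 4, 6}, {2, 3, 6}, {2, 4, 5}]
g = make_bipartite_for_design_linelist(7, fano_lines)
generators, grpsize1, grpsize2, orbits, numorbits = pynauty.autgrp(g)
print(numorbits)  # 2: one orbit of points, one orbit of lines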
Example 8
def get_isomorphic_signature(graph: DiGraph) -> str:
    """
    Generate a unique isomorphism id with pynauty
    (assumes nodes are labeled 0..n-1, as pynauty requires).
    """
    nauty_graph = pynauty.Graph(len(graph.nodes),
                                directed=True,
                                adjacency_dict=nx.to_dict_of_lists(graph))
    return hashlib.md5(pynauty.certificate(nauty_graph)).hexdigest()
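
A quick usage sketch: two differently labeled copies of the directed path on three nodes get the same signature:

import networkx as nx

g1 = nx.DiGraph([(0, 1), (1, 2)])  # 0 -> 1 -> 2
g2 = nx.DiGraph([(2, 0), (0, 1)])  # 2 -> 0 -> 1, same shape relabeled
assert get_isomorphic_signature(g1) == get_isomorphic_signature(g2)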
Example 9
def cl(g6):
    """ compute the canonical form (nauty certificate) from graph6 bytes """
    g = nx.from_graph6_bytes(g6)
    adj = {v: list(g[v]) for v in g}
    nauty_graph = pynauty.Graph(g.order(), adjacency_dict=adj)
    s = pynauty.certificate(nauty_graph)
    g.clear()
    del nauty_graph
    return s
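
A small check, assuming b'C~' (the graph6 encoding of K4); the certificate matches one computed from a directly constructed K4:

import pynauty

cert = cl(b'C~')  # K4 in graph6
k4 = pynauty.Graph(4, adjacency_dict={v: [u for u in range(4) if u != v]
                                      for v in range(4)})
assert cert == pynauty.certificate(k4)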
Example 10
def get_motif(motif_am, size):
    """Return the nauty certificate of the motif whose adjacency matrix is motif_am."""
    adj_mat = {
        idx: [i for i in list(np.where(edge)[0]) if i != idx]
        for idx, edge in enumerate(motif_am)
    }
    g = pynauty.Graph(number_of_vertices=size,
                      directed=False,
                      adjacency_dict=adj_mat)
    cert = pynauty.certificate(g)
    return cert
Example 11
    def _canonical(adjacency, labels):
        count = adjacency.shape[0]
        adjacency_dict = {}

        for i in range(count):
            adjacency_dict[i] = list(np.nonzero(adjacency[i])[0])

        graph = nauty.Graph(count, adjacency_dict=adjacency_dict)
        labeling = nauty.canonical_labeling(graph)

        labeling = [labels[i] for i in labeling]

        return np.array(labeling, np.int32)
Example 12
def get_graphlet(window, nsize):
    """
    This function takes the upper triangle of a nxn matrix and computes its canonical map
    """
    adj_mat = {
        idx: [i for i in list(np.where(edge)[0]) if i != idx]
        for idx, edge in enumerate(window)
    }

    g = pynauty.Graph(number_of_vertices=nsize,
                      directed=False,
                      adjacency_dict=adj_mat)
    cert = pynauty.certificate(g)
    return cert
Example 13
def apply_to_all_ged_k(G, k, f, statistic=np.mean):
    """
    Apply f to every graph obtained from G by removing k edges.
    f must take (G, aut), where aut is the output of pynauty.autgrp;
    the per-graph results are aggregated with `statistic`.
    """
    results = []
    edges = list(G.edges())
    for edges_to_remove in combinations(edges, k):
        G1 = copy.deepcopy(G)
        for e in edges_to_remove:
            assert (len(e) == 2)
            G1.remove_edge(*e)
        g = pynauty.Graph(number_of_vertices=G1.number_of_nodes(),
                          directed=nx.is_directed(G1),
                          adjacency_dict=get_adjacency_dict(G1))
        aut = pynauty.autgrp(g)
        results.append(f(G1, aut))
    return statistic(results)
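
The snippet depends on a get_adjacency_dict helper that is not shown here; a minimal version, assuming integer-labeled graphs (pynauty needs vertex ids 0..n-1), might look like this:

def get_adjacency_dict(G):
    # pynauty expects {vertex: [neighbours]} with plain int ids 0..n-1
    return {int(u): [int(v) for v in G.neighbors(u)] for u in G.nodes()}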
Example 14
def compute_orbits(node_list, A):
    '''
    WARN:
      node_list & A must form a single connected component; don't pass in
      multiple connected components.
    args:
      node_list: e.g. ['C', 'N', 'N', 'C', 'C', 'O', 'P', 'O', 'C']
      A: n x n np adjacency matrix; 0 = edge absent, >0 = edge present.
         Edge colors & edge weights are ignored.
    returns:
      orbit labels, one per atom.
      number of orbit partitions (scalar).
    '''
    A = adj_mat_2_adj_dict(A)
    vertex_coloring = node_list_2_vertex_coloring(node_list)
    G = pynauty.Graph(number_of_vertices=len(node_list),
                      directed=False,
                      adjacency_dict=A,
                      vertex_coloring=vertex_coloring)
    automorphism_group = pynauty.autgrp(G)  #return -> (generators, grpsize1, grpsize2, orbits, numorbits)
    # print('automorphism_group', automorphism_group)
    n_orbits = automorphism_group[-1] # e.g. 3
    orbits = automorphism_group[-2]   # e.g. [0 1 2 2 1 0]
    return orbits, n_orbits
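
For reference, a minimal sketch of pynauty.autgrp's return value on an uncolored 3-node path; the two endpoints fall into one orbit:

import pynauty

path = pynauty.Graph(3, adjacency_dict={0: [1], 1: [0, 2], 2: [1]})
generators, grpsize1, grpsize2, orbits, numorbits = pynauty.autgrp(path)
print(orbits)     # [0, 1, 0]: vertices 0 and 2 are equivalent
print(numorbits)  # 2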
Example 15
def get_canonical_map(g):
    if len(g.nodes()) > 0:
        a = nx.adjacency_matrix(g)
        am = a.todense()
        window = np.array(am)
        adj_mat = {
            idx: [i for i in list(np.where(edge)[0]) if i != idx]
            for idx, edge in enumerate(window)
        }
        # The line below doesn't take the order of nodes into account: it
        # produces an identical canonical map for the graphs
        # 0-->1 2,  0 1-->2,  0-->2 1
        # tmp = pynauty.Graph(number_of_vertices=len(g.nodes()), directed=True, adjacency_dict=adj_mat)

        # Coloring every vertex with its own singleton color pins the node
        # order, so different labelings are distinguished.
        tmp = pynauty.Graph(
            number_of_vertices=len(g.nodes()),
            directed=True,
            adjacency_dict=adj_mat,
            vertex_coloring=[set([t]) for t in range(len(g.nodes()))])

        cert = pynauty.certificate(tmp)
    else:
        cert = ''
    return cert
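
A sketch of the behaviour the comment describes: without colouring, the two single-edge digraphs below are isomorphic and share a certificate; with singleton colours every vertex is pinned, so the certificates differ:

import pynauty

a = {0: [1]}  # edge 0 -> 1 on three vertices
b = {1: [2]}  # edge 1 -> 2 on three vertices

def plain(adj):
    return pynauty.Graph(3, directed=True, adjacency_dict=adj)

def pinned(adj):
    return pynauty.Graph(3, directed=True, adjacency_dict=adj,
                         vertex_coloring=[{0}, {1}, {2}])

print(pynauty.certificate(plain(a)) == pynauty.certificate(plain(b)))    # True
print(pynauty.certificate(pinned(a)) == pynauty.certificate(pinned(b)))  # False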
Example 16
def calculate_platform_symmetries(cfg):
    """Calculate the Automorphism Group of a Platform Graph

    This task expects three hydra parameters to be available.


    **Hydra Parameters**:
        * **platform:** the input platform. The task expects a configuration
          dict that can be instantiated to a
          :class:`~mocasin.common.platform.Platform` object.
        * **out:** the output file (extension will be added)
        * **mpsym:** a boolean value selecting mpsym as the backend (with
          JSON output); otherwise the task outputs plaintext from the
          python implementation.
    """
    platform = hydra.utils.instantiate(cfg["platform"])
    log.info("start converting platform to edge graph for automorphisms.")
    plat_graph = platform.to_adjacency_dict(include_proc_type_labels=True)
    use_mpsym = cfg["mpsym"]

    (
        adjacency_dict,
        num_vertices,
        coloring,
        nodes_correspondence,
    ) = aut.to_labeled_edge_graph(plat_graph)
    log.info("done converting platform to edge graph for automorphisms.")
    # print(nodes_correspondence)
    # print(coloring)
    # print(len(coloring))
    # print(str(edge_graph))
    log.info(
        "start calculating the automorphism group of the (edge) graph with " +
        str(num_vertices) + " nodes using nauty.")
    nautygraph = pynauty.Graph(num_vertices, True, adjacency_dict, coloring)
    autgrp_edges = pynauty.autgrp(nautygraph)
    log.info(
        "done calculating the automorphism group of the (edge) graph using nauty."
    )

    log.info("start coverting automorhpism of edges to nodes.")
    autgrp, new_nodes_correspondence = aut.edge_to_node_autgrp(
        autgrp_edges[0], nodes_correspondence)
    permutations_lists = map(aut.list_to_tuple_permutation, autgrp)
    # permutations = map(perm.Permutation,permutations_lists)
    # permgrp = perm.PermutationGroup(list(permutations))
    # print(permgrp.point_orbit(0))
    log.info("done coverting automorhpism of edges to nodes.")

    log.info("start writing to file.")
    if use_mpsym:
        try:
            mpsym
        except NameError:
            log.error(
                "Configured for mpsym output but could not load mpsym. Fallback to python implementation"
            )
            use_mpsym = False

    if use_mpsym:
        out_filename = str(cfg["out_file"])
        mpsym_autgrp = mpsym.ArchGraphAutomorphisms(
            [mpsym.Perm(g) for g in autgrp])
        json_out = mpsym_autgrp.to_json()
        with open(out_filename, "w") as f:
            f.write(json_out)
    else:
        out_filename = cfg["out_file"]
        with open(out_filename, "w") as f:
            f.write("Platform Graph:")
            f.write(str(plat_graph))
            # f.write("Edge Group with ~" + str(autgrp_edges[1]) + " * 10^" + str(autgrp_edges[2]) + " elements.\n")
            f.write("Symmetry group generators:")
            f.write(str(list(permutations_lists)))
            f.write("\nCorrespondence:")
            f.write(str(new_nodes_correspondence))

    log.info("done writing to file.")
Example 17
def find_symmetries(model):
    num_vertices = 0
    obj_var_coefs = {}

    # Going to make this really easy by iterating through once initially and building up an index map
    graph_indices = {}
    variable_names = {}

    def add_vertex(name, num):
        graph_indices[name] = num
        variable_names[num] = name
        return num + 1

    for n, constraint in model.c.items():
        num_vertices = add_vertex(constraint.name, num_vertices)

    for n, variable in model.vd.items():
        num_vertices = add_vertex(variable.name, num_vertices)

    # The number of vertices before the inclusion of intermediate vertices
    num_model_vertices = num_vertices

    # First we go through the objective function and build a map from variable -> obj_coeff
    standard_objective = generate_standard_repn(model.o)
    for i, (variable, coefficient) in enumerate(
            zip(standard_objective.linear_vars,
                standard_objective.linear_coefs)):
        obj_var_coefs[variable.name] = coefficient

    # Should probably look up a slightly nicer way to do this?
    adjacency_dict = defaultdict(list)

    for name, constraint in model.c.items():
        # variables that share a coefficient within a constraint can coalesce their intermediate vertices
        coalesce_dict = {}
        repn = generate_standard_repn(constraint.body)
        constraint_index = graph_indices[constraint.name]
        for coefficient, variable in zip(repn.linear_coefs, repn.linear_vars):
            # So, first we want an intermediate vertex
            variable_index = graph_indices[variable.name]
            if coefficient == 1:
                # For now, if the coefficient is one, we will not use an intermediate vertex
                adjacency_dict[constraint_index].append(variable_index)
            elif coefficient in coalesce_dict:
                intermediate_vertex_index = coalesce_dict[coefficient]
                # The intermediate vertex is already connected to the constraint vertex, so here we just need to connect it to this variable
                adjacency_dict[variable_index].append(
                    intermediate_vertex_index)
            else:
                # We do not yet have an intermediate vertex for this coefficient and vertex, let's make one
                intermediate_vertex_index = num_vertices
                coalesce_dict[coefficient] = intermediate_vertex_index
                adjacency_dict[constraint_index].append(
                    intermediate_vertex_index)
                adjacency_dict[variable_index].append(
                    intermediate_vertex_index)
                num_vertices += 1

    # So, in building the adjacency dict, we want to colour constraints in one colour (maybe this changes in the future).
    # We then want to partition the variables by their upper bound, lower bound and their value (if any) in the objective function

    # Inefficient to iterate through yet again, but we want it to be as obvious as possible what's going on
    # I think that we may also need to consider constraint colourings!
    variable_colouring_dict = defaultdict(set)
    for n, variable in model.vd.items():
        variable_index = graph_indices[variable.name]
        obj_coef = obj_var_coefs[
            variable.name] if variable.name in obj_var_coefs else 0
        variable_colouring_dict[(variable.lb, variable.ub,
                                 obj_coef)].add(variable_index)

    # Let's look at some constraint colourings
    constraint_colouring_dict = defaultdict(set)
    for n, constraint in model.c.items():
        constraint_index = graph_indices[constraint.name]
        constraint_colouring_dict[(constraint.lb, constraint.ub,
                                   constraint.rhs)].add(constraint_index)

    vertex_colourings = []
    vertex_colourings.extend(variable_colouring_dict.values())
    vertex_colourings.extend(constraint_colouring_dict.values())

    # Pass the vertex colourings in and see what we get for now
    graph = pynauty.Graph(num_vertices, False, adjacency_dict,
                          vertex_colourings)
    aut = pynauty.autgrp(graph)

    symmetry_groups = defaultdict(set)
    for index, min_vertex in enumerate(aut[3][:num_model_vertices]):

        # I think that we're doing it slightly wrong here, we should use the constraint names here
        symmetry_groups[min_vertex].add(variable_names[index])

    return set(map(lambda s: frozenset(s), symmetry_groups.values()))
Example 18
def HASH(graph):
    """
        see https://stackoverflow.com/questions/46999771/
        use with caution...

        :return:
        """
    g: nx.Graph = graph

    pnGraph = pn.Graph(g.number_of_nodes())
    # group the vertices into colour classes keyed by their chemical symbol
    colors = {}
    for u, v in g.edges:
        pnGraph.connect_vertex(u, v)
        colors.setdefault(g.nodes[u]["symbol"], set()).add(u)
        colors.setdefault(g.nodes[v]["symbol"], set()).add(v)
    # nauty treats the colouring as an ordered partition, and
    # set_vertex_coloring() replaces the whole colouring each time it is
    # called; so order the colour classes deterministically (by symbol)
    # and set them all in a single call
    pnGraph.set_vertex_coloring([colors[symbol] for symbol in sorted(colors)])
    return hash(pn.certificate(pnGraph))
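
A usage sketch, assuming integer-labeled graphs with a "symbol" node attribute; two labelings of water hash identically:

import networkx as nx
import pynauty as pn

h2o_a = nx.Graph([(0, 1), (0, 2)])
nx.set_node_attributes(h2o_a, {0: "O", 1: "H", 2: "H"}, "symbol")

h2o_b = nx.Graph([(1, 0), (1, 2)])  # same molecule, oxygen relabeled
nx.set_node_attributes(h2o_b, {0: "H", 1: "O", 2: "H"}, "symbol")

assert HASH(h2o_a) == HASH(h2o_b)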
Example 19
    def __init__(
        self,
        graph,
        platform,
        channels=False,
        periodic_boundary_conditions=False,
        norm_p=2,
        canonical_operations=True,
        disable_mpsym=False,
        disable_symmetries_test=False,
    ):
        self._topologyGraph = platform.to_adjacency_dict(
            include_proc_type_labels=True)
        self.graph = graph
        self.platform = platform
        self._d = len(graph.processes())
        init_app_ncs(self, graph)
        self._arch_nc_inv = {}
        self.channels = channels
        self.boundary_conditions = periodic_boundary_conditions
        self.p = norm_p
        com_mapper = ComFullMapper(graph, platform)
        self.list_mapper = ProcPartialMapper(graph, platform, com_mapper)
        self.canonical_operations = canonical_operations

        n = len(self.platform.processors())
        correct = None

        if disable_mpsym:
            self.sym_library = False
        else:
            try:
                mpsym
            except NameError:
                self.sym_library = False
            else:
                self.sym_library = True
                if hasattr(platform, "ag"):
                    self._ag = platform.ag
                    log.info(
                        "Symmetries initialized with mpsym: Platform Generator."
                    )
                elif hasattr(platform, "ag_json"):
                    if exists(platform.ag_json):
                        self._ag = mpsym.ArchGraphSystem.from_json_file(
                            platform.ag_json)
                        if disable_symmetries_test:
                            log.warning(
                                "Using symmetries JSON without testing.")
                            correct = True
                        else:
                            try:
                                correct = checkSymmetries(
                                    platform.to_adjacency_dict(),
                                    self._ag.automorphisms(),
                                )
                            except Exception as e:
                                log.warning(
                                    "An unknown error occurred while reading "
                                    "the embedding JSON file. Did you provide "
                                    "the correct file for the given platform? "
                                    f"({e})")
                                correct = False
                        if not correct:
                            log.warning(
                                "Symmetries json does not fit platform.")
                            del self._ag
                        else:
                            log.info(
                                "Symmetries initialized with mpsym: JSON file."
                            )
                    else:
                        log.warning(
                            "Invalid symmetries JSON path (file does not exist)."
                        )

                if not hasattr(self, "_ag"):
                    # only calculate this if not already present
                    log.info("No pre-comupted mpsym symmetry group available."
                             " Initalizing architecture graph...")
                    (
                        adjacency_dict,
                        num_vertices,
                        coloring,
                        self._arch_nc,
                    ) = to_labeled_edge_graph(self._topologyGraph)
                    nautygraph = pynauty.Graph(num_vertices, True,
                                               adjacency_dict, coloring)
                    log.info("Architecture graph initialized. Calculating "
                             "automorphism group using Nauty...")
                    autgrp_edges = pynauty.autgrp(nautygraph)
                    autgrp, _ = edge_to_node_autgrp(autgrp_edges[0],
                                                    self._arch_nc)
                    self._ag = mpsym.ArchGraphAutomorphisms(
                        [mpsym.Perm(g) for g in autgrp])
                    for node in self._arch_nc:
                        self._arch_nc_inv[self._arch_nc[node]] = node
                        # TODO: ensure that nodes_correspondence fits simpleVec

        if not self.sym_library:
            log.info(
                "Using python symmetries: Initalizing architecture graph...")
            (
                adjacency_dict,
                num_vertices,
                coloring,
                self._arch_nc,
            ) = to_labeled_edge_graph(self._topologyGraph)
            nautygraph = pynauty.Graph(num_vertices, True, adjacency_dict,
                                       coloring)
            log.info("Architecture graph initialized. Calculating "
                     "automorphism group using Nauty...")
            autgrp_edges = pynauty.autgrp(nautygraph)
            autgrp, _ = edge_to_node_autgrp(autgrp_edges[0], self._arch_nc)
            permutations_lists = map(list_to_tuple_permutation, autgrp)
            permutations = [
                Permutation.fromLists(p, n=n) for p in permutations_lists
            ]
            self._G = PermutationGroup(permutations)
            log.info("Initialized automorphism group with internal symmetries")
Example 20
def adjacency_to_nagraph(adjacency):
    # assumes every vertex 0..n-1 appears as a key of `adjacency`
    import pynauty as na
    return na.Graph(number_of_vertices=len(adjacency),
                    adjacency_dict=adjacency)
Example 21
            adjacency_dict[variable_index].append(intermediate_vertex_index)
            num_vertices += 1

# So, in building the adjacency dict, we want to colour constraints in one colour (maybe this changes in the future).
# We then want to partition the variables by their upper bound, lower bound and their value (if any) in the objective function

# Inefficient to iterate through yet again, but we want it to be as obvious as possible what's going on
# I think that we may also need to consider constraint colourings!
variable_colouring_dict = defaultdict(set)
for n, variable in model.vd.items():
    variable_index = graph_indices[variable.name]
    obj_coef = obj_var_coefs[
        variable.name] if variable.name in obj_var_coefs else 0
    variable_colouring_dict[(variable.lb, variable.ub,
                             obj_coef)].add(variable_index)

# Let's look at some constraint colourings
constraint_colouring_dict = defaultdict(set)
for n, constraint in model.c.items():
    constraint_index = graph_indices[constraint.name]
    constraint_colouring_dict[(constraint.lb, constraint.ub,
                               constraint.rhs)].add(constraint_index)

vertex_colourings = []
vertex_colourings.extend(variable_colouring_dict.values())
vertex_colourings.extend(constraint_colouring_dict.values())

# Pass the vertex colourings in and see what we get for now
graph = pynauty.Graph(num_vertices, False, adjacency_dict, vertex_colourings)
aut = pynauty.autgrp(graph)
print(aut[3])
Example 22
    new_rows = []

    print(args.files)
    for fname in progressbar.progressbar(glob.glob(args.files)):
        # TODO: check if the row is already there. If it is, check what fields it already has
        name = Path(fname).name
        if name in precomputed_names:
            # if it's already there, update
            s = df.loc[name]
            existing_columns = set(s.where(s.notna()).dropna().index)
            columns_to_compute = expected_features - existing_columns
            elist = s['elist']
            G = nx.OrderedGraph()  # removed in NetworkX 3.x; nx.Graph preserves insertion order there
            G.add_edges_from(elist)
            g = pynauty.Graph(number_of_vertices=G.number_of_nodes(),
                              directed=nx.is_directed(G),
                              adjacency_dict=get_adjacency_dict(G))
            aut = pynauty.autgrp(g)
            for feature_name in columns_to_compute:
                df.at[name, feature_name] = float(
                    feature_getter_dispatcher(feature_name, G, aut))
        else:
            # if it's not there yet, build a dictionary with all the stuff
            elist, p, res = pickle.load(open(fname, "rb"))
            d = {
                'name': name,
                'p': float(p),
                'elist': copy.deepcopy(elist),
                'res': copy.deepcopy(res)
            }
            G = nx.OrderedGraph()
Example 23
def sample_graphlets(k=5, delta=0.05, epsilon=0.05, a=-1):
    """ A function that samples graphlets, based on statistical parameters

        k: the dimension of the given graphlets
        delta: confidence level (typically 0.05 or 0.1)
        epsilon: precision level (typically 0.05 or 0.1)
        a: number of isomorphism classes of graphlets
    """

    fallback_map = {1: 1, 2: 2, 3: 4, 4: 8, 5: 19, 6: 53, 7: 209, 8: 1253, 9: 13599}
    if not k >= 3:
        raise ValueError("graphlet size k must be at least 3")
    if a == -1:
        if k > 9:
            # For k > 9 the number of isomorphism classes is not known;
            # interpolate from the known values.
            isomorphism_prediction = interp1d(list(fallback_map.keys()), list(fallback_map.values()), kind='cubic')
            a = isomorphism_prediction(k)
        else:
            a = fallback_map[k]
    
    # Calculate number of samples
    nsamples = math.ceil(2*(a*np.log10(2) + np.log10(1/delta))/(epsilon**2))

    # stores graphlets as pynauty graphs
    graphlets = dict()
    # stores the graphlet certificates
    graphlet_set = set()
    
    # transforms a matrix to a dictionary 
    to_edge_dict_binary = lambda x : matrix_to_dict(x, '==', 1, k, False)
    # different isomorphism bins
    graph_bins = dict()
    nbins = 0
    # produce all the binary sequences up to 2^(k-1)
    max_n = 2**(k-1)
    all_bin = {i: np.array(list(j)) for (i,j) in zip(range(0,max_n),itertools.product([0, 1], repeat=k-1))}
    for i in range(0, nsamples):
        gr = np.empty(shape=(k, k))
        f = True
        while f:
            # reset the certificate on every resampling attempt
            cert = list()
            # Generate a random symmetric adjacency matrix in which
            # every row and column has at least one nonzero entry.
            for j in range(0, k-1):
                gr[j, j] = .0
                # pick a random non-zero line
                line = all_bin[random.randrange(1, max_n)]
                # assign
                gr[0:j, j], gr[j, (j+1):k] = line[0:j], line[j:k-1]
                # apply the same values symmetrically
                gr[(j+1):k, j], gr[j, 0:j] = line[j:k-1], line[0:j]
                # extend the certificate of the graph
                cert += list(line[j:k-1])
            certificate = str(cert)
            # check whether this graphlet has already been sampled
            f = certificate in graphlet_set
        graphlet_set.add(str(cert))
        graphlets[i] = pynauty.Graph(k, True, to_edge_dict_binary(gr))
        
        # add the graph to an isomorphism class
        if i==0:
            graph_bins[0] = [0]
            nbins+=1
        else:
            newbin = True
            for j in range(nbins):
                if pynauty.isomorphic(graphlets[graph_bins[j][0]],graphlets[i]):
                    newbin = False
                    graph_bins[j].append(i)
                    break
            if newbin:
                graph_bins[nbins] = [i]
                nbins+=1
                
    # Produce Pij Matrix:
    # Based on the idea that 
    # if Pij = 1 and Pjk = 1 then Pik=1
    P = np.zeros(shape = (nsamples,nsamples))
    for i in range(nbins):
        pair = list(graph_bins[i])
        for (i,j) in itertools.combinations(pair,2):
            P[i,j]=1

    # also Pij = Pji
    for i in range(0, nsamples):
        for j in range(i+1, nsamples):
            P[j, i] = P[i, j]

    return nsamples, graphlets, P, graph_bins, nbins
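
This function (and the kernel in Example 25) relies on a matrix_to_dict helper that is not included in the excerpt. A rough sketch under assumed semantics, keeping edge (i, j) whenever matrix[i, j] op value holds; the meaning of the final flag is a guess (both call sites pass False):

import operator

_OPS = {'==': operator.eq, '>': operator.gt}

def matrix_to_dict(matrix, op, value, n, symmetrize=False):
    # hypothetical reconstruction: adjacency dict of all pairs (i, j),
    # i != j, whose matrix entry satisfies the comparison against `value`
    cmp = _OPS[op]
    return {i: [j for j in range(n) if j != i and cmp(matrix[i, j], value)]
            for i in range(n)}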
Example 24
 def canonicalize(self):
     """
     This method will use `pynauty` library to generate a canonical label
     for the pattern. This pattern will be stored in `canonical_label` attribute.
     """
     # set a location for logging
     loc = f"{__file__} : Pattern.canonicalize()"
     # try importing pynauty to canonicalize the labeling
     try:
         import pynauty
     except ImportError:
         logger.warning(
             f"Importing pynauty failed, cannot canonicalize. Pattern equality checking is not guaranteed to work for highly symmetrical species.",
             loc=loc,
         )
         return
     # find how many vertices we need
     lmol = len(self.molecules)
     lcomp = sum([len(x.components) for x in self.molecules])
     node_cnt = lmol + lcomp
     # initialize our pynauty graph
     G = pynauty.Graph(node_cnt)
     # going to need to figure out bonding
     bond_dict = {}
     # save our IDs
     rev_grpIds = {}
     grpIds = {}
     # also pointers to each object
     node_ptrs = {}
     bond_node_ptrs = {}
      # we'll need to set up coloring
     colors = {}
     currId = 0
     mCopyId = 0
     cCopyId = 0
     # let's loop over everything in the pattern
     for molec in self.molecules:
         # setting colors
         color_id = (molec.name, None, None)
         if color_id in colors:
             colors[color_id].add(currId)
         else:
             colors[color_id] = set([currId])
         # saving IDs
         parent_id = (molec.name, None, mCopyId, cCopyId)
         if parent_id in grpIds:
             mCopyId += 1
             parent_id = (molec.name, None, mCopyId, cCopyId)
             grpIds[parent_id] = currId
         else:
             grpIds[parent_id] = currId
         rev_grpIds[currId] = parent_id
         node_ptrs[currId] = molec
         currId += 1
         # now looping over components
         for comp in molec.components:
             # saving component coloring
             comp_color_id = (molec.name, comp.name, comp.state)
             if comp_color_id in colors:
                 colors[comp_color_id].add(currId)
             else:
                 colors[comp_color_id] = set([currId])
             chid_id = (molec.name, comp.name, mCopyId, cCopyId)
             # connecting the component to the molecule
             G.connect_vertex(grpIds[parent_id], [currId])
             # saving component IDs
             if chid_id in grpIds:
                 cCopyId += 1
                 chid_id = (molec.name, comp.name, mCopyId, cCopyId)
                 grpIds[chid_id] = currId
             else:
                 grpIds[chid_id] = currId
             rev_grpIds[currId] = chid_id
             node_ptrs[currId] = comp
             currId += 1
             # saving bonds
             if len(comp._bonds) != 0:
                 for bond in comp._bonds:
                     if bond not in bond_dict.keys():
                         bond_dict[bond] = [chid_id]
                     else:
                         bond_dict[bond].append(chid_id)
     # now we got everything, we implement it in the graph
     for bond in bond_dict:
         # check if each of our bonds have exactly two end points
         if len(bond_dict[bond]) == 2:
             id1 = bond_dict[bond][0]
             id1 = grpIds[id1]
             id2 = bond_dict[bond][1]
             id2 = grpIds[id2]
             G.connect_vertex(id1, [id2])
         else:
             # raise a warning
             logger.warning(
                 f"Bond {bond} doesn't have exactly 2 end points, please check that you don't have any dangling bonds.",
                 loc=loc,
             )
     # we get our color sets
     color_sets = list(colors.values())
     # set vertex coloring
     G.set_vertex_coloring(color_sets)
     # save our graph
     self.nautyG = G
     # generate the canonical certificate for the entire graph
     self.canonical_certificate = pynauty.certificate(self.nautyG)
     # generate the canonical label for the entire graph
     # first, we give every node their canonical order
     canon_order = pynauty.canon_label(self.nautyG)
     for iordr, ordr in enumerate(canon_order):
         node_ptrs[ordr].canonical_order = iordr
     # relabeling bonds
     relabeling_bond_dict = {}
     for bond in bond_dict:
         # check if each of our bonds have exactly two end points
         if len(bond_dict[bond]) == 2:
             id1 = bond_dict[bond][0]
             id1 = grpIds[id1]
             comp1 = node_ptrs[id1]
             id2 = bond_dict[bond][1]
             id2 = grpIds[id2]
             comp2 = node_ptrs[id2]
             parent_order = min(
                 comp1.parent_molecule.canonical_order,
                 comp2.parent_molecule.canonical_order,
             )
             comp_order = min(comp1.canonical_order, comp2.canonical_order)
             relabeling_bond_dict[(parent_order, comp_order)] = (comp1,
                                                                 comp2)
         else:
             # raise a warning
             logger.warning(
                 f"Bond {bond} doesn't have exactly 2 end points, please check that you don't have any dangling bonds.",
                 loc=loc,
             )
     # this will give us the keys to canonically sorted bonds
     sorted_order = sorted(relabeling_bond_dict.keys())
     for ibond, sbond in enumerate(sorted_order):
         # now we add a canonical bond ID to each component
         c1, c2 = relabeling_bond_dict[sbond]
         if c1.canonical_bonds is None:
             c1.canonical_bonds = [str(ibond + 1)]
         else:
             c1.canonical_bonds.append(str(ibond + 1))
         if c2.canonical_bonds is None:
             c2.canonical_bonds = [str(ibond + 1)]
         else:
             c2.canonical_bonds.append(str(ibond + 1))
     # and now we can get the canonical label
     self.canonical_label = self.print_canonical()
Example 25
def graphlet_sampling_core(Gx, Gy, nsamples, graphlets, P, graph_bins, nbins, k=5):
    """ Applies the sampling random graph kernel as proposed
        by Shervashidze, Vishwanathan at 2009 (does not consider labels)
        
        Gx, Gy: Graph type objects
        k: the dimension of the given graphlets
        delta : confidence level (typically 0.05 or 0.1)
        epsilon : precision level (typically 0.05 or 0.1)
        a : number of isomorphism classes of graphlets
    """
    Gx.desired_format("adjacency")
    Gy.desired_format("adjacency")
    X, Y = Gx.adjacency_matrix, Gy.adjacency_matrix    

    # Calculate graphlet frequencies for each graph:
    # check whether each graphlet occurs as an induced k-vertex
    # submatrix (principal minor) of the adjacency matrix, and how often

    # Frequency vector for x
    fx = np.zeros(nsamples)
    
    # Frequency vector for y
    fy = np.zeros(nsamples)

    # small lambda transforming an adjacency matrix into the edge
    # dictionary needed to initialise a nauty graph
    to_edge_dict_real = lambda x: matrix_to_dict(x, '>', .0, k, False)

    # For all k-minors of each graph: test every induced k-vertex
    # submatrix against one representative graphlet per isomorphism bin
    for s in range(0, nbins):
        graphlet_idx = graph_bins[s][0]
        for c in itertools.combinations(range(X.shape[0]), k):
            idxs = list(c)
            X_m = X[idxs, :][:, idxs]
            if pynauty.isomorphic(pynauty.Graph(k, True, to_edge_dict_real(X_m)), graphlets[graphlet_idx]):
                fx[graphlet_idx] += 1
        for c in itertools.combinations(range(Y.shape[0]), k):
            idxs = list(c)
            Y_m = Y[idxs, :][:, idxs]
            if pynauty.isomorphic(pynauty.Graph(k, True, to_edge_dict_real(Y_m)), graphlets[graphlet_idx]):
                fy[graphlet_idx] += 1
                
    for s in range(0,nbins):
        for i in range(1,len(graph_bins[s])):
            idx = graph_bins[s][i]
            fx[idx]=fx[graph_bins[s][0]]
            fy[idx]=fy[graph_bins[s][0]]

    # normalize fx
    sfx = np.sum(fx,axis=0)
    if(sfx != 0):
        fx = np.divide(fx,sfx)
    
    # normalize fy
    sfy = np.sum(fy,axis=0)
    if(sfy != 0):
        fy = np.divide(fy,sfy)

    # Calculate the kernel
    kernel = np.dot(fx.T,np.dot(P,fy))

    return kernel
Example 26
edge_graph_square = {
    0: [3, 1],
    1: [0, 7],
    2: [3, 1],
    3: [2, 5],
    4: [2, 5],
    5: [4, 6],
    6: [0, 7],
    7: [4, 6],
}

if __name__ == "__main__":
    print(list_to_tuple_permutation([0, 1, 2, 4, 3, 5]))  # [[3, 4]]
    print(list_to_tuple_permutation([5, 4, 3, 2, 1,
                                     0]))  # [[0, 5], [1, 4], [2, 3]]
    print(list_to_tuple_permutation([1, 2, 3, 4, 5,
                                     0]))  # [[0, 1, 2, 3, 4, 5]]
    square_aut_grp = pynauty.autgrp(pynauty.Graph(8, True, edge_graph_square))
    print(str(list(map(list_to_tuple_permutation, square_aut_grp[0]))))


def platform_graph_to_num_dict(platform_graph):
    core_to_int = {}
    for i, core in enumerate(platform_graph):
        core_to_int[core] = i
    res = {}
    for core in platform_graph:
        vals = []
        for val in platform_graph[core]:
            vals.append(core_to_int[val[0]])
        res[core_to_int[core]] = set(vals)
    return res
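
A usage sketch with a hypothetical two-core platform graph; the values are iterables of (neighbor, label) tuples, matching the val[0] access above:

platform_graph = {
    "core0": [("core1", "link0")],
    "core1": [("core0", "link0")],
}
print(platform_graph_to_num_dict(platform_graph))  # {0: {1}, 1: {0}}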