Example No. 1
def test_subgraph_convert():
    G = nx.complete_graph(5)

    edge_index = from_networkx(G).edge_index
    sub_edge_index_1, _ = subgraph([0, 1, 3, 4], edge_index,
                                   relabel_nodes=True)

    sub_edge_index_2 = from_networkx(G.subgraph([0, 1, 3, 4])).edge_index

    assert sub_edge_index_1.tolist() == sub_edge_index_2.tolist()
Example No. 2
def test_from_networkx_subgraph_convert():
    import networkx as nx

    G = nx.complete_graph(5)

    edge_index = from_networkx(G).edge_index
    sub_edge_index_1, _ = subgraph([0, 1, 3, 4],
                                   edge_index,
                                   relabel_nodes=True)

    sub_edge_index_2 = from_networkx(G.subgraph([0, 1, 3, 4])).edge_index

    assert sub_edge_index_1.tolist() == sub_edge_index_2.tolist()
Example No. 3
def get_data_from_graph(g: nx.DiGraph) -> Data:
    # graph_spicy = nx.to_scipy_sparse_matrix(g, format='coo')
    # edge_index = torch.tensor([graph_spicy.row, graph_spicy.col], dtype=torch.long)
    # x =  torch.tensor(graph_spicy.data, dtype=torch.float)
    #
    # data = Data(x=x,edge_index=edge_index)
    return from_networkx(g)
Example No. 4
    def __init__(self, ckpt_path, newModel=False):
        self.dualGraph = readGraph()
        file_name = "dualGraphNodes.pkl"
        open_file = open(file_name, "wb")
        pickle.dump(list(self.dualGraph.nodes), open_file)
        open_file.close()
        #print(list(self.dualGraph.nodes))
        self.data = from_networkx(self.dualGraph)
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        open_file = open("edge_index.pkl", "wb")
        pickle.dump(self.data.edge_index, open_file)
        open_file.close()
        open_file = open("edge_index.pkl", "rb")
        edge_index = pickle.load(open_file)
        open_file.close()
        self.model = Node2Vec(edge_index,
                              embedding_dim=32,
                              walk_length=20,
                              context_size=10,
                              walks_per_node=10,
                              num_negative_samples=1,
                              p=1,
                              q=1,
                              sparse=True).to(self.device)

        self.loader = self.model.loader(batch_size=128,
                                        shuffle=True,
                                        num_workers=0)
        self.optimizer = torch.optim.SparseAdam(list(self.model.parameters()),
                                                lr=0.01)
        if newModel:
            self.train(epochs=20)
            self.saveTo(ckpt_path)
        else:
            self.loadFrom(ckpt_path)
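
The train method referenced above is not part of this excerpt. Below is a minimal, standalone sketch of the standard PyTorch Geometric Node2Vec training loop that such a method would typically wrap; the edge_index is a hypothetical toy graph, and torch_cluster is assumed to be installed for the random-walk sampler:

import torch
from torch_geometric.nn import Node2Vec

edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]])  # toy 4-node path
device = 'cuda' if torch.cuda.is_available() else 'cpu'

model = Node2Vec(edge_index, embedding_dim=32, walk_length=20, context_size=10,
                 walks_per_node=10, num_negative_samples=1, p=1, q=1,
                 sparse=True).to(device)
loader = model.loader(batch_size=128, shuffle=True, num_workers=0)
optimizer = torch.optim.SparseAdam(list(model.parameters()), lr=0.01)

model.train()
for epoch in range(5):
    total_loss = 0
    for pos_rw, neg_rw in loader:
        optimizer.zero_grad()
        loss = model.loss(pos_rw.to(device), neg_rw.to(device))  # skip-gram loss on sampled walks
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f'epoch {epoch}: loss {total_loss / len(loader):.4f}')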
Example No. 5
    def observation(self) -> Data:
        """
        Returns a PyTorch Geometric graph object as an observation of the AF,
        where edge_input contains features for whether the attack exists,
        whether the opposite attack exists, whether a self-attack exists,
        and whether an edge has been flipped.
        Node features are set to indicate which arguments should be enforced.
        """

        graph = self.state.enforcement_representation
        if self.flipped_edges:
            for u, v in self.flipped_edges:
                graph.edges[u, v]["edge_flipped"] = 1

        data: Data = from_networkx(graph)
        data["edge_input"] = torch.cat(
            [
                data[key].unsqueeze(1)
                for key in ["edge_exists", "edge_opposite", "edge_self", "edge_flipped"]
            ],
            dim=1,
        )

        data.edge_index, data["edge_input"] = coalesce(
            data.edge_index, data["edge_input"], data.num_nodes, data.num_nodes
        )

        if self.task in [STRICT, NONSTRICT]:
            data["node_input"][list(self.problem.desired_extension)] = 1
        elif self.task in [CRED, SCEPT]:
            data["node_input"][list(self.problem.positive)] = 1
            data["node_input"][list(self.problem.negative)] = -1
        return data
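
The edge_input construction above is simply a column-wise concatenation of several 1-D edge attributes produced by from_networkx; a tiny standalone illustration with dummy tensors:

import torch

edge_exists = torch.tensor([1, 1, 0])
edge_opposite = torch.tensor([1, 0, 0])
edge_self = torch.tensor([0, 0, 1])
edge_flipped = torch.tensor([0, 1, 0])

edge_input = torch.cat(
    [t.unsqueeze(1) for t in (edge_exists, edge_opposite, edge_self, edge_flipped)],
    dim=1)
print(edge_input.shape)  # torch.Size([3, 4]): one row of features per edge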
Example No. 6
def graph_torch(G_networkx):
    """Create the Pytorch Geometric graph with 5 simple node features:
    the node degree, the min and max neighbor degree,
    the mean and std of the neighbors degree.

    Parameters:
    ----------
    G_networkx: networkx graph

    Returns:
    -------
    G_torch: Pytorch Geometric Data object"""

    # transform the networkx object to pytorch geometric data
    G_torch = from_networkx(G_networkx).to(device)
    G_torch.edge_index = G_torch.edge_index.type(torch.LongTensor).to(device)
    # Add features
    with torch.no_grad():
        add_features = LocalDegreeProfile()
        G_torch = add_features(G_torch)
    # if there is a nan, put it to 0
    G_torch.x[torch.isnan(G_torch.x)] = 0
    # normalize the first 4 features by the maximum possible degree in the graph
    n = len(G_networkx)
    G_torch.x[:, :4] /= n - 1
    # normalize the std of degree by the max std possible
    G_torch.x[:, -1] /= (n - 1) / 2

    return G_torch
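
A quick usage sketch for the helper above, with a hypothetical input graph; it assumes the imports the snippet relies on (torch, from_networkx, LocalDegreeProfile) and a global device are in scope:

import networkx as nx

G = nx.gnp_random_graph(20, 0.3, seed=0)  # hypothetical small input graph
G_torch = graph_torch(G)
print(G_torch.x.shape)  # torch.Size([20, 5]): degree, min/max/mean/std of neighbor degrees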
Example No. 7
def test_from_networkx_without_edges():
    graph = nx.Graph()
    graph.add_node(1)
    graph.add_node(2)
    data = from_networkx(graph)
    assert len(data) == 1
    assert data.edge_index.size() == (2, 0)
Example No. 8
def to_line_graph(data: Data, directed: bool = True) -> Data:
    """
    Convert a graph G to its corresponding line-graph L(G)
    Args:
        data: a torch_geometric Data object representing a graph
        directed: whether the original graph is directed or undirected
    """
    original_edge_attrs = data.edge_attr
    original_edge_names = [
        (from_.item(), to_.item())
        for from_, to_ in zip(data.edge_index[0, :], data.edge_index[1, :])
    ]
    original_edge_to_attr = {
        e: attr
        for e, attr in zip(original_edge_names, original_edge_attrs)
    }
    ctor = nx.DiGraph if directed else nx.Graph
    G = to_networkx(data,
                    node_attrs=['x'],
                    edge_attrs=['edge_attr'],
                    to_undirected=not directed)
    line_graph = nx.line_graph(G, create_using=ctor)
    res_data = from_networkx(line_graph)

    # Copy the original edge attributes onto the line-graph nodes
    res_data.x = torch.stack(
        [original_edge_to_attr[e] for e in line_graph.nodes])
    res_data.y = data.y
    return res_data
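
A minimal usage sketch for to_line_graph, assuming the imports the snippet relies on (torch, networkx as nx, and Data, to_networkx, from_networkx from torch_geometric):

import torch
from torch_geometric.data import Data

# a directed 3-node path with one feature vector per node and per edge
data = Data(x=torch.randn(3, 4),
            edge_index=torch.tensor([[0, 1], [1, 2]]),
            edge_attr=torch.randn(2, 6),
            y=torch.tensor([0]))
line_data = to_line_graph(data, directed=True)
print(line_data.x.shape)  # one line-graph node per original edge: torch.Size([2, 6])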
Example No. 9
    def _create_pyg_graphs(self):

        typer.echo("Preprocessing input networks...")

        # Extend all graphs with nodes in `self.union` and add self-loops
        # to all nodes.
        new_graphs = [nx.Graph() for _ in self.graphs]
        for G, nG in zip(self.graphs, new_graphs):
            nG.add_nodes_from(self.union)
            nG.add_weighted_edges_from([(s, t, weight["weight"])
                                        for (s, t,
                                             weight) in G.edges(data=True)])
            nG.remove_edges_from(
                nx.selfloop_edges(nG))  # remove existing selfloops first
            nG.add_weighted_edges_from([(n, n, 1.0) for n in nG.nodes()])
        self.graphs = new_graphs

        pyg_graphs = [from_networkx(G) for G in self.graphs]
        for G in pyg_graphs:
            G.edge_weight = G.weight
            del G.weight

        to_sparse_tensor = ToSparseTensor(remove_edge_index=False)
        for G in pyg_graphs:
            to_sparse_tensor(G)

        pyg_graphs = [t.to(Device()) for t in pyg_graphs]

        return pyg_graphs
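
For a weighted NetworkX graph, from_networkx exposes the per-edge 'weight' attribute as data.weight, which the method above renames to edge_weight before building the sparse adjacency. A standalone sketch of that conversion (assumes torch_sparse is installed for ToSparseTensor):

import networkx as nx
from torch_geometric.utils import from_networkx
from torch_geometric.transforms import ToSparseTensor

G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 0.5), (1, 2, 2.0), (0, 0, 1.0)])  # includes a self-loop
data = from_networkx(G)
data.edge_weight = data.weight
del data.weight
data = ToSparseTensor(remove_edge_index=False)(data)
print(data.adj_t)  # SparseTensor whose values come from edge_weight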
Example No. 10
def preprocess_input_graph(G, labels, normalize_adj=False):
    """ Load an existing graph to be converted for the experiments.
	Args:
		G: Networkx graph to be loaded.
		labels: Associated node labels.
		normalize_adj: Should the method return a normalized adjacency matrix.
	Returns:
		A dictionary containing adjacency, node features and labels
	"""
    # Define adj matrix
    adj = np.array(nx.to_numpy_matrix(G))
    if normalize_adj:
        sqrt_deg = np.diag(1.0 /
                           np.sqrt(np.sum(adj, axis=0, dtype=float).squeeze()))
        adj = np.matmul(np.matmul(sqrt_deg, adj), sqrt_deg)

    # Convert to our adj matrix type
    pyg_G = from_networkx(G)
    adj = pyg_G.edge_index
    f = pyg_G.feat

    # Define labels
    labels = torch.tensor(labels)

    # Add batch dim
    # adj = np.expand_dims(adj, axis=0)
    # f = np.expand_dims(f, axis=0)
    # labels = np.expand_dims(labels, axis=0)

    return f, adj, labels
Example No. 11
    def process(self):
        i = 0
        processedValues = self.processed_file_names()
        classValues = np.loadtxt(self.root + '/dayClasses.txt')

        # Read data from `raw_path`.
        for raw_path in self.raw_paths:

            # if file is already processed, continue.
            if f'data_{i}.pt' in processedValues:
                print(f"data_{i}.pt already processed.")
                i += 1
                continue

            print('raw path', raw_path)
            print('root', self.root)

            index = int(raw_path.split('_')[1].split('.')[0])
            classVal = int(classValues[index])

            print("index", index, '\t Class: ', classVal)
            data = nx.read_gml(raw_path)
            data = utils.from_networkx(data)
            data['y'] = [classVal]
            print(data, data['y'])
            torch.save(
                data, os.path.join(self.processed_dir, 'data_{}.pt'.format(i)))
            i += 1
Example No. 12
    def process(self):
        # Read data into huge `Data` list.
        data_list = []
        for i in range(self.num_samples):

            y = np.random.randint(len(self.targets))
            probs = [
                [self.in_probs[0], self.targets[y]],
                [self.targets[y], self.in_probs[1]],
            ]
            block_sizes = self.sizes[np.random.randint(len(self.sizes))]
            G = nx.stochastic_block_model(block_sizes, probs, seed=i + 1)
            x = torch.zeros((sum(block_sizes), self.n_features))

            for block_idx, partition in enumerate(G.graph["partition"]):
                partition = np.array(list(partition))
                feat = np.random.choice(self.n_features,
                                        len(partition),
                                        p=self.feat_probs[block_idx])
                x[partition, feat] = 1

            data = from_networkx(G)
            data["x"] = x.float()
            data["y"] = torch.tensor([y])
            data_list.append(data)

        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]

        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
Example No. 13
    def process(self):
        # Read data into huge `Data` list.
        data_list = []
        for idx, patient in enumerate(self.patients):

            y = 1 if "Schizophrenia" in patient else 0

            # Load the adjacency matrix into a pandas DataFrame
            adj = pd.read_csv(patient, skiprows=1, header=None, index_col=0)
            G = nx.from_pandas_adjacency(adj)

            if idx in self.test and y == 0:
                # Thin the test graphs of one class
                G.remove_nodes_from(
                    np.random.random_integers(1, len(G), int(self.p * len(G))))
                G.remove_nodes_from(list(nx.isolates(G)))

            G = nx.convert_node_labels_to_integers(G)

            data = from_networkx(G)
            data["x"] = torch.ones((len(G), 1))
            data["y"] = torch.tensor([y])
            data_list.append(data)

        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]

        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
Example No. 14
def test_from_networkx_with_same_node_and_edge_attributes():
    G = nx.Graph()
    G.add_nodes_from([(0, {'age': 1}), (1, {'age': 6}), (2, {'age': 5})])
    G.add_edges_from([(0, 1, {'age': 2}), (1, 2, {'age': 7})])

    data = from_networkx(G)
    assert len(data) == 4
    assert data.age.tolist() == [1, 6, 5]
    assert data.num_nodes == 3
    assert data.edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]
    assert data.edge_age.tolist() == [2, 2, 7, 7]

    data = from_networkx(G, group_node_attrs=all, group_edge_attrs=all)
    assert len(data) == 3
    assert data.x.tolist() == [[1], [6], [5]]
    assert data.edge_index.tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]
    assert data.edge_attr.tolist() == [[2], [2], [7], [7]]
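
In recent PyTorch Geometric versions, group_node_attrs and group_edge_attrs also accept an explicit list of attribute names instead of all; a short sketch continuing with the same graph G and from_networkx as in the test above:

data = from_networkx(G, group_node_attrs=['age'], group_edge_attrs=['age'])
assert data.x.tolist() == [[1], [6], [5]]
assert data.edge_attr.tolist() == [[2], [2], [7], [7]]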
Example No. 15
def test_vice_versa_convert():
    G = nx.complete_graph(5)
    assert G.is_directed() is False
    data = from_networkx(G)
    assert data.is_directed() is False
    G = to_networkx(data)
    assert G.is_directed() is True
    G = nx.to_undirected(G)
    assert G.is_directed() is False
Example No. 16
def test_from_networkx_without_edges():
    import networkx as nx

    graph = nx.Graph()
    graph.add_node(1)
    graph.add_node(2)
    data = from_networkx(graph)
    assert len(data) == 2
    assert data.edge_index.size() == (2, 0)
    assert data.num_nodes == 2
Example No. 17
def dataloader():
    G = nx.read_gpickle("spatial_graph.gpickle")
    data = from_networkx(G)
    # print('here')
    # print(data.num_nodes)
    # print(data.num_edges)
    # print(data.num_node_features)
    # print(data)
    # print(data['x'])
    # print(data['y'])
    return data, G
Example No. 18
def test_from_networkx_non_numeric_labels():
    graph = nx.Graph()
    graph.add_node('4')
    graph.add_node('2')
    graph.add_edge('4', '2')
    for node in graph.nodes():
        graph.nodes[node]['x'] = node
    data = from_networkx(graph)
    assert len(data) == 2
    assert data.x == ['4', '2']
    assert data.edge_index.tolist() == [[0, 1], [1, 0]]
Example No. 19
def test_from_networkx_non_consecutive():
    graph = nx.Graph()
    graph.add_node(4)
    graph.add_node(2)
    graph.add_edge(4, 2)
    for node in graph.nodes():
        graph.nodes[node]['x'] = node

    data = from_networkx(graph)
    assert len(data) == 2
    assert data.x.tolist() == [4, 2]
    assert data.edge_index.tolist() == [[0, 1], [1, 0]]
Example No. 20
    def __getitem__(self, idx):
        if type(idx) is slice:
            res = [
                self.__getitem__(i) for i in range(
                    *list(filter(None, [idx.start, idx.stop, idx.step])))
            ]
            return res
        fname = os.path.join(self.datadir,
                             self.prefix + '_' + f"{idx:06}" + '.xyz')
        dat = process(fname)
        nx_graph = rdkit_process(dat)
        return from_networkx(nx_graph)
Example No. 21
def test_from_networkx_inverse():
    graph = nx.Graph()
    graph.add_node(3)
    graph.add_node(2)
    graph.add_node(1)
    graph.add_node(0)
    graph.add_edge(3, 1)
    graph.add_edge(2, 1)
    graph.add_edge(1, 0)

    data = from_networkx(graph)
    assert len(data) == 1
    assert data.edge_index.tolist() == [[0, 1, 2, 2, 2, 3], [2, 2, 0, 1, 3, 2]]
Example No. 22
def state_to_vector(graph_state: MiniWoBGraphState, prior_actions: dict):
    e_dom = encode_dom_graph(graph_state.dom_graph)
    e_fields = encode_fields(graph_state.fields)
    encode_field_actions_onto_encoded_dom_graph(e_dom, e_fields,
                                                graph_state.fields,
                                                graph_state.utterance)
    encode_prior_actions_onto_encoded_dom_graph(e_dom, prior_actions,
                                                graph_state.fields)
    # for node_id in e_dom.nodes:
    #    print(node_id, e_dom.nodes[node_id])
    e_dom = nx.convert_node_labels_to_integers(e_dom)
    d = from_networkx(e_dom)
    return d
Example No. 23
    def __getitem__(self, idx) -> Tuple[ArgumentationFramework, Data]:
        """Return the AF and the Data representation of the idx-th AF."""
        af = self.get_af(idx)
        graph = af.graph_representation(self.representation)
        data = from_networkx(graph)
        data["idx"] = idx
        data["edge_input"] = torch.cat(
            [
                data[key].unsqueeze(1)
                for key in ["edge_exists", "edge_opposite", "edge_self"]
            ],
            dim=1,
        )
        return af, data
Example No. 24
def test_from_networkx():
    x = torch.randn(2, 8)
    pos = torch.randn(2, 3)
    edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]])
    edge_attr = torch.randn(edge_index.size(1))
    perm = torch.tensor([0, 2, 1])
    data = Data(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr)
    G = to_networkx(data, node_attrs=['x', 'pos'], edge_attrs=['edge_attr'])
    data = from_networkx(G)
    assert len(data) == 4
    assert data.x.tolist() == x.tolist()
    assert data.pos.tolist() == pos.tolist()
    assert data.edge_index.tolist() == edge_index[:, perm].tolist()
    assert data.edge_attr.tolist() == edge_attr[perm].tolist()
Example No. 25
def _edge_removal(graph):
  actions = set()

  _, num_edges = graph.edge_index.size()
  edges = graph.edge_index.t().numpy().tolist()
  for i in range(num_edges//2):
    tmp = to_networkx(graph, to_undirected=True)
    tmp.remove_edge(*edges[i])
    tmp = from_networkx(tmp)
    a = graph.clone()
    a.edge_index = tmp.edge_index

    actions.add(a)

  return actions
Example No. 26
def test_from_networkx():
    x = torch.Tensor([[1, 2], [3, 4]])
    pos = torch.Tensor([[0, 0], [1, 1]])
    edge_index = torch.tensor([[0, 1, 0], [1, 0, 0]])
    edge_attr = torch.Tensor([1, 2, 3])
    data = Data(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr)
    G = to_networkx(data, node_attrs=['x', 'pos'], edge_attrs=['edge_attr'])

    data = from_networkx(G)
    assert len(data) == 4
    assert data.x.tolist() == x.tolist()
    assert data.pos.tolist() == pos.tolist()
    edge_index, edge_attr = coalesce(data.edge_index, data.edge_attr, 2, 2)
    assert edge_index.tolist() == [[0, 0, 1], [0, 1, 0]]
    assert edge_attr.tolist() == [3, 1, 2]
Example No. 27
    def process(self):
        i = 1
        for raw_path in self.raw_paths:
            nxg = preprocess_qm9.rdkit_process(
                preprocess_qm9.process(raw_path))
            data = from_networkx(nxg)

            if self.pre_filter is not None and not self.pre_filter(data):
                continue

            if self.pre_transform is not None:
                data = self.pre_transform(data)

            torch.save(data, os.path.join(self.processed_dir,
                                          '{}.pt'.format(i)))
            i += 1
Example No. 28
    def process(self):
        # Read data into huge `Data` list.
        with open(Path(self.root) / self.raw_file_names[0], 'rb') as f:
            aspect_to_aspect_graph = pickle.load(f)
        data = from_networkx(aspect_to_aspect_graph)
        nodes = aspect_to_aspect_graph.nodes()
        nodes_mapping = dict(zip(nodes, range(0, aspect_to_aspect_graph.number_of_nodes())))

        aspect_embeddings = [self.nlp(aspect).vector for aspect in tqdm(nodes, desc='Generating aspects embeddings...')]
        node_features = torch.tensor(aspect_embeddings, dtype=torch.float)

        data.nodes_mapping = nodes_mapping
        data.x = node_features
        data_list = [data]
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
Example No. 29
    def process(self):
        """
        Processes two pickle files into a series of graph files. The first pickle file is the
        simulation/experimental data that contains the light seen by the PMT and the true position
        of the event (when given a simulation). The second pickle file is the graph structure to
        use for the GCNN.
        """

        # Graph structure get
        with open(os.path.join(self.root_dir, "data", self.graph_name),
                  'rb') as fn:
            input_graph = pk.load(fn)
        torch_graph = from_networkx(input_graph)

        # Starting to process the data files.
        i = 0
        for data_path in self.raw_paths:
            with open(data_path, 'rb') as fn:
                data_contents = pk.load(fn)
            self.content_size += len(data_contents)
            for event_data in data_contents:
                # Light detected by each PMT
                x = event_data[
                    'area_per_channel'][:127]  # Only getting the top PMTs
                x = np.reshape(x, (len(x), 1))
                x = np.hstack(
                    (x, np.array(torch_graph.pos))).astype(np.float32)

                # True position of this event
                if self.simulation:
                    y = np.reshape(event_data['true_pos'],
                                   (1, len(event_data['true_pos'])))
                else:  # Not a simulation -> no truth
                    y = None

                processed_data = Data(x=torch.tensor(x),
                                      edge_index=torch_graph.edge_index,
                                      y=torch.tensor(y) if y is not None else None,
                                      pos=torch_graph.pos)

                # Saving the contents into the processed directory
                path_to_proc = os.path.join(self.processed_dir, self.proc_anem)
                if not os.path.isdir(path_to_proc):
                    os.makedirs(path_to_proc)
                torch.save(processed_data,
                           os.path.join(path_to_proc, 'graph_{}.pt'.format(i)))
                i += 1