def test_pna_conv():
    """PNAConv forward pass agrees across edge_index, SparseTensor and JIT paths."""
    node_feats = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    src, dst = edge_index
    edge_attr = torch.rand(src.size(0), 3)
    adj = SparseTensor(row=src, col=dst, value=edge_attr, sparse_sizes=(4, 4))

    conv = PNAConv(16, 32, aggregators, scalers,
                   deg=torch.tensor([0, 3, 0, 1]), edge_dim=3, towers=4)
    assert conv.__repr__() == 'PNAConv(16, 32, towers=4, edge_dim=3)'

    out = conv(node_feats, edge_index, edge_attr)
    assert out.size() == (4, 32)
    # Transposed sparse adjacency must reproduce the dense edge_index result.
    assert torch.allclose(conv(node_feats, adj.t()), out, atol=1e-6)

    # TorchScript path, dense edge_index variant.
    jit = torch.jit.script(conv.jittable('(Tensor, Tensor, OptTensor) -> Tensor'))
    assert torch.allclose(jit(node_feats, edge_index, edge_attr), out, atol=1e-6)

    # TorchScript path, SparseTensor variant.
    jit = torch.jit.script(conv.jittable('(Tensor, SparseTensor, OptTensor) -> Tensor'))
    assert torch.allclose(jit(node_feats, adj.t()), out, atol=1e-6)
# Example #2
def test_pna_conv_get_degree_histogram():
    """PNAConv.get_degree_histogram over a NeighborLoader and a multi-graph DataLoader."""
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 2, 3], [1, 2, 3, 2, 0, 0, 0]])
    loader = NeighborLoader(
        Data(num_nodes=5, edge_index=edge_index),
        num_neighbors=[-1],
        input_nodes=None,
        batch_size=5,
        shuffle=False,
    )
    expected = torch.tensor([1, 2, 1, 1])
    assert torch.equal(expected, PNAConv.get_degree_histogram(loader))

    # Per-graph degree histograms: [1, 2, 1, 1], [1, 1, 3], [0, 3, 2], [1, 1, 3]
    edge_indices = [
        torch.tensor([[0, 0, 0, 1, 1, 2, 3], [1, 2, 3, 2, 0, 0, 0]]),
        torch.tensor([[1, 1, 2, 2, 0, 3, 3], [2, 3, 3, 1, 1, 0, 2]]),
        torch.tensor([[1, 3, 2, 0, 0, 4, 2], [2, 0, 4, 1, 1, 0, 3]]),
        torch.tensor([[0, 1, 2, 4, 0, 1, 3], [2, 3, 3, 1, 1, 0, 2]]),
    ]
    graphs = [Data(num_nodes=5, edge_index=ei) for ei in edge_indices]

    loader = DataLoader(graphs, batch_size=1, shuffle=False)
    expected = torch.tensor([3, 7, 9, 1])  # sum of the per-graph histograms
    assert torch.equal(expected, PNAConv.get_degree_histogram(loader))
# Example #3
    def __init__(self):
        """Build a PNA GNN: one input conv, ``args.n_conv_layers`` hidden convs,
        and a geometrically tapered MLP head ending in LogSoftmax.

        Reads module-level ``args``, ``dataset`` and ``deg`` — TODO confirm
        they are defined before instantiation.
        """
        # Python-3 zero-argument form, consistent with the other models here.
        super().__init__()

        aggregators = ['mean', 'min', 'max', 'std']
        scalers = ['identity', 'amplification', 'attenuation']
        self.dropout = args.dropout
        self.patience = args.patience

        self.convs = ModuleList()
        # First conv maps raw node features to the hidden width (single tower).
        self.convs.append(
            PNAConv(in_channels=dataset.num_features,
                    out_channels=args.hidden,
                    aggregators=aggregators,
                    scalers=scalers,
                    deg=deg,
                    edge_dim=dataset.num_edge_features,
                    towers=1,
                    pre_layers=args.pretrans_layers,
                    post_layers=args.posttrans_layers,
                    divide_input=False))
        # Remaining convs keep the width constant but may use several towers.
        for _ in range(args.n_conv_layers):
            self.convs.append(
                PNAConv(in_channels=args.hidden,
                        out_channels=args.hidden,
                        aggregators=aggregators,
                        scalers=scalers,
                        deg=deg,
                        edge_dim=dataset.num_edge_features,
                        towers=args.towers,
                        pre_layers=args.pretrans_layers,
                        post_layers=args.posttrans_layers,
                        divide_input=False))

        # MLP widths taper geometrically from hidden size to num_classes.
        # Endpoints are pinned explicitly because geomspace + ceil can round
        # them away from the exact values.
        widths = [int(w) for w in np.ceil(
            np.geomspace(args.hidden, dataset.num_classes,
                         args.mlp_layers + 1))]
        widths[0] = args.hidden
        widths[-1] = dataset.num_classes

        head = []
        for i in range(args.mlp_layers):
            head.append(Linear(widths[i], widths[i + 1]))
            if i < args.mlp_layers - 1:
                head.append(Dropout(p=self.dropout))
            # NOTE(review): LogSoftmax() without an explicit `dim` is deprecated
            # in PyTorch; left unchanged to preserve behavior — confirm the
            # intended dimension and pass it explicitly.
            head.append(LogSoftmax() if i == args.mlp_layers - 1 else ReLU())

        self.mlp = Sequential(*head)
# Example #4
    def __init__(self):
        """Four-layer PNA network over embedded nodes/edges with a scalar-output MLP head."""
        super().__init__()

        self.node_emb = Embedding(21, 75)
        self.edge_emb = Embedding(4, 50)

        aggregators = ['mean', 'min', 'max', 'std']
        scalers = ['identity', 'amplification', 'attenuation']

        self.convs = ModuleList()
        self.batch_norms = ModuleList()
        for _ in range(4):
            # Each layer: 75-dim in/out PNA conv followed by batch norm.
            self.convs.append(
                PNAConv(in_channels=75, out_channels=75,
                        aggregators=aggregators, scalers=scalers, deg=deg,
                        edge_dim=50, towers=5, pre_layers=1, post_layers=1,
                        divide_input=False))
            self.batch_norms.append(BatchNorm(75))

        # Readout head: 75 -> 50 -> 25 -> 1.
        self.mlp = Sequential(
            Linear(75, 50), ReLU(),
            Linear(50, 25), ReLU(),
            Linear(25, 1),
        )
# Example #5
    def __init__(self, *args, **kwargs):
        """Configurable PNA model; all hyperparameters arrive through ``kwargs``."""
        super().__init__(*args, **kwargs)

        self.save_hyperparameters()

        # Rebinds the local name only; hyperparameters saved above are the raw ones.
        kwargs = self.sanetize_kwargs(kwargs)

        node_dim = kwargs["node_dim"]
        edge_dim = kwargs["edge_dim"]

        self.node_emb = Embedding(kwargs["node_vocab"], node_dim)
        self.edge_emb = Embedding(kwargs["edge_vocab"], edge_dim)

        self.convs = ModuleList()
        self.batch_norms = ModuleList()
        for _ in range(kwargs["num_layers"]):
            self.convs.append(
                PNAConv(
                    in_channels=node_dim,
                    out_channels=node_dim,
                    aggregators=kwargs["aggregators"],
                    scalers=kwargs["scalers"],
                    deg=torch.tensor(kwargs["deg"]),
                    edge_dim=edge_dim,
                    towers=kwargs["towers"],
                    pre_layers=kwargs["pre_layers"],
                    post_layers=kwargs["post_layers"],
                    divide_input=kwargs["divide_input"],
                ))
            self.batch_norms.append(BatchNorm(node_dim))

        # Classification head: node_dim -> edge_dim -> hidden -> num_classes.
        self.mlp = Sequential(
            Linear(node_dim, edge_dim),
            ReLU(),
            Linear(edge_dim, kwargs["hidden_channels"]),
            ReLU(),
            Linear(kwargs["hidden_channels"], kwargs["num_classes"]),
        )
# Example #6
 def make_graph_layer(self, hidden_dim, layer_idx):
     """Build one width-preserving PNA conv layer.

     ``layer_idx`` is accepted for interface compatibility but unused — every
     layer is configured identically.
     """
     pna_config = dict(
         aggregators=["mean", "min", "max", "std"],
         scalers=["identity", "amplification", "attenuation"],
         deg=self.deg,
         towers=4,
         divide_input=True,
     )
     return PNAConv(hidden_dim, hidden_dim, **pna_config)
def run(args: argparse.ArgumentParser) -> None:
    """Benchmark GNN inference over the cross-product of configurations.

    For each dataset in ``args.datasets`` and each supported model, times
    full inference for every (batch size, layer count, hidden size)
    combination, optionally under experimental mode and/or the profiler.

    NOTE(review): despite the annotation, ``args`` is read as a parsed
    namespace (``args.datasets`` etc.), not an ``ArgumentParser`` — the
    annotation looks wrong but is left untouched here.
    """

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('BENCHMARK STARTS')
    for dataset_name in args.datasets:
        assert dataset_name in supported_sets.keys(
        ), f"Dataset {dataset_name} isn't supported."
        print(f'Dataset: {dataset_name}')
        dataset, num_classes = get_dataset(dataset_name, args.root,
                                           args.use_sparse_tensor)
        data = dataset.to(device)
        # ogbn-mag is treated as heterogeneous; inference starts from 'paper' nodes.
        hetero = True if dataset_name == 'ogbn-mag' else False
        mask = ('paper', None) if dataset_name == 'ogbn-mag' else None
        degree = None  # PNA degree histogram, computed lazily once per dataset.

        inputs_channels = data[
            'paper'].num_features if dataset_name == 'ogbn-mag' \
            else dataset.num_features

        for model_name in args.models:
            if model_name not in supported_sets[dataset_name]:
                print(f'Configuration of {dataset_name} + {model_name} '
                      f'not supported. Skipping.')
                continue
            print(f'Evaluation bench for {model_name}:')

            for batch_size in args.eval_batch_sizes:
                # Homogeneous graphs: one layer-wise loader per batch size,
                # reused across all layer counts (num_neighbors=[-1] takes
                # all neighbors at each hop).
                if not hetero:
                    subgraph_loader = NeighborLoader(
                        data,
                        num_neighbors=[-1],  # layer-wise inference
                        input_nodes=mask,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=args.num_workers,
                    )

                for layers in args.num_layers:
                    # Heterogeneous graphs: rebuild the loader per depth so the
                    # sampled neighborhood matches the model's layer count.
                    if hetero:
                        subgraph_loader = NeighborLoader(
                            data,
                            num_neighbors=[args.hetero_num_neighbors] *
                            layers,  # batch-wise inference
                            input_nodes=mask,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=args.num_workers,
                        )

                    for hidden_channels in args.num_hidden_channels:
                        print('----------------------------------------------')
                        print(
                            f'Batch size={batch_size}, '
                            f'Layers amount={layers}, '
                            f'Num_neighbors={subgraph_loader.num_neighbors}, '
                            f'Hidden features size={hidden_channels}')
                        params = {
                            'inputs_channels': inputs_channels,
                            'hidden_channels': hidden_channels,
                            'output_channels': num_classes,
                            'num_heads': args.num_heads,
                            'num_layers': layers,
                        }

                        # PNA requires the in-degree histogram; compute it once
                        # per dataset and reuse it for later configurations.
                        if model_name == 'pna':
                            if degree is None:
                                degree = PNAConv.get_degree_histogram(
                                    subgraph_loader)
                                print(f'Calculated degree for {dataset_name}.')
                            params['degree'] = degree

                        model = get_model(
                            model_name, params,
                            metadata=data.metadata() if hetero else None)
                        model = model.to(device)
                        model.eval()

                        # Untimed warm-up passes before the measured run.
                        for _ in range(args.warmup):
                            model.inference(subgraph_loader, device,
                                            progress_bar=True)
                        if args.experimental_mode:
                            with torch_geometric.experimental_mode():
                                with timeit():
                                    model.inference(subgraph_loader, device,
                                                    progress_bar=True)
                        else:
                            with timeit():
                                model.inference(subgraph_loader, device,
                                                progress_bar=True)

                        # Optional profiler pass; the trace file is renamed so
                        # its name encodes the full configuration.
                        if args.profile:
                            with torch_profile():
                                model.inference(subgraph_loader, device,
                                                progress_bar=True)
                            rename_profile_file(
                                model_name, dataset_name, str(batch_size),
                                str(layers), str(hidden_channels),
                                str(subgraph_loader.num_neighbors))