Example #1
import os

import torch

from torch_geometric.nn import GraphSAGE
from torch_geometric.profile import (
    rename_profile_file,
    timeit,
    torch_profile,
)


# `get_dataset` is a pytest fixture; calling it by name returns the dataset.
def test_torch_profile(get_dataset):
    dataset = get_dataset(name='PubMed')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    data = dataset[0].to(device)
    model = GraphSAGE(dataset.num_features,
                      hidden_channels=64,
                      num_layers=3,
                      out_channels=dataset.num_classes).to(device)
    model.eval()

    @timeit()
    def inference_e2e(model, data):
        model(data.x, data.edge_index)

    @torch_profile()
    def inference_profile(model, data):
        model(data.x, data.edge_index)

    for epoch in range(3):
        inference_e2e(model, data)
        inference_profile(model, data)
    rename_profile_file('test_profile')
    assert os.path.exists('profile-test_profile.json')
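Both profiling helpers can also be used as context managers, which the benchmark fragments below rely on. A minimal sketch reusing the names from the test above; the trace file naming follows the profile-<suffix>.json pattern checked by the assert:

with timeit():  # measures and reports end-to-end wall time
    model(data.x, data.edge_index)

with torch_profile():  # records a torch.profiler trace to disk
    model(data.x, data.edge_index)

rename_profile_file('demo')  # -> profile-demo.json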
Example #2
from itertools import product

from torch_geometric.profile import rename_profile_file, timeit, torch_profile


# `datasets`, `nets`, `layers`, `hiddens`, `args`, `device`,
# `prepare_dataloader` and `inference_run` are module-level names defined
# elsewhere in this benchmark script.
def run_inference():
    for dataset_name, Net in product(datasets, nets):
        dataset, _, _, test_loader = prepare_dataloader(dataset_name)

        for num_layers, hidden in product(layers, hiddens):
            print(f'--\n{dataset_name} - {Net.__name__} - '
                  f'{num_layers} - {hidden}')

            model = Net(dataset, num_layers, hidden).to(device)

            # Only the final epoch is timed; earlier epochs serve as warm-up.
            for epoch in range(1, args.epochs + 1):
                if epoch == args.epochs:
                    with timeit():
                        inference_run(model, test_loader)
                else:
                    inference_run(model, test_loader)

            if args.profile:
                with torch_profile():
                    inference_run(model, test_loader)
                rename_profile_file(Net.__name__, dataset_name,
                                    str(num_layers), str(hidden))
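The fragment above assumes an inference_run helper. A minimal sketch of what such a helper might look like; the name and call signature come from the fragment, while the body is an assumption:

import torch


@torch.no_grad()
def inference_run(model, loader):
    # Hypothetical body: one full pass over the test loader in eval mode.
    model.eval()
    device = next(model.parameters()).device
    for data in loader:
        model(data.to(device))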
Example #3
        # (fragment: tail of Net.forward; `x`, `pos`, `batch`, `radius` and
        # `edge_index` are produced by the omitted first conv block)
        pseudo = (pos[edge_index[1]] - pos[edge_index[0]]) / (2 * radius) + 0.5
        pseudo = pseudo.clamp(min=0, max=1)
        x = F.elu(self.conv2(x, edge_index, pseudo))

        idx = fps(pos, batch, ratio=0.25)
        x, pos, batch = x[idx], pos[idx], batch[idx]

        radius = 1
        edge_index = radius_graph(pos, r=radius, batch=batch)
        pseudo = (pos[edge_index[1]] - pos[edge_index[0]]) / (2 * radius) + 0.5
        pseudo = pseudo.clamp(min=0, max=1)
        x = F.elu(self.conv3(x, edge_index, pseudo))

        x = global_mean_pool(x, batch)

        x = F.elu(self.lin1(x))
        x = F.elu(self.lin2(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin3(x)
        return F.log_softmax(x, dim=-1)


train_dataset, test_dataset = get_dataset(num_points=1024)
model = Net(train_dataset.num_classes)
run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr,
    args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay,
    args.inference, args.profile)

if args.profile:
    rename_profile_file('points', SplineConv.__name__)
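SplineConv expects edge pseudo-coordinates in [0, 1], which is why the fragment rescales relative positions with (pos_j - pos_i) / (2 * radius) + 0.5: offsets in [-radius, radius] map linearly onto [0, 1] before clamping. A standalone check of that mapping, with values chosen purely for illustration:

import torch

radius = 0.5
delta = torch.tensor([[-0.5, 0.0], [0.25, -0.25]])  # pos[dst] - pos[src]
pseudo = (delta / (2 * radius) + 0.5).clamp(min=0, max=1)
print(pseudo)  # tensor([[0.0000, 0.5000], [0.7500, 0.2500]])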
Example #4
        # (fragment: tail of Net.__init__; the DynamicEdgeConv layers
        # conv1/conv2 are defined in the omitted part above)
        self.lin0 = Lin(256, 512)

        self.lin1 = Lin(512, 256)
        self.lin2 = Lin(256, 256)
        self.lin3 = Lin(256, num_classes)

    def forward(self, pos, batch):
        x = self.conv1(pos, batch)
        x = self.conv2(x, batch)

        x = F.relu(self.lin0(x))

        x = global_max_pool(x, batch)

        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin3(x)
        return F.log_softmax(x, dim=-1)


train_dataset, test_dataset = get_dataset(num_points=1024)
model = Net(train_dataset.num_classes)
run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr,
    args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay,
    args.inference, args.profile)

if args.profile:
    rename_profile_file('points', DynamicEdgeConv.__name__)
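The fragment omits how self.conv1 and self.conv2 are constructed. A hedged sketch of DynamicEdgeConv layers that would fit: the only constraint taken from the fragment is that conv2 must output 256 features to feed self.lin0 = Lin(256, 512); the remaining sizes and k are assumptions:

from torch.nn import Linear as Lin, ReLU, Sequential as Seq

from torch_geometric.nn import DynamicEdgeConv

# The edge MLP sees concatenated [x_i, x_j - x_i], hence 2 * in_channels.
conv1 = DynamicEdgeConv(Seq(Lin(2 * 3, 64), ReLU(), Lin(64, 64)), k=20)
conv2 = DynamicEdgeConv(Seq(Lin(2 * 64, 128), ReLU(), Lin(128, 256)), k=20)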
Example #5
        # (fragment: tail of Net.forward; `x`, `pos` and `batch` come from
        # the omitted first conv block)
        radius = 0.4
        edge_index = radius_graph(pos, r=radius, batch=batch)
        pseudo = pos[edge_index[1]] - pos[edge_index[0]]
        x = F.relu(self.conv2(x, edge_index, pseudo))

        idx = fps(pos, batch, ratio=0.25)
        x, pos, batch = x[idx], pos[idx], batch[idx]

        radius = 1
        edge_index = radius_graph(pos, r=radius, batch=batch)
        pseudo = pos[edge_index[1]] - pos[edge_index[0]]
        x = F.relu(self.conv3(x, edge_index, pseudo))

        x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin3(x)
        return F.log_softmax(x, dim=-1)


train_dataset, test_dataset = get_dataset(num_points=1024)
model = Net(train_dataset.num_classes)
run(train_dataset, test_dataset, model, args.epochs, args.batch_size, args.lr,
    args.lr_decay_factor, args.lr_decay_step_size, args.weight_decay,
    args.inference, args.profile)

if args.profile:
    rename_profile_file('points', NNConv.__name__)
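Unlike the SplineConv variant above, this fragment feeds the raw positional offsets to the convolution as edge features. NNConv consumes them through an edge network that maps each 3-dimensional offset to a flattened in_channels x out_channels weight matrix; a minimal sketch with assumed channel sizes:

import torch
from torch.nn import Linear as Lin, ReLU, Sequential as Seq

from torch_geometric.nn import NNConv

in_channels, out_channels = 64, 128  # assumed sizes for illustration
edge_nn = Seq(Lin(3, 25), ReLU(), Lin(25, in_channels * out_channels))
conv = NNConv(in_channels, out_channels, edge_nn, aggr='mean')

x = torch.randn(10, in_channels)
edge_index = torch.randint(0, 10, (2, 40))
pseudo = torch.randn(40, 3)
out = conv(x, edge_index, pseudo)  # shape: [10, out_channels]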
Example #6
class Net(torch.nn.Module):
    def __init__(self, dataset):
        super().__init__()
        # Reconstructed opening of the truncated conv1 call, mirroring conv2.
        self.conv1 = ARMAConv(dataset.num_features,
                              args.hidden,
                              args.num_stacks,
                              args.num_layers,
                              args.shared_weights,
                              dropout=args.skip_dropout)
        self.conv2 = ARMAConv(args.hidden,
                              dataset.num_classes,
                              args.num_stacks,
                              args.num_layers,
                              args.shared_weights,
                              dropout=args.skip_dropout)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=args.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)


dataset = get_planetoid_dataset(args.dataset, not args.no_normalize_features)
permute_masks = random_planetoid_splits if args.random_splits else None
run(dataset, Net(dataset), args.runs, args.epochs, args.lr, args.weight_decay,
    args.early_stopping, args.inference, args.profile, permute_masks)

if args.profile:
    rename_profile_file('citation', ARMAConv.__name__, args.dataset,
                        str(args.random_splits))
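The citation fragment reads its hyper-parameters from a module-level args. A hedged sketch of the corresponding argparse setup; the flag names mirror the attributes used above, while the defaults are assumptions:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='Cora')
parser.add_argument('--random_splits', action='store_true')
parser.add_argument('--runs', type=int, default=10)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--weight_decay', type=float, default=0.0005)
parser.add_argument('--early_stopping', type=int, default=10)
parser.add_argument('--hidden', type=int, default=16)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--no_normalize_features', action='store_true')
parser.add_argument('--num_stacks', type=int, default=1)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--shared_weights', action='store_true')
parser.add_argument('--skip_dropout', type=float, default=0.75)
parser.add_argument('--inference', action='store_true')
parser.add_argument('--profile', action='store_true')
args = parser.parse_args()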


def run(args: argparse.Namespace) -> None:

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('BENCHMARK STARTS')
    for dataset_name in args.datasets:
        assert dataset_name in supported_sets, \
            f"Dataset {dataset_name} isn't supported."
        print(f'Dataset: {dataset_name}')
        dataset, num_classes = get_dataset(dataset_name, args.root,
                                           args.use_sparse_tensor)
        data = dataset.to(device)
        hetero = dataset_name == 'ogbn-mag'
        mask = ('paper', None) if dataset_name == 'ogbn-mag' else None
        degree = None

        if dataset_name == 'ogbn-mag':
            inputs_channels = data['paper'].num_features
        else:
            inputs_channels = dataset.num_features

        for model_name in args.models:
            if model_name not in supported_sets[dataset_name]:
                print(f'Configuration of {dataset_name} + {model_name} '
                      f'not supported. Skipping.')
                continue
            print(f'Evaluation bench for {model_name}:')

            for batch_size in args.eval_batch_sizes:
                if not hetero:
                    subgraph_loader = NeighborLoader(
                        data,
                        num_neighbors=[-1],  # layer-wise inference
                        input_nodes=mask,
                        batch_size=batch_size,
                        shuffle=False,
                        num_workers=args.num_workers,
                    )

                for layers in args.num_layers:
                    if hetero:
                        subgraph_loader = NeighborLoader(
                            data,
                            num_neighbors=[args.hetero_num_neighbors] *
                            layers,  # batch-wise inference
                            input_nodes=mask,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=args.num_workers,
                        )

                    for hidden_channels in args.num_hidden_channels:
                        print('----------------------------------------------')
                        print(
                            f'Batch size={batch_size}, '
                            f'Layers amount={layers}, '
                            f'Num_neighbors={subgraph_loader.num_neighbors}, '
                            f'Hidden features size={hidden_channels}')
                        params = {
                            'inputs_channels': inputs_channels,
                            'hidden_channels': hidden_channels,
                            'output_channels': num_classes,
                            'num_heads': args.num_heads,
                            'num_layers': layers,
                        }

                        if model_name == 'pna':
                            if degree is None:
                                degree = PNAConv.get_degree_histogram(
                                    subgraph_loader)
                                print(f'Calculated degree for {dataset_name}.')
                            params['degree'] = degree

                        model = get_model(
                            model_name, params,
                            metadata=data.metadata() if hetero else None)
                        model = model.to(device)
                        model.eval()

                        # Warm-up runs before any timed measurement.
                        for _ in range(args.warmup):
                            model.inference(subgraph_loader, device,
                                            progress_bar=True)
                        if args.experimental_mode:
                            with torch_geometric.experimental_mode():
                                with timeit():
                                    model.inference(subgraph_loader, device,
                                                    progress_bar=True)
                        else:
                            with timeit():
                                model.inference(subgraph_loader, device,
                                                progress_bar=True)

                        if args.profile:
                            with torch_profile():
                                model.inference(subgraph_loader, device,
                                                progress_bar=True)
                            rename_profile_file(
                                model_name, dataset_name, str(batch_size),
                                str(layers), str(hidden_channels),
                                str(subgraph_loader.num_neighbors))
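A hedged sketch of how this run entry point is typically wired up; the flag names mirror the attributes the function reads, while the defaults are illustrative assumptions:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--datasets', nargs='+', default=['Reddit'])
    parser.add_argument('--models', nargs='+', default=['gcn', 'sage'])
    parser.add_argument('--root', default='../../data')
    parser.add_argument('--use_sparse_tensor', action='store_true')
    parser.add_argument('--eval_batch_sizes', nargs='+', type=int,
                        default=[512, 1024])
    parser.add_argument('--num_layers', nargs='+', type=int, default=[2, 3])
    parser.add_argument('--num_hidden_channels', nargs='+', type=int,
                        default=[64, 128])
    parser.add_argument('--num_heads', type=int, default=2)
    parser.add_argument('--hetero_num_neighbors', type=int, default=10)
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--warmup', type=int, default=1)
    parser.add_argument('--experimental_mode', action='store_true')
    parser.add_argument('--profile', action='store_true')
    run(parser.parse_args())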