Example 1
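This example trains an hpno HierarchicalPathNetwork on the ogbg-molhiv graph property prediction benchmark. The DGL graphs are converted to heterographs once and cached in heterographs.bin, training uses binary cross-entropy with a ReduceLROnPlateau schedule on the validation loss and stops once the learning rate has decayed to 1% of its initial value, and the final validation and test ROC-AUC scores are written to a CSV file.
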
def run(args):
    import torch
    import hpno  # hierarchical path network package providing the model and readout
    from ogb.graphproppred import DglGraphPropPredDataset, Evaluator, collate_dgl
    from torch.utils.data import DataLoader

    dataset = DglGraphPropPredDataset(name="ogbg-molhiv")

    # Convert the molecular graphs to hpno heterographs once and cache them on disk
    import os
    if not os.path.exists("heterographs.bin"):
        dataset.graphs = [hpno.heterograph(graph) for graph in dataset.graphs]
        from dgl.data.utils import save_graphs
        save_graphs("heterographs.bin", dataset.graphs)
    else:
        from dgl.data.utils import load_graphs
        dataset.graphs = load_graphs("heterographs.bin")[0]

    evaluator = Evaluator(name="ogbg-molhiv")
    in_features = 9
    out_features = 1

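    # Use OGB's provided train/valid/test split (scaffold split for molhiv)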
    split_idx = dataset.get_idx_split()
    train_loader = DataLoader(dataset[split_idx["train"]], batch_size=128, drop_last=True, shuffle=True, collate_fn=collate_dgl)
    valid_loader = DataLoader(dataset[split_idx["valid"]], batch_size=len(split_idx["valid"]), shuffle=False, collate_fn=collate_dgl)
    test_loader = DataLoader(dataset[split_idx["test"]], batch_size=len(split_idx["test"]), shuffle=False, collate_fn=collate_dgl)

    model = hpno.HierarchicalPathNetwork(
        in_features=in_features,
        out_features=args.hidden_features,
        hidden_features=args.hidden_features,
        depth=args.depth,
        readout=hpno.GraphReadout(
            in_features=args.hidden_features,
            out_features=out_features,
            hidden_features=args.hidden_features,
        )
    )

    if torch.cuda.is_available():
        model = model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), args.learning_rate, weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, "min", factor=0.5, patience=20)

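    # Training loop with plateau-based LR decay and early stopping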
    for idx_epoch in range(args.n_epochs):
        print(idx_epoch, flush=True)
        model.train()
        for g, y in train_loader:
            y = y.float()
            if torch.cuda.is_available():
                g = g.to("cuda:0")
                y = y.cuda()
            optimizer.zero_grad()
            y_hat = model(g, g.nodes['n1'].data["feat"].float())
            loss = torch.nn.BCELoss()(
                input=y_hat.sigmoid(),
                target=y,
            )
            loss.backward()
            optimizer.step()

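        # One full-batch validation pass per epoch drives the scheduler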
        model.eval()
        with torch.no_grad():
            g, y = next(iter(valid_loader))
            y = y.float()
            if torch.cuda.is_available():
                g = g.to("cuda:0")
                y = y.cuda()
            y_hat = model(g, g.nodes['n1'].data["feat"].float())
            loss = torch.nn.BCELoss()(
                input=y_hat.sigmoid(),
                target=y,
            )
            scheduler.step(loss)

        # Early stopping: quit once the LR has decayed to 1% of its initial value
        if optimizer.param_groups[0]["lr"] <= 0.01 * args.learning_rate:
            break

    # Final evaluation on CPU with the trained model
    model = model.cpu()
    model.eval()
    with torch.no_grad():
        g, y = next(iter(valid_loader))
        rocauc_vl = evaluator.eval(
            {
                "y_true": y.float(),
                "y_pred": model(g, g.nodes['n1'].data["feat"].float()).sigmoid()
            }
        )["rocauc"]

        g, y = next(iter(test_loader))
        rocauc_te = evaluator.eval(
            {
                "y_true": y.float(),
                "y_pred": model(g, g.nodes['n1'].data["feat"].float()).sigmoid()
            }
        )["rocauc"]

    import pandas as pd
    df = pd.DataFrame(
        {
            args.data: {
                "rocauc_te": rocauc_te,
                "rocauc_vl": rocauc_vl,
            }
        }
    )

    df.to_csv("%s.csv" % args.out)
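
For context, run receives all of its hyperparameters through args. A minimal driver might look like the sketch below; the flag names mirror the attributes the function actually reads, while the default values are illustrative assumptions rather than settings from the original project.

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--hidden_features", type=int, default=128)   # assumed default
    parser.add_argument("--depth", type=int, default=4)               # assumed default
    parser.add_argument("--learning_rate", type=float, default=1e-3)  # assumed default
    parser.add_argument("--weight_decay", type=float, default=1e-5)   # assumed default
    parser.add_argument("--n_epochs", type=int, default=1000)         # assumed default
    parser.add_argument("--data", type=str, default="ogbg-molhiv")    # row label in the CSV
    parser.add_argument("--out", type=str, default="results")         # output CSV basename
    run(parser.parse_args())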
Example 2
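This example trains a GNN_mol model (gated-gcn, gcn, or mlp) on an OGB molecular property prediction dataset, optionally augmenting the graphs with positional encodings. Per-epoch train/validation/test metrics are logged to TensorBoard, and the scores at the best validation epoch are saved to results.pt.
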
def main(args):

    # This excerpt relies on module-level imports from the surrounding script,
    # e.g. os, time, numpy as np, torch, torch.optim as optim, tqdm,
    # DataLoader from torch.utils.data, DglGraphPropPredDataset, Evaluator and
    # collate_dgl from ogb.graphproppred, a TensorBoard SummaryWriter, and the
    # project-local GNN_mol, add_positional_encoding, train and eval helpers.

    # Fix random seeds for reproducibility
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    device = (torch.device(f"cuda:{args.device}")
              if torch.cuda.is_available() else torch.device("cpu"))

    # Load dataset and evaluator
    dataset = DglGraphPropPredDataset(name=args.dataset)
    split_idx = dataset.get_idx_split()
    evaluator = Evaluator(args.dataset)

    if args.pos_enc_dim > 0:
        # Add graph positional encodings
        print("Adding PEs...")
        dataset.graphs = [
            add_positional_encoding(g, args.pos_enc_dim)
            for g in tqdm(dataset.graphs)
        ]

    # Basic pre-processing
    if args.dataset == 'ogbg-molpcba':
        print("Removing training graphs with 0 edges...")
        train_idx = set(split_idx["train"].tolist())  # set gives O(1) membership tests
        train_split = []
        for idx, g in enumerate(tqdm(dataset.graphs)):
            if idx in train_idx and g.number_of_edges() != 0:
                train_split.append(idx)
        split_idx["train"] = torch.LongTensor(train_split)

    # Prepare dataloaders
    train_loader = DataLoader(dataset[split_idx["train"]],
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers,
                              collate_fn=collate_dgl)
    valid_loader = DataLoader(dataset[split_idx["valid"]],
                              batch_size=args.batch_size,
                              shuffle=False,
                              num_workers=args.num_workers,
                              collate_fn=collate_dgl)
    test_loader = DataLoader(dataset[split_idx["test"]],
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers,
                             collate_fn=collate_dgl)

    # Initialize model, optimizer and scheduler
    if args.gnn in ['gated-gcn', 'gcn', 'mlp']:
        model = GNN_mol(gnn_type=args.gnn,
                        num_tasks=dataset.num_tasks,
                        num_layer=args.num_layer,
                        emb_dim=args.emb_dim,
                        dropout=args.dropout,
                        batch_norm=True,
                        residual=True,
                        pos_enc_dim=args.pos_enc_dim,
                        graph_pooling=args.pooling,
                        virtualnode=args.virtualnode)
        model.to(device)
        print(model)
        total_param = 0
        for param in model.parameters():
            total_param += np.prod(list(param.data.size()))
        print(f'Total parameters: {total_param}')

        optimizer = optim.Adam(model.parameters(), lr=args.lr)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode='min',
            factor=args.lr_reduce_factor,
            patience=args.lr_scheduler_patience,
            verbose=True)
    else:
        raise ValueError('Invalid GNN type')

    # Define loss function
    cls_criterion = torch.nn.BCEWithLogitsLoss()

    # Create Tensorboard logger
    start_time_str = time.strftime("%Y%m%dT%H%M%S")
    log_dir = os.path.join(
        "logs", args.dataset,
        f"{args.expt_name}-{args.gnn}-L{args.num_layer}-h{args.emb_dim}-d{args.dropout}-LR{args.lr}",
        f"{start_time_str}-GPU{args.device}")
    tb_logger = SummaryWriter(log_dir)

    # Training loop
    train_curve = []
    valid_curve = []
    test_curve = []

    for epoch in range(1, args.epochs + 1):
        print("=====Epoch {}".format(epoch))
        tb_logger.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)

        print('Training...')
        train(model, device, train_loader, optimizer, cls_criterion)

        print('Evaluating...')
        train_loss, train_perf = eval(model, device, train_loader, evaluator,
                                      cls_criterion)
        valid_loss, valid_perf = eval(model, device, valid_loader, evaluator,
                                      cls_criterion)
        _, test_perf = eval(model, device, test_loader, evaluator,
                            cls_criterion)

        # Log statistics to Tensorboard, etc.
        print({
            'Train': train_perf,
            'Validation': valid_perf,
            'Test': test_perf
        })

        tb_logger.add_scalar('loss/train', train_loss, epoch)
        tb_logger.add_scalar(f'{dataset.eval_metric}/train',
                             train_perf[dataset.eval_metric], epoch)
        tb_logger.add_scalar('loss/valid', valid_loss, epoch)
        tb_logger.add_scalar(f'{dataset.eval_metric}/valid',
                             valid_perf[dataset.eval_metric], epoch)
        tb_logger.add_scalar(f'{dataset.eval_metric}/test',
                             test_perf[dataset.eval_metric], epoch)

        train_curve.append(train_perf[dataset.eval_metric])
        valid_curve.append(valid_perf[dataset.eval_metric])
        test_curve.append(test_perf[dataset.eval_metric])

        if args.lr_scheduler_patience > 0:
            # Reduce LR using scheduler
            scheduler.step(valid_loss)

    if 'classification' in dataset.task_type:
        best_val_epoch = np.argmax(np.array(valid_curve))
        best_train = max(train_curve)
    else:
        best_val_epoch = np.argmin(np.array(valid_curve))
        best_train = min(train_curve)

    print('Finished training!')
    print('Best validation score: {}'.format(valid_curve[best_val_epoch]))
    print('Test score: {}'.format(test_curve[best_val_epoch]))

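    # Save a summary of the run alongside the TensorBoard logs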
    torch.save(
        {
            'args': args,
            'model': repr(model),  # store the architecture string, not a bound method
            'total_param': total_param,
            'BestEpoch': best_val_epoch,
            'Validation': valid_curve[best_val_epoch],
            'Test': test_curve[best_val_epoch],
            'Train': train_curve[best_val_epoch],
            'BestTrain': best_train,
        }, os.path.join(log_dir, "results.pt"))
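
add_positional_encoding is a project-local helper that is not shown in this excerpt. A common choice for this benchmark family is the Laplacian eigenvector positional encoding of Dwivedi et al.; the sketch below illustrates that technique under that assumption and is not a copy of the original helper.

import numpy as np
import scipy.sparse as sp
import torch

def add_positional_encoding(g, pos_enc_dim):
    # Sketch: Laplacian eigenvector positional encodings (assumed technique,
    # not necessarily the project's actual implementation).
    n = g.number_of_nodes()
    src, dst = g.edges()
    # Symmetrically normalized Laplacian L = I - D^{-1/2} A D^{-1/2}.
    # OGB molecular graphs store both edge directions, so A is symmetric.
    A = sp.coo_matrix(
        (np.ones(g.number_of_edges()), (src.numpy(), dst.numpy())),
        shape=(n, n),
    )
    deg = np.asarray(A.sum(axis=1)).flatten().clip(1)
    D_inv_sqrt = sp.diags(deg ** -0.5)
    L = sp.eye(n) - D_inv_sqrt @ A @ D_inv_sqrt
    # Eigenvectors of the smallest nontrivial eigenvalues form the encoding
    eigval, eigvec = np.linalg.eigh(L.toarray())
    pe = eigvec[:, 1:pos_enc_dim + 1]
    if pe.shape[1] < pos_enc_dim:
        # Pad graphs with fewer than pos_enc_dim + 1 nodes
        pe = np.pad(pe, ((0, 0), (0, pos_enc_dim - pe.shape[1])))
    g.ndata['pos_enc'] = torch.from_numpy(pe).float()
    return g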