Example #1
class DiffPool(nn.Module):
    def __init__(self, nfeat, nnext, nhid, is_final=False, device='cpu'):
        super(DiffPool, self).__init__()
        self.device = device
        self.is_final = is_final
        # GNN branch that computes the node embeddings for this pooling level
        self.embed = GraphSAGE(nfeat, nhid, device=self.device, use_bn=False)
        # GNN branch that computes the soft assignment to the nnext clusters
        self.assign_mat = GraphSAGE(nfeat,
                                    nnext,
                                    device=self.device,
                                    use_bn=False)
        self.link_pred_loss = 0
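For context, a minimal sketch of the matching forward pass, following the standard DiffPool formulation (Ying et al., 2018); the module(x, adj) call convention and the torch / F imports are assumptions about this repo, not confirmed API:

    def forward(self, x, adj):
        # Hedged sketch, not the repository's original method.
        z = self.embed(x, adj)                          # node embeddings Z
        s = F.softmax(self.assign_mat(x, adj), dim=-1)  # soft assignments S
        xnext = s.transpose(-1, -2) @ z                 # pooled features  S^T Z
        anext = s.transpose(-1, -2) @ adj @ s           # coarsened graph  S^T A S
        # auxiliary link-prediction loss  || A - S S^T ||_F
        self.link_pred_loss = torch.norm(adj - s @ s.transpose(-1, -2))
        return xnext, anext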
Example #2
def __init__(self, pool_size, device):
    super().__init__()
    self.device = device
    # Alternate GraphSAGE message passing with DiffPool coarsening; the final
    # DiffPool (is_final=True) collapses the graph to a single node.
    self.dps = nn.ModuleList([
        GraphSAGE(18, 128, device=self.device),
        GraphSAGE(128, 64, device=self.device),
        DiffPool(64, pool_size, 64, device=self.device),
        GraphSAGE(64, 32, device=self.device),
        DiffPool(32, 1, 32, is_final=True, device=self.device)
    ])
    self.classifier = Classifier()
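A plausible forward method for this stack (an assumption about call conventions, not this repo's confirmed API): GraphSAGE layers return updated node features, while DiffPool layers return a pooled (features, adjacency) pair.

def forward(self, x, adj):
    # Hedged sketch: threads (x, adj) through the mixed layer list above.
    for layer in self.dps:
        if isinstance(layer, DiffPool):
            x, adj = layer(x, adj)   # coarsen features and adjacency together
        else:
            x = layer(x, adj)        # message passing at the current resolution
    return self.classifier(x.squeeze())  # final DiffPool leaves a single node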
Example #3
def build_model(model_key, dataset, g, in_feats, n_classes):
    """
    Returns a model instance based on --model command-line arg and dataset
    """
    if model_key == 'MLP':
        return MLP(in_feats, 64, n_classes, 1, F.relu, 0.5)
    elif model_key == 'GCN':
        return GCN(g, in_feats, 16, n_classes, 1, F.relu, 0.5)
    elif model_key == 'GCN-64':
        return GCN(g, in_feats, 64, n_classes, 1, F.relu, 0.5)
    elif model_key == 'GAT':
        # Default args from paper
        num_heads = 8
        num_out_heads = 8 if dataset == 'pubmed' else 1
        num_layers = 1  # one *hidden* layer
        heads = ([num_heads] * num_layers) + [num_out_heads]
        return GAT(
            g,
            num_layers,
            in_feats,
            8,  # hidden units per layer
            n_classes,
            heads,
            F.elu,  # activation function
            0.6,  # feature dropout
            0.6,  # attention dropout
            0.2,  # negative slope for LeakyReLU
            False  # residual connections (disabled)
        )
    elif model_key == 'GraphSAGE':
        return GraphSAGE(g, in_feats, 16, n_classes, 1, F.relu, 0.5, "mean")

    # Add more models here
    raise ValueError("Invalid model key")
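A typical call site, with illustrative names (g, features, and labels stand for whatever the training script has already loaded; they are not part of the snippet):

# Hypothetical usage of build_model
in_feats = features.shape[1]
n_classes = int(labels.max().item()) + 1
model = build_model('GCN-64', 'cora', g, in_feats, n_classes)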
Example #4
from __future__ import division
from __future__ import print_function

import numpy as np
import torch

from graphsage import GraphSAGE

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Preprocessed ENZYMES arrays: adjacency matrices, one-hot node features, labels
adj = np.load("../data/enzymes/graph.npy")
features = np.load("../data/enzymes/one_hot.npy")
labels = np.load("../data/enzymes/labels.npy")
labels = torch.from_numpy(labels).to(device)

# Smoke-test on the first graph; tensors stay on CPU, matching the model's
# default device
a = torch.from_numpy(adj[0]).float()
f = torch.from_numpy(features[0]).float()

model = GraphSAGE(3, 8)  # 3 one-hot input features, 8 hidden units
model(f, a)
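To make the smoke test observable, one might capture and print the result (assuming the forward call returns a tensor of per-node embeddings):

out = model(f, a)  # hypothetical: inspect the output instead of discarding it
print(out.shape)   # expect (num_nodes, 8) if GraphSAGE returns node embeddings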
Example #5
                    help='Dropout rate (1 - keep probability).')
args = parser.parse_args()

# set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# load dataset
dataset = Planetoid(root='/tmp/' + args.dataset, name=args.dataset)
data = dataset[0].to(device)

# build the model and optimizer from the parsed arguments
if args.model == 'GCN':
    model = GCN(dataset.num_features, args.hidden,
                dataset.num_classes).to(device)
else:
    model = GraphSAGE(dataset.num_features, args.hidden,
                      dataset.num_classes).to(device)

optimizer = torch.optim.Adam(model.parameters(),
                             lr=args.lr,
                             weight_decay=args.weight_decay)

# lists used for plotting accuracy and loss curves
Accuracy_list = []
Loss_list = []

# train the model
model.train()
for epoch in range(args.epochs):
    optimizer.zero_grad()
    out = model(data.x, data.edge_index)
    _, pred = out.max(dim=1)  # reuse the forward pass instead of recomputing it
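The loop is cut off here; a plausible continuation for Planetoid node classification (an assumption, not the original code) would be:

    # Hedged sketch of the rest of the loop body; assumes the model ends in
    # log_softmax (so nll_loss applies) and torch.nn.functional is imported as F.
    loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
    loss.backward()
    optimizer.step()
    correct = int(pred[data.train_mask].eq(data.y[data.train_mask]).sum())
    Accuracy_list.append(correct / int(data.train_mask.sum()))
    Loss_list.append(float(loss))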
Example #6
    indices = np.arange(A.shape[0])  # node indices into the feature matrix
    neigh_number = [10, 25]
    neigh_maxlen = []

    model_input = [features, np.asarray(indices, dtype=np.int32)]

    # Sample a fixed-size neighbourhood for each hop
    for num in neigh_number:
        sample_neigh, sample_neigh_len = sample_neighs(
            G, indices, num, self_loop=False)
        model_input.append(sample_neigh)
        neigh_maxlen.append(max(sample_neigh_len))

    model = GraphSAGE(feature_dim=features.shape[1],
                      neighbor_num=neigh_maxlen,
                      n_hidden=16,
                      n_classes=y_train.shape[1],
                      use_bias=True,
                      activation=tf.nn.relu,
                      aggregator_type='mean',
                      dropout_rate=0.5, l2_reg=2.5e-4)
    model.compile(Adam(0.01), 'categorical_crossentropy',
                  weighted_metrics=['categorical_crossentropy', 'acc'])

    val_data = (model_input, y_val, val_mask)
    mc_callback = ModelCheckpoint('./best_model.h5',
                                  monitor='val_weighted_categorical_crossentropy',
                                  save_best_only=True,
                                  save_weights_only=True)

    print("start training")
    model.fit(model_input, y_train, sample_weight=train_mask, validation_data=val_data,