Example #1
def foo(dataset):
    # Build a ResGCN for the given dataset; hidden, num_feat_layers,
    # num_conv_layers, num_fc_layers, residual, res_branch, global_pool,
    # dropout and edge_norm are hyperparameters taken from the enclosing scope.
    return ResGCN(dataset,
                  hidden,
                  num_feat_layers,
                  num_conv_layers,
                  num_fc_layers,
                  gfn=False,
                  collapse=False,
                  residual=residual,
                  res_branch=res_branch,
                  global_pool=global_pool,
                  dropout=dropout,
                  edge_norm=edge_norm)
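
A minimal usage sketch, assuming the hyperparameters live in the enclosing scope as the snippet above implies; every concrete value below (hidden=128, three conv layers, and so on) is an illustrative assumption, not taken from the original:

# Hypothetical values -- each number and string here is an assumption
# chosen for illustration, not from the original example.
hidden = 128
num_feat_layers, num_conv_layers, num_fc_layers = 1, 3, 2
residual = False
res_branch = 'BNConvReLU'
global_pool = 'sum'
dropout = 0.0
edge_norm = True

model = foo(dataset)  # dataset: e.g. a torch_geometric.datasets.TUDataset
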
Example #2
    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        # Single graph convolution followed by a ReLU non-linearity.
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        # Dropout could be enabled here during training:
        # x = F.dropout(x, training=self.training)
        x = self.fc(x)
        # Per-node log-probabilities, suitable for F.nll_loss.
        return F.log_softmax(x, dim=1)


# Select the GPU when available; `dataset` and `data` are assumed to have
# been loaded earlier in the script.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = data.to(device)
model = ResGCN(dataset.num_features, dataset.num_classes, data.edge_index,
               data.edge_attr).to(device)
#model = Net().to(device)
print(model)
# optimizer = torch.optim.Adam([
#     dict(params=model.conv1.parameters(), weight_decay=5e-4),
#     dict(params=model.conv2.parameters(), weight_decay=0)
# ], lr=0.01)  # Only perform weight-decay on first convolution.

optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
# ReduceLROnPlateau lowers the learning rate once the monitored metric stops
# improving; it must be stepped with that metric, e.g. scheduler.step(val_loss).
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)


def train():
    model.train()
    optimizer.zero_grad()
    loss = F.nll_loss(model(data)[data.train_mask], data.y[data.train_mask])
    loss.backward()   # backpropagate the training loss
    optimizer.step()  # update the model parameters
    return loss.item()
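
A minimal driver-loop sketch to tie the pieces together; the epoch count and the choice of training loss as the plateau metric for the scheduler are assumptions for illustration:

# Hypothetical training loop -- epoch count and plateau metric are assumptions.
for epoch in range(1, 201):
    loss = train()
    scheduler.step(loss)  # ReduceLROnPlateau needs the monitored value
    print(f'Epoch {epoch:03d} | loss {loss:.4f}')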