Example #1
def main(args):
    # load and preprocess dataset
    if args.gpu >= 0:  # gpu id 0 is a valid CUDA device
        cuda = True
        device = torch.device('cuda:{}'.format(args.gpu))
    else:
        device = torch.device('cpu')
        cuda = False
    cora_data = NeptuneCoraDataset(device, valid_ratio=0.1, test_ratio=0.2)
    #cora_data = CoraDataset(device, valid_ratio=0.1, test_ratio=0.2)
    features = cora_data.features
    test_set = cora_data.test_set
    g = cora_data.g

    in_feats = features['homo'].shape[1]
    n_edges = g.number_of_edges()
    # symmetric normalization: norm_i = 1 / sqrt(deg_i); isolated nodes get 0
    degs = g.in_degrees().float()
    norm = torch.pow(degs, -0.5)
    norm[torch.isinf(norm)] = 0
    if cuda:
        norm = norm.cuda()
    g.ndata['norm'] = norm.unsqueeze(1)

    # create GCN model
    model = GCN(g, in_feats, args.n_hidden, cora_data.n_class, args.n_layers,
                F.relu)
    model.load_state_dict(torch.load(args.model_path, map_location=device))
    if cuda:
        model.cuda()

    model.eval()
    print()
    acc = evaluate(model, features['homo'], test_set)
    print("Test accuracy {:.2%}".format(acc))
Example #2
def write_test(model_name):
    test_dataset = Smiles(data_choice='test', pos_weight=my_cfg['pos_weight'], device=device)
    test_loader = DataLoader(test_dataset, batch_size=my_cfg['batch_size'], shuffle=False)

    best_model = GCN(34, 32, 2)
    best_model.load_state_dict(torch.load(model_name))
    if torch.cuda.is_available():
        best_model = best_model.cuda()
        
    best_model.eval()
    print('\nStarting test ...')
    results = []
    for i, (names, adj, features) in enumerate(test_loader):
        res = best_model(features, adj).detach().cpu()
        res = res.reshape(-1)
        for name, my_res in zip(names, res):
            results.append({'name': name, 'res': my_res})

    exp_num = my_cfg['exp_num']
    model_name = model_name.split('.')[-2][1:]  # e.g. './gcn.pth' -> 'gcn'
    with open(f'./data/test/output_{model_name}.txt', "w") as f:
        f.write('Chemical,Label\n')
        assert len(results) == 610  # expected size of this test split
        for i in range(len(results)):
            my_name = results[i]['name']
            my_res = results[i]['res']
            my_res = my_res.detach().cpu().numpy()
            f.write(f'{my_name},{my_res}\n')
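The split('.')[-2][1:] expression above derives the output filename from the checkpoint path, and it only behaves as intended for paths of the form './name.pth'. A quick trace with a hypothetical path:

path = './gcn.pth'
stem = path.split('.')[-2]   # '/gcn' (the text between the leading '.' and '.pth')
name = stem[1:]              # 'gcn'  (drop the leading '/')
# -> writes to './data/test/output_gcn.txt'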
Example #3
def main(args):
    # load and preprocess dataset
    if args.gpu >= 0:  # gpu id 0 is a valid CUDA device
        cuda = True
        device = torch.device('cuda:{}'.format(args.gpu))
    else:
        device = torch.device('cpu')
        cuda = False
    cora_data = NeptuneCoraDataset(device, valid_ratio=0.0, test_ratio=0.1)
    #cora_data = CoraDataset(device, valid_ratio=0.1, test_ratio=0.2)
    features = cora_data.features

    # infer the category of each node in test_set
    test_set = cora_data.test_set
    g = cora_data.g

    in_feats = features['homo'].shape[1]
    n_edges = g.number_of_edges()
    # normalization
    degs = g.in_degrees().float()
    norm = torch.pow(degs, -0.5)
    norm[torch.isinf(norm)] = 0
    if cuda:
        norm = norm.cuda()
    g.ndata['norm'] = norm.unsqueeze(1)

    # create GCN model
    model = GCN(g, in_feats, args.n_hidden, cora_data.n_class, args.n_layers,
                F.relu)
    model.load_state_dict(torch.load(args.model_path, map_location=device))
    if cuda:
        model.cuda()

    model.eval()
    with torch.no_grad():
        logits = model(features['homo'])
        logits = logits[test_set[0]]
        _, indices = torch.max(logits, dim=1)

        nodes = test_set[0].cpu().numpy().tolist()
        indices = indices.cpu().numpy().tolist()
        for idx, label in enumerate(indices):
            node_id = nodes[idx]
            truth_nid = cora_data.translate_node(node_id)
            truth_label = cora_data.translate_label(label)
            print(
                "{{\"gremlin\":\"g.V(\\\"{}\\\").property(\\\"category\\\", \\\"{}\\\")\"}}"
                .format(truth_nid, truth_label))
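Each printed line is a JSON-wrapped Gremlin update statement. For a translated node id and label such as 'node_123' and 'Neural_Networks' (hypothetical values), the output looks like:

{"gremlin":"g.V(\"node_123\").property(\"category\", \"Neural_Networks\")"}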
Example #4
def main(args):
    data = load_data(args)
    features = torch.FloatTensor(data.features)
    labels = torch.FloatTensor(data.labels)
    train_mask = torch.BoolTensor(data.train_mask)
    val_mask = torch.BoolTensor(data.val_mask)
    test_mask = torch.BoolTensor(data.test_mask)

    g = data.graph
    n_feats = features.shape[1]
    n_labels = data.num_labels
    n_edges = g.number_of_edges()
    print("""----Data statistics------'
        #Features %d    
        #Edges %d
        #Labels %d
        #Train samples %d
        #Val samples %d
        #Test samples %d""" %
          (n_feats, n_edges, n_labels, train_mask.int().sum().item(),
           val_mask.int().sum().item(), test_mask.int().sum().item()))

    dataset_train = CampusDataset(features, labels)
    dict_users = iid_users(dataset_train, args.n_users)

    if args.gnnbase == 'gcn':
        g = DGLGraph(g)
        n_edges = g.number_of_edges()
        degs = g.in_degrees().float()
        norm = torch.pow(degs, -0.5)
        norm[torch.isinf(norm)] = 0
        g.ndata['norm'] = norm.unsqueeze(1)
        model = GCN(g, n_feats, args.n_hidden, n_labels, args.n_layers, F.relu,
                    args.dropout)

    if args.gnnbase == 'gat':
        g.remove_edges_from(nx.selfloop_edges(g))
        g = DGLGraph(g)
        g.add_edges(g.nodes(), g.nodes())
        n_edges = g.number_of_edges()
        heads = ([args.n_heads] * args.n_layers) + [args.n_out_heads]
        model = GAT(g, args.n_layers, n_feats, args.n_hidden, n_labels, heads,
                    F.elu, args.in_drop, args.attn_drop, args.negative_slope,
                    args.residual)

    if args.gnnbase == 'sage':
        g.remove_edges_from(nx.selfloop_edges(g))
        g = DGLGraph(g)
        n_edges = g.number_of_edges()
        model = GraphSAGE(g, n_feats, args.n_hidden, n_labels, args.n_layers,
                          F.relu, args.dropout, args.aggregator_type)

    print(model)
    model.train()

    w_glob = model.state_dict()
    loss_train = []
    timecost = []

    for epoch in range(args.n_epochs):
        time_begin = time.time()

        w_locals, loss_locals = [], []
        m = max(int(args.frac * args.n_users), 1)
        idxs_users = np.random.choice(range(args.n_users), m, replace=False)
        for idx in idxs_users:
            local = LocalUpdate(args=args,
                                dataset=dataset_train,
                                idxs=dict_users[idx],
                                mask=train_mask)
            w, loss = local.train(model=copy.deepcopy(model))
            w_locals.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))
        w_glob = FedAvg(w_locals)

        model.load_state_dict(w_glob)

        time_end = time.time()
        timecost.append(time_end - time_begin)

        loss_avg = sum(loss_locals) / len(loss_locals)
        print('Epoch {:3d}, Average loss {:.3f}'.format(epoch, loss_avg))
        loss_train.append(loss_avg)

        train_errX, train_errY = eval_error(model, features, labels,
                                            train_mask)
        val_errX, val_errY = eval_error(model, features, labels, val_mask)
        test_errX, test_errY = eval_error(model, features, labels, test_mask)
        print(
            "Epoch {:3d} | TrainRMSEX {:.4f} | TrainRMSEY {:.4f} | ValRMSEX {:.4f} | ValRMSEY {:.4f} | TestRMSEX {:.4f} | TestRMSEY {:.4f}"
            .format(epoch, train_errX, train_errY, val_errX, val_errY,
                    test_errX, test_errY))

    print("Time cost {:.4f}".format(sum(timecost) / args.n_epochs))

    base_errX, base_errY = calc_error(features[test_mask, :2],
                                      labels[test_mask])
    print("TestRMSEX-Base {:.4f} | TestRMSEY-Base {:.4f}".format(
        base_errX, base_errY))
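FedAvg is also not shown here. A minimal sketch of the standard federated-averaging step this loop relies on: an unweighted element-wise mean of the client state dicts (the repository's version may instead weight clients by sample count):

def FedAvg(w_locals):
    # Average a list of model state dicts parameter-by-parameter.
    w_avg = copy.deepcopy(w_locals[0])
    for key in w_avg.keys():
        for w in w_locals[1:]:
            w_avg[key] += w[key]
        w_avg[key] = torch.div(w_avg[key], len(w_locals))
    return w_avg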
Example #5
import pickle

import numpy as np
import torch
from torch.utils.data import DataLoader

# GCN and CocoDataset are project-local modules assumed to be importable here.

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_path = 'Results/model_exp2.pth'
num_classes = 80
train_path = '/home/user/Data/coco2014/train2014'
train_ann_file = '/home/user/Data/coco2014/annotations/instances_train2014.json'
val_path = '/home/user/Data/coco2014/val2014'
val_ann_file = '/home/user/Data/coco2014/annotations/instances_val2014.json'
train_pickle_file = 'train.pickle'
val_pickle_file = 'val.pickle'

adj = pickle.load(open('adj.pickle', 'rb'))
adj = np.float32(adj / np.max(adj) + np.identity(num_classes))
adj_tensor = torch.from_numpy(adj)

model = GCN(adj_tensor, num_classes, 80, num_classes)
model.load_state_dict(torch.load(model_path, map_location=device))
model = model.to(device)

train_dataset = CocoDataset(train_path, train_ann_file, num_classes)
val_dataset = CocoDataset(val_path, val_ann_file, num_classes)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=1)
val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=1)

train_detections = pickle.load(open(train_pickle_file, 'rb'))
val_detections = pickle.load(open(val_pickle_file, 'rb'))

total_train_images = len(train_loader)
total_val_images = len(val_loader)

model.eval()

print('Running...')
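The snippet ends after the setup, so the inference loop itself is not shown. A hedged sketch of what presumably follows; the batch format yielded by CocoDataset is not visible on this page, so the (inputs, targets) pair is an assumption:

with torch.no_grad():
    for i, (inputs, targets) in enumerate(val_loader):
        inputs = inputs.to(device)   # assumed image tensor
        outputs = model(inputs)      # per-class scores
        # ... accumulate predictions / metrics here ...
        if (i + 1) % 1000 == 0:
            print(f'{i + 1}/{total_val_images} images processed')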