def local_train(epoch):
    """Run one train + validation epoch for the global ``local_model``.

    Metrics are printed and appended into slot 2 of the module-level
    ``loss_*_list`` / ``accuracy_*_list`` trackers.
    """
    epoch_start = time.time()

    # --- training step ---
    local_model.train()
    local_optimizer.zero_grad()
    out = local_model(features, adj)
    loss_train = F.nll_loss(out[idx_train], labels[idx_train])
    acc_train = accuracy(out[idx_train], labels[idx_train])
    loss_train.backward()
    local_optimizer.step()

    # --- validation step (eval mode disables dropout) ---
    local_model.eval()
    out = local_model(features, adj)
    loss_val = F.nll_loss(out[idx_val], labels[idx_val])
    acc_val = accuracy(out[idx_val], labels[idx_val])

    print('Epoch: {:04d}'.format(epoch + 1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'time: {:.4f}s'.format(time.time() - epoch_start),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()))

    # Record all four metrics in the "local" slot (index 2) of the trackers.
    for tracker, metric in ((loss_train_list, loss_train),
                            (accuracy_train_list, acc_train),
                            (loss_val_list, loss_val),
                            (accuracy_val_list, acc_val)):
        tracker[2].append(metric.cpu().item())
# Exemplo n.º 2
# 0
def train(model, optimizer, epoch, adj, features, labels, idx_train, idx_val, valid=False):
    """Run one training epoch on ``idx_train``; optionally validate on ``idx_val``.

    Uses ``focal_loss`` (project helper) as the training objective.

    Args:
        model: GCN-style module called as ``model(features, adj)``.
        optimizer: optimizer wrapping ``model``'s parameters.
        epoch: zero-based epoch index (printed as 1-based).
        adj: graph adjacency input forwarded to the model.
        features: node feature input forwarded to the model.
        labels: ground-truth labels indexable by the idx tensors.
        idx_train: indices of training nodes.
        idx_val: indices of validation nodes.
        valid: when True, re-run the forward pass in eval mode and print
            train + validation metrics for this epoch.
    """
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    loss_train = focal_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    if valid:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)
        loss_val = focal_loss(output[idx_val], labels[idx_val])
        acc_val = accuracy(output[idx_val], labels[idx_val])
        print('Epoch: {:04d}'.format(epoch+1),
              'loss_train: {:.4f}'.format(loss_train.item()),
              'acc_train: {:.4f}'.format(acc_train.item()),
              'loss_val: {:.4f}'.format(loss_val.item()),
              'acc_val: {:.4f}'.format(acc_val.item()),
              'time: {:.4f}s'.format(time.time() - t))
# Exemplo n.º 3
# 0
def _test_metrics(model, tag):
    """Evaluate *model* on the global test split; print and return (acc, loss)."""
    model.eval()
    output = model(features, adj)
    loss = F.nll_loss(output[idx_test], labels[idx_test])
    acc = accuracy(output[idx_test], labels[idx_test])
    print('{}:\nacc:{} loss:{}'.format(tag, acc.item(), loss.item()))
    return acc, loss


def final_test(ls):
    """Evaluate all four trained models on the test split and persist results.

    Args:
        ls: sequence of four epoch counts, aligned with the order
            (bob, alice, local, fed); stored in the 'epoch' column.

    Side effects: prints per-model metrics, pickles the summary DataFrame
    to a hard-coded results path suffixed with ``args.zubie``, and prints it.
    """
    scored = []
    # Order matters: it must match the DataFrame index and the ``ls`` epochs.
    for model, tag in ((bob_alone_model, 'bob test'),
                       (alice_alone_model, 'alice test'),
                       (local_model, 'local test'),
                       (bob_fed_model, 'federated test')):
        acc, loss = _test_metrics(model, tag)
        scored.append([acc.item(), loss.item()])

    df_test = pd.DataFrame(
        data=[row + [epochs] for row, epochs in zip(scored, ls)],
        columns=['acc', 'loss', 'epoch'],
        index=['bob', 'alice', 'local', 'fed'])
    # NOTE: ``with`` already closes the file; the original redundant
    # f5.close() after the block was dropped.
    with open(
            r'C:\Users\lzl_z\Desktop\Fed_GCN_Experiment_two\citeseer-results\testing\2_1_'
            + str(args.zubie) + '.pkl', 'wb') as f5:
        pkl.dump(df_test, f5)
    print(df_test)
# Exemplo n.º 4
# 0
def test():
    """Print loss and accuracy of the global ``model`` on the test split."""
    model.eval()  # disable dropout for evaluation
    logits = model(features, adj)
    test_loss = F.nll_loss(logits[idx_test], labels[idx_test])
    test_acc = accuracy(logits[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(test_loss.item()),
          "accuracy= {:.4f}".format(test_acc.item()))
# Exemplo n.º 5
# 0
def gcn_inference(sample_name, model, adj, features, y_test, idx_test, get_probs=False):
    """Refine a CNN ROI segmentation with the trained GCN and score it.

    Args:
        sample_name: pair of file names under ``dirs.WORKING_DIR``
            (index 0 = volume, index 1 = ground truth).
        model: trained GCN called as ``model(features, adj)``.
        adj, features: graph inputs for the forward pass.
        y_test: labels for the test nodes.
        idx_test: indices of test nodes.
        get_probs: when True, return the expanded raw model outputs
            instead of thresholded predictions/metrics.

    Returns:
        ``get_probs=True``: a full-volume float array of model outputs.
        Otherwise: a dict with CNN/GCN slice-wise and volume-wise DSC scores.

    Fix: ``np.bool`` and ``np.float`` were removed in NumPy 1.24 — replaced
    with the builtin ``bool`` / ``np.float64``-equivalent ``float``.
    """
    roi_limits = np.load(dirs.ROI_LIMITS)
    segmentation = np.load(dirs.ROI_PREDICTION)
    gt = np.load(dirs.WORKING_DIR + sample_name[1])
    gt[gt != 0] = 1  # binarise ground truth

    roi_vol = np.load(dirs.ROI_VOLUME)
    vol = np.load(dirs.WORKING_DIR + sample_name[0])
    valid_nodes = np.load(dirs.DILATION_PATH)

    model.eval()
    output = model(features, adj)
    # Computed for parity with the original code; not part of the returned info.
    loss_test = focal_loss(output[idx_test], y_test[idx_test])
    acc_test = accuracy(output[idx_test], y_test[idx_test])

    voxel_node, node_voxel = map_voxel_nodes(roi_vol.shape, valid_nodes.astype(bool))
    if get_probs:
        graph_predictions = output.cpu().detach().numpy().astype(np.float32)
        graph_predictions = reconstruct_from_n6(graph_predictions, node_voxel, roi_vol.shape, dtype=float)
        gp_expanded = np.zeros(vol.shape, dtype=float)
        gp_expanded[roi_limits[0]:roi_limits[3], roi_limits[1]:roi_limits[4], roi_limits[2]:roi_limits[5]] \
            = graph_predictions
        return gp_expanded

    # Threshold the node outputs into hard predictions.
    graph_predictions = (output > mpar.gcn_th).cpu().numpy().astype(np.float32)
    refined = reconstruct_from_n6(graph_predictions, node_voxel, roi_vol.shape)  # recovering the volume shape

    # Re-embed the ROI crops into full-size volumes for scoring.
    segmentation_expanded = np.zeros(vol.shape, dtype=float)
    segmentation_expanded[roi_limits[0]:roi_limits[3], roi_limits[1]:roi_limits[4], roi_limits[2]:roi_limits[5]] \
        = segmentation

    refined_expanded = np.zeros(vol.shape, dtype=float)
    refined_expanded[roi_limits[0]:roi_limits[3], roi_limits[1]:roi_limits[4], roi_limits[2]:roi_limits[5]] = refined

    cnn_slice_dsc = mean_vol_dsc(segmentation_expanded, gt)
    gcn_slice_dsc = mean_vol_dsc(refined_expanded, gt)

    cnn_vol_dsc = vol_dsc(segmentation_expanded, gt)
    gcn_vol_dsc = vol_dsc(refined_expanded, gt)

    np.save(dirs.GRAPH_PREDICTION, refined)
    npy_to_nifti(refined_expanded, dirs.NIFTI_GRAPH_SEG)

    return {
        "cnn_slice_dsc": cnn_slice_dsc,
        "gcn_slice_dsc": gcn_slice_dsc,
        "cnn_vol_dsc": cnn_vol_dsc,
        "gcn_vol_dsc": gcn_vol_dsc
    }
def federated_train(epoch):
    """One round of federated training.

    Trains each remote worker's model on its own partition, and every
    ``args.E`` epochs securely averages the two workers' parameters
    (PySyft-style additive secret sharing — TODO confirm syft semantics)
    and writes the average back into every worker's model.  Validation
    metrics of ``bob_fed_model`` are appended to slot 3 of the trackers.
    """
    # Local step on every worker's private partition; per-worker train
    # metrics go into tracker slots 3, 4, ...
    for remote_index in range(len(compute_nodes)):
        adj_train, features_train, labels_train, index_train = remote_dataset[
            remote_index]
        models[remote_index], loss, acc = update(epoch, adj_train,
                                                 features_train, labels_train,
                                                 models[remote_index],
                                                 optimizers[remote_index],
                                                 index_train)
        loss_train_list[remote_index + 3].append(loss.cpu().item())
        accuracy_train_list[remote_index + 3].append(acc.cpu().item())
    if (epoch + 1) % args.E == 0:
        # encrypt
        # Secret-share each parameter across bob and alice (james provides
        # the crypto primitives), then decrypt the sum and divide by 2 —
        # i.e. a plain average over the two participating workers.
        new_params = list()
        for param_i in range(len(params[0])):
            spdz_params = list()
            for remote_index in range(len(compute_nodes)):
                spdz_params.append(
                    loss_param(
                        params[remote_index][param_i]).fix_precision().share(
                            bob, alice, crypto_provider=james))
            new_param = (spdz_params[0] +
                         spdz_params[1]).get().float_precision() / 2
            new_params.append(new_param)
        # clean up
        with torch.no_grad():
            # Zero the old parameters in place before overwriting them.
            for x in params:
                for param in x:
                    param *= 0

            # for model in models:
            #     model.get()

            # Copy the averaged parameters back into every worker's model.
            for remote_index in range(len(compute_nodes)):
                for param_index in range(len(params[remote_index])):
                    params[remote_index][param_index].set_(
                        new_params[param_index].cuda())

    # Validation pass of the federated model; recorded in tracker slot 3.
    bob_fed_model.eval()
    output = bob_fed_model(features, adj)
    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    loss_val_list[3].append(loss_val.cpu().item())
    accuracy_val_list[3].append(acc_val.cpu().item())
def update(epoch, adj_train, features_train, labels_train, model_train,
           optimizer, index):
    """One local training step on a remote worker's data partition.

    Sends the model to the worker holding the labels, trains one step
    there, then retrieves model and metrics.  Returns
    ``(model_train, loss_train, acc_train)`` with the metric tensors
    already pulled back locally.
    """
    # Ship the model to the worker that owns this partition's labels
    # (PySyft-style remote tensors — TODO confirm).
    model_train.send(labels_train.location)
    model_train.train()

    optimizer.zero_grad()
    output = model_train(features_train, adj_train)
    loss_train = F.nll_loss(output[index], labels_train)
    acc_train = accuracy(output[index], labels_train)
    loss_train.backward()
    optimizer.step()
    # Pull the metric tensors back locally so .item() below works.
    loss_train = loss_train.get()
    acc_train = acc_train.get()
    print(
        'Epoch: {:04d}'.format(epoch + 1),
        'loss_train: {:.4f}'.format(loss_train.item()),
        'acc_train: {:.4f}'.format(acc_train.item()),
    )
    # Retrieve the updated model from the worker before returning it.
    model_train.get()
    return model_train, loss_train, acc_train
# Exemplo n.º 8
# 0
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()

#########
# Train #
#########
# Top-level training loop: one optimisation step per epoch, followed by a
# validation measurement (with dropout disabled unless opt.fastmode is set).
t_total = time.time()
for epoch in range(opt.n_epochs):
    t = time.time()

    # --- training step ---
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    # --- validation metrics ---
    # In fastmode the train-mode forward pass is reused; otherwise re-run
    # the model in eval mode first.
    if not opt.fastmode:
        model.eval()
        output = model(features, adj)
    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])

    print(f'[Epoch {epoch+1:04d}/{opt.n_epochs}]'
          f'[Train loss: {loss_train.item():.4f}]'
          f'[Train accuracy: {acc_train.item():.4f}]'
          f'[Validation loss: {loss_val.item():.4f}]'
          f'[Validation accuracy: {acc_val.item():.4f}]'
          f'[Time: {time.time() - t:.4f}s]')