Example #1

def main():
    global args
    args = parser.parse_args()

    # Check if CUDA is enabled
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    unlabeled_datasets = [
        "IMDB-BINARY", "IMDB-MULTI", "REDDIT-BINARY", "REDDIT-MULTI-5K",
        "COLLAB", "SYNTHETIC"
    ]
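    # These benchmark datasets have no node labels, so the unlabeled kernel
    # implementations are used for them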
    if args.dataset in unlabeled_datasets:
        use_node_labels = False
        from graph_kernels import sp_kernel, wl_kernel
    else:
        use_node_labels = True
        from graph_kernels_labeled import sp_kernel, wl_kernel

    # Only the Weisfeiler-Lehman subtree kernel is used here; the imported
    # sp_kernel could be added to the list to combine several kernels
    kernels = [wl_kernel]
    n_kernels = len(kernels)

    print('Computing graph maps')
    Q, subgraphs, labels, shapes = compute_nystrom(args.dataset,
                                                   use_node_labels, args.d,
                                                   args.community_detection,
                                                   kernels)

    # Stack the Nystrom feature maps of the individual kernels into a single
    # 3-D array with one slice per kernel
    M = np.zeros((shapes[0], shapes[1], n_kernels))
    for idx, k in enumerate(kernels):
        M[:, :, idx] = Q[idx]

    Q = M

    # Encode class labels as integers
    le = LabelEncoder()
    y = le.fit_transform(labels)

    # Build the input matrix: each graph is a sequence of community indices,
    # zero-padded to the length of the longest sequence
    max_n_communities = max([len(x.split(" ")) for x in subgraphs])
    x = np.zeros((len(subgraphs), max_n_communities), dtype=np.int32)
    for i in range(len(subgraphs)):
        communities = subgraphs[i].split()
        for j in range(len(communities)):
            x[i, j] = int(communities[j])

    kf = KFold(n_splits=10, shuffle=True, random_state=None)
    accs = []
    it = 0

    print('Starting cross-validation')

    for train_index, test_index in kf.split(x):
        it += 1
        best_acc1 = 0

        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]
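        # Hold out 10% of the training fold as a validation set for model selection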
        x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                          y_train,
                                                          test_size=0.1)

        train_loader, val_loader, test_loader = create_train_val_test_loaders(
            Q, x_train, x_val, x_test, y_train, y_val, y_test, args.batch_size)

        print('\tCreate model')
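        # A new model is trained from scratch for every fold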
        model = CNN(input_size=args.n_filters,
                    hidden_size=args.hidden_size,
                    n_classes=np.unique(y).size,
                    d=args.d,
                    n_kernels=n_kernels,
                    max_n_communities=max_n_communities)

        print('Optimizer')
        optimizer = optim.Adam(model.parameters(), lr=args.lr)

        criterion = nn.CrossEntropyLoss()

        # Classification accuracy: fraction of predictions that match the targets
        def evaluation(output, target):
            return torch.sum(output.eq(target)) / target.size(0)

        #print('Logger')
        #logger = Logger(args.logPath)

        # Linearly decay the learning rate from lr to lr * lr_decay between
        # epochs args.epochs * schedule[0] and args.epochs * schedule[1]
        lr = args.lr
        lr_step = (args.lr - args.lr * args.lr_decay) / (
            args.epochs * args.schedule[1] - args.epochs * args.schedule[0])

        # Start each fold with a fresh checkpoint directory
        if os.path.isdir(args.checkpoint_dir):
            shutil.rmtree(args.checkpoint_dir)

        os.makedirs(args.checkpoint_dir)

        print('Check cuda')
        if args.cuda:
            print('\t* Cuda')
            model = model.cuda()
            criterion = criterion.cuda()

        # Training loop
        for epoch in range(0, args.epochs):

            if args.epochs * args.schedule[0] < epoch < args.epochs * args.schedule[1]:
                lr -= lr_step
                #args.lr -= lr_step
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr

            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, evaluation)

            # evaluate on the validation set
            acc1 = validate(val_loader, model, criterion, evaluation)

            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)
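            # Checkpoint the model after every epoch; the best one on the
            # validation set is saved separately and reloaded for testing below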
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                },
                is_best=is_best,
                directory=args.checkpoint_dir)

            # Logger step
            #logger.log_value('learning_rate', args.lr).step()

        # Load the best checkpoint (selected on the validation set) and evaluate it on the test set
        best_model_file = os.path.join(args.checkpoint_dir, 'model_best.pth')
        if not os.path.isdir(args.checkpoint_dir):
            os.makedirs(args.checkpoint_dir)
        if os.path.isfile(best_model_file):
            print("=> loading best model '{}'".format(best_model_file))
            checkpoint = torch.load(best_model_file)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            if args.cuda:
                model.cuda()
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded best model '{}' (epoch {})".format(
                best_model_file, checkpoint['epoch']))
        else:
            print("=> no best model found at '{}'".format(best_model_file))

        # Evaluate the selected model on the held-out test fold
        acc = validate(test_loader, model, criterion, evaluation)
        print("Accuracy at iteration " + str(it) + ": " + str(acc))
        accs.append(acc)
    print("Average accuracy: ", np.mean(accs))
    print("std: ", np.std(accs))