Example #1
def _split_weighted_sample(self, X, y, sample_weight, is_stratified=False):
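    """Split the sample into folds, keeping the total sample weight of each
    test fold as balanced as possible."""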
    if is_stratified:
        kfold_model = StratifiedKFold(n_splits=self.n_splits,
                                      shuffle=self.shuffle,
                                      random_state=self.random_state)
    else:
        kfold_model = KFold(n_splits=self.n_splits,
                            shuffle=self.shuffle,
                            random_state=self.random_state)
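    # Without sample weights there is nothing to balance: defer to the plain splitter.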
    if sample_weight is None:
        return kfold_model.split(X, y)
    weights_sum = np.sum(sample_weight)
    max_deviations = []
    all_splits = []
    for i in range(self.n_trials + 1):
        splits = [test for (train, test) in list(kfold_model.split(X, y))]
        weight_fracs = np.array(
            [np.sum(sample_weight[split]) / weights_sum for split in splits])
        if np.all(weight_fracs > .95 / self.n_splits):
            # Found a good split, return.
            return self._get_folds_from_splits(splits, X.shape[0])
        # Record all splits in case the stratification by weight yields a worse partition
        all_splits.append(splits)
        max_deviation = np.max(np.abs(weight_fracs - 1 / self.n_splits))
        max_deviations.append(max_deviation)
        # Reseed random generator and try again
        kfold_model.shuffle = True
        kfold_model.random_state = None

    # If KFold fails after n_trials, we try the next best thing: stratifying by weight groups
    warnings.warn(
        "The KFold algorithm failed to find a weight-balanced partition after "
        "{n_trials} trials. Falling back on a weight stratification algorithm."
        .format(n_trials=self.n_trials), UserWarning)
    if is_stratified:
        stratified_weight_splits = [[]] * self.n_splits
        for y_unique in np.unique(y.flatten()):
            class_inds = np.argwhere(y == y_unique).flatten()
            class_splits = self._get_splits_from_weight_stratification(
                sample_weight[class_inds])
            stratified_weight_splits = [
                split + list(class_inds[class_split]) for split, class_split in
                zip(stratified_weight_splits, class_splits)
            ]
    else:
        stratified_weight_splits = self._get_splits_from_weight_stratification(
            sample_weight)
    weight_fracs = np.array([
        np.sum(sample_weight[split]) / weights_sum
        for split in stratified_weight_splits
    ])
    if np.all(weight_fracs > .95 / self.n_splits):
        # Found a good split, return.
        return self._get_folds_from_splits(stratified_weight_splits,
                                           X.shape[0])
    else:
        # Did not find a good split
        # Record the deviation for the weight-stratified split to compare with KFold splits
        all_splits.append(stratified_weight_splits)
        max_deviation = np.max(np.abs(weight_fracs - 1 / self.n_splits))
        max_deviations.append(max_deviation)
    # Return most weight-balanced partition
    min_deviation_index = np.argmin(max_deviations)
    return self._get_folds_from_splits(all_splits[min_deviation_index],
                                       X.shape[0])
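
The method above belongs to a cross-validation splitter class whose surrounding definition is not shown. The sketch below is a minimal, self-contained illustration of the same retry idea under stated assumptions: it redraws shuffled KFold splits until every test fold carries at least 95% of its expected share of the total sample weight, and otherwise returns the most balanced partition seen. The helper name balanced_kfold_test_splits and the toy data are hypothetical; the 0.95 tolerance and the retry loop mirror the code above.

import numpy as np
from sklearn.model_selection import KFold

def balanced_kfold_test_splits(X, y, sample_weight, n_splits=3, n_trials=10):
    """Hypothetical helper: redraw shuffled KFold splits until test folds are weight-balanced."""
    weights_sum = np.sum(sample_weight)
    best_splits, best_deviation = None, np.inf
    for _ in range(n_trials):
        kf = KFold(n_splits=n_splits, shuffle=True, random_state=None)
        splits = [test for _, test in kf.split(X, y)]
        weight_fracs = np.array(
            [np.sum(sample_weight[split]) / weights_sum for split in splits])
        if np.all(weight_fracs > 0.95 / n_splits):
            # Every test fold holds at least 95% of its expected weight share.
            return splits
        deviation = np.max(np.abs(weight_fracs - 1 / n_splits))
        if deviation < best_deviation:
            best_splits, best_deviation = splits, deviation
    # No trial met the tolerance: fall back on the most balanced partition seen.
    return best_splits

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))
y = rng.integers(0, 2, size=100)
w = rng.exponential(size=100)
folds = balanced_kfold_test_splits(X, y, w)
print([round(np.sum(w[f]) / np.sum(w), 3) for f in folds])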
Example #2
def main():
    global args
    args = parser.parse_args()

    # Check if CUDA is enabled
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    unlabeled_datasets = [
        "IMDB-BINARY", "IMDB-MULTI", "REDDIT-BINARY", "REDDIT-MULTI-5K",
        "COLLAB", "SYNTHETIC", "raw-gitgraph"
    ]
    if args.dataset in unlabeled_datasets:
        use_node_labels = False
        from graph_kernels import sp_kernel, wl_kernel
    else:
        use_node_labels = True
        from graph_kernels_labeled import sp_kernel, wl_kernel

    kernels = [wl_kernel]
    n_kernels = len(kernels)

    print('Computing graph maps')
    Q, subgraphs, labels, shapes = compute_nystrom(args.dataset,
                                                   use_node_labels, args.d,
                                                   args.community_detection,
                                                   kernels)

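    # Stack the feature maps returned for each kernel into a single 3-D array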
    M = np.zeros((shapes[0], shapes[1], n_kernels))
    for idx, k in enumerate(kernels):
        M[:, :, idx] = Q[idx]

    Q = M

    # Encode the class labels as integers
    le = LabelEncoder()
    y = le.fit_transform(labels)

    # Build the input matrix: pad each graph's community indices to a fixed width
    max_n_communities = max([len(x.split(" ")) for x in subgraphs])
    x = np.zeros((len(subgraphs), max_n_communities), dtype=np.int32)
    for i in range(len(subgraphs)):
        communities = subgraphs[i].split()
        for j in range(len(communities)):
            x[i, j] = int(communities[j])

    print(x[0, :])

    kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=None)
    accs = []
    it = 0

    print('Starting cross-validation')

    for train_index, test_index in kf.split(x, y):
        it += 1
        best_acc1 = 0

        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]
        x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                          y_train,
                                                          test_size=0.1)

        train_loader, val_loader, test_loader = create_train_val_test_loaders(
            Q, x_train, x_val, x_test, y_train, y_val, y_test, args.batch_size)

        print('\tCreate model')
        model = CNN(input_size=args.n_filters,
                    hidden_size=args.hidden_size,
                    n_classes=np.unique(y).size,
                    d=args.d,
                    n_kernels=n_kernels,
                    max_n_communities=max_n_communities)

        print('Optimizer')
        optimizer = optim.Adam(model.parameters(), lr=args.lr)

        criterion = nn.CrossEntropyLoss()

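        # Accuracy metric: fraction of predictions that match the targets in a batch.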
        def evaluation(output, target):
            return torch.sum(output.eq(target)) / target.size(0)

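        # Linearly decay the learning rate between the two epoch fractions in args.schedule.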
        lr = args.lr
        lr_step = (args.lr - args.lr * args.lr_decay) / (
            args.epochs * args.schedule[1] - args.epochs * args.schedule[0])

        if os.path.isdir(args.checkpoint_dir):
            shutil.rmtree(args.checkpoint_dir)

        os.makedirs(args.checkpoint_dir)

        print('Check cuda')
        if args.cuda:
            print('\t* Cuda')
            model = model.cuda()
            criterion = criterion.cuda()

        # Epoch for loop
        for epoch in range(0, args.epochs):

            if (epoch > args.epochs * args.schedule[0]
                    and epoch < args.epochs * args.schedule[1]):
                lr -= lr_step
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr

            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, evaluation)

            # evaluate on test set
            acc1 = validate(val_loader, model, criterion, evaluation)

            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                },
                is_best=is_best,
                directory=args.checkpoint_dir)

        # get the best checkpoint and test it with test set
        best_model_file = os.path.join(args.checkpoint_dir, 'model_best.pth')
        if not os.path.isdir(args.checkpoint_dir):
            os.makedirs(args.checkpoint_dir)
        if os.path.isfile(best_model_file):
            print("=> loading best model '{}'".format(best_model_file))
            checkpoint = torch.load(best_model_file)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            if args.cuda:
                model.cuda()
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded best model '{}' (epoch {})".format(
                best_model_file, checkpoint['epoch']))
        else:
            print("=> no best model found at '{}'".format(best_model_file))

        # For testing
        acc = validate(test_loader, model, criterion, evaluation)
        print("Accuracy at iteration " + str(it) + ": " + str(acc))
        accs.append(acc)
    print("Average accuracy: ", np.mean(accs))
    print("std: ", np.std(accs))