Example #1
def fte_bte_experiment(
    train_x,
    train_y,
    test_x,
    test_y,
    ntrees,
    shift,
    slot,
    which_task,
    acorn=None,
):

    # We initialize lists to store the results
    df = pd.DataFrame()
    accuracies_across_tasks = []

    # Declare the progressive learner model (L2F), using ntrees estimators per task
    learner = LifelongClassificationForest(default_n_estimators=ntrees)

    for task_num in range((which_task - 1), 10):
        accuracy_per_task = []
        # print("Starting Task {} For Shift {} For Slot {}".format(task_num, shift, slot))
        if acorn is not None:
            np.random.seed(acorn)

        # If first task, add task. Else, add a transformer for the task
        if task_num == (which_task - 1):
            learner.add_task(
                X=train_x[(task_num * 900):((task_num + 1) * 900)],
                y=train_y[(task_num * 900):((task_num + 1) * 900)],
                task_id=0,
            )

            t_num = 0
            # Add transformers for all tasks up to the current task (task t)
            while t_num < task_num:
                # Make a prediction on task t using the trained learner on test data
                llf_task = learner.predict(
                    test_x[((which_task - 1) * 1000):(which_task * 1000), :],
                    task_id=0,
                )
                acc = np.mean(llf_task == test_y[((which_task - 1) *
                                                  1000):(which_task * 1000)])
                accuracies_across_tasks.append(acc)

                learner.add_transformer(
                    X=train_x[(t_num * 900):((t_num + 1) * 900)],
                    y=train_y[(t_num * 900):((t_num + 1) * 900)],
                )

                # Add transformer for next task
                t_num = t_num + 1

        else:
            learner.add_transformer(
                X=train_x[(task_num * 900):((task_num + 1) * 900)],
                y=train_y[(task_num * 900):((task_num + 1) * 900)],
            )

        # Make a prediction on task t using the trained learner on test data
        llf_task = learner.predict(test_x[((which_task - 1) *
                                           1000):(which_task * 1000), :],
                                   task_id=0)
        acc = np.mean(llf_task == test_y[((which_task - 1) *
                                          1000):(which_task * 1000)])
        accuracies_across_tasks.append(acc)
        # print("Accuracy Across Tasks: {}".format(accuracies_across_tasks))

    df["task"] = range(1, 11)
    df["task_accuracy"] = accuracies_across_tasks

    return df
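A minimal invocation sketch for the function above, using random stand-in arrays rather than the original image data. The slicing inside fte_bte_experiment assumes 900 training points and 1,000 test points per task across 10 tasks, so the shapes below only mimic that layout; the imports listed are the ones the snippet itself relies on.

import numpy as np
import pandas as pd
from proglearn.forest import LifelongClassificationForest

# stand-in data shaped like the layout the function expects (10 tasks)
train_x = np.random.rand(10 * 900, 20)
train_y = np.random.randint(0, 10, size=10 * 900)
test_x = np.random.rand(10 * 1000, 20)
test_y = np.random.randint(0, 10, size=10 * 1000)

df = fte_bte_experiment(train_x, train_y, test_x, test_y,
                        ntrees=10, shift=1, slot=0, which_task=1, acorn=12345)
print(df)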
Example #2
def label_shuffle_experiment(
    train_x,
    train_y,
    test_x,
    test_y,
    ntrees,
    shift,
    slot,
    num_points_per_task,
    acorn=None,
):

    # We initialize lists to store the results
    df = pd.DataFrame()
    shifts = []
    accuracies_across_tasks = []

    # Declare the progressive learner model (L2F), with ntrees as a parameter
    learner = LifelongClassificationForest(default_n_estimators=ntrees)

    for task_ii in range(10):
        print("Starting Task {} For Fold {} For Slot {}".format(
            task_ii, shift, slot))
        if acorn is not None:
            np.random.seed(acorn)

        # If task number is 0, add task. Else, add a transformer for the task
        if task_ii == 0:
            learner.add_task(
                X=train_x[task_ii * 5000 +
                          slot * num_points_per_task:task_ii * 5000 +
                          (slot + 1) * num_points_per_task],
                y=train_y[task_ii * 5000 +
                          slot * num_points_per_task:task_ii * 5000 +
                          (slot + 1) * num_points_per_task],
                task_id=0,
            )
        else:
            learner.add_transformer(
                X=train_x[task_ii * 5000 +
                          slot * num_points_per_task:task_ii * 5000 +
                          (slot + 1) * num_points_per_task],
                y=train_y[task_ii * 5000 +
                          slot * num_points_per_task:task_ii * 5000 +
                          (slot + 1) * num_points_per_task],
            )

        # Make a prediction on task 0 using the trained learner on test data
        llf_task = learner.predict(test_x[:1000], task_id=0)

        # Calculate the accuracy of the task 0 predictions
        acc = np.mean(llf_task == test_y[:1000])
        accuracies_across_tasks.append(acc)
        shifts.append(shift)

        print("Accuracy Across Tasks: {}".format(accuracies_across_tasks))

    df["data_fold"] = shifts
    df["task"] = range(1, 11)
    df["task_1_accuracy"] = accuracies_across_tasks

    return df
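A hedged usage sketch with synthetic stand-in data. label_shuffle_experiment assumes the training set is grouped into 10 blocks of 5,000 points per task, that slot selects a contiguous chunk of num_points_per_task points inside a block, and that the first 1,000 test points belong to task 0; the arrays below only imitate that layout.

import numpy as np
import pandas as pd
from proglearn.forest import LifelongClassificationForest

num_points_per_task = 500
train_x = np.random.rand(10 * 5000, 20)
train_y = np.random.randint(0, 10, size=10 * 5000)
test_x = np.random.rand(1000, 20)
test_y = np.random.randint(0, 10, size=1000)

df = label_shuffle_experiment(train_x, train_y, test_x, test_y, ntrees=10,
                              shift=1, slot=0,
                              num_points_per_task=num_points_per_task,
                              acorn=123)
print(df)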
Example #3
def run_experiment(x_data,
                   y_data,
                   num_tasks,
                   num_points_per_task,
                   ntrees=10,
                   model='uf',
                   reps=100):
    """ Runs the FTE/BTE experiment.
        Referenced Chenyu's code, with modifications to adjust the number of tasks.
    """

    # initialize list for storing results
    accuracies_across_tasks = []

    # format data
    if model == 'dnn':  # add dnn implementation in the future
        x = x_data
        y = y_data
    elif model == 'uf':
        x = x_data.reshape(len(x_data), -1)
        y = y_data

    # get y values per task
    unique_y = np.unique(y_data)
    ys_by_task = unique_y.reshape(num_tasks, int(len(unique_y) / num_tasks))

    # run experiment over all reps
    for rep in range(reps):
        #print('Starting rep', rep)

        # for each task
        for task in range(num_tasks):

            # initialize progressive learner
            learner = LifelongClassificationForest(
                default_n_estimators=ntrees
            )  #default_max_depth=np.ceil(np.log2(num_points_per_task))

            # get train/test data (train = num_points_per_task)
            index = np.where(np.in1d(y, ys_by_task[task]))
            x_task0 = x[index]
            y_task0 = y[index]
            train_x_task0, test_x_task0, train_y_task0, test_y_task0 = train_test_split(
                x_task0, y_task0, test_size=0.25)
            train_x_task0 = train_x_task0[:num_points_per_task]
            train_y_task0 = train_y_task0[:num_points_per_task]

            # feed to learner and predict on single task
            learner.add_task(train_x_task0, train_y_task0)
            task_0_predictions = learner.predict(test_x_task0, task_id=0)
            accuracies_across_tasks.append(
                np.mean(task_0_predictions == test_y_task0))

            # evaluate for other tasks
            for other_task in range(num_tasks):

                if other_task == task:
                    pass

                else:

                    # get train/test data (train = num_points_per_task)
                    index = np.random.choice(np.where(
                        np.in1d(y, ys_by_task[other_task]))[0],
                                             num_points_per_task,
                                             replace=False)
                    train_x = x[index]
                    train_y = y[index]

                    # add the other task; its transformer is also used when voting on task 0
                    learner.add_task(train_x, train_y)

                # predict on current task using other tasks
                prev_task_predictions = learner.predict(test_x_task0,
                                                        task_id=0)
                accuracies_across_tasks.append(
                    np.mean(prev_task_predictions == test_y_task0))

    # average results
    accuracy_all_task = np.array(accuracies_across_tasks).reshape((reps, -1))
    accuracy_all_task = np.mean(accuracy_all_task, axis=0)

    return accuracy_all_task
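A small self-contained call of run_experiment on synthetic stand-in data. With 10 unique labels and num_tasks=5, each task gets 2 classes; train_test_split comes from sklearn.model_selection, which the snippet assumes is imported.

import numpy as np
from sklearn.model_selection import train_test_split
from proglearn.forest import LifelongClassificationForest

x_data = np.random.rand(1000, 20)        # 1,000 samples, 20 features
y_data = np.repeat(np.arange(10), 100)   # 10 classes, 100 samples each

mean_accuracies = run_experiment(x_data, y_data, num_tasks=5,
                                 num_points_per_task=50, ntrees=10,
                                 model='uf', reps=5)
print(mean_accuracies.shape)  # 5 tasks x (1 single-task + 5 cross-task) = 30 entries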
Example #4
def ftebte_exp(x, y, model, num_tasks, num_trees, reps, shift):
    """
    Runs the FTE/BTE experiment given the following parameters:
        x - data features
        y - data labels
        model - "uf" or "nn"
        num_tasks - number of tasks
        num_trees - number of trees
        reps - number of repetitions
        shift - whether to shift the data
    """

    # shift data if indicated
    x, y = shift_data(x, y, shift)

    # initialize list for storing results
    accuracies_across_tasks = []

    # get y values per task
    ys_by_task = [np.unique(i) for i in y]

    # get the count of the least frequent label over all tasks
    min_labelct = np.min(
        [np.min(np.unique(each_set, return_counts=True)[1]) for each_set in y])

    # run experiment over all reps
    for rep in range(reps):
        train_x_task = []
        train_y_task = []
        test_x_task = []
        test_y_task = []

        # sample min_labelct points per label from each dataset (31 in the original data)
        x_sample = []
        y_sample = []
        for dataset, label in zip(x, y):
            sample = []
            for unique_label in np.unique(label):
                sample += list(
                    np.random.choice(
                        np.where(label == unique_label)[0], min_labelct))
            x_sample.append(dataset[sample])
            y_sample.append(label[sample])

        # initialize overall learner
        learner = LifelongClassificationForest(default_n_estimators=num_trees,
                                               default_max_depth=30)

        # for each task
        for task in range(num_tasks):
            # get train/test data
            tr_x, te_x, tr_y, te_y = train_test_split(x_sample[task],
                                                      y_sample[task],
                                                      test_size=0.2)
            train_x_task.append(tr_x)
            train_y_task.append(tr_y)
            test_x_task.append(te_x)
            test_y_task.append(te_y)

            # predict on single task (UF learner) - CHANGE TO UNCERTAINTYFOREST LATER
            uf_learner = LifelongClassificationForest(
                default_n_estimators=num_trees, default_max_depth=30)
            uf_learner.add_task(train_x_task[task], train_y_task[task])
            uf_predictions = uf_learner.predict(test_x_task[task], task_id=0)
            accuracies_across_tasks.append(
                np.mean(uf_predictions == test_y_task[task]))

            # feed to overall learner
            learner.add_task(train_x_task[task], train_y_task[task])

            # evaluate for other tasks
            for other_task in range(num_tasks):

                if other_task > task:
                    pass

                else:
                    # predict on current task using other tasks
                    prev_task_predictions = learner.predict(
                        test_x_task[other_task], task_id=other_task)
                    accuracies_across_tasks.append(
                        np.mean(
                            prev_task_predictions == test_y_task[other_task]))

    # average results
    accuracy_all_task = np.array(accuracies_across_tasks).reshape((reps, -1))
    accuracy_all_task = np.mean(accuracy_all_task, axis=0)

    return accuracy_all_task
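A hedged sketch of calling ftebte_exp. Here x and y are lists with one array per task, and shift_data is the author's helper (not shown above), so a hypothetical no-op stand-in is defined purely for illustration.

import numpy as np
from sklearn.model_selection import train_test_split
from proglearn.forest import LifelongClassificationForest

def shift_data(x, y, shift):
    # hypothetical stand-in: the real helper reorders/shifts the per-task datasets
    return x, y

rng = np.random.default_rng(0)
num_tasks = 3
x = [rng.random((120, 10)) for _ in range(num_tasks)]    # one dataset per task
y = [rng.integers(0, 2, 120) for _ in range(num_tasks)]  # binary labels per task

acc = ftebte_exp(x, y, model="uf", num_tasks=num_tasks, num_trees=10,
                 reps=2, shift=0)
print(acc)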
Example #5
def experiment(n_task1, n_task2, n_test=1000, 
               n_trees=10, max_depth=None, random_state=None):
    
    """
    A function to run a progressive learning experiment between two tasks,
    where the task data is generated using spirals (a 3-spiral and a 5-spiral task).
    
    Parameters
    ----------
    n_task1 : int
        Total number of training samples for task 1.

    n_task2 : int
        Total number of training samples for task 2.

    n_test : int, optional (default=1000)
        Number of test samples for each task.
            
    n_trees : int, optional (default=10)
        Number of total trees to train for each task.

    max_depth : int, optional (default=None)
        Maximum allowable depth for each tree.
        
    random_state : int, RandomState instance, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        
    
    Returns
    -------
    errors : array of shape [6]
        Elements of the array are organized as: single-task error for task 1,
        multitask error for task 1, single-task error for task 2,
        multitask error for task 2, naive UF error for task 1,
        naive UF error for task 2.
    """

    if n_task1 == 0 and n_task2 == 0:
        raise ValueError('Wake up and provide samples to train!!!')

    if random_state is not None:
        np.random.seed(random_state)

    errors = np.zeros(6, dtype=float)


    progressive_learner = LifelongClassificationForest(default_n_estimators=n_trees)
    uf1 = LifelongClassificationForest(default_n_estimators=n_trees)
    naive_uf = LifelongClassificationForest(default_n_estimators=n_trees)
    uf2 = LifelongClassificationForest(default_n_estimators=n_trees)
    
    # source data (3-spiral task)
    X_task1, y_task1 = generate_spirals(n_task1, 3, noise=0.8)
    test_task1, test_label_task1 = generate_spirals(n_test, 3, noise=0.8)

    # target data (5-spiral task)
    X_task2, y_task2 = generate_spirals(n_task2, 5, noise=0.4)
    test_task2, test_label_task2 = generate_spirals(n_test, 5, noise=0.4)

    if n_task1 == 0:
        progressive_learner.add_task(X_task2, y_task2, n_estimators=n_trees)
        uf2.add_task(X_task2, y_task2, n_estimators=n_trees)

        errors[0] = 0.5
        errors[1] = 0.5

        uf_task2 = uf2.predict(test_task2, task_id=0)
        l2f_task2 = progressive_learner.predict(test_task2, task_id=0)

        errors[2] = 1 - np.mean(uf_task2 == test_label_task2)
        errors[3] = 1 - np.mean(l2f_task2 == test_label_task2)
        
        errors[4] = 0.5
        errors[5] = 1 - np.mean(uf_task2 == test_label_task2)
    elif n_task2 == 0:
        progressive_learner.add_task(X_task1, y_task1, n_estimators=n_trees)
        uf1.add_task(X_task1, y_task1, n_estimators=n_trees)

        uf_task1 = uf1.predict(test_task1, task_id=0)
        l2f_task1 = progressive_learner.predict(test_task1, task_id=0)

        errors[0] = 1 - np.mean(uf_task1 == test_label_task1)
        errors[1] = 1 - np.mean(l2f_task1 == test_label_task1)
        
        errors[2] = 0.5
        errors[3] = 0.5
        
        errors[4] = 1 - np.mean(uf_task1 == test_label_task1)
        errors[5] = 0.5
    else:
        progressive_learner.add_task(X_task1, y_task1, n_estimators=n_trees)
        progressive_learner.add_task(X_task2, y_task2, n_estimators=n_trees)

        uf1.add_task(X_task1, y_task1, n_estimators=2 * n_trees)
        uf2.add_task(X_task2, y_task2, n_estimators=2 * n_trees)

        naive_uf_train_x = np.concatenate((X_task1, X_task2), axis=0)
        naive_uf_train_y = np.concatenate((y_task1, y_task2), axis=0)
        naive_uf.add_task(naive_uf_train_x, naive_uf_train_y, n_estimators=n_trees)

        uf_task1 = uf1.predict(test_task1, task_id=0)
        l2f_task1 = progressive_learner.predict(test_task1, task_id=0)
        uf_task2 = uf2.predict(test_task2, task_id=0)
        l2f_task2 = progressive_learner.predict(test_task2, task_id=1)
        naive_uf_task1 = naive_uf.predict(test_task1, task_id=0)
        naive_uf_task2 = naive_uf.predict(test_task2, task_id=0)

        errors[0] = 1 - np.mean(uf_task1 == test_label_task1)
        errors[1] = 1 - np.mean(l2f_task1 == test_label_task1)
        errors[2] = 1 - np.mean(uf_task2 == test_label_task2)
        errors[3] = 1 - np.mean(l2f_task2 == test_label_task2)
        errors[4] = 1 - np.mean(naive_uf_task1 == test_label_task1)
        errors[5] = 1 - np.mean(naive_uf_task2 == test_label_task2)

    return errors
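A possible way to call the spiral experiment above, assuming proglearn's generate_spirals and LifelongClassificationForest are imported as the snippet requires. One way to read the output is the ratio of single-task error to multitask error for each task: a ratio above 1 means the multitask learner beat the single-task learner on that task.

import numpy as np
from proglearn.forest import LifelongClassificationForest
from proglearn.sims import generate_spirals

errors = experiment(n_task1=500, n_task2=500, n_test=500,
                    n_trees=10, random_state=12345)
print("task 1 transfer efficiency:", errors[0] / errors[1])
print("task 2 transfer efficiency:", errors[2] / errors[3])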
Example #6
def experiment(n_xor, n_nxor, n_test, reps, n_trees, max_depth, acorn=None):
    """
    Runs the Gaussian XOR N-XOR experiment.
    Returns the mean error.
    """

    # initialize experiment
    if n_xor == 0 and n_nxor == 0:
        raise ValueError("Wake up and provide samples to train!!!")

    # if acorn is specified, set random seed to it
    if acorn is not None:
        np.random.seed(acorn)

    # initialize array for storing errors
    errors = np.zeros((reps, 4), dtype=float)

    # run the progressive learning algorithm for a number of repetitions
    for i in range(reps):

        # initialize learners
        progressive_learner = LifelongClassificationForest(
            default_n_estimators=n_trees)
        uf = UncertaintyForest(n_estimators=2 * n_trees)

        # source data
        xor, label_xor = generate_gaussian_parity(n_xor, angle_params=0)
        test_xor, test_label_xor = generate_gaussian_parity(n_test,
                                                            angle_params=0)

        # target data
        nxor, label_nxor = generate_gaussian_parity(n_nxor,
                                                    angle_params=np.pi / 2)
        test_nxor, test_label_nxor = generate_gaussian_parity(
            n_test, angle_params=np.pi / 2)

        if n_xor == 0:
            # fit learners and predict
            progressive_learner.add_task(nxor, label_nxor)
            l2f_task2 = progressive_learner.predict(test_nxor, task_id=0)
            uf.fit(nxor, label_nxor)
            uf_task2 = uf.predict(test_nxor)
            # record errors
            errors[i, 0] = 0.5  # no data, so random chance of guessing correctly (err = 0.5)
            errors[i, 1] = 0.5  # no data, so random chance of guessing correctly (err = 0.5)
            errors[i, 2] = 1 - np.sum(uf_task2 == test_label_nxor) / n_test
            errors[i, 3] = 1 - np.sum(l2f_task2 == test_label_nxor) / n_test
        elif n_nxor == 0:
            # fit learners and predict
            progressive_learner.add_task(xor, label_xor)
            l2f_task1 = progressive_learner.predict(test_xor, task_id=0)
            uf.fit(xor, label_xor)
            uf_task1 = uf.predict(test_xor)
            # record errors
            errors[i, 0] = 1 - np.sum(uf_task1 == test_label_xor) / n_test
            errors[i, 1] = 1 - np.sum(l2f_task1 == test_label_xor) / n_test
            errors[i, 2] = 0.5  # no data, so random chance of guessing correctly (err = 0.5)
            errors[i, 3] = 0.5  # no data, so random chance of guessing correctly (err = 0.5)
        else:
            # fit learners and predict
            progressive_learner.add_task(xor, label_xor)
            progressive_learner.add_task(nxor, label_nxor)
            l2f_task1 = progressive_learner.predict(test_xor, task_id=0)
            l2f_task2 = progressive_learner.predict(test_nxor, task_id=1)
            uf.fit(xor, label_xor)
            uf_task1 = uf.predict(test_xor)
            uf.fit(nxor, label_nxor)
            uf_task2 = uf.predict(test_nxor)
            # record errors
            errors[i, 0] = 1 - np.sum(uf_task1 == test_label_xor) / n_test
            errors[i, 1] = 1 - np.sum(l2f_task1 == test_label_xor) / n_test
            errors[i, 2] = 1 - np.sum(uf_task2 == test_label_nxor) / n_test
            errors[i, 3] = 1 - np.sum(l2f_task2 == test_label_nxor) / n_test

    return np.mean(errors, axis=0)
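A sketch of running the Gaussian XOR / N-XOR experiment above; the four returned values are the mean UF and L2F errors on XOR and N-XOR, averaged over reps. The imports are the ones the snippet assumes.

import numpy as np
from proglearn.forest import LifelongClassificationForest, UncertaintyForest
from proglearn.sims import generate_gaussian_parity

mean_errors = experiment(n_xor=750, n_nxor=750, n_test=1000,
                         reps=10, n_trees=10, max_depth=None)
# order: [UF error on XOR, L2F error on XOR, UF error on N-XOR, L2F error on N-XOR]
print(mean_errors)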
Example #7
def LF_experiment(angle,
                  data_x,
                  data_y,
                  granularity,
                  max_depth,
                  reps=1,
                  ntrees=29,
                  acorn=None):

    # Set random seed to acorn if acorn is specified
    if acorn is not None:
        np.random.seed(acorn)

    errors = np.zeros(2)  # array of errors accumulated over the reps

    for rep in range(reps):
        # training and testing subsets are randomly selected by calling the cross_val_data function
        train_x1, train_y1, train_x2, train_y2, test_x, test_y = cross_val_data(
            data_x, data_y, total_cls=10)

        # Change data angle for second task
        tmp_data = train_x2.copy()
        _tmp_ = np.zeros((32, 32, 3), dtype=int)
        total_data = tmp_data.shape[0]

        for i in range(total_data):
            # rotate the image by the given angle (augmentation for the second task)
            tmp_ = image_aug(tmp_data[i], angle)
            tmp_data[i] = tmp_

        # flatten each image into a 1D feature vector, since the random forest expects flattened inputs
        train_x1 = train_x1.reshape((
            train_x1.shape[0],
            train_x1.shape[1] * train_x1.shape[2] * train_x1.shape[3],
        ))
        tmp_data = tmp_data.reshape((
            tmp_data.shape[0],
            tmp_data.shape[1] * tmp_data.shape[2] * tmp_data.shape[3],
        ))
        test_x = test_x.reshape(
            (test_x.shape[0],
             test_x.shape[1] * test_x.shape[2] * test_x.shape[3]))
        # the number of trees (estimators) is passed in because the default is 100 estimators
        progressive_learner = LifelongClassificationForest(
            default_n_estimators=ntrees, default_max_depth=max_depth)

        # Add the original task
        progressive_learner.add_task(X=train_x1, y=train_y1)

        # Predict and get errors for original task
        llf_single_task = progressive_learner.predict(test_x, task_id=0)

        # Add the new transformer
        progressive_learner.add_transformer(X=tmp_data, y=train_y2)

        # Predict and get errors with the new transformer
        llf_task1 = progressive_learner.predict(test_x, task_id=0)

        errors[1] = errors[1] + (1 - np.mean(llf_task1 == test_y))  # error from transfer learning
        errors[0] = errors[0] + (1 - np.mean(llf_single_task == test_y))  # error from the original task

    # errors are averaged across all reps; more reps gives a more stable estimate
    errors = errors / reps

    # Average errors for original task and transfer learning are returned for the angle tested
    return errors
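LF_experiment depends on two helpers that are not shown here (cross_val_data for the train/test split and image_aug for rotating images), so the call below is only a sketch: both helpers and the data arrays are hypothetical stand-ins that merely match the shapes the function expects.

import numpy as np
from proglearn.forest import LifelongClassificationForest

def cross_val_data(data_x, data_y, total_cls=10):
    # hypothetical stand-in: the real helper draws class-balanced train/test splits
    idx = np.random.permutation(len(data_x))
    a, b, c = idx[:400], idx[400:800], idx[800:1000]
    return data_x[a], data_y[a], data_x[b], data_y[b], data_x[c], data_y[c]

def image_aug(img, angle):
    # hypothetical stand-in: the real helper rotates the image by `angle` degrees
    return img

data_x = np.random.rand(1000, 32, 32, 3)     # CIFAR-style stand-in images
data_y = np.random.randint(0, 10, 1000)      # 10-class stand-in labels

errors = LF_experiment(angle=45, data_x=data_x, data_y=data_y,
                       granularity=1, max_depth=30, reps=1, ntrees=10)
print("single-task error:", errors[0], "| transfer error:", errors[1])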
Example #8
def test(NT, h, names, classifiers, datasets):
    i = 1
    # iterate over datasets
    for ds_cnt, ds in enumerate(datasets):
        # preprocess dataset, split into training and test part
        X, y = ds
        X = StandardScaler().fit_transform(X)
        X_train, X_test, y_train, y_test = \
            train_test_split(X, y, test_size=.4, random_state=42)

        x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
        y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))

        # just plot the dataset first
        cm = plt.cm.RdBu
        cm_bright = ListedColormap(['#FF0000', '#0000FF'])
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        if ds_cnt == 0:
            ax.set_title("Input data")
        # Plot the training points
        ax.scatter(X_train[:, 0],
                   X_train[:, 1],
                   c=y_train,
                   cmap=cm_bright,
                   edgecolors='k')
        # Plot the testing points
        ax.scatter(X_test[:, 0],
                   X_test[:, 1],
                   c=y_test,
                   cmap=cm_bright,
                   alpha=0.6,
                   edgecolors='k')
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        i += 1

        # iterate over classifiers
        for name, clf in zip(names, classifiers):
            ax = plt.subplot(len(datasets), len(classifiers) + 1, i)

            if "Proglearn" in name:

                clf = LifelongClassificationForest(
                    oblique=True,
                    default_feature_combinations=1,
                    default_density=0.5)
                clf.add_task(X_train, y_train, n_estimators=NT)
                y_hat = clf.predict(X_test, task_id=0)
                score = np.sum(y_hat == y_test) / len(y_test)

            else:
                clf.fit(X_train, y_train)
                score = clf.score(X_test, y_test)

            # Plot the decision boundary. For that, we will assign a color to each
            # point in the mesh [x_min, x_max]x[y_min, y_max].
            if hasattr(clf, "decision_function"):
                Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
            elif "Proglearn" in name:
                Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()],
                                      task_id=0)[:, 1]
            else:
                Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

            # Put the result into a color plot
            Z = Z.reshape(xx.shape)
            ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)

            # Plot the training points
            ax.scatter(X_train[:, 0],
                       X_train[:, 1],
                       c=y_train,
                       cmap=cm_bright,
                       edgecolors='k')
            # Plot the testing points
            ax.scatter(X_test[:, 0],
                       X_test[:, 1],
                       c=y_test,
                       cmap=cm_bright,
                       edgecolors='k',
                       alpha=0.6)

            ax.set_xlim(xx.min(), xx.max())
            ax.set_ylim(yy.min(), yy.max())
            ax.set_xticks(())
            ax.set_yticks(())
            if ds_cnt == 0:
                ax.set_title(name)
            ax.text(xx.max() - .3,
                    yy.min() + .3, ('%.2f' % score).lstrip('0'),
                    size=15,
                    horizontalalignment='right')
            i += 1
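One way the plotting helper above might be driven, following the scikit-learn classifier-comparison layout it mirrors. The "Proglearn" entry in classifiers is just a placeholder, since test() builds its own LifelongClassificationForest for that name; note that the oblique/default_feature_combinations/default_density options used inside come from an oblique-forest variant of proglearn and may not exist in every release.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_moons, make_circles
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from proglearn.forest import LifelongClassificationForest

names = ["Nearest Neighbors", "Random Forest", "Proglearn"]
classifiers = [KNeighborsClassifier(3),
               RandomForestClassifier(n_estimators=10),
               None]  # placeholder; test() constructs the Proglearn forest itself
datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1)]

plt.figure(figsize=(3 * (len(classifiers) + 1), 3 * len(datasets)))
test(NT=10, h=0.02, names=names, classifiers=classifiers, datasets=datasets)
plt.tight_layout()
plt.show()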
Example #9
def experiment(angle, classifiers, n_xor, n_rxor, n_test):
    """Perform XOR RXOR(XNOR) XOR experiment"""
    X_xor, y_xor = generate_gaussian_parity(n_xor)
    X_rxor, y_rxor = generate_gaussian_parity(n_rxor, angle_params=angle)
    X_xor_2, y_xor_2 = generate_gaussian_parity(n_xor)
    test_x_xor, test_y_xor = generate_gaussian_parity(n_test)
    test_x_rxor, test_y_rxor = generate_gaussian_parity(n_test,
                                                        angle_params=angle)
    X_stream = np.concatenate((X_xor, X_rxor, X_xor_2), axis=0)
    y_stream = np.concatenate((y_xor, y_rxor, y_xor_2), axis=0)

    # Instantiate classifiers
    if classifiers[0] == 1:
        ht = tree.HoeffdingTreeClassifier(grace_period=2,
                                          split_confidence=1e-01)
    if classifiers[1] == 1:
        mf = MondrianForestClassifier(n_estimators=10)
    if classifiers[2] == 1:
        sdt = DecisionTreeClassifier()
    if classifiers[3] == 1:
        sdf = StreamDecisionForest()
    if classifiers[4] == 1:
        synf = LifelongClassificationForest(default_n_estimators=10)

    errors = np.zeros((10, int(X_stream.shape[0] / 25)))

    for i in range(int(X_stream.shape[0] / 25)):
        X = X_stream[i * 25:(i + 1) * 25]
        y = y_stream[i * 25:(i + 1) * 25]

        # Hoeffding Tree Classifier
        if classifiers[0] == 1:
            ht_partial_fit(ht, X, y)
            ht_xor_y_hat, ht_rxor_y_hat = ht_predict(ht, test_x_xor,
                                                     test_x_rxor)
            errors[0, i] = 1 - np.mean(ht_xor_y_hat == test_y_xor)
            errors[1, i] = 1 - np.mean(ht_rxor_y_hat == test_y_rxor)

        # Mondrian Forest Classifier
        if classifiers[1] == 1:
            mf.partial_fit(X, y)
            mf_xor_y_hat = mf.predict(test_x_xor)
            mf_rxor_y_hat = mf.predict(test_x_rxor)
            errors[2, i] = 1 - np.mean(mf_xor_y_hat == test_y_xor)
            errors[3, i] = 1 - np.mean(mf_rxor_y_hat == test_y_rxor)

        # Stream Decision Tree Classifier
        if classifiers[2] == 1:
            sdt.partial_fit(X, y, classes=[0, 1])
            sdt_xor_y_hat = sdt.predict(test_x_xor)
            sdt_rxor_y_hat = sdt.predict(test_x_rxor)
            errors[4, i] = 1 - np.mean(sdt_xor_y_hat == test_y_xor)
            errors[5, i] = 1 - np.mean(sdt_rxor_y_hat == test_y_rxor)

        # Stream Decision Forest Classifier
        if classifiers[3] == 1:
            sdf.partial_fit(X, y, classes=[0, 1])
            sdf_xor_y_hat = sdf.predict(test_x_xor)
            sdf_rxor_y_hat = sdf.predict(test_x_rxor)
            errors[6, i] = 1 - np.mean(sdf_xor_y_hat == test_y_xor)
            errors[7, i] = 1 - np.mean(sdf_rxor_y_hat == test_y_rxor)

        # Synergistic Forest Classifier
        if classifiers[4] == 1:
            if i == 0:
                synf.add_task(X, y, n_estimators=10, task_id=0)
                synf_xor_y_hat = synf.predict(test_x_xor, task_id=0)
            elif i < (n_xor / 25):
                synf.update_task(X, y, task_id=0)
                synf_xor_y_hat = synf.predict(test_x_xor, task_id=0)
            elif i == (n_xor / 25):
                synf.add_task(X, y, n_estimators=10, task_id=1)
                synf_xor_y_hat = synf.predict(test_x_xor, task_id=0)
                synf_rxor_y_hat = synf.predict(test_x_rxor, task_id=1)
            elif i < (n_xor + n_rxor) / 25:
                synf.update_task(X, y, task_id=1)
                synf_xor_y_hat = synf.predict(test_x_xor, task_id=0)
                synf_rxor_y_hat = synf.predict(test_x_rxor, task_id=1)
            elif i < (2 * n_xor + n_rxor) / 25:
                synf.update_task(X, y, task_id=0)
                synf_xor_y_hat = synf.predict(test_x_xor, task_id=0)
                synf_rxor_y_hat = synf.predict(test_x_rxor, task_id=1)

            if i < (n_xor / 25):
                errors[8, i] = 1 - np.mean(synf_xor_y_hat == test_y_xor)
            if i >= (n_xor / 25):
                errors[8, i] = 1 - np.mean(synf_xor_y_hat == test_y_xor)
                errors[9, i] = 1 - np.mean(synf_rxor_y_hat == test_y_rxor)

    return errors
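A sketch of calling the streaming experiment above with only the Synergistic Forest enabled (the last flag), which keeps the dependencies down to proglearn; the other flags would additionally require river's HoeffdingTreeClassifier, MondrianForestClassifier, the stream decision tree/forest, and the ht_partial_fit/ht_predict helpers that are not shown here.

import numpy as np
from proglearn.forest import LifelongClassificationForest
from proglearn.sims import generate_gaussian_parity

# classifier flags correspond to [ht, mf, sdt, sdf, synf]
errors = experiment(angle=np.pi / 2, classifiers=[0, 0, 0, 0, 1],
                    n_xor=750, n_rxor=750, n_test=1000)
print("SynF error on XOR over time:", errors[8])
print("SynF error on RXOR over time:", errors[9])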