Example #1
    def setup(self, num_points_per_task):

        # Set transformer network hyperparameters.
        default_transformer_kwargs = {
            "network": self.network,
            "euclidean_layer_idx": -2,
            "loss": self.loss,
            "optimizer": self.optimizer,
            "num_classes": 10,
            "compile_kwargs": {},
            "fit_kwargs": {
                "epochs": self.epochs,
                # "callbacks": [EarlyStopping(patience=5, monitor="val_loss")],
                "verbose": self.verbose,
                "validation_split": 0.33,
                "batch_size": self.batch_size
            },
        }

        # Hyperparameter for KNN voter.
        default_voter_kwargs = {"k": int(np.log2(num_points_per_task))}

        self.pl = ProgressiveLearner(
            default_transformer_class=NeuralClassificationTransformer,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=KNNClassificationVoter,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=SimpleAverage,
            default_decider_kwargs={},
        )
Example #2
def LF_experiment(num_task_1_data, rep):

    default_transformer_class = TreeClassificationTransformer
    default_transformer_kwargs = {"kwargs": {"max_depth": 30}}

    default_voter_class = TreeClassificationVoter
    default_voter_kwargs = {}

    default_decider_class = SimpleAverage
    default_decider_kwargs = {}
    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs)

    X_train_task0, y_train_task0 = generate_gaussian_parity(n=num_task_1_data,
                                                            angle_params=0,
                                                            acorn=1)
    X_train_task1, y_train_task1 = generate_gaussian_parity(n=100,
                                                            angle_params=10,
                                                            acorn=1)
    X_test_task0, y_test_task0 = generate_gaussian_parity(n=10000,
                                                          angle_params=0,
                                                          acorn=2)

    progressive_learner.add_task(
        X_train_task0,
        y_train_task0,
        num_transformers=10,
        transformer_voter_decider_split=[0.67, 0.33, 0],
        decider_kwargs={"classes": np.unique(y_train_task0)})
    llf_task = progressive_learner.predict(X_test_task0, task_id=0)
    single_task_accuracy = np.nanmean(llf_task == y_test_task0)
    single_task_error = 1 - single_task_accuracy

    progressive_learner.add_transformer(X=X_train_task1,
                                        y=y_train_task1,
                                        transformer_data_proportion=1,
                                        num_transformers=10,
                                        backward_task_ids=[0])

    llf_task = progressive_learner.predict(X_test_task0, task_id=0)
    double_task_accuracy = np.nanmean(llf_task == y_test_task0)
    double_task_error = 1 - double_task_accuracy

    if double_task_error == 0 or single_task_error == 0:
        te = 1
    else:
        te = (single_task_error + 1e-6) / (double_task_error + 1e-6)

    df = pd.DataFrame()
    df['te'] = [te]

    print('n = {}, te = {}'.format(num_task_1_data, te))
    file_to_save = 'result/' + str(num_task_1_data) + '_' + str(
        rep) + '.pickle'
    with open(file_to_save, 'wb') as f:
        pickle.dump(df, f)
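
A minimal driver for the example above might look like the following sketch; the rep count and first-task sample sizes are illustrative assumptions, not values from the original experiment.

import os

os.makedirs('result', exist_ok=True)  # LF_experiment pickles its output here

for rep in range(10):  # assumed number of repetitions
    for n in [50, 100, 200, 500]:  # assumed first-task sample sizes
        LF_experiment(n, rep)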
Example #3
def LF_experiment(angle, reps=1, ntrees=10, acorn=None):

    if acorn is not None:
        np.random.seed(acorn)

    errors = np.zeros(2)

    for rep in range(reps):
        print("Starting Rep {} of Angle {}".format(rep, angle))
        X_base_train, y_base_train = generate_gaussian_parity(n=100,
                                                              angle_params=0,
                                                              acorn=rep)
        X_base_test, y_base_test = generate_gaussian_parity(n=10000,
                                                            angle_params=0,
                                                            acorn=rep)
        X_rotated_train, y_rotated_train = generate_gaussian_parity(
            n=100, angle_params=angle, acorn=rep)

        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 10}}

        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}

        default_decider_class = SimpleAverage
        default_decider_kwargs = {}
        progressive_learner = ProgressiveLearner(
            default_transformer_class=default_transformer_class,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=default_voter_class,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class,
            default_decider_kwargs=default_decider_kwargs)
        progressive_learner.add_task(
            X_base_train,
            y_base_train,
            num_transformers=ntrees,
            transformer_voter_decider_split=[0.67, 0.33, 0],
            decider_kwargs={"classes": np.unique(y_base_train)})
        base_predictions_test = progressive_learner.predict(X_base_test,
                                                            task_id=0)
        progressive_learner.add_transformer(X=X_rotated_train,
                                            y=y_rotated_train,
                                            transformer_data_proportion=1,
                                            num_transformers=ntrees,
                                            backward_task_ids=[0])

        all_predictions_test = progressive_learner.predict(X_base_test,
                                                           task_id=0)

        errors[1] = errors[1] + (1 -
                                 np.mean(all_predictions_test == y_base_test))
        errors[0] = errors[0] + (1 -
                                 np.mean(base_predictions_test == y_base_test))

    errors = errors / reps
    print("Errors For Angle {}: {}".format(angle, errors))
    with open('results/angle_' + str(angle) + '.pickle', 'wb') as f:
        pickle.dump(errors, f, protocol=2)
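
This variant pickles one averaged error vector per angle into results/, so a sweep could be driven as in the sketch below; the angle grid and rep count are assumptions.

import os

os.makedirs('results', exist_ok=True)

for angle in range(0, 180, 10):  # assumed grid of rotation angles
    LF_experiment(angle, reps=10, ntrees=10)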
Example #4
    def setup(self):

        # Set transformer network hyperparameters.
        default_transformer_kwargs = {
            "network": self.network,
            "euclidean_layer_idx": -2,
            "loss": self.loss,
            "optimizer": self.optimizer,
            "compile_kwargs": {},
            "fit_kwargs": {
                "epochs": self.epochs,
                "verbose": self.verbose,
                "callbacks": [EarlyStopping(patience=10, monitor="val_loss")],
                "validation_split": 0.25,
            },
        }

        # Set voter network hyperparameters.
        default_voter_kwargs = {
            "validation_split": 0.25,
            "loss": self.loss,
            "lr": self.lr,
            "epochs": self.epochs,
            "verbose": self.verbose,
        }

        # Choose decider.
        if self.decider == "linear":
            default_decider_class = LinearRegressionDecider
        elif self.decider == "knn":
            default_decider_class = KNNRegressionDecider
        else:
            raise ValueError("Decider must be 'linear' or 'knn'.")

        self.pl = ProgressiveLearner(
            default_transformer_class=NeuralRegressionTransformer,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=NeuralRegressionVoter,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class,
            default_decider_kwargs={},
        )
Example #5
def experiment(n_xor, n_nxor, n_test, reps, n_trees, max_depth, acorn=None):
    if n_xor == 0 and n_nxor == 0:
        raise ValueError('Provide samples to train on: n_xor and n_nxor cannot both be 0.')

    if acorn is not None:
        np.random.seed(acorn)

    errors = np.zeros((reps, 4), dtype=float)
    
    for i in range(reps):
        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": max_depth}}

        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}

        default_decider_class = SimpleAverage
        default_decider_kwargs = {"classes": np.arange(2)}
        progressive_learner = ProgressiveLearner(
            default_transformer_class=default_transformer_class,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=default_voter_class,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class,
            default_decider_kwargs=default_decider_kwargs)
        uf = ProgressiveLearner(
            default_transformer_class=default_transformer_class,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=default_voter_class,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class,
            default_decider_kwargs=default_decider_kwargs)
        # Source task data (XOR).
        xor, label_xor = generate_gaussian_parity(n_xor, cov_scale=0.1,
                                                  angle_params=0)
        test_xor, test_label_xor = generate_gaussian_parity(n_test, cov_scale=0.1,
                                                            angle_params=0)

        # Target task data (N-XOR: XOR rotated by pi/4).
        nxor, label_nxor = generate_gaussian_parity(n_nxor, cov_scale=0.1,
                                                    angle_params=np.pi / 4)
        test_nxor, test_label_nxor = generate_gaussian_parity(n_test, cov_scale=0.1,
                                                              angle_params=np.pi / 4)
    
        if n_xor == 0:
            progressive_learner.add_task(nxor, label_nxor, num_transformers=n_trees)

            errors[i, 0] = 0.5
            errors[i, 1] = 0.5

            uf_task2 = progressive_learner.predict(test_nxor, transformer_ids=[0], task_id=0)
            l2f_task2 = progressive_learner.predict(test_nxor, task_id=0)

            errors[i, 2] = 1 - np.sum(uf_task2 == test_label_nxor) / n_test
            errors[i, 3] = 1 - np.sum(l2f_task2 == test_label_nxor) / n_test
        elif n_nxor == 0:
            progressive_learner.add_task(xor, label_xor, num_transformers=n_trees)

            uf_task1 = progressive_learner.predict(test_xor, transformer_ids=[0], task_id=0)
            l2f_task1 = progressive_learner.predict(test_xor, task_id=0)

            errors[i, 0] = 1 - np.sum(uf_task1 == test_label_xor) / n_test
            errors[i, 1] = 1 - np.sum(l2f_task1 == test_label_xor) / n_test
            errors[i, 2] = 0.5
            errors[i, 3] = 0.5
        else:
            progressive_learner.add_task(xor, label_xor, num_transformers=n_trees)
            progressive_learner.add_task(nxor, label_nxor, num_transformers=n_trees)

            uf.add_task(xor, label_xor, num_transformers=2 * n_trees)
            uf.add_task(nxor, label_nxor, num_transformers=2 * n_trees)

            uf_task1 = uf.predict(test_xor, transformer_ids=[0], task_id=0)
            l2f_task1 = progressive_learner.predict(test_xor, task_id=0)
            uf_task2 = uf.predict(test_nxor, transformer_ids=[1], task_id=1)
            l2f_task2 = progressive_learner.predict(test_nxor, task_id=1)

            errors[i, 0] = 1 - np.sum(uf_task1 == test_label_xor) / n_test
            errors[i, 1] = 1 - np.sum(l2f_task1 == test_label_xor) / n_test
            errors[i, 2] = 1 - np.sum(uf_task2 == test_label_nxor) / n_test
            errors[i, 3] = 1 - np.sum(l2f_task2 == test_label_nxor) / n_test

    return np.mean(errors, axis=0)
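
The function returns the rep-averaged error vector [uf_xor, l2f_xor, uf_nxor, l2f_nxor], so a call might look like the sketch below; every argument value is an illustrative assumption.

mean_errors = experiment(n_xor=750, n_nxor=750, n_test=1000,
                         reps=10, n_trees=10, max_depth=None)
print("errors [uf_xor, l2f_xor, uf_nxor, l2f_nxor]:", mean_errors)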
Example #6
def LF_experiment(data_x,
                  data_y,
                  angle,
                  model,
                  granularity,
                  reps=1,
                  ntrees=29,
                  acorn=None):
    if acorn is not None:
        np.random.seed(acorn)

    errors = np.zeros(2)

    with tf.device('/gpu:' + str(int(angle // granularity) % 4)):
        for rep in range(reps):
            train_x1, train_y1, train_x2, train_y2, test_x, test_y = cross_val_data(
                data_x, data_y, total_cls=10)

            # Rotate the images to change the data angle for the second task.
            tmp_data = train_x2.copy()
            total_data = tmp_data.shape[0]

            for i in range(total_data):
                tmp_data[i] = image_aug(tmp_data[i], angle)

            if model == "uf":
                train_x1 = train_x1.reshape(
                    (train_x1.shape[0], train_x1.shape[1] * train_x1.shape[2] *
                     train_x1.shape[3]))
                tmp_data = tmp_data.reshape(
                    (tmp_data.shape[0], tmp_data.shape[1] * tmp_data.shape[2] *
                     tmp_data.shape[3]))
                test_x = test_x.reshape(
                    (test_x.shape[0],
                     test_x.shape[1] * test_x.shape[2] * test_x.shape[3]))

            if model == "dnn":

                default_transformer_class = NeuralClassificationTransformer

                network = keras.Sequential()
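                # Backbone: five 3x3 conv blocks (16 to 254 filters) with batch
                # norm, then two 2000-unit dense layers and a 10-way softmax head.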
                network.add(
                    layers.Conv2D(filters=16,
                                  kernel_size=(3, 3),
                                  activation='relu',
                                  input_shape=np.shape(train_x1)[1:]))
                network.add(layers.BatchNormalization())
                network.add(
                    layers.Conv2D(filters=32,
                                  kernel_size=(3, 3),
                                  strides=2,
                                  padding="same",
                                  activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(
                    layers.Conv2D(filters=64,
                                  kernel_size=(3, 3),
                                  strides=2,
                                  padding="same",
                                  activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(
                    layers.Conv2D(filters=128,
                                  kernel_size=(3, 3),
                                  strides=2,
                                  padding="same",
                                  activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(
                    layers.Conv2D(filters=254,
                                  kernel_size=(3, 3),
                                  strides=2,
                                  padding="same",
                                  activation='relu'))

                network.add(layers.Flatten())
                network.add(layers.BatchNormalization())
                network.add(layers.Dense(2000, activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(layers.Dense(2000, activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(layers.Dense(units=10, activation='softmax'))

                default_transformer_kwargs = {
                    "network": network,
                    "euclidean_layer_idx": -2,
                    "num_classes": 10,
                    "optimizer": keras.optimizers.Adam(3e-4)
                }

                default_voter_class = KNNClassificationVoter
                default_voter_kwargs = {"k": int(np.log2(len(train_x1)))}

                default_decider_class = SimpleAverage
            elif model == "uf":
                default_transformer_class = TreeClassificationTransformer
                default_transformer_kwargs = {"kwargs": {"max_depth": 30}}

                default_voter_class = TreeClassificationVoter
                default_voter_kwargs = {}

                default_decider_class = SimpleAverage

            progressive_learner = ProgressiveLearner(
                default_transformer_class=default_transformer_class,
                default_transformer_kwargs=default_transformer_kwargs,
                default_voter_class=default_voter_class,
                default_voter_kwargs=default_voter_kwargs,
                default_decider_class=default_decider_class)

            progressive_learner.add_task(
                X=train_x1,
                y=train_y1,
                transformer_voter_decider_split=[0.67, 0.33, 0],
                decider_kwargs={"classes": np.unique(train_y1)})

            progressive_learner.add_transformer(X=tmp_data,
                                                y=train_y2,
                                                transformer_data_proportion=1,
                                                backward_task_ids=[0])

            llf_task1 = progressive_learner.predict(test_x, task_id=0)
            llf_single_task = progressive_learner.predict(test_x,
                                                          task_id=0,
                                                          transformer_ids=[0])

            errors[1] = errors[1] + (1 - np.mean(llf_task1 == test_y))
            errors[0] = errors[0] + (1 - np.mean(llf_single_task == test_y))

    errors = errors / reps
    print("Errors For Angle {}: {}".format(angle, errors))
    with open('results/angle_' + str(angle) + '_' + model + '.pickle',
              'wb') as f:
        pickle.dump(errors, f, protocol=2)
Example #7
def LF_experiment(train_x,
                  train_y,
                  test_x,
                  test_y,
                  ntrees,
                  shift,
                  slot,
                  model,
                  num_points_per_task,
                  acorn=None):

    df = pd.DataFrame()
    shifts = []
    accuracies_across_tasks = []
    train_times_across_tasks = []
    inference_times_across_tasks = []

    if model == "dnn":
        default_transformer_class = NeuralClassificationTransformer

        network = keras.Sequential()
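        # Backbone: five 3x3 conv blocks (16 to 254 filters) with batch norm,
        # then two 2000-unit dense layers and a 10-way softmax head.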
        network.add(
            layers.Conv2D(filters=16,
                          kernel_size=(3, 3),
                          activation='relu',
                          input_shape=np.shape(train_x)[1:]))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(filters=32,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(filters=128,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(filters=254,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))

        network.add(layers.Flatten())
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(units=10, activation='softmax'))

        default_transformer_kwargs = {
            "network": network,
            "euclidean_layer_idx": -2,
            "num_classes": 10,
            "optimizer": keras.optimizers.Adam(3e-4)
        }

        default_voter_class = KNNClassificationVoter
        default_voter_kwargs = {"k": 16 * int(np.log2(num_points_per_task))}

        default_decider_class = SimpleAverage
    elif model == "uf":
        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 30}}

        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}

        default_decider_class = SimpleAverage
    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class)

    for task_ii in range(10):
        print("Starting Task {} For Fold {} For Slot {}".format(
            task_ii, shift, slot))
        if acorn is not None:
            np.random.seed(acorn)

        train_start_time = time.time()
        progressive_learner.add_task(
            X=train_x[task_ii * 5000 +
                      slot * num_points_per_task:task_ii * 5000 +
                      (slot + 1) * num_points_per_task],
            y=train_y[task_ii * 5000 +
                      slot * num_points_per_task:task_ii * 5000 +
                      (slot + 1) * num_points_per_task],
            num_transformers=1 if model == "dnn" else ntrees,
            transformer_voter_decider_split=[0.67, 0.33, 0],
            decider_kwargs={
                "classes":
                np.unique(train_y[task_ii * 5000 +
                                  slot * num_points_per_task:task_ii * 5000 +
                                  (slot + 1) * num_points_per_task])
            },
            backward_task_ids=[0])
        train_end_time = time.time()

        inference_start_time = time.time()
        llf_task = progressive_learner.predict(test_x[:1000], task_id=0)
        inference_end_time = time.time()
        acc = np.mean(llf_task == test_y[:1000])
        accuracies_across_tasks.append(acc)
        shifts.append(shift)
        train_times_across_tasks.append(train_end_time - train_start_time)
        inference_times_across_tasks.append(inference_end_time -
                                            inference_start_time)

        print("Accuracy Across Tasks: {}".format(accuracies_across_tasks))
        print("Train Times Across Tasks: {}".format(train_times_across_tasks))
        print("Inference Times Across Tasks: {}".format(
            inference_times_across_tasks))

    df['data_fold'] = shifts
    df['task'] = range(1, 11)
    df['task_1_accuracy'] = accuracies_across_tasks
    df['train_times'] = train_times_across_tasks
    df['inference_times'] = inference_times_across_tasks

    file_to_save = 'result/' + model + str(ntrees) + '_' + str(
        shift) + '_' + str(slot) + '.pickle'
    with open(file_to_save, 'wb') as f:
        pickle.dump(df, f)
Example #8
class LifelongClassificationNetwork:
    def __init__(
        self,
        network,
        loss="categorical_crossentropy",
        epochs=100,
        lr=3e-4,
        batch_size=32,
        verbose=False,
    ):
        self.network = network
        self.loss = loss
        self.epochs = epochs
        self.optimizer = Adam(lr)
        self.verbose = verbose
        self.batch_size = batch_size
        self.is_first_task = True

    def setup(self, num_points_per_task):

        # Set transformer network hyperparameters.
        default_transformer_kwargs = {
            "network": self.network,
            "euclidean_layer_idx": -2,
            "loss": self.loss,
            "optimizer": self.optimizer,
            "num_classes": 10,
            "compile_kwargs": {},
            "fit_kwargs": {
                "epochs": self.epochs,
                # "callbacks": [EarlyStopping(patience=5, monitor="val_loss")],
                "verbose": self.verbose,
                "validation_split": 0.33,
                "batch_size": self.batch_size
            },
        }

        # Hyperparameter for KNN voter.
        default_voter_kwargs = {"k": int(np.log2(num_points_per_task))}

        self.pl = ProgressiveLearner(
            default_transformer_class=NeuralClassificationTransformer,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=KNNClassificationVoter,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=SimpleAverage,
            default_decider_kwargs={},
        )

    # A None default avoids the shared-mutable-default pitfall for decider_kwargs.
    def add_task(self, X, y, task_id=None, decider_kwargs=None):

        if self.is_first_task:
            num_points_per_task = len(X)
            self.setup(num_points_per_task)
            self.is_first_task = False

        self.pl.add_task(
            X,
            y,
            task_id=task_id,
            transformer_voter_decider_split=[0.67, 0.33, 0.0],
            decider_kwargs=decider_kwargs if decider_kwargs is not None else {},
        )

        return self

    def predict(self, X, task_id):
        return self.pl.predict(X, task_id)

    def predict_proba(self, X, task_id):
        return self.pl.predict_proba(X, task_id)
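
A minimal usage sketch for this class; make_network(), the data arrays, and the epoch count below are hypothetical stand-ins.

net = LifelongClassificationNetwork(make_network(), epochs=10, verbose=True)
net.add_task(X_task0, y_task0, task_id=0)
net.add_task(X_task1, y_task1, task_id=1)
task0_predictions = net.predict(X_test, task_id=0)
task0_posteriors = net.predict_proba(X_test, task_id=0)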
Example #9
class LifelongRegressionNetwork:
    def __init__(
        self, network, decider="linear", loss="mse", epochs=100,
        lr=1e-3, optimizer=None, verbose=False
    ):
        self.network = network
        self.decider = decider
        self.loss = loss
        self.epochs = epochs
        # setup() reads self.lr for the voter network; the 1e-3 default is an
        # assumption chosen to match the optimizer's learning rate.
        self.lr = lr
        # Build the optimizer per instance rather than sharing a default instance.
        self.optimizer = optimizer if optimizer is not None else Adam(lr)
        self.verbose = verbose
        self.is_first_task = True

    def setup(self):

        # Set transformer network hyperparameters.
        default_transformer_kwargs = {
            "network": self.network,
            "euclidean_layer_idx": -2,
            "loss": self.loss,
            "optimizer": self.optimizer,
            "compile_kwargs": {},
            "fit_kwargs": {
                "epochs": self.epochs,
                "verbose": self.verbose,
                "callbacks": [EarlyStopping(patience=10, monitor="val_loss")],
                "validation_split": 0.25,
            },
        }

        # Set voter network hyperparameters.
        default_voter_kwargs = {
            "validation_split": 0.25,
            "loss": self.loss,
            "lr": self.lr,
            "epochs": self.epochs,
            "verbose": self.verbose,
        }

        # Choose decider.
        if self.decider == "linear":
            default_decider_class = LinearRegressionDecider
        elif self.decider == "knn":
            default_decider_class = KNNRegressionDecider
        else:
            raise ValueError("Decider must be 'linear' or 'knn'.")

        self.pl = ProgressiveLearner(
            default_transformer_class=NeuralRegressionTransformer,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=NeuralRegressionVoter,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class,
            default_decider_kwargs={},
        )

    def add_task(self, X, y, task_id=None):

        if self.is_first_task:
            self.setup()
            self.is_first_task = False

        self.pl.add_task(
            X, y, task_id=task_id, transformer_voter_decider_split=[0.6, 0.3, 0.1]
        )

        return self

    def predict(self, X, task_id):
        return self.pl.predict(X, task_id)
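
An analogous sketch for the regression variant; make_regression_network() and the data arrays are hypothetical stand-ins, and the network should end in a single output unit to match the "mse" loss.

reg = LifelongRegressionNetwork(make_regression_network(), decider="knn", epochs=50)
reg.add_task(X_task0, y_task0, task_id=0)
reg.add_task(X_task1, y_task1, task_id=1)
y_hat = reg.predict(X_new, task_id=0)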
Example #10
def LF_experiment(data_x,
                  data_y,
                  ntrees,
                  shift,
                  slot,
                  model,
                  num_points_per_task,
                  acorn=None):

    df = pd.DataFrame()
    shifts = []
    slots = []
    accuracies_across_tasks = []
    train_times_across_tasks = []
    inference_times_across_tasks = []

    train_x_task0, train_y_task0, test_x_task0, test_y_task0 = cross_val_data(
        data_x,
        data_y,
        num_points_per_task,
        total_task=10,
        shift=shift,
        slot=slot)
    if model == "dnn":
        default_transformer_class = NeuralClassificationTransformer

        network = keras.Sequential()
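        # Backbone: five 3x3 conv blocks (16 to 254 filters, no batch norm in
        # this variant), then two 2000-unit dense layers and a 10-way softmax head.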
        network.add(
            layers.Conv2D(filters=16,
                          kernel_size=(3, 3),
                          activation='relu',
                          input_shape=np.shape(train_x_task0)[1:]))
        network.add(
            layers.Conv2D(filters=32,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(
            layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(
            layers.Conv2D(filters=128,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(
            layers.Conv2D(filters=254,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))

        network.add(layers.Flatten())
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.Dense(units=10, activation='softmax'))

        default_transformer_kwargs = {
            "network": network,
            "euclidean_layer_idx": -2,
            "num_classes": 10,
            "optimizer": keras.optimizers.Adam(3e-4)
        }

        default_voter_class = KNNClassificationVoter
        default_voter_kwargs = {"k": int(np.log2(num_points_per_task * .33))}

        default_decider_class = SimpleAverage
    elif model == "uf":
        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 30}}

        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}

        default_decider_class = SimpleAverage
    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class)
    train_start_time = time.time()
    progressive_learner.add_task(
        X=train_x_task0,
        y=train_y_task0,
        num_transformers=1 if model == "dnn" else ntrees,
        transformer_voter_decider_split=[0.67, 0.33, 0],
        decider_kwargs={"classes": np.unique(train_y_task0)})
    train_end_time = time.time()

    inference_start_time = time.time()
    task_0_predictions = progressive_learner.predict(test_x_task0, task_id=0)
    inference_end_time = time.time()

    shifts.append(shift)
    slots.append(slot)
    accuracies_across_tasks.append(np.mean(task_0_predictions == test_y_task0))
    train_times_across_tasks.append(train_end_time - train_start_time)
    inference_times_across_tasks.append(inference_end_time -
                                        inference_start_time)

    for task_ii in range(1, 20):
        train_x, train_y, _, _ = cross_val_data(data_x,
                                                data_y,
                                                num_points_per_task,
                                                total_task=10,
                                                shift=shift,
                                                slot=slot,
                                                task=task_ii)

        print("Starting Task {} For Fold {} For Slot {}".format(
            task_ii, shift, slot))

        train_start_time = time.time()
        progressive_learner.add_transformer(
            X=train_x,
            y=train_y,
            transformer_data_proportion=1,
            num_transformers=1 if model == "dnn" else ntrees,
            backward_task_ids=[0])
        train_end_time = time.time()

        inference_start_time = time.time()
        task_0_predictions = progressive_learner.predict(test_x_task0,
                                                         task_id=0)
        inference_end_time = time.time()

        shifts.append(shift)
        slots.append(slot)
        accuracies_across_tasks.append(
            np.mean(task_0_predictions == test_y_task0))
        train_times_across_tasks.append(train_end_time - train_start_time)
        inference_times_across_tasks.append(inference_end_time -
                                            inference_start_time)

        print("Accuracy Across Tasks: {}".format(accuracies_across_tasks))
        print("Train Times Across Tasks: {}".format(train_times_across_tasks))
        print("Inference Times Across Tasks: {}".format(
            inference_times_across_tasks))

    df['data_fold'] = shifts
    df['slot'] = slots
    df['accuracy'] = accuracies_across_tasks
    df['train_times'] = train_times_across_tasks
    df['inference_times'] = inference_times_across_tasks

    file_to_save = 'result/' + model + str(ntrees) + '_' + str(
        shift) + '_' + str(slot) + '.pickle'
    with open(file_to_save, 'wb') as f:
        pickle.dump(df, f)