def test(data_file, reps, n_trees, task_num,
         default_transformer_class, default_transformer_kwargs):
    default_voter_class = TreeClassificationVoter
    default_voter_kwargs = {}

    default_decider_class = SimpleArgmaxAverage

    kappa = np.zeros(reps)
    for i in range(reps):
        X_train, X_test, y_train, y_test, n_classes = load_data(data_file, task_num)
        default_decider_kwargs = {"classes": np.arange(n_classes)}

        pl = ProgressiveLearner(
            default_transformer_class=default_transformer_class,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=default_voter_class,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class,
            default_decider_kwargs=default_decider_kwargs)

        pl.add_task(X_train, y_train, num_transformers=n_trees)

        y_hat = pl.predict(X_test, task_id=0)

        acc = np.sum(y_test == y_hat) / len(y_test)
        print("Accuracy after iteration ", i, ": ", acc)

        chance_pred = 1 / n_classes
        kappa[i] = (acc - chance_pred) / (1 - chance_pred)

    return np.mean(kappa) * 100, (np.std(kappa) * 100) / np.sqrt(reps)
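
A minimal usage sketch for the function above, assuming the proglearn classes and the load_data helper are importable; the file name below is a placeholder:

# Hypothetical call; "dataset.csv" is a placeholder path.
mean_kappa, sem_kappa = test(
    "dataset.csv",
    reps=5,
    n_trees=10,
    task_num=0,
    default_transformer_class=TreeClassificationTransformer,
    default_transformer_kwargs={"kwargs": {"max_depth": 30}},
)
print("kappa: {} +/- {}".format(mean_kappa, sem_kappa))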
Example #2
    def test_nxor(self):
        # Tests proglearn on XOR and N-XOR simulation data.
        np.random.seed(12345)

        reps = 10
        errors = np.zeros((4, reps), dtype=float)

        for ii in range(reps):
            default_transformer_class = TreeClassificationTransformer
            default_transformer_kwargs = {"kwargs": {"max_depth": 30}}

            default_voter_class = TreeClassificationVoter
            default_voter_kwargs = {}

            default_decider_class = SimpleArgmaxAverage
            default_decider_kwargs = {"classes": np.arange(2)}
            progressive_learner = ProgressiveLearner(
                default_transformer_class=default_transformer_class,
                default_transformer_kwargs=default_transformer_kwargs,
                default_voter_class=default_voter_class,
                default_voter_kwargs=default_voter_kwargs,
                default_decider_class=default_decider_class,
                default_decider_kwargs=default_decider_kwargs,
            )

            xor, label_xor = generate_gaussian_parity(750,
                                                      cov_scale=0.1,
                                                      angle_params=0)
            test_xor, test_label_xor = generate_gaussian_parity(1000,
                                                                cov_scale=0.1,
                                                                angle_params=0)

            nxor, label_nxor = generate_gaussian_parity(750,
                                                        cov_scale=0.1,
                                                        angle_params=np.pi / 2)
            test_nxor, test_label_nxor = generate_gaussian_parity(
                1000, cov_scale=0.1, angle_params=np.pi / 2)

            progressive_learner.add_task(xor, label_xor, num_transformers=10)
            progressive_learner.add_task(nxor, label_nxor, num_transformers=10)

            uf_task1 = progressive_learner.predict(test_xor,
                                                   transformer_ids=[0],
                                                   task_id=0)
            l2f_task1 = progressive_learner.predict(test_xor, task_id=0)
            uf_task2 = progressive_learner.predict(test_nxor,
                                                   transformer_ids=[1],
                                                   task_id=1)
            l2f_task2 = progressive_learner.predict(test_nxor, task_id=1)

            errors[0, ii] = 1 - np.mean(uf_task1 == test_label_xor)
            errors[1, ii] = 1 - np.mean(l2f_task1 == test_label_xor)
            errors[2, ii] = 1 - np.mean(uf_task2 == test_label_nxor)
            errors[3, ii] = 1 - np.mean(l2f_task2 == test_label_nxor)

        bte = np.mean(errors[0]) / np.mean(errors[1])
        fte = np.mean(errors[2]) / np.mean(errors[3])

        assert bte > 1 and fte > 1
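
The assertion above encodes transfer efficiency as an error ratio: single-task error divided by multitask error, so a value above 1 means the other task helped. A standalone check of that arithmetic with made-up error values:

import numpy as np

# Made-up per-rep errors, for illustration only.
single_task_errors = np.array([0.12, 0.10, 0.11])
multitask_errors = np.array([0.09, 0.08, 0.10])

te = np.mean(single_task_errors) / np.mean(multitask_errors)
assert te > 1  # the extra task reduced the error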
Example #3
def LF_experiment(num_task_1_data, rep):

    default_transformer_class = TreeClassificationTransformer
    default_transformer_kwargs = {"kwargs": {"max_depth": 30}}

    default_voter_class = TreeClassificationVoter
    default_voter_kwargs = {}

    default_decider_class = SimpleAverage
    default_decider_kwargs = {}
    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class)

    X_train_task0, y_train_task0 = generate_gaussian_parity(n=num_task_1_data,
                                                            angle_params=0,
                                                            acorn=1)
    X_train_task1, y_train_task1 = generate_gaussian_parity(n=100,
                                                            angle_params=10,
                                                            acorn=1)
    X_test_task0, y_test_task0 = generate_gaussian_parity(n=10000,
                                                          angle_params=0,
                                                          acorn=2)

    progressive_learner.add_task(
        X_train_task0,
        y_train_task0,
        num_transformers=10,
        transformer_voter_decider_split=[0.67, 0.33, 0],
        decider_kwargs={"classes": np.unique(y_train_task0)})
    llf_task = progressive_learner.predict(X_test_task0, task_id=0)
    single_task_accuracy = np.nanmean(llf_task == y_test_task0)
    single_task_error = 1 - single_task_accuracy

    progressive_learner.add_transformer(X=X_train_task1,
                                        y=y_train_task1,
                                        transformer_data_proportion=1,
                                        num_transformers=10,
                                        backward_task_ids=[0])

    llf_task = progressive_learner.predict(X_test_task0, task_id=0)
    double_task_accuracy = np.nanmean(llf_task == y_test_task0)
    double_task_error = 1 - double_task_accuracy

    if double_task_error == 0 or single_task_error == 0:
        te = 1
    else:
        te = (single_task_error + 1e-6) / (double_task_error + 1e-6)

    df = pd.DataFrame()
    df['te'] = [te]

    print('n = {}, te = {}'.format(num_task_1_data, te))
    file_to_save = 'result/' + str(num_task_1_data) + '_' + str(
        rep) + '.pickle'
    with open(file_to_save, 'wb') as f:
        pickle.dump(df, f)
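
A usage sketch for the function above, assuming the imports from the snippet are in scope and a result/ directory exists; the sample size and rep index are illustrative:

LF_experiment(num_task_1_data=500, rep=0)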
Example #4
def LF_experiment(angle, reps=1, ntrees=10, acorn=None):

    errors = np.zeros(2)

    for rep in range(reps):
        print("Starting Rep {} of Angle {}".format(rep, angle))
        X_base_train, y_base_train = generate_gaussian_parity(n=100,
                                                              angle_params=0,
                                                              acorn=rep)
        X_base_test, y_base_test = generate_gaussian_parity(n=10000,
                                                            angle_params=0,
                                                            acorn=rep)
        X_rotated_train, y_rotated_train = generate_gaussian_parity(
            n=100, angle_params=angle, acorn=rep)

        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 10}}

        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}

        default_decider_class = SimpleAverage
        default_decider_kwargs = {}
        progressive_learner = ProgressiveLearner(
            default_transformer_class=default_transformer_class,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=default_voter_class,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class)
        progressive_learner.add_task(
            X_base_train,
            y_base_train,
            num_transformers=ntrees,
            transformer_voter_decider_split=[0.67, 0.33, 0],
            decider_kwargs={"classes": np.unique(y_base_train)})
        base_predictions_test = progressive_learner.predict(X_base_test,
                                                            task_id=0)
        progressive_learner.add_transformer(X=X_rotated_train,
                                            y=y_rotated_train,
                                            transformer_data_proportion=1,
                                            num_transformers=10,
                                            backward_task_ids=[0])

        all_predictions_test = progressive_learner.predict(X_base_test,
                                                           task_id=0)

        errors[1] += 1 - np.mean(all_predictions_test == y_base_test)
        errors[0] += 1 - np.mean(base_predictions_test == y_base_test)

    errors = errors / reps
    print("Errors For Angle {}: {}".format(angle, errors))
    with open('results/angle_' + str(angle) + '.pickle', 'wb') as f:
        pickle.dump(errors, f, protocol=2)
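
A usage sketch, assuming a results/ directory exists; the angle value is illustrative:

LF_experiment(angle=np.pi / 2, reps=10, ntrees=10, acorn=0)

Example #5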
def LF_experiment(data_x,
                  data_y,
                  ntrees,
                  shift,
                  slot,
                  model,
                  num_points_per_task,
                  acorn=None):

    df = pd.DataFrame()
    shifts = []
    slots = []
    accuracies_across_tasks = []
    train_times_across_tasks = []
    inference_times_across_tasks = []

    train_x_task0, train_y_task0, test_x_task0, test_y_task0 = cross_val_data(
        data_x,
        data_y,
        num_points_per_task,
        total_task=10,
        shift=shift,
        slot=slot)
    if model == "dnn":
        default_transformer_class = NeuralClassificationTransformer

        network = keras.Sequential()
        network.add(
            layers.Conv2D(filters=16,
                          kernel_size=(3, 3),
                          activation='relu',
                          input_shape=np.shape(train_x_task0)[1:]))
        network.add(
            layers.Conv2D(filters=32,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(
            layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(
            layers.Conv2D(filters=128,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(
            layers.Conv2D(filters=254,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))

        network.add(layers.Flatten())
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.Dense(units=10, activation='softmax'))

        default_transformer_kwargs = {
            "network": network,
            "euclidean_layer_idx": -2,
            "num_classes": 10,
            "optimizer": keras.optimizers.Adam(3e-4)
        }

        default_voter_class = KNNClassificationVoter
        default_voter_kwargs = {"k": int(np.log2(num_points_per_task * .33))}

        default_decider_class = SimpleArgmaxAverage
    elif model == "uf":
        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 30}}

        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}

        default_decider_class = SimpleArgmaxAverage
    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class)
    train_start_time = time.time()
    progressive_learner.add_task(
        X=train_x_task0,
        y=train_y_task0,
        num_transformers=1 if model == "dnn" else ntrees,
        transformer_voter_decider_split=[0.67, 0.33, 0],
        decider_kwargs={"classes": np.unique(train_y_task0)})
    train_end_time = time.time()

    inference_start_time = time.time()
    task_0_predictions = progressive_learner.predict(test_x_task0, task_id=0)
    inference_end_time = time.time()

    shifts.append(shift)
    slots.append(slot)
    accuracies_across_tasks.append(np.mean(task_0_predictions == test_y_task0))
    train_times_across_tasks.append(train_end_time - train_start_time)
    inference_times_across_tasks.append(inference_end_time -
                                        inference_start_time)

    for task_ii in range(1, 20):
        train_x, train_y, _, _ = cross_val_data(data_x,
                                                data_y,
                                                num_points_per_task,
                                                total_task=10,
                                                shift=shift,
                                                slot=slot,
                                                task=task_ii)

        print("Starting Task {} For Fold {} For Slot {}".format(
            task_ii, shift, slot))

        train_start_time = time.time()
        progressive_learner.add_transformer(
            X=train_x,
            y=train_y,
            transformer_data_proportion=1,
            num_transformers=1 if model == "dnn" else ntrees,
            backward_task_ids=[0])
        train_end_time = time.time()

        inference_start_time = time.time()
        task_0_predictions = progressive_learner.predict(test_x_task0,
                                                         task_id=0)
        inference_end_time = time.time()

        shifts.append(shift)
        slots.append(slot)
        accuracies_across_tasks.append(
            np.mean(task_0_predictions == test_y_task0))
        train_times_across_tasks.append(train_end_time - train_start_time)
        inference_times_across_tasks.append(inference_end_time -
                                            inference_start_time)

        print("Accuracy Across Tasks: {}".format(accuracies_across_tasks))
        print("Train Times Across Tasks: {}".format(train_times_across_tasks))
        print("Inference Times Across Tasks: {}".format(
            inference_times_across_tasks))

    df['data_fold'] = shifts
    df['slot'] = slots
    df['accuracy'] = accuracies_across_tasks
    df['train_times'] = train_times_across_tasks
    df['inference_times'] = inference_times_across_tasks

    file_to_save = 'result/' + model + str(ntrees) + '_' + str(
        shift) + '_' + str(slot) + '.pickle'
    with open(file_to_save, 'wb') as f:
        pickle.dump(df, f)
Example #6
def experiment(n_task1,
               n_task2,
               n_test=1000,
               task1_angle=0,
               task2_angle=np.pi / 2,
               n_trees=10,
               max_depth=None,
               random_state=None):
    """
    A function to do progressive experiment between two tasks
    where the task data is generated using Gaussian parity.
    
    Parameters
    ----------
    n_task1 : int
        Total number of train sample for task 1.
    
    n_task2 : int
        Total number of train dsample for task 2

    n_test : int, optional (default=1000)
        Number of test sample for each task.
        
    task1_angle : float, optional (default=0)
        Angle in radian for task 1.
            
    task2_angle : float, optional (default=numpy.pi/2)
        Angle in radian for task 2.
            
    n_trees : int, optional (default=10)
        Number of total trees to train for each task.

    max_depth : int, optional (default=None)
        Maximum allowable depth for each tree.
        
    random_state : int, RandomState instance, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.
        
    
    Returns
    -------
    errors : array of shape [6]
        Elements of the array is organized as single task error task1,
        multitask error task1, single task error task2,
        multitask error task2, naive UF error task1,
        naive UF task2.
    """

    if n_task1 == 0 and n_task2 == 0:
        raise ValueError('Wake up and provide samples to train!!!')

    if random_state is not None:
        np.random.seed(random_state)

    errors = np.zeros(6, dtype=float)

    default_transformer_class = TreeClassificationTransformer
    default_transformer_kwargs = {"kwargs": {"max_depth": max_depth}}

    default_voter_class = TreeClassificationVoter
    default_voter_kwargs = {}

    default_decider_class = SimpleArgmaxAverage
    default_decider_kwargs = {"classes": np.arange(2)}
    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs)
    uf = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs)
    naive_uf = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs)

    # source data
    X_task1, y_task1 = generate_gaussian_parity(n_task1,
                                                angle_params=task1_angle)
    test_task1, test_label_task1 = generate_gaussian_parity(
        n_test, angle_params=task1_angle)

    # target data
    X_task2, y_task2 = generate_gaussian_parity(n_task2,
                                                angle_params=task2_angle)
    test_task2, test_label_task2 = generate_gaussian_parity(
        n_test, angle_params=task2_angle)

    if n_task1 == 0:
        progressive_learner.add_task(X_task2,
                                     y_task2,
                                     num_transformers=n_trees)

        errors[0] = 0.5
        errors[1] = 0.5

        uf_task2 = progressive_learner.predict(test_task2,
                                               transformer_ids=[0],
                                               task_id=0)
        l2f_task2 = progressive_learner.predict(test_task2, task_id=0)

        errors[2] = 1 - np.mean(uf_task2 == test_label_task2)
        errors[3] = 1 - np.mean(l2f_task2 == test_label_task2)

        errors[4] = 0.5
        errors[5] = 1 - np.mean(uf_task2 == test_label_task2)
    elif n_task2 == 0:
        progressive_learner.add_task(X_task1,
                                     y_task1,
                                     num_transformers=n_trees)

        uf_task1 = progressive_learner.predict(test_task1,
                                               transformer_ids=[0],
                                               task_id=0)
        l2f_task1 = progressive_learner.predict(test_task1, task_id=0)

        errors[0] = 1 - np.mean(uf_task1 == test_label_task1)
        errors[1] = 1 - np.mean(l2f_task1 == test_label_task1)

        errors[2] = 0.5
        errors[3] = 0.5

        errors[4] = 1 - np.mean(uf_task1 == test_label_task1)
        errors[5] = 0.5
    else:
        progressive_learner.add_task(X_task1,
                                     y_task1,
                                     num_transformers=n_trees)
        progressive_learner.add_task(X_task2,
                                     y_task2,
                                     num_transformers=n_trees)

        uf.add_task(X_task1, y_task1, num_transformers=2 * n_trees)
        uf.add_task(X_task2, y_task2, num_transformers=2 * n_trees)

        naive_uf_train_x = np.concatenate((X_task1, X_task2), axis=0)
        naive_uf_train_y = np.concatenate((y_task1, y_task2), axis=0)
        naive_uf.add_task(naive_uf_train_x,
                          naive_uf_train_y,
                          num_transformers=n_trees)

        uf_task1 = uf.predict(test_task1, transformer_ids=[0], task_id=0)
        l2f_task1 = progressive_learner.predict(test_task1, task_id=0)
        uf_task2 = uf.predict(test_task2, transformer_ids=[1], task_id=1)
        l2f_task2 = progressive_learner.predict(test_task2, task_id=1)
        naive_uf_task1 = naive_uf.predict(test_task1,
                                          transformer_ids=[0],
                                          task_id=0)
        naive_uf_task2 = naive_uf.predict(test_task2,
                                          transformer_ids=[0],
                                          task_id=0)

        errors[0] = 1 - np.mean(uf_task1 == test_label_task1)
        errors[1] = 1 - np.mean(l2f_task1 == test_label_task1)
        errors[2] = 1 - np.mean(uf_task2 == test_label_task2)
        errors[3] = 1 - np.mean(l2f_task2 == test_label_task2)
        errors[4] = 1 - np.mean(naive_uf_task1 == test_label_task1)
        errors[5] = 1 - np.mean(naive_uf_task2 == test_label_task2)

    return errors
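
A usage sketch for experiment above; the returned array follows the ordering documented in the docstring, so the transfer-efficiency ratios can be read off directly:

errors = experiment(n_task1=750, n_task2=750, n_test=1000, n_trees=10)
bte_task1 = errors[0] / errors[1]  # single-task / multitask error, task 1
fte_task2 = errors[2] / errors[3]  # single-task / multitask error, task 2
print(bte_task1, fte_task2)

Example #7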
def experiment(n_xor, n_nxor, n_test, reps, n_trees, max_depth, acorn=None):
    if n_xor == 0 and n_nxor == 0:
        raise ValueError('Wake up and provide samples to train!!!')

    if acorn is not None:
        np.random.seed(acorn)

    errors = np.zeros((reps, 4), dtype=float)

    for i in range(reps):
        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": max_depth}}

        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}

        default_decider_class = SimpleArgmaxAverage
        default_decider_kwargs = {"classes": np.arange(2)}
        progressive_learner = ProgressiveLearner(
            default_transformer_class=default_transformer_class,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=default_voter_class,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class,
            default_decider_kwargs=default_decider_kwargs)
        uf = ProgressiveLearner(
            default_transformer_class=default_transformer_class,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=default_voter_class,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class,
            default_decider_kwargs=default_decider_kwargs)

        # source data
        xor, label_xor = generate_gaussian_parity(n_xor,
                                                  cov_scale=0.1,
                                                  angle_params=0)
        test_xor, test_label_xor = generate_gaussian_parity(n_test,
                                                            cov_scale=0.1,
                                                            angle_params=0)

        # target data
        nxor, label_nxor = generate_gaussian_parity(n_nxor,
                                                    cov_scale=0.1,
                                                    angle_params=np.pi / 4)
        test_nxor, test_label_nxor = generate_gaussian_parity(
            n_test, cov_scale=0.1, angle_params=np.pi / 4)

        if n_xor == 0:
            progressive_learner.add_task(nxor, label_nxor,
                                         num_transformers=n_trees)

            errors[i, 0] = 0.5
            errors[i, 1] = 0.5

            uf_task2 = progressive_learner.predict(test_nxor,
                                                   transformer_ids=[0],
                                                   task_id=0)
            l2f_task2 = progressive_learner.predict(test_nxor, task_id=0)

            errors[i, 2] = 1 - np.sum(uf_task2 == test_label_nxor) / n_test
            errors[i, 3] = 1 - np.sum(l2f_task2 == test_label_nxor) / n_test
        elif n_nxor == 0:
            progressive_learner.add_task(xor, label_xor,
                                         num_transformers=n_trees)

            uf_task1 = progressive_learner.predict(test_xor,
                                                   transformer_ids=[0],
                                                   task_id=0)
            l2f_task1 = progressive_learner.predict(test_xor, task_id=0)

            errors[i, 0] = 1 - np.sum(uf_task1 == test_label_xor) / n_test
            errors[i, 1] = 1 - np.sum(l2f_task1 == test_label_xor) / n_test
            errors[i, 2] = 0.5
            errors[i, 3] = 0.5
        else:
            progressive_learner.add_task(xor, label_xor,
                                         num_transformers=n_trees)
            progressive_learner.add_task(nxor, label_nxor,
                                         num_transformers=n_trees)

            uf.add_task(xor, label_xor, num_transformers=2 * n_trees)
            uf.add_task(nxor, label_nxor, num_transformers=2 * n_trees)

            uf_task1 = uf.predict(test_xor, transformer_ids=[0], task_id=0)
            l2f_task1 = progressive_learner.predict(test_xor, task_id=0)
            uf_task2 = uf.predict(test_nxor, transformer_ids=[1], task_id=1)
            l2f_task2 = progressive_learner.predict(test_nxor, task_id=1)

            errors[i, 0] = 1 - np.sum(uf_task1 == test_label_xor) / n_test
            errors[i, 1] = 1 - np.sum(l2f_task1 == test_label_xor) / n_test
            errors[i, 2] = 1 - np.sum(uf_task2 == test_label_nxor) / n_test
            errors[i, 3] = 1 - np.sum(l2f_task2 == test_label_nxor) / n_test

    return np.mean(errors, axis=0)
Example #8
def single_experiment(x, y, y_speaker, ntrees=19, model='uf', shuffle=False):
    num_tasks = 6
    num_points_per_task = 3000 // num_tasks
    speakers = ['g', 'j', 'l', 'n', 't', 'y']
    accuracies_across_tasks = []

    if model == 'dnn':
        x_all = x
        y_all = y

        default_transformer_class = NeuralClassificationTransformer

        network = keras.Sequential()
        network.add(
            layers.Conv2D(filters=16,
                          kernel_size=(3, 3),
                          activation='relu',
                          input_shape=np.shape(x_all)[1:]))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(filters=32,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(filters=128,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(filters=254,
                          kernel_size=(3, 3),
                          strides=2,
                          padding="same",
                          activation='relu'))

        network.add(layers.Flatten())
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(units=10, activation='softmax'))

        default_transformer_kwargs = {
            "network": network,
            "euclidean_layer_idx": -2,
            "loss": "categorical_crossentropy",
            "optimizer": Adam(3e-4),
            "fit_kwargs": {
                "epochs": 100,
                "callbacks": [EarlyStopping(patience=5, monitor="val_loss")],
                "verbose": False,
                "validation_split": 0.33,
                "batch_size": 32,
            },
        }
        default_voter_class = KNNClassificationVoter
        default_voter_kwargs = {"k": int(np.log2(num_points_per_task))}
        default_decider_class = SimpleArgmaxAverage

    elif model == 'uf':
        x_all = x.reshape(3000, -1)
        y_all = y

        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 30}}
        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}
        default_decider_class = SimpleArgmaxAverage

    if shuffle:
        np.random.shuffle(speakers)

    for j, task0_speaker in enumerate(speakers):
        progressive_learner = ProgressiveLearner(
            default_transformer_class=default_transformer_class,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=default_voter_class,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class)

        index = np.where(y_speaker == task0_speaker)
        x_task0 = x_all[index]
        y_task0 = y_all[index]
        train_x_task0, test_x_task0, train_y_task0, test_y_task0 = train_test_split(
            x_task0, y_task0, test_size=0.25)
        progressive_learner.add_task(
            X=train_x_task0,
            y=train_y_task0,
            task_id=0,
            num_transformers=1 if model == "dnn" else ntrees,
            transformer_voter_decider_split=[0.67, 0.33, 0],
            decider_kwargs={"classes": np.unique(train_y_task0)},
        )
        task_0_predictions = progressive_learner.predict(test_x_task0,
                                                         task_id=0)
        accuracies_across_tasks.append(
            np.mean(task_0_predictions == test_y_task0))

        for k, contribute_speaker in enumerate(speakers):
            if k != j:
                index = np.where(y_speaker == contribute_speaker)
                x_train = x_all[index]
                y_train = y_all[index]
                progressive_learner.add_transformer(
                    X=x_train,
                    y=y_train,
                    transformer_data_proportion=1,
                    num_transformers=1 if model == "dnn" else ntrees,
                    backward_task_ids=[0],
                )
            # When k == j no transformer is added, but the unchanged accuracy
            # is still recorded so each speaker contributes the same number
            # of entries.
            task_0_predictions = progressive_learner.predict(test_x_task0,
                                                             task_id=0)
            accuracies_across_tasks.append(
                np.mean(task_0_predictions == test_y_task0))

    return accuracies_across_tasks
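
Note on the returned list's length: each of the 6 task-0 speakers contributes one baseline accuracy plus one accuracy per contribute_speaker pass (including the unchanged k == j pass), i.e. 6 * (1 + 6) = 42 entries. A sanity check, assuming x, y, and y_speaker are in scope:

accs = single_experiment(x, y, y_speaker, ntrees=19, model='uf')
assert len(accs) == 6 * (1 + 6)  # 42 entries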
Example #9
def LF_experiment(train_x, train_y, test_x, test_y, ntrees, shift, slot,
                  model, num_points_per_task, acorn=None):

    df = pd.DataFrame()
    shifts = []
    tasks = []
    base_tasks = []
    accuracies_across_tasks = []
    train_times_across_tasks = []
    inference_times_across_tasks = []
    
    if model == "dnn":
        default_transformer_class = NeuralClassificationTransformer
        
        network = keras.Sequential()
        network.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=np.shape(train_x)[1:]))
        network.add(layers.BatchNormalization())
        network.add(layers.Conv2D(filters=32, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(layers.Conv2D(filters=254, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))

        network.add(layers.Flatten())
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation='relu'))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(units=10, activation = 'softmax'))
        
        default_transformer_kwargs = {"network" : network, 
                                      "euclidean_layer_idx" : -2,
                                      "num_classes" : 10,
                                      "optimizer" : keras.optimizers.Adam(3e-4)
                                     }
        
        default_voter_class = KNNClassificationVoter
        default_voter_kwargs = {"k" : 16 * int(np.log2(num_points_per_task))}
        
        default_decider_class = SimpleAverage
    elif model == "uf":
        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs" : {"max_depth" : 30}}
        
        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}
        
        default_decider_class = SimpleAverage
    progressive_learner = ProgressiveLearner(default_transformer_class = default_transformer_class, 
                                         default_transformer_kwargs = default_transformer_kwargs,
                                         default_voter_class = default_voter_class,
                                         default_voter_kwargs = default_voter_kwargs,
                                         default_decider_class = default_decider_class)

    for task_ii in range(10):
        print("Starting Task {} For Fold {} For Slot {}".format(task_ii, shift, slot))
        if acorn is not None:
            np.random.seed(acorn)

        train_start_time = time.time()
        start = task_ii * 5000 + slot * num_points_per_task
        end = task_ii * 5000 + (slot + 1) * num_points_per_task
        progressive_learner.add_task(
            X=train_x[start:end],
            y=train_y[start:end],
            num_transformers=1 if model == "dnn" else ntrees,
            transformer_voter_decider_split=[0.67, 0.33, 0],
            decider_kwargs={"classes": np.unique(train_y[start:end])},
            backward_task_ids=[0])
        train_end_time = time.time()
        
        inference_start_time = time.time()
        llf_task = progressive_learner.predict(test_x[:1000], task_id=0)
        inference_end_time = time.time()

        acc = np.mean(llf_task == test_y[:1000])
        accuracies_across_tasks.append(acc)
        shifts.append(shift)
        train_times_across_tasks.append(train_end_time - train_start_time)
        inference_times_across_tasks.append(inference_end_time -
                                            inference_start_time)
        
        print("Accuracy Across Tasks: {}".format(accuracies_across_tasks))
        print("Train Times Across Tasks: {}".format(train_times_across_tasks))
        print("Inference Times Across Tasks: {}".format(inference_times_across_tasks))
            
    df['data_fold'] = shifts
    df['task'] = range(1, 11)
    df['task_1_accuracy'] = accuracies_across_tasks
    df['train_times'] = train_times_across_tasks
    df['inference_times'] = inference_times_across_tasks

    file_to_save = 'result/'+model+str(ntrees)+'_'+str(shift)+'_'+str(slot)+'.pickle'
    with open(file_to_save, 'wb') as f:
        pickle.dump(df, f)
Example #10
def single_experiment(x, y, y_speaker, ntrees=10, model="odif", shuffle=False):
    num_tasks = 6
    num_points_per_task = 3000 // num_tasks
    speakers = ["g", "j", "l", "n", "t", "y"]
    single_task_accuracies = np.zeros(num_tasks, dtype=float)
    accuracies = np.zeros(27, dtype=float)

    if model == "odin":
        x_all = x
        y_all = y

        clear_session()  # clear GPU memory before each run, to avoid OOM error

        default_transformer_class = NeuralClassificationTransformer

        network = keras.Sequential()
        network.add(
            layers.Conv2D(
                filters=16,
                kernel_size=(3, 3),
                activation="relu",
                input_shape=np.shape(x_all)[1:],
            ))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(
                filters=32,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            ))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(
                filters=64,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            ))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(
                filters=128,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            ))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(
                filters=254,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            ))

        network.add(layers.Flatten())
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation="relu"))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation="relu"))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(units=10, activation="softmax"))

        default_transformer_kwargs = {
            "network": network,
            "euclidean_layer_idx": -2,
            "loss": "categorical_crossentropy",
            "optimizer": Adam(3e-4),
            "fit_kwargs": {
                "epochs": 100,
                "callbacks": [EarlyStopping(patience=5, monitor="val_loss")],
                "verbose": False,
                "validation_split": 0.33,
                "batch_size": 32,
            },
        }
        default_voter_class = KNNClassificationVoter
        default_voter_kwargs = {"k": int(np.log2(num_points_per_task))}
        default_decider_class = SimpleArgmaxAverage

    elif model == "odif":
        x_all = x.reshape(3000, -1)
        y_all = y

        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 30}}
        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}
        default_decider_class = SimpleArgmaxAverage

    if shuffle:
        np.random.shuffle(speakers)

    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
    )

    train_x_task = [[] for _ in range(num_tasks)]
    test_x_task = [[] for _ in range(num_tasks)]
    train_y_task = [[] for _ in range(num_tasks)]
    test_y_task = [[] for _ in range(num_tasks)]

    for j, task0_speaker in enumerate(speakers):
        index = np.where(y_speaker == task0_speaker)
        x_task0 = x_all[index]
        y_task0 = y_all[index]
        (
            train_x_task[j],
            test_x_task[j],
            train_y_task[j],
            test_y_task[j],
        ) = train_test_split(x_task0, y_task0, test_size=0.45)

        progressive_learner.add_task(
            X=train_x_task[j],
            y=train_y_task[j],
            task_id=j,
            num_transformers=1 if model == "odin" else ntrees,
            transformer_voter_decider_split=[0.67, 0.33, 0],
            decider_kwargs={"classes": np.unique(train_y_task[j])},
        )
        odi_predictions = progressive_learner.predict(X=test_x_task[j],
                                                      transformer_ids=[j],
                                                      task_id=j)
        accuracies[j] = np.mean(odi_predictions == test_y_task[j])

        for k, contribute_speaker in enumerate(speakers):
            if k > j:
                continue  # only tasks seen so far have been trained
            odi_predictions = progressive_learner.predict(test_x_task[k],
                                                          task_id=k)
            accuracies[6 + k + (j * (j + 1)) // 2] = np.mean(
                odi_predictions == test_y_task[k])

    return accuracies
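
The index 6 + k + (j * (j + 1)) // 2 packs the lower triangle of the (tasks trained, task evaluated) accuracy matrix into a flat array after the 6 single-task slots: round j contributes j + 1 entries. A quick check that the layout fills exactly the 27 slots allocated above, with no gaps or collisions:

indices = [6 + k + (j * (j + 1)) // 2 for j in range(6) for k in range(j + 1)]
assert indices == list(range(6, 27))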
Example #11
def L2_experiment(data_x,
                  data_y,
                  ntrees,
                  shift,
                  slot,
                  num_points_per_task,
                  task_num,
                  acorn=None):

    # construct dataframes
    df = pd.DataFrame()
    shifts = []
    slots = []
    accuracies_across_tasks = []

    # randomly separate the training and testing subsets
    train_x_task0, train_y_task0, test_x_task0, test_y_task0 = cross_val_data(
        data_x,
        data_y,
        num_points_per_task,
        total_task=10,
        shift=shift,
        slot=slot)

    # choose Uncertainty Forest as transformer
    progressive_learner = ProgressiveLearner(
        default_transformer_class=TreeClassificationTransformer,
        default_transformer_kwargs={"kwargs": {
            "max_depth": 30
        }},
        default_voter_class=TreeClassificationVoter,
        default_voter_kwargs={},
        default_decider_class=SimpleArgmaxAverage,
    )

    # training process
    progressive_learner.add_task(
        X=train_x_task0,
        y=train_y_task0,
        num_transformers=ntrees,
        transformer_voter_decider_split=[0.67, 0.33, 0],
        decider_kwargs={"classes": np.unique(train_y_task0)},
    )

    # testing process
    task_0_predictions = progressive_learner.predict(test_x_task0, task_id=0)

    # record results
    shifts.append(shift)
    slots.append(slot)
    accuracies_across_tasks.append(np.mean(task_0_predictions == test_y_task0))

    # repeating the tasks for task_num times
    for task_ii in range(1, task_num):

        # randomly separate the training and testing subsets
        train_x, train_y, _, _ = cross_val_data(
            data_x,
            data_y,
            num_points_per_task,
            total_task=10,
            shift=shift,
            slot=slot,
            task=task_ii,
        )

        # training process
        progressive_learner.add_transformer(
            X=train_x,
            y=train_y,
            transformer_data_proportion=1,
            num_transformers=ntrees,
            backward_task_ids=[0],
        )

        # testing process
        task_0_predictions = progressive_learner.predict(test_x_task0,
                                                         task_id=0)

        # record results
        shifts.append(shift)
        slots.append(slot)
        accuracies_across_tasks.append(
            np.mean(task_0_predictions == test_y_task0))

    # finalize dataframes
    df["data_fold"] = shifts
    df["slot"] = slots
    df["accuracy"] = accuracies_across_tasks

    # save results
    return df
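
A usage sketch, assuming data_x, data_y, and the cross_val_data helper used above are in scope; the parameter values are illustrative:

df = L2_experiment(data_x, data_y, ntrees=30, shift=1, slot=0,
                   num_points_per_task=500, task_num=10)
print(df.head())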
Example #12
def run_fte_bte_exp(data_x, data_y, which_task, model, ntrees=30, shift=0):

    df_total = []

    # The slot rotates which batch of training samples is used from each
    # class in each task; only one slot is run here.
    for slot in range(1):
        train_x, train_y, test_x, test_y = cross_val_data(
            data_x, data_y, shift, slot)

        if model == "odif":
            # Reshape the data
            train_x = train_x.reshape(
                train_x.shape[0],
                train_x.shape[1] * train_x.shape[2] * train_x.shape[3])
            test_x = test_x.reshape(
                test_x.shape[0],
                test_x.shape[1] * test_x.shape[2] * test_x.shape[3])

        if model == "odin":
            clear_session()  # clear GPU memory before each run, to avoid OOM error

            default_transformer_class = NeuralClassificationTransformer

            network = keras.Sequential()
            network.add(
                layers.Conv2D(
                    filters=16,
                    kernel_size=(3, 3),
                    activation="relu",
                    input_shape=np.shape(data_x)[1:],
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=32,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=64,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=128,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=254,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))

            network.add(layers.Flatten())
            network.add(layers.BatchNormalization())
            network.add(layers.Dense(2000, activation="relu"))
            network.add(layers.BatchNormalization())
            network.add(layers.Dense(2000, activation="relu"))
            network.add(layers.BatchNormalization())
            network.add(layers.Dense(units=10, activation="softmax"))

            default_transformer_kwargs = {
                "network": network,
                "euclidean_layer_idx": -2,
                "loss": "categorical_crossentropy",
                "optimizer": Adam(3e-4),
                "fit_kwargs": {
                    "epochs": 100,
                    "callbacks":
                    [EarlyStopping(patience=5, monitor="val_loss")],
                    "verbose": False,
                    "validation_split": 0.33,
                    "batch_size": 32,
                },
            }
            default_voter_class = KNNClassificationVoter
            default_voter_kwargs = {"k": int(np.log2(300))}
            default_decider_class = SimpleArgmaxAverage

            p_learner = ProgressiveLearner(
                default_transformer_class=default_transformer_class,
                default_transformer_kwargs=default_transformer_kwargs,
                default_voter_class=default_voter_class,
                default_voter_kwargs=default_voter_kwargs,
                default_decider_class=default_decider_class,
            )

        elif model == "odif":
            p_learner = LifelongClassificationForest()

        df = fte_bte_experiment(
            train_x,
            train_y,
            test_x,
            test_y,
            ntrees,
            shift,
            slot,
            model,
            p_learner,
            which_task,
            acorn=12345,
        )

        df_total.append(df)

    return df_total
Example #13
def odif_experiment(angle, data_x, data_y, reps=1, ntrees=29, acorn=None):
    if acorn is not None:
        np.random.seed(acorn)

    errors = np.zeros(2)

    for _ in range(reps):
        train_x1, train_y1, train_x2, train_y2, test_x, test_y = cross_val_data(
            data_x, data_y, total_cls=10
        )

        # change data angle for second task
        tmp_data = train_x2.copy()
        total_data = tmp_data.shape[0]

        for i in range(total_data):
            tmp_ = image_aug(tmp_data[i], angle)
            tmp_data[i] = tmp_

        train_x1 = train_x1.reshape(
            (
                train_x1.shape[0],
                train_x1.shape[1] * train_x1.shape[2] * train_x1.shape[3],
            )
        )
        tmp_data = tmp_data.reshape(
            (
                tmp_data.shape[0],
                tmp_data.shape[1] * tmp_data.shape[2] * tmp_data.shape[3],
            )
        )
        test_x = test_x.reshape(
            (test_x.shape[0], test_x.shape[1] * test_x.shape[2] * test_x.shape[3])
        )

        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 30}}

        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}

        default_decider_class = SimpleArgmaxAverage

        progressive_learner = ProgressiveLearner(
            default_transformer_class=default_transformer_class,
            default_transformer_kwargs=default_transformer_kwargs,
            default_voter_class=default_voter_class,
            default_voter_kwargs=default_voter_kwargs,
            default_decider_class=default_decider_class,
        )

        progressive_learner.add_task(
            X=train_x1,
            y=train_y1,
            transformer_voter_decider_split=[0.67, 0.33, 0],
            decider_kwargs={"classes": np.unique(train_y1)},
        )

        progressive_learner.add_transformer(
            X=tmp_data, y=train_y2, transformer_data_proportion=1, backward_task_ids=[0]
        )

        llf_task1 = progressive_learner.predict(test_x, task_id=0)
        llf_single_task = progressive_learner.predict(
            test_x, task_id=0, transformer_ids=[0]
        )

        errors[1] = errors[1] + (1 - np.mean(llf_task1 == test_y))
        errors[0] = errors[0] + (1 - np.mean(llf_single_task == test_y))

    errors = errors / reps
    return errors
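
Since errors[0] holds the single-task error and errors[1] the multitask error on task 1, backward transfer efficiency is the same ratio used in the earlier examples. A usage sketch, assuming data_x and data_y are in scope:

errors = odif_experiment(np.pi / 4, data_x, data_y, reps=1, ntrees=29)
bte = errors[0] / errors[1]  # > 1 means the rotated task helped task 1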
Example #14
def Odif_experiment(data_x,
                    data_y,
                    ntrees,
                    shift,
                    slot,
                    num_points_per_task,
                    acorn=None):

    df = pd.DataFrame()
    shifts = []
    slots = []
    accuracies_across_tasks = []

    train_x_task0, train_y_task0, test_x_task0, test_y_task0 = cross_val_data(
        data_x,
        data_y,
        num_points_per_task,
        total_task=10,
        shift=shift,
        slot=slot)

    default_transformer_class = TreeClassificationTransformer
    default_transformer_kwargs = {
        "kwargs": {
            "max_depth": 30,
            # Note: "auto" is deprecated in recent scikit-learn releases;
            # "sqrt" is the equivalent setting for classifiers.
            "max_features": "auto"
        }
    }

    default_voter_class = TreeClassificationVoter
    default_voter_kwargs = {}

    default_decider_class = SimpleArgmaxAverage

    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
    )

    progressive_learner.add_task(
        X=train_x_task0,
        y=train_y_task0,
        num_transformers=ntrees,
        transformer_voter_decider_split=[0.67, 0.33, 0],
        decider_kwargs={"classes": np.unique(train_y_task0)},
    )

    task_0_predictions = progressive_learner.predict(test_x_task0, task_id=0)

    shifts.append(shift)
    slots.append(slot)
    accuracies_across_tasks.append(np.mean(task_0_predictions == test_y_task0))

    for task_ii in range(1, 20):
        train_x, train_y, _, _ = cross_val_data(
            data_x,
            data_y,
            num_points_per_task,
            total_task=10,
            shift=shift,
            slot=slot,
            task=task_ii,
        )

        progressive_learner.add_task(
            X=train_x,
            y=train_y,
            num_transformers=ntrees,
            transformer_voter_decider_split=[0.67, 0.33, 0],
            decider_kwargs={"classes": np.unique(train_y)},
        )

        task_0_predictions = progressive_learner.predict(test_x_task0,
                                                         task_id=0)

        shifts.append(shift)
        slots.append(slot)
        accuracies_across_tasks.append(
            np.mean(task_0_predictions == test_y_task0))

    df["data_fold"] = shifts
    df["slot"] = slots
    df["accuracy"] = accuracies_across_tasks

    return df
Example #15
def single_experiment(train_x_task,
                      test_x_task,
                      train_y_task,
                      test_y_task,
                      ntrees=10,
                      model="odif"):
    num_tasks = 10
    num_points_per_task = 1800
    accuracies = np.zeros(65, dtype=float)

    if model == "odin":

        clear_session()  # clear GPU memory before each run, to avoid OOM error

        default_transformer_class = NeuralClassificationTransformer

        network = keras.Sequential()
        network.add(
            layers.Conv2D(
                filters=16,
                kernel_size=(3, 3),
                activation="relu",
                input_shape=np.shape(train_x_task[0])[1:],
            ))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(
                filters=32,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            ))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(
                filters=64,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            ))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(
                filters=128,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            ))
        network.add(layers.BatchNormalization())
        network.add(
            layers.Conv2D(
                filters=254,
                kernel_size=(3, 3),
                strides=2,
                padding="same",
                activation="relu",
            ))

        network.add(layers.Flatten())
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation="relu"))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(2000, activation="relu"))
        network.add(layers.BatchNormalization())
        network.add(layers.Dense(units=20, activation="softmax"))

        default_transformer_kwargs = {
            "network": network,
            "euclidean_layer_idx": -2,
            "loss": "categorical_crossentropy",
            "optimizer": Adam(3e-4),
            "fit_kwargs": {
                "epochs": 100,
                "callbacks": [EarlyStopping(patience=5, monitor="val_loss")],
                "verbose": False,
                "validation_split": 0.33,
                "batch_size": 32,
            },
        }
        default_voter_class = KNNClassificationVoter
        default_voter_kwargs = {"k": int(np.log2(num_points_per_task))}
        default_decider_class = SimpleArgmaxAverage

    elif model == "odif":
        for i in range(num_tasks):
            train_x_task[i] = train_x_task[i].reshape(1080, -1)
            test_x_task[i] = test_x_task[i].reshape(720, -1)

        default_transformer_class = TreeClassificationTransformer
        default_transformer_kwargs = {"kwargs": {"max_depth": 30}}
        default_voter_class = TreeClassificationVoter
        default_voter_kwargs = {}
        default_decider_class = SimpleArgmaxAverage

    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
    )

    for i in range(num_tasks):
        progressive_learner.add_task(
            X=train_x_task[i],
            y=train_y_task[i],
            task_id=i,
            num_transformers=1 if model == "odin" else ntrees,
            transformer_voter_decider_split=[0.67, 0.33, 0],
            decider_kwargs={"classes": np.unique(train_y_task[i])},
        )
        prediction = progressive_learner.predict(X=test_x_task[i],
                                                 transformer_ids=[i],
                                                 task_id=i)
        accuracies[i] = np.mean(prediction == test_y_task[i])

        for j in range(num_tasks):
            if j > i:
                continue  # tasks not yet seen have no valid predictions
            odif_predictions = progressive_learner.predict(test_x_task[j],
                                                           task_id=j)

            # triangular layout: entry (i, j) lands at 10 + j + i*(i+1)//2
            accuracies[10 + j + (i * (i + 1)) // 2] = np.mean(
                odif_predictions == test_y_task[j])
    # print('single experiment done!')

    return accuracies
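# Usage sketch (ours, not from the source): unpack the flat vector returned by
# single_experiment, assuming the triangular indexing used above. The helper
# name unpack_accuracies is hypothetical.
import numpy as np

def unpack_accuracies(accuracies, num_tasks=10):
    # first num_tasks entries: single-task accuracies
    single_task = accuracies[:num_tasks]
    # cross_task[i, j]: accuracy on task j after training through task i
    cross_task = np.full((num_tasks, num_tasks), np.nan)
    for i in range(num_tasks):
        for j in range(i + 1):
            cross_task[i, j] = accuracies[num_tasks + j + (i * (i + 1)) // 2]
    return single_task, cross_task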
Example #16
def experiment(
    n_task1,
    n_task2,
    n_test=1000,
    task1_angle=0,
    task2_angle=np.pi / 2,
    n_trees=10,
    max_depth=None,
    random_state=None,
    register=False,
):
    """
    A function to do backwards transfer efficiency experiment
    between two tasks. Task 1 is XOR. Task 2 is RXOR.
    A registered Task 2

    Parameters
    ----------
    n_task1 : int
        Total number of train sample for task 1.

    n_task2 : int
        Total number of train dsample for task 2

    n_test : int, optional (default=1000)
        Number of test sample for each task.

    task1_angle : float, optional (default=0)
        Angle in radian for task 1.

    task2_angle : float, optional (default=numpy.pi/2)
        Angle in radian for task 2.

    n_trees : int, optional (default=10)
        Number of total trees to train for each task.

    max_depth : int, optional (default=None)
        Maximum allowable depth for each tree.

    random_state : int, RandomState instance, default=None
        Determines random number generation for dataset creation. Pass an int
        for reproducible output across multiple function calls.

    register: boolean, default=False
        Register task2 to task1 before feeding to forest.

    Returns
    -------
    errors : array of shape [6]
        Elements of the array is organized as single task error task1,
        multitask error task1, single task error task2,
        multitask error task2, naive UF error task1,
        naive UF task2.
    """

    if n_task1 == 0 and n_task2 == 0:
        raise ValueError("Wake up and provide samples to train!!!")

    if random_state is not None:
        np.random.seed(random_state)

    errors = np.zeros(6, dtype=float)

    default_transformer_class = TreeClassificationTransformer
    default_transformer_kwargs = {"kwargs": {"max_depth": max_depth}}

    default_voter_class = TreeClassificationVoter
    default_voter_kwargs = {}

    default_decider_class = SimpleArgmaxAverage
    default_decider_kwargs = {"classes": np.arange(2)}
    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs,
    )
    uf = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs,
    )
    naive_uf = ProgressiveLearner(  # instantiated for parity; not used below
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs,
    )

    # source data
    X_task1, y_task1 = generate_gaussian_parity(n_task1,
                                                angle_params=task1_angle)
    test_task1, test_label_task1 = generate_gaussian_parity(
        n_test, angle_params=task1_angle)

    # target data
    X_task2, y_task2 = generate_gaussian_parity(n_task2,
                                                angle_params=task2_angle)
    test_task2, test_label_task2 = generate_gaussian_parity(
        n_test, angle_params=task2_angle)

    if register:
        X_task2 = cpd_reg(X_task2.copy(), X_task1.copy())

    progressive_learner.add_task(X_task1, y_task1, num_transformers=n_trees)
    progressive_learner.add_task(X_task2, y_task2, num_transformers=n_trees)

    uf.add_task(X_task1, y_task1, num_transformers=2 * n_trees)
    uf.add_task(X_task2, y_task2, num_transformers=2 * n_trees)

    uf_task1 = uf.predict(test_task1, transformer_ids=[0], task_id=0)
    l2f_task1 = progressive_learner.predict(test_task1, task_id=0)

    errors[0] = 1 - np.mean(uf_task1 == test_label_task1)
    errors[1] = 1 - np.mean(l2f_task1 == test_label_task1)

    return errors
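# Usage sketch (ours): estimate backward transfer efficiency (BTE) on task 1
# as the ratio of mean single-task error to mean multitask error, following
# the error layout documented in the docstring. Sample sizes are illustrative.
import numpy as np

reps = 100
errs = np.array([
    experiment(n_task1=500, n_task2=500, task2_angle=np.pi / 4,
               n_trees=10, random_state=rep, register=False)
    for rep in range(reps)
])
bte = np.mean(errs[:, 0]) / np.mean(errs[:, 1])  # > 1: positive backward transfer
print("Backward transfer efficiency:", bte)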
Example #17
def label_shuffle_experiment(
    train_x,
    train_y,
    test_x,
    test_y,
    ntrees,
    shift,
    slot,
    num_points_per_task,
    acorn=None,
):

    df = pd.DataFrame()
    shifts = []
    accuracies_across_tasks = []

    default_transformer_class = TreeClassificationTransformer
    default_transformer_kwargs = {
        "kwargs": {
            "max_depth": 30,
            "max_features": "auto"
        }
    }

    default_voter_class = TreeClassificationVoter
    default_voter_kwargs = {}

    default_decider_class = SimpleArgmaxAverage

    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
    )

    for task_ii in range(10):
        if acorn is not None:
            np.random.seed(acorn)

        # this task's slot of num_points_per_task samples
        tmp_y = train_y[task_ii * 5000 +
                        slot * num_points_per_task:task_ii * 5000 +
                        (slot + 1) * num_points_per_task]

        if task_ii != 0:
            np.random.shuffle(tmp_y)  # shuffle labels for every task after the first

        progressive_learner.add_task(
            X=train_x[task_ii * 5000 +
                      slot * num_points_per_task:task_ii * 5000 +
                      (slot + 1) * num_points_per_task],
            y=tmp_y,
            num_transformers=ntrees,
            transformer_voter_decider_split=[0.63, 0.37, 0],
            decider_kwargs={
                "classes":
                np.unique(train_y[task_ii * 5000 +
                                  slot * num_points_per_task:task_ii * 5000 +
                                  (slot + 1) * num_points_per_task])
            },
        )

        llf_task = progressive_learner.predict(X=test_x[0:1000, :], task_id=0)

        shifts.append(shift)

        accuracies_across_tasks.append(np.mean(llf_task == test_y[0:1000]))

    df["data_fold"] = shifts
    df["task"] = range(1, 11)
    df["task_1_accuracy"] = accuracies_across_tasks

    return df
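# Usage sketch (ours): drive label_shuffle_experiment across data folds and
# slots, assuming train_x/train_y/test_x/test_y are already loaded and ordered
# by task; the fold and slot counts here are illustrative.
import pandas as pd

frames = []
for shift in range(6):
    for slot in range(10):
        frames.append(
            label_shuffle_experiment(train_x, train_y, test_x, test_y,
                                     ntrees=10, shift=shift, slot=slot,
                                     num_points_per_task=500, acorn=12345))
result = pd.concat(frames, ignore_index=True)
print(result.groupby("task")["task_1_accuracy"].mean())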
Example #18
def LF_experiment(data_x, data_y, angle, model, granularity, reps=1, ntrees=29, acorn=None):
    if acorn is not None:
        np.random.seed(acorn)

    errors = np.zeros(2)

    with tf.device('/gpu:' + str(int(angle // granularity) % 4)):  # spread angles across four GPUs
        for rep in range(reps):
            train_x1, train_y1, train_x2, train_y2, test_x, test_y = cross_val_data(data_x, data_y, total_cls=10)


            # change data angle for second task
            tmp_data = train_x2.copy()
            total_data = tmp_data.shape[0]

            for i in range(total_data):
                tmp_data[i] = image_aug(tmp_data[i], angle)

            if model == "uf":
                train_x1 = train_x1.reshape((train_x1.shape[0], train_x1.shape[1] * train_x1.shape[2] * train_x1.shape[3]))
                tmp_data = tmp_data.reshape((tmp_data.shape[0], tmp_data.shape[1] * tmp_data.shape[2] * tmp_data.shape[3]))
                test_x = test_x.reshape((test_x.shape[0], test_x.shape[1] * test_x.shape[2] * test_x.shape[3]))

            if model == "dnn":

                default_transformer_class = NeuralClassificationTransformer

                network = keras.Sequential()
                network.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=np.shape(train_x1)[1:]))
                network.add(layers.BatchNormalization())
                network.add(layers.Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding="same", activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=2, padding="same", activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding="same", activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(layers.Conv2D(filters=254, kernel_size=(3, 3), strides=2, padding="same", activation='relu'))

                network.add(layers.Flatten())
                network.add(layers.BatchNormalization())
                network.add(layers.Dense(2000, activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(layers.Dense(2000, activation='relu'))
                network.add(layers.BatchNormalization())
                network.add(layers.Dense(units=10, activation='softmax'))

                default_transformer_kwargs = {"network" : network,
                                              "euclidean_layer_idx" : -2,
                                              "num_classes" : 10,
                                              "optimizer" : keras.optimizers.Adam(3e-4)
                                             }

                default_voter_class = KNNClassificationVoter
                default_voter_kwargs = {"k" : int(np.log2(len(train_x1)))}

                default_decider_class = SimpleArgmaxAverage
            elif model == "uf":
                default_transformer_class = TreeClassificationTransformer
                default_transformer_kwargs = {"kwargs" : {"max_depth" : 30}}

                default_voter_class = TreeClassificationVoter
                default_voter_kwargs = {}

                default_decider_class = SimpleArgmaxAverage


            progressive_learner = ProgressiveLearner(
                default_transformer_class=default_transformer_class,
                default_transformer_kwargs=default_transformer_kwargs,
                default_voter_class=default_voter_class,
                default_voter_kwargs=default_voter_kwargs,
                default_decider_class=default_decider_class)

            progressive_learner.add_task(
                X=train_x1,
                y=train_y1,
                transformer_voter_decider_split=[0.67, 0.33, 0],
                decider_kwargs={"classes": np.unique(train_y1)}
            )

            progressive_learner.add_transformer(
                X=tmp_data,
                y=train_y2,
                transformer_data_proportion=1,
                backward_task_ids=[0]  # use the rotated data to update task 0
            )


            llf_task1 = progressive_learner.predict(test_x, task_id=0)
            llf_single_task = progressive_learner.predict(test_x, task_id=0, transformer_ids=[0])

            errors[1] += 1 - np.mean(llf_task1 == test_y)
            errors[0] += 1 - np.mean(llf_single_task == test_y)

    errors = errors / reps
    print("Errors for angle {}: {}".format(angle, errors))
    with open('results/angle_' + str(angle) + '_' + model + '.pickle', 'wb') as f:
        pickle.dump(errors, f, protocol=2)
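# Usage sketch (ours): sweep LF_experiment over rotation angles; data_x/data_y
# are assumed to be the arrays expected by cross_val_data, and the device
# string above spreads consecutive angles across four GPUs.
granularity = 2
for angle in range(0, 180, granularity):
    LF_experiment(data_x, data_y, angle, model="uf",
                  granularity=granularity, reps=4, ntrees=29, acorn=1)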
def experiment(
    n_task1,
    n_task2,
    n_test=0.4,
    task1_angle=0,
    task2_angle=np.pi / 2,
    n_trees=10,
    max_depth=None,
    random_state=None,
    register_cpd=False,
    register_otp=False,
    register_icp=False,
):

    if n_task1 == 0 and n_task2 == 0:
        raise ValueError("Wake up and provide samples to train!!!")

    if random_state is not None:
        np.random.seed(random_state)

    errors = np.zeros(6, dtype=float)

    default_transformer_class = TreeClassificationTransformer
    default_transformer_kwargs = {"kwargs": {"max_depth": max_depth}}

    default_voter_class = TreeClassificationVoter
    default_voter_kwargs = {}

    default_decider_class = SimpleArgmaxAverage
    default_decider_kwargs = {"classes": np.arange(2)}
    progressive_learner = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs,
    )
    uf = ProgressiveLearner(
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs,
    )
    naive_uf = ProgressiveLearner(  # instantiated for parity; not used below
        default_transformer_class=default_transformer_class,
        default_transformer_kwargs=default_transformer_kwargs,
        default_voter_class=default_voter_class,
        default_voter_kwargs=default_voter_kwargs,
        default_decider_class=default_decider_class,
        default_decider_kwargs=default_decider_kwargs,
    )

    # source data
    X_task1, y_task1 = sample_cc18(n_task1, angle_params=task1_angle)
    test_task1, test_label_task1 = sample_cc18(n_test,
                                               angle_params=task1_angle)

    # target data
    tform1 = transform.AffineTransform(shear=0 * np.pi / 180)
    tform2 = transform.AffineTransform(shear=task2_angle)

    X_task2, y_task2 = sample_cc18(n_task2, angle_params=task1_angle)
    test_task2, test_label_task2 = sample_cc18(n_test,
                                               angle_params=task1_angle)

    # Transform training set
    X1_top = X_task2[X_task2[:, 1] >= 0]
    X1_bottom = X_task2[X_task2[:, 1] < 0]
    y1_top = y_task2[X_task2[:, 1] >= 0]
    y1_bottom = y_task2[X_task2[:, 1] < 0]

    m = X1_top.shape[1]
    src = np.ones((m + 1, X1_top.shape[0]))
    src[:m, :] = np.copy(X1_top.T)

    src = np.dot(tform1.params, src)
    X2_top = src.T[:, 0:2]
    y2_top = y1_top

    m = X1_bottom.shape[1]
    src = np.ones((m + 1, X1_bottom.shape[0]))
    src[:m, :] = np.copy(X1_bottom.T)

    src = np.dot(tform2.params, src)
    X2_bottom = src.T[:, 0:2]
    y2_bottom = y1_bottom

    X_task2 = np.concatenate((X2_top, X2_bottom))
    y_task2 = np.concatenate((y2_top, y2_bottom))

    if register_cpd:
        X_task2 = cpd_reg(X_task2.copy(), X_task1.copy())

    if register_otp:
        ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-2)
        ot_sinkhorn.fit(Xs=X_task2.copy(),
                        Xt=X_task1.copy(),
                        ys=y_task2.copy(),
                        yt=y_task1.copy())
        transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=X_task2.copy())
        X_task2 = transp_Xs_sinkhorn

    if register_icp:
        T, X_3, i = icp(X_task2.copy(), X_task1.copy(), y_task2.copy(),
                        y_task1.copy())
        X_task2 = X_3.T[:, 0:2]

    progressive_learner.add_task(X_task1, y_task1, num_transformers=n_trees)
    progressive_learner.add_task(X_task2, y_task2, num_transformers=n_trees)

    uf.add_task(X_task1, y_task1, num_transformers=2 * n_trees)
    uf.add_task(X_task2, y_task2, num_transformers=2 * n_trees)

    uf_task1 = uf.predict(test_task1, transformer_ids=[0], task_id=0)
    l2f_task1 = progressive_learner.predict(test_task1, task_id=0)

    errors[0] = 1 - np.mean(uf_task1 == test_label_task1)
    errors[1] = 1 - np.mean(l2f_task1 == test_label_task1)

    return errors
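# Usage sketch (ours): compare the three registration options; as above, only
# errors[0] (single-task UF) and errors[1] (multitask L2F) are populated.
settings = [
    ("none", {}),
    ("cpd", {"register_cpd": True}),
    ("otp", {"register_otp": True}),
    ("icp", {"register_icp": True}),
]
for name, kwargs in settings:
    errs = experiment(n_task1=500, n_task2=500, random_state=0, **kwargs)
    print("{}: UF err={:.3f}, L2F err={:.3f}".format(name, errs[0], errs[1]))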