Example #1
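Most of the snippets below are usage examples of modAL's ActiveLearner and assume imports along these lines (a minimal common set; individual examples may additionally need copy, pickle, time, or functools.partial):

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from modAL.models import ActiveLearner
from modAL.uncertainty import (classifier_margin, classifier_uncertainty,
                               entropy_sampling, margin_sampling,
                               uncertainty_sampling)
from modAL.batch import uncertainty_batch_sampling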
    def al_stream(self, data, target, X_train, y_train, X_full, y_full,
                  train_idx):
        # initializing the active learner
        acc = []
        learner = ActiveLearner(estimator=RandomForestClassifier(),
                                query_strategy=margin_sampling,
                                X_training=X_train,
                                y_training=y_train)

        # print('Initial prediction accuracy: %f' % learner.score(X_full, y_full))
        index = 0
        # learning until the accuracy reaches a given threshold
        while learner.score(X_full, y_full) < 0.90:
            stream_idx = np.random.choice(range(len(X_full)))
            if classifier_uncertainty(learner, X_full[stream_idx].reshape(
                    1, -1)) >= 0.2:
                learner.teach(X_full[stream_idx].reshape(1, -1),
                              y_full[stream_idx].reshape(-1, ))
                learner_score = learner.score(X_full, y_full)
                # print('Item no. %d queried, new accuracy: %f' % (stream_idx, learner_score))
                print('%0.3f' % (learner_score), end=",")
                if index == self.query_number:
                    break
                index = index + 1
                acc.append(learner_score)
        return acc
Example #2
    def uncertainty_values(self, data, target, X_train, y_train, X_full,
                           y_full, train_idx):
        print("START: ST")
        # initializing the active learner
        learner = ActiveLearner(estimator=RandomForestClassifier(),
                                query_strategy=margin_sampling,
                                X_training=X_train,
                                y_training=y_train)
        print('%f' % learner.score(X_full, y_full))
        index = 0
        # learning until the accuracy reaches a given threshold
        while learner.score(X_full, y_full) < 0.90:
            stream_idx = np.random.choice(range(len(X_full)))
            if classifier_uncertainty(learner, X_full[stream_idx].reshape(
                    1, -1)) >= 0.4:

                print("[ %1.3f, %1.3f]" %
                      (classifier_uncertainty(
                          learner, X_full[stream_idx].reshape(1, -1))[0],
                       classifier_margin(learner, X_full[stream_idx].reshape(
                           1, -1))[0]))

                learner.teach(X_full[stream_idx].reshape(1, -1),
                              y_full[stream_idx].reshape(-1, ))
                learner_score = learner.score(X_full, y_full)
                # print('Item no. %d queried, new accuracy: %f' % (stream_idx, learner_score))
                # print('%f' % (learner_score))
                if index == 50:
                    break
                index = index + 1
        print("END: ST")
Example #3
def active_learning_procedure(query_strategy,
                              test_X,
                              test_y,
                              pool_X,
                              pool_y,
                              initial_X,
                              initial_y,
                              estimator,
                              epochs=50,
                              batch_size=128,
                              n_queries=100,
                              n_instances=10,
                              verbose=0):
    learner = ActiveLearner(estimator=estimator,
                            X_training=initial_X,
                            y_training=initial_y,
                            query_strategy=query_strategy,
                            verbose=verbose)
    perf_hist = [learner.score(test_X, test_y, verbose=verbose)]
    for index in range(n_queries):
        query_idx, query_instance = learner.query(pool_X, n_instances)
        learner.teach(pool_X[query_idx],
                      pool_y[query_idx],
                      epochs=epochs,
                      batch_size=batch_size,
                      verbose=verbose)
        pool_X = np.delete(pool_X, query_idx, axis=0)
        pool_y = np.delete(pool_y, query_idx, axis=0)
        model_accuracy = learner.score(test_X, test_y, verbose=0)
        print("accuracy after query {n}: {acc:0.4f}".format(n=index + 1,
                                                            acc=model_accuracy))
        perf_hist.append(model_accuracy)
    return perf_hist
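Since teach forwards epochs and batch_size to the estimator's fit, this procedure assumes a Keras-style scikit-learn wrapper. A hedged usage sketch: create_keras_model is a hypothetical function returning a compiled Keras classifier, and the wrapper import matches older Keras releases (newer code would use scikeras):

from keras.wrappers.scikit_learn import KerasClassifier
from modAL.uncertainty import uncertainty_sampling

estimator = KerasClassifier(create_keras_model)  # create_keras_model: assumed helper
perf_hist = active_learning_procedure(uncertainty_sampling,
                                      test_X, test_y,
                                      pool_X, pool_y,
                                      initial_X, initial_y,
                                      estimator)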
Example #4
    def learn(self):
        # seeding
        classes = self.short_df['grades_round'].unique()
        seed_index = []
        for i in classes:
            seed_index.append(self.short_df['grades_round'][
                self.short_df['grades_round'] == i].index[0])

        act_data = self.short_df.copy()
        accuracy_list = []

        # initialising
        train_idx = seed_index
        X_train = self.X[train_idx]
        y_train = self.Y[train_idx]

        # generating the pool
        X_pool = np.delete(self.X, train_idx, axis=0)
        y_pool = np.delete(self.Y, train_idx)

        act_data = act_data.drop(axis=0, index=train_idx)
        act_data.reset_index(drop=True, inplace=True)

        # initializing the active learner

        learner = ActiveLearner(estimator=self.model,
                                X_training=X_train,
                                y_training=y_train,
                                query_strategy=self.query_method)

        # pool-based sampling
        n_queries = int(len(self.X) / (100 / self.percent))
        for idx in range(n_queries):
            query_idx, query_instance = learner.query(X_pool)
            learner.teach(X=X_pool[query_idx].reshape(1, -1),
                          y=y_pool[query_idx].reshape(1, ))

            # remove queried instance from pool
            X_pool = np.delete(X_pool, query_idx, axis=0)
            y_pool = np.delete(y_pool, query_idx)

            act_data = act_data.drop(axis=0, index=query_idx)
            act_data.reset_index(drop=True, inplace=True)

            accuracy_list.append(learner.score(X_pool, y_pool))
#             print('Accuracy after query no. %d: %f' % (idx+1, learner.score(X_pool, y_pool)))
        print("By labelling just", round(n_queries * 100.0 / len(self.X), 2),
              "% of the total data, an accuracy of",
              round(learner.score(X_pool, y_pool), 3),
              "is achieved on the unseen data")
        model_pred = learner.predict(X_pool)
        model_f1 = f1_score(y_pool, model_pred, average='weighted')
        return accuracy_list, model_f1
Example #5
def simple_rf(data, target, X_train, y_train, X_full, y_full, train_idx):
    # print("START: RF")
    for i in range(201, 1701):
        learner = ActiveLearner(estimator=RandomForestClassifier(),
                                X_training=X_train[:i],
                                y_training=y_train[:i])
        print(' %0.3f' % learner.score(X_full, y_full), end=",")
Example #6
    def al_pool(self, data, target, X_train, y_train, X_full, y_full, train_idx):
        acc = []
        X_pool = np.delete(data, train_idx, axis=0)
        y_pool = np.delete(target, train_idx)
        learner = ActiveLearner(
            estimator=RandomForestClassifier(),
            X_training=X_train, y_training=y_train
        )

        n_queries = self.query_number
        # n_queries = 1500
        for idx in range(n_queries):
            query_idx, query_instance = learner.query(X_pool)
            learner.teach(
                X=X_pool[query_idx].reshape(1, -1),
                y=y_pool[query_idx].reshape(1, )
            )
            # remove queried instance from pool
            X_pool = np.delete(X_pool, query_idx, axis=0)
            y_pool = np.delete(y_pool, query_idx)
            learner_score = learner.score(data, target)

            # learner.estimator
            # print('Accuracy after query no. %d: %f' % (idx + 1, learner_wscore))
            # re-split and compute per-class metrics ('score' is assumed to be
            # sklearn's precision_recall_fscore_support; values are unused below)
            X_train, X_test, y_train, y_test = train_test_split(X_full, y_full, test_size=0.30)
            y_predict = learner.predict(X_test)
            precision, recall, fscore, support = score(y_test, y_predict)
            acc.append(learner_score)
            print('%0.3f' % (learner_score), end=",")
        return acc
Example #7
def RandomLearner(X, y):
    """
    Create an active learner with random query strategy and run the active learner on the given data set. You should implement this also using modAL. Use an SVM classifier with default parameters as the estimator.
    Input:
    The data set X and the corresponding labels
    Return:
    The accuracies evaluated on X, y whenever querying the true label of a data point from the oracle, as a one-dimensional numpy array, and the number of data points queried from the oracle for the true label.
	"""

    random_learner = ActiveLearner(estimator=SVC(),
                                   query_strategy=RandomQuery,
                                   X_training=np.array([[0.5, 4.0], [2.0,
                                                                     1.0]]),
                                   y_training=np.array([[0], [1]]))

    ### TODO: Write the main loop for running the random active learner
    accuracies = []
    i = 0

    #S, SLabels = np.array([[0.5, 4.0], [2.0, 1.0]]), np.array([[0], [1]])
    U, ULabels = copy.deepcopy(X), copy.deepcopy(y)

    while (len(U) != 0):
        idx, instance = random_learner.query(U)
        i += 1
        random_learner._add_training_data(U[idx].reshape(1, 2),
                                          ULabels[idx].reshape(1, 1))
        random_learner._fit_to_known()
        U, ULabels = np.delete(U, idx, axis=0), np.delete(ULabels, idx, axis=0)
        acc = random_learner.score(X, y)
        accuracies.append(acc)

    return np.array(accuracies), i
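The RandomQuery strategy used above is not shown in the snippet. A minimal sketch of a compatible strategy, offered as an assumption (depending on the modAL version, strategies return either bare indices or an (indices, samples) tuple; this returns the tuple form the surrounding code consumes):

import numpy as np

def RandomQuery(classifier, X, n_instances=1):
    # choose pool indices uniformly at random, ignoring the classifier
    query_idx = np.random.choice(len(X), size=n_instances, replace=False)
    return query_idx, X[query_idx]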
Example #8
    def al_pool_margin(self, data, target, X_train, y_train, X_full, y_full,
                       train_idx):
        acc = []
        X_pool = np.delete(data, train_idx, axis=0)
        y_pool = np.delete(target, train_idx)
        learner = ActiveLearner(estimator=RandomForestClassifier(),
                                query_strategy=margin_sampling,
                                X_training=X_train,
                                y_training=y_train)

        n_queries = self.query_number
        # n_queries = 1500
        for idx in range(n_queries):
            query_idx, query_instance = learner.query(X_pool)
            learner.teach(X=X_pool[query_idx].reshape(1, -1),
                          y=y_pool[query_idx].reshape(1, ))
            # remove queried instance from pool
            X_pool = np.delete(X_pool, query_idx, axis=0)
            y_pool = np.delete(y_pool, query_idx)
            learner_score = learner.score(data, target)
            # print('Accuracy after query no. %d: %f' % (idx + 1, learner_wscore))
            precision, recall, fscore, support = self.performance_measure(
                learner, X_full, y_full)
            learner_score = fscore
            acc.append(learner_score)
            print('%0.3f' % (learner_score), end=",")
        return acc
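The performance_measure helper is not included in the snippet. A plausible sketch matching this call, assuming it wraps sklearn's precision_recall_fscore_support (Examples #15 and #27 use a variant that additionally returns accuracy):

from sklearn.metrics import precision_recall_fscore_support

def performance_measure(self, learner, X, y):
    # weighted precision/recall/F-score of the learner on (X, y);
    # note that support is None when an average is requested
    y_pred = learner.predict(X)
    precision, recall, fscore, support = precision_recall_fscore_support(
        y, y_pred, average='weighted')
    return precision, recall, fscore, support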
Example #9
def al_Loop(estimator, X_train, Y_train, X, Y, X_test, Y_test, indexs):
    learner = ActiveLearner(estimator=estimator,
                            X_training=X_train,
                            y_training=Y_train)
    X_pool = np.delete(X, indexs, axis=0)
    Y_pool = np.delete(Y, indexs, axis=0)
    index = 0

    accuracy = 0
    while len(X_pool) > 0:
        query_index, _ = learner.query(X_pool)
        x, y = X_pool[query_index].reshape(1, -1), Y_pool[query_index].reshape(
            1, )
        learner.teach(X=x, y=y)
        X_pool, Y_pool = np.delete(X_pool, query_index,
                                   axis=0), np.delete(Y_pool, query_index)
        model_accuracy = 1 - learner.score(X_pool, Y_pool)  # error rate on the remaining pool

        print('Error after query {n}: {acc:0.4f}'.format(n=index + 1,
                                                         acc=model_accuracy))
        accuracy = model_accuracy
        predicts = learner.predict(X_test)
        corrects = (predicts == Y_test)
        accs = (sum([1 if i else 0 for i in corrects]) / len(predicts))
        accs = 1 - accs  # convert test-set accuracy to error rate
        print(accs)
        index += 1
    return learner
Example #10
def train_model(data, project_id, estimator):
    log.info('train model')
    start_time = time.time()

    x_test = data['x_test']
    y_test = data['y_test']

    # initial model train model
    if (estimator == 'KNC'):
        estimator = KNeighborsClassifier()
    elif (estimator == 'GBC'):
        estimator = GradientBoostingClassifier()
    elif (estimator == 'RFC'):
        estimator = RandomForestClassifier()
    learner = ActiveLearner(estimator=estimator,
                            X_training=data['x_teach'],
                            y_training=data['y_teach'])

    # save the model to disk
    model_name = project_id + "_model.pkl"
    with open('./' + modelDir + model_name, 'wb') as knn_pickle:
        pickle.dump(learner, knn_pickle)

    initial_accuracy = learner.score(x_test, y_test)

    log.info(
        f'Initial accuracy: { initial_accuracy } train model end (in secs): {int(time.time() - start_time)}'
    )
    return {
        "accuracy": initial_accuracy,
        "model": model_name,
        "test": data['id_test']
    }
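A natural companion step, sketched here as an assumption rather than taken from the source: reload the pickled learner, teach it newly labelled rows, and save it back. The 'x_new'/'y_new' keys are hypothetical additions to the data dict:

import pickle

def teach_model(data, project_id):
    model_name = project_id + "_model.pkl"
    with open('./' + modelDir + model_name, 'rb') as f:
        learner = pickle.load(f)
    learner.teach(data['x_new'], data['y_new'])  # hypothetical keys for fresh labels
    accuracy = learner.score(data['x_test'], data['y_test'])
    with open('./' + modelDir + model_name, 'wb') as f:
        pickle.dump(learner, f)
    return {"accuracy": accuracy, "model": model_name}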
Example #11
File: t3.py Project: ncgamit/ALforASAG
    def learn(self):
        # seeding
        classes = self.short_df['grades_round'].unique()
        seed_index = []
        for i in classes:
            seed_index.append(self.short_df['grades_round'][
                self.short_df['grades_round'] == i].index[0])

        act_data = self.short_df.copy()
        accuracy_list = []
        f1_total_list = []
        kappa_total_list = []

        # initialising
        train_idx = seed_index
        X_train = self.X[train_idx]
        y_train = self.Y[train_idx]

        # generating the pool
        X_pool = np.delete(self.X, train_idx, axis=0)
        y_pool = np.delete(self.Y, train_idx)

        act_data = act_data.drop(axis=0, index=train_idx)
        act_data.reset_index(drop=True, inplace=True)

        # initializing the random learner
        learner = ActiveLearner(
            estimator=self.model,
            X_training=X_train,
            y_training=y_train,
        )

        # pool-based sampling
        n_queries = int(len(self.X) / (100 / self.percent))
        for idx in range(n_queries):
            query_idx = np.random.choice(range(len(X_pool)))
            learner.teach(X=X_pool[query_idx].reshape(1, -1),
                          y=y_pool[query_idx].reshape(1, ))

            # remove queried instance from pool
            X_pool = np.delete(X_pool, query_idx, axis=0)
            y_pool = np.delete(y_pool, query_idx)

            act_data = act_data.drop(axis=0, index=query_idx)
            act_data.reset_index(drop=True, inplace=True)

            accuracy_list.append(learner.score(X_pool, y_pool))

            model_pred = learner.predict(X_pool)
            f1_total_list.append(
                f1_score(y_pool,
                         model_pred,
                         average="weighted",
                         labels=np.unique(model_pred)))
            kappa_total_list.append(cohen_kappa_score(y_pool, model_pred))
        return accuracy_list, f1_total_list, kappa_total_list
Example #12
def active_learn(df1, first_item_index_of_each_category):
    train_idx = first_item_index_of_each_category

    data = df1.values[:, 1:]
    target = df1['label'].values

    X_full = df1.values[:, 1:]
    y_full = df1['label'].values

    X_train = df1.values[:, 1:][
        train_idx]  # features start at the second column; the first column is the label
    y_train = df1['label'].values[train_idx]

    X_pool = np.delete(data, train_idx, axis=0)
    y_pool = np.delete(target, train_idx)

    for i in range(1001, 1500):
        learner = ActiveLearner(estimator=RandomForestClassifier(),
                                X_training=X_train[:i],
                                y_training=y_train[:i])
        print('Accuracy with %d training samples: %f' %
              (i, learner.score(X_full, y_full)))
    for _ in range(5):
        print("================================")
    learner = ActiveLearner(estimator=RandomForestClassifier(),
                            X_training=X_train[:1001],
                            y_training=y_train[:1001])

    n_queries = 502
    performance_array = []
    for idx in range(n_queries):
        query_idx, query_instance = learner.query(X_pool)
        learner.teach(X=X_pool[query_idx].reshape(1, -1),
                      y=y_pool[query_idx].reshape(1, ))
        # remove queried instance from pool
        X_pool = np.delete(X_pool, query_idx, axis=0)
        y_pool = np.delete(y_pool, query_idx)
        learner_score = learner.score(data, target)
        # print('Accuracy after query no. %d: %f' % (idx + 1, learner_score))
        print('%f' % (learner_score))
Example #13
def run(X_initial, y_initial, n_samples_for_initial, n_queries, estimator):
    np.random.seed(0)
    start_time = time.time()
    # Isolate our examples for our labeled dataset.
    n_labeled_examples = X_initial.shape[0]
    # high is exclusive, so n_labeled_examples keeps the indices in range
    training_indices = np.random.randint(low=0, high=n_labeled_examples, size=n_samples_for_initial)

    X_train = X_initial[training_indices, :]
    y_train = y_initial[training_indices]

    # Isolate the non-training examples we'll be querying.
    X_pool = delete_rows_csr(X_initial, training_indices)
    y_pool = np.delete(y_initial, training_indices)
    # Pre-set our batch sampling to retrieve 3 samples at a time.
    BATCH_SIZE = 3
    preset_batch = partial(uncertainty_batch_sampling, n_instances=BATCH_SIZE)

    # Specify our active learning model.
    learner = ActiveLearner(
        estimator=estimator,
        X_training=X_train,
        y_training=y_train,
        query_strategy=preset_batch
    )

    initial_accuracy = learner.score(X_initial, y_initial)
    print("Initial Accuracy: ", initial_accuracy)
    performance_history = [initial_accuracy]

    f1_score = 0
    index = 0
    while f1_score < 0.65:
        index += 1
        query_index = np.random.choice(y_pool.shape[0], size=1, replace=False)

        # Teach our ActiveLearner model the random record that has been sampled.
        X, y = X_pool[query_index, :], y_pool[query_index]
        learner.teach(X=X, y=y)

        # Remove the queried instance from the unlabeled pool.
        X_pool = delete_rows_csr(X_pool, query_index)
        y_pool = np.delete(y_pool, query_index)

        # Calculate and report our model's f1_score.
        y_pred = learner.predict(X_initial)
        f1_score = metrics.f1_score(y_initial, y_pred, average='micro')

        if index % 100 == 0:
            print('F1 score after {n} training samples: {f1:0.4f}'.format(n=index, f1=f1_score))

        # Save our model's performance for plotting.
        performance_history.append(f1_score)

    print("--- %s seconds ---" % (time.time() - start_time))
    return index
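run depends on a delete_rows_csr helper that is not shown; np.delete does not work on sparse matrices, so a common implementation (assuming X_initial is a scipy CSR matrix) looks like:

import numpy as np
from scipy.sparse import csr_matrix

def delete_rows_csr(mat, indices):
    # keep every row whose index is not in `indices`; boolean-mask
    # indexing of a CSR matrix returns a new CSR matrix
    if not isinstance(mat, csr_matrix):
        raise ValueError("works only for CSR format -- use .tocsr() first")
    mask = np.ones(mat.shape[0], dtype=bool)
    mask[np.asarray(indices)] = False
    return mat[mask]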
Example #14
    def simple_rf(self, data, target, X_train, y_train, X_full, y_full, train_idx):
        # print("START: RF")
        acc = []
        for i in range(self.initial_point * 2, self.initial_point * 2 + self.query_number):
            learner = ActiveLearner(
                estimator=RandomForestClassifier(),
                X_training=X_train[:i], y_training=y_train[:i]
            )
            score = learner.score(X_full, y_full)
            acc.append(score)
            print(' %0.3f' % score, end=",")
        # print("END: RF")
        return acc
Example #15
    def al_pool_proba(self, data, target, X_train, y_train, X_full, y_full, train_idx, classifier, sampling_strategy, proba):
        acc = []
        pre = []
        rec = []
        fs = []
        X_pool = np.delete(data, train_idx, axis=0)
        y_pool = np.delete(target, train_idx)
        learner = ActiveLearner(
            estimator=classifier,
            query_strategy=sampling_strategy,
            X_training=X_train, y_training=y_train
        )

        n_queries = self.query_number
        # n_queries = 1500
        for idx in range(n_queries):
            query_idx, query_instance = learner.query(X_pool)

            labeled_y = y_pool[query_idx].reshape(1, )
            # with probability `proba` (in percent), simulate a noisy oracle
            # by flipping the queried label
            rand_int = randint(0, 100)
            if rand_int <= proba:
                if y_pool[query_idx][0] == 1:
                    y_pool[query_idx][0] = 0
                    labeled_y = np.array((0)).reshape(1, )
                else:
                    y_pool[query_idx][0] = 1
                    labeled_y = np.array((1)).reshape(1, )

            learner.teach(
                X=X_pool[query_idx].reshape(1, -1),
                y=labeled_y
            )
            # remove queried instance from pool
            X_pool = np.delete(X_pool, query_idx, axis=0)
            y_pool = np.delete(y_pool, query_idx)
            learner_score = learner.score(data, target)
            # print('Accuracy after query no. %d: %f' % (idx + 1, learner_wscore))
            precision, recall, fscore, support, accuracy = self.performance_measure(learner, X_full, y_full)
            # learner_score = fscore
            acc.append(accuracy)
            pre.append(precision)
            rec.append(recall)
            fs.append(fscore)
            print('%0.3f' % (learner_score), end=",")
        return acc, pre, rec, fs
Example #16
def active_learner(query_stra, N_query):
  knn = KNeighborsClassifier(n_neighbors=8)
  learner = ActiveLearner(estimator=knn, X_training=X_train, y_training=y_train, query_strategy=query_stra)

  predictions = learner.predict(X_test)

  X_pool = X_test.values
  y_pool = y_test.values

  for index in range(N_query):
    query_index, query_instance = learner.query(X_pool)
    X, y = X_pool[query_index].reshape(1, -1), y_pool[query_index].reshape(1, )
    learner.teach(X=X, y=y)
    X_pool, y_pool = np.delete(X_pool, query_index, axis=0), np.delete(y_pool, query_index)
    
  model_accuracy = learner.score(X_test, y_test)
  print('Accuracy: {acc:0.4f} \n'.format(acc=model_accuracy))
  performance_history.append(model_accuracy)  # assumes a module-level performance_history list
Example #17
def al_pool(data, target, X_train, y_train, X_full, y_full, train_idx):
    X_pool = np.delete(data, train_idx, axis=0)
    y_pool = np.delete(target, train_idx)
    learner = ActiveLearner(estimator=RandomForestClassifier(),
                            X_training=X_train[:200],
                            y_training=y_train[:200])

    n_queries = 1500
    for idx in range(n_queries):
        query_idx, query_instance = learner.query(X_pool)
        learner.teach(X=X_pool[query_idx].reshape(1, -1),
                      y=y_pool[query_idx].reshape(1, ))
        # remove queried instance from pool
        X_pool = np.delete(X_pool, query_idx, axis=0)
        y_pool = np.delete(y_pool, query_idx)
        learner_score = learner.score(data, target)
        # print('Accuracy after query no. %d: %f' % (idx + 1, learner_wscore))
        print('%0.3f' % (learner_score), end=",")
Example #18
def CALLearner(X, y):
    """
    Create an active learner with the CAL query strategy and run the active learner on the given data set.
    Input:
    The data set X and the corresponding labels
    Return:
    The accuracies evaluated on X, y using the model fitted on the labeled data so far, recorded whenever the true label of a data point is queried from the oracle, as a one-dimensional numpy array, and the number of data points queried from the oracle for the true label.
	"""

    # use SVM classifier with default parameters
    clf = SVC()
    # create an active learner with CAL as query strategy. The labeled pool of data is initially not empty, it contains two data points that belong to two classes.
    CAL_learner = ActiveLearner(estimator=clf,
                                query_strategy=CAL,
                                X_training=np.array([[0.5, 4.0], [2.0, 1.0]]),
                                y_training=np.array([[0], [1]]))
    # In worst case, we would need to query all data points in the unlabeled pool.
    n_queries = len(y)

    # use variable i to keep track of the number of data points that are queried from oracle
    i = 0
    # store the accuracies evaluated on X, y whenever querying the true label of a data point from oracle
    accuracies = []

    ### TODO: Write the main loop for running the CAL active learner. Make sure you maintain the labeled and unlabeled pools properly, and calculate the accuracy of the estimator on all given data (X, y) whenever you query a data point from the oracle for the true label.
    S, SLabels = np.array([[0.5, 4.0], [2.0, 1.0]]), np.array([[0], [1]])
    U, ULabels = copy.deepcopy(X), copy.deepcopy(y)

    while (i < n_queries and len(U) != 0):
        idx, y_idx, is_queried = CAL_learner.query(S, SLabels, U, ULabels)
        CAL_learner._add_training_data(U[idx].reshape(1, 2),
                                       y_idx.reshape(1, 1))
        CAL_learner._fit_to_known()
        S, SLabels = np.vstack((S, U[idx].reshape(1, 2))), np.vstack(
            (SLabels, y_idx.reshape(1, 1)))
        U, ULabels = np.delete(U, idx, axis=0), np.delete(ULabels, idx)
        acc = CAL_learner.score(X, y)
        if (is_queried):
            i += 1
            accuracies.append(acc)

    return np.array(accuracies), i
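The CAL query strategy itself is not part of the snippet. CAL (Cohn-Atlas-Ladner) queries points in the disagreement region of the hypotheses consistent with the labels seen so far. A simplified sketch matching the call signature above, offered purely as an assumption (it uses per-label SVM refits as a crude consistency test, not an exact version-space computation):

import numpy as np
from sklearn.svm import SVC

def CAL(classifier, S, SLabels, U, ULabels):
    # pick a random candidate from the unlabeled pool
    idx = np.random.choice(len(U))
    x = U[idx].reshape(1, -1)
    # fit one SVM per hypothetical label of x; a labeling is "consistent"
    # if the refit model still classifies the labeled pool S correctly
    consistent = {}
    for label in (0, 1):
        clf = SVC()
        clf.fit(np.vstack((S, x)), np.append(SLabels.ravel(), label))
        consistent[label] = np.array_equal(clf.predict(S), SLabels.ravel())
    if consistent[0] and consistent[1]:
        # x lies in the disagreement region: ask the oracle for its label
        return idx, np.array(ULabels[idx]), True
    # only one labeling is consistent: infer the label without querying
    inferred = 0 if consistent[0] else 1
    return idx, np.array(inferred), False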
Example #19
def run_exp_music(intup):
    global X_train, X_test, y_train, y_test
    rep, i, p = intup

    X_seed, X_pool = X_train[:n_seed], X_train[n_seed:]
    y_seed, y_pool = y_train[:n_seed], y_train[n_seed:]

    # Initializing the learner
    learner = ActiveLearner(
        estimator=RandomForestClassifier(n_estimators=10),
        query_strategy=entropy_sampling,
        X_training=X_seed, y_training=y_seed
    )

    # Run active learning and record history of test accuracy
    history = np.zeros(query_budget - n_seed)
    for j in range(query_budget - n_seed):
        query_idx, query_inst = learner.query(X_pool)
        learner.teach(X_pool[query_idx], y_pool[query_idx])
        history[j] = learner.score(X_test, y_test)
    return history
Example #20
def RandomLearner(X, y):
    """
    Create an active learner with random query strategy and run the active learner on the given data set. You should implement this also using modAL. Use an SVM classifier with default parameters as the estimator.
    Input:
    The data set X and the corresponding labels
    Return:
    The accuracies evaluated on X, y whenever querying the true label of a data point from the oracle, as a one-dimensional numpy array, and the number of data points queried from the oracle for the true label.
    """
    random_learner = ActiveLearner(estimator=SVC(gamma='scale'),
                                   query_strategy=RandomQuery,
                                   X_training=np.array([[0.5, 4.0], [2.0,
                                                                     1.0]]),
                                   y_training=np.array([[0], [1]]))

    accuracies = []
    n_queries = len(y)
    i = 0
    while i < n_queries:
        if len(random_learner.y_training) == 2:
            U = X
            ULabels = y
        else:
            U = np.delete(U, query_idx, axis=0)
            ULabels = np.delete(ULabels, query_idx)
            if not len(U):
                break

        query_idx, query_instance = random_learner.query(U)

        # add to training data
        random_learner._add_training_data(U[query_idx, :].reshape(-1, 2),
                                          ULabels[query_idx].reshape(-1, 1))
        # fit on training data
        random_learner._fit_to_known()
        # calculate the accuracy of the learned estimator on the entire dataset
        accuracies.append(random_learner.score(X, y))
        i += 1

    return np.array(accuracies), i
Example #21
def run_exp(intup):
    global X_train, X_test, y_train, y_test
    rep, i, p = intup

    # Make noisy data, simulate pool-based case
    X_train_noisy = utils.add_gaussian_noise(X_train, p)
    y_train_noisy = y_train  # utils.flip_labels(y_train, p)
    X_seed, X_pool = X_train_noisy[:n_seed], X_train_noisy[n_seed:]
    y_seed, y_pool = y_train_noisy[:n_seed], y_train_noisy[n_seed:]

    # Initializing the learner
    learner = ActiveLearner(
        estimator=RandomForestClassifier(n_estimators=10),
        query_strategy=entropy_sampling,
        X_training=X_seed, y_training=y_seed
    )

    # Run active learning and record history of test accuracy
    history = np.zeros(query_budget - n_seed)
    for j in range(query_budget - n_seed):
        query_idx, query_inst = learner.query(X_pool)
        learner.teach(X_pool[query_idx], y_pool[query_idx])
        history[j] = learner.score(X_test, y_test)
    return history
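Both run_exp variants rely on module-level globals (n_seed, query_budget) and, here, on utils.add_gaussian_noise; a plausible sketch of that helper, assuming p is the noise scale:

import numpy as np

def add_gaussian_noise(X, p):
    # perturb every feature with zero-mean Gaussian noise of standard deviation p
    return X + np.random.normal(loc=0.0, scale=p, size=X.shape)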
Example #22
def train_model(data, project_id, estimator, query_strategy):
    log.info('train model')
    start_time = time.time()

    x_test = data['x_test']
    y_test = data['y_test']

    # initial model train model
    if estimator == cst['ESTIMATOR']['KNC']:
        estimator = KNeighborsClassifier()
    elif estimator == cst['ESTIMATOR']['GBC']:
        estimator = GradientBoostingClassifier()
    elif estimator == cst['ESTIMATOR']['RFC']:
        estimator = RandomForestClassifier()

    if query_strategy == cst['QUERY_STRATEGY']['POOL_BASED_SAMPLING']['PB_UNS']:
        query_strategy = uncertainty_sampling
    elif query_strategy == cst['QUERY_STRATEGY']['POOL_BASED_SAMPLING']['PB_MS']:
        query_strategy = margin_sampling
    elif query_strategy == cst['QUERY_STRATEGY']['POOL_BASED_SAMPLING']['PB_ES']:
        query_strategy = entropy_sampling
    elif query_strategy == cst['QUERY_STRATEGY']['RANKED_BATCH_MODE']['RBM_UNBS']:
        batch_size = 10
        query_strategy = partial(uncertainty_batch_sampling, n_instances=batch_size)

    learner = ActiveLearner(estimator=estimator, query_strategy=query_strategy, X_training=data['x_teach'], y_training=data['y_teach'])

    # save the model to disk
    model_name = project_id + "_model.pkl"
    with open('./' + modelDir + model_name, 'wb') as knn_pickle:
        pickle.dump(learner, knn_pickle)

    initial_accuracy = learner.score(x_test, y_test)

    log.info(f'Initial accuracy: { initial_accuracy } train model end (in secs): {int(time.time() - start_time)}')
    return {"accuracy": initial_accuracy, "model": model_name, "test": data['id_test']}
Example #23
learner = ActiveLearner(estimator=RandomForestClassifier(),
                        query_strategy=entropy_sampling,
                        X_training=X_train,
                        y_training=y_train)
learners.append(learner)

weights = np.ones(shape=len(learners)) / len(learners)

n_queries = 4000
loop = 0
strategy_count = np.zeros(len(learners))
x = []
# while True:
score = []
for learner in learners:
    score.append(learner.score(train_features, train_labels))
unqueried_score = np.min(score)
performance_history = [unqueried_score]
for _ in range(n_queries):
    opinions = []
    learner_id = 0
    for learner in learners:
        if learner_id == 1:
            query_idx, query_instance = learner.query(X_pool, n_instances=1)
        else:
            query_idx, query_instance = learner.query(X_pool)  # -> Here

        opinions.append(query_idx)
        learner_id += 1
    opt_idx = np.random.choice(range(len(opinions)), p=weights)

Example #24
def active_learn(df1, first_item_index_of_each_category):
    train_idx = first_item_index_of_each_category
    # X_train = iris['data'][train_idx]
    # y_train = iris['target'][train_idx]

    # initial training data
    data = df1.values[:, 1:]
    target = df1['label'].values

    X_full = df1.values[:, 1:]
    y_full = df1['label'].values

    X_train = df1.values[:, 1:][
        train_idx]  # features start at the second column; the first column is the label
    y_train = df1['label'].values[train_idx]

    # with plt.style.context('seaborn-white'):
    #     pca = PCA(n_components=2).fit_transform(data)
    #     plt.figure(figsize=(7, 7))
    #     plt.scatter(x=pca[:, 0], y=pca[:, 1], c=y_train, cmap='viridis', s=50)
    #     plt.title('The iris dataset')
    #     plt.show()

    # generating the pool
    X_pool = np.delete(data, train_idx, axis=0)
    y_pool = np.delete(target, train_idx)

    # initializing the active learner
    learner = ActiveLearner(estimator=RandomForestClassifier(),
                            query_strategy=entropy_sampling,
                            X_training=X_train,
                            y_training=y_train)

    # print('Initial prediction accuracy: %f' % learner.score(X_full, y_full))
    print('%f' % learner.score(X_full, y_full))
    index = 0
    performance_array = []
    # learning until the accuracy reaches a given threshold
    while learner.score(X_full, y_full) < 0.90:
        stream_idx = np.random.choice(range(len(X_full)))
        if classifier_uncertainty(learner, X_full[stream_idx].reshape(
                1, -1)) >= 0.4:
            learner.teach(X_full[stream_idx].reshape(1, -1),
                          y_full[stream_idx].reshape(-1, ))
            learner_score = learner.score(X_full, y_full)
            # print('Item no. %d queried, new accuracy: %f' % (stream_idx, learner_score))
            print('%f' % (learner_score))
            if index == 505:
                break
            if (index % 100 == 0):
                performance_array.append(learner_score)
            index = index + 1
    percentage_increase(performance_array)

    # visualizing initial prediction
    # with plt.style.context('seaborn-white'):
    #     plt.figure(figsize=(7, 7))
    #     prediction = learner.predict(data)
    #     plt.scatter(x=pca[:, 0], y=pca[:, 1], c=prediction, cmap='viridis', s=50)
    #     plt.title('Initial accuracy: %f' % learner.score(data, target))
    #     plt.show()

    # pool-based sampling
    # n_queries = 502
    # performance_array = []
    # for idx in range(n_queries):
    #     query_idx, query_instance = learner.query(X_pool)
    #     learner.teach(
    #         X=X_pool[query_idx].reshape(1, -1),
    #         y=y_pool[query_idx].reshape(1, )
    #     )
    #     # remove queried instance from pool
    #     X_pool = np.delete(X_pool, query_idx, axis=0)
    #     y_pool = np.delete(y_pool, query_idx)
    #     learner_score = learner.score(data, target)
    #     print('Accuracy after query no. %d: %f' % (idx + 1, learner_score))
    #     if (idx % 100 == 0):
    #         performance_array.append(learner_score)
    #
    # percentage_increase(performance_array)

    # plotting final prediction
    # with plt.style.context('seaborn-white'):
    #     plt.figure(figsize=(7, 7))
    #     prediction = learner.predict(data)
    #     plt.scatter(x=pca[:, 0], y=pca[:, 1], c=prediction, cmap='viridis', s=50)
    #     plt.title(
    #         'Classification accuracy after %i queries: %f' % (n_queries, learner.score(data,target)))
    #     plt.show()
    y = 0
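The percentage_increase helper called above is not included; a plausible sketch, assuming it reports the relative change between successive recorded scores:

def percentage_increase(performance_array):
    # relative improvement between consecutive checkpoints, in percent
    for prev, curr in zip(performance_array, performance_array[1:]):
        print('%0.2f%%' % (100.0 * (curr - prev) / prev))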
Example #25
# create an ActiveLearner instance
learner = ActiveLearner(estimator=RandomForestClassifier(),
                        X_training=X_train,
                        y_training=y_train)
initial_prediction = learner.predict_proba(X_full)[:, 1].reshape(
    im_height, im_width)

n_queries = 100
uncertainty_sampling_accuracy = list()
for round_idx in range(n_queries):
    query_idx, query_inst = learner.query(X_pool)
    learner.teach(X_pool[query_idx].reshape(1, -1),
                  y_pool[query_idx].reshape(-1, ))
    X_pool = np.delete(X_pool, query_idx, axis=0)
    y_pool = np.delete(y_pool, query_idx)
    uncertainty_sampling_accuracy.append(learner.score(X_full, y_full))

final_prediction = learner.predict_proba(X_full)[:, 1].reshape(
    im_height, im_width)
"""
---------------------------------
 comparison with random sampling
---------------------------------
"""


def random_sampling(classifier, X):
    # pick a pool index uniformly at random, ignoring the classifier
    query_idx = np.random.choice(range(len(X)))
    return query_idx, X[query_idx]
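The snippet ends before the comparison loop itself. A minimal completion under stated assumptions (initial_idx is taken to be the index set used to build X_train/y_train earlier in this script, and random_sampling is the strategy defined above):

random_learner = ActiveLearner(estimator=RandomForestClassifier(),
                               query_strategy=random_sampling,
                               X_training=X_train, y_training=y_train)

X_pool_rnd = np.delete(X_full, initial_idx, axis=0)  # initial_idx: assumed global
y_pool_rnd = np.delete(y_full, initial_idx)

random_sampling_accuracy = list()
for round_idx in range(n_queries):
    query_idx, query_inst = random_learner.query(X_pool_rnd)
    random_learner.teach(X_pool_rnd[query_idx].reshape(1, -1),
                         y_pool_rnd[query_idx].reshape(-1, ))
    X_pool_rnd = np.delete(X_pool_rnd, query_idx, axis=0)
    y_pool_rnd = np.delete(y_pool_rnd, query_idx)
    random_sampling_accuracy.append(random_learner.score(X_full, y_full))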

Example #26
from modAL.models import ActiveLearner

# Specify our core estimator along with its active learning model.
knn = KNeighborsClassifier(n_neighbors=3)  # note: defined but not used below
learner = ActiveLearner(estimator=RandomForestClassifier(),
                        query_strategy=uncertainty_sampling,
                        X_training=X_train, y_training=y_train)


# Isolate the data we'll need for plotting.
predictions = learner.predict(X_raw)
is_correct = (predictions == y_raw)


# Record our learner's score on the raw data.
unqueried_score = learner.score(X_raw, y_raw)

# Plot our classification results.
'''
fig, ax = plt.subplots(figsize=(8.5, 6), dpi=130)
ax.scatter(x=x_component[is_correct],  y=y_component[is_correct],  c='g', marker='+', label='Correct',   alpha=8/10)
ax.scatter(x=x_component[~is_correct], y=y_component[~is_correct], c='r', marker='x', label='Incorrect', alpha=8/10)
ax.legend(loc='lower right')
ax.set_title("ActiveLearner class predictions (Accuracy: {score:.3f})".format(score=unqueried_score))
plt.show()
'''

N_QUERIES = 20
performance_history = [unqueried_score]

# Allow our model to query our unlabeled dataset for the most informative points.
Example #27
    def al_rank(self,
                data,
                target,
                X_train,
                y_train,
                X_full,
                y_full,
                train_idx,
                N_RAW_SAMPLES=80,
                proba=5,
                proba_e=5,
                proba_n=20,
                e=1,
                n=4):
        acc = []
        pre = []
        rec = []
        fs = []
        BATCH_SIZE = 5
        preset_batch = partial(uncertainty_batch_sampling,
                               n_instances=BATCH_SIZE)

        learner = ActiveLearner(estimator=RandomForestClassifier(),
                                X_training=X_train,
                                y_training=y_train,
                                query_strategy=preset_batch)

        # N_RAW_SAMPLES = 80
        N_QUERIES = N_RAW_SAMPLES // BATCH_SIZE
        unqueried_score = learner.score(X_full, y_full)
        performance_history = [unqueried_score]

        # Isolate our examples for our labeled dataset.
        n_labeled_examples = X_full.shape[0]
        training_indices = np.random.randint(low=0,
                                             high=n_labeled_examples,
                                             size=5)

        X_train = X_full[training_indices]
        y_train = y_full[training_indices]

        # Isolate the non-training examples we'll be querying.
        X_pool = np.delete(X_full, training_indices, axis=0)
        y_pool = np.delete(y_full, training_indices, axis=0)

        for index in range(N_QUERIES):
            query_index, query_instance = learner.query(X_pool)

            # Teach our ActiveLearner model the record it has requested.
            X, y = X_pool[query_index], y_pool[query_index]

            labeled_y = np.array([])

            # flip the first e labels with probability proba_e (percent) and
            # the next n labels with probability proba_n, simulating noisy annotators
            for i in range(0, e):
                if (randint(0, 100) <= proba_e):
                    if (y_pool[query_index[i]] == 1):
                        y_pool[query_index[i]] = 0
                        labeled_y = np.append(labeled_y, 0)
                    else:
                        labeled_y = np.append(labeled_y, 1)
                else:
                    labeled_y = np.append(labeled_y, y_pool[query_index[i]])
            for j in range(0, n):
                i = j + e
                if (randint(0, 100) <= proba_n):
                    if (y_pool[query_index[i]] == 1):
                        y_pool[query_index[i]] = 0
                        labeled_y = np.append(labeled_y, 0)
                    else:
                        labeled_y = np.append(labeled_y, 1)
                else:
                    labeled_y = np.append(labeled_y, y_pool[query_index[i]])

            # labeled_y =y
            # rand_int = randint(0, 100)
            # if (rand_int <= proba):
            #     labeled_y = np.array([])
            #     for idx in query_index:
            #         if (y_pool[idx] == 1):
            #             y_pool[idx] = 0
            #             labeled_y = np.append(labeled_y, 0)
            #         else:
            #             y_pool[idx] = 1
            #             # labeled_y = np.array((1)).reshape(1, )
            #             labeled_y = np.append(labeled_y, 1)

            learner.teach(X=X, y=labeled_y)

            # learner.teach(X=X, y=y)

            # Remove the queried instance from the unlabeled pool.
            X_pool = np.delete(X_pool, query_index, axis=0)
            y_pool = np.delete(y_pool, query_index)

            # Calculate and report our model's accuracy.
            model_accuracy = learner.score(X_full, y_full)
            print('Accuracy after query {n}: {acc:0.4f}'.format(
                n=index + 1, acc=model_accuracy))
            precision, recall, fscore, support, accuracy = self.performance_measure(
                learner, X_full, y_full)
            learner_score = accuracy
            acc.append(learner_score)
            pre.append(precision)
            rec.append(recall)
            fs.append(fscore)
            # Save our model's performance for plotting.
            performance_history.append(model_accuracy)

        return acc, pre, rec, fs
Example #28
class ActiveKNN:
    """A KNN machine learning model using active learning with modAL package

    Attributes:
        amine:          A string representing the amine that the KNN model makes predictions for.
        n_neighbors:    An integer representing the number of neighbors to classify using KNN model.
        model:          A KNeighborClassifier object as the classifier model given the number of neighbors to classify
                            with.
        metrics:        A dictionary to store the performance metrics locally. It has the format of
                            {'metric_name': [metric_value]}.
        verbose:        A boolean representing whether it prints out additional information to the terminal.
        pool_data:      A numpy array representing all the data from the dataset.
        pool_labels:    A numpy array representing all the labels from the dataset.
        x_t:            A numpy array representing the training data used for model training.
        y_t:            A numpy array representing the training labels used for model training.
        x_v:            A numpy array representing the testing data used for active learning.
        y_v:            A numpy array representing the testing labels used for active learning.
        learner:        An ActiveLearner to conduct active learning with. See modAL documentation for more details.
    """
    def __init__(self, amine=None, n_neighbors=2, verbose=True):
        """Initialize the ActiveKNN object."""
        self.amine = amine
        self.n_neighbors = n_neighbors
        self.model = KNeighborsClassifier(n_neighbors=self.n_neighbors)
        self.metrics = {
            'accuracies': [],
            'precisions': [],
            'recalls': [],
            'bcrs': [],
            'confusion_matrices': []
        }
        self.verbose = verbose

    def load_dataset(self, x_t, y_t, x_v, y_v, all_data, all_labels):
        """Load the input training and validation data and labels into the model.

        Args:
            x_t:                A 2-D numpy array representing the training data.
            y_t:                A 2-D numpy array representing the training labels.
            x_v:                A 2-D numpy array representing the validation data.
            y_v:                A 2-D numpy array representing the validation labels.
            all_data:           A 2-D numpy array representing all the data in the active learning pool.
            all_labels:         A 2-D numpy array representing all the labels in the active learning pool.

        Returns:
            N/A
        """

        self.x_t, self.x_v, self.y_t, self.y_v = x_t, y_t, x_v, y_v

        self.pool_data = all_data
        self.pool_labels = all_labels

        if self.verbose:
            print(f'The training data has dimension {self.x_t.shape}.')
            print(f'The training labels have dimension {self.y_t.shape}.')
            print(f'The testing data has dimension {self.x_v.shape}.')
            print(f'The testing labels have dimension {self.y_v.shape}.')

    def train(self):
        """Train the KNN model by setting up the ActiveLearner."""

        self.learner = ActiveLearner(estimator=self.model,
                                     X_training=self.x_t,
                                     y_training=self.y_t)
        # Evaluate zero-point performance
        self.evaluate()

    def active_learning(self, num_iter=None, to_params=True):
        """ The active learning loop

        This is the active learning loop that wraps the KNN model: it looks
        for the most uncertain point and gives the model that point's label to train on.

        Args:
            num_iter:   An integer that is the number of iterations.
                        Default = None
            to_params:  A boolean that decides whether to store the metrics to the dictionary;
                        for details, see the "store_metrics_to_params" function.
                        Default = True

        return: N/A
        """
        num_iter = num_iter if num_iter else self.x_v.shape[0]

        for _ in range(num_iter):
            # Query the most uncertain point from the active learning pool
            query_index, query_instance = self.learner.query(self.x_v)

            # Teach our ActiveLearner model the record it has requested.
            uncertain_data, uncertain_label = self.x_v[query_index].reshape(
                1, -1), self.y_v[query_index].reshape(1, )
            self.learner.teach(X=uncertain_data, y=uncertain_label)

            self.evaluate()

            # Move the queried instance from the unlabeled pool to the training set.
            self.x_t = np.append(self.x_t, uncertain_data).reshape(
                -1, self.pool_data.shape[1])
            self.y_t = np.append(self.y_t, uncertain_label)
            self.x_v = np.delete(self.x_v, query_index, axis=0)
            self.y_v = np.delete(self.y_v, query_index)

        if to_params:
            self.store_metrics_to_params()

    def evaluate(self, store=True):
        """Evaluation of the model

        Args:
            store:  A boolean that decides whether to store the model's performance metrics.
                    Default = True

        return: N/A
        """

        # Calculate and report our model's accuracy.
        accuracy = self.learner.score(self.pool_data, self.pool_labels)

        preds = self.learner.predict(self.pool_data)
        cm = confusion_matrix(self.pool_labels, preds)

        # To prevent a NaN value for precision, we set it to 1 and print a warning message
        if cm[1][1] + cm[0][1] != 0:
            precision = cm[1][1] / (cm[1][1] + cm[0][1])
        else:
            precision = 1.0
            print('WARNING: zero division during precision calculation')

        recall = cm[1][1] / (cm[1][1] + cm[1][0])
        true_negative = cm[0][0] / (cm[0][0] + cm[0][1])
        bcr = 0.5 * (recall + true_negative)

        if store:
            self.store_metrics_to_model(cm, accuracy, precision, recall, bcr)

    def store_metrics_to_model(self, cm, accuracy, precision, recall, bcr):
        """Store the performance metrics

        The metrics are specifically the confusion matrices, accuracies,
        precisions, recalls and balanced classification rates.

        Args:
            cm:             A numpy array representing the confusion matrix given our predicted labels and the actual
                                corresponding labels. It's a 2x2 matrix for the drp_chem model.
            accuracy:       A float representing the accuracy rate of the model: the rate of correctly predicted
                                reactions out of all reactions.
            precision:      A float representing the precision rate of the model: the rate of the number of actually
                                successful reactions out of all the reactions predicted to be successful.
            recall:         A float representing the recall rate of the model: the rate of the number of reactions
                                predicted to be successful out of all the actual successful reactions.
            bcr:            A float representing the balanced classification rate of the model. It's the average value
                                of recall rate and true negative rate.

        return: N/A
        """

        self.metrics['confusion_matrices'].append(cm)
        self.metrics['accuracies'].append(accuracy)
        self.metrics['precisions'].append(precision)
        self.metrics['recalls'].append(recall)
        self.metrics['bcrs'].append(bcr)

        if self.verbose:
            print(cm)
            print('accuracy for model is', accuracy)
            print('precision for model is', precision)
            print('recall for model is', recall)
            print('balanced classification rate for model is', bcr)

    def store_metrics_to_params(self):
        """Store the metrics results to the model's parameters dictionary

        Use the same logic of saving the metrics for each model.
        Dump the cross validation statistics to a pickle file.
        """

        model = 'KNN'

        with open(os.path.join("./data", "cv_statistics.pkl"), "rb") as f:
            stats_dict = pickle.load(f)

        stats_dict[model]['accuracies'].append(self.metrics['accuracies'])
        stats_dict[model]['confusion_matrices'].append(
            self.metrics['confusion_matrices'])
        stats_dict[model]['precisions'].append(self.metrics['precisions'])
        stats_dict[model]['recalls'].append(self.metrics['recalls'])
        stats_dict[model]['bcrs'].append(self.metrics['bcrs'])

        # Save this dictionary in case we need it later
        with open(os.path.join("./data", "cv_statistics.pkl"), "wb") as f:
            pickle.dump(stats_dict, f)

    def save_model(self, k_shot, n_way, meta):
        """Save the data used to train, validate and test the model to designated folder

        Args:
            k_shot:                 An integer representing the number of training samples per class.
            n_way:                  An integer representing the number of classes per task.
            meta:                   A boolean representing if it will be trained under option 1 or option 2.
                                        Option 1 is train with observations of other tasks and validate on the
                                        task-specific observations.
                                        Option 2 is to train and validate on the task-specific observations.

        Returns:
            N/A
        """

        # Indicate which option we used the data for
        option = 2 if meta else 1

        # Set up the main destination folder for the model
        dst_root = './KNN_few_shot/option_{0:d}'.format(option)
        if not os.path.exists(dst_root):
            os.makedirs(dst_root)
            print('No folder for KNN model storage found')
            print(f'Making folder to store KNN model at {dst_root}')

        # Set up the model specific folder
        model_folder = '{0:s}/KNN_{1:d}_shot_{2:d}_way_option_{3:d}_{4:s}'.format(
            dst_root, k_shot, n_way, option, self.amine)
        if not os.path.exists(model_folder):
            os.makedirs(model_folder)
            print('No folder for KNN model storage found')
            print(f'Making folder to store KNN model of amine {self.amine} at')
        else:
            print(
                f'Found existing folder. Model of amine {self.amine} will be stored at'
            )
        print(model_folder)

        # Dump the model into the designated folder
        file_name = "KNN_{0:s}_option_{1:d}.pkl".format(self.amine, option)
        with open(os.path.join(model_folder, file_name), "wb") as f:
            pickle.dump([self], f, -1)

    def __str__(self):
        return 'A {0:d}-neighbor KNN model for amine {1:s} using active learning'.format(
            self.n_neighbors, self.amine)
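A hedged usage sketch of the class above; the array names are placeholders for splits prepared elsewhere, with shapes as described in load_dataset's docstring:

# x_t, y_t, x_v, y_v, all_data, all_labels: assumed numpy arrays
knn = ActiveKNN(amine='some_amine', n_neighbors=2, verbose=True)
knn.load_dataset(x_t, y_t, x_v, y_v, all_data, all_labels)
knn.train()                                        # fit + zero-point evaluation
knn.active_learning(num_iter=10, to_params=False)  # 10 queries; keep metrics local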
Example #29
# generating the pool
X_pool = np.delete(iris['data'], train_idx, axis=0)
y_pool = np.delete(iris['target'], train_idx)

# initializing the active learner
learner = ActiveLearner(
    estimator=KNeighborsClassifier(n_neighbors=3),
    X_training=X_train, y_training=y_train
)

# visualizing initial prediction
with plt.style.context('seaborn-white'):
    plt.figure(figsize=(7, 7))
    prediction = learner.predict(iris['data'])
    plt.scatter(x=pca[:, 0], y=pca[:, 1], c=prediction, cmap='viridis', s=50)
    plt.title('Initial accuracy: %f' % learner.score(iris['data'], iris['target']))
    plt.show()

print('Accuracy before active learning: %f' % learner.score(iris['data'], iris['target']))

# pool-based sampling
n_queries = 20
for idx in range(n_queries):
    query_idx, query_instance = learner.query(X_pool)
    learner.teach(
        X=X_pool[query_idx].reshape(1, -1),
        y=y_pool[query_idx].reshape(1, )
    )
    # remove queried instance from pool
    X_pool = np.delete(X_pool, query_idx, axis=0)
    y_pool = np.delete(y_pool, query_idx)
Example #30
# create the data to stream from
X_full = np.transpose(
    [np.tile(np.asarray(range(im.shape[0])), im.shape[1]),
     np.repeat(np.asarray(range(im.shape[1])), im.shape[0])]
)
# map the intensity values against the grid
y_full = np.asarray([im[P[0], P[1]] for P in X_full])

# assembling initial training set
n_initial = 5
initial_idx = np.random.choice(range(len(X_full)), size=n_initial, replace=False)
X_train, y_train = X_full[initial_idx], y_full[initial_idx]

# initialize the learner
learner = ActiveLearner(
    estimator=RandomForestClassifier(),
    X_training=X_train, y_training=y_train
)
print('Initial prediction accuracy: %f' % learner.score(X_full, y_full))

# visualizing initial prediction
with plt.style.context('seaborn-white'):
    plt.figure(figsize=(7, 7))
    prediction = learner.predict_proba(X_full)[:, 1]
    plt.imshow(prediction.reshape(im_width, im_height))
    plt.title('Initial prediction accuracy: %f' % learner.score(X_full, y_full))
    plt.show()

"""
The instances are randomly selected one by one; if an instance's uncertainty
is above a threshold, its label is requested and shown to the learner. The
process continues until the learner reaches a previously defined accuracy.
"""
Example #31
# create the data to stream from
X_full = np.transpose(
    [np.tile(np.asarray(range(im.shape[0])), im.shape[1]),
     np.repeat(np.asarray(range(im.shape[1])), im.shape[0])]
)
# map the intensity values against the grid
y_full = np.asarray([im[P[0], P[1]] for P in X_full])

# assembling initial training set
n_initial = 5
initial_idx = np.random.choice(range(len(X_full)), size=n_initial, replace=False)
X_train, y_train = X_full[initial_idx], y_full[initial_idx]

# initialize the learner
learner = ActiveLearner(
    estimator=RandomForestClassifier(),
    X_training=X_train, y_training=y_train
)

"""
The instances are randomly selected one by one; if an instance's uncertainty
is above a threshold, its label is requested and shown to the learner. The
process continues until the learner reaches a previously defined accuracy.
"""

# learning until the accuracy reaches a given threshold
while learner.score(X_full, y_full) < 0.7:
    stream_idx = np.random.choice(range(len(X_full)))
    if classifier_uncertainty(learner, X_full[stream_idx].reshape(1, -1)) >= 0.4:
        learner.teach(X_full[stream_idx].reshape(1, -1), y_full[stream_idx].reshape(-1, ))
Example #32
# generating the pool
X_pool = np.delete(data, train_idx, axis=0)
y_pool = np.delete(target, train_idx)

# initializing the active learner
learner = ActiveLearner(estimator=RandomForestClassifier(),
                        X_training=X_train,
                        y_training=y_train)

# visualizing initial prediction
with plt.style.context('seaborn-white'):
    plt.figure(figsize=(7, 7))
    prediction = learner.predict(data)
    plt.scatter(x=pca[:, 0], y=pca[:, 1], c=prediction, cmap='viridis', s=50)
    plt.title('Initial accuracy: %f' % learner.score(data, target))
    plt.show()

print('Accuracy before active learning: %f' % learner.score(data, target))

# pool-based sampling
n_queries = 30
for idx in range(n_queries):
    query_idx, query_instance = learner.query(X_pool)
    learner.teach(X=X_pool[query_idx].reshape(1, -1),
                  y=y_pool[query_idx].reshape(1, ))
    # remove queried instance from pool
    X_pool = np.delete(X_pool, query_idx, axis=0)
    y_pool = np.delete(y_pool, query_idx)
    print('Accuracy after query no. %d: %f' %
          (idx + 1, learner.score(data, target)))
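Example #32 passes no query_strategy, so the learner falls back to modAL's default, uncertainty sampling. A sketch of the equivalent explicit initialization, assuming a modAL version where uncertainty_sampling is the default:

# equivalent initialization with the default strategy spelled out
from modAL.uncertainty import uncertainty_sampling

learner = ActiveLearner(estimator=RandomForestClassifier(),
                        query_strategy=uncertainty_sampling,
                        X_training=X_train,
                        y_training=y_train)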
Example #33
class ActiveLearningClassifier:
    """Base machine learning classifier using active learning with modAL package

    Attributes:
        amine:              A string representing the amine that the model is used to make predictions for.
        config:             A dictionary representing the hyper-parameters of the model
        metrics:            A dictionary to store the performance metrics locally. It has the format of
                                {'metric_name': [metric_value]}.
        verbose:            A boolean representing whether the model prints out additional information to the terminal
                                or not.
        stats_path:         A Path object representing the directory of the stats dictionary if we are not running
                                multi-processing.
        result_dict:        A dictionary representing the result dictionary used during multi-thread processing.
        classifier_name:    A string representing the name of the generic classifier.
        model_name:         A string representing the name of the specific model for future plotting.
        all_data:           A numpy array representing all the data from the dataset.
        all_labels:         A numpy array representing all the labels from the dataset.
        x_t:                A numpy array representing the training data used for model training.
        y_t:                A numpy array representing the training labels used for model training.
        x_v:                A numpy array representing the testing data used for active learning.
        y_v:                A numpy array representing the testing labels used for active learning.
        learner:            An ActiveLearner to conduct active learning with. See modAL documentation for more details.
    """
    def __init__(self,
                 amine=None,
                 config=None,
                 verbose=True,
                 stats_path=None,
                 result_dict=None,
                 classifier_name='Base Classifier',
                 model_name='Base Classifier'):
        """initialization of the class"""
        self.amine = amine

        self.config = config

        self.metrics = defaultdict(dict)
        self.verbose = verbose
        self.stats_path = stats_path
        self.result_dict = result_dict
        self.classifier_name = classifier_name
        self.model_name = model_name

    def load_dataset(self, set_id, x_t, y_t, x_v, y_v, all_data, all_labels):
        """Load the input training and validation data and labels into the model.

        Args:
            set_id:             An integer representing the id of the random draw that we are loading.
            x_t:                A 2-D numpy array representing the training data.
            y_t:                A 2-D numpy array representing the training labels.
            x_v:                A 2-D numpy array representing the validation data.
            y_v:                A 2-D numpy array representing the validation labels.
            all_data:           A 2-D numpy array representing all the data in the active learning pool.
            all_labels:         A 2-D numpy array representing all the labels in the active learning pool.
        """
        self.draw_id = set_id
        self.metrics[self.draw_id] = defaultdict(list)

        self.x_t, self.y_t, self.x_v, self.y_v = x_t, y_t, x_v, y_v

        self.all_data = all_data
        self.all_labels = all_labels

        if self.verbose:
            print(f'The training data has dimensions {self.x_t.shape}.')
            print(f'The training labels have dimensions {self.y_t.shape}.')
            print(f'The testing data has dimensions {self.x_v.shape}.')
            print(f'The testing labels have dimensions {self.y_v.shape}.')

    def train(self, warning=True):
        """Train the KNN model by setting up the ActiveLearner."""

        self.learner = ActiveLearner(estimator=self.model,
                                     X_training=self.x_t,
                                     y_training=self.y_t)
        # Evaluate zero-point performance
        self.evaluate(warning=warning)

    def active_learning(self, num_iter=None, warning=True):
        """The active learning loop

        This loop repeatedly queries the most uncertain point in the pool and
        gives the model that point's label to train on.

        Args:
            num_iter:   An integer that is the number of iterations.
                        Default = None
            warning:    A boolean that decides whether to declare a zero-division warning or not.
                        Default = True.
        """

        num_iter = num_iter if num_iter else self.x_v.shape[0]

        for _ in range(num_iter):
            # Query the most uncertain point from the active learning pool
            query_index, query_instance = self.learner.query(self.x_v)

            # Teach our ActiveLearner model the record it has requested.
            uncertain_data, uncertain_label = self.x_v[query_index].reshape(
                1, -1), self.y_v[query_index].reshape(1, )
            self.learner.teach(X=uncertain_data, y=uncertain_label)

            self.evaluate(warning=warning)

            # Move the queried instance from the unlabeled pool to the training set
            self.x_t = np.append(self.x_t, uncertain_data).reshape(
                -1, self.all_data.shape[1])
            self.y_t = np.append(self.y_t, uncertain_label)
            self.x_v = np.delete(self.x_v, query_index, axis=0)
            self.y_v = np.delete(self.y_v, query_index)

    def evaluate(self, warning=True, store=True):
        """Evaluation of the model

        Args:
            warning:    A boolean that decides whether to warn about the zero division issue or not.
                            Default = True
            store:      A boolean that decides whether to store the model's performance metrics.
                            Default = True
        """

        # Calculate and report our model's accuracy.
        accuracy = self.learner.score(self.all_data, self.all_labels)

        self.y_preds = self.learner.predict(self.all_data)

        cm = confusion_matrix(self.all_labels, self.y_preds)
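        # sklearn's confusion_matrix puts true labels on rows and predicted
        # labels on columns, so cm[1][1] counts true positives and cm[0][1]
        # counts false positives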

        # To prevent a NaN precision when there are no predicted positives, set it to 1.0 and warn
        if cm[1][1] + cm[0][1] != 0:
            precision = cm[1][1] / (cm[1][1] + cm[0][1])
        else:
            precision = 1.0
            if warning:
                print('WARNING: zero division during precision calculation')

        recall = cm[1][1] / (cm[1][1] + cm[1][0])
        true_negative = cm[0][0] / (cm[0][0] + cm[0][1])
        bcr = 0.5 * (recall + true_negative)
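        # NOTE: unlike precision, recall and true_negative are unguarded here
        # and will divide by zero if a class is absent from the labels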

        if store:
            self.store_metrics_to_model(cm, accuracy, precision, recall, bcr)

    def store_metrics_to_model(self, cm, accuracy, precision, recall, bcr):
        """Store the performance metrics

        The metrics are specifically the confusion matrices, accuracies,
        precisions, recalls and balanced classification rates.

        Args:
            cm:             A numpy array representing the confusion matrix given our predicted labels and the actual
                                corresponding labels. It's a 2x2 matrix for the drp_chem model.
            accuracy:       A float representing the accuracy of the model: the fraction of reactions
                                predicted correctly out of all reactions.
            precision:      A float representing the precision of the model: the fraction of reactions
                                predicted to be successful that are actually successful.
            recall:         A float representing the recall of the model: the fraction of actually
                                successful reactions that are predicted to be successful.
            bcr:            A float representing the balanced classification rate of the model: the
                                average of the recall and the true negative rate.
        """

        self.metrics[self.draw_id]['confusion_matrices'].append(cm)
        self.metrics[self.draw_id]['accuracies'].append(accuracy)
        self.metrics[self.draw_id]['precisions'].append(precision)
        self.metrics[self.draw_id]['recalls'].append(recall)
        self.metrics[self.draw_id]['bcrs'].append(bcr)

        if self.verbose:
            print(cm)
            print('accuracy for model is', accuracy)
            print('precision for model is', precision)
            print('recall for model is', recall)
            print('balanced classification rate for model is', bcr)

    def find_inner_avg(self):
        """Find the average across all random draws"""
        metric_names = ['accuracies', 'precisions', 'recalls', 'bcrs']
        rand_draws = list(self.metrics.keys())

        for metric in metric_names:
            lst_of_metrics = []
            for set_id in rand_draws:
                lst_of_metrics.append(self.metrics[set_id][metric])
            self.metrics['average'][metric] = list(
                np.average(lst_of_metrics, axis=0))

        lst_of_confusion_matrices = []
        for set_id in rand_draws:
            lst_of_confusion_matrices.append(
                self.metrics[set_id]['confusion_matrices'])
        self.metrics['average']['confusion_matrices'] = lst_of_confusion_matrices

    def store_metrics_to_file(self):
        """Store the metrics results to the model's parameters dictionary

        Use the same logic of saving the metrics for each model.
        Dump the cross validation statistics to a pickle file.
        """
        self.find_inner_avg()

        model = self.model_name

        # Check if we are running multi-thread process
        # Or single-thread process
        if self.result_dict:
            # Store to the existing multi-processing dictionary
            stats_dict = self.result_dict
        else:
            # Store to a simple dictionary
            if self.stats_path.exists():
                with open(self.stats_path, "rb") as f:
                    stats_dict = pickle.load(f)
            else:
                stats_dict = {}

        if model not in stats_dict:
            stats_dict[model] = defaultdict(list)

        stats_dict[model]['amine'].append(self.amine)
        stats_dict[model]['accuracies'].append(
            self.metrics['average']['accuracies'])
        stats_dict[model]['confusion_matrices'].append(
            self.metrics['average']['confusion_matrices'])
        stats_dict[model]['precisions'].append(
            self.metrics['average']['precisions'])
        stats_dict[model]['recalls'].append(self.metrics['average']['recalls'])
        stats_dict[model]['bcrs'].append(self.metrics['average']['bcrs'])

        # Save this dictionary in case we need it later
        if not self.result_dict and self.stats_path:
            with open(self.stats_path, "wb") as f:
                pickle.dump(stats_dict, f)

    def save_model(self):
        """Save the data used to train, validate and test the model to designated folder"""

        # Set up the main destination folder for the model
        dst_root = './data/{}/{}'.format(self.classifier_name, self.model_name)
        if not os.path.exists(dst_root):
            os.makedirs(dst_root)
            print(
                f'No storage folder found for {self.classifier_name} model {self.model_name}'
            )
            print(f'Created folder to store the model at {dst_root}')

        # Dump the model into the designated folder
        file_name = "{0:s}_{1:s}.pkl".format(self.model_name, self.amine)
        with open(os.path.join(dst_root, file_name), "wb") as f:
            pickle.dump(self, f)
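A hypothetical usage sketch for the class above; the KNN subclass, the synthetic data, and the amine name are all illustrative assumptions, shown only to make the intended call order concrete:

# hypothetical subclass and driver; all names and data below are assumptions
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

class KNNClassifier(ActiveLearningClassifier):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # the base class's train() expects self.model to already exist
        self.model = KNeighborsClassifier(n_neighbors=3)

# synthetic stand-in data, purely for illustration
rng = np.random.default_rng(0)
all_data = rng.normal(size=(60, 4))
all_labels = (all_data.sum(axis=1) > 0).astype(int)
x_t, y_t = all_data[:10], all_labels[:10]
x_v, y_v = all_data[10:], all_labels[10:]

clf = KNNClassifier(amine='some_amine', classifier_name='KNN', model_name='KNN')
clf.load_dataset(0, x_t, y_t, x_v, y_v, all_data, all_labels)
clf.train()                       # fit the initial ActiveLearner and evaluate
clf.active_learning(num_iter=10)  # query ten labels from the pool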
Example #34
# generate the pool
# remove the initial data from the training dataset
X_pool = np.delete(X_train, initial_idx, axis=0)
y_pool = np.delete(y_train, initial_idx, axis=0)

"""
Training the ActiveLearner
"""

# initialize ActiveLearner
learner = ActiveLearner(
    estimator=classifier,
    X_training=X_initial, y_training=y_initial,
    verbose=0
)

# the active learning loop
n_queries = 10
for idx in range(n_queries):
    query_idx, query_instance = learner.query(X_pool, n_instances=200, verbose=0)
    learner.teach(
        X=X_pool[query_idx], y=y_pool[query_idx],
        verbose=0
    )
    # remove queried instance from pool
    X_pool = np.delete(X_pool, query_idx, axis=0)
    y_pool = np.delete(y_pool, query_idx, axis=0)

# the final accuracy score
print(learner.score(X_test, y_test, verbose=0))