def test_apriori():
    pool_classifiers, X_dsel, y_dsel, X_test, y_test = setup_classifiers()
    rng = np.random.RandomState(123456)
    apriori = APriori(pool_classifiers, random_state=rng)
    apriori.fit(X_dsel, y_dsel)
    assert np.isclose(apriori.score(X_test, y_test), 0.6878787878787879)
def test_apriori_dfp():
    pool_classifiers, X_dsel, y_dsel, X_test, y_test = setup_classifiers()
    rng = np.random.RandomState(123456)
    apriori = APriori(pool_classifiers, random_state=rng, DFP=True)
    apriori.fit(X_dsel, y_dsel)
    assert np.isclose(apriori.score(X_test, y_test), 0.87272727272727268)
def test_apriori_knn_methods(knn_methods):
    pool_classifiers, X_dsel, y_dsel, X_test, y_test = setup_classifiers()
    rng = np.random.RandomState(123456)
    apriori = APriori(pool_classifiers, random_state=rng,
                      knn_classifier=knn_methods)
    apriori.fit(X_dsel, y_dsel)
    assert np.isclose(apriori.score(X_test, y_test), 0.97872340425531912)
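# The three integration tests above all rely on a setup_classifiers() helper
# that is not shown in this file. The sketch below is a hypothetical
# reconstruction based only on how the tests use it; the dataset, base
# classifier, pool size, and split ratios are illustrative assumptions, not
# confirmed by the source.
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split


def setup_classifiers():
    rng = np.random.RandomState(123456)
    X, y = load_breast_cancer(return_X_y=True)
    # Hold out a test set, then carve a DSEL partition out of the train set
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=rng)
    X_train, X_dsel, y_train, y_dsel = train_test_split(
        X_train, y_train, test_size=0.5, random_state=rng)
    # Calibrated Perceptrons, so probability-based methods such as A priori
    # can estimate competence from predict_proba
    model = CalibratedClassifierCV(Perceptron(max_iter=100))
    pool_classifiers = BaggingClassifier(model, n_estimators=100,
                                         random_state=rng)
    pool_classifiers.fit(X_train, y_train)
    return pool_classifiers, X_dsel, y_dsel, X_test, y_test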
def test_fit(create_pool_classifiers, create_X_y):
    X, y = create_X_y
    a_priori_test = APriori(create_pool_classifiers)
    a_priori_test.fit(X, y)
    expected = np.array([[0.5, 0.5], [1.0, 0.0], [0.33, 0.67]])
    expected = np.tile(expected, (15, 1, 1))
    assert np.array_equal(a_priori_test.dsel_scores_, expected)
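# The test above assumes pytest fixtures that are not shown here. A
# hypothetical sketch of what they could look like: the expected (15, 3, 2)
# score array implies 15 DSEL samples and a pool of three classifiers whose
# predict_proba outputs are constant ([0.5, 0.5], [1.0, 0.0], [0.33, 0.67]).
# The exact dataset values below are illustrative assumptions.
from unittest.mock import MagicMock

import numpy as np
import pytest


def _mock_classifier(proba):
    # A stub classifier that always returns the same class probabilities
    clf = MagicMock()
    clf.predict_proba = lambda X: np.tile(proba, (len(X), 1))
    clf.predict = lambda X: np.argmax(np.tile(proba, (len(X), 1)), axis=1)
    return clf


@pytest.fixture
def create_X_y():
    # 15 two-dimensional samples with binary labels (values illustrative)
    rng = np.random.RandomState(0)
    X = rng.rand(15, 2)
    y = np.array([0, 1] * 7 + [0])
    return X, y


@pytest.fixture
def create_pool_classifiers():
    return [_mock_classifier(p)
            for p in ([0.5, 0.5], [1.0, 0.0], [0.33, 0.67])]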
# Initialize the DS techniques
knorau = KNORAU(pool_classifiers)
kne = KNORAE(pool_classifiers)
desp = DESP(pool_classifiers)
ola = OLA(pool_classifiers)
mcb = MCB(pool_classifiers)
apriori = APriori(pool_classifiers)
meta = METADES(pool_classifiers)

# Fit the DES techniques
knorau.fit(X_dsel, y_dsel)
kne.fit(X_dsel, y_dsel)
desp.fit(X_dsel, y_dsel)
meta.fit(X_dsel, y_dsel)

# Fit the DCS techniques
ola.fit(X_dsel, y_dsel)
mcb.fit(X_dsel, y_dsel)
apriori.fit(X_dsel, y_dsel)

# Calculate the classification accuracy of each technique
print('Evaluating DS techniques:')
print('Classification accuracy KNORA-Union: ', knorau.score(X_test, y_test))
print('Classification accuracy KNORA-Eliminate: ', kne.score(X_test, y_test))
print('Classification accuracy DESP: ', desp.score(X_test, y_test))
print('Classification accuracy OLA: ', ola.score(X_test, y_test))
print('Classification accuracy A priori: ', apriori.score(X_test, y_test))
print('Classification accuracy MCB: ', mcb.score(X_test, y_test))
print('Classification accuracy META-DES: ', meta.score(X_test, y_test))
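# The comparison above assumes that pool_classifiers, X_dsel/y_dsel, and
# X_test/y_test already exist. A minimal sketch of that setup, assuming a
# Bagging pool of calibrated Perceptrons (the dataset, pool size, and split
# ratios are illustrative assumptions):
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(42)
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=rng)
X_train, X_dsel, y_train, y_dsel = train_test_split(
    X_train, y_train, test_size=0.5, random_state=rng)
# Calibrated so probability-based methods (A priori, META-DES) can be used
pool_classifiers = BaggingClassifier(
    CalibratedClassifierCV(Perceptron(max_iter=100)),
    n_estimators=10, random_state=rng)
pool_classifiers.fit(X_train, y_train)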
def test_fit_tiled_scores():
    a_priori_test = APriori(create_pool_classifiers())
    a_priori_test.fit(X_dsel_ex1, y_dsel_ex1)
    expected = np.array([[0.5, 0.5], [1.0, 0.0], [0.33, 0.67]])
    expected = np.tile(expected, (15, 1, 1))
    assert np.array_equal(a_priori_test.dsel_scores_, expected)
def test_fit_scores_values():
    a_priori_test = APriori(create_pool_classifiers())
    a_priori_test.fit(X_dsel_ex1, y_dsel_ex1)
    expected = np.array([[0.5, 0.5], [1.0, 0.0], [0.33, 0.67]])
    assert np.isclose(a_priori_test.dsel_scores_, expected).all()
#     random_state=rng)  # tail of a data-split call truncated in the source

# Considering a pool composed of 10 base classifiers
pool_classifiers = RandomForestClassifier(n_estimators=10, random_state=rng,
                                          max_depth=10)
pool_classifiers.fit(X_train, y_train)

# DS techniques without DFP
apriori = APriori(pool_classifiers)
aposteriori = APosteriori(pool_classifiers)
ola = OLA(pool_classifiers)
lca = LCA(pool_classifiers)
desp = DESP(pool_classifiers)
meta = METADES(pool_classifiers)

apriori.fit(X_dsel, y_dsel)
aposteriori.fit(X_dsel, y_dsel)
ola.fit(X_dsel, y_dsel)
lca.fit(X_dsel, y_dsel)
desp.fit(X_dsel, y_dsel)
meta.fit(X_dsel, y_dsel)

print('Evaluating DS techniques:')
print('Classification accuracy of OLA: ', ola.score(X_test, y_test))
print('Classification accuracy of LCA: ', lca.score(X_test, y_test))
print('Classification accuracy of A priori: ', apriori.score(X_test, y_test))
print('Classification accuracy of A posteriori: ',
      aposteriori.score(X_test, y_test))
print('Classification accuracy of DES-P: ', desp.score(X_test, y_test))
print('Classification accuracy of META-DES: ', meta.score(X_test, y_test))
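# The block above builds the techniques without Dynamic Frienemy Pruning
# (DFP). A with-DFP counterpart (the FIRE-DES variants) is obtained by
# passing DFP=True to the same constructors; a brief sketch, reusing the
# pool and data defined above (the fire_* names are illustrative):
fire_apriori = APriori(pool_classifiers, DFP=True)
fire_ola = OLA(pool_classifiers, DFP=True)
fire_apriori.fit(X_dsel, y_dsel)
fire_ola.fit(X_dsel, y_dsel)
print('Classification accuracy of FIRE-A priori: ',
      fire_apriori.score(X_test, y_test))
print('Classification accuracy of FIRE-OLA: ',
      fire_ola.score(X_test, y_test))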
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

from deslib.dcs import APriori, MCB, OLA
from deslib.des import DESP, KNORAE, KNORAU, METADES


def main():
    ###########################################################################
    # Preparing the dataset
    # ---------------------
    # In this part we load the breast cancer dataset from scikit-learn and
    # preprocess it in order to pass it to the DS models. An important point
    # here is to normalize the data so that it has zero mean and unit
    # variance, which is a common requirement for many machine learning
    # algorithms. This step can be easily done using the StandardScaler class.
    rng = np.random.RandomState(123)
    data = load_breast_cancer()
    X = data.data
    y = data.target

    # Split the data into training and test data
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33,
                                                        random_state=rng)

    # Scale the variables to have zero mean and unit variance
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # Split the training data into training and DSEL for the DS techniques
    X_train, X_dsel, y_train, y_dsel = train_test_split(X_train, y_train,
                                                        test_size=0.5,
                                                        random_state=rng)

    # Train a pool of 100 base classifiers
    pool_classifiers = BaggingClassifier(Perceptron(max_iter=10),
                                         n_estimators=100, random_state=rng)
    pool_classifiers.fit(X_train, y_train)

    # Initialize the DS techniques
    knorau = KNORAU(pool_classifiers)
    kne = KNORAE(pool_classifiers)
    desp = DESP(pool_classifiers)
    ola = OLA(pool_classifiers)
    mcb = MCB(pool_classifiers)

    ###########################################################################
    # Calibrating base classifiers
    # ----------------------------
    # Some dynamic selection techniques require that the base classifiers
    # estimate probabilities in order to estimate their level of competence.
    # Since the Perceptron model is not a probabilistic classifier (it does
    # not implement the predict_proba method), it needs to be calibrated for
    # probability estimation before being used by such DS techniques. This
    # step can be conducted using the CalibratedClassifierCV class from
    # scikit-learn. Note that in this example we pass a prefitted pool of
    # classifiers to the calibration method in order to use exactly the same
    # pool used in the other DS methods.
    calibrated_pool = []
    for clf in pool_classifiers:
        calibrated = CalibratedClassifierCV(base_estimator=clf, cv='prefit')
        calibrated.fit(X_dsel, y_dsel)
        calibrated_pool.append(calibrated)

    apriori = APriori(calibrated_pool)
    meta = METADES(calibrated_pool)

    knorau.fit(X_dsel, y_dsel)
    kne.fit(X_dsel, y_dsel)
    desp.fit(X_dsel, y_dsel)
    ola.fit(X_dsel, y_dsel)
    mcb.fit(X_dsel, y_dsel)
    apriori.fit(X_dsel, y_dsel)
    meta.fit(X_dsel, y_dsel)

    ###########################################################################
    # Evaluating the methods
    # ----------------------
    # Let's now evaluate the methods on the test set. We also use the
    # performance of Bagging (the pool of classifiers without any selection)
    # as a baseline for comparison. We can see that the majority of DS
    # methods achieve higher classification accuracy.
    print('Evaluating DS techniques:')
    print('Classification accuracy KNORA-Union: ',
          knorau.score(X_test, y_test))
    print('Classification accuracy KNORA-Eliminate: ',
          kne.score(X_test, y_test))
    print('Classification accuracy DESP: ', desp.score(X_test, y_test))
    print('Classification accuracy OLA: ', ola.score(X_test, y_test))
    print('Classification accuracy A priori: ',
          apriori.score(X_test, y_test))
    print('Classification accuracy MCB: ', mcb.score(X_test, y_test))
    print('Classification accuracy META-DES: ', meta.score(X_test, y_test))
    print('Classification accuracy Bagging: ',
          pool_classifiers.score(X_test, y_test))
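# Standard script entry point, so the example can be executed directly; the
# main() function defined above is assumed to be the only entry point.
if __name__ == '__main__':
    main()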