def test_KNN_adwin(test_path, package_path):
    test_file = os.path.join(package_path, 'src/skmultiflow/data/datasets/sea_big.csv')
    stream = FileStream(test_file, -1, 1)
    stream.prepare_for_use()

    learner = KNNAdwin(n_neighbors=8, leaf_size=40, max_window_size=2000)

    cnt = 0
    max_samples = 5000
    predictions = []
    correct_predictions = 0
    wait_samples = 100

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            predictions.append(learner.predict(X)[0])
            if y[0] == predictions[-1]:
                correct_predictions += 1
        learner.partial_fit(X, y)
        cnt += 1

    performance = correct_predictions / len(predictions)
    expected_predictions = [1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1,
                            1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0,
                            1, 1, 0, 1, 0, 1, 1, 1, 1]
    expected_correct_predictions = 40
    expected_performance = 0.8163265306122449

    assert np.alltrue(predictions == expected_predictions)
    assert np.isclose(expected_performance, performance)
    assert correct_predictions == expected_correct_predictions
def __init__(self, base_estimator=KNNAdwin(), n_estimators=10, sampling_rate=3, algorithm=1,
             drift_detection=True, random_state=None):
    super().__init__()
    # default values
    self.ensemble = None
    self.n_estimators = None
    self.classes = None
    self.random_state = None
    self._init_n_estimators = n_estimators
    self._init_random_state = random_state
    self.sampling_rate = sampling_rate
    self.algorithm = algorithm
    self.drift_detection = drift_detection
    self.adwin_ensemble = None
    self.lam_sc = None
    self.lam_pos = None
    self.lam_neg = None
    self.lam_sw = None
    self.epsilon = None
    self.__configure(base_estimator)
def __init__(self, base_estimator=KNNAdwin(), n_estimators=10, cost_positive=1, cost_negative=0.1,
             drift_detection=True, random_state=None):
    super().__init__()
    # default values
    self.ensemble = None
    self.actual_n_estimators = None
    self.classes = None
    self._random_state = None
    self.base_estimator = base_estimator
    self.n_estimators = n_estimators
    self.cost_positive = cost_positive
    self.cost_negative = cost_negative
    self.drift_detection = drift_detection
    self.random_state = random_state
    self.adwin_ensemble = None
    self.lam_fn = None
    self.lam_fp = None
    self.lam_sum = None
    self.lam_sw = None
    self.werr = None
    self.epsilon = None
    self.__configure()
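# The cost_positive / cost_negative parameters above drive a cost-sensitive variant of
# online boosting: an example's contribution to the Poisson rate lambda is scaled by the
# misclassification cost of its class, so costly (typically minority-class) examples are
# replayed more often. The sketch below is a minimal illustration of that idea, not the
# library's implementation; `cost_sensitive_step` is a hypothetical helper and class 1
# is assumed to be the positive class.
import numpy as np

def cost_sensitive_step(ensemble, X, y, lam=1.0, cost_positive=1.0, cost_negative=0.1, rng=None):
    rng = rng if rng is not None else np.random.RandomState(1)
    # Scale the sampling rate by the cost of this example's class.
    cost = cost_positive if y[0] == 1 else cost_negative
    for estimator in ensemble:
        # k ~ Poisson(lambda * cost): expensive classes are presented more often.
        k = rng.poisson(lam * cost)
        for _ in range(k):
            estimator.partial_fit(X, y)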
def __init__(self, base_estimator=KNNAdwin(), n_estimators=10, sampling_rate=1,
             drift_detection=True, random_state=None):
    super().__init__(base_estimator, n_estimators, sampling_rate, drift_detection, random_state)
def __init__(self, base_estimator=KNNAdwin(), n_estimators=10, random_state=None):
    super().__init__(base_estimator, n_estimators, random_state)
    # default values
    self.adwin_ensemble = None
    self.__configure()
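# `adwin_ensemble` pairs each ensemble member with an ADWIN change detector. Each
# member's 0/1 prediction error is fed to its detector, and when any detector signals
# a change, the member with the highest estimated error is reset. A minimal sketch of
# that mechanism, assuming skmultiflow's ADWIN API (add_element / detected_change /
# estimation); `detect_and_reset` is a hypothetical helper, not the library's method.
from skmultiflow.drift_detection import ADWIN

def detect_and_reset(ensemble, adwin_ensemble, X, y):
    change_detected = False
    for estimator, adwin in zip(ensemble, adwin_ensemble):
        error = 0.0 if estimator.predict(X)[0] == y[0] else 1.0
        adwin.add_element(error)
        if adwin.detected_change():
            change_detected = True
    if change_detected:
        # Replace the member whose ADWIN window estimates the largest error.
        worst = max(range(len(ensemble)), key=lambda i: adwin_ensemble[i].estimation)
        ensemble[worst].reset()
        adwin_ensemble[worst] = ADWIN()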
def demo(instances=2000):
    """ _test_comparison_prequential

    This demo will test a prequential evaluation when more than one learner is
    passed, which makes it a comparison task.

    Parameters
    ----------
    instances: int
        The evaluation's maximum number of instances.
    """
    # Stream setup
    stream = FileStream("../data/datasets/covtype.csv", -1, 1)
    # stream = SEAGenerator(classification_function=2, sample_seed=53432, balance_classes=False)
    stream.prepare_for_use()

    # Setup the classifier
    clf = SGDClassifier()
    # classifier = KNNAdwin(n_neighbors=8, max_window_size=2000, leaf_size=40, categorical_list=None)
    # classifier = OzaBaggingAdwin(base_estimator=KNN(n_neighbors=8, max_window_size=2000, leaf_size=30, categorical_list=None))
    clf_one = KNNAdwin(n_neighbors=8, max_window_size=1000, leaf_size=30)
    # clf_two = KNN(n_neighbors=8, max_window_size=1000, leaf_size=30)
    # clf_two = LeverageBagging(base_estimator=KNN(), n_estimators=2)

    t_one = OneHotToCategorical([[10, 11, 12, 13],
                                 [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                                  32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
                                  50, 51, 52, 53]])
    # t_two = OneHotToCategorical([[10, 11, 12, 13],
    #                              [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
    #                               32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
    #                               50, 51, 52, 53]])

    pipe_one = Pipeline([('one_hot_to_categorical', t_one), ('KNN', clf_one)])
    # pipe_two = Pipeline([('one_hot_to_categorical', t_two), ('KNN', clf_two)])

    classifier = [clf, pipe_one]
    # classifier = SGDRegressor()
    # classifier = PerceptronMask()

    # Setup the pipeline
    # pipe = Pipeline([('Classifier', classifier)])

    # Setup the evaluator
    evaluator = EvaluatePrequential(pretrain_size=2000, output_file='test_comparison_prequential.csv',
                                    max_samples=instances, batch_size=1, n_wait=200, max_time=1000,
                                    show_plot=True, metrics=['performance', 'kappa_t'])

    # Evaluate
    evaluator.evaluate(stream=stream, model=classifier)
def __init__(self, base_estimator=KNNAdwin(), n_estimators=10, cost_positive=1, cost_negative=0.1,
             drift_detection=True, random_state=None):
    super().__init__(base_estimator, n_estimators, cost_positive, cost_negative,
                     drift_detection, random_state)
def __init__(self, base_estimator=KNNAdwin(), n_estimators=10, random_state=None):
    super().__init__()
    # default values
    self.ensemble = None
    self.n_estimators = None
    self.classes = None
    self.random_state = None
    self._init_n_estimators = n_estimators
    self._init_random_state = random_state
    self.__configure(base_estimator)
def __init__(self, base_estimator=KNNAdwin(), n_estimators=10, random_state=None):
    super().__init__()
    # default values
    self.ensemble = None
    self.actual_n_estimators = None
    self.classes = None
    self._random_state = None  # This is the actual random_state object used internally
    self.base_estimator = base_estimator
    self.n_estimators = n_estimators
    self.random_state = random_state
    self.__configure()
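# Online bagging (Oza and Russell) approximates bootstrap resampling on a stream:
# rather than drawing a bootstrap sample up front, each incoming example is presented
# to each ensemble member k times with k ~ Poisson(1), because in a bootstrap sample
# of size N each example appears Binomial(N, 1/N) times, which tends to Poisson(1) as
# N grows. A minimal sketch of that update using only the partial_fit API seen
# throughout this file; `oza_bagging_step` is a hypothetical helper, not the
# library's method.
import numpy as np

def oza_bagging_step(ensemble, X, y, classes=None, rng=None):
    rng = rng if rng is not None else np.random.RandomState(1)
    for estimator in ensemble:
        # k ~ Poisson(1) mimics how often this example would occur in a bootstrap sample.
        for _ in range(rng.poisson(1)):
            estimator.partial_fit(X, y, classes=classes)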
def demo():
    """ _test_oza_bagging

    This demo tests the OzaBagging classifier using KNNAdwin classifiers,
    on samples given by a SEAGenerator.

    The test computes the performance of the OzaBagging classifier while
    classifying max_samples (5000) instances.
    """
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    warnings.filterwarnings("ignore", ".*Passing 1d.*")

    stream = SEAGenerator(1, noise_percentage=.067, random_state=1)
    stream.prepare_for_use()

    clf = OzaBagging(base_estimator=KNNAdwin(n_neighbors=8, max_window_size=2000, leaf_size=30),
                     n_estimators=2, random_state=1)

    sample_count = 0
    correctly_classified = 0
    max_samples = 5000
    train_size = 8
    first = True

    if train_size > 0:
        X, y = stream.next_sample(train_size)
        clf.partial_fit(X, y, classes=stream.target_values)
        first = False

    while sample_count < max_samples:
        if sample_count % (max_samples / 20) == 0:
            logging.info('%s%%', str((sample_count // (max_samples / 20) * 5)))
        X, y = stream.next_sample()
        my_pred = clf.predict(X)

        if first:
            clf.partial_fit(X, y, classes=stream.target_values)
            first = False
        else:
            clf.partial_fit(X, y)

        if my_pred is not None:
            if y[0] == my_pred[0]:
                correctly_classified += 1

        sample_count += 1

    print(str(sample_count) + ' samples analyzed.')
    print('My performance: ' + str(correctly_classified / sample_count))
def test_pipeline(test_path):
    n_categories = 5

    # Load test data generated using:
    # RandomTreeGenerator(tree_random_state=1, sample_random_state=1,
    #                     n_cat_features=n_categories, n_num_features=0)
    test_file = os.path.join(test_path, 'data-one-hot.npz')
    data = np.load(test_file)
    X = data['X']
    y = data['y']
    stream = DataStream(data=X, y=y)
    stream.prepare_for_use()

    # Setup transformer
    cat_att_idx = [[i + j for i in range(n_categories)]
                   for j in range(0, n_categories * n_categories, n_categories)]
    transformer = OneHotToCategorical(categorical_list=cat_att_idx)

    # Setup the classifier
    classifier = KNNAdwin(n_neighbors=2, max_window_size=50, leaf_size=40)
    # Setup the pipeline
    pipe = Pipeline([('one-hot', transformer), ('KNNAdwin', classifier)])
    # Setup the evaluator
    evaluator = EvaluatePrequential(show_plot=False, pretrain_size=10, max_samples=100)
    # Evaluate
    evaluator.evaluate(stream=stream, model=pipe)

    metrics = evaluator.get_mean_measurements()

    expected_accuracy = 0.5555555555555556
    assert np.isclose(expected_accuracy, metrics[0].accuracy_score())

    expected_kappa = 0.11111111111111116
    assert np.isclose(expected_kappa, metrics[0].kappa_score())
    print(pipe.get_info())
    expected_info = "Pipeline:\n" \
                    "[OneHotToCategorical(categorical_list=[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9],\n" \
                    "                                       [10, 11, 12, 13, 14],\n" \
                    "                                       [15, 16, 17, 18, 19],\n" \
                    "                                       [20, 21, 22, 23, 24]])\n" \
                    "KNNAdwin(leaf_size=40, max_window_size=50, n_neighbors=2,\n" \
                    "         nominal_attributes=None)]"
    assert pipe.get_info() == expected_info
def __init__(self, base_estimator=KNNAdwin(), n_estimators=10, sampling_rate=2,
             drift_detection=True, random_state=None):
    super().__init__()
    # default values
    self.ensemble = None
    self.n_estimators = None
    self.classes = None
    self.random_state = None
    self.n_samples = None
    self.drift_detection = drift_detection
    self.adwin_ensemble = None
    self._init_n_estimators = n_estimators
    self._init_random_state = random_state
    self.sampling_rate = sampling_rate
    self.__configure(base_estimator)
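# `sampling_rate` above controls how aggressively the class distribution is rebalanced.
# A minimal sketch of Poisson-based under/over-bagging, assuming class 1 is the minority
# class: minority examples get a larger Poisson rate (oversampling) while majority
# examples get a smaller one (undersampling), and the rate grows across ensemble members
# so the ensemble spans a range of class distributions. `under_over_bagging_step` is a
# hypothetical helper for illustration, not the library's method.
import numpy as np

def under_over_bagging_step(ensemble, X, y, sampling_rate=2, rng=None):
    rng = rng if rng is not None else np.random.RandomState(1)
    n = len(ensemble)
    for m, estimator in enumerate(ensemble, start=1):
        a = m / n  # per-member resampling factor in (0, 1]
        lam = a * sampling_rate if y[0] == 1 else a
        for _ in range(rng.poisson(lam)):
            estimator.partial_fit(X, y)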
def __init__(self, base_estimator=KNNAdwin(), n_estimators=10, random_state=None):
    super().__init__(base_estimator, n_estimators, random_state)
def demo():
    """ _test_knn_adwin

    This demo tests the KNNAdwin classifier on a file stream, which gives
    instances coming from a SEA generator.

    The test computes the performance of the KNNAdwin classifier as well as
    the time to create the structure and classify max_samples (10000 by
    default) instances.
    """
    start = timer()
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    # warnings.filterwarnings("ignore", ".*Passing 1d.*")
    stream = FileStream('../data/datasets/sea_big.csv', -1, 1)
    # stream = RandomRBFGeneratorDrift(change_speed=41.00, n_centroids=50, model_random_state=32523423,
    #                                  sample_seed=5435, n_classes=2, num_att=10, num_drift_centroids=50)
    stream.prepare_for_use()

    t = OneHotToCategorical([[10, 11, 12, 13],
                             [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                              32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
                              50, 51, 52, 53]])
    t2 = OneHotToCategorical([[10, 11, 12, 13],
                              [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                               32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
                               50, 51, 52, 53]])

    # knn = KNN(n_neighbors=8, max_window_size=2000, leaf_size=40)
    knn = KNNAdwin(n_neighbors=8, leaf_size=40, max_window_size=2000)
    # pipe = Pipeline([('one_hot_to_categorical', t), ('KNN', knn)])

    compare = KNeighborsClassifier(n_neighbors=8, algorithm='kd_tree', leaf_size=40, metric='euclidean')
    # pipe2 = Pipeline([('one_hot_to_categorical', t2), ('KNN', compare)])

    first = True
    train = 200
    if train > 0:
        X, y = stream.next_sample(train)
        # pipe.partial_fit(X, y, classes=stream.target_values)
        # pipe.partial_fit(X, y, classes=stream.target_values)
        # pipe2.fit(X, y)
        knn.partial_fit(X, y, classes=stream.target_values)
        compare.fit(X, y)
        first = False

    n_samples = 0
    max_samples = 10000
    my_corrects = 0
    compare_corrects = 0

    while n_samples < max_samples:
        if n_samples % (max_samples / 20) == 0:
            logging.info('%s%%', str((n_samples // (max_samples / 20) * 5)))
        X, y = stream.next_sample()
        # my_pred = pipe.predict(X)
        my_pred = knn.predict(X)
        # my_pred = [1]

        if first:
            # pipe.partial_fit(X, y, classes=stream.target_values)
            # pipe.partial_fit(X, y, classes=stream.target_values)
            knn.partial_fit(X, y, classes=stream.target_values)
            first = False
        else:
            # pipe.partial_fit(X, y)
            knn.partial_fit(X, y)

        # compare_pred = pipe2.predict(X)
        compare_pred = compare.predict(X)
        if y[0] == my_pred[0]:
            my_corrects += 1
        if y[0] == compare_pred[0]:
            compare_corrects += 1
        n_samples += 1

    end = timer()
    print('Evaluation time: ' + str(end - start))
    print(str(n_samples) + ' samples analyzed.')
    print('My performance: ' + str(my_corrects / n_samples))
    print('Compare performance: ' + str(compare_corrects / n_samples))
def test_knn_adwin():
    stream = ConceptDriftStream(stream=SEAGenerator(random_state=1),
                                drift_stream=SEAGenerator(random_state=2, classification_function=2),
                                random_state=1, position=250, width=10)
    stream.prepare_for_use()

    learner = KNNAdwin(n_neighbors=8, leaf_size=40, max_window_size=200)

    cnt = 0
    max_samples = 1000
    predictions = array('i')
    correct_predictions = 0
    wait_samples = 20

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            predictions.append(learner.predict(X)[0])
            if y[0] == predictions[-1]:
                correct_predictions += 1
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('i', [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1,
                                       1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                       0, 1, 1, 1, 0, 1, 0, 1, 1])
    assert np.alltrue(predictions == expected_predictions)

    expected_correct_predictions = 46
    assert correct_predictions == expected_correct_predictions

    learner.reset()
    assert learner.window.n_samples == 0

    expected_info = 'KNNAdwin: - n_neighbors: 8 - max_window_size: 200 - leaf_size: 40'
    assert learner.get_info() == expected_info

    stream.restart()
    X, y = stream.next_sample(max_samples)
    learner.fit(X[:950], y[:950])
    predictions = learner.predict(X[951:])

    correct_predictions = sum(np.array(predictions) == y[951:])
    expected_correct_predictions = 47
    assert correct_predictions == expected_correct_predictions

    assert type(learner.predict(X)) == np.ndarray
    assert type(learner.predict_proba(X)) == np.ndarray