def __configure(self):
     if hasattr(self.base_estimator, "reset"):
         self.base_estimator.reset()
     self.actual_n_estimators = self.n_estimators
     self.ensemble = [
         cp.deepcopy(self.base_estimator)
         for _ in range(self.actual_n_estimators)
     ]
     self.adwin_ensemble = [
         ADWIN(self.delta) for _ in range(self.actual_n_estimators)
     ]
     self._random_state = check_random_state(self.random_state)
     self.n_detected_changes = 0
     self.classes = None
     self.init_matrix_codes = True
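The pattern above pairs one ADWIN detector with each ensemble member so a drifting member can be reset individually. A minimal standalone sketch of that monitoring step, with illustrative names (reset_drifted_members is not part of the original class):

import copy as cp

from skmultiflow.drift_detection import ADWIN


def reset_drifted_members(ensemble, detectors, errors, base_estimator):
    """Replace any member whose paired ADWIN flags a change (illustrative helper)."""
    n_changes = 0
    for i, err in enumerate(errors):  # err is 1.0 for a misclassification, else 0.0
        detectors[i].add_element(err)
        if detectors[i].detected_change():
            ensemble[i] = cp.deepcopy(base_estimator)  # fresh copy of the base learner
            detectors[i] = ADWIN()                     # fresh detector for the new member
            n_changes += 1
    return n_changes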
Example #2
 def __configure(self, base_estimator):
     base_estimator.reset()
     self.base_estimator = base_estimator
     self.n_estimators = self._init_n_estimators
     self.adwin_ensemble = []
     for i in range(self.n_estimators):
         self.adwin_ensemble.append(ADWIN())
     self.ensemble = [cp.deepcopy(base_estimator) for _ in range(self.n_estimators)]
     self.random_state = check_random_state(self._init_random_state)
     self.lam_fn = np.zeros(self.n_estimators)
     self.lam_fp = np.zeros(self.n_estimators)
     self.lam_sum = np.zeros(self.n_estimators)
     self.werr = np.zeros(self.n_estimators)
     self.lam_sw = np.zeros(self.n_estimators)
     self.epsilon = np.zeros(self.n_estimators)
Example #3
    def __configure(self):
        if hasattr(self.base_estimator, "reset"):
            self.base_estimator.reset()

        self.actual_n_estimators = self._init_n_estimators
        self.adwin_ensemble = []
        for i in range(self.actual_n_estimators):
            self.adwin_ensemble.append(ADWIN())
        self.ensemble = [
            cp.deepcopy(self.base_estimator)
            for _ in range(self.actual_n_estimators)
        ]
        self._random_state = check_random_state(self.random_state)
        self.lam_sc = np.zeros(self.actual_n_estimators)
        self.lam_sw = np.zeros(self.actual_n_estimators)
        self.epsilon = np.zeros(self.actual_n_estimators)
Example #4
 def __init__(
     self,
     base_ensemble,
     drift_detector=ADWIN(),
     ensemble_size=10,
     min_lambda=1,
     max_lambda=6,
 ):
     self.ensemble = DESDDEnsemble(
         base_ensemble,
         ensemble_size=ensemble_size,
         min_lambda=min_lambda,
         max_lambda=max_lambda,
     )
     self.drift_detector = drift_detector
     self.max_ensemble_size = ensemble_size
     self.min_lambda = min_lambda
     self.max_lambda = max_lambda
     self.dcs_method = DESDDSel()
     self.classes = None
Example #5
 def __init__(self, initial_class_observations, random_state=None):
     super().__init__(initial_class_observations)
     self._estimation_error_weight = ADWIN()
     self.error_change = False
     self._random_state = check_random_state(random_state)
Example #6
 def __init__(self, initial_class_observations):
     super().__init__(initial_class_observations)
     self._estimation_error_weight = ADWIN()
     self.error_change = False
     self._randomSeed = 1
     self._classifier_random = check_random_state(self._randomSeed)
Example #7
        def learn_from_instance(self, X, y, weight, hat, parent, parent_branch):
            true_class = y
            class_prediction = 0

            leaf = self.filter_instance_to_leaf(X, parent, parent_branch)
            if leaf.node is not None:
                class_prediction = get_max_value_key(leaf.node.get_class_votes(X, hat))

            bl_correct = (true_class == class_prediction)

            if self._estimation_error_weight is None:
                self._estimation_error_weight = ADWIN()

            old_error = self.get_error_estimation()

            # Add element to ADWIN
            add = 0.0 if (bl_correct is True) else 1.0

            self._estimation_error_weight.add_element(add)
            # Detect change with ADWIN
            self.error_change = self._estimation_error_weight.detected_change()

            if self.error_change is True and old_error > self.get_error_estimation():
                self.error_change = False

            # Check condition to build a new alternate tree
            if self.error_change is True:
                self._alternate_tree = hat._new_learning_node()
                hat.alternate_trees_cnt += 1

            # Condition to replace alternate tree
            elif self._alternate_tree is not None and self._alternate_tree.is_null_error() is False:
                if self.get_error_width() > error_width_threshold \
                        and self._alternate_tree.get_error_width() > error_width_threshold:
                    old_error_rate = self.get_error_estimation()
                    alt_error_rate = self._alternate_tree.get_error_estimation()
                    fDelta = .05
                    fN = 1.0 / self._alternate_tree.get_error_width() + 1.0 / (self.get_error_width())

                    bound = math.sqrt(2.0 * old_error_rate * (1.0 - old_error_rate) * math.log(2.0 / fDelta) * fN)
                    # To check, bound never less than (old_error_rate - alt_error_rate)
                    if bound < (old_error_rate - alt_error_rate):
                        hat._active_leaf_node_cnt -= self.number_leaves()
                        hat._active_leaf_node_cnt += self._alternate_tree.number_leaves()
                        self.kill_tree_children(hat)

                        if parent is not None:
                            parent.set_child(parent_branch, self._alternate_tree)
                        else:
                            # Switch tree root
                            hat._tree_root = hat._tree_root.alternateTree
                        hat.switch_alternate_trees_cnt += 1
                    elif bound < alt_error_rate - old_error_rate:
                        if isinstance(self._alternate_tree, TS_HAT.ActiveLearningNode):
                            self._alternate_tree = None
                        elif isinstance(self._alternate_tree, TS_HAT.InactiveLearningNode):
                            self._alternate_tree = None
                        else:
                            self._alternate_tree.kill_tree_children(hat)
                        hat.pruned_alternate_trees_cnt += 1  # hat.pruned_alternate_trees_cnt to check

            # Learn_From_Instance alternate Tree and Child nodes
            if self._alternate_tree is not None:
                self._alternate_tree.learn_from_instance(X, y, weight, hat, parent, parent_branch)
            child_branch = self.instance_child_index(X)
            child = self.get_child(child_branch)
            if child is not None:
                child.learn_from_instance(X, y, weight, hat, parent, parent_branch)
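The alternate-tree swap above reduces to a Hoeffding-style test: the alternate replaces the main subtree when the main error exceeds the alternate error by more than bound = sqrt(2*e*(1-e)*ln(2/delta)*(1/w_alt + 1/w_main)). A standalone sketch of that test with illustrative numbers:

import math

def should_swap(old_err, alt_err, w_main, w_alt, delta=0.05):
    """True when the alternate tree's error is significantly below the main tree's."""
    f_n = 1.0 / w_alt + 1.0 / w_main
    bound = math.sqrt(2.0 * old_err * (1.0 - old_err) * math.log(2.0 / delta) * f_n)
    return bound < (old_err - alt_err)

# With 300/200 samples in the two error windows: bound ~= 0.114 < 0.30 - 0.18, so swap
print(should_swap(old_err=0.30, alt_err=0.18, w_main=300, w_alt=200))  # True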
Example #8
    def __init__(self,
                 n_estimators=10,
                 max_features='auto',
                 disable_weighted_vote=False,
                 lambda_value=6,
                 performance_metric='acc',
                 drift_detection_method: BaseDriftDetector = ADWIN(0.001),
                 warning_detection_method: BaseDriftDetector = ADWIN(0.01),
                 max_byte_size=33554432,
                 memory_estimate_period=2000000,
                 grace_period=50,
                 split_criterion='info_gain',
                 split_confidence=0.01,
                 tie_threshold=0.05,
                 binary_split=False,
                 stop_mem_management=False,
                 remove_poor_atts=False,
                 no_preprune=False,
                 leaf_prediction='nba',
                 nb_threshold=0,
                 nominal_attributes=None,
                 random_state=None):
        """AdaptiveRandomForestClassifier class constructor."""
        super().__init__()
        self.n_estimators = n_estimators
        self.max_features = max_features
        self.disable_weighted_vote = disable_weighted_vote
        self.lambda_value = lambda_value
        if isinstance(drift_detection_method, BaseDriftDetector):
            self.drift_detection_method = drift_detection_method
        else:
            self.drift_detection_method = None
        if isinstance(warning_detection_method, BaseDriftDetector):
            self.warning_detection_method = warning_detection_method
        else:
            self.warning_detection_method = None
        self.instances_seen = 0
        self.classes = None
        self._train_weight_seen_by_model = 0.0
        self.ensemble = None
        self.random_state = random_state
        self._random_state = check_random_state(
            self.random_state)  # Actual random_state object
        if performance_metric in ['acc', 'kappa']:
            self.performance_metric = performance_metric
        else:
            raise ValueError(
                'Invalid performance metric: {}'.format(performance_metric))

        # ARF Hoeffding Tree configuration
        self.max_byte_size = max_byte_size
        self.memory_estimate_period = memory_estimate_period
        self.grace_period = grace_period
        self.split_criterion = split_criterion
        self.split_confidence = split_confidence
        self.tie_threshold = tie_threshold
        self.binary_split = binary_split
        self.stop_mem_management = stop_mem_management
        self.remove_poor_atts = remove_poor_atts
        self.no_preprune = no_preprune
        self.leaf_prediction = leaf_prediction
        self.nb_threshold = nb_threshold
        self.nominal_attributes = nominal_attributes
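For context, this constructor is typically driven as below; the warning detector's larger delta (0.01) makes it more sensitive than the drift detector (0.001), so warnings fire first. A usage sketch:

from skmultiflow.drift_detection import ADWIN
from skmultiflow.meta import AdaptiveRandomForestClassifier

# Warnings fire before confirmed drifts because the warning ADWIN is more sensitive
arf = AdaptiveRandomForestClassifier(n_estimators=10,
                                     drift_detection_method=ADWIN(0.001),
                                     warning_detection_method=ADWIN(0.01))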
Example #9
class AdaSplitNode(SplitNode, AdaNode):
    """ Node that splits the data in a Hoeffding Adaptive Tree.

    Parameters
    ----------
    split_test: skmultiflow.split_test.InstanceConditionalTest
        Split test.
    class_observations: dict (class_value, weight) or None
        Class observations
    """
    def __init__(self, split_test, class_observations):
        super().__init__(split_test, class_observations)
        self._estimation_error_weight = ADWIN()
        self._alternate_tree = None
        self.error_change = False
        self._random_seed = 1
        self._classifier_random = check_random_state(self._random_seed)

    # Override AdaNode
    def number_leaves(self):
        num_of_leaves = 0
        for child in self._children:
            if child is not None:
                num_of_leaves += child.number_leaves()

        return num_of_leaves

    # Override AdaNode
    def get_error_estimation(self):
        return self._estimation_error_weight.estimation

    # Override AdaNode
    def get_error_width(self):
        w = 0.0
        if self.is_null_error() is False:
            w = self._estimation_error_weight.width

        return w

    # Override AdaNode
    def is_null_error(self):
        return self._estimation_error_weight is None

    # Override AdaNode
    def learn_from_instance(self, X, y, weight, hat, parent, parent_branch):
        true_class = y
        class_prediction = 0

        leaf = self.filter_instance_to_leaf(X, parent, parent_branch)
        if leaf.node is not None:
            class_prediction = get_max_value_key(
                leaf.node.get_class_votes(X, hat))

        bl_correct = (true_class == class_prediction)

        if self._estimation_error_weight is None:
            self._estimation_error_weight = ADWIN()

        old_error = self.get_error_estimation()

        # Add element to ADWIN
        add = 0.0 if (bl_correct is True) else 1.0

        self._estimation_error_weight.add_element(add)
        # Detect change with ADWIN
        self.error_change = self._estimation_error_weight.detected_change()

        if self.error_change is True and old_error > self.get_error_estimation():
            self.error_change = False

        # Check condition to build a new alternate tree
        if self.error_change is True:
            self._alternate_tree = hat._new_learning_node()
            hat.alternate_trees_cnt += 1

        # Condition to replace alternate tree
        elif self._alternate_tree is not None and \
                self._alternate_tree.is_null_error() is False:
            if self.get_error_width() > ERROR_WIDTH_THRESHOLD \
                    and self._alternate_tree.get_error_width() > ERROR_WIDTH_THRESHOLD:
                old_error_rate = self.get_error_estimation()
                alt_error_rate = self._alternate_tree.get_error_estimation()
                fDelta = .05
                fN = 1.0 / self._alternate_tree.get_error_width() + 1.0 / (
                    self.get_error_width())

                bound = math.sqrt(2.0 * old_error_rate *
                                  (1.0 - old_error_rate) *
                                  math.log(2.0 / fDelta) * fN)
                # To check, bound never less than (old_error_rate - alt_error_rate)
                if bound < (old_error_rate - alt_error_rate):
                    hat._active_leaf_node_cnt -= self.number_leaves()
                    hat._active_leaf_node_cnt += self._alternate_tree.number_leaves()
                    self.kill_tree_children(hat)

                    if parent is not None:
                        parent.set_child(parent_branch, self._alternate_tree)
                    else:
                        # Switch tree root
                        hat._tree_root = hat._tree_root.alternateTree
                    hat.switch_alternate_trees_cnt += 1
                elif bound < alt_error_rate - old_error_rate:
                    if isinstance(self._alternate_tree, ActiveLearningNode):
                        self._alternate_tree = None
                    elif isinstance(self._alternate_tree,
                                    InactiveLearningNode):
                        self._alternate_tree = None
                    else:
                        self._alternate_tree.kill_tree_children(hat)
                    hat.pruned_alternate_trees_cnt += 1  # hat.pruned_alternate_trees_cnt to check

        # Learn_From_Instance alternate Tree and Child nodes
        if self._alternate_tree is not None:
            self._alternate_tree.learn_from_instance(X, y, weight, hat, parent,
                                                     parent_branch)
        child_branch = self.instance_child_index(X)
        child = self.get_child(child_branch)
        if child is not None:
            child.learn_from_instance(X, y, weight, hat, self, child_branch)
        # Instance contains a categorical value previously unseen by the split
        # node
        elif isinstance(self.get_split_test(), NominalAttributeMultiwayTest) and \
                self.get_split_test().branch_for_instance(X) < 0:
            # Creates a new learning node to encompass the new observed feature
            # value
            leaf_node = hat._new_learning_node()
            branch_id = self.get_split_test().add_new_branch(
                X[self.get_split_test().get_atts_test_depends_on()[0]])
            self.set_child(branch_id, leaf_node)
            hat._active_leaf_node_cnt += 1
            leaf_node.learn_from_instance(X, y, weight, hat, parent,
                                          parent_branch)

    # Override AdaNode
    def kill_tree_children(self, hat):
        for child in self._children:
            if child is not None:
                # Delete alternate tree if it exists
                if isinstance(child,
                              SplitNode) and child._alternate_tree is not None:
                    child._alternate_tree.kill_tree_children(hat)
                    self._pruned_alternate_trees += 1
                # Recursive delete of SplitNodes
                if isinstance(child, SplitNode):
                    child.kill_tree_children(hat)

                if isinstance(child, ActiveLearningNode):
                    child = None
                    hat._active_leaf_node_cnt -= 1
                elif isinstance(child, InactiveLearningNode):
                    child = None
                    hat._inactive_leaf_node_cnt -= 1

    # override AdaNode
    def filter_instance_to_leaves(self,
                                  X,
                                  y,
                                  weight,
                                  parent,
                                  parent_branch,
                                  update_splitter_counts=False,
                                  found_nodes=None):
        if found_nodes is None:
            found_nodes = []
        if update_splitter_counts:
            try:
                self._observed_class_distribution[
                    y] += weight  # Dictionary (class_value, weight)
            except KeyError:
                self._observed_class_distribution[y] = weight
        child_index = self.instance_child_index(X)
        if child_index >= 0:
            child = self.get_child(child_index)
            if child is not None:
                child.filter_instance_to_leaves(X, y, weight, parent,
                                                parent_branch,
                                                update_splitter_counts,
                                                found_nodes)
            else:
                found_nodes.append(FoundNode(None, self, child_index))
        if self._alternate_tree is not None:
            self._alternate_tree.filter_instance_to_leaves(
                X, y, weight, self, -999, update_splitter_counts, found_nodes)
Example #10
    def __init__(
            self,
            # Forest parameters
            n_estimators: int = 10,
            max_features='auto',
            aggregation_method: str = 'median',
            weighted_vote_strategy: str = None,
            lambda_value: int = 6,
            drift_detection_method: BaseDriftDetector = ADWIN(0.001),
            warning_detection_method: BaseDriftDetector = ADWIN(0.01),
            drift_detection_criteria: str = 'mse',
            # Tree parameters
            max_byte_size: int = 1048576000,
            memory_estimate_period: int = 2000000,
            grace_period: int = 50,
            split_confidence: float = 0.01,
            tie_threshold: float = 0.05,
            binary_split: bool = False,
            stop_mem_management: bool = False,
            remove_poor_atts: bool = False,
            no_preprune: bool = False,
            leaf_prediction: str = 'perceptron',
            nominal_attributes: list = None,
            learning_ratio_perceptron: float = 0.1,
            learning_ratio_decay: float = 0.001,
            learning_ratio_const: bool = True,
            random_state=None):
        super().__init__(
            n_estimators=n_estimators,
            max_features=max_features,
            lambda_value=lambda_value,
            drift_detection_method=drift_detection_method,
            warning_detection_method=warning_detection_method,
            # Tree parameters
            max_byte_size=max_byte_size,
            memory_estimate_period=memory_estimate_period,
            grace_period=grace_period,
            split_confidence=split_confidence,
            tie_threshold=tie_threshold,
            binary_split=binary_split,
            stop_mem_management=stop_mem_management,
            remove_poor_atts=remove_poor_atts,
            no_preprune=no_preprune,
            leaf_prediction=leaf_prediction,
            nominal_attributes=nominal_attributes,
            random_state=random_state)

        self.learning_ratio_perceptron = learning_ratio_perceptron
        self.learning_ratio_decay = learning_ratio_decay
        self.learning_ratio_const = learning_ratio_const

        if weighted_vote_strategy in [self._MSE, self._MAE, None]:
            self.weighted_vote_strategy = weighted_vote_strategy
        else:
            raise ValueError('Invalid weighted vote strategy: {}'.format(
                weighted_vote_strategy))

        if aggregation_method in [self._MEAN, self._MEDIAN]:
            self.aggregation_method = aggregation_method
        else:
            raise ValueError(
                'Invalid aggregation method: {}'.format(aggregation_method))

        if drift_detection_criteria in [
                self._MSE, self._MAE, self._PREDICTIONS
        ]:
            self.drift_detection_criteria = drift_detection_criteria
        else:
            raise ValueError('Invalid drift detection criteria: {}'.format(
                drift_detection_criteria))
Example #11
class DeepNNPytorch(BaseSKMObject, ClassifierMixin):
    def __init__(
            self,
            class_labels=['0', '1'],  # {'up':0,'down':1}
            use_cpu=True,
            process_as_a_batch=False,
            use_threads=False,
            background_training_after=4):
        # configuration variables (which have the same names as the init parameters)
        self.class_labels = class_labels
        self.use_threads = use_threads
        self.background_training_after = background_training_after

        super().__init__()

        # status variables
        self.class_to_label = {}
        self.foreground_nets = []  # type: List[ANN]
        self.background_nets = []  # type: List[ANN]
        self.drift_detection_method = None
        self.warning_detection_method = None
        self.detected_warnings = 0
        self.samples_seen = 0
        self.last_detected_drift_around = 0
        self.background_learner_threads = []
        self.background_train_results = None
        self.foreground_train_results = None

        self.init_status_values()

    def init_status_values(self):
        # init status variables
        self.class_to_label = {}
        for i in range(len(self.class_labels)):
            self.class_to_label.update({i: self.class_labels[i]})

        for i in range(len(foreground_net_config)):
            self.foreground_nets.append(
                ANN(learning_rate=foreground_net_config[i]['l_rate'],
                    optimizer_type=foreground_net_config[i]['optimizer_type'],
                    class_labels=self.class_labels))

        for i in range(len(background_net_config)):
            self.background_nets.append(
                ANN(learning_rate=background_net_config[i]['l_rate'],
                    optimizer_type=background_net_config[i]['optimizer_type'],
                    class_labels=self.class_labels))

        self.drift_detection_method = ADWIN(delta=1e-3,
                                            direction=ADWIN.DETECT_DOWN)
        self.warning_detection_method = ADWIN(delta=1e-8,
                                              direction=ADWIN.DETECT_DOWN)

        self.detected_warnings = 0
        self.samples_seen = 0
        self.last_detected_drift_around = 0
        self.background_learner_threads = []
        self.background_train_results = None
        self.foreground_train_results = None
        print(self)

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        r, c = get_dimensions(X)
        self.samples_seen += r

        # if self.samples_seen % 2 == 0:
        if len(self.background_learner_threads) == 0:
            if self.samples_seen % self.background_training_after == 0:
                self.background_train_results = {
                    'probas': [None] * len(self.background_nets),
                    'y_hats': [None] * len(self.background_nets),
                    'avg_loss_since_last_detected_drift_by_parent':
                    [0] * len(self.background_nets)
                }
                for i in range(len(self.background_nets)):
                    self.background_learner_threads.append(
                        threading.Thread(target=net_train,
                                         args=(
                                             self.background_nets[i],
                                             X,
                                             r,
                                             c,
                                             y,
                                             self.background_train_results,
                                             i,
                                             self.last_detected_drift_around,
                                         )))

                for i in range(len(self.background_nets)):
                    self.background_learner_threads[i].start()
        else:  # there are live background learner threads
            # wait for self.background_training_after instances to join them
            if self.samples_seen % self.background_training_after == self.background_training_after - 1:
                # TODO: CPython's GIL prevents these threads from running Python bytecode
                #  in parallel: https://docs.python.org/3/library/threading.html
                #  Multiprocessing is an alternative:
                #  https://docs.python.org/3/library/multiprocessing.html#module-multiprocessing
                for i in range(len(self.background_nets)):
                    self.background_learner_threads[i].join()
                self.background_learner_threads = []

                if self.foreground_train_results is not None:
                    min_back = np.argmin(self.background_train_results[
                        'avg_loss_since_last_detected_drift_by_parent'],
                                         axis=0)
                    max_fore = np.argmax(self.foreground_train_results[
                        'avg_loss_since_last_detected_drift_by_parent'],
                                         axis=0)
                    # min_back < max_fore
                    if self.background_train_results['avg_loss_since_last_detected_drift_by_parent'][min_back] \
                            < self.foreground_train_results['avg_loss_since_last_detected_drift_by_parent'][max_fore]:
                        tmp_net = self.foreground_nets[max_fore]
                        self.foreground_nets[max_fore] = self.background_nets[
                            min_back]
                        self.background_nets[min_back] = tmp_net

        self.foreground_train_results = {
            'probas': [None] * len(self.foreground_nets),
            'y_hats': [None] * len(self.foreground_nets),
            'avg_loss_since_last_detected_drift_by_parent':
            [0] * len(self.foreground_nets)
        }
        if self.use_threads:
            t = []
            for i in range(len(self.foreground_nets)):
                t.append(
                    threading.Thread(target=net_train,
                                     args=(
                                         self.foreground_nets[i],
                                         X,
                                         r,
                                         c,
                                         y,
                                         self.foreground_train_results,
                                         i,
                                         self.last_detected_drift_around,
                                     )))

            for i in range(len(self.foreground_nets)):
                t[i].start()

            for i in range(len(self.foreground_nets)):
                t[i].join()
        else:
            for i in range(len(self.foreground_nets)):
                net_train(self.foreground_nets[i], X, r, c, y,
                          self.foreground_train_results, i,
                          self.last_detected_drift_around)

        if self.drift_detection_method is not None:
            # get predicted class and compare with actual class label
            predicted_label = vectorized_map_class_to_label(
                np.argmax(
                    np.sum(self.foreground_train_results['probas'], axis=0) /
                    len(self.foreground_nets),
                    axis=1),
                class_to_label_map=self.class_to_label)
            # TODO: we may have to have a special case for batch processing
            predicted_matches_actual = predicted_label == y

            self.drift_detection_method.add_element(
                1 if predicted_matches_actual else 0)
            if self.warning_detection_method is not None:
                self.warning_detection_method.add_element(
                    1 if predicted_matches_actual else 0)

            # pass the difference to the detector
            # predicted_matches_actual = torch.abs(y-output).detach().numpy()[0]
            # self.drift_detection_method.add_element(predicted_matches_actual)

            # Check if there was a warning
            if self.warning_detection_method is not None:
                if self.warning_detection_method.detected_change():
                    self.detected_warnings += 1
            else:  # warning detector is None, hence drift detector has warning detection capability.
                if self.drift_detection_method.detected_warning_zone():
                    self.detected_warnings += 1  # 3 is the threshold level
            # Check if there was a change
            if self.detected_warnings > 3 and self.drift_detection_method.detected_change():
                print('Drift detected by {} around {} th sample.'.format(
                    self.drift_detection_method, self.samples_seen))
                self.detected_warnings = 0
                self.last_detected_drift_around = self.samples_seen
                # Find the worst learner from the foreground and replace it with the background

        return self

    def predict(self, X):
        y_proba = self.predict_proba(X)
        pred_sum_per_class = np.sum(y_proba, axis=0)
        pred_avgsum_per_class = np.divide(pred_sum_per_class,
                                          len(self.foreground_nets))
        y_pred = np.argmax(pred_avgsum_per_class, axis=0)
        return vectorized_map_class_to_label(
            np.asarray([y_pred]), class_to_label_map=self.class_to_label)

    def predict_proba(self, X):
        r, c = get_dimensions(X)
        probas = np.zeros([len(self.foreground_nets), len(self.class_labels)])
        # if self.use_threads:
        #     t = []
        #     for i in range(len(self.nets)):
        #         t.append(threading.Thread(target=net_predict_proba, args=(self.nets[i], X, r, c, probas, i,)))
        #
        #     for i in range(len(self.nets)):
        #         t[i].start()
        #
        #     for i in range(len(self.nets)):
        #         t[i].join()
        # else:
        for i in range(len(self.foreground_nets)):
            net_predict_proba(self.foreground_nets[i], X, r, c, probas, i)

        return np.asarray(probas)

    def reset(self):
        # configuration variables (which have the same names as the init parameters) should be copied by the caller
        for i in range(len(self.foreground_nets)):
            self.foreground_nets[i].reset()
        return self

    def __str__(self):
        return str(self.__class__) + ": " + str(self.__dict__)

    def stream_ended(self):
        print('\nNetwork configuration:\n'
              '{}\n'
              '=======================================\n'
              'Foreground Nets\n'.format(self))
        print(
            'optimizer_type,learning_rate,accumulated_loss,accumulated_loss_since_last_detected_drift_by_parent'
        )
        for i in range(len(self.foreground_nets)):
            print('{},{},{},{}'.format(
                self.foreground_nets[i].optimizer_type,
                self.foreground_nets[i].learning_rate,
                self.foreground_nets[i].accumulated_loss /
                self.foreground_nets[i].samples_seen, self.foreground_nets[i].
                accumulated_loss_since_last_detected_drift_by_parent /
                self.foreground_nets[i].
                samples_seen_after_last_detected_drift_by_parent))
        print('\nBackground Nets\n')
        for i in range(len(self.background_nets)):
            print('{},{},{},{}'.format(
                self.background_nets[i].optimizer_type,
                self.background_nets[i].learning_rate,
                self.background_nets[i].accumulated_loss /
                self.background_nets[i].samples_seen, self.background_nets[i].
                accumulated_loss_since_last_detected_drift_by_parent /
                self.background_nets[i].
                samples_seen_after_last_detected_drift_by_parent))
        print('\n')
Example #12
def EvaluateModels(stream, run, n_trees, n_samples_max, n_samples_meas, metaModel=None):  # For adaptive experiments

    stream[0].prepare_for_use()

    # Evaluate model (with adaptation or not)
    arf = AdaptiveRandomForest(n_estimators=n_trees, lambda_value=6, grace_period=10,
                               split_confidence=0.1, tie_threshold=0.005,
                               warning_detection_method=ADWIN(delta=0.01),
                               drift_detection_method=ADWIN(delta=0.001))

    modelsList = [arf]
    modelsNames = ['ARF']
    
    # Create the output directory if it does not already exist
    new_dir_path = os.path.join(os.getcwd(), 'ExperimentTuningTrees')
    os.makedirs(new_dir_path, exist_ok=True)

    if metaModel is None:
        fileName = os.path.join(new_dir_path,
                                stream[1] + '_' + str(n_trees) + 'Trees_Adapt_Drift_ARDClassic_Run' + str(run) + '.csv')
    else:
        fileName = os.path.join(new_dir_path,
                                stream[1] + '_AdativeSetting_Trees_Adapt_Drift_ARDClassic_Run' + str(run) + '.csv')
    

    evaluator = EvaluatePrequentialAndAdaptTreesARF(metrics=['accuracy', 'kappa', 'running_time', 'ram_hours'],
                                                    show_plot=False,
                                                    n_wait=n_samples_meas,
                                                    pretrain_size=200,
                                                    max_samples=n_samples_max,
                                                    output_file=fileName,
                                                    metaKB=metaModel)

    # Run evaluation
    model, acc = evaluator.evaluate(stream=stream[0], model=modelsList, model_names=modelsNames)
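EvaluatePrequentialAndAdaptTreesARF is a custom evaluator from that project; with stock skmultiflow the equivalent run would use the standard EvaluatePrequential. A sketch under that assumption:

from skmultiflow.evaluation import EvaluatePrequential

evaluator = EvaluatePrequential(metrics=['accuracy', 'kappa', 'running_time', 'ram_hours'],
                                show_plot=False,
                                n_wait=n_samples_meas,
                                pretrain_size=200,
                                max_samples=n_samples_max,
                                output_file=fileName)
evaluator.evaluate(stream=stream[0], model=modelsList, model_names=modelsNames)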
Example #13
 def __init__(self, initial_stats=None, random_state=None):
     super().__init__(initial_stats)
     self._adwin = ADWIN()
     self.error_change = False
     self._random_state = check_random_state(random_state)
Example #14
class AdaLearningNode(ActiveLearningNodeNBA, AdaNode):
    """ Learning node for Hoeffding Adaptive Tree.

    Uses Adaptive Naive Bayes models.

    Parameters
    ----------
    initial_stats: dict (class_value, weight) or None
        Initial class observations

    """
    def __init__(self, initial_stats=None, random_state=None):
        super().__init__(initial_stats)
        self._adwin = ADWIN()
        self.error_change = False
        self._random_state = check_random_state(random_state)

    @property
    def n_leaves(self):
        return 1

    @property
    def error_estimation(self):
        return self._adwin.estimation

    @property
    def error_width(self):
        return self._adwin.width

    def error_is_null(self):
        return self._adwin is None

    def kill_tree_children(self, hat):
        pass

    def learn_one(self, X, y, weight, tree, parent, parent_branch):
        true_class = y

        if tree.bootstrap_sampling:
            # Perform bootstrap-sampling
            k = self._random_state.poisson(1.0)
            if k > 0:
                weight = weight * k

        class_prediction = get_max_value_key(self.predict_one(X, tree=tree))

        is_correct = (true_class == class_prediction)

        if self._adwin is None:
            self._adwin = ADWIN()

        old_error = self.error_estimation

        # Add element to ADWIN
        self._adwin.add_element(0.0 if is_correct else 1.0)
        # Detect change with Adwin
        self.error_change = self._adwin.detected_change()

        if self.error_change and old_error > self.error_estimation:
            self.error_change = False

        # Update statistics
        super().learn_one(X, y, weight=weight, tree=tree)

        weight_seen = self.total_weight

        if weight_seen - self.last_split_attempt_at >= tree.grace_period:
            tree._attempt_to_split(self, parent, parent_branch)
            self.last_split_attempt_at = weight_seen

    # Override LearningNodeNBAdaptive
    def predict_one(self, X, *, tree=None):
        prediction_option = tree.leaf_prediction
        # MC
        if prediction_option == tree._MAJORITY_CLASS:
            dist = self.stats
        # NB
        elif prediction_option == tree._NAIVE_BAYES:
            dist = do_naive_bayes_prediction(X, self.stats,
                                             self.attribute_observers)
        # NBAdaptive (default)
        else:
            dist = super().predict_one(X, tree=tree)

        dist_sum = sum(dist.values())  # sum all values in dictionary
        normalization_factor = dist_sum * self.error_estimation * self.error_estimation

        if normalization_factor > 0.0:
            dist = normalize_values_in_dict(dist,
                                            normalization_factor,
                                            inplace=False)

        return dist

    # Override AdaNode, New for option votes
    def filter_instance_to_leaves(self,
                                  X,
                                  y,
                                  weight,
                                  parent,
                                  parent_branch,
                                  update_splitter_counts,
                                  found_nodes=None):
        if found_nodes is None:
            found_nodes = []
        found_nodes.append(FoundNode(self, parent, parent_branch))
Example #15
    def learn_one(self, X, y, weight, tree, parent, parent_branch):
        true_class = y
        class_prediction = 0

        leaf = self.filter_instance_to_leaf(X, parent, parent_branch)
        if leaf.node is not None:
            class_prediction = get_max_value_key(
                leaf.node.predict_one(X, tree=tree))

        is_correct = (true_class == class_prediction)

        if self._adwin is None:
            self._adwin = ADWIN()

        old_error = self.error_estimation

        # Add element to ADWIN
        add = 0.0 if is_correct else 1.0

        self._adwin.add_element(add)
        # Detect change with ADWIN
        self.error_change = self._adwin.detected_change()

        if self.error_change and old_error > self.error_estimation:
            self.error_change = False

        # Check condition to build a new alternate tree
        if self.error_change:
            self._alternate_tree = tree._new_learning_node()
            tree.alternate_trees_cnt += 1

        # Condition to replace alternate tree
        elif self._alternate_tree is not None and \
                not self._alternate_tree.error_is_null():
            if self.error_width > tree._ERROR_WIDTH_THRESHOLD \
                    and self._alternate_tree.error_width > tree._ERROR_WIDTH_THRESHOLD:
                old_error_rate = self.error_estimation
                alt_error_rate = self._alternate_tree.error_estimation
                fDelta = .05
                fN = 1.0 / self._alternate_tree.error_width + 1.0 / self.error_width

                bound = math.sqrt(2.0 * old_error_rate *
                                  (1.0 - old_error_rate) *
                                  math.log(2.0 / fDelta) * fN)
                # To check, bound never less than (old_error_rate - alt_error_rate)
                if bound < (old_error_rate - alt_error_rate):
                    tree._active_leaf_node_cnt -= self.n_leaves
                    tree._active_leaf_node_cnt += self._alternate_tree.n_leaves
                    self.kill_tree_children(tree)

                    if parent is not None:
                        parent.set_child(parent_branch, self._alternate_tree)
                    else:
                        # Switch tree root
                        tree._tree_root = tree._tree_root._alternate_tree
                    tree.switch_alternate_trees_cnt += 1
                elif bound < alt_error_rate - old_error_rate:
                    if isinstance(self._alternate_tree, SplitNode):
                        self._alternate_tree.kill_tree_children(tree)
                    else:
                        self._alternate_tree = None
                    tree.pruned_alternate_trees_cnt += 1  # hat.pruned_alternate_trees_cnt to check

        # Learn one sample in alternate tree and child nodes
        if self._alternate_tree is not None:
            self._alternate_tree.learn_one(X, y, weight, tree, parent,
                                           parent_branch)
        child_branch = self.instance_child_index(X)
        child = self.get_child(child_branch)
        if child is not None:
            child.learn_one(X,
                            y,
                            weight,
                            tree,
                            parent=self,
                            parent_branch=child_branch)
        # Instance contains a categorical value previously unseen by the split
        # node
        elif isinstance(self.split_test, NominalAttributeMultiwayTest) and \
                self.split_test.branch_for_instance(X) < 0:
            # Creates a new learning node to encompass the new observed feature
            # value
            leaf_node = tree._new_learning_node()
            branch_id = self.split_test.add_new_branch(
                X[self.split_test.get_atts_test_depends_on()[0]])
            self.set_child(branch_id, leaf_node)
            tree._active_leaf_node_cnt += 1
            leaf_node.learn_one(X, y, weight, tree, parent, parent_branch)
Example #16
class ADWINChangeDetector(BaseDriftDetector):
    """ Drift detection method based in ADWIN.

        Parameters
        ----------
        delta : float (default=0.002)
            The delta parameter for the ADWIN algorithm.

        Notes
        -----
        ADWIN [1]_ (ADaptive WINdowing) is an adaptive sliding window algorithm
        for detecting change and keeping updated statistics about a data stream.
        ADWIN allows algorithms not designed for drifting data to be resistant
        to this phenomenon.

        The general idea is to keep statistics from a window of variable size while
        detecting concept drift.

        The algorithm decides the size of the window by cutting the window at
        different points and analysing the average of some statistic over the two
        resulting sub-windows. If the absolute difference between the two averages
        surpasses a predefined threshold, change is detected at that cut point and
        all data before it is discarded.

        References
        ----------
        .. [1] Bifet, Albert, and Ricard Gavalda. "Learning from time-changing data with adaptive windowing."
           In Proceedings of the 2007 SIAM international conference on data mining, pp. 443-448.
           Society for Industrial and Applied Mathematics, 2007.

        Examples
        --------
        >>> # Imports
        >>> import numpy as np
        >>> from skmultiflow.drift_detection import ADWINChangeDetector
        >>> adwin_change_detector = ADWINChangeDetector()
        >>> # Simulating a data stream of random 1's and 0's
        >>> data_stream = np.random.randint(2, size=2000)
        >>> # Changing the data concept from index 999 to 2000
        >>> for i in range(999, 2000):
        ...     data_stream[i] = np.random.randint(4, high=8)
        >>> # Adding stream elements to ADWIN and verifying if drift occurred
        >>> for i in range(2000):
        ...     adwin_change_detector.add_element(data_stream[i])
        ...     if adwin_change_detector.detected_change():
        ...         print('Change detected in data: ' + str(data_stream[i]) + ' - at index: ' + str(i))

        """
    def __init__(self, delta=.002):
        super().__init__()
        self.adwin = ADWIN(delta=delta)
        super().reset()

    def add_element(self, input_value):
        err_estim = self.adwin.estimation
        self.adwin.add_element(input_value)
        res_input = self.adwin.detected_change()

        self.in_concept_change = False
        self.in_warning_zone = False

        if self.adwin.detected_warning_zone():
            self.in_warning_zone = True
        if res_input:
            if self.adwin.estimation > err_estim:
                self.in_concept_change = True
                self.in_warning_zone = False

        self.estimation = self.adwin.estimation
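A minimal sketch of the variable window described in the Notes above: feed a stream whose mean shifts and ADWIN cuts its window at the change, which shows up as a sudden drop in width.

import numpy as np
from skmultiflow.drift_detection import ADWIN

adwin = ADWIN(delta=0.002)
rng = np.random.RandomState(42)
data = np.concatenate([rng.binomial(1, 0.2, 1000),   # old concept: mean 0.2
                       rng.binomial(1, 0.8, 1000)])  # new concept: mean 0.8
for i, x in enumerate(data):
    adwin.add_element(x)
    if adwin.detected_change():
        print('change at index {}, window width {}'.format(i, adwin.width))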
Example #17
 def __init__(self, delta=.002):
     super().__init__()
     self.adwin = ADWIN(delta=delta)
     super().reset()
Example #18
class KNNADWINClassifier(KNNClassifier):
    """ K-Nearest Neighbors classifier with ADWIN change detector.

    This classifier is an improvement over the regular KNNClassifier,
    as it is resistant to concept drift. It utilises the ADWIN change
    detector to decide which samples to keep and which to forget,
    and by doing so regulates the sample window size.

    To know more about the ADWIN change detector, please see
    :class:`skmultiflow.drift_detection.ADWIN`

    It uses the regular KNNClassifier as a base class, with the
    major difference that this class keeps a variable-size window
    instead of a fixed-size one, and updates the ADWIN algorithm
    at each partial_fit call.

    Parameters
    ----------
    n_neighbors: int (default=5)
        The number of nearest neighbors to search for.

    max_window_size: int (default=1000)
        The maximum size of the window storing the last viewed samples.

    leaf_size: int (default=30)
        The maximum number of samples that can be stored in one leaf node,
        which determines from which point the algorithm will switch for a
        brute-force approach. The bigger this number the faster the tree
        construction time, but the slower the query time will be.

    metric: string or sklearn.DistanceMetric object
        sklearn.KDTree parameter. The distance metric to use for the KDTree.
        Default='euclidean'. KNNClassifier.valid_metrics() gives a list of
        the metrics which are valid for KDTree.

    Notes
    -----
    This estimator is not optimal for a mixture of categorical and numerical
    features. This implementation treats all features from a given stream as
    numerical.

    Examples
    --------
    >>> # Imports
    >>> from skmultiflow.lazy import KNNADWINClassifier
    >>> from skmultiflow.data import ConceptDriftStream
    >>> # Setting up the stream
    >>> stream = ConceptDriftStream(position=2500, width=100, random_state=1)
    >>> # Setting up the KNNAdwin classifier
    >>> knn_adwin = KNNADWINClassifier(n_neighbors=8, leaf_size=40, max_window_size=1000)
    >>> # Keep track of sample count and correct prediction count
    >>> n_samples = 0
    >>> corrects = 0
    >>> while n_samples < 5000:
    ...     X, y = stream.next_sample()
    ...     pred = knn_adwin.predict(X)
    ...     if y[0] == pred[0]:
    ...         corrects += 1
    ...     knn_adwin = knn_adwin.partial_fit(X, y)
    ...     n_samples += 1
    >>>
    >>> # Displaying the results
    >>> print('KNNADWINClassifier usage example')
    >>> print(str(n_samples) + ' samples analyzed.')
    5000 samples analyzed.
    >>> print("KNNADWINClassifier's performance: " + str(corrects/n_samples))
    KNNADWINClassifier's performance: 0.5714

    """
    def __init__(self,
                 n_neighbors=5,
                 max_window_size=1000,
                 leaf_size=30,
                 metric='euclidean'):
        super().__init__(n_neighbors=n_neighbors,
                         max_window_size=max_window_size,
                         leaf_size=leaf_size,
                         metric=metric)
        self.adwin = ADWIN()

    def reset(self):
        """ Reset the estimator.

        Resets the ADWIN Drift detector as well as the KNN model.

        Returns
        -------
        KNNADWINClassifier
            self

        """
        self.adwin = ADWIN()
        return super().reset()

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """ Partially (incrementally) fit the model.

        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)
            The data upon which the algorithm will create its model.

        y: Array-like
            An array-like containing the classification targets for all
            samples in X.

        classes: numpy.ndarray, optional (default=None)
            Array with all possible/known classes.

        sample_weight: Not used.

        Returns
        -------
        KNNADWINClassifier
            self

        Notes
        -----
        Partially fits the model by updating the window with new samples
        while also updating the ADWIN algorithm. If ADWIN detects a change,
        the window is shrunk in such a way that samples from the previous
        concept are dropped.

        """
        r, c = get_dimensions(X)
        if classes is not None:
            self.classes = list(set().union(self.classes, classes))

        for i in range(r):
            self.data_window.add_sample(X[i], y[i])
            if self.data_window.size >= self.n_neighbors:
                correctly_classifies = 1 if self.predict(np.asarray(
                    [X[i]])) == y[i] else 0
                self.adwin.add_element(correctly_classifies)
            else:
                self.adwin.add_element(0)

        if self.data_window.size >= self.n_neighbors:
            if self.adwin.detected_change():
                if self.adwin.width < self.data_window.size:
                    for i in range(self.data_window.size, self.adwin.width,
                                   -1):
                        self.data_window.delete_oldest_sample()
        return self
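The shrink step at the end of partial_fit keeps data_window no larger than ADWIN's own width after a drift. A short sketch that exposes this (stream position and window sizes are illustrative):

from skmultiflow.data import ConceptDriftStream
from skmultiflow.lazy import KNNADWINClassifier

stream = ConceptDriftStream(position=1000, width=50, random_state=1)
knn = KNNADWINClassifier(n_neighbors=5, max_window_size=400)
for _ in range(2000):
    X, y = stream.next_sample()
    knn.partial_fit(X, y)
# After the drift around sample 1000, samples from the old concept have been dropped
print(knn.data_window.size, knn.adwin.width)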
Example #19
adwin_param = [0.002, 0.005, 0.01]
ddm_param = [3, 5, 7]
ks_param1 = [100, 150, 200]
ks_param2 = [30, 50, 100]
ph_param1 = [25, 50, 75]
ph_param2 = [0.005, 0.01, 0.02]

knn = KNNClassifier()

stream = driftStreams[0]

for i in range(0, 3):
    trainX, trainY = stream.next_sample(2000)
    knn.partial_fit(trainX, trainY)

    adwin = ADWIN(delta=adwin_param[i])
    ddm = DDM(out_control_level=ddm_param[i])
    kswin1 = KSWIN(window_size=ks_param1[i])
    # kswin2 = KSWIN(stat_size=ks_param2[i])
    ph1 = PageHinkley(threshold=ph_param1[i])
    ph2 = PageHinkley(delta=ph_param2[i])

    adwin_results = []
    ddm_results = []
    kswin1_results = []
    kswin2_results = []
    ph1_results = []
    ph2_results = []

    n_samples = 0
    corrects = 0
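The snippet is truncated at the source; a hedged sketch of how such a detector-comparison loop typically continues, feeding 0/1 correctness into each detector and recording the sample indices where each one fires:

while n_samples < 1000 and stream.has_more_samples():
    X, y = stream.next_sample()
    correct = int(knn.predict(X)[0] == y[0])
    corrects += correct
    adwin.add_element(correct)
    if adwin.detected_change():
        adwin_results.append(n_samples)
    ddm.add_element(1 - correct)  # DDM expects the error signal (1 = misclassified)
    if ddm.detected_change():
        ddm_results.append(n_samples)
    knn.partial_fit(X, y)
    n_samples += 1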
Example #20
    def __init__(
            self,
            base_estimator=HoeffdingTreeClassifier(grace_period=50,
                                                   split_confidence=0.01),
            n_estimators: int = 100,
            subspace_mode: str = "percentage",
            subspace_size: int = 60,
            training_method: str = "randompatches",
            lam: float = 6.0,
            drift_detection_method: BaseDriftDetector = ADWIN(delta=1e-5),
            warning_detection_method: BaseDriftDetector = ADWIN(delta=1e-4),
            disable_weighted_vote: bool = False,
            disable_drift_detection: bool = False,
            disable_background_learner: bool = False,
            nominal_attributes=None,
            random_state=None):

        self.base_estimator = base_estimator  # Not restricted to a specific base estimator.
        self.n_estimators = n_estimators
        if subspace_mode not in {
                self._FEATURES_SQRT, self._FEATURES_SQRT_INV,
                self._FEATURES_PERCENT, self._FEATURES_M
        }:
            raise ValueError(
                "Invalid subspace_mode: {}.\n"
                "Valid options are: {}".format(
                    subspace_mode, {
                        self._FEATURES_M, self._FEATURES_SQRT,
                        self._FEATURES_SQRT_INV, self._FEATURES_PERCENT
                    }))
        self.subspace_mode = subspace_mode
        self.subspace_size = subspace_size
        if training_method not in {
                self._TRAIN_RESAMPLING, self._TRAIN_RANDOM_PATCHES,
                self._TRAIN_RANDOM_SUBSPACES
        }:
            raise ValueError(
                "Invalid training_method: {}.\n"
                "Valid options are: {}".format(
                    training_method, {
                        self._TRAIN_RANDOM_PATCHES,
                        self._TRAIN_RANDOM_SUBSPACES, self._TRAIN_RESAMPLING
                    }))
        self.training_method = training_method
        self.lam = lam
        self.drift_detection_method = drift_detection_method
        self.warning_detection_method = warning_detection_method
        self.disable_weighted_vote = disable_weighted_vote
        self.disable_drift_detection = disable_drift_detection
        self.disable_background_learner = disable_background_learner
        # Single option (accuracy) for drift detection criteria. Could be extended in the future.
        self.drift_detection_criteria = 'accuracy'
        self.nominal_attributes = nominal_attributes if nominal_attributes else []
        self.random_state = random_state
        # self._random_state is the actual object used internally
        self._random_state = check_random_state(self.random_state)
        self.ensemble = None

        self._n_samples_seen = 0
        self._subspaces = None

        self._base_performance_evaluator = ClassificationMeasurements()
        self._base_learner_class = StreamingRandomPatchesBaseLearner
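A usage sketch of this constructor, assuming it belongs to skmultiflow's StreamingRandomPatchesClassifier (the public class whose parameters match the signature above):

from skmultiflow.drift_detection import ADWIN
from skmultiflow.meta import StreamingRandomPatchesClassifier

srp = StreamingRandomPatchesClassifier(n_estimators=10,
                                       subspace_mode='percentage',
                                       subspace_size=60,
                                       drift_detection_method=ADWIN(delta=1e-5),
                                       warning_detection_method=ADWIN(delta=1e-4))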
Example #21
    def learn_from_instance(self, X, y, weight, hat, parent, parent_branch):
        true_class = y
        class_prediction = 0

        leaf = self.filter_instance_to_leaf(X, parent, parent_branch)
        if leaf.node is not None:
            class_prediction = get_max_value_key(
                leaf.node.get_class_votes(X, hat))

        bl_correct = (true_class == class_prediction)

        if self._estimation_error_weight is None:
            self._estimation_error_weight = ADWIN()

        old_error = self.get_error_estimation()

        # Add element to ADWIN
        add = 0.0 if (bl_correct is True) else 1.0

        self._estimation_error_weight.add_element(add)
        # Detect change with ADWIN
        self.error_change = self._estimation_error_weight.detected_change()

        if self.error_change is True and old_error > self.get_error_estimation():
            self.error_change = False

        # Check condition to build a new alternate tree
        if self.error_change is True:
            self._alternate_tree = hat._new_learning_node()
            hat.alternate_trees_cnt += 1

        # Condition to replace alternate tree
        elif self._alternate_tree is not None and \
                self._alternate_tree.is_null_error() is False:
            if self.get_error_width() > ERROR_WIDTH_THRESHOLD \
                    and self._alternate_tree.get_error_width() > ERROR_WIDTH_THRESHOLD:
                old_error_rate = self.get_error_estimation()
                alt_error_rate = self._alternate_tree.get_error_estimation()
                fDelta = .05
                fN = 1.0 / self._alternate_tree.get_error_width() + 1.0 / (
                    self.get_error_width())

                bound = math.sqrt(2.0 * old_error_rate *
                                  (1.0 - old_error_rate) *
                                  math.log(2.0 / fDelta) * fN)
                # To check, bound never less than (old_error_rate - alt_error_rate)
                if bound < (old_error_rate - alt_error_rate):
                    hat._active_leaf_node_cnt -= self.number_leaves()
                    hat._active_leaf_node_cnt += self._alternate_tree.number_leaves()
                    self.kill_tree_children(hat)

                    if parent is not None:
                        parent.set_child(parent_branch, self._alternate_tree)
                    else:
                        # Switch tree root
                        hat._tree_root = hat._tree_root.alternateTree
                    hat.switch_alternate_trees_cnt += 1
                elif bound < alt_error_rate - old_error_rate:
                    if isinstance(self._alternate_tree, ActiveLearningNode):
                        self._alternate_tree = None
                    elif isinstance(self._alternate_tree,
                                    InactiveLearningNode):
                        self._alternate_tree = None
                    else:
                        self._alternate_tree.kill_tree_children(hat)
                    hat.pruned_alternate_trees_cnt += 1  # hat.pruned_alternate_trees_cnt to check

        # Learn_From_Instance alternate Tree and Child nodes
        if self._alternate_tree is not None:
            self._alternate_tree.learn_from_instance(X, y, weight, hat, parent,
                                                     parent_branch)
        child_branch = self.instance_child_index(X)
        child = self.get_child(child_branch)
        if child is not None:
            child.learn_from_instance(X, y, weight, hat, self, child_branch)
        # Instance contains a categorical value previously unseen by the split
        # node
        elif isinstance(self.get_split_test(), NominalAttributeMultiwayTest) and \
                self.get_split_test().branch_for_instance(X) < 0:
            # Creates a new learning node to encompass the new observed feature
            # value
            leaf_node = hat._new_learning_node()
            branch_id = self.get_split_test().add_new_branch(
                X[self.get_split_test().get_atts_test_depends_on()[0]])
            self.set_child(branch_id, leaf_node)
            hat._active_leaf_node_cnt += 1
            leaf_node.learn_from_instance(X, y, weight, hat, parent,
                                          parent_branch)
Example #22
    async def predict(self, X: PredictionData) -> bool:
        detector = self.detectors.get(X.identifier, ADWIN(self.delta))
        self.detectors[X.identifier] = detector
        detector.add_element(X.data["sensor"])

        return detector.detected_change()
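
For reference, a minimal standalone sketch of the ADWIN loop the snippet above builds on (assuming scikit-multiflow's ADWIN API; the stream below is synthetic):

import numpy as np
from skmultiflow.drift_detection import ADWIN

adwin = ADWIN(delta=0.002)
rng = np.random.RandomState(12345)
# A stream whose mean shifts abruptly halfway through
stream = np.concatenate([rng.normal(0.0, 0.1, 500),
                         rng.normal(1.0, 0.1, 500)])
for t, value in enumerate(stream):
    adwin.add_element(value)      # feed one observation
    if adwin.detected_change():   # poll the detector
        print("Change detected at index", t)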
Example #23
    class AdaSplitNode(SplitNode, NewNode):
        def __init__(self, split_test, class_observations):
            super().__init__(split_test, class_observations)
            self._estimation_error_weight = ADWIN()
            self._alternate_tree = None
            self.error_change = False
            self._random_seed = 1
            self._classifier_random = check_random_state(self._random_seed)

        # Override NewNode
        def number_leaves(self):
            num_of_leaves = 0
            for child in self._children:
                if child is not None:
                    num_of_leaves += child.number_leaves()

            return num_of_leaves

        # Override NewNode
        def get_error_estimation(self):
            return self._estimation_error_weight.estimation

        # Override NewNode
        def get_error_width(self):
            w = 0.0
            if self.is_null_error() is False:
                w = self._estimation_error_weight.width

            return w

        # Override NewNode
        def is_null_error(self):
            return self._estimation_error_weight is None

        # Override NewNode
        def learn_from_instance(self, X, y, weight, hat, parent, parent_branch):
            true_class = y
            class_prediction = 0

            leaf = self.filter_instance_to_leaf(X, parent, parent_branch)
            if leaf.node is not None:
                class_prediction = get_max_value_key(leaf.node.get_class_votes(X, hat))

            bl_correct = (true_class == class_prediction)

            if self._estimation_error_weight is None:
                self._estimation_error_weight = ADWIN()

            old_error = self.get_error_estimation()

            # Add element to ADWIN
            add = 0.0 if (bl_correct is True) else 1.0

            self._estimation_error_weight.add_element(add)
            # Detect change with ADWIN
            self.error_change = self._estimation_error_weight.detected_change()

            if self.error_change is True and old_error > self.get_error_estimation():
                self.error_change = False

            # Check condition to build a new alternate tree
            if self.error_change is True:
                self._alternate_tree = hat._new_learning_node()
                hat.alternate_trees_cnt += 1

            # Condition to replace alternate tree
            elif self._alternate_tree is not None and self._alternate_tree.is_null_error() is False:
                if self.get_error_width() > error_width_threshold \
                        and self._alternate_tree.get_error_width() > error_width_threshold:
                    old_error_rate = self.get_error_estimation()
                    alt_error_rate = self._alternate_tree.get_error_estimation()
                    fDelta = .05
                    fN = 1.0 / self._alternate_tree.get_error_width() + 1.0 / (self.get_error_width())

                    bound = math.sqrt(2.0 * old_error_rate * (1.0 - old_error_rate) * math.log(2.0 / fDelta) * fN)
                    # To check, bound never less than (old_error_rate - alt_error_rate)
                    if bound < (old_error_rate - alt_error_rate):
                        hat._active_leaf_node_cnt -= self.number_leaves()
                        hat._active_leaf_node_cnt += self._alternate_tree.number_leaves()
                        self.kill_tree_children(hat)

                        if parent is not None:
                            parent.set_child(parent_branch, self._alternate_tree)
                        else:
                            # Switch tree root
                            hat._tree_root = hat._tree_root._alternate_tree
                        hat.switch_alternate_trees_cnt += 1
                    elif bound < alt_error_rate - old_error_rate:
                        if isinstance(self._alternate_tree,
                                      (TS_HAT.ActiveLearningNode,
                                       TS_HAT.InactiveLearningNode)):
                            self._alternate_tree = None
                        else:
                            self._alternate_tree.kill_tree_children(hat)
                        hat.pruned_alternate_trees_cnt += 1  # hat.pruned_alternate_trees_cnt to check

            # Learn_From_Instance alternate Tree and Child nodes
            if self._alternate_tree is not None:
                self._alternate_tree.learn_from_instance(X, y, weight, hat, parent, parent_branch)
            child_branch = self.instance_child_index(X)
            child = self.get_child(child_branch)
            if child is not None:
                child.learn_from_instance(X, y, weight, hat, self, child_branch)

        # Override NewNode
        def kill_tree_children(self, hat):
            for child in self._children:
                if child is not None:
                    # Delete alternate tree if it exists
                    if isinstance(child, TS_HAT.AdaSplitNode) and child._alternate_tree is not None:
                        child._alternate_tree.kill_tree_children(hat)
                        hat.pruned_alternate_trees_cnt += 1
                    # Recursive delete of SplitNodes
                    if isinstance(child, TS_HAT.AdaSplitNode):
                        child.kill_tree_children(hat)

                    if isinstance(child, TS_HAT.ActiveLearningNode):
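                        # NOTE: rebinding the loop variable only drops this
                        # local reference; the child is not removed from
                        # self._children, only the leaf counters are updated.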
                        child = None
                        hat._active_leaf_node_cnt -= 1
                    elif isinstance(child, TS_HAT.InactiveLearningNode):
                        child = None
                        hat._inactive_leaf_node_cnt -= 1

        # override NewNode
        def filter_instance_to_leaves(self, X, y, weight, parent, parent_branch,
                                      update_splitter_counts=False, found_nodes=None):
            if found_nodes is None:
                found_nodes = []
            if update_splitter_counts:
                try:
                    self._observed_class_distribution[y] += weight  # Dictionary (class_value, weight)
                except KeyError:
                    self._observed_class_distribution[y] = weight
            child_index = self.instance_child_index(X)
            if child_index >= 0:
                child = self.get_child(child_index)
                if child is not None:
                    child.filter_instance_to_leaves(X, y, weight, parent, parent_branch,
                                                    update_splitter_counts, found_nodes)
                else:
                    found_nodes.append(HoeffdingTree.FoundNode(None, self, child_index))
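            # parent_branch=-999 is a sentinel marking nodes reached through
            # an alternate tree rather than through a regular child branch.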
            if self._alternate_tree is not None:
                self._alternate_tree.filter_instance_to_leaves(X, y, weight, self, -999,
                                                               update_splitter_counts, found_nodes)
Example #24
    def partial_fit(self, X, y, classes=None, weight=None):
        """ partial_fit

        Partially fits the model, based on the X and y matrix.

        Since it is an ensemble learner, if X and y matrices containing more
        than one sample are passed, the algorithm will partial fit the model
        one sample at a time.

        Each sample is trained by each classifier a total of K times, where K
        is drawn from a Poisson(:math:`\lambda`) distribution. :math:`\lambda` is
        updated after every example using :math:`\lambda_{sc}` if the estimator
        correctly classifies the example, or :math:`\lambda_{sw}` otherwise.

        Parameters
        ----------
        X: Numpy.ndarray of shape (n_samples, n_features)
            Features matrix used for partially updating the model.

        y: Array-like
            An array-like of all the class labels for the samples in X.

        classes: list
            List of all existing classes. This is an optional parameter, except
            for the first partial_fit call, when it becomes obligatory.

        weight: Array-like
            Instance weight. If not provided, uniform weights are assumed.

        Raises
        ------
        ValueError: A ValueError is raised if the 'classes' parameter is not
        passed in the first partial_fit call, or if it is passed in further
        calls but differs from the initial classes list.
        """
        if self.classes is None:
            if classes is None:
                raise ValueError(
                    "The first partial_fit call should pass all the classes.")
            else:
                self.classes = classes

        if self.classes is not None and classes is not None:
            if set(self.classes) != set(classes):
                raise ValueError(
                    "The classes passed to the partial_fit function differ "
                    "from those passed earlier.")

        self.__adjust_ensemble_size()

        r, _ = get_dimensions(X)
        for j in range(r):
            change_detected = False
            for i in range(self.n_estimators):
                a = (i + 1) / self.n_estimators
                if y[j] == 1:
                    lam = a * self.sampling_rate
                else:
                    lam = a
                k = self.random_state.poisson(lam)
                if k > 0:
                    for b in range(k):
                        self.ensemble[i].partial_fit([X[j]], [y[j]], classes,
                                                     weight)

                if self.drift_detection:
                    try:
                        pred = self.ensemble[i].predict(X)
                        error_estimation = self.adwin_ensemble[i].estimation
                        for m in range(r):
                            if pred[m] is not None:
                                if pred[m] == y[m]:
                                    self.adwin_ensemble[i].add_element(1)
                                else:
                                    self.adwin_ensemble[i].add_element(0)
                        if self.adwin_ensemble[i].detected_change():
                            if self.adwin_ensemble[i].estimation > error_estimation:
                                change_detected = True
                    except ValueError:
                        change_detected = False

            if change_detected and self.drift_detection:
                max_threshold = 0.0
                i_max = -1
                for i in range(self.n_estimators):
                    if max_threshold < self.adwin_ensemble[i].estimation:
                        max_threshold = self.adwin_ensemble[i].estimation
                        i_max = i
                if i_max != -1:
                    self.ensemble[i_max].reset()
                    self.adwin_ensemble[i_max] = ADWIN()
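
The docstring above describes per-sample Poisson(lambda) resampling; a minimal self-contained sketch of that idea outside the ensemble (GaussianNB is a hypothetical stand-in for the base estimator):

import numpy as np
from sklearn.naive_bayes import GaussianNB  # hypothetical stand-in learner

rng = np.random.RandomState(1)
X = rng.normal(size=(200, 3))
y = rng.randint(2, size=200)

base = GaussianNB()
lam = 1.0  # the ensembles above scale lam per estimator and class balance
for xi, yi in zip(X, y):
    k = rng.poisson(lam)  # each sample is trained k ~ Poisson(lam) times
    for _ in range(k):
        base.partial_fit([xi], [yi], classes=[0, 1])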
Example #25
    class AdaLearningNode(LearningNodeNBAdaptive, NewNode):
        def __init__(self, initial_class_observations):
            super().__init__(initial_class_observations)
            self._estimation_error_weight = ADWIN()
            self.error_change = False
            self._randomSeed = 1
            self._classifier_random = check_random_state(self._randomSeed)

        # Override NewNode
        def number_leaves(self):
            return 1

        # Override NewNode
        def get_error_estimation(self):
            return self._estimation_error_weight.estimation

        # Override NewNode
        def get_error_width(self):
            return self._estimation_error_weight.width

        # Override NewNode
        def is_null_error(self):
            return self._estimation_error_weight is None

        def kill_tree_children(self, hat):
            pass

        # Override NewNode
        def learn_from_instance(self, X, y, weight, hat, parent, parent_branch):
            true_class = y

            # k = self._classifier_random.poisson(1.0)
            # if k > 0:
            #     weight = weight * k

            tmp = self.get_class_votes(X, hat)

            class_prediction = get_max_value_key(tmp)

            bl_correct = (true_class == class_prediction)

            if self._estimation_error_weight is None:
                self._estimation_error_weight = ADWIN()

            old_error = self.get_error_estimation()

            # Add element to Adwin
            add = 0.0 if (bl_correct is True) else 1.0

            self._estimation_error_weight.add_element(add)
            # Detect change with Adwin
            self.error_change = self._estimation_error_weight.detected_change()

            if self.error_change is True and old_error > self.get_error_estimation():
                self.error_change = False

            # Update statistics
            super().learn_from_instance(X, y, weight, hat)

            # call ActiveLearningNode
            weight_seen = self.get_weight_seen()

            if weight_seen - self.get_weight_seen_at_last_split_evaluation() >= hat.grace_period:
                hat._attempt_to_split(self, parent, parent_branch)
                self.set_weight_seen_at_last_split_evaluation(weight_seen)

        # Override LearningNodeNBAdaptive
        def get_class_votes(self, X, ht):
            # dist = {}
            prediction_option = ht.leaf_prediction
            # MC
            if prediction_option == MAJORITY_CLASS:
                dist = self.get_observed_class_distribution()
            # NB
            elif prediction_option == NAIVE_BAYES:
                dist = do_naive_bayes_prediction(X, self._observed_class_distribution, self._attribute_observers)
            # NBAdaptive
            else:
                if self._mc_correct_weight > self._nb_correct_weight:
                    dist = self.get_observed_class_distribution()
                else:
                    dist = do_naive_bayes_prediction(X, self._observed_class_distribution, self._attribute_observers)

            dist_sum = sum(dist.values())  # sum all values in dictionary
            normalization_factor = dist_sum * self.get_error_estimation() * self.get_error_estimation()

            if normalization_factor > 0.0:
                normalize_values_in_dict(dist, normalization_factor)

            return dist

        # Override NewNode, New for option votes
        def filter_instance_to_leaves(self, X, y, weight, parent, parent_branch,
                                      update_splitter_counts, found_nodes=None):
            if found_nodes is None:
                found_nodes = []
            found_nodes.append(HoeffdingTree.FoundNode(self, parent, parent_branch))
Example #26
    def __partial_fit(self, X, y):
        if self.init_matrix_codes:
            self.matrix_codes = np.zeros(
                (self.actual_n_estimators, len(self.classes)), dtype=int)
            for i in range(self.actual_n_estimators):
                n_zeros = 0
                n_ones = 0
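                # Re-draw the code row until ones and zeros are balanced:
                # equal counts for an even number of estimators, differing by
                # at most one for an odd number.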
                while (n_ones - n_zeros) * (n_ones - n_zeros) > self.actual_n_estimators % 2:
                    n_zeros = 0
                    n_ones = 0
                    for j in range(len(self.classes)):
                        if (j == 1) and (len(self.classes) == 2):
                            result = 1 - self.matrix_codes[i][0]
                        else:
                            result = self._random_state.randint(2)

                        self.matrix_codes[i][j] = result
                        if result == 1:
                            n_ones += 1
                        else:
                            n_zeros += 1
            self.init_matrix_codes = False

        change_detected = False
        X_cp, y_cp = cp.deepcopy(X), cp.deepcopy(y)
        for i in range(self.actual_n_estimators):
            k = 0.0

            if self.leverage_algorithm == self.LEVERAGE_ALGORITHMS[0]:
                k = self._random_state.poisson(self.w)

            elif self.leverage_algorithm == self.LEVERAGE_ALGORITHMS[1]:
                error = self.adwin_ensemble[i].estimation
                pred = self.ensemble[i].predict(np.asarray([X]))
                if pred is None:
                    k = 1.0
                elif pred[0] != y:
                    k = 1.0
                elif (error != 1.0 and
                      self._random_state.rand() < (error / (1.0 - error))):
                    k = 1.0
                else:
                    k = 0.0

            elif self.leverage_algorithm == self.LEVERAGE_ALGORITHMS[2]:
                w = 1.0
                k = 0.0 if (self._random_state.randint(2) == 1) else w

            elif self.leverage_algorithm == self.LEVERAGE_ALGORITHMS[3]:
                w = 1.0
                k = 1.0 + self._random_state.poisson(w)

            elif self.leverage_algorithm == self.LEVERAGE_ALGORITHMS[4]:
                w = 1.0
                k = self._random_state.poisson(1)
                k = w if k > 0 else 0

            y_coded = y_cp  # keep the uncoded label; coding must not leak across estimators
            if k > 0:
                if self.enable_code_matrix:
                    y_coded = self.matrix_codes[i][int(y_cp)]
                for _ in range(int(k)):
                    self.ensemble[i].partial_fit(np.asarray([X_cp]),
                                                 np.asarray([y_coded]),
                                                 self.classes)

            try:
                pred = self.ensemble[i].predict(np.asarray([X]))
                if pred is not None:
                    add = 1 if (pred[0] == y_coded) else 0
                    error = self.adwin_ensemble[i].estimation
                    self.adwin_ensemble[i].add_element(add)
                    if self.adwin_ensemble[i].detected_change():
                        if self.adwin_ensemble[i].estimation > error:
                            change_detected = True
            except ValueError:
                change_detected = False

        if change_detected:
            self.n_detected_changes += 1
            max_threshold = 0.0
            i_max = -1
            for i in range(self.actual_n_estimators):
                if max_threshold < self.adwin_ensemble[i].estimation:
                    max_threshold = self.adwin_ensemble[i].estimation
                    i_max = i
            if i_max != -1:
                self.ensemble[i_max].reset()
                self.adwin_ensemble[i_max] = ADWIN(self.delta)
        return self
Example #27
class AdaLearningNode(LearningNodeNBAdaptive, AdaNode):
    """ Learning node for Hoeffding Adaptive Tree that uses Adaptive Naive
    Bayes models.

    Parameters
    ----------
    initial_class_observations: dict (class_value, weight) or None
        Initial class observations

    """
    def __init__(self, initial_class_observations, random_state=None):
        super().__init__(initial_class_observations)
        self._estimation_error_weight = ADWIN()
        self.error_change = False
        self._random_state = check_random_state(random_state)

    # Override AdaNode
    def number_leaves(self):
        return 1

    # Override AdaNode
    def get_error_estimation(self):
        return self._estimation_error_weight.estimation

    # Override AdaNode
    def get_error_width(self):
        return self._estimation_error_weight.width

    # Override AdaNode
    def is_null_error(self):
        return self._estimation_error_weight is None

    def kill_tree_children(self, hat):
        pass

    # Override AdaNode
    def learn_from_instance(self, X, y, weight, hat, parent, parent_branch):
        true_class = y

        if hat.bootstrap_sampling:
            # Perform bootstrap-sampling
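            # k ~ Poisson(1) emulates online bootstrap resampling:
            # multiplying the weight by k is equivalent to presenting this
            # instance to the node k times.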
            k = self._random_state.poisson(1.0)
            if k > 0:
                weight = weight * k

        class_prediction = get_max_value_key(self.get_class_votes(X, hat))

        bl_correct = (true_class == class_prediction)

        if self._estimation_error_weight is None:
            self._estimation_error_weight = ADWIN()

        old_error = self.get_error_estimation()

        # Add element to Adwin
        add = 0.0 if bl_correct else 1.0

        self._estimation_error_weight.add_element(add)
        # Detect change with Adwin
        self.error_change = self._estimation_error_weight.detected_change()

        if self.error_change and old_error > self.get_error_estimation():
            self.error_change = False

        # Update statistics
        super().learn_from_instance(X, y, weight, hat)

        # call ActiveLearningNode
        weight_seen = self.get_weight_seen()

        if weight_seen - self.get_weight_seen_at_last_split_evaluation() >= hat.grace_period:
            hat._attempt_to_split(self, parent, parent_branch)
            self.set_weight_seen_at_last_split_evaluation(weight_seen)

    # Override LearningNodeNBAdaptive
    def get_class_votes(self, X, ht):
        # dist = {}
        prediction_option = ht.leaf_prediction
        # MC
        if prediction_option == ht._MAJORITY_CLASS:
            dist = self.get_observed_class_distribution()
        # NB
        elif prediction_option == ht._NAIVE_BAYES:
            dist = do_naive_bayes_prediction(X, self._observed_class_distribution,
                                             self._attribute_observers)
        # NBAdaptive (default)
        else:
            if self._mc_correct_weight > self._nb_correct_weight:
                dist = self.get_observed_class_distribution()
            else:
                dist = do_naive_bayes_prediction(X, self._observed_class_distribution,
                                                 self._attribute_observers)

        dist_sum = sum(dist.values())  # sum all values in dictionary
        normalization_factor = dist_sum * self.get_error_estimation() * self.get_error_estimation()
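        # Scaling by the squared error estimate makes leaves with a lower
        # estimated error contribute more when option votes are combined.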

        if normalization_factor > 0.0:
            dist = normalize_values_in_dict(dist, normalization_factor, inplace=False)

        return dist

    # Override AdaNode, New for option votes
    def filter_instance_to_leaves(self, X, y, weight, parent, parent_branch,
                                  update_splitter_counts, found_nodes=None):
        if found_nodes is None:
            found_nodes = []
        found_nodes.append(FoundNode(self, parent, parent_branch))
Example #28
    def __partial_fit(self, X, y):
        if self.init_matrix_codes and self.enable_code_matrix:
            self.__init_output_codes()

        change_detected = False
        for i in range(self.actual_n_estimators):

            # leveraging_bag - Leveraging Bagging
            if self.leverage_algorithm == self._LEVERAGE_ALGORITHMS[0]:
                k = self._random_state.poisson(self.w)

            # leveraging_bag_me - Misclassification Error
            elif self.leverage_algorithm == self._LEVERAGE_ALGORITHMS[1]:
                error = self.adwin_ensemble[i].estimation
                pred = self.ensemble[i].predict(np.asarray([X]))
                if pred is None:
                    k = 1.0
                elif pred[0] != y:
                    k = 1.0
                elif (error != 1.0 and
                      self._random_state.rand() < (error / (1.0 - error))):
                    k = 1.0
                else:
                    k = 0.0

            # leveraging_bag_half - Resampling without replacement for
            #                       half of the instances
            elif self.leverage_algorithm == self._LEVERAGE_ALGORITHMS[2]:
                w = 1.0
                k = 0.0 if (self._random_state.randint(2) == 1) else w

            # leveraging_bag_wt - Without taking out all instances
            elif self.leverage_algorithm == self._LEVERAGE_ALGORITHMS[3]:
                w = 1.0
                k = 1.0 + self._random_state.poisson(w)

            # leveraging_subag - Resampling without replacement
            elif self.leverage_algorithm == self._LEVERAGE_ALGORITHMS[4]:
                w = 1.0
                k = self._random_state.poisson(1)
                k = w if k > 0 else 0

            else:
                raise RuntimeError("Invalid option for leverage_algorithm: '{}'\n"
                                   "Valid options are: {}".format(self.leverage_algorithm,
                                                                  self._LEVERAGE_ALGORITHMS))

            y_coded = cp.deepcopy(y)
            if k > 0:
                classes = self.classes
                if self.enable_code_matrix:
                    y_coded = self.matrix_codes[i][int(y)]
                    classes = [0, 1]
                for _ in range(int(k)):
                    self.ensemble[i].partial_fit(X=np.asarray([X]), y=np.asarray([y_coded]),
                                                 classes=classes)

            pred = self.ensemble[i].predict(np.asarray([X]))
            if pred is not None:
                add = 0 if (pred[0] == y_coded) else 1
                error = self.adwin_ensemble[i].estimation
                self.adwin_ensemble[i].add_element(add)
                if self.adwin_ensemble[i].detected_change():
                    if self.adwin_ensemble[i].estimation > error:
                        change_detected = True

        if change_detected:
            self.n_detected_changes += 1
            max_threshold = 0.0
            i_max = -1
            for i in range(self.actual_n_estimators):
                if max_threshold < self.adwin_ensemble[i].estimation:
                    max_threshold = self.adwin_ensemble[i].estimation
                    i_max = i
            if i_max != -1:
                self.ensemble[i_max].reset()
                self.adwin_ensemble[i_max] = ADWIN(self.delta)
        return self
Example #29
    def partial_fit(self, X, y, classes=None, weight=None):
        """ Partially fits the model, based on the X and y matrix.

        Since it is an ensemble learner, if X and y matrices containing more
        than one sample are passed, the algorithm will partial fit the model
        one sample at a time.

        Each sample is trained by each classifier a total of K times, where K
        is drawn from a Poisson(:math:`\lambda`) distribution. :math:`\lambda` is
        updated after every example using :math:`\lambda_{sc}` if the estimator
        correctly classifies the example, or :math:`\lambda_{sw}` otherwise.

        Parameters
        ----------
        X : numpy.ndarray of shape (n_samples, n_features)
            The features to train the model.

        y: numpy.ndarray of shape (n_samples)
            An array-like with the class labels of all samples in X.

        classes: numpy.ndarray, optional (default=None)
            Array with all possible/known class labels. This is an optional parameter, except
            for the first partial_fit call where it is compulsory.

        weight: Array-like
            Instance weight. If not provided, uniform weights are assumed.
            Usage varies depending on the base estimator.

        Raises
        ------
        ValueError: A ValueError is raised if the 'classes' parameter is not
        passed in the first partial_fit call, or if it is passed in further
        calls but differs from the initial classes list.

        Returns
        -------
        self

        """
        if self.classes is None:
            if classes is None:
                raise ValueError("The first partial_fit call should pass all the classes.")
            else:
                self.classes = classes

        if self.classes is not None and classes is not None:
            if set(self.classes) != set(classes):
                raise ValueError("The classes passed to the partial_fit function "
                                 "differ from those passed earlier.")

        self.__adjust_ensemble_size()
        r, _ = get_dimensions(X)
        for j in range(r):
            change_detected = False
            lam = 1
            for i in range(self.actual_n_estimators):
                if y[j] == 1:
                    self.lam_pos[i] += lam
                    self.n_pos += 1
                else:
                    self.lam_neg[i] += lam
                    self.n_neg += 1
                lam_rus = 1
                if self.algorithm == 1:
                    if y[j] == 1:
                        if self.n_neg != 0:
                            lam_rus = lam * ((self.lam_pos[i] + self.lam_neg[i]) /
                                             (self.lam_pos[i] + self.lam_neg[i] *
                                              (self.sampling_rate * (self.n_pos / self.n_neg))) *
                                             (((self.sampling_rate + 1) * self.n_pos) / (self.n_pos + self.n_neg)))
                    else:
                        if self.n_pos != 0:
                            lam_rus = lam * ((self.lam_pos[i] + self.lam_neg[i]) /
                                             (self.lam_pos[i] + self.lam_neg[i] *
                                              (self.n_neg / (self.n_pos * self.sampling_rate))) *
                                             (((self.sampling_rate + 1) * self.n_pos) / (self.n_pos + self.n_neg)))
                elif self.algorithm == 2:
                    if y[j] == 1:
                        lam_rus = ((lam * self.n_pos) / (self.n_pos + self.n_neg)) / \
                                  (self.lam_pos[i] / (self.lam_pos[i] + self.lam_neg[i]))
                    else:
                        lam_rus = ((lam * self.sampling_rate * self.n_pos) / (self.n_pos + self.n_neg)) / \
                                  (self.lam_neg[i] / (self.lam_pos[i] + self.lam_neg[i]))
                elif self.algorithm == 3:
                    if y[j] == 1:
                        lam_rus = lam
                    else:
                        lam_rus = lam / self.sampling_rate
                k = self._random_state.poisson(lam_rus)
                if k > 0:
                    for b in range(k):
                        self.ensemble[i].partial_fit([X[j]], [y[j]], classes, weight)
                if self.ensemble[i].predict([X[j]])[0] == y[j]:
                    self.lam_sc[i] += lam
                    self.epsilon[i] = (self.lam_sw[i]) / (self.lam_sc[i] + self.lam_sw[i])
                    if self.epsilon[i] != 1:
                        lam = lam / (2 * (1 - self.epsilon[i]))
                else:
                    self.lam_sw[i] += lam
                    self.epsilon[i] = (self.lam_sw[i]) / (self.lam_sc[i] + self.lam_sw[i])
                    if self.epsilon[i] != 0:
                        lam = lam / (2 * self.epsilon[i])

                if self.drift_detection:
                    try:
                        pred = self.ensemble[i].predict(X)
                        error_estimation = self.adwin_ensemble[i].estimation
                        for m in range(r):
                            if pred[m] is not None:
                                self.adwin_ensemble[i].add_element(int(pred[m] == y[m]))
                        if self.adwin_ensemble[i].detected_change():
                            if self.adwin_ensemble[i].estimation > error_estimation:
                                change_detected = True
                    except ValueError:
                        change_detected = False

            if change_detected and self.drift_detection:
                max_threshold = 0.0
                i_max = -1
                for i in range(self.actual_n_estimators):
                    if max_threshold < self.adwin_ensemble[i].estimation:
                        max_threshold = self.adwin_ensemble[i].estimation
                        i_max = i
                if i_max != -1:
                    self.ensemble[i_max].reset()
                    self.adwin_ensemble[i_max] = ADWIN()

        return self
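
A worked sketch of the lambda update used above, in isolation (pure arithmetic; eps = lam_sw / (lam_sc + lam_sw) as in the loop body):

lam, lam_sc, lam_sw = 1.0, 0.0, 0.0
for correct in [True, True, False, True]:
    if correct:
        lam_sc += lam
        eps = lam_sw / (lam_sc + lam_sw)
        if eps != 1:
            lam = lam / (2 * (1 - eps))  # correct: shrink the weight
    else:
        lam_sw += lam
        eps = lam_sw / (lam_sc + lam_sw)
        if eps != 0:
            lam = lam / (2 * eps)        # mistake: boost the weight
    print("correct=%s eps=%.3f lam=%.3f" % (correct, eps, lam))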
Example #30
today = date.today()
result_dir = 'result/multiflow/' + today.strftime("%Y-%m-%d") + '/'
if not os.path.exists(result_dir):
    os.makedirs(result_dir)
file_name = "CMGMM-" + test_dataset + ".log"

DETECTOR = args.detector
nama_model = "CMGMM"
if prune_comp:
    nama_model = nama_model + "+ "
else:
    nama_model = nama_model + " "
if DETECTOR == "ADWIN":
    print ("adwin")
    nama_model = nama_model+DETECTOR
    detector = ADWIN()
elif DETECTOR == "DDM":
    print ("DDM")
    nama_model = nama_model+DETECTOR
    detector = DDM()
elif DETECTOR == "EDDM":
    print ("EDDM")
    nama_model = nama_model+DETECTOR
    detector = EDDM()
elif DETECTOR == "HDDM_A":
    print ("HDDM_A")
    nama_model = nama_model+DETECTOR
    detector = HDDM_A()
elif DETECTOR == "HDDM_W":
    print ("HDDM_W")
    nama_model = nama_model+DETECTOR