    def __init__(self,
                 model=HoeffdingTreeClassifier(grace_period=50,
                                               split_confidence=0.01),
                 n_models: int = 100,
                 subspace_size: typing.Union[int, float, str] = .6,
                 training_method: str = "patches",
                 lam: float = 6.0,
                 drift_detector: typing.Union[base.DriftDetector,
                                              None] = ADWIN(delta=1e-5),
                 warning_detector: base.DriftDetector = ADWIN(delta=1e-4),
                 disable_weighted_vote: bool = False,
                 nominal_attributes=None,
                 seed=None,
                 metric: MultiClassMetric = Accuracy()):
        super().__init__([None])  # The list of models is properly initialized later.
        self.models = []
        self.model = model  # Not restricted to a specific base estimator.
        self.n_models = n_models
        self.subspace_size = subspace_size
        self.training_method = training_method
        self.lam = lam
        self.drift_detector = drift_detector
        self.warning_detector = warning_detector
        self.disable_weighted_vote = disable_weighted_vote
        self.metric = metric
        self.nominal_attributes = nominal_attributes if nominal_attributes else []
        self.seed = seed
        self._rng = check_random_state(self.seed)

        self._n_samples_seen = 0
        self._subspaces = None

        self._base_learner_class = StreamingRandomPatchesBaseLearner
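
One difference from the two variants that follow: here the Hoeffding tree, the two ADWIN detectors, and the Accuracy metric are created directly in the default arguments. Python evaluates default arguments once, at function definition time, so any instances constructed without explicit arguments share those same default objects; the later variants default to None and build the objects inside the body, which avoids the sharing. A small, self-contained illustration of the pitfall (the Detector class below is hypothetical, not part of the snippet):

class Detector:
    """Toy stand-in for a stateful default object such as ADWIN()."""

    def __init__(self):
        self.count = 0


def make(detector=Detector()):  # the default Detector() is built once, at definition time
    detector.count += 1
    return detector


a = make()
b = make()
print(a is b)   # True -> both calls received the very same object
print(b.count)  # 2    -> state accumulated across supposedly independent calls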
Example 2
    def __init__(
        self,
        model: base.Estimator = None,
        n_models: int = 10,
        subspace_size: typing.Union[int, float, str] = 0.6,
        training_method: str = "patches",
        lam: float = 6.0,
        drift_detector: base.DriftDetector = None,
        warning_detector: base.DriftDetector = None,
        disable_detector: str = "off",
        disable_weighted_vote: bool = False,
        seed=None,
        metric: Metric = None,
    ):
        if model is None:
            model = HoeffdingTreeClassifier(grace_period=50,
                                            split_confidence=0.01)

        if drift_detector is None:
            drift_detector = ADWIN(delta=1e-5)

        if warning_detector is None:
            warning_detector = ADWIN(delta=1e-4)

        if disable_detector == "off":
            pass
        elif disable_detector == "drift":
            drift_detector = None
            warning_detector = None
        elif disable_detector == "warning":
            warning_detector = None
        else:
            raise AttributeError(
                f"{disable_detector} is not a valid value for disable_detector.\n"
                f"Valid options are: 'off', 'drift', 'warning'")

        if metric is None:
            metric = Accuracy()

        super().__init__(
            model=model,
            n_models=n_models,
            subspace_size=subspace_size,
            training_method=training_method,
            lam=lam,
            drift_detector=drift_detector,
            warning_detector=warning_detector,
            disable_detector=disable_detector,
            disable_weighted_vote=disable_weighted_vote,
            seed=seed,
            metric=metric,
        )

        self._base_learner_class = BaseSRPClassifier
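
The BaseSRPClassifier reference suggests this constructor belongs to river's ensemble.SRPClassifier. A minimal usage sketch under that assumption; the Phishing dataset and the import paths are river's, but the exact module layout can differ between river versions:

from river import datasets, ensemble, metrics, tree

model = ensemble.SRPClassifier(
    model=tree.HoeffdingTreeClassifier(grace_period=50),
    n_models=10,
    training_method="patches",  # "patches" combines feature subspaces with resampling
    seed=42,
)
metric = metrics.Accuracy()

# Test-then-train over a small binary classification stream.
for x, y in datasets.Phishing():
    y_pred = model.predict_one(x)
    metric.update(y, y_pred)
    model.learn_one(x, y)

print(metric)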
Example 3
    def __init__(
        self,
        model: base.Classifier = None,
        n_models: int = 100,
        subspace_size: typing.Union[int, float, str] = 0.6,
        training_method: str = "patches",
        lam: float = 6.0,
        drift_detector: base.DriftDetector = None,
        warning_detector: base.DriftDetector = None,
        disable_detector: str = "off",
        disable_weighted_vote: bool = False,
        nominal_attributes=None,
        seed=None,
        metric: MultiClassMetric = None,
    ):

        if model is None:
            model = HoeffdingTreeClassifier(grace_period=50,
                                            split_confidence=0.01)

        if drift_detector is None:
            drift_detector = ADWIN(delta=1e-5)

        if warning_detector is None:
            warning_detector = ADWIN(delta=1e-4)

        if disable_detector == "off":
            pass
        elif disable_detector == "drift":
            drift_detector = None
            warning_detector = None
        elif disable_detector == "warning":
            warning_detector = None
        else:
            raise AttributeError(
                f"{disable_detector} is not a valid value for disable_detector.\n"
                f"Valid options are: 'off', 'drift', 'warning'")
        self.disable_detector = disable_detector

        if metric is None:
            metric = Accuracy()

        super().__init__([None])  # The list of models is properly initialized later.
        self.models = []
        self.model = model  # Not restricted to a specific base estimator.
        self.n_models = n_models
        self.subspace_size = subspace_size
        self.training_method = training_method
        self.lam = lam
        self.drift_detector = drift_detector
        self.warning_detector = warning_detector
        self.disable_weighted_vote = disable_weighted_vote
        self.metric = metric
        self.nominal_attributes = nominal_attributes if nominal_attributes else []
        self.seed = seed
        self._rng = np.random.default_rng(self.seed)

        self._n_samples_seen = 0
        self._subspaces = None

        self._base_learner_class = StreamingRandomPatchesBaseLearner
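
Aside from the disable_detector handling, this variant differs from the first mainly in how the random state is prepared: check_random_state returns a legacy numpy.random.RandomState, whereas np.random.default_rng returns the newer numpy.random.Generator. The two APIs are not interchangeable, as the short sketch below shows (it uses scikit-learn's check_random_state, which is an assumption about where that helper comes from):

import numpy as np
from sklearn.utils import check_random_state

legacy = check_random_state(42)     # numpy.random.RandomState (legacy API)
modern = np.random.default_rng(42)  # numpy.random.Generator (recommended API)

# Same seed, but different method names and different underlying bit generators,
# so the drawn values are not expected to match.
print(legacy.randint(0, 10, size=3))
print(modern.integers(0, 10, size=3))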
Example 4
# Imports assumed by this fragment; `dataset` and `model_name` are presumably
# defined earlier in the original script.
import os
import time

from river import stream
from river.metrics import Accuracy
from river.tree import HoeffdingTreeClassifier

# Shuffle the incoming stream using a 50-element buffer.
dataset = stream.shuffle(dataset, buffer_size=50)

# Alternative preprocessing pipelines (currently disabled); the plain Hoeffding
# tree defined below is used instead.
"""
model = StandardScaler() | HoeffdingTreeClassifier(
    grace_period=100, split_confidence=1e-5)

model = StandardScaler() | OneVsRestClassifier(classifier=LogisticRegression())
"""

filename = 'model.pkl'

base_path = os.path.join('benchmark', 'tree', model_name)

model = HoeffdingTreeClassifier()
# Optionally resume from a previously pickled model instead of starting from scratch:
# model = pickle.load(open(os.path.join('benchmark', 'tree', 'resnet50', 'model2.pkl'), 'rb'))

metric = Accuracy()

acc_history = list()

start = time.time()

# for j in range(0, 5):

# Prequential (test-then-train) evaluation: predict on each sample first, then
# learn from its label. The reassignments rely on the older river API in which
# learn_one() and Metric.update() return self.
for i, (X, y) in enumerate(dataset):
    preds = model.predict_one(X)
    model = model.learn_one(X, y)

    metric = metric.update(y, preds)
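
The manual test-then-train loop above is the same procedure that river's evaluate module automates. A roughly equivalent sketch, assuming a river version that provides evaluate.progressive_val_score; Phishing stands in here for the shuffled dataset built earlier in the script:

from river import datasets, evaluate, metrics, tree

model = tree.HoeffdingTreeClassifier()
metric = metrics.Accuracy()

# Internally runs the same predict -> update metric -> learn cycle and
# reports the running accuracy every 500 samples.
evaluate.progressive_val_score(datasets.Phishing(), model, metric, print_every=500)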