Example #1
def test_hoeffding_tree_regressor_categorical_features(test_path):
    data_path = os.path.join(test_path, 'ht_categorical_features_testcase.npy')
    stream = np.load(data_path)

    # Remove class value
    stream = stream[:, np.delete(np.arange(8), 7)]
    # Removes the last column (used only in the multi-target regression case)
    stream = stream[:, :-1]
    X, y = stream[:, :-1], stream[:, -1]

    nominal_attr_idx = np.arange(7).tolist()
    learner = HoeffdingTreeRegressor(nominal_attributes=nominal_attr_idx)

    learner.partial_fit(X, y)

    expected_description = "if Attribute 4 = 0.0:\n" \
                           "  Leaf = Statistics {0: 606.0000, 1: 1212.0000, 2: 3626.0000}\n" \
                           "if Attribute 4 = 1.0:\n" \
                           "  Leaf = Statistics {0: 551.0000, 1: 1128.0000, 2: 3400.0000}\n" \
                           "if Attribute 4 = 2.0:\n" \
                           "  Leaf = Statistics {0: 566.0000, 1: 1139.0000, 2: 3423.0000}\n" \
                           "if Attribute 4 = 3.0:\n" \
                           "  Leaf = Statistics {0: 577.0000, 1: 1138.0000, 2: 3374.0000}\n" \
                           "if Attribute 4 = 4.0:\n" \
                           "  Leaf = Statistics {0: 620.0000, 1: 1233.0000, 2: 3725.0000}\n" \
                           "if Attribute 4 = -3.0:\n" \
                           "  Leaf = Statistics {0: 80.0000, 1: 163.0000, 2: 483.0000}\n"

    assert SequenceMatcher(
        None, expected_description, learner.get_model_description()
    ).ratio() > 0.9
Example #2
def train_ht():
    print("Start training Hoeffding Tree Regressor...")

    ht_regressor = HoeffdingTreeRegressor(grace_period=1000, nb_threshold=6)

    start_time = time.time()
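    # NOTE: X_train and y_train are assumed to be defined elsewhere in this
    # script (loaded before this function is called); they are not part of
    # this snippet.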

    ht_regressor.fit(X_train, y_train)
    print("Execution ht completed in -- %s seconds --" %
          round(time.time() - start_time, 2))
    pickle.dump(ht_regressor, open("models/ht_regressor12F.p", "wb"))
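
# A minimal companion sketch (not part of the original script): reload the
# pickled regressor for later predictions. The default path matches the dump
# in train_ht() above; load_ht and X_new are hypothetical names.
import pickle

def load_ht(model_path="models/ht_regressor12F.p"):
    # Restore the trained Hoeffding Tree Regressor from disk
    with open(model_path, "rb") as f:
        return pickle.load(f)

# Example usage: y_pred = load_ht().predict(X_new)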
Example #3
def test_get_tags():
    classifier = HoeffdingTreeClassifier()
    regressor = HoeffdingTreeRegressor()
    multi_output_regressor = iSOUPTreeRegressor()

    classifier_tags = classifier._get_tags()

    expected_tags = {
        'X_types': ['2darray'],
        '_skip_test': False,
        'allow_nan': False,
        'multilabel': False,
        'multioutput': False,
        'multioutput_only': False,
        'no_validation': False,
        'non_deterministic': False,
        'poor_score': False,
        'requires_positive_data': False,
        'stateless': False
    }
    assert classifier_tags == expected_tags

    regressor_tags = regressor._get_tags()
    expected_tags = {
        'X_types': ['2darray'],
        '_skip_test': False,
        'allow_nan': False,
        'multilabel': False,
        'multioutput': False,
        'multioutput_only': False,
        'no_validation': False,
        'non_deterministic': False,
        'poor_score': False,
        'requires_positive_data': False,
        'stateless': False
    }
    assert regressor_tags == expected_tags

    multi_output_regressor_tags = multi_output_regressor._get_tags()
    expected_tags = {
        'X_types': ['2darray'],
        '_skip_test': False,
        'allow_nan': False,
        'multilabel': False,
        'multioutput': True,
        'multioutput_only': True,
        'no_validation': False,
        'non_deterministic': False,
        'poor_score': False,
        'requires_positive_data': False,
        'stateless': False
    }
    assert multi_output_regressor_tags == expected_tags
Example #4
def test_hoeffding_tree_regressor_perceptron():
    stream = RegressionGenerator(n_samples=500,
                                 n_features=20,
                                 n_informative=15,
                                 random_state=1)
    stream.prepare_for_use()

    learner = HoeffdingTreeRegressor(leaf_prediction='perceptron',
                                     random_state=1)

    cnt = 0
    max_samples = 500
    y_pred = array('d')
    y_true = array('d')
    wait_samples = 10

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_true.append(y[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('d', [
        1198.4326121743168, 456.36607750881586, 927.9912160545144,
        1160.4797981899128, 506.50541829176535, -687.8187227095925,
        -677.8120094065415, 231.14888704761225, -284.46324039942937,
        -255.69195985557175, 47.58787439365423, -135.22494016284043,
        -10.351457437330152, 164.95903200643997, 360.72854984472383,
        193.30633911830088, -64.23638301570358, 587.9771578214296,
        649.8395655757931, 481.01214222804026, 305.4402728117724,
        266.2096493865043, -445.11447171009775, -567.5748694154349,
        -68.70070048021438, -446.79910655850153, -115.892348067663,
        -98.26862866231015, 71.04707905920286, -10.239274802165584,
        18.748731569441812, 4.971217265129857, 172.2223575990573,
        -655.2864976783711, -129.69921313686626, -114.01187375876822,
        -405.66166686550963, -215.1264381928009, -345.91020370426247,
        -80.49330468453074, 108.78958382083302, 134.95267043280126,
        -398.5273538477553, -157.1784910649728, 219.72541225645654,
        -100.91598162899217, 80.9768574308987, -296.8856956382453,
        251.9332271253148
    ])
    assert np.allclose(y_pred, expected_predictions)

    error = mean_absolute_error(y_true, y_pred)
    expected_error = 362.98595964244623
    assert np.isclose(error, expected_error)

    expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='perceptron', " \
                    "learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
                    "max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
                    "nominal_attributes=None, random_state=1, remove_poor_atts=False, split_confidence=1e-07, " \
                    "stop_mem_management=False, tie_threshold=0.05)"
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info

    assert isinstance(learner.get_model_description(), type(''))
    assert type(learner.predict(X)) == np.ndarray
Example #5
def test_hoeffding_tree_regressor():
    stream = RegressionGenerator(n_samples=500,
                                 n_features=20,
                                 n_informative=15,
                                 random_state=1)
    stream.prepare_for_use()

    learner = HoeffdingTreeRegressor(leaf_prediction='mean')

    cnt = 0
    max_samples = 500
    y_pred = array('d')
    y_true = array('d')
    wait_samples = 10

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_true.append(y[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('d', [
        102.38946041769101, 55.6584574987656, 5.746076599168373,
        17.11797209372667, 2.566888222752787, 9.188247802192826,
        17.87894804676911, 15.940629626883966, 8.981172175448485,
        13.152624115190092, 11.106058099429399, 6.473195313058236,
        4.723621479590173, 13.825568609556493, 8.698873073880696,
        1.6452441811010252, 5.123496188584294, 6.34387187194982,
        5.9977733790395105, 6.874251577667707, 4.605348088338317,
        8.20112636572672, 9.032631648758098, 4.428189978974459,
        4.249801041367518, 9.983272668044492, 12.859518508979734,
        11.741395774380285, 11.230028410261868, 9.126921979081521,
        9.132146661688296, 7.750655625124709, 6.445145118245414,
        5.760928671876355, 4.041291302080659, 3.591837600560529,
        0.7640424010500604, 0.1738639840537784, 2.2068337802212286,
        -81.05302946841077, 96.17757415335177, -77.35894903819677,
        95.85568683733698, 99.1981674250886, 99.89327888035015,
        101.66673013734784, -79.1904234513751, -80.42952143783687,
        100.63954789983896
    ])
    assert np.allclose(y_pred, expected_predictions)

    error = mean_absolute_error(y_true, y_pred)
    expected_error = 143.11351404083086
    assert np.isclose(error, expected_error)

    expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='mean', " \
                    "learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
                    "max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
                    "nominal_attributes=None, random_state=None, remove_poor_atts=False, split_confidence=1e-07, " \
                    "stop_mem_management=False, tie_threshold=0.05)"
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info

    assert isinstance(learner.get_model_description(), type(''))
    assert type(learner.predict(X)) == np.ndarray
Example #6
def test_hoeffding_tree_regressor_perceptron():
    stream = RegressionGenerator(n_samples=500,
                                 n_features=20,
                                 n_informative=15,
                                 random_state=1)

    learner = HoeffdingTreeRegressor(leaf_prediction='perceptron',
                                     random_state=1)

    cnt = 0
    max_samples = 500
    y_pred = array('d')
    y_true = array('d')
    wait_samples = 10

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_true.append(y[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('d', [
        525.7553636732247, 352.8160300365902, 224.80744320456478,
        193.72837054292074, 132.6059603765031, 117.06974933197759,
        114.53342429855932, 89.37195405567235, 57.85335051891305,
        60.00883955911155, 47.263185779784266, 25.17616431074491,
        17.43259526890146, 47.33468996498019, 22.83975208548138,
        -7.659282840823236, 8.564101665071064, 14.61585289361161,
        11.560941733770441, 13.70120291865976, 1.1938438210799651,
        19.01970713481836, 21.23459424444584, -5.667473522309328,
        -5.203149619381393, 28.726275200889173, 41.03406433337882,
        27.950322712127267, 21.267116786963925, 5.53344652490152,
        6.753264259267268, -2.3288137435962213, -10.492766334689875,
        -11.19641058176631, -20.134685945295644, -19.36581990084085,
        -38.26894947177957, -34.90246284430353, -11.019543212232008,
        -22.016714766708127, -18.710456277443544, -20.5568019328217,
        -2.636583876625667, 24.787714491718187, 29.325261678088406,
        45.31267371823666, -48.271054430207776, -59.7649172085901,
        48.22724814037523
    ])
    # assert np.allclose(y_pred, expected_predictions)

    error = mean_absolute_error(y_true, y_pred)
    expected_error = 152.12931270533377
    assert np.isclose(error, expected_error)

    expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='perceptron', " \
                    "learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
                    "max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
                    "nominal_attributes=None, random_state=1, remove_poor_atts=False, split_confidence=1e-07, " \
                    "stop_mem_management=False, tie_threshold=0.05)"
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info

    assert isinstance(learner.get_model_description(), type(''))
    assert type(learner.predict(X)) == np.ndarray
Example #7
def test_hoeffding_tree_regressor_coverage(test_path):
    # Cover nominal attribute observer
    test_file = os.path.join(test_path, 'regression_data.npz')
    data = np.load(test_file)
    X = data['X']
    y = data['y']

    # Typo in leaf prediction
    learner = HoeffdingTreeRegressor(leaf_prediction='percptron',
                                     nominal_attributes=[i for i in range(3)])
    print(learner.split_criterion)
    # Invalid split_criterion
    learner.split_criterion = 'VR'
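    # Both invalid options above are expected to be handled gracefully
    # (typically by warning and reverting to the default setting) rather than
    # raising, which is what this coverage test exercises.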
    learner.partial_fit(X, y)

    assert learner._estimator_type == 'regressor'
Example #8
def demo(output_file=None, instances=40000):
    """ _test_regression

    This demo demonstrates how to evaluate a regressor. The data stream used
    is an instance of the RegressionGenerator, which feeds an instance from
    sklearn's SGDRegressor.

    Parameters
    ----------
    output_file: string
        The name of the csv output file

    instances: int
        The evaluation's max number of instances

    """
    stream = RegressionGenerator(n_samples=40000)

    regressor = HoeffdingTreeRegressor()

    # Setup the evaluator
    evaluator = EvaluatePrequential(pretrain_size=1,
                                    max_samples=instances,
                                    batch_size=1,
                                    n_wait=200,
                                    max_time=1000,
                                    output_file=output_file,
                                    show_plot=False,
                                    metrics=['mean_square_error'])

    # Evaluate
    evaluator.evaluate(stream=stream, model=regressor)
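
# A possible invocation of the demo above; the output file name is illustrative.
demo(output_file='test_regression_summary.csv', instances=10000)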
Example #9
def test_evaluate_delayed_regression_coverage(tmpdir):
    # A simple coverage test. Tests for metrics are placed in the corresponding test module.
    from skmultiflow.data import RegressionGenerator
    from skmultiflow.trees import HoeffdingTreeRegressor

    max_samples = 1000

    # Generate data
    data = RegressionGenerator(n_samples=max_samples)
    # Get X and y
    X, y = data.next_sample(max_samples)
    time = generate_random_dates(seed=1, samples=max_samples)

    # Setup temporal stream
    stream = TemporalDataStream(X, y, time, ordered=False)

    # Learner
    htr = HoeffdingTreeRegressor()

    output_file = os.path.join(str(tmpdir), "prequential_delayed_summary.csv")
    metrics = ['mean_square_error', 'mean_absolute_error']
    evaluator = EvaluatePrequentialDelayed(max_samples=max_samples,
                                           metrics=metrics,
                                           output_file=output_file)

    evaluator.evaluate(stream=stream, model=htr, model_names=['HTR'])
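
# generate_random_dates is a helper defined next to this test and is not shown
# in the snippet above. A minimal, hypothetical stand-in that returns one
# timestamp per sample could look like this:
import numpy as np

def generate_random_dates(seed, samples):
    # Draw random second offsets within a ~30-day window from a fixed start date
    rng = np.random.default_rng(seed)
    offsets = rng.integers(0, 60 * 60 * 24 * 30, size=samples)
    return np.datetime64('2020-01-01') + offsets.astype('timedelta64[s]')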
Example #10
def test_hoeffding_tree_regressor_perceptron():
    stream = RegressionGenerator(n_samples=500, n_features=20, n_informative=15, random_state=1)

    learner = HoeffdingTreeRegressor(leaf_prediction='perceptron', random_state=1)

    cnt = 0
    max_samples = 500
    y_pred = array('d')
    y_true = array('d')
    wait_samples = 10

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_true.append(y[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('d', [-106.84237763060068, -10.965517384802226,
                                       -180.90711470797237, -218.20896751607663, -96.4271589961865,
                                       110.51551963099622, 108.34616947202511, 30.1720109214627,
                                       57.92205878998479, 77.82418885914053, 49.972060923364765,
                                       68.56117081695875, 15.996949915551697, -34.22744443808294,
                                       -19.762696110319702, -28.447329394752995,
                                       -50.62864370485592, -47.37357781048561, -99.82613515424342,
                                       13.985531117918336, 41.41709671929987, -34.679807275938174,
                                       62.75626094547859, 30.925078688018893, 12.130320819235365,
                                       119.3648998377624, 82.96422756064737, -6.920397563039609,
                                       -12.701774870569059, 24.883730398016034, -74.22855883237567,
                                       -0.8012436194087567, -83.03683748750394, 46.737839617687854,
                                       0.537404558240671, 48.53591837633138, -86.2259777783834,
                                       -24.985514024179967, 6.396035456152859, -90.19454995571908,
                                       32.05821807667601, -83.08553684151566, -28.32223999320023,
                                       113.28916673506842, 68.10498750807977, 173.9146410394573,
                                       -150.2067507947196, -74.10346402222962, 54.39153137687993])
    assert np.allclose(y_pred, expected_predictions)

    error = mean_absolute_error(y_true, y_pred)
    expected_error = 115.78916175164417
    assert np.isclose(error, expected_error)

    expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='perceptron', " \
                    "learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
                    "max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
                    "nominal_attributes=None, random_state=1, remove_poor_atts=False, split_confidence=1e-07, " \
                    "stop_mem_management=False, tie_threshold=0.05)"
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info

    assert isinstance(learner.get_model_description(), type(''))
    assert type(learner.predict(X)) == np.ndarray
Example #11
def test_hoeffding_tree_regressor_perceptron():
    stream = RegressionGenerator(n_samples=500, n_features=20, n_informative=15, random_state=1)

    learner = HoeffdingTreeRegressor(leaf_prediction='perceptron', random_state=1)

    cnt = 0
    max_samples = 500
    y_pred = array('d')
    y_true = array('d')
    wait_samples = 10

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_true.append(y[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('d', [207.20901655684412, 106.30316877540555, 101.46950096324191,
                                       114.38162776688861, 48.40271620592212, -79.94375846313639,
                                       -76.69182794940929, 88.38425569670662, -13.92372162581644,
                                       3.0549887923350507, 55.36276732455883, 32.0512081208464,
                                       17.54953203218902, -1.7305966738232161, 43.54548690756897,
                                       8.502241407478213, -61.14739038895263, 50.528736810827745,
                                       9.679668917948607, 89.93098085572623, 85.1994809437223,
                                       1.8721866382932664, -7.1972581323107825, -45.86230662663542,
                                       3.111671172363243, 57.921908276916646, 61.43400576850072,
                                       -16.61695641848216, -6.0769944259948065, 19.929266442289546,
                                       -60.972801351912224, -0.3342549973033524,
                                       -50.53334350658139, -14.885488543743078,
                                       -13.255920225124637, 28.909916365484275,
                                       -103.03499425386107, -36.44921969674884, -15.40018796932204,
                                       -84.98471039676006, 38.270205984888065, -62.97228157481581,
                                       -48.095864628804044, 95.5028130171316, 73.62390886812497,
                                       152.7135140597221, -120.4662342226783, -77.68182541723442,
                                       66.82059046110074])
    assert np.allclose(y_pred, expected_predictions)
    error = mean_absolute_error(y_true, y_pred)
    expected_error = 126.11208652969131
    assert np.isclose(error, expected_error)

    expected_info = "HoeffdingTreeRegressor(binary_split=False, grace_period=200, leaf_prediction='perceptron', " \
                    "learning_ratio_const=True, learning_ratio_decay=0.001, learning_ratio_perceptron=0.02, " \
                    "max_byte_size=33554432, memory_estimate_period=1000000, nb_threshold=0, no_preprune=False, " \
                    "nominal_attributes=None, random_state=1, remove_poor_atts=False, split_confidence=1e-07, " \
                    "stop_mem_management=False, tie_threshold=0.05)"
    info = " ".join([line.strip() for line in learner.get_info().split()])
    assert info == expected_info

    assert isinstance(learner.get_model_description(), type(''))
    assert type(learner.predict(X)) == np.ndarray
Example #12
def test_hoeffding_tree_regressor_model_description():
    stream = RegressionGenerator(
        n_samples=500, n_features=20, n_informative=15, random_state=1
    )

    learner = HoeffdingTreeRegressor(leaf_prediction='mean')

    max_samples = 500
    X, y = stream.next_sample(max_samples)
    learner.partial_fit(X, y)

    expected_description = "if Attribute 6 <= 0.1394515530995348:\n" \
                           "  Leaf = Statistics {0: 276.0000, 1: -21537.4157, 2: 11399392.2187}\n" \
                           "if Attribute 6 > 0.1394515530995348:\n" \
                           "  Leaf = Statistics {0: 224.0000, 1: 22964.8868, 2: 10433581.2534}\n"

    assert SequenceMatcher(
        None, expected_description, learner.get_model_description()
    ).ratio() > 0.9
Example #13
    def __init__(self, base_estimator=HoeffdingTreeRegressor(grace_period=50,
                                                             split_confidence=0.01,
                                                             random_state=1),
                 n_estimators: int = 100,
                 subspace_mode: str = "percentage",
                 subspace_size: int = 60,
                 training_method: str = "randompatches",
                 lam: float = 6.0,
                 drift_detection_method: BaseDriftDetector = ADWIN(delta=1e-5),
                 warning_detection_method: BaseDriftDetector = ADWIN(delta=1e-4),
                 disable_weighted_vote: bool = True,
                 disable_drift_detection: bool = False,
                 disable_background_learner: bool = False,
                 drift_detection_criteria='error',
                 aggregation_method='mean',
                 nominal_attributes=None,
                 random_state=None):

        super().__init__(base_estimator=base_estimator,
                         n_estimators=n_estimators,
                         subspace_mode=subspace_mode,
                         subspace_size=subspace_size,
                         training_method=training_method,
                         lam=lam,
                         drift_detection_method=drift_detection_method,
                         warning_detection_method=warning_detection_method,
                         disable_weighted_vote=disable_weighted_vote,
                         disable_drift_detection=disable_drift_detection,
                         disable_background_learner=disable_background_learner,
                         nominal_attributes=nominal_attributes,
                         random_state=random_state)
        self._base_performance_evaluator = RegressionMeasurements()
        self._base_learner_class = StreamingRandomPatchesRegressorBaseLearner

        if aggregation_method not in {self._MEAN, self._MEDIAN}:
            raise ValueError("Invalid aggregation_method: {}.\n"
                             "Valid options are: {}".format(aggregation_method,
                                                            {self._MEAN, self._MEDIAN}))
        self.aggregation_method = aggregation_method

        if drift_detection_criteria not in {self._ERROR, self._PREDICTION}:
            raise ValueError("Invalid drift_detection_criteria: {}.\n"
                             "Valid options are: {}".format(drift_detection_criteria,
                                                            {self._ERROR, self._PREDICTION}))
        self.drift_detection_criteria = drift_detection_criteria
Example #14
def test_evaluate_regression_coverage(tmpdir):
    # A simple coverage test. Tests for metrics are placed in the corresponding test module.
    from skmultiflow.data import RegressionGenerator
    from skmultiflow.trees import HoeffdingTreeRegressor

    max_samples = 1000

    # Stream
    stream = RegressionGenerator(n_samples=max_samples)

    # Learner
    htr = HoeffdingTreeRegressor()

    output_file = os.path.join(str(tmpdir), "prequential_summary.csv")
    metrics = ['mean_square_error', 'mean_absolute_error']
    evaluator = EvaluatePrequential(max_samples=max_samples,
                                    metrics=metrics,
                                    output_file=output_file)

    evaluator.evaluate(stream=stream, model=htr, model_names=['HTR'])
Example #15
def test_is_regressor():
    learner = HoeffdingTreeRegressor()
    assert is_regressor(learner) is True
Example #16
# Setup a data stream
dstream = RegressionGenerator(n_features=9,
                              n_samples=800,
                              n_targets=1,
                              random_state=456)
dstream
#RegressionGenerator(n_features=9, n_informative=10, n_samples=800, n_targets=1,
#                    random_state=456)

dstream.next_sample()
#(array([[ 0.72465838, -1.92979924, -0.02607907,  2.35603757, -0.37461529,
#         -0.38480019,  0.06603468, -2.1436878 ,  0.49182531]]),
# array([61.302191]))

# Instantiate the Hoeffding Tree Regressor object
htr = HoeffdingTreeRegressor()

# Prequential evaluation
eval1 = EvaluatePrequential(
    pretrain_size=400,
    max_samples=800,
    batch_size=1,
    n_wait=100,
    max_time=2000,
    show_plot=False,
    metrics=['mean_square_error', 'mean_absolute_error'])

eval1.evaluate(stream=dstream, model=htr)

#############################################################################################
Example #17
def test_hoeffding_tree_regressor_coverage():
    max_samples = 1000
    max_size_mb = 2

    stream = RegressionGenerator(
        n_samples=max_samples, n_features=10, n_informative=7, n_targets=1,
        random_state=42
    )
    X, y = stream.next_sample(max_samples)

    # Cover memory management
    tree = HoeffdingTreeRegressor(
        leaf_prediction='mean', grace_period=100,
        memory_estimate_period=100, max_byte_size=max_size_mb*2**20
    )
    tree.partial_fit(X, y)

    # A tree without memory management enabled reaches over 3 MB in size
    assert calculate_object_size(tree, 'MB') <= max_size_mb

    # Typo in leaf prediction
    tree = HoeffdingTreeRegressor(
        leaf_prediction='percptron', grace_period=100,
        memory_estimate_period=100, max_byte_size=max_size_mb*2**20
    )
    # Invalid split_criterion
    tree.split_criterion = 'VR'

    tree.partial_fit(X, y)
    assert calculate_object_size(tree, 'MB') <= max_size_mb

    tree.reset()
    assert tree._estimator_type == 'regressor'

###############################################################################
#                                    Options                                  #
###############################################################################
SEED = 123456
n_estimators = 3
aggregation_method = 'median'            # 'median', 'mean'
drift_detection_criteria = 'prediction'  # 'error', 'prediction'
subspace_mode = "randompatches"          # "randomsubspaces", "resampling", "randompatches"
###############################################################################

stream = FileStream('datasets/cal_housing.csv')

SRPR = StreamingRandomPatchesRegressor(n_estimators=n_estimators,
                                       aggregation_method=aggregation_method,
                                       random_state=SEED)
HTR = HoeffdingTreeRegressor(random_state=SEED)  # , leaf_prediction='mean')

evaluator = EvaluatePrequential(pretrain_size=0,
                                show_plot=True,
                                metrics=['mean_square_error',
                                         'mean_absolute_error',
                                         'true_vs_predicted']
                                )

evaluator.evaluate(stream=stream, model=[SRPR, HTR], model_names=['SRP-Reg', 'HT-Reg'])

print(SRPR.get_info())
print('DONE')