def deserialize_gradient_boosting_regressor(model_dict):
    """Rebuild a fitted ``GradientBoostingRegressor`` from its serialized dict.

    Reconstructs hyper-parameters, the fitted tree ensemble, the init
    estimator, training bookkeeping attributes, and the loss object so the
    returned model can be used for prediction.

    Parameters
    ----------
    model_dict : dict
        Serialized model state. Expected keys: 'params', 'estimators_',
        'estimators_shape', 'train_score_', 'max_features_', 'n_features_',
        'loss_'; optional: 'init_' (with a 'meta' tag) and 'priors'.

    Returns
    -------
    GradientBoostingRegressor
        A model populated with the deserialized fitted state.
    """
    model = GradientBoostingRegressor(**model_dict['params'])
    trees = [
        deserialize_decision_tree_regressor(tree)
        for tree in model_dict['estimators_']
    ]
    # sklearn stores the ensemble as a 2-D array of shape (n_stages, n_outputs).
    model.estimators_ = np.array(trees).reshape(model_dict['estimators_shape'])

    if 'init_' in model_dict and model_dict['init_']['meta'] == 'dummy':
        model.init_ = dummy.DummyRegressor()
        # Copy before popping 'meta' — assigning the dict directly would alias
        # the caller's model_dict and mutate it, breaking repeated
        # deserialization from the same dict.
        init_state = dict(model_dict['init_'])
        init_state.pop('meta')
        model.init_.__dict__ = init_state

    model.train_score_ = np.array(model_dict['train_score_'])
    model.max_features_ = model_dict['max_features_']
    model.n_features_ = model_dict['n_features_']

    # Map the serialized loss name back to its sklearn loss object; the
    # argument 1 is the number of outputs (regressors are single-output).
    loss_classes = {
        'ls': _gb_losses.LeastSquaresError,
        'lad': _gb_losses.LeastAbsoluteError,
        'huber': _gb_losses.HuberLossFunction,
        'quantile': _gb_losses.QuantileLossFunction,
    }
    loss_name = model_dict['loss_']
    if loss_name in loss_classes:
        model.loss_ = loss_classes[loss_name](1)

    if 'priors' in model_dict:
        model.init_.priors = np.array(model_dict['priors'])
    return model
# Beispiel #2
def _dt_pdp(dt, train_X, train_y, rescalers):
    """Build a partial-dependency plot for a single decision tree.

    Wraps the tree in a one-stage ``GradientBoostingRegressor`` shell
    (learning_rate=1.0 with a zero init estimator, so the ensemble's
    prediction equals the tree's own) and delegates plotting to
    ``PartialDependencyPlotBuilder``.

    Parameters
    ----------
    dt : fitted decision tree regressor
    train_X : training features; must expose ``columns()``
        (NOTE(review): looks like a project wrapper, not a raw pandas
        DataFrame — confirm against callers)
    train_y : training targets
    rescalers : feature rescalers forwarded to the plot builder

    Returns
    -------
    Whatever ``PartialDependencyPlotBuilder.build()`` returns.
    """
    gbt = GradientBoostingRegressor(learning_rate=1.0, init=ZeroEstimator())
    # sklearn expects estimators_ with shape (n_stages, n_outputs) = (1, 1).
    gbt.estimators_ = np.array([np.array([dt])])
    # (removed leftover debug pprint of estimators_.shape)
    set_n_features_v0_18_v0_19(gbt, len(train_X.columns()))
    return PartialDependencyPlotBuilder(gbt, train_X, train_y, rescalers, True).build()