def sym_predict_linear(estimator):
    # Build intercept + sum(coef_i * x_i) as a sympy expression for a fitted
    # linear model; syms(estimator) supplies one Symbol per input feature.
    if hasattr(estimator, 'intercept_'):
        expression = RealNumber(estimator.intercept_[0])
    else:
        expression = RealNumber(0)
    symbols = syms(estimator)
    for coef, sym in zip(np.ravel(estimator.coef_), symbols):
        expression += RealNumber(coef) * sym
    return expression
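For context, a minimal usage sketch (not part of the original module): it builds the same intercept-plus-coefficients expression by hand for a fitted LinearRegression, with hand-made Symbols standing in for what syms() would return.

import numpy as np
from sklearn.linear_model import LinearRegression
from sympy import RealNumber, Symbol

X = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [2.0, 3.0]])
y = X @ np.array([2.0, -1.0]) + 0.5
model = LinearRegression().fit(X, y)

symbols = [Symbol('x0'), Symbol('x1')]      # stand-in for syms(model)
expr = RealNumber(float(model.intercept_))  # intercept_ is a scalar for 1-d y
for coef, sym in zip(np.ravel(model.coef_), symbols):
    expr += RealNumber(float(coef)) * sym
print(expr)  # roughly 2.0*x0 - 1.0*x1 + 0.5, up to floating-point printing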
Example 2
def _sym_predict_decision_tree(model,
                               names,
                               current_node=0,
                               output_idx=0,
                               class_idx=0):
    left = model.tree_.children_left[current_node]
    right = model.tree_.children_right[current_node]
    if left == -1:
        # Leaf node: both child indices are -1 and the prediction is the
        # stored value, so return it directly instead of building a
        # degenerate Piecewise on an undefined feature.
        assert right == -1
        return RealNumber(model.tree_.value[current_node, output_idx,
                                            class_idx])
    left_expr = _sym_predict_decision_tree(model,
                                           names,
                                           current_node=left,
                                           output_idx=output_idx,
                                           class_idx=class_idx)
    right_expr = _sym_predict_decision_tree(model,
                                            names,
                                            current_node=right,
                                            output_idx=output_idx,
                                            class_idx=class_idx)
    return Piecewise(
        (left_expr, Symbol(names[model.tree_.feature[current_node]]) <=
         model.tree_.threshold[current_node]),
        (right_expr, Symbol(names[model.tree_.feature[current_node]]) >
         model.tree_.threshold[current_node]),
    )
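A minimal sketch (not part of the original module) of the Piecewise shape the recursion above produces for a depth-1 regression tree; the feature name x0 is made up here.

import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sympy import Piecewise, RealNumber, Symbol

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0.0, 0.0, 1.0, 1.0])
tree = DecisionTreeRegressor(max_depth=1).fit(X, y).tree_

x0 = Symbol('x0')
left, right = tree.children_left[0], tree.children_right[0]
expr = Piecewise(
    (RealNumber(tree.value[left, 0, 0]), x0 <= tree.threshold[0]),
    (RealNumber(tree.value[right, 0, 0]), x0 > tree.threshold[0]),
)
print(expr)  # roughly Piecewise((0.0, x0 <= 1.5), (1.0, x0 > 1.5))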
Example 3
def sym_predict_proba_calibrated_classifier_cv(estimator):
    if not hasattr(estimator, 'calibrated_classifiers_'):
        raise NotFittedError()
    # Average the symbolic probability expressions of the per-fold
    # calibrated classifiers.
    return reduce(__add__,
                  map(sym_predict_proba,
                      estimator.calibrated_classifiers_)) / RealNumber(
                          len(estimator.calibrated_classifiers_))
Example 4
def sym_decision_function_gradient_boosting_classifier(estimator):
    learning_rate = RealNumber(estimator.learning_rate)
    n_classes = estimator.estimators_.shape[1]
    # One symbolic expression per boosting stage, for each class column.
    trees = [list(map(sym_predict, estimator.estimators_[:, i]))
             for i in range(n_classes)]
    tree_part = [learning_rate * reduce(add, trees[i]) for i in range(n_classes)]
    init_part = sym_predict(estimator.init_)
    if not isinstance(init_part, list):
        init_part = [init_part]
    result = [tree_part[i] + init_part[i] for i in range(n_classes)]
    if len(result) == 1:
        return result[0]
    else:
        return result
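A minimal sketch (stand-in expressions, not from the original module) of how the pieces above combine: learning_rate times the sum of the per-stage tree expressions, plus the init estimator's expression.

from functools import reduce
from operator import add
from sympy import RealNumber, Symbol

x0 = Symbol('x0')
stage_exprs = [x0 + RealNumber(1), RealNumber(2) * x0]  # stand-ins for sym_predict(tree)
learning_rate = RealNumber(0.1)
init_expr = RealNumber(0.5)                             # stand-in for sym_predict(estimator.init_)
decision = learning_rate * reduce(add, stage_exprs) + init_expr
print(decision)  # a single symbolic decision function in x0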
Example 5
def sym_predict_proba_parts__calibrated_classifier(estimator):
    if hasattr(estimator.base_estimator, 'decision_function'):
        inner_pred = sym_decision_function(estimator.base_estimator)
    elif hasattr(estimator.base_estimator, 'predict_proba'):
        inner_pred = sym_predict_proba(estimator.base_estimator)
    result = Zero()
    var = None
    for cal in estimator.calibrators_:
        variables = syms(cal)
        if len(variables) != 1 or (var != variables[0] and var is not None):
            raise ValueError()
        var = variables[0]
        result += sym_predict(cal)
    result = result / RealNumber(len(estimator.calibrators_))
    return ((var, ), [result], (syms(estimator.base_estimator), inner_pred,
                                None))
Example 6
def sym_predict_proba__calibrated_classifier(estimator):
    if hasattr(estimator.base_estimator, 'decision_function'):
        inner_pred = sym_decision_function(estimator.base_estimator)
    elif hasattr(estimator.base_estimator, 'predict_proba'):
        inner_pred = sym_predict_proba(estimator.base_estimator)
    # inner_pred = fallback(sym_decision_function, sym_predict_proba)(estimator.base_estimator)
    result = Zero()
    for cal in estimator.calibrators_:
        variables = syms(cal)
        if len(variables) != 1:
            raise ValueError()
        var = variables[0]
        result += sym_predict(cal).subs({var: inner_pred})
    return result / RealNumber(len(estimator.calibrators_))
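A minimal sketch (stand-in expressions, not from the original module) of the subs step above: the calibrator's single-variable expression is composed with the inner model's expression.

from sympy import Symbol, exp

d = Symbol('d')                 # the calibrator's single input variable
x0, x1 = Symbol('x0'), Symbol('x1')
inner_pred = 2 * x0 - x1        # stand-in for sym_decision_function(base_estimator)
calibrator = 1 / (1 + exp(-d))  # stand-in sigmoid calibrator, i.e. sym_predict(cal)
composed = calibrator.subs({d: inner_pred})
print(composed)  # 1/(1 + exp(-2*x0 + x1)), up to sympy's printing order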
Example 7
def indicator(arg):
    # Closure over self: 1 inside [lower, upper], 0 outside; the
    # lower_closed/upper_closed flags control whether each endpoint
    # counts as inside.
    return Piecewise(
        (RealNumber(0), arg < self.lower if self.lower_closed else arg <= self.lower),
        (RealNumber(0), arg > self.upper if self.upper_closed else arg >= self.upper),
        (RealNumber(1), True))
Example 8
def indicator(arg):
    # Closure over self: clip arg to [lower, upper], returning the bound
    # itself outside the interval and arg unchanged inside it.
    return Piecewise((RealNumber(self.lower), arg < self.lower),
                     (RealNumber(self.upper), arg > self.upper),
                     (arg, True))
Example 9
def sym_transform(self):
    # Shifted log of each input symbol, floored at guard + offset before
    # taking the log.
    return list(map(
        lambda x: log(Max(RealNumber(self.guard) + RealNumber(self.offset),
                          x + RealNumber(self.offset))),
        self.syms()))
Example 10
def sym_transform(self, xlabels):
    # Constant transform: always the stored value, regardless of inputs.
    return RealNumber(self.value)
Example 11
def sym_score_to_decision(loss):
    # 1 when the predicted probability exceeds 1/2, else 0.
    return Piecewise(
        (RealNumber(1), sym_score_to_proba(loss) > RealNumber(1) / RealNumber(2)),
        (RealNumber(0), true))
Example 12
def sym_predict_mean_estimator(estimator):
    return RealNumber(estimator.mean)
Example 13
def sym_predict_log_odds_estimator(estimator):
    return RealNumber(estimator.prior)
Example 14
def sym_predict_quantile_estimator(estimator):
    return RealNumber(estimator.quantile)