def test_score_to_label(self):
    manual_scores = [0.1, 0.4, 0.2, 0.3, 0.5, 0.9, 0.7, 1, 0.8, 0.6]

    labels = score_to_label(manual_scores, outliers_fraction=0.1)
    assert_allclose(labels, [0, 0, 0, 0, 0, 0, 0, 1, 0, 0])

    labels = score_to_label(manual_scores, outliers_fraction=0.3)
    assert_allclose(labels, [0, 0, 0, 0, 0, 1, 0, 1, 1, 0])
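For context, here is a minimal, hedged sketch of the percentile-style thresholding that the test above exercises. The helper name _score_to_label_sketch is illustrative and not part of the library; the real score_to_label may be implemented differently, but this reproduces the behavior the assertions expect (flag the top outliers_fraction of scores as 1, the rest as 0).

import numpy as np

def _score_to_label_sketch(scores, outliers_fraction=0.1):
    # Scores above the (1 - outliers_fraction) percentile are outliers (1).
    threshold = np.percentile(scores, 100 * (1 - outliers_fraction))
    return (np.asarray(scores) > threshold).astype(int)

# Mirrors the first assertion above: only the maximum score (1.0) exceeds
# the 90th-percentile cutoff, so only that position is labeled 1.
print(_score_to_label_sketch(
    [0.1, 0.4, 0.2, 0.3, 0.5, 0.9, 0.7, 1, 0.8, 0.6], outliers_fraction=0.1))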
def _parallel_predict(n_estimators, clfs, approximators, X, total_n_estimators,
                      rp_transformers, approx_flags, contamination, verbose):
    X = check_array(X)

    pred = []
    for i in range(n_estimators):
        estimator = clfs[i]
        if verbose > 1:
            print("predicting with estimator %d of %d for this parallel run "
                  "(total %d)..." % (i + 1, n_estimators, total_n_estimators))

        # project the data with this estimator's random projection transformer
        X_scaled = jl_transform(X, rp_transformers[i])

        # turn approximator scores into binary outlier labels,
        # or fall back to the original estimator's predictions
        if approx_flags[i] == 1:
            predicted_labels = score_to_label(
                approximators[i].predict(X_scaled),
                outliers_fraction=contamination)
        else:
            predicted_labels = estimator.predict(X_scaled)

        pred.append(predicted_labels)

    return pred
def _parallel_predict(n_estimators, clfs, approximators, X, total_n_estimators,
                      rp_flags, rp_transformers, approx_flags, contamination,
                      verbose):
    X = check_array(X)

    pred = []
    for i in range(n_estimators):
        estimator = clfs[i]
        if verbose > 1:
            print("predicting with estimator %d of %d for this parallel run "
                  "(total %d)..." % (i + 1, n_estimators, total_n_estimators))

        # apply the random projection only if it is enabled for this estimator
        if rp_flags[i] == 1:
            X_scaled = jl_transform(X, rp_transformers[i])
        else:
            X_scaled = X

        # turn approximator scores into binary outlier labels
        # todo: decide whether the approximation should happen on the reduced
        # space or the original space. For now, it is on the original space.
        if approx_flags[i] == 1:
            predicted_labels = score_to_label(
                approximators[i].predict(X),
                outliers_fraction=contamination)
        else:
            predicted_labels = estimator.predict(X_scaled)

        pred.append(predicted_labels)

    return pred
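A minimal, hedged usage sketch of how chunks of estimators could be dispatched to _parallel_predict with joblib. Everything below is illustrative, not library API: the _demo_parallel_predict name, the DecisionTreeRegressor approximators, the pseudo scores they are fitted on, and the equal-size chunking are all assumptions; real callers would pass fitted detectors, random-projection transformers, and the project's own partitioning of estimators across workers. It assumes check_array and score_to_label are already imported at the top of this module, as the function above requires.

def _demo_parallel_predict():
    import numpy as np
    from joblib import Parallel, delayed
    from sklearn.tree import DecisionTreeRegressor

    rng = np.random.RandomState(42)
    X_train = rng.rand(200, 5)
    X_test = rng.rand(50, 5)

    # stand-in "outlier scores" used only to fit toy approximators
    pseudo_scores = X_train.sum(axis=1)
    approximators = [
        DecisionTreeRegressor(max_depth=3).fit(X_train, pseudo_scores)
        for _ in range(4)
    ]

    chunk = 2  # 4 approximators split into 2 chunks of 2
    all_results = Parallel(n_jobs=2, verbose=0)(
        delayed(_parallel_predict)(
            n_estimators=chunk,
            clfs=[None] * chunk,             # unused when approx_flags are 1
            approximators=approximators[i:i + chunk],
            X=X_test,
            total_n_estimators=4,
            rp_flags=[0] * chunk,            # skip random projection
            rp_transformers=[None] * chunk,  # unused when rp_flags are 0
            approx_flags=[1] * chunk,        # always use the approximators
            contamination=0.1,
            verbose=0)
        for i in range(0, 4, chunk))

    # flatten the per-worker lists into one list of label arrays
    return [labels for worker_out in all_results for labels in worker_out]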