Example 1
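This test, apparently from the REP (Reproducible Experiment Platform) test suite, runs GridOptimalSearchCV over the regularization parameter of a QDA classifier, cycling through three parameter-generation strategies and four scoring metrics while dispatching the evaluations to a pool of worker threads.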
from collections import OrderedDict
from itertools import cycle

import numpy
from sklearn.metrics import log_loss
from sklearn.qda import QDA  # QuadraticDiscriminantAnalysis in recent scikit-learn

# The REP imports below assume the yandex/rep package layout.
from rep.estimators import SklearnClassifier
from rep.metaml import (GridOptimalSearchCV, FoldingScorer,
                        RegressionParameterOptimizer,
                        SubgridParameterOptimizer,
                        RandomParameterOptimizer)
from rep.report.metrics import RocAuc, OptimalAMS, OptimalSignificance
from rep.test.test_estimators import generate_classification_data


def test_gridsearch_metrics_threads(n_threads=3):
    X, y, sample_weight = generate_classification_data(n_classes=2,
                                                       distance=0.7)
    param_grid = OrderedDict({'reg_param': numpy.linspace(0, 1, 20)})

    # Three interchangeable strategies for proposing points of the grid.
    optimizers = cycle([
        RegressionParameterOptimizer(param_grid=param_grid,
                                     n_evaluations=4,
                                     start_evaluations=2),
        SubgridParameterOptimizer(param_grid=param_grid, n_evaluations=4),
        RandomParameterOptimizer(param_grid=param_grid, n_evaluations=4),
    ])

    for metric in [RocAuc(), OptimalAMS(), OptimalSignificance(), log_loss]:
        # Each candidate parameter set is scored with k-fold cross-validation.
        scorer = FoldingScorer(metric)
        clf = SklearnClassifier(QDA())
        grid = GridOptimalSearchCV(
            estimator=clf,
            params_generator=next(optimizers),
            scorer=scorer,
            parallel_profile='threads-{}'.format(n_threads))
        grid.fit(X, y)
        print(grid.params_generator.best_score_)
        print(grid.params_generator.best_params_)
        grid.params_generator.print_results()
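The 'threads-{n}' string passed as parallel_profile tells REP to score candidate parameter sets in that many local threads; the same argument can also name an IPython cluster profile for distributed evaluation.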
Example 2
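This test checks that REP's threshold-optimizing metrics are internally consistent: the value returned by calling the metric equals the maximum over all candidate thresholds, and the value at any single threshold can be reproduced by hand from the stored expected signal and background yields (expected_s and expected_b).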
import numpy

# The REP imports below assume the yandex/rep package layout.
from rep.report.metrics import OptimalAMS, OptimalSignificance, ams, significance


def test_optimal_metric_function(size=10000):
    labels = numpy.random.randint(0, 2, size=size)
    # Random two-class probabilities, normalized so each row sums to 1.
    predictions = numpy.random.random(size=[size, 2])
    predictions /= predictions.sum(axis=1, keepdims=True)
    sample_weight = numpy.random.random(size=size)

    for metric, optimal_metric in [(significance, OptimalSignificance()),
                                   (ams, OptimalAMS())]:
        optimal_metric.fit(None, labels, sample_weight=sample_weight)
        value = optimal_metric(labels,
                               predictions,
                               sample_weight=sample_weight)
        thresholds, values = optimal_metric.compute(
            labels, predictions, sample_weight=sample_weight)
        assert numpy.max(values) == value, "maximal value doesn't coincide"

        # Reproduce the metric at one randomly chosen threshold by hand.
        index = numpy.random.randint(0, len(thresholds))
        threshold = thresholds[index]
        passed = predictions[:, 1] >= threshold

        # Weighted signal/background efficiencies scaled by expected yields.
        s = optimal_metric.expected_s * numpy.average(
            passed, weights=sample_weight * (labels == 1))
        b = optimal_metric.expected_b * numpy.average(
            passed, weights=sample_weight * (labels == 0))
        assert numpy.allclose(metric(s, b), values[index]), \
            "values don't coincide: {} {} {}".format(
                type(optimal_metric), metric(s, b), values[index])
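For context, ams here is the approximate median significance (AMS) from the HiggsML challenge, and significance is a simpler signal-over-root-background style ratio. A minimal standalone sketch of the AMS formula follows; the br=10 regularization default matches the challenge definition, though whether REP's ams uses the same default is an assumption:

import numpy

def ams(s, b, br=10.):
    # Approximate median significance; the br term regularizes small b.
    # AMS = sqrt(2 * ((s + b + br) * ln(1 + s / (b + br)) - s))
    return numpy.sqrt(2 * ((s + b + br) * numpy.log(1 + s / (b + br)) - s))

print(ams(s=100., b=1000.))  # ~3.10, close to the naive s / sqrt(b) ~ 3.16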