Example #1
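The snippets in these examples share a common test harness. A minimal sketch of the setup they assume is below; the module paths, the get_training_instances_and_models fixture, and the tolerance E are assumptions, not shown in the examples themselves.

# Assumed shared setup for the snippets below (a sketch, not the actual test module).
from unittest.mock import Mock

import numpy as np
from numpy.linalg import norm

# Hypothetical import paths; the real package layout may differ.
from miplearn import (Classifier, Regressor, InternalSolver, LearningSolver,
                      LazyConstraintsComponent, ObjectiveValueComponent,
                      PrimalSolutionComponent, BranchPriorityComponent,
                      BranchPriorityExtractor)

E = 0.1  # tolerance used by the norm-based assertions (assumed value)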
def test_lazy_before():
    instances, models = get_training_instances_and_models()
    instances[0].build_lazy_constraint = Mock(return_value="c1")
    solver = LearningSolver()
    solver.internal_solver = Mock(spec=InternalSolver)
    component = LazyConstraintsComponent(threshold=0.10)
    component.classifiers = {
        "a": Mock(spec=Classifier),
        "b": Mock(spec=Classifier)
    }
    component.classifiers["a"].predict_proba = Mock(
        return_value=[[0.95, 0.05]])
    component.classifiers["b"].predict_proba = Mock(
        return_value=[[0.02, 0.80]])

    component.before_solve(solver, instances[0], models[0])

    # Should ask each classifier for the likelihood of its constraint being violated
    expected_x_test_a = np.array([[67., 21.75, 1287.92]])
    expected_x_test_b = np.array([[67., 21.75, 1287.92]])
    actual_x_test_a = component.classifiers["a"].predict_proba.call_args[0][0]
    actual_x_test_b = component.classifiers["b"].predict_proba.call_args[0][0]
    assert norm(expected_x_test_a - actual_x_test_a) < E
    assert norm(expected_x_test_b - actual_x_test_b) < E

    # Should ask the instance to build lazy constraints for those whose likelihood
    # of being violated exceeds the threshold
    instances[0].build_lazy_constraint.assert_called_once_with(models[0], "b")

    # Should ask internal solver to add generated constraint
    solver.internal_solver.add_constraint.assert_called_once_with("c1")
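Why only "b" is built above, a sketch of the threshold check (assuming the component reads the second predict_proba column as the violation probability):

# Constraint "a" scores 0.05 < 0.10, so it is skipped; "b" scores 0.80 >= 0.10.
probs = {"a": 0.05, "b": 0.80}
threshold = 0.10
assert [cid for cid, p in probs.items() if p >= threshold] == ["b"]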
Example #2
def test_lazy_evaluate():
    instances, models = get_training_instances_and_models()
    component = LazyConstraintsComponent()
    component.classifiers = {
        "a": Mock(spec=Classifier),
        "b": Mock(spec=Classifier),
        "c": Mock(spec=Classifier)
    }
    component.classifiers["a"].predict_proba = Mock(return_value=[[1.0, 0.0]])
    component.classifiers["b"].predict_proba = Mock(return_value=[[0.0, 1.0]])
    component.classifiers["c"].predict_proba = Mock(return_value=[[0.0, 1.0]])

    instances[0].found_violations = ["a", "b", "c"]
    instances[1].found_violations = ["b", "d"]
    assert component.evaluate(instances) == {
        0: {
            "Accuracy": 0.75,
            "F1 score": 0.8,
            "Precision": 1.0,
            "Recall": 2 / 3.,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 3,
            "Condition negative": 1,
            "False negative": 1,
            "False positive": 0,
            "True negative": 1,
            "True positive": 2,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 75.0,
            "Condition negative (%)": 25.0,
            "False negative (%)": 25.0,
            "False positive (%)": 0,
            "True negative (%)": 25.0,
            "True positive (%)": 50.0,
        },
        1: {
            "Accuracy": 0.5,
            "F1 score": 0.5,
            "Precision": 0.5,
            "Recall": 0.5,
            "Predicted positive": 2,
            "Predicted negative": 2,
            "Condition positive": 2,
            "Condition negative": 2,
            "False negative": 1,
            "False positive": 1,
            "True negative": 1,
            "True positive": 1,
            "Predicted positive (%)": 50.0,
            "Predicted negative (%)": 50.0,
            "Condition positive (%)": 50.0,
            "Condition negative (%)": 50.0,
            "False negative (%)": 25.0,
            "False positive (%)": 25.0,
            "True negative (%)": 25.0,
            "True positive (%)": 25.0,
        }
    }
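The instance-0 metrics above follow from a four-constraint confusion matrix: predicted violated = {b, c}, actually violated = {a, b, c}, and d is the only true negative. A sketch of the arithmetic only:

# b and c are true positives, a is a false negative, d is a true negative.
tp, fp, tn, fn = 2, 0, 1, 1
assert (tp + tn) / 4 == 0.75                          # accuracy
assert tp / (tp + fp) == 1.0                          # precision
recall = tp / (tp + fn)
assert abs(recall - 2 / 3.) < 1e-9                    # recall
assert abs(2 * recall / (1 + recall) - 0.8) < 1e-9    # F1 (precision is 1.0)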
Example #3
def test_obj_evaluate():
    instances, models = get_training_instances_and_models()
    reg = Mock(spec=Regressor)
    reg.predict = Mock(return_value=np.array([1000.0, 1000.0]))
    comp = ObjectiveValueComponent(regressor=reg)
    comp.fit(instances)
    ev = comp.evaluate(instances)
    assert ev == {
        'Lower bound': {
            'Explained variance': 0.0,
            'Max error': 183.0,
            'Mean absolute error': 126.5,
            'Mean squared error': 19194.5,
            'Median absolute error': 126.5,
            'R2': -5.012843605607331,
        },
        'Upper bound': {
            'Explained variance': 0.0,
            'Max error': 183.0,
            'Mean absolute error': 126.5,
            'Mean squared error': 19194.5,
            'Median absolute error': 126.5,
            'R2': -5.012843605607331,
        }
    }
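Assuming the two training instances have optimal values 1183.0 and 1070.0 (consistent with Example #6), the error metrics above follow from the constant 1000.0 prediction. A sketch of the arithmetic:

import numpy as np

y_true = np.array([1183.0, 1070.0])
y_pred = np.array([1000.0, 1000.0])
err = np.abs(y_true - y_pred)                        # [183., 70.]
assert err.max() == 183.0                            # max error
assert err.mean() == 126.5                           # mean (and median) absolute error
assert (err ** 2).mean() == 19194.5                  # mean squared error
r2 = 1 - (err ** 2).sum() / ((y_true - y_true.mean()) ** 2).sum()
assert abs(r2 - (-5.012843605607331)) < 1e-9         # R2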
Example #4
def test_branch_calculate():
    instances, models = get_training_instances_and_models()
    comp = BranchPriorityComponent()

    # If instances do not have the branch_priorities attribute, fit should compute it
    comp.fit(instances)
    assert instances[0].branch_priorities == {
        "x": {
            0: 5730,
            1: 24878,
            2: 0,
            3: 0,
        }
    }

    # If instances already have branch_priorities, fit should not modify them
    instances[0].branch_priorities = {"x": {0: 100, 1: 200, 2: 300, 3: 400}}
    comp.fit(instances)
    assert instances[0].branch_priorities == {
        "x": {
            0: 100,
            1: 200,
            2: 300,
            3: 400
        }
    }
Example #5
def test_branch_extract():
    instances, models = get_training_instances_and_models()
    instances[0].branch_priorities = {"x": {0: 100, 1: 200, 2: 300, 3: 400}}
    instances[1].branch_priorities = {"x": {0: 150, 1: 250, 2: 350, 3: 450}}
    priorities = BranchPriorityExtractor().extract(instances)
    assert priorities["default"].tolist() == [
        100, 200, 300, 400, 150, 250, 350, 450
    ]
Example #6
def test_usage():
    instances, models = get_training_instances_and_models()
    comp = ObjectiveValueComponent()
    comp.fit(instances)
    assert instances[0].lower_bound == 1183.0
    assert instances[0].upper_bound == 1183.0
    assert comp.predict(instances).tolist() == [[1183.0, 1183.0],
                                                [1070.0, 1070.0]]
Example #7
def test_predict():
    instances, models = get_training_instances_and_models()
    comp = PrimalSolutionComponent()
    comp.fit(instances)
    solution = comp.predict(instances[0])
    assert "x" in solution
    assert 0 in solution["x"]
    assert 1 in solution["x"]
    assert 2 in solution["x"]
    assert 3 in solution["x"]
Example #8
def test_branch_x_y_predict():
    instances, models = get_training_instances_and_models()
    instances[0].branch_priorities = {"x": {0: 100, 1: 200, 2: 300, 3: 400}}
    instances[1].branch_priorities = {"x": {0: 150, 1: 250, 2: 350, 3: 450}}
    comp = BranchPriorityComponent()
    comp.regressors["default"] = Mock(spec=Regressor)
    comp.regressors["default"].predict = Mock(
        return_value=np.array([150., 100., 0., 0.]))
    x, y = comp.x(instances), comp.y(instances)
    assert x["default"].shape == (8, 5)
    assert y["default"].shape == (8, )
    pred = comp.predict(instances[0])
    assert pred == {"x": {0: 150., 1: 100., 2: 0., 3: 0.}}
Example #9
def test_lazy_fit():
    instances, models = get_training_instances_and_models()
    instances[0].found_violations = ["a", "b"]
    instances[1].found_violations = ["b", "c"]
    classifier = Mock(spec=Classifier)
    component = LazyConstraintsComponent(classifier=classifier)

    component.fit(instances)

    # Should create one classifier for each violation
    assert "a" in component.classifiers
    assert "b" in component.classifiers
    assert "c" in component.classifiers

    # Should provide correct x_train to each classifier
    expected_x_train_a = np.array([[67., 21.75, 1287.92],
                                   [70., 23.75, 1199.83]])
    expected_x_train_b = np.array([[67., 21.75, 1287.92],
                                   [70., 23.75, 1199.83]])
    expected_x_train_c = np.array([[67., 21.75, 1287.92],
                                   [70., 23.75, 1199.83]])
    actual_x_train_a = component.classifiers["a"].fit.call_args[0][0]
    actual_x_train_b = component.classifiers["b"].fit.call_args[0][0]
    actual_x_train_c = component.classifiers["c"].fit.call_args[0][0]
    assert norm(expected_x_train_a - actual_x_train_a) < E
    assert norm(expected_x_train_b - actual_x_train_b) < E
    assert norm(expected_x_train_c - actual_x_train_c) < E

    # Should provide correct y_train to each classifier
    expected_y_train_a = np.array([1.0, 0.0])
    expected_y_train_b = np.array([1.0, 1.0])
    expected_y_train_c = np.array([0.0, 1.0])
    actual_y_train_a = component.classifiers["a"].fit.call_args[0][1]
    actual_y_train_b = component.classifiers["b"].fit.call_args[0][1]
    actual_y_train_c = component.classifiers["c"].fit.call_args[0][1]
    assert norm(expected_y_train_a - actual_y_train_a) < E
    assert norm(expected_y_train_b - actual_y_train_b) < E
    assert norm(expected_y_train_c - actual_y_train_c) < E
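The y_train vectors above are indicator vectors of which training instances reported each violation; a sketch:

violations = [["a", "b"], ["b", "c"]]    # instances[0] and instances[1]
for cid, expected in [("a", [1.0, 0.0]), ("b", [1.0, 1.0]), ("c", [0.0, 1.0])]:
    assert [1.0 if cid in v else 0.0 for v in violations] == expected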
Example #10
def test_primal_parallel_fit():
    instances, models = get_training_instances_and_models()
    comp = PrimalSolutionComponent()
    comp.fit(instances, n_jobs=2)
    assert len(comp.classifiers) == 2
Example #11
def test_evaluate():
    instances, models = get_training_instances_and_models()
    clf_zero = Mock(spec=Classifier)
    clf_zero.predict_proba = Mock(return_value=np.array([
        [0., 1.],  # x[0]
        [0., 1.],  # x[1]
        [1., 0.],  # x[2]
        [1., 0.],  # x[3]
    ]))
    clf_one = Mock(spec=Classifier)
    clf_one.predict_proba = Mock(return_value=np.array([
        [1., 0.],  # x[0] instances[0]
        [1., 0.],  # x[1] instances[0]
        [0., 1.],  # x[2] instances[0]
        [1., 0.],  # x[3] instances[0]
    ]))
    comp = PrimalSolutionComponent(classifier=[clf_zero, clf_one],
                                   threshold=0.50)
    comp.fit(instances[:1])
    assert comp.predict(instances[0]) == {"x": {0: 0,
                                                1: 0,
                                                2: 1,
                                                3: None}}
    assert instances[0].solution == {"x": {0: 1,
                                           1: 0,
                                           2: 1,
                                           3: 1}}
    ev = comp.evaluate(instances[:1])
    assert ev == {'Fix one': {0: {'Accuracy': 0.5,
                                  'Condition negative': 1,
                                  'Condition negative (%)': 25.0,
                                  'Condition positive': 3,
                                  'Condition positive (%)': 75.0,
                                  'F1 score': 0.5,
                                  'False negative': 2,
                                  'False negative (%)': 50.0,
                                  'False positive': 0,
                                  'False positive (%)': 0.0,
                                  'Precision': 1.0,
                                  'Predicted negative': 3,
                                  'Predicted negative (%)': 75.0,
                                  'Predicted positive': 1,
                                  'Predicted positive (%)': 25.0,
                                  'Recall': 0.3333333333333333,
                                  'True negative': 1,
                                  'True negative (%)': 25.0,
                                  'True positive': 1,
                                  'True positive (%)': 25.0}},
                  'Fix zero': {0: {'Accuracy': 0.75,
                                   'Condition negative': 3,
                                   'Condition negative (%)': 75.0,
                                   'Condition positive': 1,
                                   'Condition positive (%)': 25.0,
                                   'F1 score': 0.6666666666666666,
                                   'False negative': 0,
                                   'False negative (%)': 0.0,
                                   'False positive': 1,
                                   'False positive (%)': 25.0,
                                   'Precision': 0.5,
                                   'Predicted negative': 2,
                                   'Predicted negative (%)': 50.0,
                                   'Predicted positive': 2,
                                   'Predicted positive (%)': 50.0,
                                   'Recall': 1.0,
                                   'True negative': 2,
                                   'True negative (%)': 50.0,
                                   'True positive': 1,
                                   'True positive (%)': 25.0}}}
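The 'Fix zero' figures above compare the predicted-zero set {x[0], x[1]} against the actual solution, in which only x[1] is zero. A sketch of the arithmetic for that block:

predicted_zero = {0, 1}
actual_zero = {1}                          # from instances[0].solution
universe = {0, 1, 2, 3}
tp = len(predicted_zero & actual_zero)                 # 1
fp = len(predicted_zero - actual_zero)                 # 1
fn = len(actual_zero - predicted_zero)                 # 0
tn = len(universe - predicted_zero - actual_zero)      # 2
assert (tp + tn) / len(universe) == 0.75               # accuracy
assert tp / (tp + fp) == 0.5                           # precision
assert tp / (tp + fn) == 1.0                           # recall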