Example #1
def test_growth_from_data_qualitative(model, experiment, threshold=0.95):
    """
    Expect a perfect accuracy when predicting growth.

    The in-silico growth prediction is compared with experimental
    data and the accuracy is expected to be better than 0.95.
    In principal, Matthews' correlation coefficient is a more comprehensive
    metric but is a little fragile to not having any false negatives or false
    positives in the output.

    """
    ann = test_growth_from_data_qualitative.annotation
    exp = pytest.memote.experimental.growth[experiment]
    expected = exp.data
    test = exp.evaluate(model)
    # Growth data sets need not use unique exchange reactions, so we use
    # the numeric index here to compute the confusion matrix.
    ann["data"][experiment] = confusion_matrix(
        set(test.loc[test["growth"], "exchange"].index),
        set(expected.loc[expected["growth"], "exchange"].index),
        set(test.loc[~test["growth"], "exchange"].index),
        set(expected.loc[~expected["growth"], "exchange"].index))
    ann["metric"][experiment] = ann["data"][experiment]["ACC"]
    ann["message"][experiment] = wrapper.fill(
        """Ideally, every model would show a perfect accuracy of 1. In
        experiment '{}' the model has  {:.2}.""".format(
            experiment, ann["data"][experiment]["MCC"]))
    assert ann["data"][experiment]["ACC"] > threshold
Example #2
def test_gene_essentiality_from_data_qualitative(model,
                                                 experiment,
                                                 threshold=0.95):
    """
    Expect a perfect accuracy when predicting gene essentiality.

    The in-silico gene essentiality is compared with experimental
    data and the accuracy is expected to be better than 0.95.
    In principal, Matthews' correlation coefficient is a more comprehensive
    metric but is a little fragile to not having any false negatives or false
    positives in the output.

    """
    ann = test_gene_essentiality_from_data_qualitative.annotation
    exp = pytest.memote.experimental.essentiality[experiment]
    expected = exp.data
    test = exp.evaluate(model)
    ann["data"][experiment] = confusion_matrix(
        set(test.loc[test["essential"], "gene"]),
        set(expected.loc[expected["essential"], "gene"]),
        set(test.loc[~test["essential"], "gene"]),
        set(expected.loc[~expected["essential"], "gene"]))
    ann["metric"][experiment] = ann["data"][experiment]["ACC"]
    ann["message"][experiment] = wrapper.fill(
        """Ideally, every model would show a perfect accuracy of 1. In
        experiment '{}' the model has  {:.2}.""".format(
            experiment, ann["data"][experiment]["MCC"]))
    assert ann["data"][experiment]["ACC"] > threshold
Example #3
def test_confusion_matrix(input_values, expected_results):
    result_dict = essentiality.confusion_matrix(*input_values)
    for key, value in result_dict.items():
        # Scalar statistics are compared numerically; the remaining
        # entries are collections of identifiers and are compared as sets.
        if key in ["TPR", "TNR", "PPV", "FDR", "ACC", "MCC"]:
            assert np.isclose(value, expected_results[key], atol=1e-03)
        else:
            assert set(value) == set(expected_results[key])
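
The input_values and expected_results fixtures are defined elsewhere in
the suite. One way they might be parametrized is sketched below; the
gene identifiers and expected statistics are illustrative and
hand-computed, and the import path of the essentiality module is an
assumption.

import numpy as np
import pytest

from memote.support import essentiality  # assumed import path

@pytest.mark.parametrize("input_values, expected_results", [
    (
        # predicted essential, expected essential,
        # predicted non-essential, expected non-essential
        ({"g1", "g2"}, {"g1", "g3"}, {"g3", "g4"}, {"g2", "g4"}),
        {"TP": {"g1"}, "TN": {"g4"}, "FP": {"g2"}, "FN": {"g3"},
         "TPR": 0.5, "TNR": 0.5, "PPV": 0.5, "FDR": 0.5,
         "ACC": 0.5, "MCC": 0.0},
    ),
])
def test_confusion_matrix(input_values, expected_results):
    result_dict = essentiality.confusion_matrix(*input_values)
    for key, value in result_dict.items():
        if key in ["TPR", "TNR", "PPV", "FDR", "ACC", "MCC"]:
            assert np.isclose(value, expected_results[key], atol=1e-03)
        else:
            assert set(value) == set(expected_results[key])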
Example #4
def test_growth_from_data_qualitative(model, experiment, threshold=0.95):
    """
    Expect a perfect accuracy when predicting growth.

    The in-silico growth prediction is compared with experimental
    data and the accuracy is expected to be better than 0.95.
    In principal, Matthews' correlation coefficient is a more comprehensive
    metric but is a little fragile to not having any false negatives or false
    positives in the output.

    Implementation:
    Read and validate experimental config file and data tables. Constrain the
    model with the parameters provided by a user's definition of the medium,
    then compute a confusion matrix based on the predicted true, expected
    true, predicted false and expected false growth.
    The individual values of the confusion matrix are calculated as described
    in https://en.wikipedia.org/wiki/Confusion_matrix

    """
    ann = test_growth_from_data_qualitative.annotation
    name, exp = experiment
    expected = exp.data
    test = exp.evaluate(model)
    # Growth data sets need not use unique exchange reactions, so we use
    # the numeric index here to compute the confusion matrix.
    ann["data"][name] = result = confusion_matrix(
        set(test.loc[test["growth"], "exchange"].index),
        set(expected.loc[expected["growth"], "exchange"].index),
        set(test.loc[~test["growth"], "exchange"].index),
        set(expected.loc[~expected["growth"], "exchange"].index)
    )
    ann["metric"][name] = result["ACC"]
    ann["message"][name] = wrapper.fill(
        """Ideally, every model would show a perfect accuracy of 1. In
        name '{}' the model has  {:.2}.""".format(
            name, result["ACC"]))
    assert result["ACC"] > threshold