Example #1
    def load(self, filename):
        """Load a result from the given JSON file."""
        LOGGER.info("Loading result from '%s'.", filename)
        if filename.endswith(".gz"):
            with gzip.open(filename, "rb") as file_handle:
                result = MemoteResult(
                    json.loads(file_handle.read().decode("utf-8")))
        else:
            with open(filename, "r", encoding="utf-8") as file_handle:
                result = MemoteResult(json.load(file_handle))
        # TODO (Moritz Beber): Validate the read-in JSON? There is a trade-off
        # between the extra time taken and correctness. We may revisit this
        # when a new JSON format version is needed.
        return result
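
For orientation, a minimal usage sketch of the method above. The manager object and file names are hypothetical placeholders; the two calls simply exercise the gzip and plain-JSON branches.

# Hypothetical manager exposing the load method above; paths are placeholders.
manager = get_result_manager()  # hypothetical factory
gz_result = manager.load("model_report.json.gz")   # gzip branch
plain_result = manager.load("model_report.json")   # plain-JSON branch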
Example #2
    def load(self, filename):
        """Load a result from the given JSON file."""
        # TODO: Validate the read-in JSON?
        LOGGER.info("Loading result from '%s'.", filename)
        with open(filename, encoding="utf-8") as file_handle:
            result = MemoteResult(json.load(file_handle))
        return result
Example #3
    def __init__(self,
                 model,
                 experimental_config=None,
                 exclusive=None,
                 skip=None,
                 **kwargs):
        """
        Collect and store values during testing.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        experimental_config : memote.ExperimentConfiguration, optional
            A description of experiments.
        exclusive : iterable, optional
            Names of test cases or modules to run and exclude all others. Takes
            precedence over ``skip``.
        skip : iterable, optional
            Names of test cases or modules to skip.

        """
        super(ResultCollectionPlugin, self).__init__(**kwargs)
        self._model = model
        self._exp_config = experimental_config
        self.results = MemoteResult()
        self._xcld = frozenset() if exclusive is None else frozenset(exclusive)
        self._skip = frozenset() if skip is None else frozenset(skip)
Example #4
    def __init__(self, model, sbml_version=None, experimental_config=None,
                 exclusive=None, skip=None, **kwargs):
        """
        Collect and store values during testing.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        sbml_version : tuple, optional
            A tuple reporting on the level, version, and FBC use of
            the SBML file.
        experimental_config : memote.ExperimentConfiguration, optional
            A description of experiments.
        exclusive : iterable, optional
            Names of test cases or modules to run and exclude all others. Takes
            precedence over ``skip``.
        skip : iterable, optional
            Names of test cases or modules to skip.

        """
        super(ResultCollectionPlugin, self).__init__(**kwargs)
        self._model = model
        self._sbml_ver = sbml_version
        self._exp_config = experimental_config
        self.results = MemoteResult()
        self.results.add_environment_information(self.results.meta)
        self._xcld = frozenset() if exclusive is None else frozenset(exclusive)
        self._skip = frozenset() if skip is None else frozenset(skip)
Example #5
    def load(self, commit=None):
        """Load a result from the database."""
        git_info = self.record_git_info(commit)
        LOGGER.info("Loading result from '%s'.", git_info.hexsha)
        result = MemoteResult(
            self.session.query(Result.memote_result).
            filter_by(hexsha=git_info.hexsha).
            one().memote_result)
        # Add git info so the object is equivalent to the one returned by the
        # RepoResultManager.
        self.add_git(result.meta, git_info)
        return result
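
A hedged sketch of calling this database-backed variant; it assumes a manager object (for example, an SQL-backed result manager) whose record_git_info resolves a commit hash, and the hash below is a placeholder.

# Fetch the stored result for a specific commit; with commit=None the
# manager is assumed to resolve the repository's current HEAD instead.
result = manager.load(commit="3f4665356a24d76a9461043f62a2b12dab56c75f")
# result.meta now carries the git information added by add_git above.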
Example #6
    def __init__(self,
                 model,
                 sbml_version=None,
                 experimental_config=None,
                 exclusive=None,
                 skip=None,
                 **kwargs):
        """
        Collect and store values during testing.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        sbml_version : tuple, optional
            A tuple reporting on the level, version, and FBC use of
            the SBML file.
        experimental_config : memote.ExperimentConfiguration, optional
            A description of experiments.
        exclusive : iterable, optional
            Names of test cases or modules to run and exclude all others. Takes
            precedence over ``skip``.
        skip : iterable, optional
            Names of test cases or modules to skip.

        """
        super(ResultCollectionPlugin, self).__init__(**kwargs)
        self._model = model
        self._sbml_ver = sbml_version
        self._exp_config = experimental_config
        self.results = MemoteResult()
        self.results.add_environment_information(self.results.meta)
        self._xcld = frozenset() if exclusive is None else frozenset(exclusive)
        self._skip = frozenset() if skip is None else frozenset(skip)
        if LOGGER.getEffectiveLevel() <= logging.DEBUG:
            self._model.solver.configuration.verbosity = 3
Example #7
class ResultCollectionPlugin(object):
    """
    Provide functionality for complex test result collection.

    The plugin exposes the fixture ``store`` which can be used in test
    functions to store values in a dictionary. The dictionary is namespaced to
    the module so within a module the same keys should not be re-used
    (unless intended).

    """

    # Match pytest test case names to decide whether they were parametrized.
    # Seems brittle, can we do better?
    _param = re.compile(r"\[(?P<param>[a-zA-Z0-9_.\-]+)\]$")

    def __init__(self,
                 model,
                 sbml_version=None,
                 experimental_config=None,
                 exclusive=None,
                 skip=None,
                 **kwargs):
        """
        Collect and store values during testing.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        sbml_version : tuple, optional
            A tuple reporting on the level, version, and FBC use of
            the SBML file.
        experimental_config : memote.ExperimentConfiguration, optional
            A description of experiments.
        exclusive : iterable, optional
            Names of test cases or modules to run and exclude all others. Takes
            precedence over ``skip``.
        skip : iterable, optional
            Names of test cases or modules to skip.

        """
        super(ResultCollectionPlugin, self).__init__(**kwargs)
        self._model = model
        self._sbml_ver = sbml_version
        self._exp_config = experimental_config
        self.results = MemoteResult()
        self.results.add_environment_information(self.results.meta)
        self._xcld = frozenset() if exclusive is None else frozenset(exclusive)
        self._skip = frozenset() if skip is None else frozenset(skip)
        if LOGGER.getEffectiveLevel() <= logging.DEBUG:
            self._model.solver.configuration.verbosity = 3

    def pytest_generate_tests(self, metafunc):
        """Parametrize marked functions at runtime."""
        if metafunc.definition.get_closest_marker("biomass"):
            metafunc.parametrize(
                "reaction_id",
                [rxn.id for rxn in find_biomass_reaction(self._model)])
            return
        # Parametrize experimental test cases.
        for kind in ["essentiality", "growth"]:
            # Find a corresponding pytest marker on the test case.
            if not metafunc.definition.get_closest_marker(kind):
                continue
            exp = getattr(self._exp_config, kind, None)
            if exp is None:
                metafunc.parametrize("experiment", [])
            else:
                names = sorted(exp)
                metafunc.parametrize("experiment",
                                     argvalues=[(n, exp[n]) for n in names],
                                     ids=names)
            # We only expect one kind of experimental marker per test case
            # and thus end execution here.
            return

    @pytest.hookimpl(tryfirst=True)
    def pytest_runtest_call(self, item):
        """Either run a test exclusively or skip it."""
        if item.obj.__module__ in self._xcld:
            return
        elif item.obj.__name__ in self._xcld:
            return
        elif len(self._xcld) > 0:
            pytest.skip("Excluded.")
        elif item.obj.__module__ in self._skip:
            pytest.skip("Skipped by module.")
        elif item.obj.__name__ in self._skip:
            pytest.skip("Skipped individually.")

    @pytest.hookimpl(tryfirst=True)
    def pytest_runtest_teardown(self, item):
        """Collect the annotation from each test case and store it."""
        case = self.results.cases.setdefault(item.obj.__name__, dict())
        if hasattr(item.obj, "annotation"):
            case.update(item.obj.annotation)
        else:
            LOGGER.debug("Test case '%s' has no annotation (%s).",
                         item.obj.__name__, item.nodeid)
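
To illustrate how pytest_generate_tests parametrizes marked test cases at runtime, here is a hedged sketch of a test function; the test name and body are hypothetical, but the reaction_id argument and the model fixture match the plugin above.

import pytest

@pytest.mark.biomass
def test_biomass_sanity(reaction_id, model):
    """Runs once per reaction returned by find_biomass_reaction."""
    reaction = model.reactions.get_by_id(reaction_id)
    assert len(reaction.metabolites) > 0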
Example #8
def mock_history_manager():
    """Build a mock history manager that already contains results."""
    result1 = MemoteResult({
        "meta": {
            "branch": "master",
            "commit_author": "John Doe",
            "commit_hash": "3f4665356a24d76a9461043f62a2b12dab56c75f",
            "packages": {
                "SomePackate": "0.1.0"
            },
            "platform": "Darwin",
            "python": "2.7.10",
            "release": "14.5.0",
            "timestamp": "2017-05-03 18:26:11+02:00"
        },
        "tests": {
            "test_parametrized": {
                "data": {
                    "parameter1": ["item2", "item3"],
                    "parameter2": ["item4", "item3"]
                },
                "duration": {
                    "parameter1": 0.12,
                    "parameter2": 0.32
                },
                "format_type": 'percent',
                "message": {
                    "parameter1": "Some Message 1",
                    "parameter2": "Some Message 2"
                },
                "metric": {
                    "parameter1": 0.5,
                    "parameter2": 0.9
                },
                "result": {
                    "parameter1": "failed",
                    "parameter2": "failed"
                },
                "summary": "Some description of the test",
                "title": "Parametrized Test"
            },
            "test_number": {
                "data": ['x', 'y', 'z'],
                "duration": 0.002,
                "format_type": "count",
                "message": "Some Message 3",
                "result": "passed",
                "summary": "Some description again",
                "metric": 0.2,
                "title": "Non-Parametrized Test"
            }
        }
    })
    result2 = MemoteResult({
        "meta": {
            "branch": "develop",
            "commit_author": "John Doe",
            "commit_hash": "6e30d6236f5d47ebb4be39253eaa6a5dcb487687",
            "packages": {
                "SomePackate": "0.1.0"
            },
            "platform": "Darwin",
            "python": "2.7.10",
            "release": "14.5.0",
            "timestamp": "2017-05-03 18:50:11+02:00"
        },
        "tests": {
            "test_parametrized": {
                "data": {
                    "parameter1": ["item1", "item2"],
                    "parameter2": ["item2", "item3"]
                },
                "duration": {
                    "parameter1": 0.2,
                    "parameter2": 0.1
                },
                "format_type": 'percent',
                "message": {
                    "parameter1": "Some Message 1",
                    "parameter2": "Some Message 2"
                },
                "metric": {
                    "parameter1": 1.0,
                    "parameter2": 1.0
                },
                "result": {
                    "parameter1": "failed",
                    "parameter2": "failed"
                },
                "summary": "Some description of the test",
                "title": "Parametrized Test"
            },
            "test_number": {
                "data": ['x', 'y', 'z'],
                "duration": 0.002,
                "format_type": "count",
                "message": "Some Message 3",
                "result": "passed",
                "summary": "Some description again",
                "metric": 0.6,
                "title": "Non-Parametrized Test"
            }
        }
    })
    branch_structure = {
        "commits": {
            "3f4665356a24d76a9461043f62a2b12dab56c75f": {
                "timestamp": "2017-05-03 18:26:11+02:00",
                "author": "John Doe",
                "email": "*****@*****.**"
            },
            "6e30d6236f5d47ebb4be39253eaa6a5dcb487687": {
                "timestamp": "2017-05-03 18:50:11+02:00",
                "author": "John Doe",
                "email": "*****@*****.**"
            }
        },
        "branches": {
            "master": ["3f4665356a24d76a9461043f62a2b12dab56c75f"],
            "develop": [
                "6e30d6236f5d47ebb4be39253eaa6a5dcb487687",
                "3f4665356a24d76a9461043f62a2b12dab56c75f"
            ]
        }
    }
    results = {
        "3f4665356a24d76a9461043f62a2b12dab56c75f": result1,
        "6e30d6236f5d47ebb4be39253eaa6a5dcb487687": result2,
    }

    # Create mock history manager.
    class History(object):
        def __init__(self, **kwargs):
            super(History, self).__init__(**kwargs)
            self._results = results
            self._history = branch_structure

        def get_result(self, commit):
            return results[commit]

        def iter_branches(self):
            return iteritems(self._history["branches"])

        def build_branch_structure(self):
            pass

        def load_history(self):
            pass

    return History()
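
A brief sketch of exercising this mock; the assertions reflect the fixture data above, and MemoteResult is assumed to expose the parsed "meta" section as .meta, as the other snippets suggest.

history = mock_history_manager()
result = history.get_result("3f4665356a24d76a9461043f62a2b12dab56c75f")
assert result.meta["branch"] == "master"
# iter_branches yields (branch name, list of commit hashes) pairs.
for branch, commits in history.iter_branches():
    print(branch, len(commits))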
Example #9
class ResultCollectionPlugin(object):
    """
    Provide functionality for complex test result collection.

    The plugin exposes the fixture ``store`` which can be used in test
    functions to store values in a dictionary. The dictionary is namespaced to
    the module so within a module the same keys should not be re-used
    (unless intended).

    """

    # Match pytest test case names to decide whether they were parametrized.
    # Seems brittle, can we do better?
    _param = re.compile(r"\[(?P<param>[a-zA-Z0-9_.\-]+)\]$")

    def __init__(self, model, sbml_version=None, experimental_config=None,
                 exclusive=None, skip=None, **kwargs):
        """
        Collect and store values during testing.

        Parameters
        ----------
        model : cobra.Model
            The metabolic model under investigation.
        sbml_version : tuple, optional
            A tuple reporting on the level, version, and FBC use of
            the SBML file.
        experimental_config : memote.ExperimentConfiguration, optional
            A description of experiments.
        exclusive : iterable, optional
            Names of test cases or modules to run and exclude all others. Takes
            precedence over ``skip``.
        skip : iterable, optional
            Names of test cases or modules to skip.

        """
        super(ResultCollectionPlugin, self).__init__(**kwargs)
        self._model = model
        self._sbml_ver = sbml_version
        self._exp_config = experimental_config
        self.results = MemoteResult()
        self.results.add_environment_information(self.results.meta)
        self._xcld = frozenset() if exclusive is None else frozenset(exclusive)
        self._skip = frozenset() if skip is None else frozenset(skip)

    def pytest_generate_tests(self, metafunc):
        """Parametrize marked functions at runtime."""
        if metafunc.definition.get_closest_marker("biomass"):
            metafunc.parametrize("reaction_id", [
                rxn.id for rxn in find_biomass_reaction(self._model)])
            return
        # Parametrize experimental test cases.
        for kind in ["essentiality", "growth"]:
            # Find a corresponding pytest marker on the test case.
            if not metafunc.definition.get_closest_marker(kind):
                continue
            exp = getattr(self._exp_config, kind, None)
            if exp is None:
                metafunc.parametrize("experiment", [])
            else:
                metafunc.parametrize(
                    "experiment", list(exp.items()))
            # We only expect one kind of experimental marker per test case
            # and thus end execution here.
            return

    @pytest.hookimpl(tryfirst=True)
    def pytest_runtest_call(self, item):
        """Either run a test exclusively or skip it."""
        if item.obj.__module__ in self._xcld:
            return
        elif item.obj.__name__ in self._xcld:
            return
        elif len(self._xcld) > 0:
            pytest.skip("Excluded.")
        elif item.obj.__module__ in self._skip:
            pytest.skip("Skipped by module.")
        elif item.obj.__name__ in self._skip:
            pytest.skip("Skipped individually.")

    @pytest.hookimpl(tryfirst=True)
    def pytest_runtest_teardown(self, item):
        """Collect the annotation from each test case and store it."""
        case = self.results.cases.setdefault(item.obj.__name__, dict())
        if hasattr(item.obj, "annotation"):
            case.update(item.obj.annotation)
        else:
            LOGGER.debug("Test case '%s' has no annotation (%s).",
                         item.obj.__name__, item.nodeid)

    def pytest_report_teststatus(self, report):
        """
        Log pytest results for each test.

        The categories are passed, failed, error, skipped and marked to fail.

        Parameters
        ----------
        report : TestReport
            A test report object from pytest containing the test case result.

        """
        if report.when == 'teardown':
            return
        item_name = report.location[2]

        # Check for a parametrized test.
        match = self._param.search(item_name)
        if match is not None:
            param = match.group("param")
            item_name = item_name[:match.start()]
            LOGGER.debug(
                "%s with parameter %s %s", item_name, param, report.outcome)
        else:
            LOGGER.debug(
                "%s %s", item_name, report.outcome)

        case = self.results.cases.setdefault(item_name, dict())

        if match is not None:
            case["duration"] = case.setdefault("duration", dict())
            case["duration"][param] = report.duration
            case["result"] = case.setdefault("result", dict())
            case["result"][param] = report.outcome
        else:
            case["duration"] = report.duration
            case["result"] = report.outcome

    @pytest.fixture(scope="function")
    def model(self):
        """Provide each test case with a pristine model."""
        with self._model as model:
            yield model

    @pytest.fixture(scope="session")
    def sbml_version(self):
        """Provide SBML level, version, and FBC use."""
        return self._sbml_ver
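
Finally, a hedged end-to-end sketch of registering the plugin for a test run. cobra.io.read_sbml_model and the plugins argument of pytest.main are real APIs; the file path, version tuple, and test directory are illustrative placeholders.

import cobra
import pytest

model = cobra.io.read_sbml_model("e_coli_core.xml")  # placeholder path
# (level, version, FBC used) per the docstring above; values are illustrative.
plugin = ResultCollectionPlugin(model, sbml_version=(3, 1, True))
exit_code = pytest.main(["--tb", "short", "tests/"], plugins=[plugin])
# Collected annotations and outcomes accumulate on plugin.results.
print(sorted(plugin.results.cases))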