Example #1
def diff(models, filename, pytest_args, exclusive, skip, solver,
         experimental, custom_tests, custom_config):
    """
    Take a snapshot of all the supplied models and generate a diff report.

    MODELS: List of paths to two or more model files.
    """
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    # Build the diff report specific data structure
    diff_results = dict()
    for model_path in models:
        try:
            model_filename = os.path.basename(model_path)
            diff_results.setdefault(model_filename, dict())
            model = callbacks._load_model(model_path)
            model.solver = solver
            _, diff_results[model_filename] = api.test_model(
                model, results=True, pytest_args=pytest_args,
                skip=skip, exclusive=exclusive, experimental=experimental)
        except Exception as e:
            LOGGER.warning(
                "The following exception occurred while processing model "
                "{}: {}".format(model_filename, e))
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing diff report to '%s'.", filename)
        file_handle.write(api.diff_report(diff_results, config))
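
A minimal sketch of driving the same diff workflow programmatically, without the click command wrapper. The import paths and file names below are assumptions for illustration, and api.validate_model (as in Example #4) stands in for the private callbacks._load_model helper.

import os

from memote.suite import api
from memote.suite.reporting import ReportConfiguration

# Hypothetical model paths; replace with real SBML files.
diff_results = {}
for model_path in ["model_a.xml", "model_b.xml"]:
    model, _, _ = api.validate_model(model_path)
    _, diff_results[os.path.basename(model_path)] = api.test_model(
        model, results=True, pytest_args=["--tb", "no"])

with open("diff.html", "w", encoding="utf-8") as handle:
    handle.write(api.diff_report(diff_results, ReportConfiguration.load()))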
Example #2
def history(location, model, filename, deployment, custom_config):
    """Generate a report over a model's git commit history."""
    callbacks.git_installed()
    LOGGER.info("Initialising history report generation.")
    if location is None:
        raise click.BadParameter("No 'location' given or configured.")
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        LOGGER.critical(
            "The history report requires a git repository in order to check "
            "the model's commit history.")
        sys.exit(1)
    LOGGER.info("Obtaining history of results from "
                "the deployment branch {}.".format(deployment))
    repo.git.checkout(deployment)
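    # 'location' may be a database URL or a plain directory path; fall back
    # to file-based result storage if it is not a valid database URL.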
    try:
        manager = managers.SQLResultManager(repository=repo, location=location)
    except (AttributeError, ArgumentError):
        manager = managers.RepoResultManager(repository=repo,
                                             location=location)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    LOGGER.info("Tracing the commit history.")
    history = managers.HistoryManager(repository=repo, manager=manager)
    history.load_history(model, skip={deployment})
    LOGGER.info("Composing the history report.")
    report = api.history_report(history, config=config)
    with open(filename, "w", encoding="utf-8") as file_handle:
        file_handle.write(report)
Example #3
def history(location, model, filename, deployment, custom_config):
    """Generate a report over a model's git commit history."""
    if model is None:
        raise click.BadParameter("No 'model' path given or configured.")
    if location is None:
        raise click.BadParameter("No 'location' given or configured.")
    try:
        repo = git.Repo()
    except git.InvalidGitRepositoryError:
        LOGGER.critical(
            "The history report requires a git repository in order to check "
            "the model's commit history.")
        sys.exit(1)
    previous = repo.active_branch
    repo.heads[deployment].checkout()
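    # 'location' may be a database URL or a plain directory path; fall back
    # to file-based result storage if it is not a valid database URL.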
    try:
        manager = managers.SQLResultManager(repository=repo, location=location)
    except (AttributeError, ArgumentError):
        manager = managers.RepoResultManager(
            repository=repo, location=location)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    history = managers.HistoryManager(repository=repo, manager=manager)
    history.load_history(model, skip={deployment})
    report = api.history_report(history, config=config)
    previous.checkout()
    with open(filename, "w", encoding="utf-8") as file_handle:
        file_handle.write(report)
Example #4
def snapshot(model, filename, pytest_args, exclusive, skip, solver,
             solver_timeout, experimental, custom_tests, custom_config):
    """
    Take a snapshot of a model's state and generate a report.

    MODEL: Path to model file. Can also be supplied via the environment variable
    MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    model_obj, sbml_ver, notifications = api.validate_model(model)
    if model_obj is None:
        LOGGER.critical(
            "The model could not be loaded due to the following SBML errors.")
        utils.stdout_notifications(notifications)
        api.validation_report(model, notifications, filename)
        sys.exit(1)
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    model_obj.solver = solver
    _, results = api.test_model(model_obj,
                                sbml_version=sbml_ver,
                                results=True,
                                pytest_args=pytest_args,
                                skip=skip,
                                exclusive=exclusive,
                                experimental=experimental,
                                solver_timeout=solver_timeout)
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing snapshot report to '%s'.", filename)
        file_handle.write(api.snapshot_report(results, config))
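
A minimal sketch of the same snapshot flow outside the command-line wrapper. The import paths are assumptions, and "model.xml" and "report.html" are placeholder paths.

from memote.suite import api
from memote.suite.reporting import ReportConfiguration

model, sbml_ver, notifications = api.validate_model("model.xml")
if model is None:
    raise SystemExit("Invalid SBML; see notifications for details.")
_, results = api.test_model(model, sbml_version=sbml_ver, results=True,
                            pytest_args=["--tb", "no"])
with open("report.html", "w", encoding="utf-8") as handle:
    handle.write(api.snapshot_report(results, ReportConfiguration.load()))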
Example #5
def snapshot(model, filename, pytest_args, solver, custom_tests,
             custom_config):
    """
    Take a snapshot of a model's state and generate a report.

    MODEL: Path to model file. Can also be supplied via the environment variable
    MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    model.solver = solver
    _, results = api.test_model(model, results=True, pytest_args=pytest_args)
    api.snapshot_report(results, config, filename)
Example #6
def snapshot(model, filename, pytest_args, exclusive, skip, solver,
             experimental, custom_tests, custom_config):
    """
    Take a snapshot of a model's state and generate a report.

    MODEL: Path to model file. Can also be supplied via the environment variable
    MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    model.solver = solver
    _, results = api.test_model(model, results=True, pytest_args=pytest_args,
                                skip=skip, exclusive=exclusive,
                                experimental=experimental)
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing snapshot report to '%s'.", filename)
        file_handle.write(api.snapshot_report(results, config))
Example #7
def diff_report(diff_results, config=None, html=True):
    """
    Generate a diff report from a result set and configuration.

    Parameters
    ----------
    diff_results : iterable of memote.MemoteResult
        Nested dictionary structure as returned from the test suite.
    config : dict, optional
        The final test report configuration (default None).
    html : bool, optional
        Whether to render the report as full HTML or JSON (default True).

    """
    if config is None:
        config = ReportConfiguration.load()
    report = DiffReport(diff_results=diff_results, configuration=config)
    if html:
        return report.render_html()
    else:
        return report.render_json()
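
Hypothetical usage, assuming diff_results was collected as in Example #1:

html_doc = diff_report(diff_results)               # standalone HTML page
json_doc = diff_report(diff_results, html=False)   # raw JSON payload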
Example #8
def history_report(history, config=None, html=True):
    """
    Generate a history report from previous results and configuration.

    Parameters
    ----------
    history : memote.HistoryManager
        The manager grants access to previous results.
    config : dict, optional
        The final test report configuration (default None).
    html : bool, optional
        Whether to render the report as full HTML or JSON (default True).

    """
    if config is None:
        config = ReportConfiguration.load()
    report = HistoryReport(history=history, configuration=config)
    if html:
        return report.render_html()
    else:
        return report.render_json()
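
Hypothetical usage, assuming history is a loaded memote.HistoryManager as in Examples #2 and #3, and "custom.yml" is a placeholder custom configuration file:

config = ReportConfiguration.load()
config.merge(ReportConfiguration.load("custom.yml"))
html_doc = history_report(history, config=config)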
Example #9
    def run_memote(self, ctx, params):
        """
        :param params: instance of type "RunMemoteParams" -> structure:
           parameter "workspace" of String, parameter "model_id" of String,
           parameter "media_id" of String, parameter "out_model_id" of String
        :returns: instance of type "RunMemoteResults" -> structure: parameter
           "model_ref" of String
        """
        # ctx is the context object
        # return variables are: output
        #BEGIN run_memote
        print(params)
        kbase_api = cobrakbase.KBaseAPI(ctx['token'],
                                        config={'workspace-url': self.ws_url})
        modelseed = cobrakbase.modelseed.from_local(
            '/kb/module/data/ModelSEEDDatabase')

        kmodel_data = kbase_api.get_object(params['model_id'],
                                           params['workspace'])
        fbamodel = KBaseFBAModel(kmodel_data)

        builder = KBaseFBAModelToCobraBuilder(fbamodel)

        if 'genome_ref' in kmodel_data:
            logging.info("Annotating model with genome information: %s",
                         kmodel_data['genome_ref'])
            ref_data = kbase_api.get_object_info_from_ref(
                kmodel_data['genome_ref'])
            genome_data = kbase_api.get_object(ref_data.id,
                                               ref_data.workspace_id)
            builder.with_genome(KBaseGenome(genome_data))

        media = None
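        # Prefer an explicitly supplied media; otherwise fall back to the
        # media recorded with the model's first gapfilling, if any.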

        if params.get('media_id'):
            print("MEDIA ID", params['media_id'])
            media_data = kbase_api.get_object(params['media_id'],
                                              params['workspace'])
            media = KBaseBiochemMedia(media_data)

        if media is None:
            if 'gapfillings' in kmodel_data and len(
                    kmodel_data['gapfillings']) > 0:
                print("Pulling media from gapfilling...",
                      kmodel_data['gapfillings'])
                ref = kmodel_data['gapfillings'][0]['media_ref']
                ref_data = kbase_api.get_object_info_from_ref(ref)
                media_data = kbase_api.get_object(ref_data.id,
                                                  ref_data.workspace_id)
                media = KBaseBiochemMedia(media_data)

        if media is not None:
            builder.with_media(media)

        # Convert to a cobra model object with the builder.
        model = builder.build()
        cobrakbase.annotate_model_with_modelseed(model, modelseed)

        #modelseed = cobrakbase.modelseed.from_local('/kb/module/data/ModelSEEDDatabase-dev')
        #print(cobrakbase.annotate_model_with_modelseed(model, modelseed))
        _, results = memote_api.test_model(model,
                                           results=True,
                                           skip=['test_thermodynamics'])
        config = ReportConfiguration.load()
        html = memote_api.snapshot_report(results, config)

        report_folder = self.shared_folder

        with open(report_folder + "/report.html", 'w') as f:
            f.write(html)
        cobra.io.write_sbml_model(model, report_folder + "/model.xml")

        report_client = KBaseReport(self.callback_url)
        report_params = {
            'direct_html_link_index': 0,
            'workspace_name': params['workspace'],
            'report_object_name': 'run_memote_' + uuid.uuid4().hex,
            'objects_created': [],
            'html_links': [{
                'name': 'report',
                'description': 'Memote HTML Report',
                'path': report_folder + "/report.html"
            }],
            'file_links': [{
                'name': params['model_id'] + ".xml",
                'description': 'desc',
                'path': report_folder + "/model.xml"
            }]
        }

        report_info = report_client.create_extended_report(report_params)

        output = {
            'report_name': report_info['name'],
            'report_ref': report_info['ref']
        }

        #END run_memote
        # At some point might do deeper type checking...
        if not isinstance(output, dict):
            raise ValueError('Method run_memote return value ' +
                             'output is not type dict as required.')
        # return the results
        return [output]
Example #10
def diff(models, filename, pytest_args, exclusive, skip, solver,
         solver_timeout, experimental, custom_tests, custom_config):
    """
    Take a snapshot of all the supplied models and generate a diff report.

    MODELS: List of paths to two or more model files.
    """
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    # Build the diff report specific data structure
    diff_results = dict()
    model_and_model_ver_tuple = list()
    for model_path in models:
        try:
            model_filename = os.path.basename(model_path)
            diff_results.setdefault(model_filename, dict())
            model, model_ver, notifications = api.validate_model(model_path)
            if model is None:
                head, tail = os.path.split(filename)
                report_path = os.path.join(
                    head, '{}_structural_report.html'.format(model_filename))
                api.validation_report(model_path, notifications, report_path)
                LOGGER.critical(
                    "The model {} could not be loaded due to SBML errors "
                    "reported in {}.".format(model_filename, report_path))
                continue
            model.solver = solver
            model_and_model_ver_tuple.append((model, model_ver))
        except (IOError, SBMLError):
            LOGGER.debug("Stack trace:", exc_info=True)
            LOGGER.warning(
                "An error occurred while loading the model '%s'. "
                "Skipping.", model_filename)
    # Abort the diff report unless at least two models can be loaded
    # successfully.
    if len(model_and_model_ver_tuple) < 2:
        LOGGER.critical(
            "Out of the %d provided models only %d could be loaded. Please, "
            "check if the models that could not be loaded are valid SBML. "
            "Aborting.", len(models), len(model_and_model_ver_tuple))
        sys.exit(1)
    # Running pytest in individual processes to avoid interference
    partial_test_diff = partial(_test_diff,
                                pytest_args=pytest_args,
                                skip=skip,
                                exclusive=exclusive,
                                experimental=experimental,
                                solver_timeout=solver_timeout)
    with Pool(min(len(models), cpu_count())) as pool:
        results = pool.map(partial_test_diff, model_and_model_ver_tuple)

    for model_path, result in zip(models, results):
        model_filename = os.path.basename(model_path)
        diff_results[model_filename] = result

    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing diff report to '%s'.", filename)
        file_handle.write(api.diff_report(diff_results, config))