def test_test_model_timeout(model):
    """The ``solver_timeout`` option must be applied to the model's solver."""
    expected_timeout = 1
    api.test_model(
        model,
        solver_timeout=expected_timeout,
        exclusive="test_find_reactions_unbounded_flux_default_condition",
    )
    assert model.solver.configuration.timeout == expected_timeout
def run(model, collect, filename, directory, ignore_git, pytest_args, exclusive, skip, solver, custom):
    """
    Run the test suite and collect results.

    MODEL: Path to model file. Can also be supplied via the environment variable
    MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    repo = None if ignore_git else callbacks.probe_git()
    # Default to short tracebacks and high verbosity unless already set.
    if not any(arg.startswith("--tb") for arg in pytest_args):
        pytest_args = ["--tb", "short"] + pytest_args
    if not any(arg.startswith("-v") for arg in pytest_args):
        pytest_args.append("-vv")
    model.solver = solver
    suite_kwargs = dict(pytest_args=pytest_args, skip=skip,
                        exclusive=exclusive, custom=custom)
    if collect:
        if repo is not None and directory is not None:
            # Name the collected result file after the current commit hash.
            filename = join(
                directory, "{}.json".format(repo.active_branch.commit.hexsha))
        code = api.test_model(model, filename, **suite_kwargs)
    else:
        code = api.test_model(model, **suite_kwargs)
    sys.exit(code)
def history_directory(request, tmpdir_factory):
    """Build the parametrized model and store three result files in a temp dir."""
    model = model_builder(request.param)
    directory = tmpdir_factory.mkdir("Results")
    for index in range(3):
        result_path = str(directory.join("{}.json".format(index)))
        api.test_model(model, result_path)
    return str(directory)
def test_snapshot_report_file(model, tmpdir):
    """The snapshot report should render to an HTML document string.

    The previous version computed a ``filename`` from ``tmpdir`` that was
    never used; the dead local has been removed (the ``tmpdir`` parameter is
    kept so the fixture signature stays unchanged).
    """
    _, results = api.test_model(model, results=True, pytest_args=["--tb", "no"])
    report = api.snapshot_report(results, html=True)
    assert report.startswith("<!doctype html>")
def snapshot(model, filename, pytest_args, exclusive, skip, solver, solver_timeout, experimental, custom_tests, custom_config):
    """
    Take a snapshot of a model's state and generate a report.

    MODEL: Path to model file. Can also be supplied via the environment
    variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    model_obj, sbml_ver, notifications = api.validate_model(model)
    if model_obj is None:
        # Loading failed entirely; emit the SBML errors and a validation
        # report, then abort with a non-zero exit code.
        LOGGER.critical(
            "The model could not be loaded due to the following SBML errors.")
        utils.stdout_notifications(notifications)
        api.validation_report(model, notifications, filename)
        sys.exit(1)
    if not any(arg.startswith("--tb") for arg in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Search additional directories for custom tests.
    pytest_args.extend(custom_tests)
    # Merge user-supplied report layouts on top of the default configuration.
    config = ReportConfiguration.load()
    for extra in custom_config:
        config.merge(ReportConfiguration.load(extra))
    model_obj.solver = solver
    suite_kwargs = dict(
        sbml_version=sbml_ver, results=True, pytest_args=pytest_args,
        skip=skip, exclusive=exclusive, experimental=experimental,
        solver_timeout=solver_timeout)
    _, results = api.test_model(model_obj, **suite_kwargs)
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing snapshot report to '%s'.", filename)
        file_handle.write(api.snapshot_report(results, config))
def diff(models, filename, pytest_args, exclusive, skip, solver, experimental, custom_tests, custom_config):
    """
    Take a snapshot of all the supplied models and generate a diff report.

    MODELS: List of paths to two or more model files.
    """
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    config = ReportConfiguration.load()
    # Update the default test configuration with custom ones (if any).
    for custom in custom_config:
        config.merge(ReportConfiguration.load(custom))
    # Build the diff report specific data structure.
    diff_results = dict()
    for model_path in models:
        # Compute the key *before* the ``try`` so the exception handler
        # below can never fail with a NameError on ``model_filename``.
        model_filename = os.path.basename(model_path)
        diff_results.setdefault(model_filename, dict())
        try:
            model = callbacks._load_model(model_path)
            model.solver = solver
            _, diff_results[model_filename] = api.test_model(
                model, results=True, pytest_args=pytest_args, skip=skip,
                exclusive=exclusive, experimental=experimental)
        except Exception as e:
            # Best effort: a failing model keeps its (empty) entry and the
            # remaining models are still tested.
            LOGGER.warning(
                "The following exception occurred while loading model {}: {}"
                "".format(model_filename, e))
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing diff report to '%s'.", filename)
        file_handle.write(api.diff_report(diff_results, config))
def run(model, collect, filename, location, ignore_git, pytest_args, exclusive, skip, solver, experimental, custom_tests, deployment):
    """
    Run the test suite on a single model and collect results.

    MODEL: Path to model file. Can also be supplied via the environment variable
    MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    def is_verbose(arg):
        # True when the pytest argument already controls verbosity.
        return (arg.startswith("--verbosity") or
                arg.startswith("-v") or arg.startswith("--verbose") or
                arg.startswith("-q") or arg.startswith("--quiet"))
    if ignore_git:
        repo = None
    else:
        repo = callbacks.probe_git()
    if collect:
        if repo is not None:
            if location is None:
                # Cannot store per-commit results without a location.
                LOGGER.critical(
                    "Working with a repository requires a storage location.")
                sys.exit(1)
    # Default to short tracebacks and high verbosity unless the caller
    # already supplied those pytest options.
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "short"] + pytest_args
    if not any(is_verbose(a) for a in pytest_args):
        pytest_args.append("-vv")
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    model.solver = solver
    code, result = api.test_model(
        model=model, results=True, pytest_args=pytest_args, skip=skip,
        exclusive=exclusive, experimental=experimental)
    if collect:
        if repo is None:
            # No repository: just write the result to a plain file.
            manager = ResultManager()
            manager.store(result, filename=filename)
        else:
            LOGGER.info("Checking out deployment branch.")
            # Remember the working branch so we can switch back afterwards.
            # NOTE(review): `repo.active_branch` raises TypeError in detached
            # HEAD state; this version does not guard against that — confirm.
            previous = repo.active_branch
            previous_cmt = previous.commit
            repo.heads[deployment].checkout()
            try:
                # Prefer the SQL-backed store; fall back to the file-based
                # repository store when `location` is not a database URL.
                manager = SQLResultManager(repository=repo, location=location)
            except (AttributeError, ArgumentError):
                manager = RepoResultManager(repository=repo, location=location)
            LOGGER.info(
                "Committing result and changing back to working branch.")
            manager.store(result, commit=previous_cmt.hexsha)
            repo.git.add(".")
            repo.index.commit("chore: add result for {}".format(
                previous_cmt.hexsha))
            previous.checkout()
def _test_history(model, solver, manager, commit, pytest_args, skip, exclusive, experimental):
    """Run the memote suite on ``model`` and persist the result under ``commit``."""
    model.solver = solver
    suite_kwargs = dict(results=True, pytest_args=pytest_args, skip=skip,
                        exclusive=exclusive, experimental=experimental)
    _, result = api.test_model(model, **suite_kwargs)
    manager.store(result, commit=commit)
def _test_history(model, sbml_ver, solver, manager, commit, pytest_args, skip, exclusive, experimental):
    """Configure the model, run the suite, and store the result for ``commit``."""
    model.solver = solver
    # Load the experimental configuration using model information.
    if experimental is not None:
        experimental.load(model)
    suite_kwargs = dict(sbml_version=sbml_ver, results=True,
                        pytest_args=pytest_args, skip=skip,
                        exclusive=exclusive, experimental=experimental)
    _, result = api.test_model(model, **suite_kwargs)
    manager.store(result, commit=commit)
def _test_history(model, sbml_ver, solver, solver_timeout, manager, commit, pytest_args, skip, exclusive, experimental):
    """Configure the model, run the suite with a timeout, and store the result."""
    model.solver = solver
    # Load the experimental configuration using model information.
    if experimental is not None:
        experimental.load(model)
    suite_kwargs = dict(sbml_version=sbml_ver, results=True,
                        pytest_args=pytest_args, skip=skip,
                        exclusive=exclusive, experimental=experimental,
                        solver_timeout=solver_timeout)
    _, result = api.test_model(model, **suite_kwargs)
    manager.store(result, commit=commit)
def _test_diff(model_and_model_ver_tuple, pytest_args, skip, exclusive, experimental, solver_timeout):
    """Run the suite on one ``(model, sbml_version)`` pair and return its results."""
    model, sbml_ver = model_and_model_ver_tuple
    suite_kwargs = dict(sbml_version=sbml_ver, results=True,
                        pytest_args=pytest_args, skip=skip,
                        exclusive=exclusive, experimental=experimental,
                        solver_timeout=solver_timeout)
    _, diff_results = api.test_model(model, **suite_kwargs)
    return diff_results
def snapshot(model, filename, pytest_args):
    """
    Take a snapshot of a model's state and generate a report.

    MODEL: Path to model file. Can also be supplied via the environment
    variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    # Default to short tracebacks and high verbosity unless already given.
    if not any(arg.startswith("--tb") for arg in pytest_args):
        pytest_args = ["--tb", "short"] + pytest_args
    if not any(arg.startswith("-v") for arg in pytest_args):
        pytest_args.append("-vv")
    _, results = api.test_model(model, results=True, pytest_args=pytest_args)
    api.basic_report(results, filename)
def snapshot(model, filename, pytest_args, solver, custom_tests, custom_config):
    """
    Take a snapshot of a model's state and generate a report.

    MODEL: Path to model file. Can also be supplied via the environment
    variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    if not any(arg.startswith("--tb") for arg in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Search additional directories for custom tests.
    pytest_args.extend(custom_tests)
    # Merge any user-supplied report layouts on top of the default one.
    config = ReportConfiguration.load()
    for extra in custom_config:
        config.merge(ReportConfiguration.load(extra))
    model.solver = solver
    _, results = api.test_model(model, results=True, pytest_args=pytest_args)
    api.snapshot_report(results, config, filename)
def snapshot(model, filename, pytest_args, exclusive, skip, solver, experimental, custom_tests, custom_config):
    """
    Take a snapshot of a model's state and generate a report.

    MODEL: Path to model file. Can also be supplied via the environment
    variable MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    if not any(arg.startswith("--tb") for arg in pytest_args):
        pytest_args = ["--tb", "no"] + pytest_args
    # Search additional directories for custom tests.
    pytest_args.extend(custom_tests)
    # Merge any user-supplied report layouts on top of the default one.
    config = ReportConfiguration.load()
    for extra in custom_config:
        config.merge(ReportConfiguration.load(extra))
    model.solver = solver
    suite_kwargs = dict(results=True, pytest_args=pytest_args, skip=skip,
                        exclusive=exclusive, experimental=experimental)
    _, results = api.test_model(model, **suite_kwargs)
    with open(filename, "w", encoding="utf-8") as file_handle:
        LOGGER.info("Writing snapshot report to '%s'.", filename)
        file_handle.write(api.snapshot_report(results, config))
def _test_history(model, filename, pytest_args, skip):
    """Validate the model argument, then run the suite writing to ``filename``."""
    validated = callbacks.validate_model(None, "model", model)
    api.test_model(validated, filename, pytest_args=pytest_args, skip=skip)
def test_basic_report_file(model, tmpdir):
    """The basic report should be written to the requested HTML file."""
    report_path = str(tmpdir.join("index.html"))
    _, results = api.test_model(model, results=True)
    api.basic_report(results, report_path)
    assert exists(report_path)
def test_test_model_file(model, tmpdir):
    """Passing a filename to the suite should create the JSON result file."""
    result_path = str(tmpdir.join("result.json"))
    api.test_model(model, result_path)
    assert exists(result_path)
def test_test_model_result(model):
    """Requesting results should yield a non-empty result container."""
    _, result = api.test_model(model, results=True)
    # TODO: Once introduced perform schema checks here.
    assert len(result) > 0
def test_test_model_code(model, code):
    """The suite's exit code should match the expected value for the model."""
    returned_code = api.test_model(model)
    assert returned_code == code
def run_memote(self, ctx, params):
    """
    :param params: instance of type "RunMemoteParams" -> structure: parameter
       "workspace" of String, parameter "model_id" of String, parameter
       "media_id" of String, parameter "out_model_id" of String
    :returns: instance of type "RunMemoteResults" -> structure: parameter
       "model_ref" of String
    """
    # ctx is the context object
    # return variables are: output
    #BEGIN run_memote
    print(params)
    kbase_api = cobrakbase.KBaseAPI(ctx['token'],
                                    config={'workspace-url': self.ws_url})
    modelseed = cobrakbase.modelseed.from_local(
        '/kb/module/data/ModelSEEDDatabase')
    kmodel_data = kbase_api.get_object(params['model_id'],
                                       params['workspace'])
    fbamodel = KBaseFBAModel(kmodel_data)
    builder = KBaseFBAModelToCobraBuilder(fbamodel)
    if 'genome_ref' in kmodel_data:
        logging.info("Annotating model with genome information: %s",
                     kmodel_data['genome_ref'])
        ref_data = kbase_api.get_object_info_from_ref(
            kmodel_data['genome_ref'])
        genome_data = kbase_api.get_object(ref_data.id,
                                           ref_data.workspace_id)
        builder.with_genome(KBaseGenome(genome_data))
    media = None
    # Prefer an explicitly supplied media.  ``get`` + truthiness covers a
    # missing key, an empty string, and None in one check (the previous
    # version used ``== None`` comparisons).
    if params.get('media_id'):
        print("MEDIA ID", params['media_id'])
        media_data = kbase_api.get_object(params['media_id'],
                                          params['workspace'])
        media = KBaseBiochemMedia(media_data)
    if media is None:
        # Fall back to the media recorded with the first gapfilling, if any.
        if 'gapfillings' in kmodel_data and len(kmodel_data['gapfillings']) > 0:
            print("Pulling media from gapfilling...",
                  kmodel_data['gapfillings'])
            ref = kmodel_data['gapfillings'][0]['media_ref']
            ref_data = kbase_api.get_object_info_from_ref(ref)
            media_data = kbase_api.get_object(ref_data.id,
                                              ref_data.workspace_id)
            media = KBaseBiochemMedia(media_data)
    if media is not None:
        builder.with_media(media)
    # Converts to cobra model object with builder.
    model = builder.build()
    cobrakbase.annotate_model_with_modelseed(model, modelseed)
    # Run the memote suite, skipping the thermodynamics tests; the exit code
    # is not needed here, only the results.
    _, results = memote_api.test_model(model, results=True,
                                       skip=['test_thermodynamics'])
    config = ReportConfiguration.load()
    html = memote_api.snapshot_report(results, config)
    report_folder = self.shared_folder
    with open(report_folder + "/report.html", 'w') as f:
        f.write(html)
    cobra.io.write_sbml_model(model, report_folder + "/model.xml")
    report_client = KBaseReport(self.callback_url)
    report_params = {
        'direct_html_link_index': 0,
        'workspace_name': params['workspace'],
        'report_object_name': 'run_memote_' + uuid.uuid4().hex,
        'objects_created': [],
        'html_links': [{
            'name': 'report',
            'description': 'Memote HTML Report',
            'path': report_folder + "/report.html"
        }],
        'file_links': [{
            'name': params['model_id'] + ".xml",
            'description': 'desc',
            'path': report_folder + "/model.xml"
        }]
    }
    report_info = report_client.create_extended_report(report_params)
    output = {
        'report_name': report_info['name'],
        'report_ref': report_info['ref']
    }
    #END run_memote
    # At some point might do deeper type checking...
    if not isinstance(output, dict):
        raise ValueError('Method run_memote return value ' +
                         'output is not type dict as required.')
    # return the results
    return [output]
def run(model, collect, filename, location, ignore_git, pytest_args, exclusive, skip, solver, experimental, custom_tests, deployment, skip_unchanged):
    """
    Run the test suite on a single model and collect results.

    MODEL: Path to model file. Can also be supplied via the environment variable
    MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    def is_verbose(arg):
        # True when the pytest argument already controls verbosity.
        return (arg.startswith("--verbosity") or
                arg.startswith("-v") or arg.startswith("--verbose") or
                arg.startswith("-q") or arg.startswith("--quiet"))
    if ignore_git:
        repo = None
    else:
        callbacks.git_installed()
        repo = callbacks.probe_git()
    if collect:
        if repo is not None:
            if location is None:
                # Cannot store per-commit results without a location.
                LOGGER.critical(
                    "Working with a repository requires a storage location.")
                sys.exit(1)
    # Default to short tracebacks and high verbosity unless already supplied.
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "short"] + pytest_args
    if not any(is_verbose(a) for a in pytest_args):
        pytest_args.append("-vv")
    # Check if the model was changed in this commit. Exit `memote run` if this
    # was not the case.
    if skip_unchanged and repo is not None:
        commit = repo.head.commit
        if not is_modified(model, commit):
            LOGGER.info("The model was not modified in commit '%s'. Skipping.",
                        commit.hexsha)
            sys.exit(0)
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    # Check if the model can be loaded at all.
    model, sbml_ver, notifications = api.validate_model(model)
    if model is None:
        LOGGER.critical(
            "The model could not be loaded due to the following SBML errors.")
        stdout_notifications(notifications)
        sys.exit(1)
    model.solver = solver
    # Load the experimental configuration using model information.
    if experimental is not None:
        experimental.load(model)
    code, result = api.test_model(
        model=model, sbml_version=sbml_ver, results=True,
        pytest_args=pytest_args, skip=skip, exclusive=exclusive,
        experimental=experimental)
    if collect:
        if repo is None:
            # No repository: just write the result to a plain file.
            manager = ResultManager()
            manager.store(result, filename=filename)
        else:
            LOGGER.info("Checking out deployment branch.")
            # If the repo HEAD is pointing to the most recent branch then
            # GitPython's `repo.active_branch` works. Yet, if the repo is in
            # detached HEAD state, i.e., when a user has checked out a specific
            # commit as opposed to a branch, this won't work and throw a
            # `TypeError`, which we are circumventing below.
            try:
                previous = repo.active_branch
                previous_cmt = previous.commit
                is_branch = True
            except TypeError:
                previous_cmt = repo.head.commit
                is_branch = False
            repo.git.checkout(deployment)
            try:
                # Prefer the SQL-backed store; fall back to the file-based
                # repository store when `location` is not a database URL.
                manager = SQLResultManager(repository=repo, location=location)
            except (AttributeError, ArgumentError):
                manager = RepoResultManager(repository=repo, location=location)
            LOGGER.info(
                "Committing result and changing back to working branch.")
            manager.store(result, commit=previous_cmt.hexsha)
            repo.git.add(".")
            # Commit via a subprocess call to the git binary; requires the
            # process' working directory to be the repository root.
            check_call(
                ['git', 'commit', '-m',
                 "chore: add result for {}".format(previous_cmt.hexsha)]
            )
            if is_branch:
                previous.checkout()
            else:
                # NOTE(review): GitPython's `repo.commit(rev)` only resolves a
                # revision to a Commit object; it does not appear to restore a
                # detached HEAD -- confirm whether a checkout was intended.
                repo.commit(previous_cmt)
def run(model, collect, filename, location, ignore_git, pytest_args, exclusive, skip, solver, solver_timeout, experimental, custom_tests, deployment, skip_unchanged):
    """
    Run the test suite on a single model and collect results.

    MODEL: Path to model file. Can also be supplied via the environment variable
    MEMOTE_MODEL or configured in 'setup.cfg' or 'memote.ini'.
    """
    def is_verbose(arg):
        # True when the pytest argument already controls verbosity.
        return (arg.startswith("--verbosity") or
                arg.startswith("-v") or arg.startswith("--verbose") or
                arg.startswith("-q") or arg.startswith("--quiet"))
    if ignore_git:
        repo = None
    else:
        callbacks.git_installed()
        repo = callbacks.probe_git()
    if collect:
        if repo is not None:
            if location is None:
                # Cannot store per-commit results without a location.
                LOGGER.critical(
                    "Working with a repository requires a storage location.")
                sys.exit(1)
    # Default to short tracebacks and high verbosity unless already supplied.
    if not any(a.startswith("--tb") for a in pytest_args):
        pytest_args = ["--tb", "short"] + pytest_args
    if not any(is_verbose(a) for a in pytest_args):
        pytest_args.append("-vv")
    # Check if the model was changed in this commit. Exit `memote run` if this
    # was not the case.
    if skip_unchanged and repo is not None:
        commit = repo.head.commit
        if not is_modified(model, commit):
            LOGGER.info("The model was not modified in commit '%s'. Skipping.",
                        commit.hexsha)
            sys.exit(0)
    # Add further directories to search for tests.
    pytest_args.extend(custom_tests)
    # Check if the model can be loaded at all.
    model, sbml_ver, notifications = api.validate_model(model)
    if model is None:
        LOGGER.critical(
            "The model could not be loaded due to the following SBML errors.")
        stdout_notifications(notifications)
        sys.exit(1)
    model.solver = solver
    code, result = api.test_model(model=model, sbml_version=sbml_ver,
                                  results=True, pytest_args=pytest_args,
                                  skip=skip, exclusive=exclusive,
                                  experimental=experimental,
                                  solver_timeout=solver_timeout)
    if collect:
        if repo is None:
            # No repository: just write the result to a plain file.
            manager = ResultManager()
            manager.store(result, filename=filename)
        else:
            LOGGER.info("Checking out deployment branch.")
            # If the repo HEAD is pointing to the most recent branch then
            # GitPython's `repo.active_branch` works. Yet, if the repo is in
            # detached HEAD state, i.e., when a user has checked out a specific
            # commit as opposed to a branch, this won't work and throw a
            # `TypeError`, which we are circumventing below.
            try:
                previous = repo.active_branch
                previous_cmt = previous.commit
                is_branch = True
            except TypeError:
                previous_cmt = repo.head.commit
                is_branch = False
            repo.git.checkout(deployment)
            try:
                # Prefer the SQL-backed store; fall back to the file-based
                # repository store when `location` is not a database URL.
                manager = SQLResultManager(repository=repo, location=location)
            except (AttributeError, ArgumentError):
                manager = RepoResultManager(repository=repo, location=location)
            LOGGER.info(
                "Committing result and changing back to working branch.")
            manager.store(result, commit=previous_cmt.hexsha)
            repo.git.add(".")
            repo.git.commit(
                "--message",
                "chore: add result for {}".format(previous_cmt.hexsha))
            if is_branch:
                previous.checkout()
            else:
                # NOTE(review): GitPython's `repo.commit(rev)` only resolves a
                # revision to a Commit object; it does not appear to restore a
                # detached HEAD -- confirm whether a checkout was intended.
                repo.commit(previous_cmt)
# Run the memote test suite on the WormJam model and dump the raw results.
import json

import cobra
from memote.suite.api import test_model

model = cobra.io.read_sbml_model("WormJam.xml")
# Optionally pass skip=["test_consistency"] to speed the run up.
code, results = test_model(model, sbml_version=(3, 1), results=True)
# Write mode "w" (was "w+": reading was never needed) with explicit encoding.
with open("results.json", "w", encoding="utf-8") as handle:
    json.dump(results, handle, indent=4)
print("Memote Done")