def cli(ctx, path, template=None, **kwds):
    """(Experimental) Initialize a new tool project.

    This is only a proof-of-concept demo right now.
    """
    if template is None:
        warn("Creating empty project, this function doesn't do much yet.")
    if not os.path.exists(path):
        os.makedirs(path)
    if template is None:
        # Nothing to unpack without a template.
        return

    scratch_dir = tempfile.mkdtemp()
    try:
        untar_args = UNTAR_ARGS % (scratch_dir)
        untar_to(DOWNLOAD_URL, scratch_dir, untar_args)
        source_dir = os.path.join(scratch_dir, template)
        shell("ls '%s'" % (source_dir))
        # A shell glob (*) skips hidden entries, so dot files are moved
        # in a second pass below.
        shell("mv '%s'/* '%s'" % (source_dir, path))
        hidden_entries = [
            os.path.join(source_dir, entry)
            for entry in os.listdir(source_dir)
            if entry.startswith(".")
        ]
        if hidden_entries:
            quoted_entries = "'" + "' '".join(hidden_entries) + "'"
            shell("mv %s '%s'" % (quoted_entries, path))
    finally:
        # Always drop the staging directory, even if a shell step failed.
        shutil.rmtree(scratch_dir)
def cli(ctx, path): """Create files to use GitHub/TravisCI testing. Setup files in a github tool repository to enable continuous integration testing.:: % planemo travis_init . % # setup Ubuntu 12.04 w/ dependencies in % vim .travis/setup_custom_dependencies.bash % git add .travis.yml .travis % git commit -m "Add Travis CI testing infrastructure for tools." % git push # and register repository @ http://travis-ci.org/ These tests were inspired by work original done and documented by Peter C**k here http://bit.ly/gxtravisci. """ # TODO: Option --verbose_travis_yaml to unroll travis_test.sh line by line # and place all but last in 'install' section and last in 'script'. Would # require a yaml dependency though. shell("mkdir -p '%s/.travis'" % path) travis_yml = os.path.join(path, ".travis.yml") setup_sh = os.path.join(path, ".travis", "setup_custom_dependencies.bash") if not os.path.exists(travis_yml): open(travis_yml, "w").write(TRAVIS_YML) else: warn(".travis.yml file already exists, not overwriting.") if not os.path.exists(setup_sh): open(setup_sh, "w").write(TRAVIS_SETUP) else: warn( ".travis/setup_custom_dependencies.bash already exists, not overwriting." ) info(PREPARE_MESSAGE)
def cli(ctx, **kwds): """Download and install conda. This will download conda for managing dependencies for your platform using the appropriate Miniconda installer. By running this command, you are agreeing to the terms of the conda license a 3-clause BSD 3 license. Please review full license at http://docs.continuum.io/anaconda/eula. Planemo will print a warning and terminate with an exit code of 7 if Conda is already installed. """ conda_context = build_conda_context(ctx, **kwds) if conda_context.is_conda_installed(): warn(MESSAGE_ERROR_ALREADY_EXISTS % conda_context.conda_exec) exit = EXIT_CODE_ALREADY_EXISTS else: exit = conda_util.install_conda(conda_context=conda_context, force_conda_build=True) if exit: warn(MESSAGE_ERROR_FAILED % conda_context.conda_exec) else: info(MESSAGE_INSTALL_OKAY % conda_context.conda_exec) ctx.exit(exit)
def cli(ctx, uri, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    path = uri_to_path(ctx, uri)
    # TODO: convert UI to runnable and do a better test of cwl.
    is_cwl = path.endswith(".cwl")
    kwds["cwl"] = is_cwl
    # Pick a default engine matching the artifact type when none was given.
    if kwds.get("engine", None) is None:
        kwds["engine"] = "cwltool" if is_cwl else "galaxy"
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(path, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % str(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    print(outputs_dict)
    output_json = kwds.get("output_json", None)
    if output_json:
        with open(output_json, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def cli(ctx, uri, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    path = uri_to_path(ctx, uri)
    # TODO: convert UI to runnable and do a better test of cwl.
    is_cwl = path.endswith(".cwl")
    kwds["cwl"] = is_cwl
    # Choose a sensible default engine for the artifact type.
    if kwds.get("engine", None) is None:
        kwds["engine"] = "cwltool" if is_cwl else "galaxy"
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(path, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    print(outputs_dict)
    output_json = kwds.get("output_json", None)
    if output_json:
        with open(output_json, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def _handle_summary(structured_data, **kwds):
    """Print a summary of a structured test run and return an exit code."""
    summary_dict = get_dict_value("summary", structured_data)
    num_tests = get_dict_value("num_tests", summary_dict)
    num_failures = get_dict_value("num_failures", summary_dict)
    num_errors = get_dict_value("num_errors", summary_dict)
    num_problems = num_failures + num_errors

    # Exit code priority: problems > no tests found > OK.
    if num_problems > 0:
        summary_exit_code = EXIT_CODE_GENERIC_FAILURE
    elif num_tests == 0:
        summary_exit_code = EXIT_CODE_NO_SUCH_TARGET
    else:
        summary_exit_code = EXIT_CODE_OK

    if kwds.get("summary") != "none":
        if num_tests == 0:
            warn(NO_TESTS_MESSAGE)
        elif num_problems == 0:
            info(ALL_TESTS_PASSED_MESSAGE % num_tests)
        elif num_problems:
            html_report_file = kwds.get("test_output")
            warn(PROBLEM_COUNT_MESSAGE % (num_problems, num_tests, html_report_file))
        _summarize_tests_full(structured_data, **kwds)
    return summary_exit_code
def cli(ctx, path): """Create files to use GitHub/TravisCI testing. Setup files in a github tool repository to enable continuous integration testing.:: % planemo travis_init . % # setup Ubuntu 12.04 w/ dependencies in % vim .travis/setup_custom_dependencies.bash % git add .travis.yml .travis % git commit -m "Add Travis CI testing infrastructure for tools." % git push # and register repository @ http://travis-ci.org/ These tests were inspired by work original done and documented by Peter C**k here http://bit.ly/gxtravisci. """ # TODO: Option --verbose_travis_yaml to unroll travis_test.sh line by line # and place all but last in 'install' section and last in 'script'. Would # require a yaml dependency though. shell("mkdir -p '%s/.travis'" % path) travis_yml = os.path.join(path, ".travis.yml") setup_sh = os.path.join(path, ".travis", "setup_custom_dependencies.bash") if not os.path.exists(travis_yml): open(travis_yml, "w").write(TRAVIS_YML) else: warn(".travis.yml file already exists, not overwriting.") if not os.path.exists(setup_sh): open(setup_sh, "w").write(TRAVIS_SETUP) else: warn(".travis/setup_custom_dependencies.bash already exists, not overwriting.") info(PREPARE_MESSAGE)
def cli(ctx, path, job_path, **kwds):
    """Planemo command for running tools and jobs.

    ::

        % planemo run cat1-tool.cwl cat-job.json
    """
    kwds["cwl"] = path.endswith(".cwl")
    conformance_test = kwds.get("conformance_test", False)
    # Suppress normal output while gathering conformance test data.
    with conditionally_captured_io(conformance_test):
        with engine_context(ctx, **kwds) as engine:
            run_result = engine.run(path, job_path)

    if not run_result.was_successful:
        warn("Run failed [%s]" % str(run_result))
        ctx.exit(1)

    if conformance_test:
        if hasattr(run_result, "cwl_command_state"):
            dumped_json = json.dumps(run_result.cwl_command_state)
            if hasattr(run_result, "galaxy_paths"):
                # Rewrite Galaxy-side paths back to their local equivalents.
                for (local_path, galaxy_path) in run_result.galaxy_paths:
                    dumped_json = dumped_json.replace(galaxy_path, local_path)
            print(dumped_json)
    else:
        outputs_dict = run_result.outputs_dict
        print(outputs_dict)
        output_json = kwds.get("output_json", None)
        if output_json:
            with open(output_json, "w") as handle:
                json.dump(outputs_dict, handle)
    return 0
def cli(ctx, path, template=None, **kwds):
    """(Experimental) Initialize a new tool project.

    This is only a proof-of-concept demo right now.
    """
    if template is None:
        warn("Creating empty project, this function doesn't do much yet.")
    if not os.path.exists(path):
        os.makedirs(path)
    if template is None:
        return

    staging = tempfile.mkdtemp()
    try:
        untar_args = UNTAR_ARGS % (staging)
        untar_to(DOWNLOAD_URL, staging, untar_args)
        template_root = os.path.join(staging, template)
        shell("ls '%s'" % (template_root))
        # Move visible entries first; the glob below does not match dot files.
        shell("mv '%s'/* '%s'" % (template_root, path))
        dot_entries = [
            os.path.join(template_root, name)
            for name in os.listdir(template_root)
            if name.startswith(".")
        ]
        if dot_entries:
            quoted = "'" + "' '".join(dot_entries) + "'"
            shell("mv %s '%s'" % (quoted, path))
    finally:
        shutil.rmtree(staging)
def cli(ctx, runnable_identifier, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    runnable = for_runnable_identifier(ctx, runnable_identifier, kwds)
    is_cwl = runnable.type.is_cwl_artifact
    kwds["cwl"] = is_cwl
    # Default engine selection: CWL -> cwltool; remote URL -> external
    # Galaxy; otherwise a locally managed Galaxy.
    if kwds.get("engine", None) is None:
        if is_cwl:
            kwds["engine"] = "cwltool"
        elif kwds.get('galaxy_url', None):
            kwds["engine"] = "external_galaxy"
        else:
            kwds["engine"] = "galaxy"
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(runnable, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    output_json = kwds.get("output_json", None)
    if output_json:
        with open(output_json, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def _handle_summary(
    structured_data,
    **kwds
):
    """Summarize structured test data; return the appropriate exit code."""
    summary_dict = get_dict_value("summary", structured_data)
    num_tests = get_dict_value("num_tests", summary_dict)
    num_failures = get_dict_value("num_failures", summary_dict)
    num_errors = get_dict_value("num_errors", summary_dict)
    num_problems = num_failures + num_errors

    # Problems dominate; otherwise flag the no-tests case specially.
    if num_problems > 0:
        summary_exit_code = EXIT_CODE_GENERIC_FAILURE
    elif num_tests == 0:
        summary_exit_code = EXIT_CODE_NO_SUCH_TARGET
    else:
        summary_exit_code = EXIT_CODE_OK

    if kwds.get("summary") != "none":
        if num_tests == 0:
            warn(NO_TESTS_MESSAGE)
        elif num_problems == 0:
            info(ALL_TESTS_PASSED_MESSAGE % num_tests)
        elif num_problems:
            html_report_file = kwds.get("test_output")
            warn(PROBLEM_COUNT_MESSAGE % (num_problems, num_tests, html_report_file))
        _summarize_tests_full(
            structured_data,
            **kwds
        )
    return summary_exit_code
def run_in_config(ctx, config, **kwds):
    """Run Galaxy tool tests within the given Galaxy configuration.

    Builds the test command, executes it inside the configured Galaxy root,
    collects xunit/structured/HTML reports, and returns the test command's
    return code.
    """
    config_directory = config.config_directory
    html_report_file = kwds["test_output"]
    job_output_files = kwds.get("job_output_files", None)
    if job_output_files is None:
        job_output_files = os.path.join(config_directory, "jobfiles")
    xunit_supported, xunit_report_file = __xunit_state(kwds, config)
    structured_report_file = __structured_report_file(kwds, config)
    info("Testing using galaxy_root %s", config.galaxy_root)
    # TODO: Allow running dockerized Galaxy here instead.
    server_ini = os.path.join(config_directory, "galaxy.ini")
    config.env["GALAXY_CONFIG_FILE"] = server_ini
    config.env["GALAXY_TEST_VERBOSE_ERRORS"] = "true"
    config.env["GALAXY_TEST_SAVE"] = job_output_files
    cd_to_galaxy_command = "cd %s" % config.galaxy_root
    test_cmd = test_structures.GalaxyTestCommand(
        html_report_file,
        xunit_report_file,
        structured_report_file,
        failed=kwds.get("failed", False),
        installed=kwds.get("installed", False),
    ).build()
    cmd = "; ".join([
        cd_to_galaxy_command,
        # TODO: this should be moved to run_tests.sh to match run.sh.
        galaxy_run.ACTIVATE_COMMAND,
        test_cmd,
    ])
    action = "Testing tools"
    return_code = galaxy_run.run_galaxy_command(ctx, cmd, config.env, action)
    if kwds.get('update_test_data', False):
        update_cp_args = (job_output_files, config.test_data_dir)
        shell('cp -r "%s"/* "%s"' % update_cp_args)
    if xunit_report_file and (not os.path.exists(xunit_report_file)):
        warn(NO_XUNIT_MESSAGE)
        xunit_report_file = None
    test_results = test_structures.GalaxyTestResults(
        structured_report_file,
        xunit_report_file,
        html_report_file,
        return_code,
    )
    try:
        test_data = test_results.structured_data
        new_report = build_report.build_report(test_data)
        # BUGFIX: close the report file deterministically instead of leaking
        # the handle returned by open().
        with open(test_results.output_html_path, "w") as f:
            f.write(new_report)
    except Exception:
        # Report generation is best-effort; a failure here must not mask the
        # actual test return code.
        pass
    __handle_summary(test_results, **kwds)
    return return_code
def cwl_properties(self):
    """Derive CWL tool properties from the lexed example command.

    Returns a dict with ``inputs``, ``outputs``, ``arguments``,
    ``base_command`` and ``stdout`` keys.
    """
    base_command = []
    arguments = []
    inputs = []
    outputs = []

    lex_list = self.cwl_lex_list()
    index = 0

    # Leading plain command parts make up the base command.
    while index < len(lex_list):
        part = lex_list[index]
        if not isinstance(part, _CwlCommandPart):
            break
        base_command.append(part.value)
        index += 1

    # Classify the remaining parts until a redirect token is hit.
    while index < len(lex_list):
        part = lex_list[index]
        if part.is_token(">"):
            break
        part.position = index - len(base_command) + 1
        if isinstance(part, _CwlCommandPart):
            arguments.append(part)
        elif isinstance(part, _CwlInput):
            inputs.append(part)
        elif isinstance(part, _CwlOutput):
            part.glob = "$(inputs.%s)" % part.id
            outputs.append(part)
        index += 1

    stdout = None
    if index < len(lex_list):
        part = lex_list[index]
        if part.is_token(">") and (index + 1) < len(lex_list):
            # Treat the redirect target as a stdout-captured output.
            redirect_target = lex_list[index + 1]
            if not isinstance(redirect_target, _CwlOutput):
                redirect_target = _CwlOutput("std_out", None)
            redirect_target.glob = "out"
            redirect_target.require_filename = False
            outputs.append(redirect_target)
            stdout = "out"
            index += 2
        else:
            io.warn("Example command too complex, you will need to build it up manually.")

    return {
        "inputs": inputs,
        "outputs": outputs,
        "arguments": arguments,
        "base_command": base_command,
        "stdout": stdout,
    }
def install_shed_repos(runnable, admin_gi, ignore_dependency_problems):
    """Install tool shed repositories required by *runnable* via *admin_gi*."""
    tools_info = load_shed_repos(runnable)
    if not tools_info:
        # Nothing declared, nothing to install.
        return
    manager = shed_tools.InstallRepositoryManager(admin_gi)
    results = manager.install_repositories(tools_info)
    if results.errored_repositories:
        if ignore_dependency_problems:
            warn(FAILED_REPOSITORIES_MESSAGE)
        else:
            raise Exception(FAILED_REPOSITORIES_MESSAGE)
def cwl_properties(self):
    """Build CWL tool properties from the lexed example command.

    Returns a dict containing ``inputs``, ``outputs``, ``arguments``,
    ``base_command`` and ``stdout``.
    """
    base_command = []
    arguments = []
    inputs = []
    outputs = []

    lex_list = self.cwl_lex_list()
    index = 0

    # Consume leading literal parts as the base command.
    while index < len(lex_list):
        part = lex_list[index]
        if not isinstance(part, CwlCommandPart):
            break
        base_command.append(part.value)
        index += 1

    # Sort remaining parts into arguments/inputs/outputs until a redirect.
    while index < len(lex_list):
        part = lex_list[index]
        if part.is_token(">"):
            break
        part.position = index - len(base_command) + 1
        if isinstance(part, CwlCommandPart):
            arguments.append(part)
        elif isinstance(part, CwlInput):
            inputs.append(part)
        elif isinstance(part, CwlOutput):
            part.glob = "$(inputs.%s)" % part.id
            outputs.append(part)
        index += 1

    stdout = None
    if index < len(lex_list):
        part = lex_list[index]
        if part.is_token(">") and (index + 1) < len(lex_list):
            # Model the redirect target as an output captured from stdout.
            redirect_target = lex_list[index + 1]
            if not isinstance(redirect_target, CwlOutput):
                redirect_target = CwlOutput("std_out", None)
            redirect_target.glob = "out"
            redirect_target.require_filename = False
            outputs.append(redirect_target)
            stdout = "out"
            index += 2
        else:
            io.warn("Example command too complex, you will need to build it up manually.")

    return {
        "inputs": inputs,
        "outputs": outputs,
        "arguments": arguments,
        "base_command": base_command,
        "stdout": stdout,
    }
def _find_test_data(path, **kwds):
    """Return the test data directory associated with *path*, or None."""
    explicit_test_data = kwds.get("test_data", None)
    if explicit_test_data:
        # An explicitly supplied directory always wins.
        return os.path.abspath(explicit_test_data)
    discovered = _search_tool_path_for(path, "test-data")
    if discovered:
        return discovered
    warn(NO_TEST_DATA_MESSAGE)
    return None
def cli(ctx, path, template=None, **kwds):
    """Help initialize global configuration (in home directory) for Planemo.
    """
    # TODO: prompt for values someday.
    config_path = config.global_config_path()
    if os.path.exists(config_path):
        # Refuse to clobber an existing configuration file.
        warn("File %s already exists, exiting." % config_path)
        sys.exit(1)
    with open(config_path, "w") as config_file:
        config_file.write(CONFIG_TEMPLATE)
    info(SUCCESS_MESSAGE % config_path)
def install_shed_repos(runnable, admin_gi, ignore_dependency_problems):
    """Install tool shed repositories for *runnable* using ephemeris."""
    tools_info = load_shed_repos(runnable)
    if not tools_info:
        return
    # Ephemeris requires its logger be configured before use.
    shed_tools._ensure_log_configured("ephemeris")
    manager = shed_tools.InstallToolManager(tools_info, admin_gi)
    manager.install_repositories()
    if manager.errored_repositories:
        if ignore_dependency_problems:
            warn(FAILED_REPOSITORIES_MESSAGE)
        else:
            raise Exception(FAILED_REPOSITORIES_MESSAGE)
def __handle_summary(test_results, **kwds):
    """Emit a console summary of *test_results* unless summaries are disabled."""
    if kwds.get("summary") == "none":
        return
    if test_results.has_details:
        __summarize_tests_full(test_results, **kwds)
    elif test_results.exit_code:
        warn(GENERIC_PROBLEMS_MESSAGE % test_results.output_html_path)
    else:
        info(GENERIC_TESTS_PASSED_MESSAGE)
def install_shed_repos(runnable, admin_gi, ignore_dependency_problems):
    """Install the tool shed repositories *runnable* depends on."""
    tools_info = load_shed_repos(runnable)
    if not tools_info:
        return
    # Configure ephemeris logging before instantiating its manager.
    shed_tools._ensure_log_configured("ephemeris")
    manager = shed_tools.InstallToolManager(tools_info, admin_gi)
    manager.install_repositories()
    if manager.errored_repositories:
        if ignore_dependency_problems:
            warn(FAILED_REPOSITORIES_MESSAGE)
        else:
            raise Exception(FAILED_REPOSITORIES_MESSAGE)
def __xunit_state(kwds, config):
    """Determine xunit support and the xunit report path for this run.

    Returns ``(xunit_supported, xunit_report_file)`` where the file may be
    None when reporting is unsupported or not requested.
    """
    xunit_supported = True
    # grep exits non-zero when run_tests.sh lacks xunit support.
    if shell("grep -q xunit '%s'/run_tests.sh" % config.galaxy_root):
        xunit_supported = False

    xunit_report_file = kwds.get("test_output_xunit", None)
    if xunit_report_file is None and xunit_supported:
        xunit_report_file = os.path.join(config.config_directory, "xunit.xml")
    elif xunit_report_file is not None and not xunit_supported:
        # Requested but unavailable - warn and disable.
        warn(XUNIT_UPGRADE_MESSAGE)
        xunit_report_file = None

    return xunit_supported, xunit_report_file
def run_galaxy(ctx, path, job_path, **kwds):
    """Run a CWL tool through a served Galaxy and print its command state."""
    kwds["cwl"] = True
    conformance_test = kwds.get("conformance_test", False)
    with conditionally_captured_io(conformance_test):
        with serve_daemon(ctx, [path], **kwds) as config:
            try:
                cwl_run = run_cwl_tool(path, job_path, config, **kwds)
            except Exception:
                # Surface the Galaxy log before propagating the failure.
                io.warn("Problem running cwl tool...")
                print(config.log_contents)
                raise
    print(cwl_run.cwl_command_state)
    return 0
def __structured_report_file(kwds, config):
    """Pick the structured (JSON) report path for this test run.

    Returns None when structured data output is unsupported by the target
    Galaxy's run_tests.sh or explicitly unavailable.
    """
    structured_data_supported = True
    # grep exits non-zero when run_tests.sh lacks structured_data support.
    if shell("grep -q structured_data '%s'/run_tests.sh" % config.galaxy_root):
        structured_data_supported = False

    # BUGFIX/cleanup: removed a dead ``structured_report_file = None``
    # assignment that was immediately overwritten by the lookup below.
    structured_report_file = kwds.get("test_output_json", None)
    if structured_report_file is None and structured_data_supported:
        conf_dir = config.config_directory
        structured_report_file = os.path.join(conf_dir, "structured_data.json")
    elif structured_report_file is not None and not structured_data_supported:
        warn(NO_JSON_MESSAGE)
        structured_report_file = None

    return structured_report_file
def _find_test_data(runnables, **kwds):
    """Return the test data directory for the given runnables, or None."""
    # Derive a search root from the first runnable that has tools.
    search_root = "."
    tool_runnables = [r for r in runnables if r.has_tools]
    if tool_runnables:
        search_root = tool_runnables[0].test_data_search_path

    # Find test data directory associated with path.
    explicit_test_data = kwds.get("test_data", None)
    if explicit_test_data:
        return os.path.abspath(explicit_test_data)
    discovered = _search_tool_path_for(search_root, "test-data")
    if discovered:
        return discovered
    warn(NO_TEST_DATA_MESSAGE)
    return None
def _serve(ctx, runnables, **kwds):
    """Start a Galaxy server for *runnables* and return its live config."""
    engine = kwds.get("engine", "galaxy")
    if engine == "docker_galaxy":
        kwds["dockerize"] = True
    if kwds.get("daemon", False):
        # Daemonized servers must not clean up their own config directory.
        kwds["no_cleanup"] = True

    port = kwds.get("port", None)
    if port is None:
        port = network_util.get_free_port()
        kwds["port"] = port

    with galaxy_config(ctx, runnables, **kwds) as config:
        cmd = config.startup_command(ctx, **kwds)
        action = "Starting galaxy"
        exit_code = run_galaxy_command(
            ctx,
            cmd,
            config.env,
            action,
        )
        if exit_code:
            message = "Problem running Galaxy command [%s]." % config.log_contents
            io.warn(message)
            raise Exception(message)

        host = kwds.get("host", "127.0.0.1")
        timeout = 500
        galaxy_url = "http://%s:%s" % (host, port)
        # Poll repeatedly (with settling pauses) until Galaxy answers HTTP.
        ctx.vlog("Waiting for service on (%s, %s)" % (host, port))
        assert network_util.wait_http_service(galaxy_url, timeout=timeout)
        time.sleep(.1)
        ctx.vlog("Waiting for service on (%s, %s)" % (host, port))
        assert network_util.wait_http_service(galaxy_url)
        time.sleep(5)
        ctx.vlog("Waiting for service on (%s, %s)" % (host, port))
        assert network_util.wait_http_service(galaxy_url)

        config.install_workflows()

        if kwds.get("pid_file"):
            real_pid_file = config.pid_file
            if os.path.exists(config.pid_file):
                os.symlink(real_pid_file, kwds["pid_file"])
            else:
                io.warn("Can't find Galaxy pid file [%s] to link" % real_pid_file)
        return config
def cli(ctx, path, template=None, **kwds):
    """Initialize a new tool project (demo only right now).
    """
    if template is None:
        warn("Creating empty project, this function doesn't do much yet.")
    if not os.path.exists(path):
        os.makedirs(path)
    if template is None:
        return

    download_dir = tempfile.mkdtemp()
    try:
        untar_args = UNTAR_ARGS % (download_dir)
        untar_to(DOWNLOAD_URL, download_dir, untar_args)
        shell("ls '%s'" % (download_dir))
        shell("mv '%s/%s'/* '%s'" % (download_dir, template, path))
    finally:
        # Drop the download area regardless of how the moves went.
        shutil.rmtree(download_dir)
def __handle_summary(
    test_results,
    **kwds
):
    """Print a summary of *test_results* honoring the summary style option."""
    if kwds.get("summary") == "none":
        return
    if test_results.has_details:
        __summarize_tests_full(
            test_results,
            **kwds
        )
    elif test_results.exit_code:
        warn(GENERIC_PROBLEMS_MESSAGE % test_results.output_html_path)
    else:
        info(GENERIC_TESTS_PASSED_MESSAGE)
def upload_repository(ctx, realized_repository, **kwds):
    """Upload a tool directory as a tarball to a tool shed."""
    path = realized_repository.path
    tar_path = kwds.get("tar")
    if not tar_path:
        tar_path = build_tarball(path, **kwds)
    if kwds.get("tar_only", False):
        # Only produce the archive locally; skip the shed entirely.
        name = realized_repository.pattern_to_file_name("shed_upload.tar.gz")
        shutil.copy(tar_path, name)
        return 0

    shed_context = get_shed_context(ctx, **kwds)
    update_kwds = {}
    _update_commit_message(ctx, realized_repository, update_kwds, **kwds)
    repo_id = handle_force_create(realized_repository, ctx, shed_context, **kwds)
    # failing to create the repo, give up
    if repo_id is None:
        return report_non_existent_repository(realized_repository)

    if kwds.get("check_diff", False):
        is_diff = diff_repo(ctx, realized_repository, **kwds) != 0
        if not is_diff:
            name = realized_repository.name
            info("Repository [%s] not different, skipping upload." % name)
            return 0

    # TODO: support updating repo information if it changes in the config file
    try:
        shed_context.tsi.repositories.update_repository(
            str(repo_id), tar_path, **update_kwds)
    except Exception as e:
        no_changes = (
            isinstance(e, bioblend.ConnectionError)
            and e.status_code == 400
            and '"No changes to repository."' in e.body
        )
        if no_changes:
            warn(
                "Repository %s was not updated because there were no changes"
                % realized_repository.name)
            return 0
        error("Could not update %s" % realized_repository.name)
        error(api_exception_to_message(e))
        return -1
    info("Repository %s updated successfully." % realized_repository.name)
    return 0
def upload_repository(ctx, realized_repository, **kwds):
    """Upload a tool directory as a tarball to a tool shed."""
    path = realized_repository.path
    tar_path = kwds.get("tar", None)
    if not tar_path:
        tar_path = build_tarball(path, **kwds)
    if kwds.get("tar_only", False):
        # Archive-only mode: copy the tarball next to the repo and stop.
        name = realized_repository.pattern_to_file_name("shed_upload.tar.gz")
        shutil.copy(tar_path, name)
        return 0

    shed_context = get_shed_context(ctx, **kwds)
    update_kwds = {}
    _update_commit_message(ctx, realized_repository, update_kwds, **kwds)
    repo_id = handle_force_create(realized_repository, ctx, shed_context, **kwds)
    # failing to create the repo, give up
    if repo_id is None:
        return report_non_existent_repository(realized_repository)

    if kwds.get("check_diff", False):
        is_diff = diff_repo(ctx, realized_repository, **kwds) != 0
        if not is_diff:
            name = realized_repository.name
            info("Repository [%s] not different, skipping upload." % name)
            return 0

    # TODO: support updating repo information if it changes in the config file
    try:
        shed_context.tsi.repositories.update_repository(
            str(repo_id), tar_path, **update_kwds
        )
    except Exception as e:
        no_changes = (
            isinstance(e, bioblend.ConnectionError)
            and e.status_code == 400
            and '"No changes to repository."' in e.body
        )
        if no_changes:
            warn("Repository %s was not updated because there were no changes" % realized_repository.name)
            return 0
        error("Could not update %s" % realized_repository.name)
        error(api_exception_to_message(e))
        return -1
    info("Repository %s updated successfully." % realized_repository.name)
    return 0
def _serve(ctx, runnables, **kwds):
    """Launch Galaxy serving *runnables*; return the live configuration."""
    engine = kwds.get("engine", "galaxy")
    if engine == "docker_galaxy":
        kwds["dockerize"] = True
    if kwds.get("daemon", False):
        # Daemonized instances must keep their config directory around.
        kwds["no_cleanup"] = True

    port = kwds.get("port", None)
    if port is None:
        port = network_util.get_free_port()
        kwds["port"] = port

    with galaxy_config(ctx, runnables, **kwds) as config:
        cmd = config.startup_command(ctx, **kwds)
        action = "Starting Galaxy"
        exit_code = run_galaxy_command(
            ctx,
            cmd,
            config.env,
            action,
        )
        if exit_code:
            message = "Problem running Galaxy command [%s]." % config.log_contents
            io.warn(message)
            raise Exception(message)

        host = kwds.get("host", "127.0.0.1")
        timeout = 500
        galaxy_url = "http://%s:%s" % (host, port)
        galaxy_alive = sleep(galaxy_url, verbose=ctx.verbose, timeout=timeout)
        if not galaxy_alive:
            raise Exception("Attempted to serve Galaxy at %s, but it failed to start in %d seconds." % (galaxy_url, timeout))

        config.install_workflows()

        if kwds.get("pid_file"):
            real_pid_file = config.pid_file
            if os.path.exists(config.pid_file):
                os.symlink(real_pid_file, kwds["pid_file"])
            else:
                io.warn("Can't find Galaxy pid file [%s] to link" % real_pid_file)
        return config
def test_io_capture():
    """Test :func:`planemo.io.conditionally_captured_io`."""
    # warn() emits a single captured entry.
    with io.conditionally_captured_io(True, tee=False) as capture:
        io.warn("Problem...")
    assert_equal(capture[0]["data"], "Problem...")

    # shell() captures the command line and then its output.
    with io.conditionally_captured_io(True, tee=False) as capture:
        io.shell("echo 'Problem...'")
    assert_equal(capture[0]["data"], "echo 'Problem...'")
    assert_equal(capture[1]["data"], "Problem...")

    # communicate() behaves like shell() with respect to capture.
    with io.conditionally_captured_io(True, tee=False) as capture:
        io.communicate("echo 'Problem...'")
    assert_equal(capture[0]["data"], "echo 'Problem...'")
    assert_equal(capture[1]["data"], "Problem...")

    # With capture disabled the context yields None.
    with io.conditionally_captured_io(False, tee=False) as capture:
        io.communicate("echo 'Test...'")
    assert capture is None
def __summarize_tests_full(test_results, **kwds):
    """Print a detailed, per-test-case summary of *test_results*."""
    num_tests = test_results.num_tests
    num_problems = test_results.num_problems
    if num_tests == 0:
        warn(NO_TESTS_MESSAGE)
        return
    if num_problems == 0:
        info(ALL_TESTS_PASSED_MESSAGE % num_tests)
    if num_problems:
        warn(PROBLEM_COUNT_MESSAGE % (
            num_problems,
            num_tests,
            test_results.output_html_path,
        ))
    for testcase_el in test_results.xunit_testcase_elements:
        structured_data_tests = test_results.structured_data_tests
        __summarize_test_case(structured_data_tests, testcase_el, **kwds)
def cli(ctx, path, template=None, **kwds):
    """(Experimental) Initialize a new tool project.

    This is only a proof-of-concept demo right now.
    """
    if template is None:
        warn("Creating empty project, this function doesn't do much yet.")
    if not os.path.exists(path):
        os.makedirs(path)
    if template is None:
        return

    staging_dir = tempfile.mkdtemp()
    try:
        untar_args = UNTAR_ARGS % (staging_dir)
        untar_to(DOWNLOAD_URL, staging_dir, untar_args)
        template_dir = os.path.join(staging_dir, template)
        # shutil.move handles hidden entries too, unlike a shell glob.
        for entry in os.listdir(template_dir):
            shutil.move(os.path.join(template_dir, entry), path)
    finally:
        shutil.rmtree(staging_dir)
def install_shed_repos(runnable, admin_gi, ignore_dependency_problems,
                       install_tool_dependencies=False,
                       install_resolver_dependencies=True,
                       install_repository_dependencies=True,
                       install_most_recent_revision=False):
    """Install (and optionally update) shed repositories for *runnable*.

    Returns ``(installed_repositories, updated_repositories)``; both are
    None when the runnable declares no shed repositories.
    """
    tools_info = load_shed_repos(runnable)
    if not tools_info:
        return None, None

    install_tool_manager = shed_tools.InstallRepositoryManager(admin_gi)
    install_results = install_tool_manager.install_repositories(
        tools_info,
        default_install_tool_dependencies=install_tool_dependencies,
        default_install_resolver_dependencies=install_resolver_dependencies,
        default_install_repository_dependencies=install_repository_dependencies)

    if install_most_recent_revision:
        # for workflow autoupdates we also need the most recent tool versions
        update_results = install_tool_manager.update_repositories(
            tools_info,
            default_install_tool_dependencies=install_tool_dependencies,
            default_install_resolver_dependencies=install_resolver_dependencies,
            default_install_repository_dependencies=install_repository_dependencies)
        install_results.errored_repositories.extend(
            update_results.errored_repositories)
        updated_repos = update_results.installed_repositories
    else:
        updated_repos = None

    if install_results.errored_repositories:
        if ignore_dependency_problems:
            warn(FAILED_REPOSITORIES_MESSAGE)
        else:
            raise Exception(FAILED_REPOSITORIES_MESSAGE)

    return install_results.installed_repositories, updated_repos
def __summarize_tests_full(
    test_results,
    **kwds
):
    """Emit a detailed per-test-case breakdown of *test_results*."""
    num_tests = test_results.num_tests
    num_problems = test_results.num_problems
    if num_tests == 0:
        warn(NO_TESTS_MESSAGE)
        return
    if num_problems == 0:
        info(ALL_TESTS_PASSED_MESSAGE % num_tests)
    if num_problems:
        warn(PROBLEM_COUNT_MESSAGE % (
            num_problems,
            num_tests,
            test_results.output_html_path,
        ))
    for testcase_el in test_results.xunit_testcase_elements:
        structured_data_tests = test_results.structured_data_tests
        __summarize_test_case(structured_data_tests, testcase_el, **kwds)
def _serve(ctx, runnables, **kwds):
    """Start Galaxy for *runnables*, wait for the port, return the config."""
    engine = kwds.get("engine", "galaxy")
    if engine == "docker_galaxy":
        kwds["dockerize"] = True
    if kwds.get("daemon", False):
        # Daemon mode must preserve the generated configuration.
        kwds["no_cleanup"] = True

    with galaxy_config(ctx, runnables, **kwds) as config:
        cmd = config.startup_command(ctx, **kwds)
        action = "Starting galaxy"
        exit_code = run_galaxy_command(
            ctx,
            cmd,
            config.env,
            action,
        )
        if exit_code:
            message = "Problem running Galaxy command [%s]." % config.log_contents
            io.warn(message)
            raise Exception(message)

        host = kwds.get("host", "127.0.0.1")
        port = kwds.get("port", None)
        if port is None:
            port = network_util.get_free_port()
        # Poll repeatedly (with settling pauses) until the socket accepts.
        ctx.vlog("Waiting for service on (%s, %s)" % (host, port))
        assert network_util.wait_net_service(host, port)
        time.sleep(.1)
        ctx.vlog("Waiting for service on (%s, %s)" % (host, port))
        assert network_util.wait_net_service(host, port)
        time.sleep(5)
        ctx.vlog("Waiting for service on (%s, %s)" % (host, port))
        assert network_util.wait_net_service(host, port)

        config.install_workflows()
        return config
def cli(ctx, runnable_identifier, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    runnable = for_runnable_identifier(ctx, runnable_identifier, kwds)
    is_cwl = runnable.type.is_cwl_artifact
    kwds["cwl"] = is_cwl
    kwds["execution_type"] = "Run"
    # Default engine: CWL -> cwltool; remote URL -> external Galaxy;
    # otherwise a locally managed Galaxy.
    if kwds.get("engine", None) is None:
        if is_cwl:
            kwds["engine"] = "cwltool"
        elif kwds.get('galaxy_url', None):
            kwds["engine"] = "external_galaxy"
        else:
            kwds["engine"] = "galaxy"

    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(runnable, job_path)

    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
    elif kwds.get('no_wait'):
        info('Run successfully executed - exiting without waiting for results.')
    else:
        output_json = kwds.get("output_json", None)
        outputs_dict = run_result.outputs_dict
        if output_json:
            with open(output_json, "w") as handle:
                json.dump(outputs_dict, handle)
        info('Run completed successfully.')

    # Fold the single run into the standard structured report machinery.
    report_data = StructuredData(data={'tests': [run_result.structured_data()], 'version': '0.1'})
    report_data.calculate_summary_data()
    return_value = handle_reports_and_summary(ctx, report_data.structured_data, kwds=kwds)
    ctx.exit(return_value)
def cli(ctx, runnable_identifier, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    runnable_identifier = translate_alias(ctx, runnable_identifier, kwds.get('profile'))
    path = uri_to_path(ctx, runnable_identifier)
    if os.path.exists(path):
        runnable = for_path(path)
    else:
        # assume galaxy workflow id
        runnable = for_id(runnable_identifier)

    # TODO: do a better test of cwl.
    is_cwl = path.endswith(".cwl")
    kwds["cwl"] = is_cwl
    # Engine default: CWL -> cwltool; remote URL -> external Galaxy;
    # otherwise a locally managed Galaxy.
    if kwds.get("engine", None) is None:
        if is_cwl:
            kwds["engine"] = "cwltool"
        elif kwds.get('galaxy_url', None):
            kwds["engine"] = "external_galaxy"
        else:
            kwds["engine"] = "galaxy"

    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(runnable, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % unicodify(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    output_json = kwds.get("output_json", None)
    if output_json:
        with open(output_json, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def cli(ctx, uri, job_path, **kwds):
    """Planemo command for running tools and jobs.

    \b
        % planemo run cat1-tool.cwl cat-job.json
    """
    path = uri_to_path(ctx, uri)
    kwds["cwl"] = path.endswith(".cwl")
    with engine_context(ctx, **kwds) as engine:
        run_result = engine.run(path, job_path)
    if not run_result.was_successful:
        warn("Run failed [%s]" % str(run_result))
        ctx.exit(1)
    outputs_dict = run_result.outputs_dict
    print(outputs_dict)
    output_json = kwds.get("output_json", None)
    if output_json:
        with open(output_json, "w") as handle:
            json.dump(outputs_dict, handle)
    return 0
def run_in_config(ctx, config, **kwds):
    """Build and execute the Galaxy test command for a configured instance.

    Returns the shell return code of the test run after reports and the
    console summary have been handled.
    """
    conf_dir = config.config_directory
    html_report = kwds["test_output"]
    job_outputs_dir = kwds.get("job_output_files", None)
    if job_outputs_dir is None:
        job_outputs_dir = os.path.join(conf_dir, "jobfiles")

    xunit_supported, xunit_report = __xunit_state(kwds, config)
    structured_report = __structured_report_file(kwds, config)

    info("Testing using galaxy_root %s", config.galaxy_root)
    # TODO: Allow running dockerized Galaxy here instead.
    server_ini = os.path.join(conf_dir, "galaxy.ini")
    config.env["GALAXY_CONFIG_FILE"] = server_ini
    config.env["GALAXY_TEST_VERBOSE_ERRORS"] = "true"
    config.env["GALAXY_TEST_SAVE"] = job_outputs_dir

    cd_command = "cd %s" % config.galaxy_root
    test_command = test_structures.GalaxyTestCommand(
        html_report,
        xunit_report,
        structured_report,
        failed=kwds.get("failed", False),
        installed=kwds.get("installed", False),
    ).build()

    # Optionally tell Galaxy's startup scripts to skip virtualenv setup.
    common_startup_args = ""
    if kwds.get("skip_venv", False):
        common_startup_args = (
            'COMMON_STARTUP_ARGS=--skip-venv; '
            'export COMMON_STARTUP_ARGS; '
            'echo "Set COMMON_STARTUP_ARGS to ${COMMON_STARTUP_ARGS}"'
        )
    venv_command = setup_venv(ctx, kwds)

    full_command = shell_join(
        cd_command,
        common_startup_args,
        venv_command,
        test_command,
    )
    return_code = run_galaxy_command(ctx, full_command, config.env, "Testing tools")

    # Copy captured job outputs back into test-data when requested.
    if kwds.get('update_test_data', False):
        update_cp_args = (job_outputs_dir, config.test_data_dir)
        shell('cp -r "%s"/* "%s"' % update_cp_args)

    if xunit_report and (not os.path.exists(xunit_report)):
        warn(NO_XUNIT_MESSAGE)
        xunit_report = None

    results = test_structures.GalaxyTestResults(
        structured_report,
        xunit_report,
        html_report,
        return_code,
    )
    handle_reports(ctx, results.structured_data, kwds)
    __handle_summary(results, **kwds)
    return return_code
def cli(ctx, path, **kwds):
    """Run the tests in the specified tool tests in a Galaxy instance.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``) will
    also be created. These files can be disabled by passing in empty
    arguments or globally by setting the values ``default_test_output``
    and/or ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``.
    For continuous integration testing a xUnit-style report can be
    configured using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment
    variables to attempt to shield this execution of Galaxy from manually
    launched runs against that same Galaxy root - but this may not be
    bullet proof yet so please be careful and do not try this against
    production Galaxy instances.
    """
    # NOTE(review): OUTPUT_DFEAULTS is the (misspelled) module-level constant
    # name; renaming it would break other references, so it is used as-is.
    for name, default in OUTPUT_DFEAULTS.items():
        __populate_default_output(ctx, name, kwds, default)

    kwds["for_tests"] = True
    with galaxy_config.galaxy_config(ctx, path, **kwds) as config:
        config_directory = config.config_directory
        html_report_file = kwds["test_output"]

        job_output_files = kwds.get("job_output_files", None)
        if job_output_files is None:
            job_output_files = os.path.join(config_directory, "jobfiles")

        xunit_supported, xunit_report_file = __xunit_state(kwds, config)
        structured_report_file = __structured_report_file(kwds, config)

        info("Testing using galaxy_root %s", config.galaxy_root)
        # TODO: Allow running dockerized Galaxy here instead.
        server_ini = os.path.join(config_directory, "galaxy.ini")
        config.env["GALAXY_CONFIG_FILE"] = server_ini
        config.env["GALAXY_TEST_VERBOSE_ERRORS"] = "true"
        config.env["GALAXY_TEST_SAVE"] = job_output_files

        cd_to_galaxy_command = "cd %s" % config.galaxy_root
        cmd = "; ".join([
            cd_to_galaxy_command,
            galaxy_run.ACTIVATE_COMMAND,
            # TODO: this should be moved to
            # run_tests.sh to match run.sh.
            __run_tests_cmd(
                html_report_file,
                xunit_report_file,
                structured_report_file,
            ),
        ])
        action = "Testing tools"
        return_code = galaxy_run.run_galaxy_command(ctx, cmd, config.env, action)

        # Copy captured job outputs back into test-data when requested.
        if kwds.get('update_test_data', False):
            update_cp_args = (job_output_files, config.test_data_dir)
            shell('cp -r "%s"/* "%s"' % update_cp_args)

        if xunit_report_file and (not os.path.exists(xunit_report_file)):
            warn(NO_XUNIT_MESSAGE)
            xunit_report_file = None

        test_results = galaxy_test.GalaxyTestResults(
            structured_report_file,
            xunit_report_file,
            html_report_file,
            return_code,
        )

        try:
            test_data = test_results.structured_data
            new_report = build_report.build_report(test_data)
            # Use a context manager so the report file handle is closed
            # promptly instead of leaking until garbage collection.
            with open(test_results.output_html_path, "w") as handle:
                handle.write(new_report)
        except Exception:
            # Report generation is best-effort; log the failure instead of
            # silently swallowing it so problems remain diagnosable.
            ctx.vlog("Problem producing test output.", exception=True)

        __handle_summary(test_results, **kwds)

        if return_code:
            sys.exit(1)
def cli(ctx, path, **kwds):
    """Run the tests in the specified tool tests in a Galaxy instance.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests planemo needs a Galaxy instance to utilize, planemo
    will search parent directories to see if any is a Galaxy instance
    - but one can pick the Galaxy instance to use with the --galaxy_root
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console - various
    detailed output summaries can be configured. ``tool_test_output.html``
    (settable via ``--test_output``) will contain a human consumable HTML
    report describing the test run. A JSON file (settable via
    ``--test_output_json`` and defaulting to ``tool_test_output.json``) will
    also be created. These files can be disabled by passing in empty
    arguments or globally by setting the values ``default_test_output``
    and/or ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``.
    For continuous integration testing a xUnit-style report can be
    configured using the ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment
    variables to attempt to shield this execution of Galaxy from manually
    launched runs against that same Galaxy root - but this may not be
    bullet proof yet so please be careful and do not try this against
    production Galaxy instances.
    """
    # NOTE(review): OUTPUT_DFEAULTS is the (misspelled) module-level constant
    # name; renaming it would break other references, so it is used as-is.
    for name, default in OUTPUT_DFEAULTS.items():
        __populate_default_output(ctx, name, kwds, default)

    kwds["for_tests"] = True
    with galaxy_config.galaxy_config(ctx, path, **kwds) as config:
        config_directory = config.config_directory
        html_report_file = kwds["test_output"]

        job_output_files = kwds.get("job_output_files", None)
        if job_output_files is None:
            job_output_files = os.path.join(config_directory, "jobfiles")

        xunit_supported, xunit_report_file = __xunit_state(kwds, config)
        structured_report_file = __structured_report_file(kwds, config)

        info("Testing using galaxy_root %s", config.galaxy_root)
        # TODO: Allow running dockerized Galaxy here instead.
        server_ini = os.path.join(config_directory, "galaxy.ini")
        config.env["GALAXY_CONFIG_FILE"] = server_ini
        config.env["GALAXY_TEST_VERBOSE_ERRORS"] = "true"
        config.env["GALAXY_TEST_SAVE"] = job_output_files

        cd_to_galaxy_command = "cd %s" % config.galaxy_root
        cmd = "; ".join([
            cd_to_galaxy_command,
            galaxy_run.ACTIVATE_COMMAND,
            # TODO: this should be moved to
            # run_tests.sh to match run.sh.
            __run_tests_cmd(
                html_report_file,
                xunit_report_file,
                structured_report_file,
            ),
        ])
        action = "Testing tools"
        return_code = galaxy_run.run_galaxy_command(
            ctx,
            cmd,
            config.env,
            action
        )

        # Copy captured job outputs back into test-data when requested.
        if kwds.get('update_test_data', False):
            update_cp_args = (job_output_files, config.test_data_dir)
            shell('cp -r "%s"/* "%s"' % update_cp_args)

        if xunit_report_file and (not os.path.exists(xunit_report_file)):
            warn(NO_XUNIT_MESSAGE)
            xunit_report_file = None

        test_results = galaxy_test.GalaxyTestResults(
            structured_report_file,
            xunit_report_file,
            html_report_file,
            return_code,
        )

        try:
            test_data = test_results.structured_data
            new_report = build_report.build_report(test_data)
            # Use a context manager so the report file handle is closed
            # promptly instead of leaking until garbage collection.
            with open(test_results.output_html_path, "w") as handle:
                handle.write(new_report)
        except Exception:
            # Report generation is best-effort; log the failure instead of
            # silently swallowing it so problems remain diagnosable.
            ctx.vlog("Problem producing test output.", exception=True)

        __handle_summary(
            test_results,
            **kwds
        )

        if return_code:
            sys.exit(1)
def run_in_config(ctx, config, **kwds):
    """Execute tool tests against a configured Galaxy and emit reports.

    Returns the shell return code of the test command after report files
    and the console summary have been produced.
    """
    conf_dir = config.config_directory
    html_report = kwds["test_output"]
    job_outputs_dir = kwds.get("job_output_files", None)
    if job_outputs_dir is None:
        job_outputs_dir = os.path.join(conf_dir, "jobfiles")

    xunit_supported, xunit_report = __xunit_state(kwds, config)
    structured_report = __structured_report_file(kwds, config)

    info("Testing using galaxy_root %s", config.galaxy_root)
    # TODO: Allow running dockerized Galaxy here instead.
    server_ini = os.path.join(conf_dir, "galaxy.ini")
    config.env["GALAXY_CONFIG_FILE"] = server_ini
    config.env["GALAXY_TEST_VERBOSE_ERRORS"] = "true"
    config.env["GALAXY_TEST_SAVE"] = job_outputs_dir

    cd_command = "cd %s" % config.galaxy_root
    test_command = test_structures.GalaxyTestCommand(
        html_report,
        xunit_report,
        structured_report,
        failed=kwds.get("failed", False),
        installed=kwds.get("installed", False),
    ).build()
    full_command = "; ".join([
        cd_command,
        galaxy_run.ACTIVATE_COMMAND,
        # TODO: this should be moved to
        # run_tests.sh to match run.sh.
        test_command,
    ])
    return_code = galaxy_run.run_galaxy_command(
        ctx,
        full_command,
        config.env,
        "Testing tools"
    )

    # Copy captured job outputs back into test-data when requested.
    if kwds.get('update_test_data', False):
        update_cp_args = (job_outputs_dir, config.test_data_dir)
        shell('cp -r "%s"/* "%s"' % update_cp_args)

    if xunit_report and (not os.path.exists(xunit_report)):
        warn(NO_XUNIT_MESSAGE)
        xunit_report = None

    results = test_structures.GalaxyTestResults(
        structured_report,
        xunit_report,
        html_report,
        return_code,
    )
    try:
        report_data = results.structured_data
        # HTML report (default format) when a test_output path was supplied.
        if 'test_output' in kwds:
            target = kwds['test_output']
            if target is not None:
                with open(target, 'w') as handle:
                    handle.write(build_report.build_report(report_data))
        # Additional report flavors requested via test_output_markdown /
        # test_output_text keyword arguments.
        for flavor in ('markdown', 'text'):
            flavor_key = 'test_output_%s' % flavor
            if flavor_key in kwds:
                target = kwds[flavor_key]
                if target is None:
                    continue
                with open(target, 'w') as handle:
                    handle.write(build_report.build_report(report_data, report_type=flavor))
    except Exception:
        ctx.vlog("Problem producing test output.", exception=True)

    __handle_summary(
        results,
        **kwds
    )
    return return_code