def run_tool_test(self, tool_id, index=0, resource_parameters=None):
    # Use None instead of a mutable default argument, which would be shared
    # across calls.
    resource_parameters = resource_parameters or {}
    host, port, url = target_url_parts()
    galaxy_interactor_kwds = {
        "galaxy_url": url,
        "master_api_key": get_master_api_key(),
        "api_key": get_user_api_key(),
        "keep_outputs_dir": None,
    }
    galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
    verify_tool(
        tool_id=tool_id,
        test_index=index,
        galaxy_interactor=galaxy_interactor,
        resource_parameters=resource_parameters,
    )
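
# Hedged usage sketch for run_tool_test above: a hypothetical subclass wiring
# it into a single test method. ToolTestCase is assumed to be the base class
# providing run_tool_test, "Cut1" is a placeholder tool id, and the environment
# consumed by target_url_parts() and the API key helpers is assumed configured.
class ExampleToolTest(ToolTestCase):
    def test_first_case(self):
        self.run_tool_test("Cut1", index=0)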
def _get_interactor(self, test_user, test_user_api_key):
    if test_user_api_key is None:
        # Probe /whoami: if the configured key belongs to a real user, reuse it.
        whoami = self.gi.make_get_request(self.gi.url + "/whoami").json()
        if whoami is not None:
            test_user_api_key = self.gi.key
    galaxy_interactor_kwds = {
        "galaxy_url": re.sub('/api', '', self.gi.url),
        "master_api_key": self.gi.key,
        "api_key": test_user_api_key,  # TODO
        "keep_outputs_dir": '',
    }
    if test_user_api_key is None:
        # No usable key; let the interactor resolve (or create) the test user.
        galaxy_interactor_kwds["test_user"] = test_user
    galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
    return galaxy_interactor
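
# Hedged usage sketch for _get_interactor above: with test_user_api_key=None
# the helper probes /whoami and reuses the configured key if it maps to a real
# user; otherwise it forwards test_user so GalaxyInteractorApi can resolve (or
# create) that account. The helper object and email address are placeholders.
def example_resolve_interactor(helper):
    return helper._get_interactor("test-user@example.org", test_user_api_key=None)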
def build_tests(app=None, testing_shed_tools=False, master_api_key=None, user_api_key=None,
                name_prefix="TestForTool_", baseclass=ToolTestCase, create_admin=False,
                user_email=None, G=None, contains=None):
    """
    If the module level variable `toolbox` is set, generate `ToolTestCase`
    classes for all of its tests and put them into this module's globals() so
    they can be discovered by nose.
    """
    host, port, url = target_url_parts()
    keep_outputs_dir = setup_keep_outdir()
    galaxy_interactor_kwds = {
        "galaxy_url": url,
        "master_api_key": master_api_key,
        "api_key": user_api_key,
        "keep_outputs_dir": keep_outputs_dir,
        "user_api_key_is_admin_key": True,
    }
    if create_admin and not user_api_key:
        galaxy_interactor_kwds['test_user'] = user_email
    galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)

    if not G:
        # Push all the toolbox tests to module level.
        G = globals()

    # Eliminate all previous tests from G.
    for key, val in G.copy().items():
        if key.startswith('TestForTool_'):
            del G[key]

    tests_summary = galaxy_interactor.get_tests_summary()
    for tool_id, tool_summary in tests_summary.items():
        # Create a new subclass of ToolTestCase, dynamically adding methods
        # named test_tool_XXX that run each test defined in the tool config.
        if contains and contains not in tool_id:
            continue
        name = name_prefix + tool_id.replace(' ', '_')
        baseclasses = (baseclass, )
        namespace = dict()
        all_versions_test_count = 0

        for tool_version, version_summary in tool_summary.items():
            count = version_summary["count"]
            for i in range(count):
                test_function_name = 'test_tool_%06d' % all_versions_test_count

                def make_test_method(tool_version, test_index):
                    # Bind the current loop values through closure arguments.
                    def test_tool(self):
                        self.do_it(tool_version=tool_version, test_index=test_index)
                    test_tool.__name__ = test_function_name
                    return test_tool

                test_method = make_test_method(tool_version, i)
                test_method.__doc__ = "( %s ) > Test-%d" % (tool_id, all_versions_test_count + 1)
                namespace[test_function_name] = test_method
                namespace['tool_id'] = tool_id
                namespace["galaxy_interactor"] = galaxy_interactor
                namespace['master_api_key'] = master_api_key
                namespace['user_api_key'] = user_api_key or galaxy_interactor.api_key
                namespace['test_count'] = count
                all_versions_test_count += 1

        # Create new class object, with name `name`, derived from baseclasses
        # (which should be a tuple of classes) and with namespace dict.
        new_class_obj = type(str(name), baseclasses, namespace)
        G[name] = new_class_obj
    return G
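
# Minimal sketch of invoking build_tests from a test module at import time so
# the generated classes land in that module's globals() and nose can discover
# them. The environment variable names are assumptions; contains="Cut1"
# restricts generation to tool ids containing that placeholder string.
import os

build_tests(
    master_api_key=os.environ.get("GALAXY_TEST_MASTER_API_KEY"),
    user_api_key=os.environ.get("GALAXY_TEST_USER_API_KEY"),
    contains="Cut1",
)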
def run_tests(args, test_filters=None, log=None):
    # Argument parsing is split out so other scripts can be built quickly, such
    # as a script that runs all tool tests for a workflow by passing in custom
    # test_filters.
    test_filters = test_filters or []
    log = log or setup_global_logger(__name__, verbose=args.verbose)
    client_test_config_path = args.client_test_config
    if client_test_config_path is not None:
        log.debug(f"Reading client config path {client_test_config_path}")
        with open(client_test_config_path) as f:
            client_test_config = yaml.full_load(f)
    else:
        client_test_config = {}

    def get_option(key):
        # Prefer the command line value; fall back to the client test config.
        arg_val = getattr(args, key, None)
        if arg_val is None and key in client_test_config:
            val = client_test_config.get(key)
        else:
            val = arg_val
        return val

    output_json_path = get_option("output_json")
    galaxy_url = get_option("galaxy_url")
    galaxy_interactor_kwds = {
        "galaxy_url": galaxy_url,
        "master_api_key": get_option("admin_key"),
        "api_key": get_option("key"),
        "keep_outputs_dir": args.output,
        "download_attempts": get_option("download_attempts"),
        "download_sleep": get_option("download_sleep"),
        "test_data": get_option("test_data"),
    }
    tool_id = args.tool_id
    tool_version = args.tool_version
    tools_client_test_config = DictClientTestConfig(client_test_config.get("tools"))
    verbose = args.verbose

    galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
    results = Results(args.suite_name, output_json_path, append=args.append, galaxy_url=galaxy_url)

    skip = args.skip
    if skip == "executed":
        test_filters.append(results.already_executed)
    elif skip == "successful":
        test_filters.append(results.already_successful)

    test_references = build_case_references(
        galaxy_interactor,
        tool_id=tool_id,
        tool_version=tool_version,
        test_index=args.test_index,
        page_size=args.page_size,
        page_number=args.page_number,
        test_filters=test_filters,
        log=log,
    )
    log.debug(f"Built {len(test_references)} test references to execute.")
    verify_kwds = dict(
        client_test_config=tools_client_test_config,
        force_path_paste=args.force_path_paste,
        skip_with_reference_data=not args.with_reference_data,
        quiet=not verbose,
    )
    test_tools(
        galaxy_interactor,
        test_references,
        results,
        log=log,
        parallel_tests=args.parallel_tests,
        history_per_test_case=args.history_per_test_case,
        no_history_cleanup=args.no_history_cleanup,
        publish_history=get_option("publish_history"),
        verify_kwds=verify_kwds,
    )
    exceptions = results.test_exceptions
    if exceptions:
        exception = exceptions[0]
        if hasattr(exception, "exception"):
            exception = exception.exception
        raise exception
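
# Hedged sketch of the pattern mentioned in run_tests' opening comment: a
# custom test filter restricting execution to tools used by a workflow. It
# assumes test references expose a tool_id attribute and that a filter
# returning True excludes a case, mirroring how results.already_executed is
# appended for --skip executed above; workflow_tool_ids is a hypothetical
# precomputed set of tool ids.
def run_workflow_tool_tests(args, workflow_tool_ids):
    def skip_non_workflow_tools(test_reference):
        # True means "skip this case", matching the skip filters above.
        return test_reference.tool_id not in workflow_tool_ids

    run_tests(args, test_filters=[skip_non_workflow_tools])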
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    args = _arg_parser().parse_args(argv)
    client_test_config_path = args.client_test_config
    if client_test_config_path is not None:
        with open(client_test_config_path) as f:
            client_test_config = yaml.full_load(f)
    else:
        client_test_config = {}

    def get_option(key):
        arg_val = getattr(args, key, None)
        if arg_val is None and key in client_test_config:
            val = client_test_config.get(key)
        else:
            val = arg_val
        return val

    output_json_path = get_option("output_json")
    galaxy_interactor_kwds = {
        "galaxy_url": get_option("galaxy_url"),
        "master_api_key": get_option("admin_key"),
        "api_key": get_option("key"),
        "keep_outputs_dir": args.output,
    }
    tool_id = args.tool_id
    tool_version = args.tool_version
    tools_client_test_config = DictClientTestConfig(client_test_config.get("tools"))

    galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
    raw_test_index = args.test_index
    if raw_test_index == ALL_TESTS:
        tool_test_dicts = galaxy_interactor.get_tool_tests(tool_id, tool_version=tool_version)
        test_indices = list(range(len(tool_test_dicts)))
    else:
        test_indices = [int(raw_test_index)]

    test_results = []
    if args.append:
        assert output_json_path != "-"
        with open(output_json_path) as f:
            previous_results = json.load(f)
        test_results = previous_results["tests"]

    exceptions = []
    verbose = args.verbose
    for test_index in test_indices:
        if tool_version:
            tool_id_and_version = "{}/{}".format(tool_id, tool_version)
        else:
            tool_id_and_version = tool_id
        test_identifier = "tool %s test # %d" % (tool_id_and_version, test_index)

        def register(job_data):
            test_results.append({
                'id': tool_id + "-" + str(test_index),
                'has_data': True,
                'data': job_data,
            })

        try:
            verify_tool(
                tool_id,
                galaxy_interactor,
                test_index=test_index,
                tool_version=tool_version,
                register_job_data=register,
                quiet=not verbose,
                force_path_paste=args.force_path_paste,
                client_test_config=tools_client_test_config,
            )
            if verbose:
                print("%s passed" % test_identifier)
        except Exception as e:
            if verbose:
                print("{} failed, {}".format(test_identifier, e))
            exceptions.append(e)

    report_obj = {
        'version': '0.1',
        'tests': test_results,
    }
    if output_json_path:
        if output_json_path == "-":
            print(json.dumps(report_obj))
        else:
            with open(output_json_path, "w") as f:
                json.dump(report_obj, f)

    if exceptions:
        raise exceptions[0]
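
# Illustrative shape of the report object written by main above. The id field
# is tool_id + "-" + test_index as built in register(); the keys inside "data"
# come from verify_tool's register_job_data callback, so the "status" entry
# shown here is an assumption.
example_report = {
    "version": "0.1",
    "tests": [
        {
            "id": "Cut1-0",
            "has_data": True,
            "data": {"status": "success"},
        },
    ],
}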