def __init__(self, galaxy_url=None, galaxy_api_key=None, max_retries=None,
             retry_delay=None, polling_interval=None, output_folder='.',
             stream=_sys.stderr, descriptions=True, verbosity=1,
             elapsed_times=True):
    """Set up the Galaxy connection, workflow loader, logger and XML test runner.

    :param galaxy_url: URL of the target Galaxy server.
    :param galaxy_api_key: API key used to authenticate against Galaxy.
    :param max_retries: retry count forwarded to the Galaxy instance factory.
    :param retry_delay: delay between retries, forwarded as-is.
    :param polling_interval: polling interval, forwarded as-is.
    :param output_folder: folder where the XML test reports are written.
    :param stream: stream used by the test runner for progress output.
    :param descriptions: whether the runner prints test descriptions.
    :param verbosity: runner verbosity level.
    :param elapsed_times: whether the runner reports per-test elapsed times.
    """
    self.galaxy_api_key = galaxy_api_key
    # Galaxy server connection (retry/polling settings are handled by the factory)
    self._galaxy_instance = _common.get_galaxy_instance(
        galaxy_url, galaxy_api_key,
        max_retries=max_retries,
        retry_delay=retry_delay,
        polling_interval=polling_interval)
    # shared loader bound to that connection
    self._workflow_loader = _common.WorkflowLoader.get_instance(self._galaxy_instance)
    # per-instance logger
    self._logger = _common.LoggerManager.get_logger(self)
    # XML test runner that produces the reports under `output_folder`
    self._runner = _ExtendedXMLTestRunner(
        output=output_folder,
        stream=stream,
        verbosity=verbosity,
        descriptions=descriptions,
        elapsed_times=elapsed_times)
def cleanup_test_workflow_data(galaxy_url=None, galaxy_api_key=None):
    """Delete every Galaxy history whose name contains the test-history prefix.

    :param galaxy_url: URL of the target Galaxy server.
    :param galaxy_api_key: API key used to authenticate against Galaxy.
    """
    _logger.debug("Cleaning saved histories ...")
    galaxy_instance = _common.get_galaxy_instance(galaxy_url, galaxy_api_key)
    # only histories created by the test framework carry this prefix
    for history in galaxy_instance.histories.list():
        if _core.WorkflowTestCase.DEFAULT_HISTORY_NAME_PREFIX in history.name:
            galaxy_instance.histories.delete(history.id)
def cleanup_test_workflows(galaxy_url=None, galaxy_api_key=None):
    """Unload every Galaxy workflow whose name contains the test-workflow prefix.

    :param galaxy_url: URL of the target Galaxy server.
    :param galaxy_api_key: API key used to authenticate against Galaxy.
    """
    _logger.debug("Cleaning workflow library ...")
    galaxy_instance = _common.get_galaxy_instance(galaxy_url, galaxy_api_key)
    workflow_loader = _common.WorkflowLoader.get_instance(galaxy_instance)
    # only workflows uploaded by the test framework carry this prefix
    for workflow in galaxy_instance.workflows.list():
        if _core.WorkflowTestCase.DEFAULT_WORKFLOW_NAME_PREFIX in workflow.name:
            workflow_loader.unload_workflow(workflow.id)
def _get_history_info(config):
    """Resolve the history name in ``config["history"]`` to a single history.

    Looks up all Galaxy histories whose name contains the configured name.
    With exactly one match it is returned directly; with several matches the
    user is prompted interactively to pick one.

    :param config: dict with keys ``galaxy_url``, ``galaxy_api_key`` and
        ``history`` (a substring of the target history name).
    :return: the selected history wrapper, or ``None`` if no history matches
        or the user exits the prompt (choice ``0`` or Ctrl-C).
    """
    result = None
    gi = _common.get_galaxy_instance(config["galaxy_url"], config["galaxy_api_key"])
    _logger.info("Loading Galaxy history info ...")
    candidate_histories = [h for h in gi.histories.list()
                           if config["history"] in h.name]
    candidate_count = len(candidate_histories)
    if candidate_count == 0:
        print("\n No history found with name: \"{0}\"".format(config["history"]))
    elif candidate_count == 1:
        result = candidate_histories[0]
    else:
        # ambiguous name: ask the user to choose among the candidates
        while True:
            print(
                "\nNOTICE:".ljust(10),
                "More than one history matches the name \"{0}\".".format(
                    config["history"]))
            print("".ljust(9), "Please select one of the following options:\n")
            for opt, h in enumerate(candidate_histories):
                print(
                    "".ljust(3), "{0})".format(opt + 1).ljust(4),
                    h.name.ljust(30), "".ljust(4), "create-time:",
                    _datetime.datetime.strptime(
                        h.wrapped["create_time"],
                        "%Y-%m-%dT%H:%M:%S.%f").strftime("%Y-%m-%d %H:%M:%S"))
            print("\n".ljust(4), "0)".ljust(4), "Exit")
            try:
                # get the user choice as int
                # notice that `input` in python3 is equivalent to `raw_input` in python2
                choice = int(input("\n ==> Choice: "))
                if choice in range(0, candidate_count + 1):
                    if choice > 0:
                        result = candidate_histories[choice - 1]
                    print("\n")
                    break
            # BUGFIX: the three identical handlers are merged into one tuple
            # clause; NameError/SyntaxError can only be raised by Python 2's
            # `input`, ValueError by int() on bad text.
            except (ValueError, NameError, SyntaxError):
                print("\nWARNING: ".ljust(10), "Your choice is not valid!!!")
            except KeyboardInterrupt:
                break
            else:
                # `try` completed without break: the number was out of range
                print("\nWARNING: ".ljust(10), "Your choice is not valid!!!")
    return result
def __init__(self, history_id, galaxy_url=None, galaxy_api_key=None):
    """Load a Galaxy history and index its datasets and jobs.

    Fetches the wrapped history identified by ``history_id`` and prepares the
    bookkeeping containers that ``_process_history`` fills in.

    :param history_id: ID of the Galaxy history to load.
    :param galaxy_url: URL of the target Galaxy server.
    :param galaxy_api_key: API key used to authenticate against Galaxy.
    """
    super(History, self).__init__()
    # configure logger
    self._logger = _common.LoggerManager.get_logger(self)
    # set the Galaxy instance
    self._gi = _common.get_galaxy_instance(galaxy_url, galaxy_api_key)
    # set wrapped history
    self._logger.info("Loading history %s info", history_id)
    self._history = self._gi.histories.get(history_id)
    # job -> tool mapping
    self.job_tool = {}
    # datasets
    self.datasets = None
    self.input_datasets = _collections.OrderedDict()
    self.output_datasets = _collections.OrderedDict()
    self.intermediate_datasets = _collections.OrderedDict()
    # map dataset inputs to their order
    self._input_order_map = {}
    # job info
    self._jobs = {}
    self.job_input_ids = {}
    self.job_output_ids = {}
    # BUGFIX: `creating_jobs` was initialized twice in the original code
    # (once next to `job_tool` and again here); a single initialization
    # is kept.
    self.creating_jobs = {}
    self.processing_jobs = _collections.OrderedDict()
    self.processing_job_levels = {}
    # tool cache
    self._tools = {}
    # labels
    self.input_dataset_labels = {}
    self.output_dataset_labels = {}
    self.intermediate_dataset_labels = {}
    # process history
    self._logger.info("Processing history info...")
    self._process_history()
    self._logger.info("History info processing: done")
def _get_workflow_info(filename, galaxy_url, galaxy_api_key, tool_folder=DEFAULT_TOOLS_FOLDER):
    """Parse a Galaxy workflow (``.ga``) definition file.

    Reads the JSON workflow definition and collects its data inputs and, for
    each tool step, the declared workflow outputs. Parameter extraction from
    the tool XML is currently disabled (see the commented reference code and
    the LP note below), so ``params`` is returned empty.

    :param filename: path of the workflow ``.ga`` JSON file.
    :param galaxy_url: URL of the target Galaxy server.
    :param galaxy_api_key: API key used to authenticate against Galaxy.
    :param tool_folder: folder where downloaded tool definitions are stored.
    :return: tuple ``(wf_config, inputs, params, outputs)`` where
        ``wf_config`` is the parsed JSON dict, ``inputs`` the list of data
        inputs, ``params`` a (currently empty) ``CommentedMap`` of tool
        parameters, and ``outputs`` a dict mapping step id to its
        ``workflow_outputs`` keyed by uuid.
    """
    inputs = []
    params = _CommentedMap()
    outputs = {}

    # loading wf info start
    _logger.debug("Loading workflow definition from %s file...", filename)

    # setup galaxy instance
    galaxy_instance = _common.get_galaxy_instance(galaxy_url, galaxy_api_key)
    galaxy_tool_client = _ToolClient(
        galaxy_instance.gi)  # get the non-object version of the GI

    # BUGFIX: create the *requested* tool folder; the original always
    # checked/created DEFAULT_TOOLS_FOLDER, silently ignoring `tool_folder`.
    if not _os.path.exists(tool_folder):
        _os.makedirs(tool_folder)

    with open(filename) as fp:
        wf_config = _json.load(fp)

    for sid, step in _iteritems(wf_config["steps"]):
        # tool = gi.tools.get()
        _logger.debug("Processing step '%s' -- '%s'", sid, step["name"])

        # an input step....
        if not step["tool_id"] and step["type"] == "data_input":
            for input_ in step["inputs"]:
                _logger.debug("Processing input: '%s' (%s)",
                              input_["name"], input_["description"])
                inputs.append(input_)

        # a processing step (with outputs) ...
        if step["tool_id"] and step["type"] == "tool":
            # tool parameters
            tool_params = _CommentedMap()
            # process tool info to extract parameters
            tool_id = step["tool_id"]
            # tool = galaxy_instance.tools.get(tool_id)

            ## LP: re-write this using the bioblend.objects API to fetch the tool
            # inputs. See the comment above `def _process_tool_param_element`
            # tool_config_xml = _os.path.basename(tool.wrapped["config_file"])
            # _logger.debug("Processing step tool '%s'", tool_id)
            #
            # try:
            #     _logger.debug("Download TOOL '%s' definition file XML: %s....", tool_id, tool_config_xml)
            #     targz_filename = _os.path.join(DEFAULT_TOOLS_FOLDER, tool_id + ".tar.gz")
            #     targz_content = galaxy_tool_client._get(_os.path.join(tool_id, "download"), json=False)
            #     if targz_content.status_code == 200:
            #         with open(targz_filename, "w") as tfp:
            #             tfp.write(targz_content.content)
            #         tar = _tarfile.open(targz_filename)
            #         tar.extractall(path=tool_folder)
            #         tar.close()
            #         _os.remove(targz_filename)
            #         _logger.debug("Download TOOL '%s' definition file XML: %s....: DONE", tool_id, tool_config_xml)
            #     else:
            #         _logger.debug("Download TOOL '%s' definition file XML: %s....: ERROR %r",
            #                       tool_id, tool_config_xml, targz_content.status_code)
            #
            #     tool_config_xml = _os.path.join(DEFAULT_TOOLS_FOLDER, tool_config_xml)
            #     if _os.path.exists(tool_config_xml):
            #         tree = _etree.parse(tool_config_xml)
            #         root = tree.getroot()
            #         inputs_el = root.find("inputs")
            #         for input_el in inputs_el:
            #             _process_tool_param_element(input_el, tool_params)
            #         if len(tool_params) > 0:
            #             params.insert(int(sid), sid, tool_params)
            #
            # except _StandardError as e:
            #     _logger.debug("Download TOOL '%s' definition file XML: %s....: ERROR", tool_id, tool_config_xml)
            #     _logger.error(e)

            # process
            # NOTE(review): outputs are collected for tool steps here; the
            # original formatting was lost, so confirm this block was not
            # meant to run for *every* step type.
            outputs[str(sid)] = {}
            for output in step["workflow_outputs"]:
                outputs[str(sid)][output["uuid"]] = output

    # loading wf info end
    _logger.debug("Workflow definition loaded from %s file...", filename)

    # return loaded info
    return wf_config, inputs, params, outputs