def __mock_tool(
    id="cat1",
    version="1.0",
):
    # For now we are ignoring inputs, params_from_strings, and
    # check_and_update_param_values since we only have unit tests for version
    # handling - but we need to write tests for all of this longer term.
    tool = bunch.Bunch(
        id=id,
        version=version,
        name=id,
        inputs={},
        outputs={
            'out_file1': bunch.Bunch(
                collection=None,
                format='input',
                format_source=None,
                change_format=[],
                filters=[],
                label=None,
                output_type='data',
            )
        },
        params_from_strings=mock.Mock(),
        check_and_update_param_values=mock.Mock(),
        to_json=_to_json,
        assert_finalized=lambda: None,
    )
    return tool
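All of the snippets in this section lean on the same pattern: bunch.Bunch is a dict subclass whose keys are also readable and writable as attributes, which makes it a convenient way to stub out application objects in tests without defining one-off mock classes. A minimal sketch of that behavior, independent of the snippet above:

import bunch

# Keys double as attributes, so lightweight test stubs need no mock classes.
stub = bunch.Bunch(id="cat1", version="1.0")
assert stub.id == "cat1"             # attribute-style access
assert stub["version"] == "1.0"      # plain dict access still works
stub.version = "1.1"                 # attribute writes update the dict
assert stub["version"] == "1.1"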
def __init__( self, app, test_directory, tool ):
    working_directory = os.path.join( test_directory, "workdir" )
    tool_working_directory = os.path.join( working_directory, "working" )
    os.makedirs( tool_working_directory )
    self.app = app
    self.tool = tool
    self.state = model.Job.states.QUEUED
    self.command_line = "echo HelloWorld"
    self.environment_variables = []
    self.commands_in_new_shell = False
    self.prepare_called = False
    self.write_version_cmd = None
    self.dependency_shell_commands = None
    self.working_directory = working_directory
    self.tool_working_directory = tool_working_directory
    self.requires_setting_metadata = True
    self.job_destination = bunch.Bunch( id="default", params={} )
    self.galaxy_lib_dir = os.path.abspath( "lib" )
    self.job_id = 1
    self.external_id = None
    self.output_paths = [ '/tmp/output1.dat' ]
    self.mock_metadata_path = os.path.abspath( os.path.join( test_directory, "METADATA_SET" ) )
    self.metadata_command = "touch %s" % self.mock_metadata_path
    self.galaxy_virtual_env = None
    self.shell = "/bin/bash"

    # Cruft for setting metadata externally, axe at some point.
    self.external_output_metadata = bunch.Bunch(
        set_job_runner_external_pid=lambda pid, session: None
    )
    self.app.datatypes_registry.set_external_metadata_tool = bunch.Bunch(
        build_dependency_shell_commands=lambda: []
    )
def setUp( self ):
    self.temp_directory = tempfile.mkdtemp()
    self.config = bunch.Bunch(
        job_config_file=os.path.join( self.temp_directory, "job_conf.xml" ),
        use_tasked_jobs=False,
        job_resource_params_file="/tmp/fake_absent_path",
    )
    self.__write_config_from( SIMPLE_JOB_CONF )
    self.app = bunch.Bunch( config=self.config, job_metrics=MockJobMetrics() )
    self.__job_configuration = None
def setUp( self ):
    super(DataToolParameterTestCase, self).setUp()
    self.test_history = model.History()
    self.app.model.context.add( self.test_history )
    self.app.model.context.flush()
    self.trans = bunch.Bunch(
        app=self.app,
        get_history=lambda: self.test_history,
        get_current_user_roles=lambda: [],
        workflow_building_mode=False,
        webapp=bunch.Bunch( name="galaxy" ),
    )
    self.multiple = False
    self.optional = False
    self._param = None
def setUp(self):
    self.temp_directory = tempfile.mkdtemp()
    self.config = bunch.Bunch(
        job_config_file=os.path.join(self.temp_directory, "job_conf.xml"),
        use_tasked_jobs=False,
        job_resource_params_file="/tmp/fake_absent_path",
        config_dict={},
        default_job_resubmission_condition="",
        server_name="main",
    )
    self.__write_config_from(SIMPLE_JOB_CONF)
    self.app = bunch.Bunch(config=self.config, job_metrics=MockJobMetrics(), application_stack=ApplicationStack())
    self.__job_configuration = None
def _toolshed_install_dependency_from_dict(as_dict):
    # Rather than requiring full models in Pulsar, just use simple objects
    # containing only properties and associations used to resolve
    # dependencies for tool execution.
    repository_object = bunch.Bunch(
        name=as_dict['repository_name'],
        owner=as_dict['repository_owner'],
        installed_changeset_revision=as_dict['repository_installed_changeset'],
    )
    dependency_object = bunch.Bunch(
        name=as_dict['dependency_name'],
        version=as_dict['dependency_version'],
        type=as_dict['dependency_type'],
        tool_shed_repository=repository_object,
    )
    return dependency_object
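A quick hypothetical call illustrating the dict shape this helper expects and the nested bunch it returns; the repository and dependency values below are made up for illustration:

# Hypothetical input; the keys match the ones read by the helper above.
as_dict = {
    "repository_name": "package_bowtie2_2_1_0",
    "repository_owner": "devteam",
    "repository_installed_changeset": "0123456789ab",
    "dependency_name": "bowtie2",
    "dependency_version": "2.1.0",
    "dependency_type": "package",
}
dependency = _toolshed_install_dependency_from_dict(as_dict)
assert dependency.tool_shed_repository.owner == "devteam"
assert (dependency.name, dependency.version) == ("bowtie2", "2.1.0")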
def __main__():
    if len(sys.argv) < 4:
        print('usage: upload.py <root> <datatypes_conf> <json paramfile> <output spec> ...', file=sys.stderr)
        sys.exit(1)

    output_paths = parse_outputs(sys.argv[4:])

    registry = Registry()
    registry.load_datatypes(root_dir=sys.argv[1], config=sys.argv[2])

    try:
        datasets = __read_paramfile(sys.argv[3])
    except (ValueError, AssertionError):
        datasets = __read_old_paramfile(sys.argv[3])

    metadata = []
    for dataset in datasets:
        dataset = bunch.Bunch(**safe_dict(dataset))
        try:
            output_path = output_paths[int(dataset.dataset_id)][0]
        except Exception:
            print('Output path for dataset %s not found on command line' % dataset.dataset_id, file=sys.stderr)
            sys.exit(1)
        try:
            if dataset.type == 'composite':
                files_path = output_paths[int(dataset.dataset_id)][1]
                metadata.append(add_composite_file(dataset, registry, output_path, files_path))
            else:
                metadata.append(add_file(dataset, registry, output_path))
        except UploadProblemException as e:
            metadata.append(file_err(unicodify(e), dataset))
    __write_job_metadata(metadata)
def __init__(self):
    self.config = bunch.Bunch(tool_secret="awesome_secret")
    self.model = mapping.init("/tmp", "sqlite:///:memory:", create_tables=True)
    self.toolbox = TestToolbox()
    self.datatypes_registry = TestDatatypesRegistry()
def test_context(self):
    if self._test_context is None:
        option_xml = ""
        if self.filtered_param:
            option_xml = '''<options><filter type="data_meta" ref="data1" key="dbkey" /></options>'''
        if self.metadata_filtered_param:
            option_xml = '''
            <options options_filter_attribute="metadata.foo">
                <filter type="add_value" value="bar" />
                <filter type="add_value" value="baz" />
            </options>'''
        param_xml = XML('''<param name="data2" type="data" format="txt">%s</param>''' % option_xml)
        self.param = basic.DataToolParameter(
            self.tool,
            param_xml,
        )
        trans = bunch.Bunch(
            app=self.app,
            get_current_user_roles=lambda: self.current_user_roles,
            workflow_building_mode=True,
        )
        self._test_context = dataset_matcher.get_dataset_matcher_factory(trans).dataset_matcher(
            param=self.param,
            other_values=self.other_values
        )

    return self._test_context
def setUp(self):
    self.setup_app(mock_model=False)
    self.mock_tool = bunch.Bunch(
        app=self.app,
        tool_type="default",
        valid_input_states=model.Dataset.valid_input_states,
    )
def __init__(self):
    self.config = bunch.Bunch()
    self.model = mapping.init( "/tmp", "sqlite:///:memory:", create_tables=True )
def app(self):
    if not self._app:
        self._app = bunch.Bunch(
            config=self.config,
            job_metrics=JobMetrics(),
            application_stack=self.application_stack
        )
    return self._app
def __init__( self, config_file ):
    self.config = ConfigParser.ConfigParser( dict(
        database_file='database/universe.sqlite',
        file_path='database/files',
        transfer_worker_port_range='12275-12675',
        transfer_worker_log=None
    ) )
    self.config.read( config_file )
    self.model = bunch.Bunch()
    self.connect_database()
def __init__(self):
    self.config = bunch.Bunch(
        log_events=False,
        log_actions=False,
    )
    self.model = mapping.init("/tmp", "sqlite:///:memory:", create_tables=True)
def __init__(self):
    self.config = bunch.Bunch(tool_secret="awesome_secret")
    self.model = mapping.init("/tmp", "sqlite:///:memory:", create_tables=True)
    self.toolbox = TestToolbox()
    self.datatypes_registry = TestDatatypesRegistry()
    self.security = IdEncodingHelper(id_secret="testing")
    self.workflow_manager = WorkflowsManager(self)
def get_job(self):
    raw_params = {
        "threshold": 8,
        "__workflow_invocation_uuid__": WORKFLOW_UUID,
    }

    def get_param_values(app, ignore_errors):
        assert app == self.app
        params = raw_params.copy()
        params["__job_resource"] = {
            "__job_resource__select": "True",
            "memory": "8gb"
        }
        return params

    return bunch.Bunch(
        user=bunch.Bunch(id=6789, email="*****@*****.**"),
        raw_param_dict=lambda: raw_params,
        get_param_values=get_param_values
    )
def test_workflow( self, workflow_test_file=None ):
    maxseconds = 120
    workflow_test_file = workflow_test_file or WorkflowTestCase.workflow_test_file
    assert workflow_test_file
    workflow_test = parse_test_file( workflow_test_file )
    galaxy_interactor = GalaxyWorkflowInteractor( self )

    # Calling workflow https://github.com/jmchilton/blend4j/blob/master/src/test/java/com/github/jmchilton/blend4j/galaxy/WorkflowsTest.java

    # Import workflow
    workflow_id, step_id_map, output_defs = self.__import_workflow( galaxy_interactor, workflow_test.workflow )

    # Stage data and history for workflow
    test_history = galaxy_interactor.new_history()
    stage_data_in_history( galaxy_interactor, workflow_test.test_data(), test_history )

    # Build workflow parameters
    uploads = galaxy_interactor.uploads
    ds_map = {}
    for step_index, input_dataset_label in workflow_test.input_datasets():
        # Upload is {"src": "hda", "id": hid}
        try:
            upload = uploads[ workflow_test.upload_name( input_dataset_label ) ]
        except KeyError:
            raise AssertionError( "Failed to find upload with label %s in uploaded datasets %s" % ( input_dataset_label, uploads ) )
        ds_map[ step_id_map[ step_index ] ] = upload

    payload = {
        "history": "hist_id=%s" % test_history,
        "ds_map": dumps( ds_map ),
        "workflow_id": workflow_id,
    }
    run_response = galaxy_interactor.run_workflow( payload ).json()

    outputs = run_response[ 'outputs' ]
    if len( outputs ) != len( output_defs ):
        msg_template = "Number of outputs [%d] created by workflow execution does not equal expected number from input file [%d]."
        msg = msg_template % ( len( outputs ), len( output_defs ) )
        raise AssertionError( msg )

    galaxy_interactor.wait_for_ids( test_history, outputs )

    for expected_output_def in workflow_test.outputs:
        # Get the correct hid
        name, outfile, attributes = expected_output_def
        output_testdef = bunch.Bunch( name=name, outfile=outfile, attributes=attributes )

        output_data = outputs[ int( name ) ]
        try:
            galaxy_interactor.verify_output( test_history, output_data, output_testdef=output_testdef, shed_tool_id=None, maxseconds=maxseconds )
        except Exception:
            for stream in ['stdout', 'stderr']:
                stream_output = galaxy_interactor.get_job_stream( test_history, output_data, stream=stream )
                print( self._format_stream( stream_output, stream=stream, format=True ), file=sys.stderr )
            raise
def setUp(self):
    super(DataColumnParameterTestCase, self).setUp()
    self.test_history = model.History()
    self.app.model.context.add(self.test_history)
    self.app.model.context.flush()
    self.trans = bunch.Bunch(
        app=self.app,
        get_history=lambda: self.test_history,
        get_current_user_roles=lambda: [],
        workflow_building_mode=False,
        webapp=bunch.Bunch(name="galaxy"),
    )
    self.type = "data_column"
    self.other_attributes = ""
    self.set_data_ref = "input_tsv"
    self.multiple = False
    self.optional = False
    self._param = None
def setUp(self):
    super(SelectToolParameterTestCase, self).setUp()
    self.test_history = model.History()
    self.app.model.context.add(self.test_history)
    self.app.model.context.flush()
    self.app.tool_data_tables["test_table"] = MockToolDataTable()
    self.trans = bunch.Bunch(
        app=self.app,
        get_history=lambda: self.test_history,
        get_current_user_roles=lambda: [],
        workflow_building_mode=False,
        webapp=bunch.Bunch(name="galaxy"),
    )
    self.type = "select"
    self.set_data_ref = False
    self.multiple = False
    self.optional = False
    self.options_xml = ""
    self._param = None
def setUp(self):
    self.setup_app(mock_model=False)
    self.mock_tool = bunch.Bunch(
        app=self.app,
        tool_type="default",
    )
    self.test_history = model.History()
    self.app.model.context.add(self.test_history)
    self.app.model.context.flush()
    self.trans = bunch.Bunch(
        app=self.app,
        get_history=lambda: self.test_history,
        get_current_user_roles=lambda: [],
        workflow_building_mode=False,
        webapp=bunch.Bunch(name="galaxy"),
    )
    self.multiple = False
    self.optional = False
    self._param = None
def cli(ctx, path, brew=None, skip_install=False, shell=None):
    """List commands to inject brew dependencies.

    Display commands used to modify environment to inject tool's brew
    dependencies.

    \b
        % . <(planemo brew_env bowtie2.xml)
        % which bowtie2
        /home/john/.linuxbrew/Cellar/bowtie2/2.1.0/bin/bowtie2

    By default this will attempt to install these recipes as needed. This
    automatic installation can be skipped with the ``--skip_install`` flag.

    Instead of injecting the environment into your current shell using the
    above idiom, the ``--shell`` flag can be sent to launch a new subshell
    when sourced.

    \b
        % . <(planemo brew_env --skip_install --shell bowtie2.xml)
        (bowtie2) % which bowtie2
        /home/john/.linuxbrew/Cellar/bowtie2/2.1.0/bin/bowtie2

    """
    tool_xml = load_tool(path)
    mock_args = bunch.Bunch(brew=brew)
    brew_context = brew_exts.BrewContext(mock_args)
    requirements, containers = parse_requirements_from_xml(tool_xml)

    lines = []
    for recipe_context in brew_util.requirements_to_recipe_contexts(
        requirements, brew_context
    ):
        if not skip_install:
            brew_exts.versioned_install(recipe_context)
        # Accumulate the env statements for this recipe; reusing the name
        # `lines` here would clobber the accumulator with a string.
        env_statements = brew_exts.build_env_statements_from_recipe_context(
            recipe_context
        )
        lines.extend(env_statements.split("\n"))

    if shell:
        # TODO: Would be cool if this wasn't a bunch of random hackery.
        launch_shell = os.environ.get("SHELL")
        if "bash" in launch_shell:
            ps1 = ps1_for_path(path)
            launch_shell = '(source ~/.bashrc; env PS1="%s" %s --norc)' % (
                ps1,
                launch_shell,
            )
        lines.extend([launch_shell])
        print(";".join(lines))
    else:
        print("\n".join(lines))
def setUp(self):
    self.setup_app()
    self.mock_hda = MockHistoryDatasetAssociation()
    self.tool = bunch.Bunch(
        app=self.app,
        tool_type="default",
    )
    self.current_user_roles = []
    self.other_values = {}

    # Reset lazily generated stuff
    self.filtered_param = False
    self._test_context = None
    self.param = None
def __mock_tool(
    id="cat1",
    version="1.0",
):
    # For now we are ignoring inputs, params_from_strings, and
    # check_and_update_param_values since we only have unit tests for version
    # handling - but we need to write tests for all of this longer term.
    tool = bunch.Bunch(
        id=id,
        version=version,
        inputs={},
        params_from_strings=mock.Mock(),
        check_and_update_param_values=mock.Mock(),
    )
    return tool
def test_watcher():
    if not watcher.can_watch:
        from nose.plugins.skip import SkipTest
        raise SkipTest()

    with __test_directory() as t:
        tool_path = path.join(t, "test.xml")
        toolbox = Toolbox()
        with open(tool_path, "w") as f:
            f.write("a")
        tool_watcher = watcher.get_watcher(toolbox, bunch.Bunch(watch_tools=True))
        tool_watcher.watch_file(tool_path, "cool_tool")
        with open(tool_path, "w") as f:
            f.write("b")
        time.sleep(2)
        toolbox.assert_reloaded("cool_tool")
def setUp(self):
    self.temp_directory = tempfile.mkdtemp()
    self.config = bunch.Bunch(
        job_config_file=os.path.join(self.temp_directory, "job_conf.xml"),
        use_tasked_jobs=False,
        job_resource_params_file="/tmp/fake_absent_path",
        config_dict={},
        default_job_resubmission_condition="",
        track_jobs_in_database=True,
        server_name="main",
    )
    self.__write_config_from(SIMPLE_JOB_CONF)
    self.__app = None
    self.__application_stack = None
    self.__job_configuration = None
    self.__job_configuration_base_pools = None
    self.__uwsgi_opt = None
def param(self):
    if not self._param:
        multi_text = ""
        if self.multiple:
            multi_text = 'multiple="True"'
        optional_text = ""
        if self.optional:
            optional_text = 'optional="True"'
        data_ref_text = ""
        if self.set_data_ref:
            data_ref_text = 'data_ref="input_tsv"'
        template_xml = '''<param name="my_name" type="%s" %s %s %s %s></param>'''
        param_str = template_xml % (self.type, data_ref_text, multi_text, optional_text, self.other_attributes)
        self._param = self._parameter_for(xml=param_str)
        self._param.ref_input = bunch.Bunch(formats=[datatypes_registry.get_datatype_by_extension("tabular")])

    return self._param
def test_watcher():
    with __test_directory() as t:
        tool_path = path.join(t, "test.xml")
        toolbox = Toolbox()
        with open(tool_path, "w") as f:
            f.write("a")
        tool_watcher = watcher.get_tool_watcher(toolbox, bunch.Bunch(watch_tools=True))
        tool_watcher.start()
        tool_watcher.watch_file(tool_path, "cool_tool")
        time.sleep(2)
        assert not toolbox.was_reloaded("cool_tool")
        with open(tool_path, "w") as f:
            f.write("b")
        wait_for_reload(lambda: toolbox.was_reloaded("cool_tool"))
        tool_watcher.shutdown()
        assert tool_watcher.observer is None
def __init__(self, test_dataset=None, name="Test Dataset", id=1):
    if not test_dataset:
        test_dataset = model.Dataset()
    self.states = model.HistoryDatasetAssociation.states
    self.deleted = False
    self.dataset = test_dataset
    self.visible = True
    self.datatype_matches = True
    self.conversion_destination = (None, None)
    self.datatype = bunch.Bunch(
        matches_any=lambda formats: self.datatype_matches,
    )
    self.dbkey = "hg19"
    self.implicitly_converted_parent_datasets = False

    self.name = name
    self.hid = id
    self.id = id
    self.children = []
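Because the datatype bunch closes over self.datatype_matches, a test can flip format matching on and off after the mock is built. A hypothetical usage sketch, assuming only the class as defined above:

# Hypothetical usage: the lambda reads datatype_matches at call time.
hda = MockHistoryDatasetAssociation(name="input1", id=7)
assert hda.datatype.matches_any(["tabular"])       # True by default

hda.datatype_matches = False
assert not hda.datatype.matches_any(["tabular"])   # now reports no match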
def test_watcher():
    if not watcher.can_watch:
        from nose.plugins.skip import SkipTest
        raise SkipTest()

    with __test_directory() as t:
        tool_path = path.join(t, "test.xml")
        toolbox = Toolbox()
        with open(tool_path, "w") as f:
            f.write("a")
        tool_watcher = watcher.get_tool_watcher(toolbox, bunch.Bunch(
            watch_tools=True
        ))
        tool_watcher.watch_file(tool_path, "cool_tool")
        assert not toolbox.was_reloaded("cool_tool")
        with open(tool_path, "w") as f:
            f.write("b")
        wait_for_reload(lambda: toolbox.was_reloaded("cool_tool"))
        tool_watcher.shutdown()
        assert not tool_watcher.observer.is_alive()
def test_stopping_job(self):
    self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
    runner = local.LocalJobRunner(self.app, 1)

    def queue():
        runner.queue_job(self.job_wrapper)

    t = threading.Thread(target=queue)
    t.start()
    external_id = self.job_wrapper.wait_for_external_id()
    mock_job = bunch.Bunch(
        get_external_output_metadata=lambda: None,
        get_job_runner_external_id=lambda: str(external_id),
        get_id=lambda: 1,
    )
    assert psutil.pid_exists(external_id)
    runner.stop_job(mock_job)
    t.join(1)
    assert not psutil.pid_exists(external_id)