def execute(self, cmd, timeout=60):
    def retry():
        try:
            _, stdout, stderr = self._execute(cmd, timeout)
        except paramiko.SSHException as e:
            log.error(e)
            time.sleep(10)
            self.connect()
            _, stdout, stderr = self._execute(cmd, timeout)
        return stdout, stderr

    stdout, stderr = self.retry_action_executor.execute(retry)
    return_code = stdout.channel.recv_exit_status()
    return Bunch(stdout=unicodify(stdout.read()),
                 stderr=unicodify(stderr.read()),
                 returncode=return_code)
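# Every snippet in this listing depends on Galaxy's Bunch helper. A minimal
# sketch of the behavior these examples rely on (keyword arguments reachable
# as attributes as well as dict items); the real galaxy.util.bunch.Bunch
# differs in detail:
class Bunch(dict):
    """Dict whose items are also reachable as attributes."""

    def __init__(self, **kwds):
        super(Bunch, self).__init__(kwds)
        # Point the instance __dict__ at the dict itself so that
        # b.key, b['key'], b.get('key'), and b.items() all agree.
        self.__dict__ = self

# Usage: result = Bunch(stdout='ok', stderr='', returncode=0)
# result.returncode  ->  0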
def test_module_dependency():
    with __test_base_path() as temp_directory:
        # Create mock modulecmd script that just exports a variable
        # the way modulecmd sh load would, but also validate correct
        # module name and version are coming through.
        mock_modulecmd = os.path.join(temp_directory, 'modulecmd')
        __write_script(mock_modulecmd, '''#!/bin/sh
if [ $3 != "foomodule/1.0" ]; then
    exit 1
fi
echo 'FOO="bar"'
''')
        resolver = Bunch(modulecmd=mock_modulecmd, modulepath='/something')
        dependency = ModuleDependency(resolver, "foomodule", "1.0")
        __assert_foo_exported(dependency.shell_commands())
class RBACAgent(object):
    """Handle Galaxy Tool Shed security"""
    permitted_actions = Bunch()

    def associate_components(self, **kwd):
        raise Exception('No valid method of associating provided components: %s' % kwd)

    def associate_user_role(self, user, role):
        raise Exception('No valid method of associating a user with a role')

    def convert_permitted_action_strings(self, permitted_action_strings):
        """
        When getting permitted actions from an untrusted source like a
        form, ensure that they match our actual permitted actions.
        """
        return [x for x in [self.permitted_actions.get(action_string)
                            for action_string in permitted_action_strings]
                if x is not None]

    def create_user_role(self, user, app):
        raise Exception("Unimplemented Method")

    def create_private_user_role(self, user):
        raise Exception("Unimplemented Method")

    def get_action(self, name, default=None):
        """Get a permitted action by its dict key or action name"""
        for k, v in self.permitted_actions.items():
            if k == name or v.action == name:
                return v
        return default

    def get_actions(self):
        """Get all permitted actions as a list of Action objects"""
        return list(self.permitted_actions.__dict__.values())

    def get_item_actions(self, action, item):
        raise Exception('No valid method of retrieving action (%s) for item %s.' % (action, item))

    def get_private_user_role(self, user):
        raise Exception("Unimplemented Method")
def __init__(
    self,
    sa_session: SessionlessContext,
    tool_app_config: ToolAppConfig,
    datatypes_registry: Registry,
    object_store: ObjectStore,
    tool_data_table_manager: ToolDataTableManager,
    file_sources: ConfiguredFileSources,
):
    self.model = Bunch(context=sa_session)
    self.config = tool_app_config
    self.datatypes_registry = datatypes_registry
    self.object_store = object_store
    self.genome_builds = GenomeBuilds(self)
    self.tool_data_tables = tool_data_table_manager
    self.file_sources = file_sources
    self.biotools_metadata_source = None
def __build_metadata_configuration(self, client, job_wrapper, remote_metadata, remote_job_config):
    metadata_kwds = {}
    if remote_metadata:
        remote_system_properties = remote_job_config.get("system_properties", {})
        remote_galaxy_home = remote_system_properties.get("galaxy_home", None)
        if not remote_galaxy_home:
            raise Exception(NO_REMOTE_GALAXY_FOR_METADATA_MESSAGE)
        metadata_kwds['exec_dir'] = remote_galaxy_home
        outputs_directory = remote_job_config['outputs_directory']
        configs_directory = remote_job_config['configs_directory']
        working_directory = remote_job_config['working_directory']
        metadata_directory = remote_job_config['metadata_directory']
        # For metadata calculation, we need to build a list of output
        # file objects with real path indicating location on Galaxy server
        # and false path indicating location on compute server. Since
        # Pulsar disables from_work_dir copying as part of the job command
        # line, we need to take the list of output locations on the Pulsar
        # server (produced by self.get_output_files(job_wrapper)) and for
        # each work_dir output substitute the effective path on the Pulsar
        # server relative to the remote working directory as the
        # false_path to send the metadata command generation module.
        work_dir_outputs = self.get_work_dir_outputs(job_wrapper, job_working_directory=working_directory)
        outputs = [Bunch(false_path=os.path.join(outputs_directory, os.path.basename(path)), real_path=path)
                   for path in self.get_output_files(job_wrapper)]
        for output in outputs:
            for pulsar_workdir_path, real_path in work_dir_outputs:
                if real_path == output.real_path:
                    output.false_path = pulsar_workdir_path
        metadata_kwds['output_fnames'] = outputs
        metadata_kwds['compute_tmp_dir'] = metadata_directory
        metadata_kwds['config_root'] = remote_galaxy_home
        default_config_file = os.path.join(remote_galaxy_home, 'config/galaxy.ini')
        metadata_kwds['config_file'] = remote_system_properties.get('galaxy_config_file', default_config_file)
        metadata_kwds['dataset_files_path'] = remote_system_properties.get('galaxy_dataset_files_path', None)
        if PulsarJobRunner.__use_remote_datatypes_conf(client):
            remote_datatypes_config = remote_system_properties.get('galaxy_datatypes_config_file', None)
            if not remote_datatypes_config:
                log.warn(NO_REMOTE_DATATYPES_CONFIG)
                remote_datatypes_config = os.path.join(remote_galaxy_home, 'datatypes_conf.xml')
            metadata_kwds['datatypes_config'] = remote_datatypes_config
        else:
            integrates_datatypes_config = self.app.datatypes_registry.integrated_datatypes_configs
            # Ensure this file gets pushed out to the remote config dir.
            job_wrapper.extra_filenames.append(integrates_datatypes_config)
            metadata_kwds['datatypes_config'] = os.path.join(configs_directory, os.path.basename(integrates_datatypes_config))
    return metadata_kwds
def __new_composite_file(self, name, optional=False, mimetype=None, description=None,
                         substitute_name_with_metadata=None, is_binary=False,
                         space_to_tab=False, **kwds):
    kwds['name'] = name
    kwds['optional'] = optional
    kwds['mimetype'] = mimetype
    kwds['description'] = description
    kwds['substitute_name_with_metadata'] = substitute_name_with_metadata
    kwds['is_binary'] = is_binary
    kwds['space_to_tab'] = space_to_tab
    return Bunch(**kwds)
class ComponentReview(object):
    approved_states = Bunch(NO='no', YES='yes', NA='not_applicable')

    def __init__(self, repository_review_id=None, component_id=None, comment=None,
                 private=False, approved=False, rating=None, deleted=False):
        self.repository_review_id = repository_review_id
        self.component_id = component_id
        self.comment = comment
        self.private = private
        self.approved = approved
        self.rating = rating
        self.deleted = deleted
def populate( self ):
    param_dict = {}
    param_dict['fields'] = Bunch( **self.service_instance.form_values.content )
    param_dict['item'] = self.item
    param_dict['service'] = self.service_group.parent
    param_dict['service_instance'] = self.service_instance
    action_list = ActionSection( self.service_group.name, self.service_group.label )
    for item in self.service_group.items:
        if isinstance( item, ExternalServiceParameter ):
            param_dict[ item.name ] = item.get_value( param_dict )
        elif isinstance( item, ExternalServiceAction ):
            action_list.append( item.populate_action( param_dict ) )
        elif isinstance( item, ExternalServiceActionsGroup ):
            item.prepare_actions( param_dict, param_dict, action_list )
        else:
            raise Exception( 'unknown item type found' )
    self.param_dict = param_dict
    self.actions = action_list
def get_action_links(self):
    rval = []
    param_dict = {}
    param_dict['fields'] = Bunch(**self.service_instance.form_values.content)
    param_dict['item'] = self.item
    for item in self.service.items:
        if isinstance(item, ExternalServiceParameter):
            param_dict[item.name] = item.get_value(param_dict)
        elif isinstance(item, ExternalServiceAction):
            rval.append(item.get_action_access_link(self.item, trans, param_dict))
        elif isinstance(item, ExternalServiceActionsGroup):
            rval.extend(item.populate(self.service_instance, item, param_dict).get_action_links())
        else:
            # Raising a bare string is invalid; raise an Exception instead.
            raise Exception('unknown item type found')
    return rval
def test_lmod_dependency():
    with __test_base_path() as temp_directory:
        # Create mock lmod script that just exports a variable
        # the way "lmod load" would, but also validate correct
        # module name and version are coming through.
        mock_lmodexec = os.path.join(temp_directory, 'pouet')
        __write_script(mock_lmodexec, '''#!/bin/sh
if [ "$2" != "foomodule/1.0" ]; then
    exit 1
fi
echo 'FOO="bar"'
''')
        resolver = Bunch(lmodexec=mock_lmodexec, settargexec=None, modulepath='/path/to/modulefiles')
        dependency = LmodDependency(resolver, "foomodule", "1.0")
        __assert_foo_exported(dependency.shell_commands())
def _get_dataset_like_object( self, other_values ):
    # The returned object has file_name, state, and states attributes
    # equivalent to a DatasetAssociation.
    data = other_values.get( self.dataset, None )
    assert data, 'Base dataset could not be found in values provided to DisplayApplicationDataParameter'
    if isinstance( data, DisplayDataValueWrapper ):
        data = data.value
    if self.metadata:
        rval = getattr( data.metadata, self.metadata, None )
        assert rval, 'Unknown metadata name (%s) provided for dataset type (%s).' % ( self.metadata, data.datatype.__class__.name )
        return Bunch( file_name=rval.file_name, state=data.state, states=data.states, extension='data' )
    elif self.extensions and ( self.force_conversion or not isinstance( data.datatype, self.formats ) ):
        for ext in self.extensions:
            rval = data.get_converted_files_by_type( ext )
            if rval:
                return rval
        assert data.find_conversion_destination( self.formats )[0] is not None, \
            "No conversion path found for data param: %s" % self.name
        return None
    return data
def join_files(filename1, column1, filename2, column2, out_filename, split=None, buffer=1000000,
               keep_unmatched=False, keep_partial=False, keep_headers=False, index_depth=3,
               fill_options=None):
    # Return an identifier based upon a line.
    def get_identifier_by_line(line, column, split=None):
        if isinstance(line, str):
            fields = line.rstrip('\r\n').split(split)
            if column < len(fields):
                return fields[column]
        return None

    if fill_options is None:
        fill_options = Bunch(fill_unjoined_only=True, file1_columns=None, file2_columns=None)
    keep_headers_done = False
    out = open(out_filename, 'w')
    index = BufferedIndex(filename2, column2, split, buffer, index_depth)
    for line1 in open(filename1, 'r'):
        if keep_headers and not keep_headers_done:
            header1 = line1
            with open(filename2, 'r') as file2:
                header2 = file2.readline()
            header2 = re.sub(r'^#', '', header2)
            out.write("%s%s%s\n" % (header1.rstrip('\r\n'), split, header2.rstrip('\r\n')))
            keep_headers_done = True
            continue
        identifier = get_identifier_by_line(line1, column1, split)
        if identifier:
            written = False
            for line2 in index.get_lines_by_identifier(identifier):
                if not fill_options.fill_unjoined_only:
                    out.write("%s%s%s\n" % (fill_empty_columns(line1.rstrip('\r\n'), split, fill_options.file1_columns),
                                            split,
                                            fill_empty_columns(line2.rstrip('\r\n'), split, fill_options.file2_columns)))
                else:
                    out.write("%s%s%s\n" % (line1.rstrip('\r\n'), split, line2.rstrip('\r\n')))
                written = True
            if not written and keep_unmatched:
                out.write(fill_empty_columns(line1.rstrip('\r\n'), split, fill_options.file1_columns))
                if fill_options:
                    if fill_options.file2_columns:
                        out.write("%s%s" % (split, fill_empty_columns("", split, fill_options.file2_columns)))
                out.write("\n")
        elif keep_partial:
            out.write(fill_empty_columns(line1.rstrip('\r\n'), split, fill_options.file1_columns))
            if fill_options:
                if fill_options.file2_columns:
                    out.write("%s%s" % (split, fill_empty_columns("", split, fill_options.file2_columns)))
            out.write("\n")
    out.close()
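# A hedged usage sketch for join_files, assuming the helpers it references
# (BufferedIndex, fill_empty_columns, re) are importable from the same
# module. The file names and column indices below are hypothetical; the
# fill_options Bunch mirrors the default the function builds internally:
fill_options = Bunch(fill_unjoined_only=True, file1_columns=None, file2_columns=None)
join_files('left.tsv', 0, 'right.tsv', 0, 'joined.tsv',
           split='\t', keep_unmatched=True, fill_options=fill_options)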
def run_tool(self, testdef, history_id, resource_parameters={}):
    # We need to handle the case where we've uploaded a valid compressed file since the upload
    # tool will have uncompressed it on the fly.
    inputs_tree = testdef.inputs.copy()
    for key, value in inputs_tree.items():
        values = [value] if not isinstance(value, list) else value
        new_values = []
        for value in values:
            if isinstance(value, TestCollectionDef):
                hdca_id = self._create_collection(history_id, value)
                new_values = [dict(src="hdca", id=hdca_id)]
            elif value in self.uploads:
                new_values.append(self.uploads[value])
            else:
                new_values.append(value)
        inputs_tree[key] = new_values

    if resource_parameters:
        inputs_tree["__job_resource|__job_resource__select"] = "yes"
        for key, value in resource_parameters.items():
            inputs_tree["__job_resource|%s" % key] = value

    # HACK: Flatten single-value lists. Required when using expand_grouping.
    for key, value in inputs_tree.items():
        if isinstance(value, list) and len(value) == 1:
            inputs_tree[key] = value[0]

    submit_response = self.__submit_tool(history_id, tool_id=testdef.tool_id, tool_input=inputs_tree)
    submit_response_object = submit_response.json()
    try:
        return Bunch(
            inputs=inputs_tree,
            outputs=self.__dictify_outputs(submit_response_object),
            output_collections=self.__dictify_output_collections(submit_response_object),
            jobs=submit_response_object['jobs'],
        )
    except KeyError:
        message = "Error creating a job for these tool inputs - %s" % submit_response_object['err_msg']
        raise RunToolException(message, inputs_tree)
def create_dataset(name):
    ud = Bunch(name=name, file_type=None, dbkey=None)
    if nonfile_params.get('folder_id', False):
        replace_id = nonfile_params.get('replace_id', None)
        if replace_id not in [None, 'None']:
            replace_dataset = trans.sa_session.query(l.LibraryDataset).get(int(replace_id))
        else:
            replace_dataset = None
        library_bunch = upload_common.handle_library_params(trans, nonfile_params, nonfile_params.folder_id, replace_dataset)
    else:
        library_bunch = None
    return upload_common.new_upload(trans, ud, library_bunch=library_bunch,
                                    state=trans.app.model.HistoryDatasetAssociation.states.UPLOAD)
def main():
    parser = optparse.OptionParser()
    parser.add_option(
        '-b', '--buffer',
        dest='buffer',
        type='int', default=1000000,
        help='Number of lines to buffer at a time. Default: 1,000,000 lines. A buffer of 0 will attempt to use memory only.')
    parser.add_option(
        '-d', '--index_depth',
        dest='index_depth',
        type='int', default=3,
        help='Depth to use on filebased offset indexing. Default: 3.')
    parser.add_option(
        '-p', '--keep_partial',
        action='store_true',
        dest='keep_partial',
        default=False,
        help='Keep rows in first input which are missing identifiers.')
    parser.add_option(
        '-u', '--keep_unmatched',
        action='store_true',
        dest='keep_unmatched',
        default=False,
        help='Keep rows in first input which are not joined with the second input.')
    parser.add_option(
        '-f', '--fill_options_file',
        dest='fill_options_file',
        type='str', default=None,
        help='Fill empty columns with values from a JSONified file.')

    options, args = parser.parse_args()

    fill_options = None
    if options.fill_options_file is not None:
        try:
            if simplejson is None:
                raise simplejson_exception
            fill_options = Bunch(**stringify_dictionary_keys(simplejson.load(open(options.fill_options_file))))
        except Exception, e:
            print "Warning: Ignoring fill options due to simplejson error (%s)." % e
class JobState(object):
    """
    Encapsulate state of jobs.
    """
    runner_states = Bunch(WALLTIME_REACHED='walltime_reached',
                          MEMORY_LIMIT_REACHED='memory_limit_reached',
                          UNKNOWN_ERROR='unknown_error',
                          GLOBAL_WALLTIME_REACHED='global_walltime_reached',
                          OUTPUT_SIZE_LIMIT='output_size_limit')

    def __init__(self, job_wrapper, job_destination):
        self.runner_state_handled = False
        self.job_wrapper = job_wrapper
        self.job_destination = job_destination

    def set_defaults(self, files_dir):
        if self.job_wrapper is not None:
            id_tag = self.job_wrapper.get_id_tag()
            if files_dir is not None:
                self.job_file = JobState.default_job_file(files_dir, id_tag)
                self.output_file = os.path.join(files_dir, 'galaxy_%s.o' % id_tag)
                self.error_file = os.path.join(files_dir, 'galaxy_%s.e' % id_tag)
                self.exit_code_file = os.path.join(files_dir, 'galaxy_%s.ec' % id_tag)
            job_name = 'g%s' % id_tag
            if self.job_wrapper.tool.old_id:
                job_name += '_%s' % self.job_wrapper.tool.old_id
            if self.job_wrapper.user:
                job_name += '_%s' % self.job_wrapper.user
            self.job_name = ''.join(map(lambda x: x if x in (string.letters + string.digits + '_') else '_', job_name))

    @staticmethod
    def default_job_file(files_dir, id_tag):
        return os.path.join(files_dir, 'galaxy_%s.sh' % id_tag)

    @staticmethod
    def default_exit_code_file(files_dir, id_tag):
        return os.path.join(files_dir, 'galaxy_%s.ec' % id_tag)
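# runner_states acts as a lightweight enum over plain strings. A hedged
# usage sketch (the runner_state attribute assignment is hypothetical,
# illustrating how runner code could tag a failure mode):
job_state = JobState(job_wrapper=None, job_destination=None)
job_state.runner_state = JobState.runner_states.WALLTIME_REACHED
assert job_state.runner_state == 'walltime_reached'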
class RepositoryReview(object, Dictifiable):
    dict_collection_visible_keys = ('id', 'repository_id', 'changeset_revision', 'user_id', 'rating', 'deleted')
    dict_element_visible_keys = ('id', 'repository_id', 'changeset_revision', 'user_id', 'rating', 'deleted')
    approved_states = Bunch(NO='no', YES='yes')

    def __init__(self, repository_id=None, changeset_revision=None, user_id=None, rating=None, deleted=False):
        self.repository_id = repository_id
        self.changeset_revision = changeset_revision
        self.user_id = user_id
        self.rating = rating
        self.deleted = deleted
class Role(Base, Dictifiable, _HasTable):
    __tablename__ = 'role'

    id = Column(Integer, primary_key=True)
    create_time = Column(DateTime, default=now)
    update_time = Column(DateTime, default=now, onupdate=now)
    name = Column(String(255), index=True, unique=True)
    description = Column(TEXT)
    type = Column(String(40), index=True)
    deleted = Column(Boolean, index=True, default=False)
    repositories = relationship('RepositoryRoleAssociation', back_populates='role')
    groups = relationship('GroupRoleAssociation', back_populates='role')
    users = relationship('UserRoleAssociation', back_populates='role')

    dict_collection_visible_keys = ['id', 'name']
    dict_element_visible_keys = ['id', 'name', 'description', 'type']
    private_id = None
    types = Bunch(PRIVATE='private',
                  SYSTEM='system',
                  USER='user',
                  ADMIN='admin',
                  SHARING='sharing')

    def __init__(self, name=None, description=None, type=types.SYSTEM, deleted=False):
        self.name = name
        self.description = description
        self.type = type
        self.deleted = deleted

    @property
    def is_repository_admin_role(self):
        # A repository admin role must always be associated with a repository. The mapper returns an
        # empty list for those roles that have no repositories. This method will require changes if
        # new features are introduced that result in more than one role per repository.
        if self.repositories:
            return True
        return False
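# The types Bunch doubles as the default for __init__ and as symbolic
# constants for callers. A hedged sketch, assuming the declarative Base
# and mapped associations from the snippet above are configured:
role = Role(name='can-edit', description='Editors', type=Role.types.SHARING)
assert role.type == 'sharing'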
class Dataset( object ):
    states = Bunch( NEW='new',
                    UPLOAD='upload',
                    QUEUED='queued',
                    RUNNING='running',
                    OK='ok',
                    EMPTY='empty',
                    ERROR='error',
                    DISCARDED='discarded' )
    permitted_actions = get_permitted_actions( filter='DATASET' )
    file_path = "/tmp/"
    engine = None

    def __init__( self, id=None, state=None, external_filename=None, extra_files_path=None, file_size=None, purgable=True ):
        self.id = id
        self.state = state
        self.deleted = False
        self.purged = False
        self.purgable = purgable
        self.external_filename = external_filename
        self._extra_files_path = extra_files_path
        self.file_size = file_size

    def get_file_name( self ):
        if not self.external_filename:
            assert self.id is not None, "ID must be set before filename used (commit the object)"
            # First try filename directly under file_path
            filename = os.path.join( self.file_path, "dataset_%d.dat" % self.id )
            # Only use that filename if it already exists (backward compatibility),
            # otherwise construct hashed path
            if not os.path.exists( filename ):
                dir = os.path.join( self.file_path, *directory_hash_id( self.id ) )
                # Create directory if it does not exist
                try:
                    os.makedirs( dir )
                except OSError, e:
                    # File Exists is okay, otherwise reraise
                    if e.errno != errno.EEXIST:
                        raise
                # Return filename inside hashed directory
                return os.path.abspath( os.path.join( dir, "dataset_%d.dat" % self.id ) )
            else:
                return os.path.abspath( filename )
        else:
            # External datasets carry their own path
            return self.external_filename
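# states gives every Dataset a shared vocabulary of lifecycle values;
# a short hedged sketch of how calling code typically uses it:
dataset = Dataset( id=42, state=Dataset.states.NEW )
dataset.state = Dataset.states.OK
assert dataset.state == 'ok'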
class RepositoryGridFilterManager(object):
    """Provides filtered views of the many Tool Shed repository grids."""

    filters = Bunch(CERTIFIED_LEVEL_ONE='certified_level_one',
                    CERTIFIED_LEVEL_TWO='certified_level_two',
                    CERTIFIED_LEVEL_ONE_SUITES='certified_level_one_suites',
                    CERTIFIED_LEVEL_TWO_SUITES='certified_level_two_suites',
                    SUITES='suites')

    def get_grid_title(self, trans, trailing_string='', default=''):
        filter = self.get_filter(trans)
        if filter == self.filters.CERTIFIED_LEVEL_ONE:
            return "Certified 1 Repositories %s" % trailing_string
        if filter == self.filters.CERTIFIED_LEVEL_TWO:
            return "Certified 2 Repositories %s" % trailing_string
        if filter == self.filters.CERTIFIED_LEVEL_ONE_SUITES:
            return "Certified 1 Repository Suites %s" % trailing_string
        if filter == self.filters.CERTIFIED_LEVEL_TWO_SUITES:
            return "Certified 2 Repository Suites %s" % trailing_string
        if filter == self.filters.SUITES:
            return "Repository Suites %s" % trailing_string
        return "%s %s" % (default, trailing_string)

    def get_filter(self, trans):
        filter = trans.get_cookie(name='toolshedrepogridfilter')
        return filter or None

    def is_valid_filter(self, filter):
        if filter is None:
            return True
        for valid_key, valid_filter in self.filters.items():
            if filter == valid_filter:
                return True
        return False

    def set_filter(self, trans, **kwd):
        # Set a session cookie value with the selected filter.
        filter = kwd.get('filter', None)
        if filter is not None and self.is_valid_filter(filter):
            trans.set_cookie(value=filter, name='toolshedrepogridfilter')
        else:
            # If the filter is not valid, expire the cookie.
            trans.set_cookie(value=filter, name='toolshedrepogridfilter', age=-1)
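# Validation is just membership over the Bunch's values; a hedged sketch
# exercising is_valid_filter, which needs no trans object:
manager = RepositoryGridFilterManager()
assert manager.is_valid_filter('suites')
assert not manager.is_valid_filter('bogus')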
def __init__(self, trans, plugin):
    plugin_config = plugin.config

    self.trans = trans
    self.log = log

    self.attr = Bunch()
    self.attr.viz_id = plugin_config["name"].lower()
    self.attr.history_id = trans.security.encode_id( trans.history.id )
    self.attr.galaxy_config = trans.app.config
    self.attr.galaxy_root_dir = os.path.abspath(self.attr.galaxy_config.root)
    self.attr.root = web.url_for("/")
    self.attr.app_root = self.attr.root + "plugins/interactive_environments/" + self.attr.viz_id + "/static/"

    plugin_path = os.path.abspath( plugin.path )

    # Store our template and configuration path
    self.attr.our_config_dir = os.path.join(plugin_path, "config")
    self.attr.our_template_dir = os.path.join(plugin_path, "templates")
    self.attr.HOST = trans.request.host.rsplit(':', 1)[0]

    self.load_deploy_config()
    self.attr.docker_hostname = self.attr.viz_config.get("docker", "docker_hostname")

    self.attr.proxy_request = trans.app.proxy_manager.setup_proxy( trans, host=self.attr.docker_hostname )
    self.attr.proxy_url = self.attr.proxy_request[ 'proxy_url' ]
    self.attr.PORT = self.attr.proxy_request[ 'proxied_port' ]

    # Generate per-request passwords the IE plugin can use to configure
    # the destination container.
    self.notebook_pw_salt = self.generate_password(length=12)
    self.notebook_pw = self.generate_password(length=24)

    self.temp_dir = os.path.abspath( tempfile.mkdtemp() )
    if self.attr.viz_config.getboolean("docker", "wx_tempdir"):
        # Ensure permissions are set
        try:
            os.chmod( self.temp_dir, os.stat(self.temp_dir).st_mode | stat.S_IXOTH )
        except Exception:
            log.error( "Could not change permissions of tmpdir %s" % self.temp_dir )
def __setup_object_store(self, conf):
    if "object_store_config_file" not in conf and "object_store_config" not in conf:
        self.object_store = None
        return

    config_obj_kwds = dict(
        file_path=conf.get("object_store_file_path", None),
        object_store_check_old_style=False,
        job_working_directory=conf.get("object_store_job_working_directory", None),
        new_file_path=conf.get("object_store_new_file_path", tempdir),
        umask=int(conf.get("object_store_umask", "0000")),
        jobs_directory=None,
    )
    config_dict = None
    if conf.get("object_store_config_file"):
        config_obj_kwds["object_store_config_file"] = conf['object_store_config_file']
    else:
        config_dict = conf["object_store_config"]

    object_store_config = Bunch(**config_obj_kwds)
    self.object_store = build_object_store_from_config(object_store_config, config_dict=config_dict)
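# A hedged sketch of the conf mapping __setup_object_store expects; the
# keys are inferred from the lookups above and the values are hypothetical:
conf = {
    "object_store_config_file": "objectstore_conf.xml",
    "object_store_file_path": "/data/objects",
    "object_store_umask": "0077",
}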
class RBACAgent:
    """Class that handles galaxy community space security"""
    permitted_actions = Bunch()

    def associate_components(self, **kwd):
        # Raising a plain string is invalid; raise an Exception instead.
        raise Exception('No valid method of associating provided components: %s' % kwd)

    def associate_user_role(self, user, role):
        raise Exception('No valid method of associating a user with a role')

    def convert_permitted_action_strings(self, permitted_action_strings):
        """
        When getting permitted actions from an untrusted source like a
        form, ensure that they match our actual permitted actions.
        """
        return filter(lambda x: x is not None,
                      [self.permitted_actions.get(action_string) for action_string in permitted_action_strings])

    def create_private_user_role(self, user):
        raise Exception("Unimplemented Method")

    def get_action(self, name, default=None):
        """Get a permitted action by its dict key or action name"""
        for k, v in self.permitted_actions.items():
            if k == name or v.action == name:
                return v
        return default

    def get_actions(self):
        """Get all permitted actions as a list of Action objects"""
        return self.permitted_actions.__dict__.values()

    def get_item_actions(self, action, item):
        raise Exception('No valid method of retrieving action (%s) for item %s.' % (action, item))

    def get_private_user_role(self, user):
        raise Exception("Unimplemented Method")
def _precreate_fetched_hdas(trans, history, target, outputs):
    for item in target.get("elements", []):
        name = item.get("name", None)
        if name is None:
            src = item.get("src", None)
            if src == "url":
                url = item.get("url")
                if name is None:
                    name = url.split("/")[-1]
            elif src == "path":
                path = item["path"]
                if name is None:
                    name = os.path.basename(path)

        file_type = item.get("ext", "auto")
        dbkey = item.get("dbkey", "?")
        uploaded_dataset = Bunch(
            type='file',
            name=name,
            file_type=file_type,
            dbkey=dbkey
        )
        data = upload_common.new_upload(trans, '', uploaded_dataset, library_bunch=None, history=history)
        outputs.append(data)
        item["object_id"] = data.id
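# A hedged sketch of the target mapping _precreate_fetched_hdas walks;
# keys are inferred from the get() calls above, values are hypothetical:
target = {
    "elements": [
        {"src": "url", "url": "https://example.org/sample.fastq", "ext": "fastqsanger", "dbkey": "hg38"},
        {"src": "path", "path": "/data/reads.bam"},
    ],
}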
def __init__(self, config=None, **kwargs):
    self.config = config or MockAppConfig(**kwargs)
    self.security = self.config.security
    self.name = kwargs.get('name', 'galaxy')
    self.object_store = objectstore.build_object_store_from_config(self.config)
    self.model = mapping.init("/tmp", "sqlite:///:memory:", create_tables=True, object_store=self.object_store)
    self.security_agent = self.model.security_agent
    self.visualizations_registry = MockVisualizationsRegistry()
    self.tag_handler = tags.GalaxyTagManager(self.model.context)
    self.quota_agent = quota.QuotaAgent(self.model)
    self.init_datatypes()
    self.job_config = Bunch(dynamic_params=None)
    self.tool_data_tables = {}
    self.dataset_collections_service = None
    self.container_finder = NullContainerFinder()
    self._toolbox_lock = MockLock()
    self.genome_builds = GenomeBuilds(self)
    self.job_queue = NoopQueue()
def __new_composite_file(
    self,
    name,
    optional=False,
    mimetype=None,
    description=None,
    substitute_name_with_metadata=None,
    is_binary=False,
    to_posix_lines=True,
    space_to_tab=False,
    **kwds,
):
    kwds["name"] = name
    kwds["optional"] = optional
    kwds["mimetype"] = mimetype
    kwds["description"] = description
    kwds["substitute_name_with_metadata"] = substitute_name_with_metadata
    kwds["is_binary"] = is_binary
    kwds["to_posix_lines"] = to_posix_lines
    kwds["space_to_tab"] = space_to_tab
    composite_file = Bunch(**kwds)
    return composite_file
def init( file_path, url, engine_options={}, create_tables=False ):
    """Connect mappings to the database"""
    # Load the appropriate db module
    load_egg_for_url( url )
    # Create the database engine
    engine = create_engine( url, **engine_options )
    # Connect the metadata to the database.
    metadata.bind = engine
    # Clear any existing contextual sessions and reconfigure
    Session.remove()
    Session.configure( bind=engine )
    # Create tables if needed
    if create_tables:
        metadata.create_all()
    # Pack everything into a bunch
    result = Bunch( **globals() )
    result.engine = engine
    result.session = Session
    result.create_tables = create_tables
    # Load local galaxy security policy
    result.security_agent = CommunityRBACAgent( result )
    result.shed_counter = ShedCounter( result )
    return result
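# A hedged usage sketch of init(), assuming the module-level metadata and
# Session it references are configured; the URL is an in-memory SQLite DB:
model = init( '/tmp', 'sqlite:///:memory:', create_tables=True )
assert model.create_tables
# model.engine, model.session, and model.security_agent are all reachable
# as attributes thanks to Bunch.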
def create_dataset(name):
    ud = Bunch(name=name, file_type=None, dbkey=None)
    if nonfile_params.get('folder_id', False):
        replace_id = nonfile_params.get('replace_id', None)
        if replace_id not in [None, 'None']:
            replace_dataset = trans.sa_session.query(trans.app.model.LibraryDataset).get(trans.security.decode_id(replace_id))
        else:
            replace_dataset = None
        # FIXME: Instead of passing params here (which have been processed by util.Params()), the original kwd
        # should be passed so that complex objects that may have been included in the initial request remain.
        library_bunch = upload_common.handle_library_params(trans, nonfile_params, nonfile_params.folder_id, replace_dataset)
    else:
        library_bunch = None
    return upload_common.new_upload(trans, cntrller, ud, library_bunch=library_bunch,
                                    state=trans.app.model.HistoryDatasetAssociation.states.UPLOAD)
class ComponentReview(object, Dictifiable):
    dict_collection_visible_keys = ('id', 'repository_review_id', 'component_id', 'private', 'approved', 'rating', 'deleted')
    dict_element_visible_keys = ('id', 'repository_review_id', 'component_id', 'private', 'approved', 'rating', 'deleted')
    approved_states = Bunch(NO='no', YES='yes', NA='not_applicable')

    def __init__(self, repository_review_id=None, component_id=None, comment=None,
                 private=False, approved=False, rating=None, deleted=False):
        self.repository_review_id = repository_review_id
        self.component_id = component_id
        self.comment = comment
        self.private = private
        self.approved = approved
        self.rating = rating
        self.deleted = deleted
def test_subcollection_multirun_with_state_updates(self):
    self._init_tool(REPEAT_COLLECTION_PARAM_CONTENTS)
    hda1, hda2 = self.__add_dataset(1), self.__add_dataset(2)
    collection = self.__history_dataset_collection_for([hda1, hda2], collection_type="list:paired")
    collection_id = self.app.security.encode_id(collection.id)
    self.app.dataset_collections_service = Bunch(match_collections=lambda collections: None)

    template, template_vars = self.__handle_with_incoming(repeat1_add="dummy")
    state = self.__assert_rerenders_tool_without_errors(template, template_vars)
    assert len(state.inputs["repeat1"]) == 1

    template, template_vars = self.__handle_with_incoming(state, **{
        "repeat1_0|param2|__collection_multirun__": "%s|paired" % collection_id,
        "repeat1_add": "dummy",
    })
    state = self.__assert_rerenders_tool_without_errors(template, template_vars)
    assert state.inputs["repeat1"][0]["param2|__collection_multirun__"] == "%s|paired" % collection_id