Code Example #1
File: __init__.py  Project: boratonAJ/bioblend
    def create_folder(self, library_id, folder_name, description=None, base_folder_id=None):
        """
        Create a folder in a library.

        :type library_id: str
        :param library_id: library id to use

        :type folder_name: str
        :param folder_name: name of the new folder in the data library

        :type description: str
        :param description: description of the new folder in the data library

        :type base_folder_id: str
        :param base_folder_id: id of the folder where to create the new folder.
          If not provided, the root folder will be used
        """
        # Get root folder ID if no ID was provided
        if base_folder_id is None:
            base_folder_id = self._get_root_folder_id(library_id)
        # Compose the payload
        payload = {}
        payload['name'] = folder_name
        payload['folder_id'] = base_folder_id
        payload['create_type'] = 'folder'
        if description is not None:
            payload['description'] = description
        return Client._post(self, payload, id=library_id, contents=True)
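A minimal usage sketch of this method as called through bioblend's GalaxyInstance (the server URL, API key and library name are placeholders):

from bioblend.galaxy import GalaxyInstance

# Placeholder URL and API key -- substitute values for a real Galaxy server.
gi = GalaxyInstance(url='https://galaxy.example.org', key='<your-api-key>')

# Create a folder under the root folder of an existing data library.
library = gi.libraries.get_libraries(name='My Library')[0]
folder = gi.libraries.create_folder(library['id'], 'raw_reads',
                                    description='FASTQ files as received')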
Code Example #2
File: __init__.py  Project: abretaud/bioblend
    def update_repository(self, id, tar_ball_path, commit_message=None):
        url = self.gi._make_url(self, id) + '/changeset_revision'
        payload = {}
        if commit_message is not None:
            payload["commit_message"] = commit_message
        payload["file"] = open(tar_ball_path, "rb")
        return Client._post(self, id=id, payload=payload, files_attached=True, url=url)
Code Example #3
File: __init__.py  Project: ratzeni/bioblend
    def search_jobs(self, job_info):
        """
        Return jobs for the current user based on the payload content

        :type   job_info: dict
        :param  job_info: Dictionary containing description of requested job.
          This is in the same format as a request to POST /api/tools would take
          to initiate a job

        :rtype:     list
        :returns:   list of dictionaries containing summary job information of the jobs that match the requested job run

        This method is designed to scan the list of previously run jobs and find records of jobs that had
        the exact same input parameters and datasets. This can be used to minimize the amount of repeated
        work by simply recycling the old results.

        """

        payload = job_info

        url = self.gi._make_url(self)
        url = '/'.join([url, "search"])

        return Client._post(self, url=url, payload=payload)
Code Example #4
File: __init__.py  Project: jmchilton/bioblend
    def create_local_user(self, username, user_email, password):
        """
        Create a new Galaxy user.

        .. note::
          For this method to work, the Galaxy instance must have the
          ``allow_user_creation`` option set to ``True`` and
          ``use_remote_user`` option set to ``False`` in the
          ``config/galaxy.ini`` configuration file.

        :type username: str
        :param username: Username of user to be created

        :type user_email: str
        :param user_email: Email of user to be created

        :type password: str
        :param password: password of user to be created

        :rtype: dict
        :return: dictionary containing information about the user
        """
        payload = {}
        payload['username'] = username
        payload['email'] = user_email
        payload['password'] = password
        return Client._post(self, payload)
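A hedged usage sketch (placeholder URL and key; user creation requires an admin API key plus the configuration options noted in the docstring):

from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url='https://galaxy.example.org', key='<admin-api-key>')
new_user = gi.users.create_local_user('alice', 'alice@example.org', 's3cret-pass')
print(new_user['id'])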
Code Example #5
File: __init__.py  Project: bgruening/bioblend
    def update_repository(self, id, tar_ball_path, commit_message=None):
        """
        Update the contents of a Tool Shed repository with specified tar ball.

        :type id: str
        :param id: Encoded repository ID

        :type tar_ball_path: str
        :param tar_ball_path: Path to file containing tar ball to upload.

        :type commit_message: str
        :param commit_message: Commit message used for the underlying Mercurial
          repository backing Tool Shed repository.

        :rtype: dict
        :return: A dictionary that includes repository content warnings. Most
          valid uploads will produce no such warnings, and generally an
          exception will be raised if there are problems.
          For example a successful upload will look like::

            {u'content_alert': u'',
             u'message': u''}

        .. versionadded:: 0.5.2
        """
        url = '/'.join([self.gi._make_url(self, id), 'changeset_revision'])
        payload = {
            'file': attach_file(tar_ball_path)
        }
        if commit_message is not None:
            payload['commit_message'] = commit_message
        try:
            return Client._post(self, id=id, payload=payload, files_attached=True, url=url)
        finally:
            payload['file'].close()
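The try/finally here guarantees the attached tar ball handle is closed even when the POST raises. A usage sketch via bioblend's ToolShedInstance (URL, key, repository ID and tar ball path are placeholders):

from bioblend.toolshed import ToolShedInstance

ts = ToolShedInstance(url='https://testtoolshed.g2.bx.psu.edu', key='<your-api-key>')
# The encoded repository ID would come from e.g. ts.repositories.get_repositories().
result = ts.repositories.update_repository('8cf91205f2f737f4', 'my_tool.tar.gz',
                                           commit_message='Fix tool wrapper')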
Code Example #6
File: __init__.py  Project: boratonAJ/bioblend
    def copy_from_dataset(self, library_id, dataset_id, folder_id=None, message=''):
        """
        Copy a Galaxy dataset into a library.

        :type library_id: str
        :param library_id: id of the library where to place the uploaded file

        :type dataset_id: str
        :param dataset_id: id of the dataset to copy from

        :type folder_id: str
        :param folder_id: id of the folder where to place the uploaded files.
          If not provided, the root folder will be used

        :type message: str
        :param message: message for copying action
        """
        if folder_id is None:
            folder_id = self._get_root_folder_id(library_id)
        payload = {}
        payload['folder_id'] = folder_id
        payload['create_type'] = 'file'
        payload['from_hda_id'] = dataset_id
        payload['ldda_message'] = message
        return Client._post(self, payload, id=library_id, contents=True)
Code Example #7
File: __init__.py  Project: Intel-HSS/bioblend
    def create_group(self, group_name, user_ids=[], role_ids=[]):
        """
        Create a new group.

        :type group_name: str
        :param group_name: A name for the new group

        :type user_ids: list
        :param user_ids: A list of encoded user IDs to add to the new group

        :type role_ids: list
        :param role_ids: A list of encoded role IDs to add to the new group

        :rtype: list
        :return: A (size 1) list with newly created group
                 details, like::

                    [{u'id': u'7c9636938c3e83bf',
                      u'model_class': u'Group',
                      u'name': u'My Group Name',
                      u'url': u'/api/groups/7c9636938c3e83bf'}]
        """
        payload = {}
        payload['name'] = group_name
        payload['user_ids'] = user_ids
        payload['role_ids'] = role_ids
        return Client._post(self, payload)
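A usage sketch (placeholder URL/key; the encoded user ID shown is illustrative and would come from gi.users.get_users()):

from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url='https://galaxy.example.org', key='<admin-api-key>')
group = gi.groups.create_group('My Group Name',
                               user_ids=['f2db41e1fa331b3e'],
                               role_ids=[])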
Code Example #8
File: __init__.py  Project: boratonAJ/bioblend
    def create_library(self, name, description=None, synopsis=None):
        """
        Create a data library with the properties defined in the arguments.

        :type name: str
        :param name: Name of the new data library

        :type description: str
        :param description: Optional data library description

        :type synopsis: str
        :param synopsis: Optional data library synopsis

        :rtype: dict
        :return: details of the created library.
          For example::

            {"id": "f740ab636b360a70",
             "name": "Library from bioblend",
             "url": "/api/libraries/f740ab636b360a70"}

        """
        payload = {'name': name}
        if description:
            payload['description'] = description
        if synopsis:
            payload['synopsis'] = synopsis
        return Client._post(self, payload)
Code Example #9
File: __init__.py  Project: andrewjrobinson/bioblend
    def set_library_permissions(self, library_id, access_in=None, modify_in=None,
                                add_in=None, manage_in=None):
        """
        Set the permissions for a library. Note: this will override all
        security for this library even if you leave out a permission type.

        ``access_in``, ``modify_in``, ``add_in`` and ``manage_in`` expect a
        list of user IDs or ``None``.
        """
        payload = {}
        if access_in:
            payload['LIBRARY_ACCESS_in'] = access_in
        if modify_in:
            payload['LIBRARY_MODIFY_in'] = modify_in
        if add_in:
            payload['LIBRARY_ADD_in'] = add_in
        if manage_in:
            payload['LIBRARY_MANAGE_in'] = manage_in

        # create the url
        url = self.url
        url = '/'.join([url, library_id, 'permissions'])

        return Client._post(self, payload, url=url)
Code Example #10
File: __init__.py  Project: bgruening/bioblend
    def create_library(self, name, description=None, synopsis=None):
        """
        Create a data library with the properties defined in the arguments.

        :type name: str
        :param name: Name of the new data library

        :type description: str
        :param description: Optional data library description

        :type synopsis: str
        :param synopsis: Optional data library synopsis

        :rtype: dict
        :return: Details of the created library.
          For example::

            {'id': 'f740ab636b360a70',
             'name': 'Library from bioblend',
             'url': '/api/libraries/f740ab636b360a70'}
        """
        payload = {'name': name}
        if description:
            payload['description'] = description
        if synopsis:
            payload['synopsis'] = synopsis
        return Client._post(self, payload)
Code Example #11
File: __init__.py  Project: boratonAJ/bioblend
    def set_library_permissions(self, library_id, access_in=None,
                                modify_in=None, add_in=None, manage_in=None):
        """
        Set the permissions for a library.  Note: it will override all security
        for this library even if you leave out a permission type.

        :type library_id: str
        :param library_id: id of the library

        :type access_in: list
        :param access_in: list of role ids

        :type modify_in: list
        :param modify_in: list of role ids

        :type add_in: list
        :param add_in: list of role ids

        :type manage_in: list
        :param manage_in: list of role ids
        """

        payload = {}
        if access_in:
            payload['LIBRARY_ACCESS_in'] = access_in
        if modify_in:
            payload['LIBRARY_MODIFY_in'] = modify_in
        if add_in:
            payload['LIBRARY_ADD_in'] = add_in
        if manage_in:
            payload['LIBRARY_MANAGE_in'] = manage_in
        url = '/'.join([self.gi._make_url(self, library_id), 'permissions'])
        return Client._post(self, payload, url=url)
Code Example #12
File: __init__.py  Project: AAFC-MBB/bioblend
    def create_history_tag(self, history_id, tag):
        """
        Create history tag

        :type history_id: str
        :param history_id: Encoded history ID

        :type tag: str
        :param tag: Add tag to history

        :rtype: dict
        :return: A dictionary with information regarding the tag.
          For example::

            {'id': 'f792763bee8d277a',
             'model_class': 'HistoryTagAssociation',
             'user_tname': 'NGS_PE_RUN',
             'user_value': None}
        """

        # empty payload since we are adding the new tag using the url
        payload = {}

        # creating the url
        url = self.url
        url = '/'.join([url, history_id, 'tags', tag])

        return Client._post(self, payload, url=url)
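Since the tag is carried in the URL rather than the payload, a call reduces to (placeholder URL/key):

from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url='https://galaxy.example.org', key='<your-api-key>')
history = gi.histories.create_history(name='NGS run')
tag = gi.histories.create_history_tag(history['id'], 'NGS_PE_RUN')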
Code Example #13
File: __init__.py  Project: odoppelt/bioblend
    def run_workflow(self, workflow_id, dataset_map, params=None,
                     history_id=None, history_name=None,
                     import_inputs_to_history=False):
        """
        Run the workflow identified by ``workflow_id``

        :type workflow_id: string
        :param workflow_id: Encoded workflow ID

        :type dataset_map: string or dict
        :param dataset_map: A mapping of workflow inputs to datasets. The datasets
                            source can be a LibraryDatasetDatasetAssociation (``ldda``),
                            LibraryDataset (``ld``), or HistoryDatasetAssociation (``hda``).
                            The map must be in the following format:
                            ``{'<input>': {'id': <encoded dataset ID>, 'src': '[ldda, ld, hda]'}}``
                            (eg, ``{'23': {'id': '29beef4fadeed09f', 'src': 'ld'}}``)
        :type params: string or dict
        :param params: A mapping of tool parameters that are non-datasets parameters. The map must be in the
                         following format:
                         ``{'blastn': {'param': 'evalue', 'value': '1e-06'}}``

        :type history_id: string
        :param history_id: The encoded history ID where to store the workflow output.
                           ``history_id`` OR ``history_name`` should be provided but not both!

        :type history_name: string
        :param history_name: Create a new history with the given name to store the
                             workflow output. ``history_id`` OR ``history_name``
                             should be provided but not both!

        :type import_inputs_to_history: bool
        :param import_inputs_to_history: If ``True``, used workflow inputs will be imported
                                         into the history. If ``False``, only workflow outputs
                                         will be visible in the given history.

        :rtype: dict
        :return: A dict containing the history ID where the outputs are placed as well as
                 output dataset IDs.
                 For example::

                  {u'history': u'64177123325c9cfd',
                   u'outputs': [u'aa4d3084af404259']}

        """
        payload = {}
        payload['workflow_id'] = workflow_id
        payload['ds_map'] = dataset_map
        
        if params:
            payload['parameters'] = params
            
        if history_id:
            payload['history'] = 'hist_id={0}'.format(history_id)
        elif history_name:
            payload['history'] = history_name
        else:
            print "Must provide history_id or history_name argument"
        if import_inputs_to_history is False:
            payload['no_add_to_history'] = True
        return Client._post(self, payload)
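A usage sketch showing how the ds_map argument is built (placeholder URL/key; the workflow name and dataset ID are illustrative):

from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url='https://galaxy.example.org', key='<your-api-key>')
workflow = gi.workflows.get_workflows(name='My mapping workflow')[0]
# Map the workflow's input step '23' to a library dataset ('ld' source).
dataset_map = {'23': {'id': '29beef4fadeed09f', 'src': 'ld'}}
result = gi.workflows.run_workflow(workflow['id'], dataset_map,
                                   history_name='run_workflow output')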
Code Example #14
    def create_history(self, name=None):
        """
        Create a new history, optionally setting the ``name``.
        """
        payload = {}
        if name is not None:
            payload['name'] = name
        return Client._post(self, payload)
Code Example #15
    def undelete_history(self, history_id):
        """
        Undelete a history
        """
        url = self.gi._make_url(self, history_id, deleted=True)
        # Append the 'undelete' action to the history URL
        url = '/'.join([url, 'undelete'])
        return Client._post(self, payload={}, url=url)
Code Example #16
    def upload_dataset_from_library(self, history_id, lib_dataset_id):
        """
        Upload a dataset into the history from a library. Requires the
        library dataset ID, which can be obtained from the library
        contents.
        """
        payload = {'from_ld_id': lib_dataset_id}
        return Client._post(self, payload, id=history_id, contents=True)
Code Example #17
File: __init__.py  Project: bzeitner/bioblend
    def import_shared_workflow(self, workflow_id):
        """
        Imports a shared workflow.
        """
        payload = {}
        payload['workflow_id'] = workflow_id
        url = self.gi._make_url(self)
        url = '/'.join([url, 'import'])
        return Client._post(self, url=url, payload=payload)
Code Example #18
File: __init__.py  Project: kidaak/bioblend
    def install_genome(
        self,
        func="download",
        source=None,
        dbkey=None,
        ncbi_name=None,
        ensembl_dbkey=None,
        url_dbkey=None,
        indexers=None,
    ):
        """
        Download and/or index a genome.

        :type dbkey: str
        :param dbkey: DB key of the build to download, ignored unless 'UCSC' is specified as the source

        :type ncbi_name: str
        :param ncbi_name: NCBI's genome identifier, ignored unless NCBI is specified as the source

        :type ensembl_dbkey: str
        :param ensembl_dbkey: Ensembl's genome identifier, ignored unless Ensembl is specified as the source

        :type url_dbkey: str
        :param url_dbkey: DB key to use for this build, ignored unless URL is specified as the source

        :type source: str
        :param source: Data source for this build. Can be: UCSC, Ensembl, NCBI, URL

        :type indexers: list
        :param indexers: POST array of indexers to run after downloading (indexers[] = first, indexers[] = second, ...)

        :type func: str
        :param func: Allowed values: 'download', Download and index; 'index', Index only

        :rtype: dict
        :return: dict( status: 'ok', job: <job ID> )
                 If error:
                 dict( status: 'error', error: <error message> )
        """
        payload = {}
        if source:
            payload["source"] = source
        if func:
            payload["func"] = func
        if dbkey:
            payload["dbkey"] = dbkey
        if ncbi_name:
            payload["ncbi_name"] = ncbi_name
        if ensembl_dbkey:
            payload["ensembl_dbkey"] = ensembl_dbkey
        if url_dbkey:
            payload["url_dbkey"] = url_dbkey
        if indexers:
            payload["indexers"] = indexers
        return Client._post(self, payload)
Code Example #19
    def run_tool(self, history_id, tool_id, tool_inputs):
        """
        Runs tool specified by ``tool_id`` in history indicated
        by ``history_id`` with inputs from ``dict`` ``tool_inputs``.
        """
        payload = {}
        payload["history_id"] = history_id
        payload["tool_id"] = tool_id
        payload["inputs"] = dumps(tool_inputs)
        return Client._post(self, payload)
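A usage sketch (placeholder URL/key; tool input names depend on the tool's XML wrapper, so 'input1' is illustrative only):

from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url='https://galaxy.example.org', key='<your-api-key>')
history = gi.histories.create_history(name='tool test')
inputs = {'input1': {'src': 'hda', 'id': '<encoded-dataset-id>'}}
result = gi.tools.run_tool(history['id'], 'Cut1', inputs)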
Code Example #20
File: __init__.py  Project: Intel-HSS/bioblend
    def _do_upload(self, **keywords):
        """
        Set up the POST request and do the actual data upload to a data library.
        This method should not be called directly but instead refer to the methods
        specific for the desired type of data upload.
        """
        library_id = keywords['library_id']
        folder_id = keywords.get('folder_id', None)
        if folder_id is None:
            folder_id = self._get_root_folder_id(library_id)
        files_attached = False
        # Compose the payload dict
        payload = {}
        payload['folder_id'] = folder_id
        payload['file_type'] = keywords.get('file_type', 'auto')
        payload['dbkey'] = keywords.get('dbkey', '?')
        payload['create_type'] = 'file'
        if keywords.get("roles", None):
            payload["roles"] = keywords["roles"]
        if keywords.get("link_data_only", None) and keywords['link_data_only'] != 'copy_files':
            payload["link_data_only"] = 'link_to_files'
        if keywords.get('remote_dataset', None):
            payload['remote_dataset'] = keywords['remote_dataset']
        if keywords.get('uuid_list', None):
            payload['uuid_list'] = keywords['uuid_list']
        if keywords.get('remote_dataset_type_list', None):
            payload['remote_dataset_type_list'] = keywords['remote_dataset_type_list']
        if keywords.get('file_size_list', None):
            payload['file_size_list'] = keywords['file_size_list']
        if keywords.get('line_count_list', None):
            payload['line_count_list'] = keywords['line_count_list']
        # upload options
        if keywords.get('file_url', None) is not None:
            payload['upload_option'] = 'upload_file'
            payload['files_0|url_paste'] = keywords['file_url']
        elif keywords.get('pasted_content', None) is not None:
            payload['upload_option'] = 'upload_file'
            payload['files_0|url_paste'] = keywords['pasted_content']
        elif keywords.get('server_dir', None) is not None:
            payload['upload_option'] = 'upload_directory'
            payload['server_dir'] = keywords['server_dir']
        elif keywords.get('file_local_path', None) is not None:
            payload['upload_option'] = 'upload_file'
            payload['files_0|file_data'] = attach_file(keywords['file_local_path'])
            files_attached = True
        elif keywords.get("filesystem_paths", None) is not None:
            payload["upload_option"] = "upload_paths"
            payload["filesystem_paths"] = keywords["filesystem_paths"]

        try:
            return Client._post(self, payload, id=library_id, contents=True,
                                files_attached=files_attached)
        finally:
            if payload.get('files_0|file_data', None) is not None:
                payload['files_0|file_data'].close()
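_do_upload() is internal; callers normally reach it through the public upload helpers on the libraries client. A hedged sketch (placeholder URL/key and file path):

from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url='https://galaxy.example.org', key='<your-api-key>')
library = gi.libraries.create_library('Uploads')
# upload_file_from_local_path() ends up in _do_upload() with
# keywords['file_local_path'] set, i.e. the attach_file() branch above.
gi.libraries.upload_file_from_local_path(library['id'], '/tmp/reads.fastq',
                                         file_type='fastqsanger')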
Code Example #21
    def import_workflow_json(self, workflow_json):
        """
        Imports a new workflow given a json representation of a previously exported
        workflow.
        """
        payload = {}
        payload['workflow'] = workflow_json

        url = self.gi._make_url(self)
        url = '/'.join([url, "upload"])
        return Client._post(self, url=url, payload=payload)
Code Example #22
File: __init__.py  Project: jmchilton/bioblend
    def undelete_history(self, history_id):
        """
        Undelete a history

        :type history_id: str
        :param history_id: Encoded history ID
        """
        url = self.gi._make_url(self, history_id, deleted=True)
        # Append the 'undelete' action to the history URL
        url = "/".join([url, "undelete"])
        return Client._post(self, payload={}, url=url)
Code Example #23
File: activity.py  Project: galaxyproject/planemo
    def create_collection_func(element_identifiers, collection_type):
        # history_id and user_gi are captured from the enclosing scope.
        payload = {
            "name": "dataset collection",
            "instance_type": "history",
            "history_id": history_id,
            "element_identifiers": element_identifiers,
            "collection_type": collection_type,
            "fields": None if collection_type != "record" else "auto",
        }
        dataset_collections_url = user_gi.url + "/dataset_collections"
        dataset_collection = Client._post(user_gi.histories, payload, url=dataset_collections_url)
        return dataset_collection
Code Example #24
File: __init__.py  Project: bgruening/bioblend
    def create_form(self, form_xml_text):
        """
        Create a new form.

        :type   form_xml_text: str
        :param  form_xml_text: Form XML used to create the form on the Galaxy instance

        :rtype:     str
        :returns:   Unique URL of the newly created form, including its encoded id
        """
        payload = form_xml_text
        return Client._post(self, payload=payload)
Code Example #25
File: __init__.py  Project: abretaud/bioblend
    def create_dataset_collection(self, history_id, collection_description):
        try:
            collection_description = collection_description.to_dict()
        except AttributeError:
            pass
        payload = dict(
            name=collection_description["name"],
            type="dataset_collection",
            collection_type=collection_description["collection_type"],
            element_identifiers=collection_description["element_identifiers"],
        )
        return Client._post(self, payload, id=history_id, contents=True)
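The to_dict() call lets this method accept either a plain dict or one of the helper classes from bioblend.galaxy.dataset_collections. A sketch assuming those helpers (placeholder URL/key and IDs):

from bioblend.galaxy import GalaxyInstance
from bioblend.galaxy.dataset_collections import (CollectionDescription,
                                                 HistoryDatasetElement)

gi = GalaxyInstance(url='https://galaxy.example.org', key='<your-api-key>')
description = CollectionDescription(
    name='my collection',
    elements=[HistoryDatasetElement(name='sample1', id='<encoded-dataset-id>')],
)
collection = gi.histories.create_dataset_collection('<encoded-history-id>', description)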
Code Example #26
File: __init__.py  Project: abretaud/bioblend
    def upload_dataset_from_library(self, history_id, lib_dataset_id):
        """
        Upload a dataset into the history from a library. Requires the
        library dataset ID, which can be obtained from the library
        contents.
        """
        payload = {
            'content': lib_dataset_id,
            'source': 'library',
            'from_ld_id': lib_dataset_id,  # compatibility with old API
        }
        return Client._post(self, payload, id=history_id, contents=True)
Code Example #27
File: __init__.py  Project: cariaso/bioblend
    def _do_upload(self, **keywords):
        """
        Set up the POST request and do the actual data upload to a data library.
        This method should not be called directly but instead refer to the methods
        specific for the desired type of data upload.
        """
        # If folder_id was not provided in the arguments, find the root folder ID
        if keywords.get('folder_id', None) is None:
            folders = self.show_library(library_id=keywords['library_id'], contents=True)
            for f in folders:
                if f['name'] == '/':
                    folder_id = f['id']
                    break
        else:
            folder_id = keywords['folder_id']

        files_attached = False
        # Compose the payload dict
        payload = {}
        payload['folder_id'] = folder_id
        payload['file_type'] = keywords.get('file_type', 'auto')
        payload['dbkey'] = keywords.get('dbkey', '?')
        payload['create_type'] = 'file'
        if keywords.get("roles", None):
            payload["roles"] = keywords["roles"]
        if keywords.get("link_data_only", None):
            payload["link_data_only"] = 'link_to_files'
        # upload options
        if keywords.get('file_url', None) is not None:
            payload['upload_option'] = 'upload_file'
            payload['files_0|url_paste'] = keywords['file_url']
        elif keywords.get('pasted_content', None) is not None:
            payload['upload_option'] = 'upload_file'
            payload['files_0|url_paste'] = keywords['pasted_content']
        elif keywords.get('server_dir', None) is not None:
            payload['upload_option'] = 'upload_directory'
            payload['server_dir'] = keywords['server_dir']
        elif keywords.get('file_local_path', None) is not None:
            payload['upload_option'] = 'upload_file'
            payload['files_0|file_data'] = open(keywords['file_local_path'], 'rb')
            files_attached = True
        elif keywords.get("filesystem_paths", None) is not None:
            payload["upload_option"] = "upload_paths"
            payload["filesystem_paths"] = keywords["filesystem_paths"]

        r = Client._post(self, payload, id=keywords['library_id'], contents=True,
                         files_attached=files_attached)

        if payload.get('files_0|file_data', None) is not None:
            payload['files_0|file_data'].close()

        return r
Code Example #28
File: __init__.py  Project: bgruening/bioblend
    def import_workflow_json(self, workflow_json):
        """
        Imports a new workflow given a json representation of a previously exported
        workflow.

        :type workflow_json: str
        :param workflow_json: JSON string representing the workflow to be imported
        """
        payload = {'workflow': workflow_json}

        url = self.gi._make_url(self)
        url = _join(url, "upload")
        return Client._post(self, url=url, payload=payload)
Code Example #29
File: __init__.py  Project: Pandorin/bioblend
    def _tool_post(self, payload, files_attached=False):
        if files_attached:
            # If files_attached - this will be posted as multi-part form data
            # and so each individual parameter needs to be encoded so it can
            # be decoded as JSON by Galaxy (hence dumping complex parameters).
            # If no files are attached, the whole thing is posted as
            # application/json and dumped/loaded all at once by requests and
            # Galaxy.
            complex_payload_params = ["inputs"]
            for key in complex_payload_params:
                if key in payload:
                    payload[key] = dumps(payload[key])
        return Client._post(self, payload, files_attached=files_attached)
Code Example #30
File: __init__.py  Project: AAFC-MBB/bioblend
    def create_history(self, name=None):
        """
        Create a new history, optionally setting the ``name``.

        :type name: str
        :param name: Optional name for new history

        :rtype: dict
        :return: Dictionary containing information about newly created history
        """
        payload = {}
        if name is not None:
            payload['name'] = name
        return Client._post(self, payload)
Code Example #31
File: __init__.py  Project: nuwang/bioblend
    def invoke_workflow(self,
                        workflow_id,
                        inputs=None,
                        params=None,
                        history_id=None,
                        history_name=None,
                        import_inputs_to_history=False,
                        replacement_params=None,
                        allow_tool_state_corrections=None):
        """
        Invoke the workflow identified by ``workflow_id``. This will
        cause a workflow to be scheduled and return an object describing
        the workflow invocation.

        :type workflow_id: str
        :param workflow_id: Encoded workflow ID

        :type inputs: dict
        :param inputs: A mapping of workflow inputs to datasets and dataset collections.
                       The datasets source can be a LibraryDatasetDatasetAssociation (``ldda``),
                       LibraryDataset (``ld``), HistoryDatasetAssociation (``hda``), or
                       HistoryDatasetCollectionAssociation (``hdca``).

                       The map must be in the following format:
                       ``{'<input_index>': {'id': <encoded dataset ID>, 'src': '[ldda, ld, hda, hdca]'}}``
                       (e.g. ``{'2': {'id': '29beef4fadeed09f', 'src': 'hda'}}``)

                       This map may also be indexed by the UUIDs of the workflow steps,
                       as indicated by the ``uuid`` property of steps returned from the
                       Galaxy API.

        :type params: str or dict
        :param params: A mapping of tool parameters that are non-datasets
          parameters. The map must be in the following format:
          ``{'blastn': {'param': 'evalue', 'value': '1e-06'}}``

        :type history_id: str
        :param history_id: The encoded history ID where to store the workflow
          output. Alternatively, ``history_name`` may be specified to create a
          new history.

        :type history_name: str
        :param history_name: Create a new history with the given name to store
          the workflow output. If both ``history_id`` and ``history_name`` are
          provided, ``history_name`` is ignored. If neither is specified, a new
          'Unnamed history' is created.

        :type import_inputs_to_history: bool
        :param import_inputs_to_history: If ``True``, used workflow inputs will
          be imported into the history. If ``False``, only workflow outputs will
          be visible in the given history.

        :type allow_tool_state_corrections: bool
        :param allow_tool_state_corrections: If True, allow Galaxy to fill in
          missing tool state when running workflows. This may be useful for
          workflows using tools that have changed over time or for workflows
          built outside of Galaxy with only a subset of inputs defined.

        :type replacement_params: dict
        :param replacement_params: pattern-based replacements for post-job
          actions (see below)

        :rtype: dict
        :return: A dict containing the workflow invocation describing the
          scheduling of the workflow. For example::

            {u'history_id': u'2f94e8ae9edff68a',
             u'id': u'df7a1f0c02a5b08e',
             u'inputs': {u'0': {u'id': u'a7db2fac67043c7e',
               u'src': u'hda',
               u'uuid': u'7932ffe0-2340-4952-8857-dbaa50f1f46a'}},
             u'model_class': u'WorkflowInvocation',
             u'state': u'ready',
             u'steps': [{u'action': None,
               u'id': u'd413a19dec13d11e',
               u'job_id': None,
               u'model_class': u'WorkflowInvocationStep',
               u'order_index': 0,
               u'state': None,
               u'update_time': u'2015-10-31T22:00:26',
               u'workflow_step_id': u'cbbbf59e8f08c98c',
               u'workflow_step_label': None,
               u'workflow_step_uuid': u'b81250fd-3278-4e6a-b269-56a1f01ef485'},
              {u'action': None,
               u'id': u'2f94e8ae9edff68a',
               u'job_id': u'e89067bb68bee7a0',
               u'model_class': u'WorkflowInvocationStep',
               u'order_index': 1,
               u'state': u'new',
               u'update_time': u'2015-10-31T22:00:26',
               u'workflow_step_id': u'964b37715ec9bd22',
               u'workflow_step_label': None,
               u'workflow_step_uuid': u'e62440b8-e911-408b-b124-e05435d3125e'}],
             u'update_time': u'2015-10-31T22:00:26',
             u'uuid': u'c8aa2b1c-801a-11e5-a9e5-8ca98228593c',
             u'workflow_id': u'03501d7626bd192f'}

        The ``replacement_params`` dict should map parameter names in
        post-job actions (PJAs) to their runtime values. For
        instance, if the final step has a PJA like the following::

          {u'RenameDatasetActionout_file1': {u'action_arguments': {u'newname': u'${output}'},
            u'action_type': u'RenameDatasetAction',
            u'output_name': u'out_file1'}}

        then the following renames the output dataset to 'foo'::

          replacement_params = {'output': 'foo'}

        see also `this email thread
        <http://lists.bx.psu.edu/pipermail/galaxy-dev/2011-September/006875.html>`_.

        .. warning::
          Historically, the ``run_workflow`` method consumed a ``dataset_map``
          data structure that was indexed by unencoded workflow step IDs. These
          IDs would not be stable across Galaxy instances. The new ``inputs``
          property is instead indexed by either the ``order_index`` property
          (which is stable across workflow imports) or the step UUID which is
          also stable.
        """
        payload = {'workflow_id': workflow_id}
        if inputs:
            payload['inputs'] = inputs

        if params:
            payload['parameters'] = params

        if replacement_params:
            payload['replacement_params'] = replacement_params

        if history_id:
            payload['history'] = 'hist_id={0}'.format(history_id)
        elif history_name:
            payload['history'] = history_name
        if import_inputs_to_history is False:
            payload['no_add_to_history'] = True
        if allow_tool_state_corrections is not None:
            payload['allow_tool_state_corrections'] = allow_tool_state_corrections
        url = self.gi._make_url(self)
        url = _join(url, workflow_id, "invocations")
        return Client._post(self, payload, url=url)
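A usage sketch (placeholder URL/key; the workflow name and dataset ID are illustrative):

from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url='https://galaxy.example.org', key='<your-api-key>')
workflow = gi.workflows.get_workflows(name='My mapping workflow')[0]
# Index inputs by order_index (stable across imports), per the warning above.
inputs = {'0': {'id': '<encoded-dataset-id>', 'src': 'hda'}}
invocation = gi.workflows.invoke_workflow(workflow['id'], inputs=inputs,
                                          history_name='invocation output')
print(invocation['state'])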
Code Example #32
File: __init__.py  Project: fmareuil/bioblend
    def create_repository(self,
                          name,
                          synopsis,
                          description=None,
                          type="unrestricted",
                          remote_repository_url=None,
                          homepage_url=None,
                          category_ids=None):
        """
        Create a new repository in a Tool Shed

        :type name: str
        :param name: Name of the repository

        :type synopsis: str
        :param synopsis: Synopsis of the repository

        :type description: str
        :param description: Optional description of the repository

        :type type: str
        :param type: type of the repository. One of "unrestricted",
                     "repository_suite_definition", or "tool_dependency_definition"

        :type remote_repository_url: str
        :param remote_repository_url: Remote URL (e.g. github/bitbucket repository)

        :type homepage_url: str
        :param homepage_url: Upstream's homepage for the project.

        :type category_ids: list
        :param category_ids: List of encoded category IDs

        :rtype: dict
        :return: a dictionary containing information about the new repository.
                 For example::

                    {
                        "deleted": false,
                        "deprecated": false,
                        "description": "new_synopsis",
                        "homepage_url": "https://github.com/galaxyproject/",
                        "id": "8cf91205f2f737f4",
                        "long_description": "this is some repository",
                        "model_class": "Repository",
                        "name": "new_repo_17",
                        "owner": "qqqqqq",
                        "private": false,
                        "remote_repository_url": "https://github.com/galaxyproject/tools-devteam",
                        "times_downloaded": 0,
                        "type": "unrestricted",
                        "user_id": "adb5f5c93f827949"
                    }

        """

        payload = {
            'name': name,
            'synopsis': synopsis,
        }

        if description is not None:
            payload['description'] = description

        if type is not None:
            payload['type'] = type

        if remote_repository_url is not None:
            payload['remote_repository_url'] = remote_repository_url

        if homepage_url is not None:
            payload['homepage_url'] = homepage_url

        if category_ids is not None:
            payload['category_ids[]'] = category_ids

        return Client._post(self, payload)
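A usage sketch against a Tool Shed (placeholder URL/key; names and URLs mirror the example response above):

from bioblend.toolshed import ToolShedInstance

ts = ToolShedInstance(url='https://testtoolshed.g2.bx.psu.edu', key='<your-api-key>')
repo = ts.repositories.create_repository(
    'new_repo_17',
    'new_synopsis',
    description='this is some repository',
    remote_repository_url='https://github.com/galaxyproject/tools-devteam',
)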
Code Example #33
def _execute(ctx, config, runnable, job_path, **kwds):
    user_gi = config.user_gi
    admin_gi = config.gi

    history_id = _history_id(user_gi, **kwds)

    galaxy_paths, job_dict, _ = stage_in(ctx, runnable, config, user_gi, history_id, job_path, **kwds)

    if runnable.type in [RunnableType.galaxy_tool, RunnableType.cwl_tool]:
        response_class = GalaxyToolRunResponse
        tool_id = _verified_tool_id(runnable, user_gi)
        inputs_representation = _inputs_representation(runnable)
        run_tool_payload = dict(
            history_id=history_id,
            tool_id=tool_id,
            inputs=job_dict,
            inputs_representation=inputs_representation,
        )
        ctx.vlog("Post to Galaxy tool API with payload [%s]" % run_tool_payload)
        tool_run_response = user_gi.tools._tool_post(run_tool_payload)

        job = tool_run_response["jobs"][0]
        job_id = job["id"]
        try:
            final_state = _wait_for_job(user_gi, job_id)
        except Exception:
            summarize_history(ctx, user_gi, history_id)
            raise
        if final_state != "ok":
            msg = "Failed to run CWL tool job final job state is [%s]." % final_state
            summarize_history(ctx, user_gi, history_id)
            with open("errored_galaxy.log", "w") as f:
                f.write(config.log_contents)
            raise Exception(msg)

        ctx.vlog("Final job state was ok, fetching details for job [%s]" % job_id)
        job_info = admin_gi.jobs.show_job(job_id)
        response_kwds = {
            'job_info': job_info,
            'api_run_response': tool_run_response,
        }
        if ctx.verbose:
            summarize_history(ctx, user_gi, history_id)
    elif runnable.type in [RunnableType.galaxy_workflow, RunnableType.cwl_workflow]:
        response_class = GalaxyWorkflowRunResponse
        workflow_id = config.workflow_id(runnable.path)
        ctx.vlog("Found Galaxy workflow ID [%s] for path [%s]" % (workflow_id, runnable.path))
        # TODO: update bioblend to allow inputs_by.
        # invocation = user_gi.workflows.invoke_workflow(
        #    workflow_id,
        #    history_id=history_id,
        #    inputs=job_dict,
        # )
        payload = dict(
            workflow_id=workflow_id,
            history_id=history_id,
            inputs=job_dict,
            inputs_by="name",
            allow_tool_state_corrections=True,
        )
        invocations_url = "%s/%s/invocations" % (
            user_gi._make_url(user_gi.workflows),
            workflow_id,
        )
        invocation = Client._post(user_gi.workflows, payload, url=invocations_url)
        invocation_id = invocation["id"]
        ctx.vlog("Waiting for invocation [%s]" % invocation_id)
        try:
            final_invocation_state = _wait_for_invocation(ctx, user_gi, history_id, workflow_id, invocation_id)
        except Exception:
            ctx.vlog("Problem waiting on invocation...")
            summarize_history(ctx, user_gi, history_id)
            raise
        ctx.vlog("Final invocation state is [%s]" % final_invocation_state)
        final_state = _wait_for_history(ctx, user_gi, history_id)
        if final_state != "ok":
            msg = "Failed to run workflow final history state is [%s]." % final_state
            summarize_history(ctx, user_gi, history_id)
            with open("errored_galaxy.log", "w") as f:
                f.write(config.log_contents)
            raise Exception(msg)
        ctx.vlog("Final history state is 'ok'")
        response_kwds = {
            'workflow_id': workflow_id,
            'invocation_id': invocation_id,
        }
    else:
        raise NotImplementedError()

    run_response = response_class(
        ctx=ctx,
        runnable=runnable,
        user_gi=user_gi,
        history_id=history_id,
        galaxy_paths=galaxy_paths,
        log=config.log_contents,
        **response_kwds
    )
    output_directory = kwds.get("output_directory", None)
    ctx.vlog("collecting outputs from run...")
    run_response.collect_outputs(ctx, output_directory)
    ctx.vlog("collecting outputs complete")
    return run_response
Code Example #34
def _execute(config, runnable, job_path, **kwds):
    user_gi = config.user_gi
    admin_gi = config.gi

    history_id = _history_id(user_gi, **kwds)

    galaxy_paths, job_dict, datasets = stage_in(config, user_gi, history_id,
                                                job_path, **kwds)

    if runnable.type in [RunnableType.galaxy_tool, RunnableType.cwl_tool]:
        response_class = GalaxyToolRunResponse
        tool_id = _tool_id(runnable.path)
        if runnable.type == RunnableType.cwl_tool:
            inputs_representation = "cwl"
        else:
            inputs_representation = "galaxy"
        run_tool_payload = dict(
            history_id=history_id,
            tool_id=tool_id,
            inputs=job_dict,
            inputs_representation=inputs_representation,
        )
        tool_run_response = user_gi.tools._tool_post(run_tool_payload)

        job = tool_run_response["jobs"][0]
        job_id = job["id"]
        final_state = _wait_for_job(user_gi, job_id)
        if final_state != "ok":
            msg = "Failed to run CWL job final job state is [%s]." % final_state
            with open("errored_galaxy.log", "w") as f:
                f.write(config.log_contents)
            raise Exception(msg)

        job_info = admin_gi.jobs.show_job(job_id)
        response_kwds = {
            'job_info': job_info,
            'api_run_response': tool_run_response,
        }

    elif runnable.type in [RunnableType.galaxy_workflow]:
        response_class = GalaxyWorkflowRunResponse
        workflow_id = config.workflow_id(runnable.path)
        # TODO: update bioblend to allow inputs_by.
        # invocation = user_gi.workflows.invoke_workflow(
        #    workflow_id,
        #    history_id=history_id,
        #    inputs=job_dict,
        # )
        payload = dict(
            workflow_id=workflow_id,
            history_id=history_id,
            inputs=job_dict,
            inputs_by="name",
            allow_tool_state_corrections=True,
        )
        invocations_url = "%s/%s/invocations" % (
            user_gi._make_url(user_gi.workflows),
            workflow_id,
        )
        invocation = Client._post(user_gi.workflows,
                                  payload,
                                  url=invocations_url)
        invocation_id = invocation["id"]
        _wait_for_invocation(user_gi, workflow_id, invocation_id)
        final_state = _wait_for_history(user_gi, history_id)
        if final_state != "ok":
            msg = "Failed to run CWL job final job state is [%s]." % final_state
            with open("errored_galaxy.log", "w") as f:
                f.write(config.log_contents)
            raise Exception(msg)
        response_kwds = {
            'workflow_id': workflow_id,
            'invocation_id': invocation_id,
        }
    else:
        raise NotImplementedError()

    run_response = response_class(runnable=runnable,
                                  user_gi=user_gi,
                                  history_id=history_id,
                                  galaxy_paths=galaxy_paths,
                                  log=config.log_contents,
                                  **response_kwds)
    output_directory = kwds.get("output_directory", None)
    run_response.collect_outputs(output_directory)
    return run_response
Code Example #35
File: __init__.py  Project: Takadonet/bioblend
    def run_workflow(self,
                     workflow_id,
                     dataset_map,
                     params=None,
                     history_id=None,
                     history_name=None,
                     import_inputs_to_history=False,
                     replacement_params=None):
        """
        Run the workflow identified by ``workflow_id``

        :type workflow_id: string
        :param workflow_id: Encoded workflow ID

        :type dataset_map: string or dict
        :param dataset_map: A mapping of workflow inputs to datasets. The datasets
                            source can be a LibraryDatasetDatasetAssociation (``ldda``),
                            LibraryDataset (``ld``), or HistoryDatasetAssociation (``hda``).
                            The map must be in the following format:
                            ``{'<input>': {'id': <encoded dataset ID>, 'src': '[ldda, ld, hda]'}}``
                            (eg, ``{'23': {'id': '29beef4fadeed09f', 'src': 'ld'}}``)
        :type params: string or dict
        :param params: A mapping of tool parameters that are non-datasets parameters. The map must be in the
                         following format:
                         ``{'blastn': {'param': 'evalue', 'value': '1e-06'}}``

        :type history_id: string
        :param history_id: The encoded history ID where to store the workflow output.
                           ``history_id`` OR ``history_name`` should be provided but not both!

        :type history_name: string
        :param history_name: Create a new history with the given name to store the
                             workflow output. ``history_id`` OR ``history_name``
                             should be provided but not both!

        :type import_inputs_to_history: bool
        :param import_inputs_to_history: If ``True``, used workflow inputs will be imported
                                         into the history. If ``False``, only workflow outputs
                                         will be visible in the given history.

        :type replacement_params: dict
        :param replacement_params: A mapping from workflow replacement params to values. The map must be in the
                       following format:
                       ``{'output_name': 'my_output_name', 'threshold': '10'}``

        :rtype: dict
        :return: A dict containing the history ID where the outputs are placed as well as
                 output dataset IDs.
                 For example::

                  {u'history': u'64177123325c9cfd',
                   u'outputs': [u'aa4d3084af404259']}

        """
        payload = {}
        payload['workflow_id'] = workflow_id
        payload['ds_map'] = dataset_map

        if params:
            payload['parameters'] = params

        if replacement_params:
            payload['replacement_params'] = replacement_params

        if history_id:
            payload['history'] = 'hist_id={0}'.format(history_id)
        elif history_name:
            payload['history'] = history_name
        else:
            print "Must provide history_id or history_name argument"
        if import_inputs_to_history is False:
            payload['no_add_to_history'] = True
        return Client._post(self, payload)
Code Example #36
    def run_workflow(self,
                     workflow_id,
                     dataset_map=None,
                     params=None,
                     history_id=None,
                     history_name=None,
                     import_inputs_to_history=False,
                     replacement_params=None):
        """
        Run the workflow identified by ``workflow_id``

        :type workflow_id: str
        :param workflow_id: Encoded workflow ID

        :type dataset_map: str or dict
        :param dataset_map: A mapping of workflow inputs to datasets. The datasets
                            source can be a LibraryDatasetDatasetAssociation (``ldda``),
                            LibraryDataset (``ld``), or HistoryDatasetAssociation (``hda``).
                            The map must be in the following format:
                            ``{'<input>': {'id': <encoded dataset ID>, 'src': '[ldda, ld, hda]'}}``
                            (e.g. ``{'23': {'id': '29beef4fadeed09f', 'src': 'ld'}}``)

        :type params: str or dict
        :param params: A mapping of tool parameters that are non-datasets parameters. The map must be in the
                         following format:
                         ``{'blastn': {'param': 'evalue', 'value': '1e-06'}}``

        :type history_id: str
        :param history_id: The encoded history ID where to store the workflow
          output. Alternatively, ``history_name`` may be specified to create a
          new history.

        :type history_name: str
        :param history_name: Create a new history with the given name to store
          the workflow output. If both ``history_id`` and ``history_name`` are
          provided, ``history_name`` is ignored. If neither is specified, a new
          'Unnamed history' is created.

        :type import_inputs_to_history: bool
        :param import_inputs_to_history: If ``True``, used workflow inputs will be imported
                                         into the history. If ``False``, only workflow outputs
                                         will be visible in the given history.

        :type replacement_params: dict
        :param replacement_params: pattern-based replacements for post-job actions (see below)

        :rtype: dict
        :return: A dict containing the history ID where the outputs are placed as well as
                 output dataset IDs.
                 For example::

                  {u'history': u'64177123325c9cfd',
                   u'outputs': [u'aa4d3084af404259']}

        The ``replacement_params`` dict should map parameter names in
        post-job actions (PJAs) to their runtime values.  For
        instance, if the final step has a PJA like the following::

          {u'RenameDatasetActionout_file1': {
             u'action_arguments': {u'newname': u'${output}'},
             u'action_type': u'RenameDatasetAction',
             u'output_name': u'out_file1'}}

        then the following renames the output dataset to 'foo'::

          replacement_params = {'output': 'foo'}

        see also `this thread
        <http://lists.bx.psu.edu/pipermail/galaxy-dev/2011-September/006875.html>`_
        """
        payload = {}
        payload['workflow_id'] = workflow_id
        if dataset_map:
            payload['ds_map'] = dataset_map

        if params:
            payload['parameters'] = params

        if replacement_params:
            payload['replacement_params'] = replacement_params

        if history_id:
            payload['history'] = 'hist_id={0}'.format(history_id)
        elif history_name:
            payload['history'] = history_name
        if import_inputs_to_history is False:
            payload['no_add_to_history'] = True
        return Client._post(self, payload)
Code Example #37
File: __init__.py  Project: bzeitner/bioblend
    def install_repository_revision(self, tool_shed_url, name, owner, changeset_revision,
          install_tool_dependencies=False, install_repository_dependencies=False,
          tool_panel_section_id=None, new_tool_panel_section_label=None):
        """
        Install a specified repository revision from a specified Tool Shed into
        this Galaxy instance. This example demonstrates installation of a repository
        that contains valid tools, loading them into a section of the Galaxy tool
        panel or creating a new tool panel section.
        Tool dependencies and repository dependencies can optionally be
        installed; use ``install_tool_dependencies`` and
        ``install_repository_dependencies``.

        Installing the repository into an existing tool panel section requires
        the tool panel config file (e.g., tool_conf.xml, shed_tool_conf.xml)
        to contain the given tool panel section::

            <section id="from_test_tool_shed" name="From Test Tool Shed" version="">
            </section>

        :type tool_shed_url: string
        :param tool_shed_url: URL of the Tool Shed from which the repository
                              should be installed (e.g., ``http://testtoolshed.g2.bx.psu.edu``)

        :type name: string
        :param name: The name of the repository that should be installed

        :type owner: string
        :param owner: The name of the repository owner

        :type changeset_revision: string
        :param changeset_revision: The revision of the repository to be installed

        :type install_tool_dependencies: Boolean
        :param install_tool_dependencies: Whether or not to automatically handle
                                          tool dependencies (see
                                          http://wiki.galaxyproject.org/AToolOrASuitePerRepository
                                          for more details)

        :type install_repository_dependencies: Boolean
        :param install_repository_dependencies: Whether or not to automatically
                                                handle repository dependencies
                                                (see http://wiki.galaxyproject.org/DefiningRepositoryDependencies
                                                for more details)

        :type tool_panel_section_id: string
        :param tool_panel_section_id: The ID of the Galaxy tool panel section
                                      where the tool should be inserted.
                                      Note that you should specify either this
                                      parameter or the ``new_tool_panel_section_label``.
                                      If both are specified, this one will take
                                      precedence.

        :type new_tool_panel_section_label: string
        :param new_tool_panel_section_label: The name of a Galaxy tool panel section
                                             that should be created and the repository
                                             installed into.
        """
        payload = {}
        payload['tool_shed_url'] = tool_shed_url
        payload['name'] = name
        payload['owner'] = owner
        payload['changeset_revision'] = changeset_revision
        payload['install_tool_dependencies'] = install_tool_dependencies
        payload['install_repository_dependencies'] = install_repository_dependencies
        if tool_panel_section_id:
            # Galaxy requires 'section_' to be prepended to the section ID so ensure it's there
            if 'section_' not in tool_panel_section_id:
                payload['tool_panel_section_id'] = 'section_%s' % tool_panel_section_id
            else:
                payload['tool_panel_section_id'] = tool_panel_section_id
        elif new_tool_panel_section_label:
            payload['new_tool_panel_section_label'] = new_tool_panel_section_label

        url = "%s%s" % (self.gi.url, '/tool_shed_repositories/new/install_repository_revision')

        return Client._post(self, url=url, payload=payload)
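A usage sketch through the toolshed client of a Galaxy instance (placeholder URL/key; the repository coordinates and changeset hash are illustrative):

from bioblend.galaxy import GalaxyInstance

gi = GalaxyInstance(url='https://galaxy.example.org', key='<admin-api-key>')
gi.toolshed.install_repository_revision(
    'https://testtoolshed.g2.bx.psu.edu',
    name='fastqc',
    owner='devteam',
    changeset_revision='<changeset-hash>',
    install_tool_dependencies=True,
    install_repository_dependencies=True,
    new_tool_panel_section_label='From Test Tool Shed',
)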
Code Example #38
    def _do_upload(self, **keywords):
        """
        Set up the POST request and do the actual data upload to a data library.
        This method should not be called directly but instead refer to the methods
        specific for the desired type of data upload.
        """
        library_id = keywords['library_id']
        folder_id = keywords.get('folder_id', None)
        if folder_id is None:
            folder_id = self._get_root_folder_id(library_id)
        files_attached = False
        # Compose the payload dict
        payload = {}
        payload['folder_id'] = folder_id
        payload['file_type'] = keywords.get('file_type', 'auto')
        payload['dbkey'] = keywords.get('dbkey', '?')
        payload['create_type'] = 'file'
        if keywords.get("roles", None):
            payload["roles"] = keywords["roles"]
        if keywords.get("link_data_only",
                        None) and keywords['link_data_only'] != 'copy_files':
            payload["link_data_only"] = 'link_to_files'
        if keywords.get('remote_dataset', None):
            payload['remote_dataset'] = keywords['remote_dataset']
        if keywords.get('uuid_list', None):
            payload['uuid_list'] = keywords['uuid_list']
        if keywords.get('remote_dataset_type_list', None):
            payload['remote_dataset_type_list'] = keywords[
                'remote_dataset_type_list']
        if keywords.get('file_size_list', None):
            payload['file_size_list'] = keywords['file_size_list']
        if keywords.get('line_count_list', None):
            payload['line_count_list'] = keywords['line_count_list']
        # upload options
        if keywords.get('file_url', None) is not None:
            payload['upload_option'] = 'upload_file'
            payload['files_0|url_paste'] = keywords['file_url']
        elif keywords.get('pasted_content', None) is not None:
            payload['upload_option'] = 'upload_file'
            payload['files_0|url_paste'] = keywords['pasted_content']
        elif keywords.get('server_dir', None) is not None:
            payload['upload_option'] = 'upload_directory'
            payload['server_dir'] = keywords['server_dir']
        elif keywords.get('file_local_path', None) is not None:
            payload['upload_option'] = 'upload_file'
            payload['files_0|file_data'] = attach_file(
                keywords['file_local_path'])
            files_attached = True
        elif keywords.get("filesystem_paths", None) is not None:
            payload["upload_option"] = "upload_paths"
            payload["filesystem_paths"] = keywords["filesystem_paths"]

        try:
            return Client._post(self,
                                payload,
                                id=library_id,
                                contents=True,
                                files_attached=files_attached)
        finally:
            if payload.get('files_0|file_data', None) is not None:
                payload['files_0|file_data'].close()