Example #1
    def create(self, trans, payload, **kwd):
        """
        * POST /api/dataset_collections:
            create a new dataset collection instance.

        :type   payload: dict
        :param  payload: (optional) dictionary structure containing:
            * collection_type: dataset collection type to create.
            * instance_type:   Instance type - 'history' or 'library'.
            * name:            the new dataset collection's name
            * datasets:        object describing datasets for collection
        :rtype:     dict
        :returns:   element view of new dataset collection
        """
        # TODO: Error handling...
        create_params = api_payload_to_create_params(payload)
        instance_type = payload.pop("instance_type", "history")
        if instance_type == "history":
            history_id = payload.get('history_id')
            history_id = self.decode_id(history_id)
            history = self.history_manager.get_owned(history_id, trans.user, current_history=trans.history)
            create_params["parent"] = history
        elif instance_type == "library":
            folder_id = payload.get('folder_id')
            library_folder = self.get_library_folder(trans, folder_id, check_accessible=True)
            self.check_user_can_add_to_library_item(trans, library_folder, check_accessible=False)
            create_params["parent"] = library_folder
        else:
            trans.status = 501
            return
        dataset_collection_instance = self.__service(trans).create(trans=trans, **create_params)
        return dictify_dataset_collection_instance(dataset_collection_instance,
                                                   security=trans.security, parent=create_params["parent"])
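The docstring above lists everything the endpoint expects in the payload. A minimal client-side sketch of such a request might look like the following; the Galaxy base URL, the API-key query parameter, and the exact layout of the ``datasets`` entry are assumptions for illustration, not taken from the example.

    import json
    import urllib.request

    # Hypothetical values -- replace with a real Galaxy instance, API key and ids.
    GALAXY_URL = "https://galaxy.example.org"
    API_KEY = "0123456789abcdef"

    payload = {
        "instance_type": "history",         # 'history' or 'library', as documented above
        "history_id": "f2db41e1fa331b3e",   # encoded id of the target history (placeholder)
        "collection_type": "list",          # dataset collection type to create
        "name": "my new collection",
        # 'datasets' describes the collection's members; its exact structure is not
        # shown in the example above, so this element layout is an assumption.
        "datasets": [{"src": "hda", "id": "bbd44e69cb8906b5"}],
    }

    request = urllib.request.Request(
        f"{GALAXY_URL}/api/dataset_collections?key={API_KEY}",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(request) as response:
        print(json.load(response))  # element view of the new dataset collection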
Example #2
 def _serialize_collection( self, trans, collection ):
     service = self.app.dataset_collections_service
     dataset_collection_instance = service.get_dataset_collection_instance(
         trans=trans,
         instance_type='history',
         id=self.app.security.encode_id( collection.id ),
     )
     return collections_util.dictify_dataset_collection_instance( dataset_collection_instance,
         security=self.app.security, parent=dataset_collection_instance.history, view="element" )
 def show( self, trans, instance_type, id, **kwds ):
     dataset_collection_instance = self.__service( trans ).get(
         id=id,
         instance_type=instance_type,
     )
     if instance_type == 'history':
         parent = dataset_collection_instance.history
     elif instance_type == 'library':
         parent = dataset_collection_instance.folder
     else:
         trans.status = 501
         return
     return dictify_dataset_collection_instance( trans, dataset_collection_instance, parent )
Example #4
    def _get_history_data( self, trans, history ):
        """
        Returns a dictionary containing ``history`` (the serialized history) and
        ``contents`` (an array of serialized history contents).
        """
        #TODO: instantiate here? really?
        history_serializer = HistorySerializer( self.app )
        hda_serializer = hdas.HDASerializer( self.app )
        history_dictionary = {}
        contents_dictionaries = []
        try:
            history_dictionary = history_serializer.serialize_to_view( trans, history, view='detailed' )

            #for content in history.contents_iter( **contents_kwds ):
            for content in history.contents_iter( types=[ 'dataset', 'dataset_collection' ] ):
                contents_dict = {}

                if isinstance( content, model.HistoryDatasetAssociation ):
                    contents_dict = hda_serializer.serialize_to_view( trans, content, view='detailed' )

                elif isinstance( content, model.HistoryDatasetCollectionAssociation ):
                    try:
                        service = self.app.dataset_collections_service
                        dataset_collection_instance = service.get_dataset_collection_instance(
                            trans=trans,
                            instance_type='history',
                            id=self.app.security.encode_id( content.id ),
                        )
                        contents_dict = dictify_dataset_collection_instance( dataset_collection_instance,
                            security=self.app.security, parent=dataset_collection_instance.history, view="element" )

                    except Exception as exc:
                        log.exception( "Error in history API at listing dataset collection: %s", exc )
                        #TODO: return some dict with the error

                contents_dictionaries.append( contents_dict )

        except Exception as exc:
            user_id = str( trans.user.id ) if trans.user else '(anonymous)'
            log.exception( 'Error bootstrapping history for user %s: %s', user_id, str( exc ) )
            message = ( 'An error occurred getting the history data from the server. '
                      + 'Please contact a Galaxy administrator if the problem persists.' )
            history_dictionary[ 'error' ] = message
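The example is truncated before the function returns. Judging from the docstring, it presumably finishes by bundling the two serialized pieces together; a stand-alone sketch of that final step (``bundle_history_data`` is a hypothetical helper, not part of the source):

    def bundle_history_data(history_dictionary, contents_dictionaries):
        """Sketch of the return shape described in the docstring above: a dict
        holding the serialized history and the array of serialized contents."""
        return {
            'history': history_dictionary,
            'contents': contents_dictionaries,
        }

    # For instance, the error path above would yield something like:
    print(bundle_history_data({'error': 'An error occurred getting the history data ...'}, []))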
Example #5
    def create( self, trans, payload, **kwd ):
        """
        POST /api/tools
        Executes tool using specified inputs and returns tool's outputs.
        """
        # HACK: for now, if action is rerun, rerun tool.
        action = payload.get( 'action', None )
        if action == 'rerun':
            return self._rerun_tool( trans, payload, **kwd )

        # -- Execute tool. --

        # Get tool.
        tool = trans.app.toolbox.get_tool( payload[ 'tool_id' ] ) if 'tool_id' in payload else None
        if not tool:
            trans.response.status = 404
            return { "message": { "type": "error", "text" : trans.app.model.Dataset.conversion_messages.NO_TOOL } }

        # Set running history from payload parameters.
        # History not set correctly as part of this API call for
        # dataset upload.
        history_id = payload.get("history_id", None)
        if history_id:
            target_history = self.get_history( trans, history_id )
        else:
            target_history = None

        # Set up inputs.
        inputs = payload.get( 'inputs', {} )
        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.iteritems():
            if k.startswith("files_") or k.startswith("__files_"):
                inputs[k] = v

        #for inputs that are coming from the Library, copy them into the history
        input_patch = {}
        for k, v in inputs.iteritems():
            if  isinstance(v, dict) and v.get('src', '') == 'ldda' and 'id' in v:
                ldda = trans.sa_session.query( trans.app.model.LibraryDatasetDatasetAssociation ).get( trans.security.decode_id(v['id']) )
                if trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ):
                    input_patch[k] = ldda.to_history_dataset_association(target_history, add_to_history=True)

        for k, v in input_patch.iteritems():
            inputs[k] = v

        # HACK: add run button so that tool.handle_input will run tool.
        inputs['runtool_btn'] = 'Execute'
        # TODO: encode data ids and decode ids.
        # TODO: handle dbkeys
        params = util.Params( inputs, sanitize=False )
        # process_state will be 'populate' or 'update'. When no tool
        # state is specified in input - it will be 'populate', and
        # tool will fully expand repeat and conditionals when building
        # up state. If tool state is found in input
        # parameters, process_state will be 'update' and complex
        # submissions (with repeats and conditionals) must be built up
        # over several iterative calls to the API - mimicking behavior
        # of web controller (though frankly API never returns
        # tool_state so this "legacy" behavior is probably impossible
        # through API currently).
        incoming = params.__dict__
        process_state = "update" if "tool_state" in incoming else "populate"
        template, vars = tool.handle_input( trans, incoming, history=target_history, process_state=process_state, source="json" )
        if 'errors' in vars:
            trans.response.status = 400
            return { "message": { "type": "error", "data" : vars[ 'errors' ] } }

        # TODO: check for errors and ensure that output dataset(s) are available.
        output_datasets = vars.get( 'out_data', [] )
        rval = {
            "outputs": [],
            "jobs": [],
            "implicit_collections": [],
        }

        job_errors = vars.get( 'job_errors', [] )
        if job_errors:
            # If we are here - some jobs were successfully executed but some failed.
            rval[ "errors" ] = job_errors

        outputs = rval[ "outputs" ]
        #TODO:?? poss. only return ids?
        for output_name, output in output_datasets:
            output_dict = output.to_dict()
            #add the output name back into the output data structure
            #so it's possible to figure out which newly created elements
            #correspond with which tool file outputs
            output_dict[ 'output_name' ] = output_name
            outputs.append( trans.security.encode_dict_ids( output_dict ) )

        for job in vars.get('jobs', []):
            rval[ 'jobs' ].append( self.encode_all_ids( trans, job.to_dict( view='collection' ), recursive=True ) )

        for output_name, collection_instance in vars.get( 'implicit_collections', {} ).iteritems():
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance( collection_instance, security=trans.security, parent=history )
            output_dict[ 'output_name' ] = output_name
            rval[ 'implicit_collections' ].append( output_dict )

        return rval
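As the docstring says, the endpoint runs a tool with the supplied inputs. A client-side sketch of such a call follows; the base URL, API-key parameter, tool id and input names are placeholders chosen for illustration.

    import json
    import urllib.request

    GALAXY_URL = "https://galaxy.example.org"   # hypothetical instance
    API_KEY = "0123456789abcdef"                # hypothetical API key

    payload = {
        "tool_id": "cat1",                      # placeholder tool id
        "history_id": "f2db41e1fa331b3e",       # history that should receive the outputs
        "inputs": {
            # Parameter names depend entirely on the tool; these are placeholders.
            "input1": "bbd44e69cb8906b5",
        },
    }

    request = urllib.request.Request(
        f"{GALAXY_URL}/api/tools?key={API_KEY}",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(request) as response:
        result = json.load(response)

    # The handler above returns 'outputs', 'jobs' and 'implicit_collections'.
    for output in result["outputs"]:
        print(output["output_name"], output["id"])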
Example #6
    def create( self, trans, payload, **kwd ):
        """
        POST /api/tools
        Executes tool using specified inputs and returns tool's outputs.
        """
        # HACK: for now, if action is rerun, rerun tool.
        action = payload.get( 'action', None )
        if action == 'rerun':
            return self._rerun_tool( trans, payload, **kwd )

        # -- Execute tool. --

        # Get tool.
        tool_version = payload.get( 'tool_version', None )
        tool = trans.app.toolbox.get_tool( payload[ 'tool_id' ], tool_version ) if 'tool_id' in payload else None
        if not tool or not tool.allow_user_access( trans.user ):
            raise exceptions.MessageException( 'Tool not found or not accessible.' )
        if trans.app.config.user_activation_on:
            if not trans.user:
                log.warning( "Anonymous user attempts to execute tool, but account activation is turned on." )
            elif not trans.user.active:
                log.warning( "User \"%s\" attempts to execute tool, but account activation is turned on and user account is not active." % trans.user.email )

        # Set running history from payload parameters.
        # History not set correctly as part of this API call for
        # dataset upload.
        history_id = payload.get('history_id', None)
        if history_id:
            decoded_id = self.decode_id( history_id )
            target_history = self.history_manager.get_owned( decoded_id, trans.user, current_history=trans.history )
        else:
            target_history = None

        # Set up inputs.
        inputs = payload.get( 'inputs', {} )
        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.iteritems():
            if k.startswith('files_') or k.startswith('__files_'):
                inputs[k] = v

        # for inputs that are coming from the Library, copy them into the history
        input_patch = {}
        for k, v in inputs.iteritems():
            if isinstance(v, dict) and v.get('src', '') == 'ldda' and 'id' in v:
                ldda = trans.sa_session.query( trans.app.model.LibraryDatasetDatasetAssociation ).get( self.decode_id(v['id']) )
                if trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ):
                    input_patch[k] = ldda.to_history_dataset_association(target_history, add_to_history=True)

        for k, v in input_patch.iteritems():
            inputs[k] = v

        # TODO: encode data ids and decode ids.
        # TODO: handle dbkeys
        params = util.Params( inputs, sanitize=False )
        incoming = params.__dict__
        vars = tool.handle_input( trans, incoming, history=target_history )

        # TODO: check for errors and ensure that output dataset(s) are available.
        output_datasets = vars.get( 'out_data', [] )
        rval = { 'outputs': [], 'output_collections': [], 'jobs': [], 'implicit_collections': [] }

        job_errors = vars.get( 'job_errors', [] )
        if job_errors:
            # If we are here - some jobs were successfully executed but some failed.
            rval[ 'errors' ] = job_errors

        outputs = rval[ 'outputs' ]
        # TODO:?? poss. only return ids?
        for output_name, output in output_datasets:
            output_dict = output.to_dict()
            # add the output name back into the output data structure
            # so it's possible to figure out which newly created elements
            # correspond with which tool file outputs
            output_dict[ 'output_name' ] = output_name
            outputs.append( trans.security.encode_dict_ids( output_dict, skip_startswith="metadata_" ) )

        for job in vars.get('jobs', []):
            rval[ 'jobs' ].append( self.encode_all_ids( trans, job.to_dict( view='collection' ), recursive=True ) )

        for output_name, collection_instance in vars.get('output_collections', []):
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance( collection_instance, security=trans.security, parent=history )
            output_dict[ 'output_name' ] = output_name
            rval[ 'output_collections' ].append( output_dict )

        for output_name, collection_instance in vars.get( 'implicit_collections', {} ).iteritems():
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance( collection_instance, security=trans.security, parent=history )
            output_dict[ 'output_name' ] = output_name
            rval[ 'implicit_collections' ].append( output_dict )

        return rval
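For reference, the dictionary assembled above always carries the four list keys shown below, plus an ``errors`` key when some jobs failed; the concrete ids and names in this sketch are invented.

    # Shape of the value returned by create() above (invented ids/names).
    result = {
        "outputs": [{"id": "bbd44e69cb8906b5", "output_name": "out_file1"}],
        "output_collections": [],
        "jobs": [{"id": "2891970512fa2d5a"}],
        "implicit_collections": [],
    }

    for job in result["jobs"]:
        print("submitted job", job["id"])
    for collection in result["output_collections"] + result["implicit_collections"]:
        print("collection output", collection["output_name"])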
Example #7
    def create(self, trans, library_id, payload, **kwd):
        """
        create( self, trans, library_id, payload, **kwd )
        * POST /api/libraries/{library_id}/contents:
            create a new library file or folder

        To copy an HDA into a library send ``create_type`` of 'file' and
        the HDA's encoded id in ``from_hda_id`` (and optionally ``ldda_message``).

        To copy an HDCA into a library send ``create_type`` of 'file' and
        the HDCA's encoded id in ``from_hdca_id`` (and optionally ``ldda_message``).

        :type   library_id: str
        :param  library_id: the encoded id of the library in which to create the new item
        :type   payload:    dict
        :param  payload:    dictionary structure containing:

            * folder_id:    the encoded id of the parent folder of the new item
            * create_type:  the type of item to create ('file', 'folder' or 'collection')
            * from_hda_id:  (optional, only if create_type is 'file') the
                encoded id of an accessible HDA to copy into the library
            * ldda_message: (optional) the new message attribute of the LDDA created
            * extended_metadata: (optional) sub-dictionary containing any extended
                metadata to associate with the item
            * upload_option: (optional) one of 'upload_file' (default), 'upload_directory' or 'upload_paths'
            * server_dir: (optional, only if upload_option is
                'upload_directory') relative path of the subdirectory of Galaxy
                ``library_import_dir`` to upload. All and only the files (i.e.
                no subdirectories) contained in the specified directory will be
                uploaded.
            * filesystem_paths: (optional, only if upload_option is
                'upload_paths' and the user is an admin) file paths on the
                Galaxy server to upload to the library, one file per line
            * link_data_only: (optional, only when upload_option is
                'upload_directory' or 'upload_paths') either 'copy_files'
                (default) or 'link_to_files'. Setting to 'link_to_files'
                symlinks instead of copying the files
            * name: (optional, only if create_type is 'folder') name of the
                folder to create
            * description: (optional, only if create_type is 'folder')
                description of the folder to create
            * tag_using_filename: (optional)
                create tags on datasets using the file's original name

        :returns:   a dictionary describing the new item unless ``from_hdca_id`` is supplied,
                    in which case a list of such dictionaries is returned.
        :rtype:     object
        """
        if 'create_type' not in payload:
            trans.response.status = 400
            return "Missing required 'create_type' parameter."
        else:
            create_type = payload.pop('create_type')
        if create_type not in ('file', 'folder', 'collection'):
            trans.response.status = 400
            return "Invalid value for 'create_type' parameter ( %s ) specified." % create_type

        if 'folder_id' not in payload:
            trans.response.status = 400
            return "Missing required 'folder_id' parameter."
        else:
            folder_id = payload.pop('folder_id')
            class_name, folder_id = self.__decode_library_content_id(folder_id)
        try:
            # security is checked in the downstream controller
            parent = self.get_library_folder(trans, folder_id, check_ownership=False, check_accessible=False)
        except Exception as e:
            return str(e)
        # The rest of the security happens in the library_common controller.
        real_folder_id = trans.security.encode_id(parent.id)

        # are we copying an HDA to the library folder?
        #   we'll need the id and any message to attach, then branch to that private function
        from_hda_id, from_hdca_id, ldda_message = (payload.pop('from_hda_id', None), payload.pop('from_hdca_id', None), payload.pop('ldda_message', ''))
        if create_type == 'file':
            if from_hda_id:
                return self._copy_hda_to_library_folder(trans, self.hda_manager, self.decode_id(from_hda_id), real_folder_id, ldda_message)
            if from_hdca_id:
                return self._copy_hdca_to_library_folder(trans, self.hda_manager, self.decode_id(from_hdca_id), real_folder_id, ldda_message)

        # check for extended metadata, store it and pop it out of the param
        # otherwise sanitize_param will have a fit
        ex_meta_payload = payload.pop('extended_metadata', None)

        # Now create the desired content object, either file or folder.
        if create_type == 'file':
            status, output = trans.webapp.controllers['library_common'].upload_library_dataset(trans, 'api', library_id, real_folder_id, **payload)
        elif create_type == 'folder':
            status, output = trans.webapp.controllers['library_common'].create_folder(trans, 'api', real_folder_id, library_id, **payload)
        elif create_type == 'collection':
            # Not delegating to library_common, so need to check access to parent
            # folder here.
            self.check_user_can_add_to_library_item(trans, parent, check_accessible=True)
            create_params = api_payload_to_create_params(payload)
            create_params['parent'] = parent
            service = trans.app.dataset_collections_service
            dataset_collection_instance = service.create(**create_params)
            return [dictify_dataset_collection_instance(dataset_collection_instance, security=trans.security, parent=parent)]
        if status != 200:
            trans.response.status = status
            return output
        else:
            rval = []
            for v in output.values():
                if ex_meta_payload is not None:
                    # If there is extended metadata, store it, attach it to the dataset, and index it
                    ex_meta = ExtendedMetadata(ex_meta_payload)
                    trans.sa_session.add(ex_meta)
                    v.extended_metadata = ex_meta
                    trans.sa_session.add(v)
                    trans.sa_session.flush()
                    for path, value in self._scan_json_block(ex_meta_payload):
                        meta_i = ExtendedMetadataIndex(ex_meta, path, value)
                        trans.sa_session.add(meta_i)
                    trans.sa_session.flush()
                if type(v) == trans.app.model.LibraryDatasetDatasetAssociation:
                    v = v.library_dataset
                encoded_id = trans.security.encode_id(v.id)
                if create_type == 'folder':
                    encoded_id = 'F' + encoded_id
                rval.append(dict(id=encoded_id,
                                 name=v.name,
                                 url=url_for('library_content', library_id=library_id, id=encoded_id)))
            return rval
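Two payload sketches for the endpoint documented above, built only from fields the docstring names; all ids are placeholders.

    # Copy an existing, accessible HDA into a library folder.
    copy_hda_payload = {
        "create_type": "file",
        "folder_id": "Fcba2cbe1f23c8e25",     # encoded id of the parent folder (placeholder)
        "from_hda_id": "bbd44e69cb8906b5",    # encoded id of the HDA to copy (placeholder)
        "ldda_message": "copied via the API",
    }

    # Create a sub-folder instead.
    create_folder_payload = {
        "create_type": "folder",
        "folder_id": "Fcba2cbe1f23c8e25",
        "name": "my new folder",
        "description": "created through POST /api/libraries/{library_id}/contents",
    }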
Example #9
    def create(self, trans, library_id, payload, **kwd):
        """
        POST /api/libraries/{library_id}/contents:

        Create a new library file or folder.

        To copy an HDA into a library send ``create_type`` of 'file' and
        the HDA's encoded id in ``from_hda_id`` (and optionally ``ldda_message``).

        To copy an HDCA into a library send ``create_type`` of 'file' and
        the HDCA's encoded id in ``from_hdca_id`` (and optionally ``ldda_message``).

        :type   library_id: str
        :param  library_id: the encoded id of the library in which to create the new item
        :type   payload:    dict
        :param  payload:    dictionary structure containing:

            * folder_id:    the encoded id of the parent folder of the new item
            * create_type:  the type of item to create ('file', 'folder' or 'collection')
            * from_hda_id:  (optional, only if create_type is 'file') the
                encoded id of an accessible HDA to copy into the library
            * ldda_message: (optional) the new message attribute of the LDDA created
            * extended_metadata: (optional) sub-dictionary containing any extended
                metadata to associate with the item
            * upload_option: (optional) one of 'upload_file' (default), 'upload_directory' or 'upload_paths'
            * server_dir: (optional, only if upload_option is
                'upload_directory') relative path of the subdirectory of Galaxy
                ``library_import_dir`` (if admin) or ``user_library_import_dir``
                (if non-admin) to upload. All and only the files (i.e.
                no subdirectories) contained in the specified directory will be
                uploaded.
            * filesystem_paths: (optional, only if upload_option is
                'upload_paths' and the user is an admin) file paths on the
                Galaxy server to upload to the library, one file per line
            * link_data_only: (optional, only when upload_option is
                'upload_directory' or 'upload_paths') either 'copy_files'
                (default) or 'link_to_files'. Setting to 'link_to_files'
                symlinks instead of copying the files
            * name: (optional, only if create_type is 'folder') name of the
                folder to create
            * description: (optional, only if create_type is 'folder')
                description of the folder to create
            * tag_using_filenames: (optional)
                create tags on datasets using the file's original name
            * tags: (optional)
                create the given list of tags on datasets

        :returns:   a dictionary describing the new item unless ``from_hdca_id`` is supplied,
                    in which case a list of such dictionaries is returned.
        :rtype:     object
        """
        if trans.user_is_bootstrap_admin:
            raise exceptions.RealUserRequiredException(
                "Only real users can create a new library file or folder.")
        if 'create_type' not in payload:
            raise exceptions.RequestParameterMissingException(
                "Missing required 'create_type' parameter.")
        create_type = payload.pop('create_type')
        if create_type not in ('file', 'folder', 'collection'):
            raise exceptions.RequestParameterInvalidException(
                f"Invalid value for 'create_type' parameter ( {create_type} ) specified."
            )
        if 'upload_option' in payload and payload['upload_option'] not in (
                'upload_file', 'upload_directory', 'upload_paths'):
            raise exceptions.RequestParameterInvalidException(
                f"Invalid value for 'upload_option' parameter ( {payload['upload_option']} ) specified."
            )
        if 'folder_id' not in payload:
            raise exceptions.RequestParameterMissingException(
                "Missing required 'folder_id' parameter.")
        folder_id = payload.pop('folder_id')
        _, folder_id = self._decode_library_content_id(folder_id)
        # security is checked in the downstream controller
        parent = self.get_library_folder(trans,
                                         folder_id,
                                         check_ownership=False,
                                         check_accessible=False)
        # The rest of the security happens in the library_common controller.
        real_folder_id = trans.security.encode_id(parent.id)

        payload['tag_using_filenames'] = util.string_as_bool(
            payload.get('tag_using_filenames', None))
        payload['tags'] = util.listify(payload.get('tags', None))

        # are we copying an HDA to the library folder?
        #   we'll need the id and any message to attach, then branch to that private function
        from_hda_id, from_hdca_id, ldda_message = (payload.pop(
            'from_hda_id',
            None), payload.pop('from_hdca_id',
                               None), payload.pop('ldda_message', ''))
        if create_type == 'file':
            if from_hda_id:
                return self._copy_hda_to_library_folder(
                    trans, self.hda_manager, self.decode_id(from_hda_id),
                    real_folder_id, ldda_message)
            if from_hdca_id:
                return self._copy_hdca_to_library_folder(
                    trans, self.hda_manager, self.decode_id(from_hdca_id),
                    real_folder_id, ldda_message)

        # check for extended metadata, store it and pop it out of the param
        # otherwise sanitize_param will have a fit
        ex_meta_payload = payload.pop('extended_metadata', None)

        # Now create the desired content object, either file or folder.
        if create_type == 'file':
            status, output = self._upload_library_dataset(
                trans, library_id, real_folder_id, **payload)
        elif create_type == 'folder':
            status, output = self._create_folder(trans, real_folder_id,
                                                 library_id, **payload)
        elif create_type == 'collection':
            # Not delegating to library_common, so need to check access to parent
            # folder here.
            self.check_user_can_add_to_library_item(trans,
                                                    parent,
                                                    check_accessible=True)
            create_params = api_payload_to_create_params(payload)
            create_params['parent'] = parent
            dataset_collection_manager = trans.app.dataset_collection_manager
            dataset_collection_instance = dataset_collection_manager.create(
                **create_params)
            return [
                dictify_dataset_collection_instance(
                    dataset_collection_instance,
                    security=trans.security,
                    url_builder=trans.url_builder,
                    parent=parent)
            ]
        if status != 200:
            trans.response.status = status
            return output
        else:
            rval = []
            for v in output.values():
                if ex_meta_payload is not None:
                    # If there is extended metadata, store it, attach it to the dataset, and index it
                    ex_meta = ExtendedMetadata(ex_meta_payload)
                    trans.sa_session.add(ex_meta)
                    v.extended_metadata = ex_meta
                    trans.sa_session.add(v)
                    trans.sa_session.flush()
                    for path, value in self._scan_json_block(ex_meta_payload):
                        meta_i = ExtendedMetadataIndex(ex_meta, path, value)
                        trans.sa_session.add(meta_i)
                    trans.sa_session.flush()
                if type(v) == trans.app.model.LibraryDatasetDatasetAssociation:
                    v = v.library_dataset
                encoded_id = trans.security.encode_id(v.id)
                if create_type == 'folder':
                    encoded_id = f"F{encoded_id}"
                rval.append(
                    dict(id=encoded_id,
                         name=v.name,
                         url=url_for('library_content',
                                     library_id=library_id,
                                     id=encoded_id)))
            return rval
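This newer variant also accepts ``tags`` and ``tag_using_filenames`` and can create a collection directly inside a folder. A payload sketch; the ids are placeholders and the collection fields (``collection_type``, ``name``) are assumed to match what ``api_payload_to_create_params`` consumed in Example #1.

    create_collection_payload = {
        "create_type": "collection",
        "folder_id": "Fcba2cbe1f23c8e25",   # encoded parent folder id (placeholder)
        "collection_type": "list",          # as documented in Example #1
        "name": "my library collection",
        "tags": ["rna-seq", "2024"],        # tags to create on the datasets
        "tag_using_filenames": False,       # don't derive tags from file names
    }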
Example #10
        ex_meta_payload = payload.pop('extended_metadata', None)

        # Now create the desired content object, either file or folder.
        if create_type == 'file':
            status, output = trans.webapp.controllers['library_common'].upload_library_dataset( trans, 'api', library_id, real_folder_id, **payload )
        elif create_type == 'folder':
            status, output = trans.webapp.controllers['library_common'].create_folder( trans, 'api', real_folder_id, library_id, **payload )
        elif create_type == 'collection':
            # Not delegating to library_common, so need to check access to parent
            # folder here.
            self.check_user_can_add_to_library_item( trans, parent, check_accessible=True )
            create_params = api_payload_to_create_params( payload )
            create_params[ 'parent' ] = parent
            service = trans.app.dataset_collections_service
            dataset_collection_instance = service.create( **create_params )
            return [ dictify_dataset_collection_instance( dataset_collection_instance, security=trans.security, parent=parent ) ]
        if status != 200:
            trans.response.status = status
            return output
        else:
            rval = []
            for v in output.values():
                if ex_meta_payload is not None:
                    # If there is extended metadata, store it, attach it to the dataset, and index it
                    ex_meta = ExtendedMetadata(ex_meta_payload)
                    trans.sa_session.add( ex_meta )
                    v.extended_metadata = ex_meta
                    trans.sa_session.add(v)
                    trans.sa_session.flush()
                    for path, value in self._scan_json_block(ex_meta_payload):
                        meta_i = ExtendedMetadataIndex(ex_meta, path, value)
Example #11
    def _create(self, trans, payload, **kwd):
        action = payload.get('action', None)
        if action == 'rerun':
            raise Exception("'rerun' action has been deprecated")

        # Get tool.
        tool_version = payload.get('tool_version', None)
        tool_id = payload.get('tool_id', None)
        tool_uuid = payload.get('tool_uuid', None)
        get_kwds = dict(
            tool_id=tool_id,
            tool_uuid=tool_uuid,
            tool_version=tool_version,
        )
        if tool_id is None and tool_uuid is None:
            raise exceptions.RequestParameterMissingException("Must specify either a tool_id or a tool_uuid.")

        tool = trans.app.toolbox.get_tool(**get_kwds)
        if not tool or not tool.allow_user_access(trans.user):
            raise exceptions.MessageException('Tool not found or not accessible.')
        if trans.app.config.user_activation_on:
            if not trans.user:
                log.warning("Anonymous user attempts to execute tool, but account activation is turned on.")
            elif not trans.user.active:
                log.warning("User \"%s\" attempts to execute tool, but account activation is turned on and user account is not active." % trans.user.email)

        # Set running history from payload parameters.
        # History not set correctly as part of this API call for
        # dataset upload.
        history_id = payload.get('history_id', None)
        if history_id:
            decoded_id = self.decode_id(history_id)
            target_history = self.history_manager.get_owned(decoded_id, trans.user, current_history=trans.history)
        else:
            target_history = None

        # Set up inputs.
        inputs = payload.get('inputs', {})
        if not isinstance(inputs, dict):
            raise exceptions.RequestParameterInvalidException("inputs invalid %s" % inputs)

        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.items():
            if k.startswith('files_') or k.startswith('__files_'):
                inputs[k] = v

        # for inputs that are coming from the Library, copy them into the history
        self._patch_library_inputs(trans, inputs, target_history)

        # TODO: encode data ids and decode ids.
        # TODO: handle dbkeys
        params = util.Params(inputs, sanitize=False)
        incoming = params.__dict__

        # use_cached_job can be passed in via the top-level payload or among the tool inputs.
        # I think it should be a top-level parameter, but because the selector is implemented
        # as a regular tool parameter we accept both.
        use_cached_job = payload.get('use_cached_job', False) or util.string_as_bool(inputs.get('use_cached_job', 'false'))
        vars = tool.handle_input(trans, incoming, history=target_history, use_cached_job=use_cached_job)

        # TODO: check for errors and ensure that output dataset(s) are available.
        output_datasets = vars.get('out_data', [])
        rval = {'outputs': [], 'output_collections': [], 'jobs': [], 'implicit_collections': []}
        rval['produces_entry_points'] = tool.produces_entry_points
        job_errors = vars.get('job_errors', [])
        if job_errors:
            # If we are here - some jobs were successfully executed but some failed.
            rval['errors'] = job_errors

        outputs = rval['outputs']
        # TODO:?? poss. only return ids?
        for output_name, output in output_datasets:
            output_dict = output.to_dict()
            # add the output name back into the output data structure
            # so it's possible to figure out which newly created elements
            # correspond with which tool file outputs
            output_dict['output_name'] = output_name
            outputs.append(trans.security.encode_dict_ids(output_dict, skip_startswith="metadata_"))

        new_pja_flush = False
        for job in vars.get('jobs', []):
            rval['jobs'].append(self.encode_all_ids(trans, job.to_dict(view='collection'), recursive=True))
            if inputs.get('send_email_notification', False):
                # Unless an anonymous user is invoking this via the API it
                # should never be an option, but check and enforce that here
                if trans.user is None:
                    raise exceptions.ToolExecutionError("Anonymously run jobs cannot send an email notification.")
                else:
                    job_email_action = trans.model.PostJobAction('EmailAction')
                    job.add_post_job_action(job_email_action)
                    new_pja_flush = True

        if new_pja_flush:
            trans.sa_session.flush()

        for output_name, collection_instance in vars.get('output_collections', []):
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['output_collections'].append(output_dict)

        for output_name, collection_instance in vars.get('implicit_collections', {}).items():
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['implicit_collections'].append(output_dict)

        return rval
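Two details of this version are easy to miss: ``use_cached_job`` is honoured whether it arrives at the top level of the payload or among the tool inputs, and ``send_email_notification`` in the inputs attaches an EmailAction post-job action (rejected for anonymous users). A payload sketch with placeholder ids:

    payload = {
        "tool_id": "cat1",                       # placeholder tool id
        "history_id": "f2db41e1fa331b3e",        # placeholder encoded history id
        "use_cached_job": True,                  # top-level form ...
        "inputs": {
            "use_cached_job": "true",            # ... or as a regular tool input; both are accepted
            "send_email_notification": True,     # adds an EmailAction to every submitted job
            "input1": {"src": "hda", "id": "bbd44e69cb8906b5"},   # placeholder tool input
        },
    }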
Example #12
 def __collection_dict(self, trans, dataset_collection_instance, **kwds):
     return dictify_dataset_collection_instance(dataset_collection_instance,
         security=trans.security, parent=dataset_collection_instance.history, **kwds)
Example #13
    def create(self, trans, payload, **kwd):
        """
        POST /api/tools
        Executes tool using specified inputs and returns tool's outputs.
        """
        # HACK: for now, if action is rerun, rerun tool.
        action = payload.get('action', None)
        if action == 'rerun':
            return self._rerun_tool(trans, payload, **kwd)

        # -- Execute tool. --

        # Get tool.
        tool_version = payload.get('tool_version', None)
        tool = trans.app.toolbox.get_tool(
            payload['tool_id'], tool_version) if 'tool_id' in payload else None
        if not tool or not tool.allow_user_access(trans.user):
            raise exceptions.MessageException(
                'Tool not found or not accessible.')

        # Set running history from payload parameters.
        # History not set correctly as part of this API call for
        # dataset upload.
        history_id = payload.get('history_id', None)
        if history_id:
            decoded_id = self.decode_id(history_id)
            target_history = self.history_manager.get_owned(
                decoded_id, trans.user, current_history=trans.history)
        else:
            target_history = None

        # Set up inputs.
        inputs = payload.get('inputs', {})
        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.iteritems():
            if k.startswith('files_') or k.startswith('__files_'):
                inputs[k] = v

        # for inputs that are coming from the Library, copy them into the history
        input_patch = {}
        for k, v in inputs.iteritems():
            if isinstance(v, dict) and v.get('src',
                                             '') == 'ldda' and 'id' in v:
                ldda = trans.sa_session.query(
                    trans.app.model.LibraryDatasetDatasetAssociation).get(
                        self.decode_id(v['id']))
                if trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                        trans.get_current_user_roles(), ldda.dataset):
                    input_patch[k] = ldda.to_history_dataset_association(
                        target_history, add_to_history=True)

        for k, v in input_patch.iteritems():
            inputs[k] = v

        # TODO: encode data ids and decode ids.
        # TODO: handle dbkeys
        params = util.Params(inputs, sanitize=False)
        incoming = params.__dict__
        vars = tool.handle_input(trans,
                                 incoming,
                                 history=target_history,
                                 source='json')

        # TODO: check for errors and ensure that output dataset(s) are available.
        output_datasets = vars.get('out_data', [])
        rval = {
            'outputs': [],
            'output_collections': [],
            'jobs': [],
            'implicit_collections': []
        }

        job_errors = vars.get('job_errors', [])
        if job_errors:
            # If we are here - some jobs were successfully executed but some failed.
            rval['errors'] = job_errors

        outputs = rval['outputs']
        # TODO:?? poss. only return ids?
        for output_name, output in output_datasets:
            output_dict = output.to_dict()
            # add the output name back into the output data structure
            # so it's possible to figure out which newly created elements
            # correspond with which tool file outputs
            output_dict['output_name'] = output_name
            outputs.append(trans.security.encode_dict_ids(output_dict))

        for job in vars.get('jobs', []):
            rval['jobs'].append(
                self.encode_all_ids(trans,
                                    job.to_dict(view='collection'),
                                    recursive=True))

        for output_name, collection_instance in vars.get(
                'output_collections', []):
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(
                collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['output_collections'].append(output_dict)

        for output_name, collection_instance in vars.get(
                'implicit_collections', {}).iteritems():
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(
                collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['implicit_collections'].append(output_dict)

        return rval
Example #14
    def _create(self, trans, payload, **kwd):
        # HACK: for now, if action is rerun, rerun tool.
        action = payload.get('action', None)
        if action == 'rerun':
            return self._rerun_tool(trans, payload, **kwd)

        # -- Execute tool. --

        # Get tool.
        tool_version = payload.get('tool_version', None)
        tool = trans.app.toolbox.get_tool(
            payload['tool_id'], tool_version) if 'tool_id' in payload else None
        if not tool or not tool.allow_user_access(trans.user):
            raise exceptions.MessageException(
                'Tool not found or not accessible.')
        if trans.app.config.user_activation_on:
            if not trans.user:
                log.warning(
                    "Anonymous user attempts to execute tool, but account activation is turned on."
                )
            elif not trans.user.active:
                log.warning(
                    "User \"%s\" attempts to execute tool, but account activation is turned on and user account is not active."
                    % trans.user.email)

        # Set running history from payload parameters.
        # History not set correctly as part of this API call for
        # dataset upload.
        history_id = payload.get('history_id', None)
        if history_id:
            decoded_id = self.decode_id(history_id)
            target_history = self.history_manager.get_owned(
                decoded_id, trans.user, current_history=trans.history)
        else:
            target_history = None

        # Set up inputs.
        inputs = payload.get('inputs', {})
        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.items():
            if k.startswith('files_') or k.startswith('__files_'):
                inputs[k] = v

        # for inputs that are coming from the Library, copy them into the history
        input_patch = {}
        for k, v in inputs.items():
            if isinstance(v, dict) and v.get('src',
                                             '') == 'ldda' and 'id' in v:
                ldda = trans.sa_session.query(
                    trans.app.model.LibraryDatasetDatasetAssociation).get(
                        self.decode_id(v['id']))
                if trans.user_is_admin(
                ) or trans.app.security_agent.can_access_dataset(
                        trans.get_current_user_roles(), ldda.dataset):
                    input_patch[k] = ldda.to_history_dataset_association(
                        target_history, add_to_history=True)

        for k, v in input_patch.items():
            inputs[k] = v

        # TODO: encode data ids and decode ids.
        # TODO: handle dbkeys
        params = util.Params(inputs, sanitize=False)
        incoming = params.__dict__

        # use_cached_job can be passed in via the top-level payload or among the tool inputs.
        # I think it should be a top-level parameter, but because the selector is implemented
        # as a regular tool parameter we accept both.
        use_cached_job = payload.get('use_cached_job',
                                     False) or util.string_as_bool(
                                         inputs.get('use_cached_job', 'false'))
        vars = tool.handle_input(trans,
                                 incoming,
                                 history=target_history,
                                 use_cached_job=use_cached_job)

        # TODO: check for errors and ensure that output dataset(s) are available.
        output_datasets = vars.get('out_data', [])
        rval = {
            'outputs': [],
            'output_collections': [],
            'jobs': [],
            'implicit_collections': []
        }

        job_errors = vars.get('job_errors', [])
        if job_errors:
            # If we are here - some jobs were successfully executed but some failed.
            rval['errors'] = job_errors

        outputs = rval['outputs']
        # TODO:?? poss. only return ids?
        for output_name, output in output_datasets:
            output_dict = output.to_dict()
            # add the output name back into the output data structure
            # so it's possible to figure out which newly created elements
            # correspond with which tool file outputs
            output_dict['output_name'] = output_name
            outputs.append(
                trans.security.encode_dict_ids(output_dict,
                                               skip_startswith="metadata_"))

        for job in vars.get('jobs', []):
            rval['jobs'].append(
                self.encode_all_ids(trans,
                                    job.to_dict(view='collection'),
                                    recursive=True))

        for output_name, collection_instance in vars.get(
                'output_collections', []):
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(
                collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['output_collections'].append(output_dict)

        for output_name, collection_instance in vars.get(
                'implicit_collections', {}).items():
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(
                collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['implicit_collections'].append(output_dict)

        return rval
 def __collection_dict( self, trans, dataset_collection_instance, view="collection" ):
     return dictify_dataset_collection_instance( dataset_collection_instance,
         security=trans.security, parent=dataset_collection_instance.history, view=view )
Example #16
    def create( self, trans, payload, **kwd ):
        """
        POST /api/tools
        Executes tool using specified inputs and returns tool's outputs.
        """
        # HACK: for now, if action is rerun, rerun tool.
        action = payload.get( 'action', None )
        if action == 'rerun':
            return self._rerun_tool( trans, payload, **kwd )

        # -- Execute tool. --

        # Get tool.
        tool = trans.app.toolbox.get_tool( payload[ 'tool_id' ] ) if 'tool_id' in payload else None
        if not tool or not tool.allow_user_access( trans.user ):
            trans.response.status = 404
            return { "message": { "type": "error", "text" : trans.app.model.Dataset.conversion_messages.NO_TOOL } }

        # Set running history from payload parameters.
        # History not set correctly as part of this API call for
        # dataset upload.
        history_id = payload.get("history_id", None)
        if history_id:
            target_history = self.get_history( trans, history_id )
        else:
            target_history = None

        # Set up inputs.
        inputs = payload.get( 'inputs', {} )
        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.iteritems():
            if k.startswith("files_") or k.startswith("__files_"):
                inputs[k] = v

        #for inputs that are coming from the Library, copy them into the history
        input_patch = {}
        for k, v in inputs.iteritems():
            if  isinstance(v, dict) and v.get('src', '') == 'ldda' and 'id' in v:
                ldda = trans.sa_session.query( trans.app.model.LibraryDatasetDatasetAssociation ).get( trans.security.decode_id(v['id']) )
                if trans.user_is_admin() or trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), ldda.dataset ):
                    input_patch[k] = ldda.to_history_dataset_association(target_history, add_to_history=True)

        for k, v in input_patch.iteritems():
            inputs[k] = v

        # HACK: add run button so that tool.handle_input will run tool.
        inputs['runtool_btn'] = 'Execute'
        # TODO: encode data ids and decode ids.
        # TODO: handle dbkeys
        params = util.Params( inputs, sanitize=False )
        # process_state will be 'populate' or 'update'. When no tool
        # state is specified in input - it will be 'populate', and
        # tool will fully expand repeat and conditionals when building
        # up state. If tool state is found in input
        # parameters, process_state will be 'update' and complex
        # submissions (with repeats and conditionals) must be built up
        # over several iterative calls to the API - mimicking behavior
        # of web controller (though frankly API never returns
        # tool_state so this "legacy" behavior is probably impossible
        # through API currently).
        incoming = params.__dict__
        process_state = "update" if "tool_state" in incoming else "populate"
        template, vars = tool.handle_input( trans, incoming, history=target_history, process_state=process_state, source="json" )
        if 'errors' in vars:
            trans.response.status = 400
            return { "message": { "type": "error", "data" : vars[ 'errors' ] } }

        # TODO: check for errors and ensure that output dataset(s) are available.
        output_datasets = vars.get( 'out_data', [] )
        rval = {
            "outputs": [],
            "jobs": [],
            "implicit_collections": [],
        }

        job_errors = vars.get( 'job_errors', [] )
        if job_errors:
            # If we are here - some jobs were successfully executed but some failed.
            rval[ "errors" ] = job_errors

        outputs = rval[ "outputs" ]
        #TODO:?? poss. only return ids?
        for output_name, output in output_datasets:
            output_dict = output.to_dict()
            #add the output name back into the output data structure
            #so it's possible to figure out which newly created elements
            #correspond with which tool file outputs
            output_dict[ 'output_name' ] = output_name
            outputs.append( trans.security.encode_dict_ids( output_dict ) )

        for job in vars.get('jobs', []):
            rval[ 'jobs' ].append( self.encode_all_ids( trans, job.to_dict( view='collection' ), recursive=True ) )

        for output_name, collection_instance in vars.get( 'implicit_collections', {} ).iteritems():
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance( collection_instance, security=trans.security, parent=history )
            output_dict[ 'output_name' ] = output_name
            rval[ 'implicit_collections' ].append( output_dict )

        return rval
    def create(self, trans, library_id, payload, **kwd):
        """
        create( self, trans, library_id, payload, **kwd )
        * POST /api/libraries/{library_id}/contents:
            create a new library file or folder

        To copy an HDA into a library send ``create_type`` of 'file' and
        the HDA's encoded id in ``from_hda_id`` (and optionally ``ldda_message``).

        :type   library_id: str
        :param  library_id: the encoded id of the library in which to create the new item
        :type   payload:    dict
        :param  payload:    dictionary structure containing:

            * folder_id:    the encoded id of the parent folder of the new item
            * create_type:  the type of item to create ('file', 'folder' or 'collection')
            * from_hda_id:  (optional, only if create_type is 'file') the
                encoded id of an accessible HDA to copy into the library
            * ldda_message: (optional) the new message attribute of the LDDA created
            * extended_metadata: (optional) sub-dictionary containing any extended
                metadata to associate with the item
            * upload_option: (optional) one of 'upload_file' (default), 'upload_directory' or 'upload_paths'
            * server_dir: (optional, only if upload_option is
                'upload_directory') relative path of the subdirectory of Galaxy
                ``library_import_dir`` to upload. All and only the files (i.e.
                no subdirectories) contained in the specified directory will be
                uploaded.
            * filesystem_paths: (optional, only if upload_option is
                'upload_paths' and the user is an admin) file paths on the
                Galaxy server to upload to the library, one file per line
            * link_data_only: (optional, only when upload_option is
                'upload_directory' or 'upload_paths') either 'copy_files'
                (default) or 'link_to_files'. Setting to 'link_to_files'
                symlinks instead of copying the files
            * name: (optional, only if create_type is 'folder') name of the
                folder to create
            * description: (optional, only if create_type is 'folder')
                description of the folder to create

        :rtype:     dict
        :returns:   a dictionary containing the id, name,
            and 'show' url of the new item
        """
        if 'create_type' not in payload:
            trans.response.status = 400
            return "Missing required 'create_type' parameter."
        else:
            create_type = payload.pop('create_type')
        if create_type not in ('file', 'folder', 'collection'):
            trans.response.status = 400
            return "Invalid value for 'create_type' parameter ( %s ) specified." % create_type

        if 'folder_id' not in payload:
            trans.response.status = 400
            return "Missing required 'folder_id' parameter."
        else:
            folder_id = payload.pop('folder_id')
            class_name, folder_id = self.__decode_library_content_id(folder_id)
        try:
            # security is checked in the downstream controller
            parent = self.get_library_folder(trans,
                                             folder_id,
                                             check_ownership=False,
                                             check_accessible=False)
        except Exception as e:
            return str(e)
        # The rest of the security happens in the library_common controller.
        real_folder_id = trans.security.encode_id(parent.id)

        # are we copying an HDA to the library folder?
        #   we'll need the id and any message to attach, then branch to that private function
        from_hda_id, ldda_message = (payload.pop('from_hda_id', None),
                                     payload.pop('ldda_message', ''))
        if create_type == 'file' and from_hda_id:
            return self._copy_hda_to_library_folder(trans, from_hda_id,
                                                    library_id, real_folder_id,
                                                    ldda_message)

        # check for extended metadata, store it and pop it out of the param
        # otherwise sanitize_param will have a fit
        ex_meta_payload = payload.pop('extended_metadata', None)

        # Now create the desired content object, either file or folder.
        if create_type == 'file':
            status, output = trans.webapp.controllers[
                'library_common'].upload_library_dataset(
                    trans, 'api', library_id, real_folder_id, **payload)
        elif create_type == 'folder':
            status, output = trans.webapp.controllers[
                'library_common'].create_folder(trans, 'api', real_folder_id,
                                                library_id, **payload)
        elif create_type == 'collection':
            # Not delegating to library_common, so need to check access to parent
            # folder here.
            self.check_user_can_add_to_library_item(trans,
                                                    parent,
                                                    check_accessible=True)
            create_params = api_payload_to_create_params(payload)
            create_params['parent'] = parent
            service = trans.app.dataset_collections_service
            dataset_collection_instance = service.create(**create_params)
            return [
                dictify_dataset_collection_instance(
                    dataset_collection_instance,
                    security=trans.security,
                    parent=parent)
            ]
        if status != 200:
            trans.response.status = status
            return output
        else:
            rval = []
            for v in output.values():
                if ex_meta_payload is not None:
                    # If there is extended metadata, store it, attach it to the dataset, and index it
                    ex_meta = ExtendedMetadata(ex_meta_payload)
                    trans.sa_session.add(ex_meta)
                    v.extended_metadata = ex_meta
                    trans.sa_session.add(v)
                    trans.sa_session.flush()
                    for path, value in self._scan_json_block(ex_meta_payload):
                        meta_i = ExtendedMetadataIndex(ex_meta, path, value)
                        trans.sa_session.add(meta_i)
                    trans.sa_session.flush()
                if type(v) == trans.app.model.LibraryDatasetDatasetAssociation:
                    v = v.library_dataset
                encoded_id = trans.security.encode_id(v.id)
                if create_type == 'folder':
                    encoded_id = 'F' + encoded_id
                rval.append(
                    dict(id=encoded_id,
                         name=v.name,
                         url=url_for('library_content',
                                     library_id=library_id,
                                     id=encoded_id)))
            return rval
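Finally, the ``extended_metadata`` sub-dictionary that several of the library examples pop off the payload is stored as an ExtendedMetadata object, and every leaf of the JSON block is indexed via ``_scan_json_block`` as a path/value pair. A payload sketch; the folder id, paths and metadata values are placeholders, and the upload fields are taken from the docstring above.

    payload = {
        "create_type": "file",
        "folder_id": "Fcba2cbe1f23c8e25",                    # placeholder encoded folder id
        "upload_option": "upload_paths",                     # admin-only option from the docstring
        "filesystem_paths": "/data/staging/sample1.fastq",   # one path per line (placeholder)
        "link_data_only": "link_to_files",                   # symlink instead of copying
        "extended_metadata": {
            # Arbitrary JSON; each leaf becomes an ExtendedMetadataIndex entry,
            # roughly a path such as 'sample/organism' with value 'Homo sapiens'.
            "sample": {"organism": "Homo sapiens", "replicate": 2},
            "pipeline": "rnaseq-v2",
        },
    }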