Example #1
    def create( self, trans, payload, **kwd ):
        """
        * POST /api/dataset_collections:
            create a new dataset collection instance.

        :type   payload: dict
        :param  payload: (optional) dictionary structure containing:
            * collection_type: dataset collection type to create.
            * instance_type:   Instance type - 'history' or 'library'.
            * name:            the new dataset collection's name
            * datasets:        objects describing the datasets for the collection
        :rtype:     dict
        :returns:   element view of new dataset collection
        """
        # TODO: Error handling...
        create_params = api_payload_to_create_params( payload )
        instance_type = payload.pop( "instance_type", "history" )
        if instance_type == "history":
            history_id = payload.get( 'history_id' )
            history = self.get_history( trans, history_id, check_ownership=True, check_accessible=False )
            create_params[ "parent" ] = history
        elif instance_type == "library":
            folder_id = payload.get( 'folder_id' )
            library_folder = self.get_library_folder( trans, folder_id, check_accessible=True )
            self.check_user_can_add_to_library_item( trans, library_folder, check_accessible=False )
            create_params[ "parent" ] = library_folder
        else:
            trans.response.status = 501
            return
        dataset_collection_instance = self.__service( trans ).create( trans=trans, **create_params )
        return dictify_dataset_collection_instance( dataset_collection_instance, security=trans.security, parent=create_params[ "parent" ] )
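For reference, a request body for this endpoint might look like the sketch below. The key names come from the docstring above; the per-dataset structure and the encoded_* id values are assumptions for illustration, not confirmed by the source.

# Hypothetical payload for POST /api/dataset_collections (history instance).
# Key names follow the docstring above; the shape of each entry under
# 'datasets' and the encoded_* ids are assumptions, shown for illustration.
payload = {
    "collection_type": "list",         # e.g. 'list' or 'paired'
    "instance_type": "history",        # or 'library' (then supply 'folder_id')
    "name": "my new collection",
    "history_id": encoded_history_id,  # encoded id of the target history
    "datasets": [                      # objects describing the collection's datasets
        {"src": "hda", "id": encoded_hda_id, "name": "sample 1"},
    ],
}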
Example #2
    def show( self, trans, instance_type, id, **kwds ):
        """
        Return an element view of the dataset collection identified by ``id``,
        resolving its parent (a history or a library folder) from
        ``instance_type``.
        """
        dataset_collection_instance = self.__service( trans ).get(
            id=id,
            instance_type=instance_type,
        )
        if instance_type == 'history':
            parent = dataset_collection_instance.history
        elif instance_type == 'library':
            parent = dataset_collection_instance.folder
        else:
            trans.response.status = 501
            return
        return dictify_dataset_collection_instance( dataset_collection_instance, security=trans.security, parent=parent )
Example #3
            ex_meta_payload = payload.pop('extended_metadata')

        # Now create the desired content object, either file or folder.
        if create_type == 'file':
            status, output = trans.webapp.controllers['library_common'].upload_library_dataset( trans, 'api', library_id, real_folder_id, **payload )
        elif create_type == 'folder':
            status, output = trans.webapp.controllers['library_common'].create_folder( trans, 'api', real_folder_id, library_id, **payload )
        elif create_type == 'collection':
            # Not delegating to library_common, so need to check access to parent
            # folder here.
            self.check_user_can_add_to_library_item( trans, parent, check_accessible=True )
            create_params = api_payload_to_create_params( payload )
            create_params[ 'parent' ] = parent
            service = trans.app.dataset_collections_service
            dataset_collection_instance = service.create( **create_params )
            return [ dictify_dataset_collection_instance( dataset_collection_instance, security=trans.security, parent=parent ) ]
        if status != 200:
            trans.response.status = status
            return output
        else:
            rval = []
            for k, v in output.items():
                if ex_meta_payload is not None:
                    """
                    If there is extended metadata, store it, attach it to the dataset, and index it
                    """
                    ex_meta = ExtendedMetadata(ex_meta_payload)
                    trans.sa_session.add( ex_meta )
                    v.extended_metadata = ex_meta
                    trans.sa_session.add(v)
                    trans.sa_session.flush()
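A payload creating a collection through this library-contents endpoint might look like the following sketch; the field names are inferred from the branch above ('create_type', 'folder_id') and from Example #1, and the encoded ids are placeholders, so treat this as an assumption rather than the confirmed API shape.

# Hypothetical payload for the create_type == 'collection' branch above
# (field names inferred from the surrounding code; illustrative only).
payload = {
    "create_type": "collection",
    "folder_id": encoded_folder_id,    # parent folder, access-checked via
                                       # check_user_can_add_to_library_item
    "collection_type": "list",
    "name": "my library collection",
    "datasets": [{"src": "hda", "id": encoded_hda_id}],
}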
Example #4
    def create(self, trans, payload, **kwd):
        """
        POST /api/tools
        Executes tool using specified inputs and returns tool's outputs.
        """
        # HACK: for now, if action is rerun, rerun tool.
        action = payload.get('action', None)
        if action == 'rerun':
            return self._rerun_tool(trans, payload, **kwd)

        # -- Execute tool. --

        # Get tool.
        tool = trans.app.toolbox.get_tool(payload['tool_id']) if 'tool_id' in payload else None
        if not tool:
            trans.response.status = 404
            return {
                "message": {
                    "type": "error",
                    "text": trans.app.model.Dataset.conversion_messages.NO_TOOL
                }
            }

        # Set running history from payload parameters.
        # History not set correctly as part of this API call for
        # dataset upload.
        history_id = payload.get("history_id", None)
        if history_id:
            target_history = self.get_history(trans, history_id)
        else:
            target_history = None

        # Set up inputs.
        inputs = payload.get('inputs', {})
        # Find files coming in as multipart file data and add to inputs.
        for k, v in payload.items():
            if k.startswith("files_") or k.startswith("__files_"):
                inputs[k] = v

        # For inputs coming from the library, copy them into the history.
        input_patch = {}
        for k, v in inputs.items():
            if isinstance(v, dict) and v.get('src', '') == 'ldda' and 'id' in v:
                ldda = trans.sa_session.query(
                    trans.app.model.LibraryDatasetDatasetAssociation
                ).get(trans.security.decode_id(v['id']))
                if trans.user_is_admin() or trans.app.security_agent.can_access_dataset(
                        trans.get_current_user_roles(), ldda.dataset):
                    input_patch[k] = ldda.to_history_dataset_association(
                        target_history, add_to_history=True)

        for k, v in input_patch.items():
            inputs[k] = v

        # HACK: add run button so that tool.handle_input will run tool.
        inputs['runtool_btn'] = 'Execute'
        # TODO: encode data ids and decode ids.
        # TODO: handle dbkeys
        params = util.Params(inputs, sanitize=False)
        # process_state will be 'populate' or 'update'. When no tool state is
        # specified in the input it will be 'populate', and the tool will
        # fully expand repeats and conditionals when building up state. If
        # tool state is found in the input parameters, process_state will be
        # 'update' and complex submissions (with repeats and conditionals)
        # must be built up over several iterative calls to the API -
        # mimicking the behavior of the web controller (though frankly the
        # API never returns tool_state, so this "legacy" behavior is probably
        # impossible through the API currently).
        incoming = params.__dict__
        process_state = "update" if "tool_state" in incoming else "populate"
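        # Illustrative example (an assumption, not taken from the source):
        # a first submission whose incoming parameters carry no 'tool_state'
        # key takes the "populate" path above; only a resubmission echoing a
        # previously built tool_state would take the "update" path.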
        template, vars = tool.handle_input(trans,
                                           incoming,
                                           history=target_history,
                                           process_state=process_state,
                                           source="json")
        if 'errors' in vars:
            trans.response.status = 400
            return {"message": {"type": "error", "data": vars['errors']}}

        # TODO: check for errors and ensure that output dataset(s) are available.
        output_datasets = vars.get('out_data', [])
        rval = {
            "outputs": [],
            "jobs": [],
            "implicit_collections": [],
        }

        job_errors = vars.get('job_errors', [])
        if job_errors:
            # If we are here - some jobs were successfully executed but some failed.
            rval["errors"] = job_errors

        outputs = rval["outputs"]
        # TODO: possibly only return ids?
        for output_name, output in output_datasets:
            output_dict = output.to_dict()
            # add the output name back into the output data structure
            # so it's possible to figure out which newly created elements
            # correspond with which tool file outputs
            output_dict['output_name'] = output_name
            outputs.append(trans.security.encode_dict_ids(output_dict))

        for job in vars.get('jobs', []):
            rval['jobs'].append(
                self.encode_all_ids(trans,
                                    job.to_dict(view='collection'),
                                    recursive=True))

        for output_name, collection_instance in vars.get('implicit_collections', {}).items():
            history = target_history or trans.history
            output_dict = dictify_dataset_collection_instance(
                collection_instance, security=trans.security, parent=history)
            output_dict['output_name'] = output_name
            rval['implicit_collections'].append(output_dict)

        return rval
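Putting it together, a minimal request body for POST /api/tools might look like the sketch below; the tool id and input parameter names are placeholders (real input keys depend on the tool's parameter definitions), so this is an assumption rather than a confirmed example from the source.

# Hypothetical request body for POST /api/tools (tool id and input names are
# placeholders; real input keys depend on the tool being run).
payload = {
    "tool_id": "cat1",                 # assumed tool id, for illustration
    "history_id": encoded_history_id,  # history that receives the outputs
    "inputs": {
        "input1": {"src": "hda", "id": encoded_hda_id},
    },
}
# The handler injects inputs['runtool_btn'] = 'Execute' itself, and with no
# 'tool_state' supplied the call takes the "populate" path described above.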
Example #5
    def __collection_dict( self, trans, dataset_collection_instance, view="collection" ):
        return dictify_dataset_collection_instance( dataset_collection_instance,
                                                    security=trans.security,
                                                    parent=dataset_collection_instance.history,
                                                    view=view )