Example #1
    def __walk_step_dicts(self, data):
        """ Walk over the supplid step dictionaries and return them in a way designed
        to preserve step order when possible.
        """
        supplied_steps = data['steps']
        # Try to iterate through imported workflow in such a way as to
        # preserve step order.
        step_indices = list(supplied_steps.keys())
        try:
            step_indices = sorted(step_indices, key=int)
        except ValueError:
            # Probably too defensive: were these ever, or will they ever, not be integers?
            pass

        discovered_labels = set()
        discovered_uuids = set()

        discovered_output_labels = set()
        discovered_output_uuids = set()

        # First pass to build step objects and populate basic values
        for step_index in step_indices:
            step_dict = supplied_steps[step_index]
            uuid = step_dict.get("uuid", None)
            if uuid and uuid != "None":
                if uuid in discovered_uuids:
                    raise exceptions.DuplicatedIdentifierException(
                        "Duplicate step UUID in request.")
                discovered_uuids.add(uuid)
            label = step_dict.get("label", None)
            if label:
                if label in discovered_labels:
                    raise exceptions.DuplicatedIdentifierException(
                        "Duplicated step label in request.")
                discovered_labels.add(label)

            if 'workflow_outputs' in step_dict:
                outputs = step_dict['workflow_outputs']
                # outputs may be a list of names (deprecated legacy behavior)
                # or dictionary of names to {uuid: <uuid>, label: <label>}
                if isinstance(outputs, dict):
                    for output_name in outputs:
                        output_dict = outputs[output_name]
                        output_label = output_dict.get("label", None)
                        if output_label:
                            if output_label in discovered_output_labels:
                                raise exceptions.DuplicatedIdentifierException(
                                    "Duplicated workflow output label in request."
                                )
                            discovered_output_labels.add(output_label)

                        output_uuid = output_dict.get("uuid", None)
                        if output_uuid:
                            if output_uuid in discovered_output_uuids:
                                raise exceptions.DuplicatedIdentifierException(
                                    "Duplicate workflow output UUID in request."
                                )
                            discovered_output_uuids.add(output_uuid)

            yield step_dict
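
For orientation, here is a minimal sketch of the input this generator expects: step indices are strings that parse as integers (hence sorted(..., key=int)), and workflow_outputs entries follow the dict form described in the comment above. The UUIDs, labels, and step contents below are invented for illustration and are not taken from the source.

    # Illustrative payload only; identifiers and labels are made up.
    data = {
        'steps': {
            "1": {
                "uuid": "00000000-0000-0000-0000-000000000001",
                "label": "trim_reads",
                "workflow_outputs": {
                    "out_file1": {"uuid": "00000000-0000-0000-0000-000000000002",
                                  "label": "trimmed"},
                },
            },
            "0": {"uuid": "00000000-0000-0000-0000-000000000003",
                  "label": "input_fastq"},
        },
    }

    # Inside the owning class, steps come back in index order ("0" then "1"),
    # and DuplicatedIdentifierException is raised on repeated labels or UUIDs:
    #     for step_dict in self.__walk_step_dicts(data):
    #         ...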
Example #2
    def __walk_step_dicts(self, data):
        """ Walk over the supplid step dictionaries and return them in a way designed
        to preserve step order when possible.
        """
        supplied_steps = data['steps']
        # Try to iterate through imported workflow in such a way as to
        # preserve step order.
        step_indices = supplied_steps.keys()
        try:
            step_indices = sorted(step_indices, key=int)
        except ValueError:
            # Probably too defensive: were these ever, or will they ever, not be integers?
            pass

        discovered_labels = set()
        discovered_uuids = set()

        # First pass to build step objects and populate basic values
        for step_index in step_indices:
            step_dict = supplied_steps[step_index]
            uuid = step_dict.get("uuid", None)
            if uuid and uuid != "None":
                if uuid in discovered_uuids:
                    raise exceptions.DuplicatedIdentifierException(
                        "Duplicate step UUID in request.")
                discovered_uuids.add(uuid)
            label = step_dict.get("label", None)
            if label:
                if label in discovered_labels:
                    raise exceptions.DuplicatedIdentifierException(
                        "Duplicated step label in request.")
                discovered_labels.add(label)

            yield step_dict
Example #3
    def add_custom_builds(self, trans, id, key, payload={}, **kwd):
        """
        PUT /api/users/{id}/custom_builds/{key}
        Add a new custom build.

        :param id: the encoded id of the user
        :type  id: str

        :param key: custom build key
        :type  key: str

        :param payload: data with new build details
        :type  payload: dict
        """
        user = self._get_user(trans, id)
        dbkeys = json.loads(user.preferences['dbkeys']) if 'dbkeys' in user.preferences else {}
        name = payload.get('name')
        len_type = payload.get('len|type')
        len_value = payload.get('len|value')
        if len_type not in ['file', 'fasta', 'text'] or not len_value:
            raise exceptions.RequestParameterInvalidException(
                'Please specify a valid data source type.')
        if not name or not key:
            raise exceptions.RequestParameterMissingException(
                'You must specify values for all the fields.')
        elif key in dbkeys:
            raise exceptions.DuplicatedIdentifierException(
                'There is already a custom build with that key. Delete it first if you want to replace it.'
            )
        else:
            # Have everything needed; create new build.
            build_dict = {'name': name}
            if len_type in ['text', 'file']:
                # Create new len file
                new_len = trans.app.model.HistoryDatasetAssociation(
                    extension='len',
                    create_dataset=True,
                    sa_session=trans.sa_session)
                trans.sa_session.add(new_len)
                new_len.name = name
                new_len.visible = False
                new_len.state = trans.app.model.Job.states.OK
                new_len.info = 'custom build .len file'
                try:
                    trans.app.object_store.create(new_len.dataset)
                except ObjectInvalid:
                    raise exceptions.InternalServerError(
                        'Unable to create output dataset: object store is full.'
                    )
                trans.sa_session.flush()
                counter = 0
                lines_skipped = 0
                with open(new_len.file_name, 'w') as f:
                    # LEN files have format:
                    #   <chrom_name><tab><chrom_length>
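                    #   e.g. "chr1\t248956422"  (example line; value shown is illustrative)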
                    for line in len_value.split('\n'):
                        # Splits at the last whitespace in the line
                        lst = line.strip().rsplit(None, 1)
                        if not lst or len(lst) < 2:
                            lines_skipped += 1
                            continue
                        chrom, length = lst[0], lst[1]
                        try:
                            length = int(length)
                        except ValueError:
                            lines_skipped += 1
                            continue
                        if chrom != escape(chrom):
                            build_dict['message'] = (
                                'Invalid chromosome(s) with HTML detected and skipped.')
                            lines_skipped += 1
                            continue
                        counter += 1
                        f.write('{}\t{}\n'.format(chrom, length))
                build_dict['len'] = new_len.id
                build_dict['count'] = counter
            else:
                build_dict['fasta'] = trans.security.decode_id(len_value)
                dataset = trans.sa_session.query(
                    trans.app.model.HistoryDatasetAssociation).get(
                        build_dict['fasta'])
                try:
                    new_len = dataset.get_converted_dataset(trans, 'len')
                    new_linecount = new_len.get_converted_dataset(
                        trans, 'linecount')
                    build_dict['len'] = new_len.id
                    build_dict['linecount'] = new_linecount.id
                except Exception:
                    raise exceptions.ToolExecutionError(
                        'Failed to convert dataset.')
            dbkeys[key] = build_dict
            user.preferences['dbkeys'] = json.dumps(dbkeys)
            trans.sa_session.flush()
            return build_dict
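
As a usage sketch, one way a client might exercise this endpoint for the 'text' data source type is shown below; the URL, user id, build key, and chromosome lines are invented, and only the route shape comes from the docstring above. Authentication is shown with an x-api-key header, so adjust it to however your Galaxy instance expects API keys to be supplied.

    import requests

    # Hypothetical values; nothing here is taken from the source beyond the route shape.
    galaxy_url = "https://galaxy.example.org"
    api_key = "REPLACE_WITH_A_REAL_API_KEY"
    user_id = "f2db41e1fa331b3e"   # encoded user id (made up)
    build_key = "myOrg1"

    payload = {
        "name": "My organism build 1",
        "len|type": "text",
        # One "<chrom><whitespace><length>" pair per line, as parsed above.
        "len|value": "chr1 1000000\nchr2 800000",
    }

    resp = requests.put(
        "{}/api/users/{}/custom_builds/{}".format(galaxy_url, user_id, build_key),
        json=payload,
        headers={"x-api-key": api_key},
    )
    print(resp.status_code, resp.json())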