def query(cls, project=None, volume=None, state=None, offset=None, limit=None, api=None):
    """
    Query (List) exports.
    :param project: Optional project identifier.
    :param volume: Optional volume identifier.
    :param state: Optional export state.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: Collection object.
    """
    api = api or cls._API
    if project:
        project = Transform.to_project(project)
    if volume:
        volume = Transform.to_volume(volume)
    return super(Export, cls)._query(
        url=cls._URL['query'], project=project, volume=volume, state=state,
        offset=offset, limit=limit, fields='_all', api=api
    )
def submit_import(cls, volume, location, project, name=None, overwrite=False, api=None):
    """
    Submits new import job.
    :param volume: Volume identifier.
    :param location: Volume location.
    :param project: Project identifier.
    :param name: Optional file name.
    :param overwrite: If true it will overwrite file if exists.
    :param api: Api instance.
    :return: Import object.
    """
    # Use the same API-fallback idiom as the other resource methods.
    api = api or cls._API
    volume = Transform.to_volume(volume)
    project = Transform.to_project(project)
    destination = {'project': project}
    if name:
        destination['name'] = name
    data = {
        'source': {'volume': volume, 'location': location},
        'destination': destination,
        'overwrite': overwrite,
    }
    _import = api.post(cls._URL['query'], data=data).json()
    return Import(api=api, **_import)
def upload(cls, path, project, file_name=None, overwrite=False, retry=5, timeout=10, part_size=PartSize.UPLOAD_MINIMUM_PART_SIZE, wait=True, api=None):
    """
    Uploads a file using multipart upload and returns an upload handle
    if the wait parameter is set to False. If wait is set to True it will
    block until the upload is completed.

    :param path: File path on local disc.
    :param project: Project identifier
    :param file_name: Optional file name.
    :param overwrite: If true will overwrite the file on the server.
    :param retry: Number of retries if error occurs during upload.
    :param timeout: Timeout for http requests.
    :param part_size: Part size in bytes.
    :param wait: If true will wait for upload to complete.
    :param api: Api instance.
    :return: Upload handle.
    """
    api = api or cls._API
    project = Transform.to_project(project)
    upload = Upload(
        path, project, file_name=file_name, overwrite=overwrite,
        retry_count=retry, timeout=timeout, part_size=part_size, api=api
    )
    # Both branches returned the same handle; only the blocking start/wait
    # is conditional.
    if wait:
        upload.start()
        upload.wait()
    return upload
def create_folder(cls, name, parent=None, project=None, api=None):
    """Create a new folder

    :param name: Folder name
    :param parent: Parent folder
    :param project: Project to create folder in
    :param api: Api instance
    :return: New folder
    """
    api = api or cls._API
    # Validate up front: exactly one of parent / project must be given.
    if not parent and not project:
        raise SbgError('Parent or project must be provided')
    if parent and project:
        raise SbgError(
            'Providing both "parent" and "project" is not allowed'
        )
    payload = {'name': name, 'type': cls.FOLDER_TYPE}
    if parent:
        payload['parent'] = Transform.to_file(file_=parent)
    else:
        payload['project'] = Transform.to_project(project=project)
    response = api.post(url=cls._URL['create_folder'], data=payload).json()
    return cls(api=api, **response)
def query(cls, project=None, status=None, batch=None, parent=None, offset=None, limit=None, api=None):
    """
    Query (List) tasks

    :param project: Target project. optional.
    :param status: Task status.
    :param batch: Only batch tasks.
    :param parent: Parent batch task identifier.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: Collection object.
    """
    api = api or cls._API
    parent = Transform.to_task(parent) if parent else parent
    project = Transform.to_project(project) if project else project
    return super(Task, cls)._query(
        url=cls._URL['query'], project=project, status=status, batch=batch,
        parent=parent, offset=offset, limit=limit, fields='_all', api=api
    )
def query(cls, project=None, visibility=None, q=None, id=None, offset=None, limit=None, api=None):
    """
    Query (List) apps.

    :param project: Source project.
    :param visibility: private|public for private or public apps.
    :param q: List containing search terms.
    :param id: List contains app ids. Fetch apps with specific ids.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: collection object
    """
    project = Transform.to_project(project) if project else project
    api = api or cls._API
    return super(App, cls)._query(
        url=cls._URL['query'], project=project, visibility=visibility,
        q=q, id=id, offset=offset, limit=limit, api=api
    )
def upload(cls, path, project=None, parent=None, file_name=None, overwrite=False, retry=5, timeout=10, part_size=PartSize.UPLOAD_MINIMUM_PART_SIZE, wait=True, api=None):
    """
    Uploads a file using multipart upload and returns an upload handle
    if the wait parameter is set to False. If wait is set to True it will
    block until the upload is completed.

    :param path: File path on local disc.
    :param project: Project identifier
    :param parent: Parent folder identifier
    :param file_name: Optional file name.
    :param overwrite: If true will overwrite the file on the server.
    :param retry: Number of retries if error occurs during upload.
    :param timeout: Timeout for http requests.
    :param part_size: Part size in bytes.
    :param wait: If true will wait for upload to complete.
    :param api: Api instance.
    :return: Upload handle.
    :raises SbgError: If neither or both of project and parent are given.
    """
    api = api or cls._API
    extra = {'resource': cls.__name__, 'query': {
        'path': path,
        'project': project,
        'parent': parent,  # was missing from the log context
        'file_name': file_name,
        'overwrite': overwrite,
        'retry': retry,
        'timeout': timeout,
        'part_size': part_size,
        'wait': wait,
    }}
    logger.info('Uploading file', extra=extra)
    if not project and not parent:
        raise SbgError('A project or parent identifier is required.')
    if project and parent:
        raise SbgError(
            'Project and parent identifiers are mutually exclusive.'
        )
    if project:
        project = Transform.to_project(project)
    if parent:
        parent = Transform.to_file(parent)
    upload = Upload(
        file_path=path, project=project, parent=parent,
        file_name=file_name, overwrite=overwrite, retry_count=retry,
        timeout=timeout, part_size=part_size, api=api
    )
    # Both branches returned the same handle; only start/wait is conditional.
    if wait:
        upload.start()
        upload.wait()
    return upload
def upload(cls, path, project=None, parent=None, file_name=None, overwrite=False, retry=5, timeout=60, part_size=None, wait=True, api=None):
    """
    Uploads a file using multipart upload and returns an upload handle
    if the wait parameter is set to False. If wait is set to True it will
    block until the upload is completed.

    :param path: File path on local disc.
    :param project: Project identifier
    :param parent: Parent folder identifier
    :param file_name: Optional file name.
    :param overwrite: If true will overwrite the file on the server.
    :param retry: Number of retries if error occurs during upload.
    :param timeout: Timeout for http requests.
    :param part_size: Part size in bytes.
    :param wait: If true will wait for upload to complete.
    :param api: Api instance.
    :return: Upload handle.
    :raises SbgError: If neither or both of project and parent are given.
    """
    api = api or cls._API
    extra = {'resource': cls.__name__, 'query': {
        'path': path,
        'project': project,
        'parent': parent,  # was missing from the log context
        'file_name': file_name,
        'overwrite': overwrite,
        'retry': retry,
        'timeout': timeout,
        'part_size': part_size,
        'wait': wait,
    }}
    logger.info('Uploading file', extra=extra)
    if not project and not parent:
        raise SbgError('A project or parent identifier is required.')
    if project and parent:
        raise SbgError(
            'Project and parent identifiers are mutually exclusive.'
        )
    if project:
        project = Transform.to_project(project)
    if parent:
        parent = Transform.to_file(parent)
    upload = Upload(
        file_path=path, project=project, parent=parent,
        file_name=file_name, overwrite=overwrite, retry_count=retry,
        timeout=timeout, part_size=part_size, api=api
    )
    # Both branches returned the same handle; only start/wait is conditional.
    if wait:
        upload.start()
        upload.wait()
    return upload
def bulk_submit(cls, imports, api=None):
    """
    Submit imports in bulk

    :param imports: List of dicts describing a wanted import.
    :param api: Api instance.
    :return: List of ImportBulkRecord objects.
    """
    if not imports:
        raise SbgError('Imports are required')
    api = api or cls._API

    items = []
    for import_ in imports:
        project = import_.get('project')
        parent = import_.get('parent')
        # Exactly one destination (project or parent folder) is allowed.
        if project and parent:
            raise SbgError(
                'Project and parent identifiers are mutually exclusive')
        if not project and not parent:
            raise SbgError('Project or parent identifier is required.')
        if project:
            destination = {'project': Transform.to_project(project)}
        else:
            destination = {'parent': Transform.to_file(parent)}

        name = import_.get('name', None)
        if name:
            destination['name'] = name

        item = {
            'source': {
                'volume': Transform.to_volume(import_.get('volume')),
                'location': Transform.to_location(import_.get('location')),
            },
            'destination': destination,
            'overwrite': import_.get('overwrite', False),
        }
        autorename = import_.get('autorename', None)
        if autorename is not None:
            item['autorename'] = autorename
        preserve = import_.get('preserve_folder_structure', None)
        if preserve is not None:
            item['preserve_folder_structure'] = preserve
        items.append(item)

    response = api.post(url=cls._URL['bulk_create'], data={'items': items})
    return ImportBulkRecord.parse_records(response=response, api=api)
def query(cls, project, names=None, metadata=None, origin=None, tags=None, offset=None, limit=None, api=None):
    """
    Query ( List ) files in a project.
    :param project: Project id
    :param names: Name list
    :param metadata: Metadata query dict
    :param origin: Origin query dict
    :param tags: List of tags to filter on
    :param offset: Pagination offset
    :param limit: Pagination limit
    :param api: Api instance.
    :return: Collection object.
    """
    api = api or cls._API
    project = Transform.to_project(project)
    query_params = {}
    if names is not None and isinstance(names, list):
        # An empty list means "query for an empty name"; use a fresh list
        # instead of mutating the caller's argument.
        query_params['name'] = names if names else [""]
    if metadata and isinstance(metadata, dict):
        for k, v in metadata.items():
            query_params['metadata.' + k] = v
    if tags:
        query_params['tag'] = tags
    if origin and isinstance(origin, dict):
        for k, v in origin.items():
            query_params['origin.' + k] = v
    return super(File, cls)._query(api=api, url=cls._URL['query'],
                                   project=project, offset=offset,
                                   limit=limit, fields='_all',
                                   **query_params)
def copy(self, project, name=None):
    """
    Copy this file into another project.

    :param project: Destination project.
    :param name: Destination file name.
    :return: Copied File object.
    """
    payload = {'project': Transform.to_project(project)}
    if name:
        payload['name'] = name
    url = self._URL['copy'].format(id=self.id)
    response = self._api.post(url=url, data=payload).json()
    return File(api=self._api, **response)
def copy(self, project, name=None):
    """
    Copies the current file.

    :param project: Destination project.
    :param name: Destination file name.
    :return: Copied File object.
    """
    body = dict(project=Transform.to_project(project))
    if name:
        body['name'] = name
    response = self._api.post(
        url=self._URL['copy'].format(id=self.id), data=body
    )
    return File(api=self._api, **response.json())
def copy(self, project, name=None):
    """
    Copy this app into another project.

    :param project: Destination project.
    :param name: Destination app name.
    :return: Copied App object.
    """
    payload = {'project': Transform.to_project(project)}
    if name:
        payload['name'] = name
    url = self._URL['copy'].format(id=self.id)
    copied = self._api.post(url=url, data=payload).json()
    return App(api=self._api, **copied)
def copy(self, project, name=None, strategy=None, use_revision=False, api=None):
    """
    Copies the current app.

    :param project: Destination project.
    :param name: Destination app name.
    :param strategy: App copy strategy.
    :param use_revision: Copy from set app revision.
    :param api: Api instance.
    :return: Copied App object.

    :Copy strategies:
    clone         copy all revisions and continue getting updates form the
                  original app (default method when the key is omitted)
    direct        copy only the latest revision and get the updates from
                  this point on
    clone_direct  copy the app like the direct strategy, but keep all
                  revisions
    transient     copy only the latest revision and continue getting
                  updates from the original app
    """
    api = api or self._API
    # Revision-pinned id vs. the plain app id, depending on use_revision.
    source_id = self._id if use_revision else self.id
    payload = {
        'project': Transform.to_project(project),
        'strategy': strategy or AppCopyStrategy.CLONE,
    }
    if name:
        payload['name'] = name
    logger.info(
        'Copying app',
        extra={
            'resource': self.__class__.__name__,
            'query': {'id': source_id, 'data': payload},
        },
    )
    copied = api.post(
        url=self._URL['copy'].format(id=source_id), data=payload
    ).json()
    return App(api=api, **copied)
def query(cls, project=None, visibility=None, offset=None, limit=None, api=None):
    """
    Query (List) apps.
    :param project: Source project identifier.
    :param visibility: private|public for private or public apps.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: collection object
    """
    if project:
        project = Transform.to_project(project)
    api = api or cls._API
    return super(App, cls)._query(
        url=cls._URL['query'], project=project, visibility=visibility,
        offset=offset, limit=limit, api=api)
def copy(self, project, name=None):
    """
    Copies the current app.

    :param project: Destination project.
    :param name: Destination app name.
    :return: Copied App object.
    """
    project = Transform.to_project(project)
    data = dict(project=project)
    if name:
        data['name'] = name
    response = self._api.post(
        url=self._URL['copy'].format(id=self.id), data=data
    )
    return App(api=self._api, **response.json())
def copy(self, project, name=None):
    """
    Copies the current file.

    :param project: Destination project.
    :param name: Destination file name.
    :return: Copied File object.
    """
    payload = {'project': Transform.to_project(project)}
    if name:
        payload['name'] = name
    logger.info(
        'Copying file',
        extra={
            'resource': self.__class__.__name__,
            'query': {'id': self.id, 'data': payload},
        },
    )
    response = self._api.post(
        url=self._URL['copy'].format(id=self.id), data=payload
    ).json()
    return File(api=self._api, **response)
def bulk_submit(cls, imports, api=None):
    """
    Submit imports in bulk

    :param imports: Imports to be retrieved.
    :param api: Api instance.
    :return: List of ImportBulkRecord objects.
    """
    if not imports:
        raise SbgError('Imports are required')
    api = api or cls._API

    items = []
    for import_ in imports:
        # Source and destination identifiers are normalized up front.
        source = {
            'volume': Transform.to_volume(import_.get('volume')),
            'location': Transform.to_location(import_.get('location')),
        }
        destination = {
            'project': Transform.to_project(import_.get('project')),
        }
        name = import_.get('name', None)
        if name:
            destination['name'] = name
        items.append({
            'source': source,
            'destination': destination,
            'overwrite': import_.get('overwrite', False),
        })

    response = api.post(url=cls._URL['bulk_create'], data={'items': items})
    return ImportBulkRecord.parse_records(response=response, api=api)
def copy(self, project, name=None, strategy=None):
    """
    Copies the current app.

    :param project: Destination project.
    :param name: Destination app name.
    :param strategy: App copy strategy.
    :return: Copied App object.

    :Copy strategies:
    clone         copy all revisions and continue getting updates form the
                  original app (default method when the key is omitted)
    direct        copy only the latest revision and get the updates from
                  this point on
    clone_direct  copy the app like the direct strategy, but keep all
                  revisions
    transient     copy only the latest revision and continue getting
                  updates from the original app
    """
    payload = {
        'project': Transform.to_project(project),
        'strategy': strategy or AppCopyStrategy.CLONE,
    }
    if name:
        payload['name'] = name
    logger.info(
        'Copying app',
        extra={
            'resource': self.__class__.__name__,
            'query': {'id': self.id, 'data': payload},
        },
    )
    copied = self._api.post(
        url=self._URL['copy'].format(id=self.id), data=payload
    ).json()
    return App(api=self._api, **copied)
def query(cls, project, names=None, metadata=None, origin=None, offset=None, limit=None, api=None):
    """
    Query ( List ) files in a project.
    :param project: Project id
    :param names: Name list
    :param metadata: Metadata query dict
    :param origin: Origin query dict
    :param offset: Pagination offset
    :param limit: Pagination limit
    :param api: Api instance.
    :return: Collection object.
    """
    api = api or cls._API
    project = Transform.to_project(project)
    query_params = {}
    if names and isinstance(names, list):
        query_params['name'] = names
    if metadata and isinstance(metadata, dict):
        for k, v in metadata.items():
            query_params['metadata.' + k] = v
    if origin and isinstance(origin, dict):
        for k, v in origin.items():
            query_params['origin.' + k] = v
    return super(File, cls)._query(api=api, url=cls._URL['query'],
                                   project=project, offset=offset,
                                   limit=limit, **query_params)
def query(cls, project=None, status=None, batch=None, parent=None, created_from=None, created_to=None, started_from=None, started_to=None, ended_from=None, ended_to=None, offset=None, limit=None, api=None):
    """
    Query (List) tasks. Date parameters may be both strings and python date
    objects.

    :param project: Target project. optional.
    :param status: Task status.
    :param batch: Only batch tasks.
    :param parent: Parent batch task identifier.
    :param ended_to: All tasks that ended until this date.
    :param ended_from: All tasks that ended from this date.
    :param started_to: All tasks that were started until this date.
    :param started_from: All tasks that were started from this date.
    :param created_to: All tasks that were created until this date.
    :param created_from: All tasks that were created from this date.
    :param offset: Pagination offset.
    :param limit: Pagination limit.
    :param api: Api instance.
    :return: Collection object.
    """
    api = api or cls._API
    parent = Transform.to_task(parent) if parent else parent
    project = Transform.to_project(project) if project else project
    # Normalize every set date bound to its string form in one pass.
    date_params = {
        'created_from': created_from, 'created_to': created_to,
        'started_from': started_from, 'started_to': started_to,
        'ended_from': ended_from, 'ended_to': ended_to,
    }
    date_params = {
        key: Transform.to_datestring(value) if value else value
        for key, value in date_params.items()
    }
    return super(Task, cls)._query(
        url=cls._URL['query'], project=project, status=status, batch=batch,
        parent=parent, offset=offset, limit=limit, fields='_all', api=api,
        **date_params)
def create(cls, name, project, app, revision=None, batch_input=None, batch_by=None, inputs=None, description=None, run=False, disable_batch=False, interruptible=True, api=None):
    """
    Creates a task on server.

    :param name: Task name.
    :param project: Project identifier.
    :param app: CWL app identifier.
    :param revision: CWL app revision.
    :param batch_input: Batch input.
    :param batch_by: Batch criteria.
    :param inputs: Input map.
    :param description: Task description.
    :param run: True if you want to run a task upon creation.
    :param disable_batch: If True disables batching of a batch task.
    :param interruptible: If True interruptible instance will be used.
    :param api: Api instance.
    :return: Task object.
    :raises: TaskValidationError if validation Fails.
    :raises: SbgError if any exception occurs during request.
    """
    api = api or cls._API
    project = Transform.to_project(project)
    app_id = Transform.to_app(app)
    # Pin the app revision: explicit argument wins, otherwise use the
    # revision carried by an App instance.
    if revision:
        app_id = app_id + "/" + six.text_type(revision)
    elif isinstance(app, App):
        app_id = app_id + "/" + six.text_type(app.revision)

    task_data = {
        'name': name,
        'project': project,
        'app': app_id,
        'description': description,
        'inputs': Task._serialize_inputs(inputs) if inputs else {},
        'use_interruptible_instances': interruptible,
    }
    if batch_input and batch_by:
        task_data['batch_input'] = batch_input
        task_data['batch_by'] = batch_by

    params = {}
    if disable_batch:
        params['batch'] = False
    if run:
        params['action'] = 'run'

    created_task = api.post(cls._URL['query'], data=task_data,
                            params=params).json()
    if run and 'errors' in created_task and bool(created_task['errors']):
        raise TaskValidationError(
            'Unable to run task! Task contains errors.',
            task=Task(api=api, **created_task))
    return Task(api=api, **created_task)
def create(cls, name, project, app, batch_input=None, batch_by=None, inputs=None, description=None, run=False, api=None):
    """
    Creates a task on server.
    :param name: Task name.
    :param project: Project identifier.
    :param app: CWL app identifier.
    :param batch_input: Batch input.
    :param batch_by: Batch criteria.
    :param inputs: Input map.
    :param description: Task description.
    :param run: True if you want to run a task upon creation.
    :param api: Api instance.
    :return: Task object.
    """
    task_data = {}
    project = Transform.to_project(project)
    app = Transform.to_app(app)
    task_inputs = {'inputs': {}}
    # `inputs` defaults to None; iterate over an empty dict in that case
    # instead of crashing on None.items().
    for k, v in (inputs or {}).items():
        if isinstance(v, File):
            # NOTE(review): unlike the list branch below, a single file
            # input does not send its name — confirm this asymmetry is
            # intended by the API.
            task_inputs['inputs'][k] = {'class': 'File', 'path': v.id}
        elif isinstance(v, list):
            serialized = []
            for item in v:
                if isinstance(item, File):
                    file_input = {'class': 'File', 'path': item.id}
                    if item.name:
                        file_input['name'] = item.name
                    serialized.append(file_input)
                else:
                    serialized.append(item)
            task_inputs['inputs'][k] = serialized
        else:
            task_inputs['inputs'][k] = v
    if batch_input:
        task_data['batch_input'] = batch_input
    if batch_by:
        task_data['batch_by'] = batch_by
    task_meta = {
        'name': name,
        'project': project,
        'app': app,
        'description': description
    }
    task_data.update(task_meta)
    task_data.update(task_inputs)
    params = {'action': 'run'} if run else {}
    api = api or cls._API
    created_task = api.post(cls._URL['query'], data=task_data,
                            params=params).json()
    return Task(api=api, **created_task)
def query(cls, project=None, names=None, metadata=None, origin=None, tags=None, offset=None, limit=None, dataset=None, api=None, parent=None):
    """
    Query ( List ) files, requires project or dataset
    :param project: Project id
    :param names: Name list
    :param metadata: Metadata query dict
    :param origin: Origin query dict
    :param tags: List of tags to filter on
    :param offset: Pagination offset
    :param limit: Pagination limit
    :param dataset: Dataset id
    :param api: Api instance.
    :param parent: Folder id or File object with type folder
    :return: Collection object.
    """
    api = api or cls._API
    query_params = {}
    if project:
        project = Transform.to_project(project)
        query_params['project'] = project
    if dataset:
        dataset = Transform.to_dataset(dataset)
        query_params['dataset'] = dataset
    if parent:
        query_params['parent'] = Transform.to_file(parent)
    if not (project or dataset or parent):
        raise SbgError('Project, dataset or parent must be provided!')
    if names is not None and isinstance(names, list):
        # An empty list means "query for an empty name"; use a fresh list
        # instead of mutating the caller's argument.
        query_params['name'] = names if names else [""]
    if metadata and isinstance(metadata, dict):
        for k, v in metadata.items():
            query_params['metadata.' + k] = v
    if tags:
        query_params['tag'] = tags
    if origin and isinstance(origin, dict):
        for k, v in origin.items():
            query_params['origin.' + k] = v
    return super(File, cls)._query(api=api, url=cls._URL['query'],
                                   offset=offset, limit=limit,
                                   fields='_all', **query_params)
def test_transform_project_invalid_values(project):
    """Invalid project values must raise SbgError."""
    with pytest.raises(SbgError):
        Transform.to_project(project)
def query(cls, project=None, names=None, metadata=None, origin=None, tags=None, offset=None, limit=None, dataset=None, api=None, parent=None, cont_token=None):
    """
    Query ( List ) files, requires project or dataset
    :param project: Project id
    :param names: Name list
    :param metadata: Metadata query dict
    :param origin: Origin query dict
    :param tags: List of tags to filter on
    :param offset: Pagination offset
    :param limit: Pagination limit
    :param dataset: Dataset id
    :param api: Api instance.
    :param parent: Folder id or File object with type folder
    :param cont_token: Pagination continuation token
    :return: Collection object.
    """
    if cont_token and offset:
        # Trailing space added: implicit concatenation previously produced
        # "parametersare mutually exclusive.".
        raise SbgError(
            'Offset and continuation token parameters '
            'are mutually exclusive.'
        )
    if cont_token and metadata:
        raise SbgError(
            'Metadata filtering cannot be combined '
            'with continuation token pagination.'
        )
    api = api or cls._API
    query_params = {}
    if project:
        project = Transform.to_project(project)
        query_params['project'] = project
    if dataset:
        dataset = Transform.to_dataset(dataset)
        query_params['dataset'] = dataset
    if parent:
        query_params['parent'] = Transform.to_file(parent)
    if not (project or dataset or parent):
        raise SbgError('Project, dataset or parent must be provided.')
    if [project, parent, dataset].count(None) < 2:
        raise SbgError(
            'Only one out of project, parent or dataset must be provided.'
        )
    if names is not None and isinstance(names, list):
        # An empty list means "query for an empty name"; use a fresh list
        # instead of mutating the caller's argument.
        query_params['name'] = names if names else [""]
    if metadata and isinstance(metadata, dict):
        for k, v in metadata.items():
            query_params['metadata.' + k] = v
    if tags:
        query_params['tag'] = tags
    if origin and isinstance(origin, dict):
        for k, v in origin.items():
            query_params['origin.' + k] = v
    return super(File, cls)._query(
        api=api,
        url=cls._URL['scroll' if cont_token else 'query'],
        token=cont_token,
        offset=offset,
        limit=limit,
        fields='_all',
        **query_params
    )
def submit_import(cls, volume, location, project=None, name=None, overwrite=False, properties=None, parent=None, preserve_folder_structure=True, api=None):
    """
    Submits new import job.

    :param volume: Volume identifier.
    :param location: Volume location.
    :param project: Project identifier.
    :param name: Optional file name.
    :param overwrite: If true it will overwrite file if exists.
    :param properties: Properties dictionary.
    :param parent: The ID of the target folder to which the item should be
        imported. Should not be used together with project.
    :param preserve_folder_structure: Whether to keep the exact source
        folder structure. The default value is true if the item being
        imported is a folder. Should not be used if you are importing
        a file.
    :param api: Api instance.
    :return: Import object.
    """
    volume = Transform.to_volume(volume)
    # Destination is either a project or a parent folder, never both.
    if project and parent:
        raise SbgError(
            'Project and parent identifiers are mutually exclusive')
    if project:
        destination = {'project': Transform.to_project(project)}
    elif parent:
        destination = {'parent': Transform.to_file(parent)}
    else:
        raise SbgError('Project or parent identifier is required.')
    if name:
        destination['name'] = name
    data = {
        'source': {'volume': volume, 'location': location},
        'destination': destination,
        'overwrite': overwrite,
    }
    if not preserve_folder_structure:
        data['preserve_folder_structure'] = preserve_folder_structure
    if properties:
        data['properties'] = properties
    api = api or cls._API
    logger.info(
        'Submitting import',
        extra={'resource': cls.__name__, 'query': data},
    )
    _import = api.post(cls._URL['query'], data=data).json()
    return Import(api=api, **_import)
def query(cls, project=None, names=None, metadata=None, origin=None, tags=None, offset=None, limit=None, dataset=None, api=None, parent=None):
    """
    Query ( List ) files, requires project or dataset
    :param project: Project id
    :param names: Name list
    :param metadata: Metadata query dict
    :param origin: Origin query dict
    :param tags: List of tags to filter on
    :param offset: Pagination offset
    :param limit: Pagination limit
    :param dataset: Dataset id
    :param api: Api instance.
    :param parent: Folder id or File object with type folder
    :return: Collection object.
    """
    api = api or cls._API
    query_params = {}
    if project:
        project = Transform.to_project(project)
        query_params['project'] = project
    if dataset:
        dataset = Transform.to_dataset(dataset)
        query_params['dataset'] = dataset
    if parent:
        query_params['parent'] = Transform.to_file(parent)
    if not (project or dataset or parent):
        raise SbgError('Project, dataset or parent must be provided.')
    if [project, parent, dataset].count(None) < 2:
        raise SbgError(
            'Only one out of project, parent or dataset must be provided.'
        )
    if names is not None and isinstance(names, list):
        # An empty list means "query for an empty name"; use a fresh list
        # instead of mutating the caller's argument.
        query_params['name'] = names if names else [""]
    if metadata and isinstance(metadata, dict):
        for k, v in metadata.items():
            query_params['metadata.' + k] = v
    if tags:
        query_params['tag'] = tags
    if origin and isinstance(origin, dict):
        for k, v in origin.items():
            query_params['origin.' + k] = v
    return super(File, cls)._query(
        api=api, url=cls._URL['query'], offset=offset, limit=limit,
        fields='_all', **query_params
    )
def test_transform_project(project):
    """Valid project values must transform without raising."""
    Transform.to_project(project)
def create(cls, name, project, app, revision=None, batch_input=None, batch_by=None, inputs=None, description=None, run=False, api=None):
    """
    Creates a task on server.
    :param name: Task name.
    :param project: Project identifier.
    :param app: CWL app identifier.
    :param revision: CWL app revision.
    :param batch_input: Batch input.
    :param batch_by: Batch criteria.
    :param inputs: Input map.
    :param description: Task description.
    :param run: True if you want to run a task upon creation.
    :param api: Api instance.
    :return: Task object.
    :raises: TaskValidationError if validation Fails.
    :raises: SbgError if any exception occurs during request.
    """
    task_data = {}
    project = Transform.to_project(project)
    app = Transform.to_app(app)
    if revision:
        app = app + "/" + six.text_type(revision)
    task_inputs = {'inputs': {}}
    # `inputs` defaults to None; iterate over an empty dict in that case
    # instead of crashing on None.items().
    for k, v in (inputs or {}).items():
        if isinstance(v, File):
            # NOTE(review): unlike the list branch below, a single file
            # input does not send its name — confirm this asymmetry is
            # intended by the API.
            task_inputs['inputs'][k] = {'class': 'File', 'path': v.id}
        elif isinstance(v, list):
            serialized = []
            for item in v:
                if isinstance(item, File):
                    file_input = {'class': 'File', 'path': item.id}
                    if item.name:
                        file_input['name'] = item.name
                    serialized.append(file_input)
                else:
                    serialized.append(item)
            task_inputs['inputs'][k] = serialized
        else:
            task_inputs['inputs'][k] = v
    if batch_input:
        task_data['batch_input'] = batch_input
    if batch_by:
        task_data['batch_by'] = batch_by
    task_meta = {
        'name': name,
        'project': project,
        'app': app,
        'description': description
    }
    task_data.update(task_meta)
    task_data.update(task_inputs)
    params = {'action': 'run'} if run else {}
    api = api or cls._API
    created_task = api.post(cls._URL['query'], data=task_data,
                            params=params).json()
    if run and 'errors' in created_task:
        if bool(created_task['errors']):
            raise TaskValidationError(
                'Unable to run task! Task contains errors.',
                task=Task(api=api, **created_task))
    return Task(api=api, **created_task)