class Rate(Resource):
    """
    Rate resource.

    Read-only snapshot of the API rate-limit state: the request limit,
    how many requests remain, and when the limit window resets.
    """
    # Maximum number of requests allowed in the current window.
    limit = IntegerField(read_only=True)
    # Requests remaining before the limit is reached.
    remaining = IntegerField(read_only=True)
    # Moment the rate-limit window resets.
    reset = DateTimeField(read_only=True)

    def __str__(self):
        return f'<Rate: limit={self.limit}, remaining={self.remaining}>'
class Rate(Resource):
    """
    Rate resource.

    Read-only snapshot of the API rate-limit state: the request limit,
    how many requests remain, and when the limit window resets.
    """
    # Maximum number of requests allowed in the current window.
    limit = IntegerField(read_only=True)
    # Requests remaining before the limit is reached.
    remaining = IntegerField(read_only=True)
    # Moment the rate-limit window resets.
    reset = DateTimeField()

    def __str__(self):
        # f-string instead of six.text_type(...).format(...): the rest of
        # this file already uses f-strings, so Python 2 support via ``six``
        # is unnecessary here. The rendered string is identical.
        return f'<Rate: limit={self.limit}, remaining={self.remaining}>'
class Error(Resource):
    """
    Error resource describes the error that happened and provides
    http status, custom codes and messages as well as the link to online
    resources.
    """
    # HTTP status code of the failed request.
    status = IntegerField(read_only=True)
    # Platform-specific error code.
    code = IntegerField(read_only=True)
    # Human-readable error message.
    message = StringField(read_only=True)
    # Link to online documentation describing the error.
    more_info = StringField(read_only=True)

    def __str__(self):
        return f'<Error: status={self.status}, code={self.code}>'
class Disk(Resource):
    """
    Disk resource contains information about EBS disk size.
    """
    # Disk size expressed in ``unit``.
    size = IntegerField(read_only=True)
    # Unit for ``size`` (e.g. GB) — exact values come from the API.
    unit = StringField(read_only=True)
    # Disk type identifier as returned by the API.
    type = StringField(read_only=True)

    def __str__(self):
        # Bug fix: previously referenced ``self.type_``, but the declared
        # field is ``type`` — str(disk) raised AttributeError.
        return f'<Disk size={self.size}, unit={self.unit}, type={self.type}>'
class ExecutionStatus(Resource):
    """
    Task execution status resource.

    Contains information about the number of completed task steps,
    total number of task steps and current execution message.
    In case of a batch task it also contains the number of queued,
    running, completed, failed and aborted tasks.
    """
    # Per-task step progress.
    steps_completed = IntegerField(read_only=True)
    steps_total = IntegerField(read_only=True)
    message = StringField(read_only=True)
    # Batch-task child counters (only populated for batch tasks).
    queued = IntegerField(read_only=True)
    running = IntegerField(read_only=True)
    completed = IntegerField(read_only=True)
    failed = IntegerField(read_only=True)
    aborted = IntegerField(read_only=True)

    def __str__(self):
        # Plain literal instead of six.text_type: consistent with the other
        # ExecutionStatus variant in this file; no Python 2 support needed.
        return '<ExecutionStatus>'
class App(Resource):
    """
    Central resource for managing apps.
    """
    _URL = {
        'query': '/apps',
        'get': '/apps/{id}',
        'get_revision': '/apps/{id}/{revision}',
        'create_revision': '/apps/{id}/{revision}/raw',
        'copy': '/apps/{id}/actions/copy',
        'sync': '/apps/{id}/actions/sync',
        'raw': '/apps/{id}/raw'
    }
    # Content-Type header value per supported raw app format.
    _CONTENT_TYPE = {
        AppRawFormat.JSON: 'application/json',
        AppRawFormat.YAML: 'application/yaml'
    }

    href = HrefField()
    _id = StringField(read_only=True, name='id')
    project = StringField(read_only=True)
    name = StringField(read_only=True)
    revision = IntegerField(read_only=True)
    raw = DictField(read_only=False)

    @property
    def id(self):
        """App id with the trailing ``/<revision>`` segment stripped,
        when the last segment is purely numeric; the raw id otherwise."""
        _id, _rev = self._id.rsplit('/', 1)
        # Raw string literal: '\d' in a plain string is an invalid escape
        # sequence (DeprecationWarning, a SyntaxWarning/SyntaxError in
        # newer Pythons).
        if re.match(r'^\d*$', _rev):
            return _id
        else:
            return self._id

    def __eq__(self, other):
        if not hasattr(other, '__class__'):
            return False
        if not self.__class__ == other.__class__:
            return False
        # Compare by revision-stripped id, so two revisions of the same
        # app compare equal.
        return self is other or self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        # f-string for consistency with the other resources in this file.
        return f'<App: id={self.id} rev={self.revision}>'

    @classmethod
    def query(cls, project=None, visibility=None, q=None, id=None,
              offset=None, limit=None, api=None):
        """
        Query (List) apps.
        :param project: Source project.
        :param visibility: private|public for private or public apps.
        :param q: List containing search terms.
        :param id: List contains app ids. Fetch apps with specific ids.
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :param api: Api instance.
        :return: collection object
        """
        if project:
            project = Transform.to_project(project)
        api = api or cls._API
        return super(App, cls)._query(
            url=cls._URL['query'], project=project, visibility=visibility,
            q=q, id=id, offset=offset, limit=limit, api=api
        )

    @classmethod
    def get_revision(cls, id, revision, api=None):
        """
        Get app revision.
        :param id: App identifier.
        :param revision: App revision
        :param api: Api instance.
        :return: App object.
        """
        api = api if api else cls._API
        extra = {
            'resource': cls.__name__,
            'query': {
                'id': id,
                'revision': revision
            }
        }
        logger.info('Get revision', extra=extra)
        app = api.get(url=cls._URL['get_revision'].format(
            id=id, revision=revision)).json()
        return App(api=api, **app)

    @classmethod
    def install_app(cls, id, raw, api=None, raw_format=None):
        """
        Installs an app.
        :param id: App identifier.
        :param raw: Raw cwl data.
        :param api: Api instance.
        :param raw_format: Format of raw app data being sent, json by default
        :return: App object.
        """
        api = api if api else cls._API
        raw_format = raw_format.lower() if raw_format else AppRawFormat.JSON
        extra = {'resource': cls.__name__, 'query': {'id': id, 'data': raw}}
        logger.info('Installing app', extra=extra)

        # Set content type for raw app data; reject unsupported formats
        # before issuing the request.
        if raw_format not in cls._CONTENT_TYPE:
            raise SbgError(
                'Unsupported raw data format: "{}".'.format(raw_format))
        headers = {'Content-Type': cls._CONTENT_TYPE[raw_format]}

        app = api.post(
            url=cls._URL['raw'].format(id=id),
            data=raw,
            headers=headers,
        ).json()
        # Re-fetch the wrapped app resource by its platform-assigned id.
        app_wrapper = api.get(url=cls._URL['get'].format(
            id=app['sbg:id'])).json()
        return App(api=api, **app_wrapper)

    @classmethod
    def create_revision(cls, id, revision, raw, api=None):
        """
        Create a new app revision.
        :param id: App identifier.
        :param revision: App revision.
        :param raw: Raw cwl object.
        :param api: Api instance.
        :return: App object.
        """
        api = api if api else cls._API
        extra = {'resource': cls.__name__, 'query': {'id': id, 'data': raw}}
        logger.info('Creating app revision', extra=extra)
        app = api.post(url=cls._URL['create_revision'].format(
            id=id, revision=revision), data=raw).json()
        # Re-fetch the wrapped app resource by its platform-assigned id.
        app_wrapper = api.get(url=cls._URL['get'].format(
            id=app['sbg:id'])).json()
        return App(api=api, **app_wrapper)

    def copy(self, project, name=None, strategy=None):
        """
        Copies the current app.
        :param project: Destination project.
        :param name: Destination app name.
        :param strategy: App copy strategy.
        :return: Copied App object.

        :Copy strategies:
            clone         copy all revisions and continue getting updates form
                          the original app (default method when the key is
                          omitted)

            direct        copy only the latest revision and get the updates
                          from this point on

            clone_direct  copy the app like the direct strategy, but keep all
                          revisions

            transient     copy only the latest revision and continue getting
                          updates from the original app
        """
        strategy = strategy or AppCopyStrategy.CLONE
        project = Transform.to_project(project)
        data = {'project': project, 'strategy': strategy}
        if name:
            data['name'] = name
        extra = {
            'resource': self.__class__.__name__,
            'query': {
                'id': self.id,
                'data': data
            }
        }
        logger.info('Copying app', extra=extra)
        app = self._api.post(url=self._URL['copy'].format(id=self.id),
                             data=data).json()
        return App(api=self._api, **app)

    def sync(self):
        """
        Syncs the parent app changes with the current app instance.
        :return: Synced App object.
        """
        app = self._api.post(url=self._URL['sync'].format(id=self.id)).json()
        return App(api=self._api, **app)
class AutomationRun(Resource):
    """
    Central resource for managing automation runs.
    """
    _URL = {
        'query': '/automation/runs',
        'get': '/automation/runs/{id}',
        'actions': '/automation/runs/{id}/actions/{action}',
        'state': '/automation/runs/{id}/state',
    }

    href = HrefField(read_only=True)
    id = StringField(read_only=True)
    name = StringField(read_only=False)
    automation = CompoundField(Automation, read_only=True)
    package = CompoundField(AutomationPackage, read_only=True)
    inputs = DictField(read_only=False)
    outputs = DictField(read_only=True)
    settings = DictField(read_only=False)
    created_on = DateTimeField(read_only=True)
    start_time = DateTimeField(read_only=True)
    end_time = DateTimeField(read_only=True)
    resumed_from = StringField(read_only=True)
    created_by = StringField(read_only=True)
    status = StringField(read_only=True)
    message = StringField(read_only=True)
    execution_details = DictField(read_only=True)
    memory_limit = IntegerField(read_only=False)
    project_id = StringField(read_only=True)

    def __eq__(self, other):
        if type(other) is not type(self):
            return False
        return self is other or self.id == other.id

    def __str__(self):
        return f'<AutomationRun: id={self.id}>'

    @classmethod
    def query(cls, automation=None, package=None, status=None,
              name=None, created_by=None, created_from=None,
              created_to=None, project_id=None, order_by=None, order=None,
              offset=None, limit=None, api=None):
        """
        Query (List) automation runs.
        :param name: Automation run name
        :param automation: Automation template
        :param package: Package
        :param status: Run status
        :param created_by: Username of user that created the run
        :param order_by: Property by which to order results
        :param order: Ascending or descending ("asc" or "desc")
        :param created_from: Date the run is created after
        :param created_to: Date the run is created before
        :param project_id: Id of project if Automation run is project based
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :param api: Api instance.
        :return: collection object
        """
        if automation:
            automation = Transform.to_automation(automation)
        if package:
            package = Transform.to_automation_package(package)

        api = api or cls._API
        return super()._query(
            url=cls._URL['query'],
            name=name,
            automation_id=automation,
            package_id=package,
            status=status,
            created_by=created_by,
            created_from=created_from,
            created_to=created_to,
            project_id=project_id,
            order_by=order_by,
            order=order,
            offset=offset,
            limit=limit,
            api=api,
        )

    @classmethod
    def create(cls, package, inputs=None, settings=None, resume_from=None,
               name=None, secret_settings=None, memory_limit=None, api=None):
        """
        Create and start a new run.
        :param package: Automation package id
        :param inputs: Input dictionary
        :param settings: Settings override dictionary
        :param resume_from: Run to resume from
        :param name: Automation run name
        :param secret_settings: dict to override secret_settings from
            automation template
        :param memory_limit: Memory limit in MB.
        :param api: sevenbridges Api instance
        :return: AutomationRun object
        """
        package = Transform.to_automation_package(package)

        # 'inputs' is always included in the payload, defaulting to an
        # empty dict, matching the previous behavior.
        data = {'package': package, 'inputs': inputs or {}}
        if settings:
            data['settings'] = settings
        if resume_from:
            data['resume_from'] = resume_from
        if name:
            data['name'] = name
        if secret_settings:
            data['secret_settings'] = secret_settings
        if memory_limit:
            data['memory_limit'] = memory_limit

        api = api or cls._API
        automation_run = api.post(
            url=cls._URL['query'],
            data=data,
        ).json()
        return AutomationRun(api=api, **automation_run)

    @inplace_reload
    def save(self, inplace=True):
        """
        Saves all modification to the automation run on the server.
        :param inplace Apply edits on the current instance or get a new one.
        :return: Automation run instance.
        :raises ResourceNotModified: when there are no changes to save.
        """
        modified_data = self._modified_data()
        if modified_data:
            extra = {
                'resource': type(self).__name__,
                'query': {
                    'id': self.id,
                    'modified_data': modified_data
                }
            }
            logger.info('Saving automation run', extra=extra)
            data = self._api.patch(url=self._URL['get'].format(id=self.id),
                                   data=modified_data).json()
            return AutomationRun(api=self._api, **data)
        else:
            raise ResourceNotModified()

    @classmethod
    def rerun(cls, id, package=None, inputs=None, settings=None,
              resume_from=None, name=None, secret_settings=None,
              merge=True, api=None):
        """
        Create and start rerun of existing automation.
        :param id: Automation id to rerun
        :param package: Automation package id
        :param inputs: Input dictionary
        :param settings: Settings override dictionary
        :param resume_from: Run to resume from
        :param name: Automation run name
        :param secret_settings: dict to override secret_settings from
            automation template
        :param merge: merge settings and inputs of run
        :param api: sevenbridges Api instance
        :return: AutomationRun object
        """
        data = {'merge': merge}
        if package:
            data['package'] = package
        if inputs:
            data['inputs'] = inputs
        if settings:
            data['settings'] = settings
        if resume_from:
            data['resume_from'] = resume_from
        if name:
            data['name'] = name
        if secret_settings:
            data['secret_settings'] = secret_settings

        api = api or cls._API
        # Bug fix: the payload built above was previously discarded — the
        # POST was issued without ``data``, silently dropping every rerun
        # override (merge, package, inputs, settings, ...).
        automation_run = api.post(
            url=cls._URL['actions'].format(
                id=id, action=AutomationRunActions.RERUN),
            data=data,
        ).json()
        return AutomationRun(api=api, **automation_run)

    def stop(self, api=None):
        """
        Stop automation run.
        :param api: sevenbridges Api instance.
        :return: AutomationRun object
        """
        api = api or self._API
        return api.post(url=self._URL['actions'].format(
            id=self.id, action=AutomationRunActions.STOP)).content

    def get_log_file(self, api=None):
        """
        Retrieve automation run log.
        :param api: sevenbridges Api instance
        :return: Log string
        """
        api = api or self._API
        log_file_data = self.execution_details.get('log_file')
        return File(api=api, **log_file_data) if log_file_data else None

    def get_state(self, api=None):
        """
        Retrieve automation run state.
        :param api: sevenbridges Api instance
        :return: State file json contents as string
        """
        api = api or self._API
        return api.get(self._URL['state'].format(id=self.id)).json()
class Automation(Resource):
    """
    Central resource for managing automations.
    """
    # noinspection PyProtectedMember
    _URL = {
        'query': '/automation/automations',
        'get': '/automation/automations/{id}',
        # Member/package endpoints are reused from the sibling resources.
        'member': AutomationMember._URL['get'],
        'members': AutomationMember._URL['query'],
        'packages': AutomationPackage._URL['query'],
        'archive': '/automation/automations/{automation_id}/actions/archive',
        'restore': '/automation/automations/{automation_id}/actions/restore'
    }

    href = HrefField(read_only=True)
    id = UuidField(read_only=True)
    name = StringField(read_only=False)
    description = StringField(read_only=False)
    billing_group = UuidField(read_only=False)
    owner = StringField(read_only=True)
    created_by = StringField(read_only=True)
    created_on = DateTimeField(read_only=True)
    modified_by = StringField(read_only=True)
    modified_on = DateTimeField(read_only=False)
    archived = BooleanField(read_only=True)
    secret_settings = DictField(read_only=False)
    memory_limit = IntegerField(read_only=False)
    project_based = BooleanField(read_only=False)

    def __eq__(self, other):
        # Equality by id (same type only).
        if type(other) is not type(self):
            return False
        return self is other or self.id == other.id

    def __str__(self):
        return f'<Automation: id={self.id} name={self.name}>'

    @classmethod
    def query(cls, name=None, include_archived=False, project_based=None,
              offset=None, limit=None, api=None):
        """
        Query (List) automations.
        :param name: Automation name.
        :param include_archived: Include archived automations also
        :param project_based: Search project based automations
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :param api: Api instance.
        :return: collection object
        """
        api = api or cls._API
        return super()._query(
            url=cls._URL['query'],
            name=name,
            include_archived=include_archived,
            project_based=project_based,
            offset=offset,
            limit=limit,
            api=api,
        )

    @classmethod
    def create(cls, name, description=None, billing_group=None,
               secret_settings=None, project_based=None, memory_limit=None,
               api=None):
        """
        Create an automation template.
        :param name: Automation name.
        :param billing_group: Automation billing group.
        :param description: Automation description.
        :param secret_settings: Automation settings.
        :param project_based: Create project based automation template.
        :param memory_limit: Memory limit in MB.
        :param api: Api instance.
        :return: Automation object.
        :raises SbgError: when name is not provided.
        """
        api = api if api else cls._API

        if name is None:
            raise SbgError('Automation name is required!')

        data = {
            'name': name,
        }

        if billing_group:
            data['billing_group'] = Transform.to_billing_group(billing_group)
        if description:
            data['description'] = description
        if secret_settings:
            data['secret_settings'] = secret_settings
        if project_based:
            data['project_based'] = project_based
        if memory_limit:
            data['memory_limit'] = memory_limit

        extra = {'resource': cls.__name__, 'query': data}
        logger.info('Creating automation template', extra=extra)
        automation_data = api.post(url=cls._URL['query'], data=data).json()
        return Automation(api=api, **automation_data)

    @inplace_reload
    def save(self, inplace=True):
        """
        Saves all modification to the automation template on the server.
        :param inplace Apply edits on the current instance or get a new one.
        :return: Automation instance.
        :raises ResourceNotModified: when there are no changes to save.
        """
        modified_data = self._modified_data()
        if modified_data:
            extra = {
                'resource': type(self).__name__,
                'query': {
                    'id': self.id,
                    'modified_data': modified_data
                }
            }
            logger.info('Saving automation template', extra=extra)
            data = self._api.patch(url=self._URL['get'].format(id=self.id),
                                   data=modified_data).json()
            return Automation(api=self._api, **data)
        else:
            raise ResourceNotModified()

    @inplace_reload
    def archive(self):
        """
        Archive automation.
        :return: Automation instance.
        """
        extra = {
            'resource': type(self).__name__,
            'query': {
                'id': self.id,
            }
        }
        logger.info('Archive automation', extra=extra)
        automation_data = self._api.post(url=self._URL['archive'].format(
            automation_id=self.id)).json()
        return Automation(api=self._api, **automation_data)

    @inplace_reload
    def restore(self):
        """
        Restore archived automation.
        :return: Automation instance.
        """
        extra = {
            'resource': type(self).__name__,
            'query': {
                'id': self.id,
            }
        }
        logger.info('Restore archived automation', extra=extra)
        automation_data = self._api.post(url=self._URL['restore'].format(
            automation_id=self.id)).json()
        return Automation(api=self._api, **automation_data)

    def get_packages(self, offset=None, limit=None, api=None):
        """
        Return list of packages that belong to this automation.
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :param api: sevenbridges Api instance.
        :return: AutomationPackage collection
        """
        api = api or self._API
        return AutomationPackage.query(automation=self.id, offset=offset,
                                       limit=limit, api=api)

    @classmethod
    def get_package(cls, package, api=None):
        """
        Return the specified automation package.
        :param package: Automation Package Id
        :param api: sevenbridges Api instance.
        :return: AutomationPackage object
        """
        package_id = Transform.to_automation_package(package)
        api = api or cls._API
        return AutomationPackage.get(id=package_id, api=api)

    def add_package(self, version, file_path, schema, file_name=None,
                    retry_count=RequestParameters.DEFAULT_RETRY_COUNT,
                    timeout=RequestParameters.DEFAULT_TIMEOUT,
                    part_size=None, api=None):
        """
        Add a code package to automation template.
        :param version: The code package version.
        :param file_path: Path to the code package file to be uploaded.
        :param schema: IO schema for main step of execution.
        :param part_size: Size of upload part in bytes.
        :param file_name: Optional file name.
        :param retry_count: Upload retry count.
        :param timeout: Timeout for s3/google session.
        :param api: sevenbridges Api instance.
        :return: AutomationPackage
        :raises SbgError: when version or file_path is not provided.
        """
        api = api or self._API
        if version is None:
            raise SbgError('Code package version is required!')

        if file_path is None:
            raise SbgError('Code package file path is required!')

        # Multipart upload the code package:
        upload = CodePackageUpload(file_path, self.id, api=api,
                                   part_size=part_size, file_name=file_name,
                                   retry_count=retry_count, timeout=timeout)
        upload.start()
        # Blocks until the multipart upload finishes.
        upload.wait()
        package_file = upload.result()

        # Create the automation package record pointing at the uploaded
        # file's id:
        return AutomationPackage.create(self.id, version=version,
                                        location=package_file.id,
                                        schema=schema, api=api)

    def get_member(self, username, api=None):
        """
        Return specified automation member.
        :param username: Member username
        :param api: sevenbridges Api instance.
        :return: AutomationMember object
        """
        member = Transform.to_automation_member(username)
        api = api or self._API
        return AutomationMember.get(id=member, automation=self.id, api=api)

    def get_members(self, offset=None, limit=None, api=None):
        """
        Return list of automation members.
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :param api: sevenbridges Api instance.
        :return: AutomationMember collection
        """
        api = api or self._API
        return AutomationMember.query(automation=self.id, offset=offset,
                                      limit=limit, api=api)

    def add_member(self, user, permissions, api=None):
        """
        Add member to the automation.
        :param user: Member username
        :param permissions: Member permissions
        :param api: sevenbridges Api instance
        :return: AutomationMember object
        """
        api = api or self._API
        return AutomationMember.add(automation=self.id, user=user,
                                    permissions=permissions, api=api)

    def remove_member(self, user, api=None):
        """
        Remove a member from the automation.
        :param user: Member username
        :param api: sevenbridges Api instance
        :return: None
        """
        api = api or self._API
        AutomationMember.remove(automation=self.id, user=user, api=api)

    def get_runs(self, package=None, status=None, name=None,
                 created_by=None, created_from=None, created_to=None,
                 project_id=None, order_by=None, order=None, offset=None,
                 limit=None, api=None):
        """
        Query automation runs that belong to this automation.
        :param package: Package id
        :param status: Run status
        :param name: Automation run name
        :param created_by: Username of member that created the run
        :param created_from: Date the run was created after
        :param created_to: Date the run was created before
        :param project_id: Search runs by project id, if run is project based
        :param order_by: Property by which to order results
        :param order: Ascending or Descending ("asc" or "desc")
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :param api: sevenbridges Api instance
        :return: AutomationRun collection
        """
        api = api or self._API
        return AutomationRun.query(
            automation=self.id, package=package, status=status,
            name=name, created_by=created_by, created_from=created_from,
            created_to=created_to, project_id=project_id, order_by=order_by,
            order=order, offset=offset, limit=limit, api=api
        )
class AutomationPackage(Resource):
    """
    Central resource for managing automation packages.
    """
    _URL = {
        'query': '/automation/automations/{automation_id}/packages',
        'get': '/automation/packages/{id}',
        'archive': "/automation/automations/{automation_id}"
                   "/packages/{id}/actions/archive",
        'restore': "/automation/automations/{automation_id}"
                   "/packages/{id}/actions/restore",
    }

    id = StringField(read_only=True)
    automation = UuidField(read_only=True)
    version = StringField(read_only=True)
    # File id of the uploaded code package (see Automation.add_package).
    location = StringField(read_only=True)
    schema = DictField(read_only=True)
    created_by = StringField(read_only=True)
    created_on = DateTimeField(read_only=True)
    archived = BooleanField(read_only=True)
    custom_url = StringField(read_only=False)
    memory_limit = IntegerField(read_only=False)

    def __eq__(self, other):
        # Equality by id (same type only).
        if type(other) is not type(self):
            return False
        return self is other or self.id == other.id

    def __str__(self):
        return f'<AutomationPackage: id={self.id}>'

    @classmethod
    def query(cls, automation, offset=None, limit=None, api=None):
        """
        Query (List) automation packages.
        :param automation: Automation id.
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :param api: Api instance.
        :return: collection object
        """
        automation_id = Transform.to_automation(automation)

        api = api or cls._API
        return super()._query(
            url=cls._URL['query'].format(automation_id=automation_id),
            offset=offset,
            limit=limit,
            api=api,
        )

    @classmethod
    def create(cls, automation, version, location, schema,
               memory_limit=None, api=None):
        """
        Create a code package.
        :param automation: Automation id.
        :param version: The code package version.
        :param location: File ID of the uploaded code package.
        :param schema: IO schema for main step of execution.
        :param memory_limit: Memory limit in MB.
        :param api: Api instance.
        :return: AutomationPackage object.
        :raises SbgError: when version, location or schema is not provided.
        """
        automation_id = Transform.to_automation(automation)

        api = api if api else cls._API
        if version is None:
            raise SbgError('Code package version is required!')

        if location is None:
            raise SbgError('Code package location is required!')

        if schema is None:
            raise SbgError('Schema is required!')

        data = {
            'version': version,
            'location': location,
            'schema': schema,
            'memory_limit': memory_limit,
        }

        extra = {'resource': cls.__name__, 'query': data}
        package_data = api.post(
            cls._URL['query'].format(automation_id=automation_id), data=data
        ).json()
        logger.info('Add code package to automation with id %s',
                    automation_id, extra=extra)
        return AutomationPackage(api=api, **package_data)

    @inplace_reload
    def archive(self):
        """
        Archive package.
        :return: AutomationPackage object.
        """
        automation_id = Transform.to_automation(self.automation)
        extra = {
            'resource': type(self).__name__,
            'query': {
                'id': self.id,
            }
        }
        logger.info('Archive automation package', extra=extra)
        package_data = self._api.post(url=self._URL['archive'].format(
            automation_id=automation_id, id=self.id)).json()
        return AutomationPackage(api=self._api, **package_data)

    @inplace_reload
    def restore(self):
        """
        Restore archived package.
        :return: AutomationPackage object.
        """
        automation_id = Transform.to_automation(self.automation)
        extra = {
            'resource': type(self).__name__,
            'query': {
                'id': self.id,
            }
        }
        logger.info('Restore archived automation package', extra=extra)
        package_data = self._api.post(url=self._URL['restore'].format(
            automation_id=automation_id, id=self.id)).json()
        return AutomationPackage(api=self._api, **package_data)

    @inplace_reload
    def save(self, inplace=True):
        """
        Saves all modification to the automation package on the server.
        :param inplace Apply edits on the current instance or get a new one.
        :return: AutomationPackage instance.
        :raises ResourceNotModified: when there are no changes to save.
        """
        modified_data = self._modified_data()
        if modified_data:
            extra = {
                'resource': type(self).__name__,
                'query': {
                    'id': self.id,
                    'modified_data': modified_data
                }
            }
            logger.info('Saving automation package', extra=extra)
            data = self._api.patch(url=self._URL['get'].format(id=self.id),
                                   data=modified_data).json()
            return AutomationPackage(api=self._api, **data)
        else:
            raise ResourceNotModified()
class ExecutionStatus(Resource):
    """
    Task execution status resource.

    Contains information about the number of completed task steps, total
    number of task steps, current execution message and information
    regarding computation limits. In case of a batch task it also contains
    the number of queued, running, completed, failed and aborted tasks.
    """
    # Per-task step progress.
    steps_completed = IntegerField(read_only=True)
    steps_total = IntegerField(read_only=True)
    message = StringField(read_only=True)
    message_code = StringField(read_only=True)
    # Batch-task child counters (only populated for batch tasks).
    queued = IntegerField(read_only=True)
    running = IntegerField(read_only=True)
    completed = IntegerField(read_only=True)
    failed = IntegerField(read_only=True)
    aborted = IntegerField(read_only=True)
    # Computation-limit indicators.
    system_limit = BooleanField(read_only=True)
    account_limit = BooleanField(read_only=True)
    instance_init = BooleanField(read_only=True)
    # Duration fields — presumably milliseconds or seconds as reported by
    # the API; units are not established by this file.
    queued_duration = IntegerField(read_only=True)
    running_duration = IntegerField(read_only=True)
    execution_duration = IntegerField(read_only=True)
    duration = IntegerField(read_only=True)

    def __str__(self):
        return '<ExecutionStatus>'
class File(Resource):
    """
    Central resource for managing files.
    """
    FOLDER_TYPE = 'folder'

    _URL = {
        'query': '/files',
        'scroll': '/files/scroll',
        'get': '/files/{id}',
        'delete': '/files/{id}',
        'copy': '/files/{id}/actions/copy',
        'download_info': '/files/{id}/download_info',
        'metadata': '/files/{id}/metadata',
        'tags': '/files/{id}/tags',
        'bulk_get': '/bulk/files/get',
        'bulk_delete': '/bulk/files/delete',
        'bulk_update': '/bulk/files/update',
        'bulk_edit': '/bulk/files/edit',
        'create_folder': '/files',
        'list_folder': '/files/{id}/list',
        'scroll_folder': '/files/{id}/scroll',
        'copy_to_folder': '/files/{file_id}/actions/copy',
        'move_to_folder': '/files/{file_id}/actions/move',
    }

    href = HrefField()
    id = StringField(read_only=True)
    type = StringField(read_only=True)
    name = StringField()
    size = IntegerField(read_only=True)
    parent = StringField(read_only=True)
    project = StringField(read_only=True)
    created_on = DateTimeField(read_only=True)
    modified_on = DateTimeField(read_only=True)
    origin = CompoundField(FileOrigin, read_only=True)
    storage = CompoundField(FileStorage, read_only=True)
    metadata = CompoundField(Metadata)
    tags = BasicListField()
    _secondary_files = BasicListField(name='_secondary_files')

    def __str__(self):
        return six.text_type('<File: id={id}>'.format(id=self.id))

    def __eq__(self, other):
        if not hasattr(other, '__class__'):
            return False
        if not self.__class__ == other.__class__:
            return False
        return self is other or self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)

    def is_folder(self):
        """Return True if this resource is a folder rather than a file."""
        return self.type.lower() == self.FOLDER_TYPE

    @property
    def secondary_files(self):
        """Secondary files wrapped as File objects (None if absent)."""
        if self._secondary_files:
            return [
                File(api=self._api, **data)
                for data in self._secondary_files
            ]

    @classmethod
    def query(cls, project=None, names=None, metadata=None, origin=None,
              tags=None, offset=None, limit=None, dataset=None, api=None,
              parent=None, cont_token=None):
        """
        Query ( List ) files, requires project or dataset
        :param project: Project id
        :param names: Name list
        :param metadata: Metadata query dict
        :param origin: Origin query dict
        :param tags: List of tags to filter on
        :param offset: Pagination offset
        :param limit: Pagination limit
        :param dataset: Dataset id
        :param api: Api instance.
        :param parent: Folder id or File object with type folder
        :param cont_token: Pagination continuation token
        :return: Collection object.
        """
        if cont_token and offset:
            # BUGFIX: the two adjacent literals previously concatenated to
            # "parametersare" — a trailing space was missing.
            raise SbgError(
                'Offset and continuation token parameters '
                'are mutually exclusive.'
            )
        if cont_token and metadata:
            raise SbgError(
                'Metadata filtering cannot be combined '
                'with continuation token pagination.'
            )
        api = api or cls._API
        query_params = {}
        if project:
            project = Transform.to_project(project)
            query_params['project'] = project
        if dataset:
            dataset = Transform.to_dataset(dataset)
            query_params['dataset'] = dataset
        if parent:
            query_params['parent'] = Transform.to_file(parent)
        # Exactly one container (project, dataset or parent) is required.
        if not (project or dataset or parent):
            raise SbgError('Project, dataset or parent must be provided.')
        if [project, parent, dataset].count(None) < 2:
            raise SbgError(
                'Only one out of project, parent or dataset must be provided.'
            )
        if names is not None and isinstance(names, list):
            # An empty name list means "match the empty name" server-side.
            if len(names) == 0:
                names.append("")
            query_params['name'] = names
        metadata_params = {}
        if metadata and isinstance(metadata, dict):
            for k, v in metadata.items():
                metadata_params['metadata.' + k] = metadata[k]
        if tags:
            query_params['tag'] = tags
        query_params.update(metadata_params)
        origin_params = {}
        if origin and isinstance(origin, dict):
            for k, v in origin.items():
                origin_params['origin.' + k] = origin[k]
        query_params.update(origin_params)
        return super(File, cls)._query(
            api=api,
            url=cls._URL['scroll' if cont_token else 'query'],
            token=cont_token,
            offset=offset,
            limit=limit,
            fields='_all',
            **query_params
        )

    @classmethod
    def upload(cls, path, project=None, parent=None, file_name=None,
               overwrite=False, retry=5, timeout=60,
               part_size=None, wait=True, api=None):
        """
        Uploads a file using multipart upload and returns an upload handle
        if the wait parameter is set to False. If wait is set to True it
        will block until the upload is completed.

        :param path: File path on local disc.
        :param project: Project identifier
        :param parent: Parent folder identifier
        :param file_name: Optional file name.
        :param overwrite: If true will overwrite the file on the server.
        :param retry: Number of retries if error occurs during upload.
        :param timeout: Timeout for http requests.
        :param part_size: Part size in bytes.
        :param wait: If true will wait for upload to complete.
        :param api: Api instance.
        """
        api = api or cls._API
        extra = {'resource': cls.__name__, 'query': {
            'path': path,
            'project': project,
            'file_name': file_name,
            'overwrite': overwrite,
            'retry': retry,
            'timeout': timeout,
            'part_size': part_size,
            'wait': wait,
        }}
        logger.info('Uploading file', extra=extra)
        # Destination is either a project or a parent folder, never both.
        if not project and not parent:
            raise SbgError('A project or parent identifier is required.')
        if project and parent:
            raise SbgError(
                'Project and parent identifiers are mutually exclusive.'
            )
        if project:
            project = Transform.to_project(project)
        if parent:
            parent = Transform.to_file(parent)
        upload = Upload(
            file_path=path, project=project, parent=parent,
            file_name=file_name, overwrite=overwrite, retry_count=retry,
            timeout=timeout, part_size=part_size, api=api
        )
        if wait:
            upload.start()
            upload.wait()
            return upload
        else:
            return upload

    def copy(self, project, name=None):
        """
        Copies the current file.
        :param project: Destination project.
        :param name: Destination file name.
        :return: Copied File object.
        """
        project = Transform.to_project(project)
        data = {
            'project': project
        }
        if name:
            data['name'] = name
        extra = {'resource': self.__class__.__name__, 'query': {
            'id': self.id,
            'data': data
        }}
        logger.info('Copying file', extra=extra)
        new_file = self._api.post(url=self._URL['copy'].format(id=self.id),
                                  data=data).json()
        return File(api=self._api, **new_file)

    def download_info(self):
        """
        Fetches download information containing file url
        that can be used to download file.
        :return: Download info object.
        """
        info = self._api.get(url=self._URL['download_info'].format(id=self.id))
        return DownloadInfo(api=self._api, **info.json())

    def download(self, path, retry=5, timeout=10,
                 chunk_size=PartSize.DOWNLOAD_MINIMUM_PART_SIZE, wait=True,
                 overwrite=False):
        """
        Downloads the file and returns a download handle.
        Download will not start until .start() method is invoked.
        :param path: Full path to the new file.
        :param retry: Number of retries if error occurs during download.
        :param timeout: Timeout for http requests.
        :param chunk_size: Chunk size in bytes.
        :param wait: If true will wait for download to complete.
        :param overwrite: If True will silently overwrite existing file.
        :return: Download handle.
        """
        if not overwrite and os.path.exists(path):
            raise LocalFileAlreadyExists(message=path)
        extra = {'resource': self.__class__.__name__, 'query': {
            'id': self.id,
            'path': path,
            'overwrite': overwrite,
            'retry': retry,
            'timeout': timeout,
            'chunk_size': chunk_size,
            'wait': wait,
        }}
        logger.info('Downloading file', extra=extra)
        info = self.download_info()
        download = Download(
            url=info.url, file_path=path, retry_count=retry, timeout=timeout,
            part_size=chunk_size, api=self._api
        )
        if wait:
            download.start()
            download.wait()
        else:
            return download

    @inplace_reload
    def save(self, inplace=True, silent=False):
        """
        Saves all modification to the file on the server. By default this
        method raises an error if you are trying to save an instance that
        was not changed. Set the silent param to True to disable this
        behaviour.
        :param inplace: Apply edits to the current instance or get a new one.
        :param silent: If False, raises an exception if file wasn't modified.
        :raise ResourceNotModified
        :return: File instance.
        """
        modified_data = self._modified_data()
        if silent or bool(modified_data):
            # If metadata is to be set
            if 'metadata' in modified_data:
                # _overwrite_metadata marks an assignment-style update
                # (file.metadata = {...}) which maps to PUT (replace);
                # otherwise PATCH (merge) is used.
                if hasattr(self, '_overwrite_metadata'):
                    self._api.put(
                        url=self._URL['metadata'].format(id=self.id),
                        data=modified_data['metadata']
                    )
                    delattr(self, '_overwrite_metadata')
                else:
                    self._api.patch(
                        url=self._URL['metadata'].format(id=self.id),
                        data=modified_data['metadata']
                    )
                modified_data.pop('metadata')
            if 'tags' in modified_data:
                self._api.put(
                    url=self._URL['tags'].format(id=self.id),
                    data=modified_data['tags']
                )
                modified_data.pop('tags')
            # Change everything else
            if bool(modified_data):
                self._api.patch(
                    url=self._URL['get'].format(id=self.id),
                    data=modified_data
                )
        else:
            raise ResourceNotModified()
        return self.reload()

    def stream(self, part_size=32 * PartSize.KB):
        """
        Creates an iterator which can be used to stream the file content.
        :param part_size: Size of the part in bytes. Default 32KB
        :return Iterator
        """
        download_info = self.download_info()
        response = self._api.get(
            url=download_info.url, stream=True, append_base=False
        )
        for part in response.iter_content(part_size):
            yield part

    # noinspection PyAttributeOutsideInit
    def reload(self):
        """
        Refreshes the file with the data from the server.
        """
        # Prefer the href reported by the server; fall back to the id URL.
        try:
            data = self._api.get(self.href, append_base=False).json()
            resource = File(api=self._api, **data)
        except Exception:
            try:
                data = self._api.get(
                    self._URL['get'].format(id=self.id)).json()
                resource = File(api=self._api, **data)
            except Exception as e:
                raise SbgError(
                    'Resource can not be refreshed due to an error: {}'
                    .format(six.text_type(e))
                )
        self._data = resource._data
        self._dirty = resource._dirty
        self.update_old()
        # If file.metadata = value was executed
        # file object will have attribute _overwrite_metadata=True,
        # which tells us to force overwrite of metadata on the server.
        # This is metadata specific. Once we reload the resource we delete
        # the attribute _overwrite_metadata from the instance.
        try:
            delattr(self, '_overwrite_metadata')
        except AttributeError:
            pass

    def content(self, path=None, overwrite=True, encoding='utf-8'):
        """
        Downloads file to the specified path or as temporary file
        and reads the file content in memory.
        Should not be used on very large files.
        :param path: Path for file download If omitted tmp file will be used.
        :param overwrite: Overwrite file if exists locally
        :param encoding: File encoding, by default it is UTF-8
        :return: File content.
        """
        if path:
            self.download(wait=True, path=path, overwrite=overwrite)
            with io.open(path, 'r', encoding=encoding) as fp:
                return fp.read()
        with tempfile.NamedTemporaryFile() as tmpfile:
            self.download(wait=True, path=tmpfile.name, overwrite=overwrite)
            with io.open(tmpfile.name, 'r', encoding=encoding) as fp:
                return fp.read()

    @classmethod
    def bulk_get(cls, files, api=None):
        """
        Retrieve files with specified ids in bulk
        :param files: Files to be retrieved.
        :param api: Api instance.
        :return: List of FileBulkRecord objects.
        """
        api = api or cls._API
        file_ids = [Transform.to_file(file_) for file_ in files]
        data = {'file_ids': file_ids}
        logger.debug('Getting files in bulk.')
        response = api.post(url=cls._URL['bulk_get'], data=data)
        return FileBulkRecord.parse_records(response=response, api=api)

    @classmethod
    def bulk_delete(cls, files, api=None):
        """
        Delete files with specified ids in bulk
        :param files: Files to be deleted.
        :param api: Api instance.
        :return: List of FileBulkRecord objects.
        """
        api = api or cls._API
        file_ids = [Transform.to_file(file_) for file_ in files]
        data = {'file_ids': file_ids}
        logger.debug('Deleting files in bulk.')
        response = api.post(url=cls._URL['bulk_delete'], data=data)
        return FileBulkRecord.parse_records(response=response, api=api)

    @classmethod
    def bulk_update(cls, files, api=None):
        """
        This call updates the details for multiple specified files.
        Use this call to set new information for the files, thus replacing
        all existing information and erasing omitted parameters. For each
        of the specified files, the call sets a new name, new tags and
        metadata.
        :param files: List of file instances.
        :param api: Api instance.
        :return: List of FileBulkRecord objects.
        """
        if not files:
            raise SbgError('Files are required.')
        api = api or cls._API
        data = {
            'items': [
                {
                    'id': file_.id,
                    'name': file_.name,
                    'tags': file_.tags,
                    'metadata': file_.metadata,
                }
                for file_ in files
            ]
        }
        logger.debug('Updating files in bulk.')
        response = api.post(url=cls._URL['bulk_update'], data=data)
        return FileBulkRecord.parse_records(response=response, api=api)

    @classmethod
    def bulk_edit(cls, files, api=None):
        """
        This call edits the details for multiple specified files.
        Use this call to modify the existing information for the files or
        add new information while preserving omitted parameters. For each
        of the specified files, the call edits its name, tags and metadata.
        :param files: List of file instances.
        :param api: Api instance.
        :return: List of FileBulkRecord objects.
        """
        if not files:
            raise SbgError('Files are required.')
        api = api or cls._API
        data = {
            'items': [
                {
                    'id': file_.id,
                    'name': file_.name,
                    'tags': file_.tags,
                    'metadata': file_.metadata,
                }
                for file_ in files
            ]
        }
        logger.debug('Editing files in bulk.')
        response = api.post(url=cls._URL['bulk_edit'], data=data)
        return FileBulkRecord.parse_records(response=response, api=api)

    def list_files(self, offset=None, limit=None, api=None, cont_token=None):
        """List files in a folder
        :param api: Api instance
        :param offset: Pagination offset
        :param limit: Pagination limit
        :param cont_token: Pagination continuation token
        :return: List of files
        """
        if cont_token and offset:
            # BUGFIX: same missing-space concatenation as in query().
            raise SbgError(
                'Offset and continuation token parameters '
                'are mutually exclusive.'
            )
        api = api or self._API
        if not self.is_folder():
            raise SbgError('{name} is not a folder'.format(name=self.name))
        url = self._URL[
            'scroll_folder' if cont_token else 'list_folder'
        ].format(id=self.id)
        return super(File, self.__class__)._query(
            api=api, url=url, token=cont_token, offset=offset, limit=limit,
            fields='_all'
        )

    @classmethod
    def create_folder(cls, name, parent=None, project=None, api=None):
        """Create a new folder
        :param name: Folder name
        :param parent: Parent folder
        :param project: Project to create folder in
        :param api: Api instance
        :return: New folder
        """
        api = api or cls._API
        data = {
            'name': name,
            'type': cls.FOLDER_TYPE
        }
        if not parent and not project:
            raise SbgError('Parent or project must be provided')
        if parent and project:
            raise SbgError(
                'Providing both "parent" and "project" is not allowed'
            )
        if parent:
            data['parent'] = Transform.to_file(file_=parent)
        if project:
            data['project'] = Transform.to_project(project=project)
        response = api.post(url=cls._URL['create_folder'], data=data).json()
        return cls(api=api, **response)

    def copy_to_folder(self, parent, name=None, api=None):
        """Copy file to folder
        :param parent: Folder to copy file to
        :param name: New file name
        :param api: Api instance
        :return: New file instance
        """
        api = api or self._API
        if self.is_folder():
            raise SbgError('Copying folders is not supported')
        data = {
            'parent': Transform.to_file(parent)
        }
        if name:
            data['name'] = name
        response = api.post(
            url=self._URL['copy_to_folder'].format(file_id=self.id),
            data=data
        ).json()
        return File(api=api, **response)

    def move_to_folder(self, parent, name=None, api=None):
        """Move file to folder
        :param parent: Folder to move file to
        :param name: New file name
        :param api: Api instance
        :return: New file instance
        """
        api = api or self._API
        if self.is_folder():
            raise SbgError('Moving folders is not supported')
        data = {
            'parent': Transform.to_file(parent)
        }
        if name:
            data['name'] = name
        response = api.post(
            url=self._URL['move_to_folder'].format(file_id=self.id),
            data=data
        ).json()
        return File(api=api, **response)
class File(Resource):
    """
    Central resource for managing files.
    """
    _URL = {
        'query': '/files',
        'get': '/files/{id}',
        'delete': '/files/{id}',
        'copy': '/files/{id}/actions/copy',
        'download_info': '/files/{id}/download_info',
        'metadata': '/files/{id}/metadata',
        'tags': '/files/{id}/tags'
    }

    href = HrefField()
    id = StringField()
    name = StringField(read_only=False)
    size = IntegerField(read_only=True)
    project = StringField(read_only=True)
    created_on = DateTimeField(read_only=True)
    modified_on = DateTimeField(read_only=True)
    origin = CompoundField(FileOrigin, read_only=True)
    storage = CompoundField(FileStorage, read_only=True)
    metadata = CompoundField(Metadata, read_only=False)
    tags = BasicListField(read_only=False)

    def __str__(self):
        return six.text_type('<File: id={id}>'.format(id=self.id))

    @classmethod
    def query(cls, project, names=None, metadata=None, origin=None,
              offset=None, limit=None, api=None):
        """
        Query ( List ) files
        :param project: Project id
        :param names: Name list
        :param metadata: Metadata query dict
        :param origin: Origin query dict
        :param offset: Pagination offset
        :param limit: Pagination limit
        :param api: Api instance.
        :return: Collection object.
        """
        api = api or cls._API
        project = Transform.to_project(project)
        query_params = {}
        if names and isinstance(names, list):
            query_params['name'] = names
        # Metadata and origin filters are flattened into dotted query keys,
        # e.g. metadata={'a': 1} -> metadata.a=1.
        metadata_params = {}
        if metadata and isinstance(metadata, dict):
            for k, v in metadata.items():
                metadata_params['metadata.' + k] = metadata[k]
        query_params.update(metadata_params)
        origin_params = {}
        if origin and isinstance(origin, dict):
            for k, v in origin.items():
                origin_params['origin.' + k] = origin[k]
        query_params.update(origin_params)
        return super(File, cls)._query(
            api=api, url=cls._URL['query'], project=project, offset=offset,
            limit=limit, fields='_all', **query_params
        )

    @classmethod
    def upload(cls, path, project, file_name=None, overwrite=False, retry=5,
               timeout=10, part_size=PartSize.UPLOAD_MINIMUM_PART_SIZE,
               wait=True, api=None):
        """
        Uploads a file using multipart upload and returns an upload handle
        if the wait parameter is set to False. If wait is set to True it
        will block until the upload is completed.

        :param path: File path on local disc.
        :param project: Project identifier
        :param file_name: Optional file name.
        :param overwrite: If true will overwrite the file on the server.
        :param retry: Number of retries if error occurs during upload.
        :param timeout: Timeout for http requests.
        :param part_size: Part size in bytes.
        :param wait: If true will wait for upload to complete.
        :param api: Api instance.
        """
        api = api or cls._API
        project = Transform.to_project(project)
        upload = Upload(
            path, project, file_name=file_name, overwrite=overwrite,
            retry_count=retry, timeout=timeout, part_size=part_size, api=api
        )
        if wait:
            upload.start()
            upload.wait()
            return upload
        else:
            return upload

    def copy(self, project, name=None):
        """
        Copies the current file.
        :param project: Destination project.
        :param name: Destination file name.
        :return: Copied File object.
        """
        project = Transform.to_project(project)
        data = {
            'project': project
        }
        if name:
            data['name'] = name
        new_file = self._api.post(url=self._URL['copy'].format(id=self.id),
                                  data=data).json()
        return File(api=self._api, **new_file)

    def download_info(self):
        """
        Fetches download information containing file url
        that can be used to download file.
        :return: Download info object.
        """
        info = self._api.get(url=self._URL['download_info'].format(id=self.id))
        return DownloadInfo(api=self._api, **info.json())

    def download(self, path, retry=5, timeout=10,
                 chunk_size=PartSize.DOWNLOAD_MINIMUM_PART_SIZE, wait=True,
                 overwrite=False):
        """
        Downloads the file and returns a download handle.
        Download will not start until .start() method is invoked.
        :param path: Full path to the new file.
        :param retry: Number of retries if error occurs during download.
        :param timeout: Timeout for http requests.
        :param chunk_size: Chunk size in bytes.
        :param wait: If true will wait for download to complete.
        :param overwrite: If True will silently overwrite existing file,
            otherwise OSError is raised.
        :return: Download handle.
        """
        if not overwrite and os.path.exists(path):
            raise LocalFileAlreadyExists(message=path)
        info = self.download_info()
        download = Download(
            url=info.url, file_path=path, retry_count=retry, timeout=timeout,
            part_size=chunk_size, api=self._api
        )
        if wait:
            download.start()
            download.wait()
        else:
            return download

    @inplace_reload
    def save(self, inplace=True):
        """
        Saves all modification to the file on the server.
        :param inplace Apply edits to the current instance or get a new one.
        :raise ResourceNotModified: if nothing was changed locally.
        :return: File instance.
        """
        modified_data = self._modified_data()
        if bool(modified_data):
            # If metadata is to be set
            if 'metadata' in modified_data:
                # The presence of self._method (set on metadata assignment)
                # selects PUT (replace) over PATCH (merge).
                try:
                    _ = self._method
                    self._api.put(
                        url=self._URL['metadata'].format(id=self.id),
                        data=modified_data['metadata']
                    )
                except AttributeError:
                    self._api.patch(
                        url=self._URL['metadata'].format(id=self.id),
                        data=modified_data['metadata']
                    )
                modified_data.pop('metadata')
            if 'tags' in modified_data:
                self._api.put(
                    url=self._URL['tags'].format(id=self.id),
                    data=modified_data['tags']
                )
                modified_data.pop('tags')
            # Change everything else
            if bool(modified_data):
                self._api.patch(
                    url=self._URL['get'].format(id=self.id),
                    data=modified_data
                )
        else:
            raise ResourceNotModified()
        return self.reload()

    def stream(self, part_size=32 * PartSize.KB):
        """
        Creates an iterator which can be used to stream the file content.
        :param part_size: Size of the part in bytes. Default 32KB
        :return Iterator
        """
        download_info = self.download_info()
        response = self._api.get(
            url=download_info.url, stream=True, append_base=False
        )
        for part in response.iter_content(part_size):
            yield part

    # noinspection PyAttributeOutsideInit
    def reload(self):
        """
        Refreshes the file with the data from the server.
        """
        # Try the self-describing href first, then fall back to the id URL.
        try:
            data = self._api.get(self.href, append_base=False).json()
            resource = File(api=self._api, **data)
        except Exception:
            try:
                data = self._api.get(
                    self._URL['get'].format(id=self.id)).json()
                resource = File(api=self._api, **data)
            except Exception:
                raise SbgError('Resource can not be refreshed!')
        self._data = resource._data
        self._dirty = resource._dirty
        # If file.metadata = value was executed
        # file object will have attribute _method='PUT', which tells us
        # to force overwrite of metadata on the server. This is metadata
        # specific. Once we reload the resource we delete the attribute
        # _method from the instance.
        try:
            delattr(self, '_method')
        except AttributeError:
            pass
class File(Resource):
    """
    Central resource for managing files.
    """
    _URL = {
        'query': '/files',
        'get': '/files/{id}',
        'delete': '/files/{id}',
        'copy': '/files/{id}/actions/copy',
        'download_info': '/files/{id}/download_info',
        'metadata': '/files/{id}/metadata'
    }

    href = HrefField()
    id = StringField()
    name = StringField(read_only=False)
    size = IntegerField(read_only=True)
    project = StringField(read_only=True)
    created_on = DateTimeField(read_only=True)
    modified_on = DateTimeField(read_only=True)
    origin = CompoundField(FileOrigin)
    metadata = CompoundField(Metadata, read_only=False)

    def __str__(self):
        return six.text_type('<File: id={id}>'.format(id=self.id))

    @classmethod
    def query(cls, project, names=None, metadata=None, origin=None,
              offset=None, limit=None, api=None):
        """
        Query ( List ) files
        :param project: Project id
        :param names: Name list
        :param metadata: Metadata query dict
        :param origin: Origin query dict
        :param offset: Pagination offset
        :param limit: Pagination limit
        :param api: Api instance.
        :return: Collection object.
        """
        api = api or cls._API
        project = Transform.to_project(project)
        query_params = {}
        if names and isinstance(names, list):
            query_params['name'] = names
        # Flatten metadata/origin filters into dotted query keys.
        metadata_params = {}
        if metadata and isinstance(metadata, dict):
            for k, v in metadata.items():
                metadata_params['metadata.' + k] = metadata[k]
        query_params.update(metadata_params)
        origin_params = {}
        if origin and isinstance(origin, dict):
            for k, v in origin.items():
                origin_params['origin.' + k] = origin[k]
        query_params.update(origin_params)
        return super(File, cls)._query(api=api, url=cls._URL['query'],
                                       project=project, offset=offset,
                                       limit=limit, **query_params)

    def copy(self, project, name=None):
        """
        Copies the current file.
        :param project: Destination project.
        :param name: Destination file name.
        :return: Copied File object.
        """
        project = Transform.to_project(project)
        data = {'project': project}
        if name:
            data['name'] = name
        new_file = self._api.post(url=self._URL['copy'].format(id=self.id),
                                  data=data).json()
        return File(api=self._api, **new_file)

    def download_info(self):
        """
        Fetches download information containing file url
        that can be used to download file.
        :return: Download info object.
        """
        info = self._api.get(url=self._URL['download_info'].format(id=self.id))
        return DownloadInfo(api=self._api, **info.json())

    def download(self, path, retry=5, timeout=10, chunk_size=67108864,
                 wait=True):
        """
        Downloads the file and returns a download handle.
        Download will not start until .start() method is invoked.
        :param path: Full path to the new file.
        :param retry: Number of retries if error occurs during download.
        :param timeout: Timeout for http requests.
        :param chunk_size: Chunk size in bytes (default 64 MB).
        :param wait: If true will wait for download to complete.
        :return: Download handle.
        """
        info = self.download_info()
        download = Download(url=info.url, file_path=path, retry=retry,
                            timeout=timeout, chunk_size=chunk_size,
                            api=self._api)
        if wait:
            download.start()
            download.wait()
        else:
            return download

    @inplace_reload
    def save(self, inplace=True):
        """
        Saves all modification to the file on the server.
        :param inplace Apply edits to the current instance or get a new one.
        :return: File instance.
        """
        modified_data = self._modified_data()
        if bool(modified_data):
            # Metadata has a dedicated endpoint; everything else is patched
            # through the file endpoint.
            if 'metadata' in modified_data:
                self._api.patch(url=self._URL['metadata'].format(id=self.id),
                                data=modified_data['metadata'])
                self.metadata.dirty = {}
                return self.get(id=self.id)
            else:
                data = self._api.patch(url=self._URL['get'].format(id=self.id),
                                       data=modified_data).json()
                file = File(api=self._api, **data)
                return file

    def stream(self, part_size=32 * PartSize.KB):
        """
        Creates an iterator which can be used to stream the file content.
        :param part_size: Size of the part in bytes.
            Default 32KB
        :return Iterator
        """
        download_info = self.download_info()
        response = self._api.get(url=download_info.url, stream=True,
                                 append_base=False)
        for part in response.iter_content(part_size):
            yield part
class AsyncJob(Resource):
    """
    Central resource for managing async jobs
    """
    _URL = {
        'list_file_jobs': '/async/files',
        'get_file_copy_job': '/async/files/copy/{id}',
        'get_file_delete_job': '/async/files/delete/{id}',
        'bulk_copy_files': '/async/files/copy',
        'bulk_delete_files': '/async/files/delete',
        'get_file_move_job': '/async/files/move/{id}',
        'bulk_move_files': '/async/files/move',
    }

    id = StringField(read_only=True)
    type = StringField(read_only=True)
    state = StringField(read_only=True)
    result = BasicListField(read_only=True)
    # Aggregate progress counters for the bulk job.
    total_files = IntegerField(read_only=True)
    failed_files = IntegerField(read_only=True)
    completed_files = IntegerField(read_only=True)
    started_on = DateTimeField(read_only=True)
    finished_on = DateTimeField(read_only=True)

    def __str__(self):
        return six.text_type('<AsyncJob: type={type} id={id}>'.format(
            id=self.id, type=self.type))

    def __eq__(self, other):
        if not hasattr(other, '__class__'):
            return False
        if not self.__class__ == other.__class__:
            return False
        return self is other or self.id == other.id

    def __ne__(self, other):
        return not self.__eq__(other)

    @classmethod
    def get_file_copy_job(cls, id, api=None):
        """
        Retrieve file copy async job
        :param id: Async job identifier
        :param api: Api instance
        :return:
        """
        id = Transform.to_async_job(id)
        api = api if api else cls._API
        async_job = api.get(url=cls._URL['get_file_copy_job'].format(
            id=id)).json()
        return AsyncJob(api=api, **async_job)

    @classmethod
    def get_file_move_job(cls, id, api=None):
        """
        Retrieve file move async job
        :param id: Async job identifier
        :param api: Api instance
        :return:
        """
        id = Transform.to_async_job(id)
        api = api if api else cls._API
        async_job = api.get(url=cls._URL['get_file_move_job'].format(
            id=id)).json()
        return AsyncJob(api=api, **async_job)

    @classmethod
    def get_file_delete_job(cls, id, api=None):
        """
        Retrieve file delete async job
        :param id: Async job identifier
        :param api: Api instance
        :return:
        """
        id = Transform.to_async_job(id)
        api = api if api else cls._API
        async_job = api.get(url=cls._URL['get_file_delete_job'].format(
            id=id)).json()
        return AsyncJob(api=api, **async_job)

    def get_result(self, api=None):
        """
        Get async job result in bulk format
        :return: List of AsyncFileBulkRecord objects
        """
        api = api or self._API
        if not self.result:
            return []
        return AsyncFileBulkRecord.parse_records(result=self.result, api=api)

    @classmethod
    def list_file_jobs(cls, offset=None, limit=None, api=None):
        """Query ( List ) async jobs
        :param offset: Pagination offset
        :param limit: Pagination limit
        :param api: Api instance
        :return: Collection object
        """
        api = api or cls._API
        return super(AsyncJob, cls)._query(
            api=api, url=cls._URL['list_file_jobs'], offset=offset,
            limit=limit,
        )

    @classmethod
    def file_bulk_copy(cls, files, api=None):
        # Submit a bulk file-copy job; `files` is passed through verbatim as
        # the request payload items.
        api = api or cls._API
        data = {'items': files}
        logger.info('Submitting async job for copying files in bulk')
        response = api.post(url=cls._URL['bulk_copy_files'], data=data).json()
        return AsyncJob(api=api, **response)

    @classmethod
    def file_bulk_move(cls, files, api=None):
        # Submit a bulk file-move job.
        api = api or cls._API
        data = {'items': files}
        logger.info('Submitting async job for moving files in bulk')
        response = api.post(url=cls._URL['bulk_move_files'], data=data).json()
        return AsyncJob(api=api, **response)

    @classmethod
    def file_bulk_delete(cls, files, api=None):
        # Submit a bulk file-delete job.
        api = api or cls._API
        data = {'items': files}
        logger.info('Submitting async job for deleting files in bulk')
        response = api.post(url=cls._URL['bulk_delete_files'],
                            data=data).json()
        return AsyncJob(api=api, **response)
class App(Resource):
    """
    Central resource for managing apps.
    """
    _URL = {
        'query': '/apps',
        'get': '/apps/{id}',
        'get_revision': '/apps/{id}/{revision}',
        'create_revision': '/apps/{id}/{revision}/raw',
        'copy': '/apps/{id}/actions/copy',
        'raw': '/apps/{id}/raw'
    }

    href = HrefField()
    _id = StringField(read_only=True, name='id')
    project = StringField(read_only=True)
    name = StringField(read_only=True)
    revision = IntegerField(read_only=True)
    raw = DictField(read_only=False)

    @property
    def id(self):
        """App id with any trailing numeric revision segment stripped."""
        _id, _rev = self._id.rsplit('/', 1)
        # BUGFIX: use a raw string for the regex — '\d' in a plain string
        # literal is an invalid escape sequence (warning in Python 3.6+).
        if re.match(r'^\d*$', _rev):
            return _id
        else:
            return self._id

    def __str__(self):
        return six.text_type('<App: id={id}>'.format(id=self.id))

    @classmethod
    def query(cls, project=None, visibility=None, offset=None, limit=None,
              api=None):
        """
        Query (List) apps.
        :param visibility: private|public for private or public apps.
        :param project: Source project.
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :param api: Api instance.
        :return: collection object
        """
        if project:
            project = Transform.to_project(project)
        api = api or cls._API
        return super(App, cls)._query(url=cls._URL['query'], project=project,
                                      visibility=visibility, offset=offset,
                                      limit=limit, api=api)

    @classmethod
    def get_revision(cls, id, revision, api=None):
        """
        Get app revision.
        :param id: App identifier.
        :param revision: App revision
        :param api: Api instance.
        :return: App object.
        """
        api = api if api else cls._API
        app = api.get(url=cls._URL['get_revision'].format(
            id=id, revision=revision)).json()
        return App(api=api, **app)

    @classmethod
    def install_app(cls, id, raw, api=None):
        """
        Installs and app.
        :param id: App identifier.
        :param raw: Raw cwl data.
        :param api: Api instance.
        :return: App object.
        """
        api = api if api else cls._API
        app = api.post(url=cls._URL['raw'].format(id=id), data=raw).json()
        # The raw endpoint returns the CWL document; fetch the wrapper
        # resource using the server-assigned 'sbg:id'.
        app_wrapper = api.get(url=cls._URL['get'].format(
            id=app['sbg:id'])).json()
        return App(api=api, **app_wrapper)

    @classmethod
    def create_revision(cls, id, revision, raw, api=None):
        """
        Create a new app revision.
        :param id: App identifier.
        :param revision: App revision.
        :param raw: Raw cwl object.
        :param api: Api instance.
        :return: App object.
        """
        api = api if api else cls._API
        app = api.post(url=cls._URL['create_revision'].format(
            id=id, revision=revision), data=raw).json()
        app_wrapper = api.get(
            url=cls._URL['get'].format(id=app['sbg:id'])).json()
        return App(api=api, **app_wrapper)

    def copy(self, project, name=None):
        """
        Copies the current app.
        :param project: Destination project.
        :param name: Destination app name.
        :return: Copied App object.
        """
        project = Transform.to_project(project)
        data = {
            'project': project
        }
        if name:
            data['name'] = name
        app = self._api.post(url=self._URL['copy'].format(id=self.id),
                             data=data).json()
        return App(api=self._api, **app)