def mark_as_read(self,
                 *,
                 project: ResourceId,
                 type: Optional[str] = None,
                 target: Optional[ResourceId] = None,
                 flight: Optional[ResourceId] = None,
                 **kwargs) -> None:
    """Mark all the comments of a target or project as read.

    Args:
        project: Identifier of project.

        type: Optional comment type (must be one of ``project``,
            ``annotation``, ``flight``, ``photo``, ``dataset``,
            ``feature``, ``gcp``, ``task``).

        target: Optional identifier of the target. Ignored unless
            ``type`` is also supplied.

        flight: Optional identifier of the flight (mandatory when the
            comment type is ``photo``).

        **kwargs: Optional keyword arguments. Those arguments are
            passed as is to the API provider.

    Raises:
        ParameterError: When ``type`` is ``photo`` and ``target`` or
            ``flight`` is missing.

    Examples:
        >>> sdk.comments.mark_as_read(
        ...     project='5d63cf972fb3880011e57f22',
        ...     type='dataset',
        ...     target='5d63cf972fb3880011e57e34')

    """
    data = kwargs
    data.update({'project_id': project})

    if type:
        data['target'] = {'type': type}
        if type == 'photo':
            if target is None or flight is None:
                raise ParameterError(
                    'When dealing with a photo target, '
                    'the target and flight must be defined')
            # A photo is addressed through its flight: the flight is the
            # primary identifier, the photo itself is the sub-identifier.
            data['target']['id'] = flight
            data['target']['subId'] = target
        elif target is not None:
            data['target']['id'] = target

    self._provider.post('comments/mark-as-read', data=data, as_json=False)
def _adapt_params(params: Dict[str, Any]) -> Dict[str, Any]:
    """Normalize dataset creation parameters for the API provider.

    Drops common creation parameters left to ``None``, renames
    ``dataset_format`` to ``format`` and wraps ``source_name`` into a
    ``source`` object.

    Args:
        params: Raw keyword parameters gathered by a ``create_*`` helper.

    Returns:
        A new dictionary ready to be sent to the API provider; the input
        dictionary is left untouched.

    Raises:
        ParameterError: When neither ``company`` nor ``project`` is set.

    """
    p = params.copy()

    # Common creation parameters are optional: drop the ones left to None.
    # (The former pop-and-reassign of non-None values was a no-op and has
    # been removed.)
    for common_param in __creation_common_params:
        if common_param in p and p[common_param] is None:
            del p[common_param]

    if p.get('company') is None and p.get('project') is None:
        raise ParameterError('One of "company" or "project" must be specified')

    # The provider expects 'format' rather than the SDK-level name.
    if 'dataset_format' in p:
        p['format'] = p.pop('dataset_format')

    if 'source_name' in p:
        p['source'] = {'name': p.pop('source_name')}
    return p
def order(self, analytic: Optional[ResourceId] = None, *,
          name: Optional[str] = None,
          version: Optional[str] = None,
          inputs: Optional[dict] = None,
          parameters: Optional[dict] = None,
          deliverables: Optional[List[str]] = None,
          project: Optional[ResourceId] = None,
          mission: Optional[ResourceId] = None,
          **kwargs) -> Resource:
    """Order an analytic.

    The analytic to order can be specified by identifier using the
    ``analytic`` argument, or by name using the ``name`` argument.

    When using the ``name`` argument to select the analytic but
    ``version`` is equal to ``None``, the analytic with highest version
    matching ``name`` is ordered. The argument ``version`` can be used
    to order the analytic with a specific version. When ``version`` is
    a version range, the analytic with highest version within the
    specified range will be ordered.

    Note that ``version`` is expected to follow the `syntax defined by
    npm <https://semver.npmjs.com/>`_.

    Args:
        analytic: Identifier of the analytic to order.

        name: Name of the analytic to order.

        version: Optional version or version range used to select the
            analytic to order when specified by name.

        inputs: Optional inputs of the analytic.

        parameters: Optional parameters of the analytic.

        deliverables: List of optional deliverables to generate. When
            empty or ``None`` only required deliverables are generated.

        project: Optional project of the analytic.

        mission: Optional mission of the analytic.

        **kwargs: Optional keyword arguments. Those arguments are
            passed as is to the API provider.

    Returns:
        The created ``product`` description.

    Raises:
        ParameterError: When neither ``analytic`` nor ``name`` is
            defined, or when no analytic matches ``name``/``version``.

    Examples:
        >>> sdk.analytics.order('5d5a73b58cf5360006397aa0',
        ...     inputs={"ortho": "5d3714e14c50356e2abd1f97"},
        ...     deliverables=["vehicles"],
        ...     parameters={"buffer_size": 5.0},
        ...     project='5d3195209755b0349d0539ad')
        Resource(_id='60c331fed5ffbd0012f1f754')

        >>> sdk.analytics.order(name='vehicle_detection',
        ...     version='1.0.x',
        ...     inputs={"ortho": "5d3714e14c50356e2abd1f97"},
        ...     deliverables=["vehicles"],
        ...     parameters={"buffer_size": 5.0},
        ...     project='5d3195209755b0349d0539ad')
        Resource(_id='60c331fed5ffbd0012f1fde8')

    """
    if not name and not analytic:
        # The two ways to select the analytic are by identifier or by name.
        raise ParameterError('Expecting one of analytic or name to be defined')
    if not name and version:
        warnings.warn('Ignoring version argument since analytic is specified by identifier')

    if name:
        found_analytic = self.describe_by_name(name, version=version)
        if not found_analytic:
            raise ParameterError('No analytic with matching name and version')
        analytic = str(found_analytic.id)

    data: Dict[str, Any] = {'analytic': analytic}

    # Update to the format expected by analytics-service: deliverables
    # are sent as a mapping of deliverable name to (empty) options.
    deliverable_obj: Optional[Dict[str, None]] = None
    if deliverables:
        deliverable_obj = {d: None for d in deliverables}

    for k, v in [('inputs', inputs),
                 ('parameters', parameters),
                 ('deliverables', deliverable_obj),
                 ('project', project),
                 ('mission', mission)]:
        if v:
            data.update({k: v})

    data.update(kwargs)

    desc = self._provider.post(path='order-analytic', data=data)
    return Resource(**desc)
def create(self, name: str, *, project: ResourceId, type: str,
           target: ResourceId = None,
           flight: ResourceId = None, **kwargs) -> Tag:
    """Create a tag.

    Args:
        name: Tag name.

        project: Identifier of project to tag.

        type: Tag type (must be one of ``project``, ``annotation``,
            ``flight``, ``photo``, ``dataset``, ``feature``, ``gcp``,
            ``task``).

        target: Optional identifier of the target.

        flight: Optional identifier of the flight (mandatory when the
            tag type is ``photo``).

        **kwargs: Optional keyword arguments. Those arguments are
            passed as is to the API provider.

    Returns:
        Tag: The created tag.

    Examples:
        >>> sdk.tags.create(
        ...     name='my tag',
        ...     project='5d63cf972fb3880011e57f22',
        ...     type='dataset',
        ...     target='5d63cf972fb3880011e57e34')
        Tag(_id='5f6155ae8dcb064fcbf4ae35')

    """
    # Build the target description first so invalid photo tags fail fast.
    target_desc = {'type': type}
    if type == 'photo':
        if target is None or flight is None:
            raise ParameterError('When tagging a photo, '
                                 'the target and flight must be defined')
        # A photo is addressed through its flight: flight is the primary
        # identifier, the photo itself is the sub-identifier.
        target_desc['id'] = flight
        target_desc['subId'] = target
    elif target is not None:
        target_desc['id'] = target

    payload = dict(kwargs)
    payload.update({
        'project_id': project,
        'text': name,
        'target': target_desc,
    })

    response = self._provider.post('tags', data=payload)
    return self._convert_uisrv_desc_to_Tag(response['tag'])
def add_attachments(self, annotation: ResourceId, *,
                    attachments: Optional[List[ResourceId]] = None,
                    file_paths: Optional[List[str]] = None, **kwargs):
    """Attach datasets to the annotation.

    An attachment is a reference to a dataset handled by the Data
    Management API. Items of the ``file_paths`` argument are
    interpreted as file paths on the host file system. For each item,
    a dataset is created and the file at the given path is uploaded.
    The dataset will be attached to the created annotation.

    The created dataset has the following properties:

    - It's type is equal to ``file`` or ``image`` depending on the
      local file MIME type.

    - It belongs to the same project as the annotation.

    - It's named is equal to the basename of the local file.

    For fine control of the dataset type, mission, published status,
    etc. or when the dataset has multiple components, one must create
    the dataset separately and use the ``attachment`` argument.

    Args:
        annotation: Identifier of the annotation to attach to.

        attachments: Identifiers of dataset to attach to the
            annotation. The list is not modified by this call.

        file_paths: List of file path to upload and attach to the
            annotation.

        **kwargs: Optional keyword arguments. Those arguments are
            passed as is to the API provider.

    Raises:
        ParameterError: When neither ``attachments`` nor
            ``file_paths`` is specified.

        TypeError: When the annotation description isn't a single
            resource.

    """
    if attachments is None and file_paths is None:
        raise ParameterError(
            'One of "attachments" or "file_paths" must be specified')

    # Work on a copy: '+=' on the parameter would mutate a
    # caller-supplied list in place.
    all_attachments = list(attachments) if attachments else []

    if file_paths is not None:
        a = self.describe(annotation)
        if not isinstance(a, Resource):
            raise TypeError('Expecting a single Resource')

        # Uploaded files become datasets in the annotation's project.
        datasets = self.__upload_files(project=a.project,
                                       file_paths=file_paths)
        all_attachments += datasets

    data = kwargs
    data.update({'annotation': annotation, 'attachments': all_attachments})
    self._provider.post('add-attachments', data=data)
def create_vector_dataset(self, *, name: str,
                          categories: Sequence[str] = None,
                          company: ResourceId = None,
                          project: ResourceId = None,
                          mission: ResourceId = None,
                          hidden: bool = None,
                          published: bool = None,
                          collection: ResourceId = None,
                          origin: ResourceId = None,
                          horizontal_srs_wkt: str = None,
                          vertical_srs_wkt: str = None,
                          dataset_format: str = None,
                          geometry: dict = None,
                          properties: dict = None,
                          is_shape_file: bool = False,
                          is_archive: bool = False,
                          has_projection_file: bool = False,
                          **kwargs) -> Resource:
    """Create a dataset of type ``vector``.

    When ``is_archive`` is True, ``is_shape_file`` and
    ``has_projection_file`` must be False.

    One of ``company`` or ``project`` must be defined.

    Args:
        name: Name of the dataset.

        categories: Sequence of categories or None if there's no
            category to set on the dataset.

        company: Optional company identifier.

        project: Optional project identifier.

        mission: Optional mission identifier.

        hidden: Whether not to display the dataset to end-users or
            not.

        published: Whether the dataset is ready for delivery or not.

        collection: Optional map-service collection to use as data
            source. Providing a collection isn't compatible with
            setting ``is_shape_file``, ``has_projection_file``,
            ``is_archive`` to True, nor setting ``dataset_format``.

        origin: Optional origin vector dataset (source: data-manager)
            for a vector collection dataset (source: map-service).

        horizontal_srs_wkt: Optional geographic coordinate system for
            horizontal coordinattes in WKT format.

        vertical_srs_wkt: Optional geographic coordinate system for
            vertical coordinattes in WKT format.

        dataset_format: Optional file format.

        geometry: Optional geometry of the dataset.

        properties: Optional custom properties of the dataset.

        is_shape_file: Whether it is an ESRI Shapefile.

        is_archive: Whether it is an archive.

        has_projection_file: Whether there is a sidecar file to define
            the shapes projection.

        **kwargs: Optional keyword arguments. Those arguments are
            passed as is to the API provider.

    Returns:
        Resource: Resource for the created dataset.

    Raises:
        ParameterError: When incompatible arguments are combined.

    """
    params = kwargs
    params.update({
        'name': name,
        'categories': categories,
        'company': company,
        'project': project,
        'mission': mission,
        'hidden': hidden,
        'published': published,
        'horizontal_srs_wkt': horizontal_srs_wkt,
        'vertical_srs_wkt': vertical_srs_wkt,
        'dataset_format': dataset_format,
        'geometry': geometry,
        'properties': properties,
    })

    components: Union[List[str], List[Dict[str, Any]]]
    if collection:
        # A map-service collection excludes every file-based option.
        if any([is_shape_file, has_projection_file,
                is_archive, dataset_format]):
            raise ParameterError('Incompatible arguments')

        collection_component: Dict[str, Any] = {
            'name': 'collection',
            'collection': {'id': collection},
        }
        if origin:
            collection_component['origin'] = {'id': origin}
        components = [collection_component]
        params['source'] = {'name': 'map-service'}
    elif is_archive:
        # An archive is self-contained: no sidecar components allowed.
        if is_shape_file or has_projection_file:
            raise ParameterError('Incompatible arguments')
        components = ['archive']
    else:
        components = ['vector']
        if is_shape_file:
            components += ['database', 'index']
        if has_projection_file:
            components += ['projection']

    return self._create('vector', components, **params)