def download_videos(self, path, preview_videos=False, no_meter=False):
    """
    Downloads videos to given path. If `preview_videos` is set, download
    preview videos in place of full videos.
    """
    os.makedirs(path, exist_ok=True)
    fields = FieldsRequest()
    fields.include_field("filename", "preview_video_url", "url", "md5")
    downloads = []
    for video in self.get_videos(fields=fields):
        video_url = video.url
        if preview_videos:
            if video.preview_video_url:
                video_url = video.preview_video_url
            else:
                logger.warning(
                    "No preview available for '%s', downloading full video",
                    video.filename,
                )
        local_path = os.path.join(path, video.filename)
        download = DownloadRequest(
            url=video_url, local_path=local_path, expected_md5=video.md5
        )
        downloads.append(download)
    self._conservator.files.download_many(downloads, no_meter=no_meter)
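# Minimal usage sketch for download_videos above. Assumes a default
# Conservator config; the remote path "/Example" is hypothetical.
from FLIR.conservator.conservator import Conservator

conservator = Conservator.default()
collection = conservator.collections.from_remote_path("/Example")
# Prefer small preview videos; when a video has no preview, the method
# logs a warning and downloads the full video instead.
collection.download_videos("out/videos", preview_videos=True)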
def download_datasets(self, path, no_meter=False):
    """Clones and pulls all datasets in the collection."""
    fields = FieldsRequest()
    fields.include_field("name", "repository.master")
    datasets = self.get_datasets(fields=fields)
    for dataset in datasets:
        clone_path = os.path.join(path, dataset.name)
        lds = LocalDataset.clone(dataset, clone_path=clone_path)
        lds.download(no_meter=no_meter)
def download_images(self, path, no_meter=False):
    """Downloads images to given path."""
    os.makedirs(path, exist_ok=True)
    fields = FieldsRequest()
    fields.include_field("filename", "url", "md5")
    downloads = []
    for image in self.get_images(fields=fields):
        local_path = os.path.join(path, image.filename)
        download = DownloadRequest(
            url=image.url, local_path=local_path, expected_md5=image.md5
        )
        downloads.append(download)
    self._conservator.files.download_many(downloads, no_meter=no_meter)
def wrapper(properties="", exclude="", **kwargs):
    include = list(filter(lambda p: p != "", properties.split(",")))
    exclude = list(filter(lambda p: p != "", exclude.split(",")))
    fields = {name: True for name in include}
    fields.update(**{name: False for name in exclude})
    kwargs["fields"] = FieldsRequest.create(fields)
    return func(**kwargs)
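# Illustration of the flag parsing above: the comma-separated `properties`
# and `exclude` strings (presumably CLI options) become a dict that
# FieldsRequest.create accepts. Values here are illustrative.
properties, exclude = "id,name", "acl"
include = [p for p in properties.split(",") if p]
excluded = [p for p in exclude.split(",") if p]
fields = {name: True for name in include}
fields.update({name: False for name in excluded})
print(fields)  # {'id': True, 'name': True, 'acl': False}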
def populate_all(self):
    """
    .. deprecated:: 1.0.2
       This no longer queries all fields, instead only selecting the
       defaults, which is equivalent to calling :meth:`populate` with
       no arguments.
    """
    self.populate(fields=FieldsRequest())
def __init__(
    self,
    conservator,
    wrapping_type=None,
    query=None,
    base_operation=None,
    fields=None,
    page_size=25,
    unpack_field=None,
    reverse=False,
    total_unpack_field=None,
    **kwargs,
):
    # Unfortunately, query is a required arg, but for backwards-compatibility
    # reasons can't be made required.
    assert query is not None
    self._conservator = conservator
    self._query = query
    self.fields = FieldsRequest.create(fields)
    self._page = 0
    self._limit = page_size
    self.unpack_field = unpack_field
    self.results = []
    self.reverse = reverse
    self._total_items = 0
    if reverse:
        if not total_unpack_field:
            raise KeyError("total_unpack_field must be supplied if reverse is True")
        self.fields.include_field(total_unpack_field)
        # Perform a single-entry query to collect the total count of items.
        try:
            results = self._conservator.query(
                query=self._query, fields=self.fields, page=1, limit=1, **kwargs
            )
        except AttributeError as exc:
            if str(exc).endswith(total_unpack_field):
                raise KeyError(total_unpack_field)
            raise
        self._total_items = getattr(results, total_unpack_field)
        if self._limit > self._total_items:
            self._limit = self._total_items
        # Don't confuse the API.
        if self._limit == 0:
            self._limit = 1
        # Set the page number to the last page of results.
        if self._total_items > self._limit:
            self._page = self._total_items // self._limit
            if self._total_items % self._limit:
                # Count any partial page.
                self._page += 1
            # Page numbers are 0-based.
            self._page -= 1
    self.kwargs = kwargs
    self.started = False
    self.done = False
    self.filters = []
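# Worked example of the last-page arithmetic above, with illustrative
# numbers: 103 total items at a page size of 25.
total_items, limit = 103, 25
page = total_items // limit  # 4 full pages
if total_items % limit:      # 3 leftover items form a partial page
    page += 1                # 5 pages in total
page -= 1                    # 0-based index of the last page
print(page)  # 4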
def including_all_fields(self):
    """
    Include all non-excluded fields in the results.
    """
    if self.started:
        raise ConcurrentQueryModificationException()
    self.fields = FieldsRequest.create(None)
    return self
def wrapper(self, *args, **kwargs):
    fr = FieldsRequest.create(fields)
    if hasattr(self, "populate"):
        self.populate(fr)
    for field in fields:
        if not self.has_field(field):
            raise MissingFieldException(f"Missing required field '{field}'")
    return f(self, *args, **kwargs)
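# Sketch of how the wrapper above is used. It is the inner function of a
# decorator; assuming that decorator is requires_fields (the import path is
# taken from the populate docstring later in this section), a wrapped
# method might look like the following. ExampleWrapper and print_link are
# hypothetical names, for illustration only.
from FLIR.conservator.wrappers.type_proxy import requires_fields

class ExampleWrapper:  # hypothetical wrapper type
    @requires_fields("filename", "url")
    def print_link(self):
        # By the time the body runs, both fields have been populated,
        # or MissingFieldException was raised.
        print(self.filename, self.url)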
def list_media(identifier, recursive):
    manager = get_instance()
    collection_fields = FieldsRequest.create(("id", "name", "path"))
    top_collection = manager.from_string(identifier, collection_fields)
    media_fields = FieldsRequest.create(("id", "name"))
    if recursive:
        collection_paths = top_collection.recursively_get_children(
            include_self=True, fields=collection_fields
        )
    else:
        collection_paths = [top_collection]
    no_results = True
    for coll in collection_paths:
        for media_file in coll.get_media(media_fields):
            click.echo(f"{coll.path}/{media_file.name}")
            no_results = False
    if no_results:
        click.echo(f"No media found in collection {identifier}")
    return True
def test_prepare_query_simple_exclude():
    op = Operation(Query)
    q = op.project
    q(id="123")
    fields = FieldsRequest()
    fields.include_field("file_locker_files", "acl")
    fields.exclude_field("file_locker_files")
    fields.prepare_query(q)
    assert 'project(id: "123")' in str(op)
    assert "fileLockerFiles" not in str(op)
    assert "acl" in str(op)
def tree():
    collections = []
    if pwd == "/":
        for project in conservator.projects.all().including_fields(
            "root_collection.path"
        ):
            collections.append(project.root_collection)
    elif current_collection is None:
        click.secho("Not in a valid Collection", fg="red")
        return
    else:
        collections.append(current_collection)
    child_paths = FieldsRequest()
    child_paths.include_field("children.path", "children.id")
    while len(collections) > 0:
        collection = collections.pop()
        click.echo(collection.path)
        collection.populate(child_paths)
        collections.extend(collection.children)
def cd(path):
    path = " ".join(path)
    global pwd, current_collection
    new_path = os.path.abspath(os.path.join(pwd, path))
    new_path = new_path.replace("//", "/")
    if new_path == "/":
        pwd = "/"
        current_collection = None
        return
    fields = FieldsRequest()
    fields.include_field("path", "id", "children.name", "children.path")
    try:
        collection = conservator.collections.from_remote_path(new_path, fields=fields)
    except InvalidRemotePathException:
        click.secho(f"Invalid path '{new_path}'", fg="red")
        return
    pwd = collection.path
    current_collection = collection
def download_metadata(self, path):
    """Downloads image and video metadata to ``media_metadata/``."""
    path = os.path.join(path, "media_metadata")
    os.makedirs(path, exist_ok=True)
    fields = FieldsRequest.create(["metadata", "filename"])
    videos = self.get_videos(fields=fields)
    for video in videos:
        video.download_metadata(path)
    images = self.get_images(fields=fields)
    for image in images:
        image.download_metadata(path)
def ls():
    click.secho(".", fg="blue")
    if pwd != "/":
        click.secho("..", fg="blue")
    for child in get_child_collections():
        click.secho(child.name + "/", fg="blue")
    if current_collection is None:
        return
    for video in get_videos():
        click.secho(video.name, fg="green")
    for image in get_images():
        click.secho(image.name, fg="bright_green")
    file_fields = FieldsRequest()
    file_fields.include_field("file_locker_files.name")
    current_collection.populate(file_fields)
    for file in current_collection.file_locker_files:
        click.secho(file.name, fg="yellow")
def _query(self, query, fields, **kwargs):
    type_ = query.type
    op = Operation(query.container)
    query_name = query.name
    query = getattr(op, query_name)
    query(**kwargs)
    fr = FieldsRequest.create(fields)
    fr.prepare_query(query)
    result = self.run(op)
    value = getattr(result, query_name)
    return TypeProxy.wrap(self, type_, value)
def test_prepare_query_simple():
    op = Operation(Query)
    q = op.project
    q(id="123")
    fields = FieldsRequest()
    fields.include_field("name")
    fields.prepare_query(q)
    assert 'project(id: "123")' in str(op)
    assert "name" in str(op)
    assert "repository" not in str(op)
def _videos(size):
    video_fields = FieldsRequest()
    video_fields.include_field("name")
    if size:
        video_fields.include_field("file_size")
    for video in current_collection.get_videos(video_fields):
        line = video.name
        if size:
            mb = int(video.file_size // 1024 // 1024)
            line += f"\t({mb} mb)"
        click.echo(line)
def details(filename):
    item = get_from_path(filename)
    if item is None:
        click.secho(f"Couldn't find '{filename}' in current collection", fg="red")
        return
    detail_fields = FieldsRequest()
    if isinstance(item, Collection):
        detail_fields.include_field(
            "name",
            "path",
            "owner",
            "created_by_name",
            "recursive_video_count",
            "recursive_dataset_count",
            "recursive_image_count",
            "recursive_child_count",
            "description",
        )
        item.populate(detail_fields)
        click.echo(f"Name: {item.name}")
        click.echo(f"Collection ID: {item.id}")
        click.echo(f"Path: {item.path}")
        click.echo(f"Owner: {item.owner}")
        click.echo(f"Creator: {item.created_by_name}")
        click.echo(f"Total Videos: {item.recursive_video_count}")
        click.echo(f"Total Images: {item.recursive_image_count}")
        click.echo(f"Total Datasets: {item.recursive_dataset_count}")
        click.echo(f"Total Child Collections: {item.recursive_child_count}")
        click.echo(f"Description: {item.description}")
    elif isinstance(item, (Video, Image)):
        detail_fields.include_field(
            "name",
            "owner",
            "uploaded_by_name",
            "file_size",
            "location",
            "tags",
            "spectrum",
            "description",
        )
        item.populate(detail_fields)
        click.echo(f"Name: {item.name}")
        click.echo(f"{item.__class__.__name__} ID: {item.id}")
        click.echo(f"Owner: {item.owner}")
        click.echo(f"Uploader: {item.uploaded_by_name}")
        click.echo(f"File Size: {item.file_size / 1024 / 1024:.2f} MB")
        click.echo(f"Location: {item.location}")
        click.echo(f"Tags: {item.tags}")
        click.echo(f"Spectrum: {item.spectrum}")
        click.echo(f"Description: {item.description}")
    else:
        click.echo("Unknown type")
def populate(self, fields=None):
    """
    Query conservator for the specified fields, even if they already
    exist on the object.

    To filter existing fields, use
    :func:`~FLIR.conservator.wrappers.type_proxy.requires_fields`
    """
    fields = FieldsRequest.create(fields)
    result = self._populate(fields)  # returns a TypeProxy with the new fields
    if result is None:
        raise InvalidIdException(f"Query with id='{self.id}' returned None")
    # copy over fields from other _instance (to get unproxied)
    for field in result._instance:
        v = getattr(result._instance, field)
        setattr(self._instance, field, v)
        self._initialized_fields.append(field)
def recursively_get_children(self, include_self=False, fields=None):
    """
    Yields all child collections recursively.

    :param include_self: If `True`, yield this collection too.
    :param fields: The `fields` to populate on children.
    """
    fields = FieldsRequest.create(fields)
    fields.include_field("children.id")
    self.populate(fields)
    if include_self:
        yield self
    collections = [*self.children]
    while len(collections) > 0:
        collection = collections.pop()
        collection.populate(fields)
        yield collection
        collections.extend(collection.children)
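# Minimal sketch: walk a collection tree with recursively_get_children,
# requesting only the fields needed for display. Assumes the `conservator`
# instance from the examples in this section; "/Example" is hypothetical.
# Note that the method always adds "children.id" itself, so traversal
# works even with a narrow fields tuple.
fields = FieldsRequest.create(("id", "name", "path"))
root = conservator.collections.from_remote_path("/Example")
for child in root.recursively_get_children(include_self=True, fields=fields):
    print(child.path)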
def _files(url):
    fields = FieldsRequest()
    fields.include_field("file_locker_files.name")
    if url:
        fields.include_field("file_locker_files.url")
    current_collection.populate(fields)
    if len(current_collection.file_locker_files) == 0:
        click.secho("Collection has no files", fg="red")
        return
    for file in current_collection.file_locker_files:
        line = file.name
        if url:
            line += f" {file.url}"
        click.echo(line)
def from_path(self, string, fields="id"):
    if "/" not in string:
        return None
    # start by path lookup
    parent_path = "/".join(string.split("/")[:-1])
    name = string.split("/")[-1]
    parent = self._conservator.collections.from_remote_path(
        path=parent_path, make_if_no_exist=False, fields="id"
    )
    # look inside parent for media with exact name match
    fields = FieldsRequest.create(fields)
    fields.include_field("name")
    media = list(parent.get_media(fields=fields, search_text="name:" + name))
    media = [m for m in media if m.name == name]
    if len(media) == 1:
        return media[0]
    if len(media) > 1:
        raise AmbiguousIdentifierException(string)
    return None
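# Design note on from_path above: search_text="name:..." narrows the
# candidates server-side, but search can return partial matches, so the
# list comprehension re-filters for exact name equality; more than one
# exact match raises AmbiguousIdentifierException. Hypothetical usage,
# assuming this method is exposed on the media manager:
# media = conservator.media.from_path("/Some Project/video.mp4")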
def _query_frames(self, start_frame_index=None, frame_index=None, fields=None):
    fields = FieldsRequest.create(fields)
    video_fields = {
        "frames": {
            "start_frame_index": start_frame_index,
            "frame_index": frame_index,
        }
    }
    for path, value in fields.paths.items():
        new_path = "frames." + path
        video_fields[new_path] = value
    video = self._conservator.query(
        query=self.by_id_query,
        fields=video_fields,
        id=self.id,
    )
    return video.frames
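# Illustration of the path prefixing in _query_frames above: every
# requested field path is re-rooted under "frames" so the pagination
# arguments and the frame fields travel in one query. Field names here
# are illustrative.
fields = FieldsRequest.create(["width", "annotations.labels"])
video_fields = {"frames": {"start_frame_index": 0, "frame_index": None}}
for path, value in fields.paths.items():
    video_fields["frames." + path] = value
# video_fields now has keys "frames", "frames.width",
# and "frames.annotations.labels"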
def with_fields(self, fields):
    """Sets the query's
    :class:`~FLIR.conservator.fields_request.FieldsRequest` to `fields`."""
    if self.started:
        raise ConcurrentQueryModificationException()
    self.fields = FieldsRequest.create(fields)
    return self
from FLIR.conservator.conservator import Conservator
from FLIR.conservator.fields_request import FieldsRequest

conservator = Conservator.default()

fields = FieldsRequest()
fields.include_field("video_id")
fields.include_field("width")
fields.include_field("height")
fields.include_field("annotations.bounding_box")
fields.include_field("annotations.labels")

video = conservator.videos.all().including_fields("name", "frame_count").first()
print(video)

# Unless you're working with huge videos (10k+ frames), you'll probably
# find it easier and faster to just populate all frames at once. Only
# use this method if your request for all frames is timing out, or too
# large to handle at once.
for frame in video.get_all_frames_paginated(fields):
    print(frame)
from FLIR.conservator.conservator import Conservator
from FLIR.conservator.fields_request import FieldsRequest

conservator = Conservator.default()

fields = FieldsRequest()
fields.include_field("dataset_frames.frame_id")
fields.include_field("dataset_frames.width")
fields.include_field("dataset_frames.height")
fields.include_field("dataset_frames.annotations.bounding_box")
fields.include_field("dataset_frames.annotations.labels")

dataset = conservator.datasets.from_id("RkAXSN4ychHgiNkMk")
for frame in dataset.get_frames(fields=fields):
    print(frame)