def download_videos(self, path, preview_videos=False, no_meter=False):
    """Download all videos in the collection to ``path``.

    :param path: Local directory to download into (created if it does not
        already exist).
    :param preview_videos: If ``True``, download preview videos in place of
        full videos. When a video has no preview available, the full video
        is downloaded instead and a warning is logged.
    :param no_meter: If ``True``, disable the download progress meter.
    """
    os.makedirs(path, exist_ok=True)
    fields = FieldsRequest()
    fields.include_field("filename", "preview_video_url", "url", "md5")
    downloads = []
    for video in self.get_videos(fields=fields):
        video_url = video.url
        if preview_videos:
            if video.preview_video_url:
                video_url = video.preview_video_url
            else:
                # Fixed mismatched quoting in the message ('%s` -> '%s').
                logger.warning(
                    "No preview available for '%s', downloading full video",
                    video.filename,
                )
        local_path = os.path.join(path, video.filename)
        download = DownloadRequest(
            url=video_url, local_path=local_path, expected_md5=video.md5
        )
        downloads.append(download)
    self._conservator.files.download_many(downloads, no_meter=no_meter)
def download_datasets(self, path, no_meter=False):
    """Clone every dataset in the collection under ``path`` and pull its files."""
    name_fields = FieldsRequest()
    name_fields.include_field("name", "repository.master")
    for dataset in self.get_datasets(fields=name_fields):
        local_copy = LocalDataset.clone(
            dataset, clone_path=os.path.join(path, dataset.name)
        )
        local_copy.download(no_meter=no_meter)
def test_prepare_query_simple():
    """An included field appears in the prepared query; others do not."""
    operation = Operation(Query)
    project_query = operation.project
    project_query(id="123")
    fields = FieldsRequest()
    fields.include_field("name")
    fields.prepare_query(project_query)
    rendered = str(operation)
    assert 'project(id: "123")' in rendered
    assert "name" in rendered
    assert "repository" not in rendered
def _videos(size):
    """Echo each video name in the current collection, with sizes if requested."""
    fields = FieldsRequest()
    fields.include_field("name")
    if size:
        fields.include_field("file_size")
    for video in current_collection.get_videos(fields):
        output = video.name
        if size:
            megabytes = int(video.file_size // 1024 // 1024)
            output += f"\t({megabytes} mb)"
        click.echo(output)
def test_prepare_query_simple_exclude():
    """A field that is included then excluded is omitted from the query."""
    operation = Operation(Query)
    project_query = operation.project
    project_query(id="123")
    fields = FieldsRequest()
    fields.include_field("file_locker_files", "acl")
    fields.exclude_field("file_locker_files")
    fields.prepare_query(project_query)
    rendered = str(operation)
    assert 'project(id: "123")' in rendered
    assert "fileLockerFiles" not in rendered
    assert "acl" in rendered
def details(filename):
    """Print detailed metadata for the named item in the current collection.

    Supports Collections, Videos, and Images; anything else prints
    "Unknown type".
    """
    item = get_from_path(filename)
    if item is None:
        # Bug fix: the original f-string had no placeholder, so the user
        # never saw which name was missing. Interpolate `filename`.
        click.secho(f"Couldn't find '{filename}' in current collection", fg="red")
        return
    detail_fields = FieldsRequest()
    if isinstance(item, Collection):
        detail_fields.include_field(
            "name",
            "path",
            "owner",
            "created_by_name",
            "recursive_video_count",
            "recursive_dataset_count",
            "recursive_image_count",
            "recursive_child_count",
            "description",
        )
        item.populate(detail_fields)
        click.echo(f"Name: {item.name}")
        click.echo(f"Collection ID: {item.id}")
        click.echo(f"Path: {item.path}")
        click.echo(f"Owner: {item.owner}")
        click.echo(f"Creator: {item.created_by_name}")
        click.echo(f"Total Videos: {item.recursive_video_count}")
        click.echo(f"Total Images: {item.recursive_image_count}")
        click.echo(f"Total Datasets: {item.recursive_dataset_count}")
        click.echo(f"Total Child Collections: {item.recursive_child_count}")
        click.echo(f"Description: {item.description}")
    # Idiom: single isinstance call with a tuple of types.
    elif isinstance(item, (Video, Image)):
        detail_fields.include_field(
            "name",
            "owner",
            "uploaded_by_name",
            "file_size",
            "location",
            "tags",
            "spectrum",
            "description",
        )
        item.populate(detail_fields)
        click.echo(f"Name: {item.name}")
        click.echo(f"{item.__class__.__name__} ID: {item.id}")
        click.echo(f"Owner: {item.owner}")
        click.echo(f"Uploader: {item.uploaded_by_name}")
        click.echo(f"File Size: {item.file_size / 1024 / 1024:.2f} MB")
        click.echo(f"Location: {item.location}")
        click.echo(f"Tags: {item.tags}")
        click.echo(f"Spectrum: {item.spectrum}")
        click.echo(f"Description: {item.description}")
    else:
        click.echo("Unknown type")
def download_images(self, path, no_meter=False):
    """Download every image in the collection into ``path``."""
    os.makedirs(path, exist_ok=True)
    fields = FieldsRequest()
    fields.include_field("filename", "url", "md5")
    requests = [
        DownloadRequest(
            url=image.url,
            local_path=os.path.join(path, image.filename),
            expected_md5=image.md5,
        )
        for image in self.get_images(fields=fields)
    ]
    self._conservator.files.download_many(requests, no_meter=no_meter)
def _files(url):
    """Echo each file-locker file name in the current collection.

    :param url: If truthy, append each file's download URL to its line.
    """
    fields = FieldsRequest()
    fields.include_field("file_locker_files.name")
    if url:
        fields.include_field("file_locker_files.url")
    current_collection.populate(fields)
    # Idiom: truthiness check instead of len(...) == 0.
    if not current_collection.file_locker_files:
        click.secho("Collection has no files", fg="red")
        return
    for file in current_collection.file_locker_files:
        line = file.name
        if url:
            line += f" {file.url}"
        click.echo(line)
def tree():
    """Print the path of the current collection and all of its descendants."""
    pending = []
    if pwd == "/":
        # At the root, start from every project's root collection.
        projects = conservator.projects.all().including_fields(
            "root_collection.path"
        )
        for project in projects:
            pending.append(project.root_collection)
    elif current_collection is None:
        click.secho("Not in a valid Collection", fg="red")
        return
    else:
        pending.append(current_collection)
    child_fields = FieldsRequest()
    child_fields.include_field("children.path", "children.id")
    # Depth-first walk: pop a collection, print it, queue its children.
    while pending:
        node = pending.pop()
        click.echo(node.path)
        node.populate(child_fields)
        pending.extend(node.children)
def cd(path):
    """Change the shell's current collection to the given (possibly relative) path."""
    global pwd, current_collection
    target = os.path.abspath(os.path.join(pwd, " ".join(path)))
    target = target.replace("//", "/")
    if target == "/":
        # The root is not a real collection; clear the current one.
        pwd = "/"
        current_collection = None
        return
    fields = FieldsRequest()
    fields.include_field("path", "id", "children.name", "children.path")
    try:
        collection = conservator.collections.from_remote_path(target, fields=fields)
    except InvalidRemotePathException:
        click.secho(f"Invalid path '{target}'", fg="red")
        return
    pwd = collection.path
    current_collection = collection
def ls():
    """List child collections, videos, images, and locker files at the current path."""
    click.secho(".", fg="blue")
    if pwd != "/":
        click.secho("..", fg="blue")
    for child_collection in get_child_collections():
        click.secho(f"{child_collection.name}/", fg="blue")
    if current_collection is None:
        # At the root there are no videos/images/files to show.
        return
    for video in get_videos():
        click.secho(video.name, fg="green")
    for image in get_images():
        click.secho(image.name, fg="bright_green")
    locker_fields = FieldsRequest()
    locker_fields.include_field("file_locker_files.name")
    current_collection.populate(locker_fields)
    for locker_file in current_collection.file_locker_files:
        click.secho(locker_file.name, fg="yellow")
from FLIR.conservator.conservator import Conservator
from FLIR.conservator.fields_request import FieldsRequest

conservator = Conservator.default()

# Request only the frame fields we actually need.
frame_fields = FieldsRequest()
for field_path in (
    "video_id",
    "width",
    "height",
    "annotations.bounding_box",
    "annotations.labels",
):
    frame_fields.include_field(field_path)

video = conservator.videos.all().including("name", "frame_count").first()
print(video)

# Unless you're working with huge videos (10k+ frames), you'll probably
# find it easier and faster to just populate all frames at once. Only
# use this method if your request for all frames is timing out, or too large
# to handle at once.
for frame in video.get_all_frames_paginated(frame_fields):
    print(frame)
from FLIR.conservator.conservator import Conservator
from FLIR.conservator.fields_request import FieldsRequest

conservator = Conservator.default()

# Request only the dataset-frame fields we actually need.
frame_fields = FieldsRequest()
for field_path in (
    "dataset_frames.frame_id",
    "dataset_frames.width",
    "dataset_frames.height",
    "dataset_frames.annotations.bounding_box",
    "dataset_frames.annotations.labels",
):
    frame_fields.include_field(field_path)

dataset = conservator.datasets.from_id("RkAXSN4ychHgiNkMk")
for frame in dataset.get_frames(fields=frame_fields):
    print(frame)