def delete_rosbag(self, project_id, user_id, original_id):
    """Delete a rosbag original: its DB rows (when unreferenced) and its stored files.

    Raises ObjectDoesNotExist when no matching Original row exists.
    Returns True on completion.
    """
    record = Original.objects.filter(
        project_id=project_id, id=original_id).first()
    if record is None:
        raise ObjectDoesNotExist()

    storage = StorageSerializer().get_storage(project_id, record.storage_id)
    config = storage['storage_config']

    # Drop DB rows only while no dataset still references this original.
    if DatasetManager().get_datasets_count_by_original(original_id) == 0:
        for candidate in DatasetCandidate.objects.filter(original=original_id):
            candidate.delete()
        record.delete()

    # NOTE(review): the file removal below is NOT guarded by the dataset
    # count above, so files are removed even when the DB rows are kept —
    # confirm this asymmetry is intended.
    if storage['storage_type'] == 'LOCAL_NFS':
        local_dir = (config['mount_path'] + config['base_dir']
                     + '/' + record.name + '/')
        shutil.rmtree(local_dir)
    elif storage['storage_type'] == 'AWS_S3':
        s3_key = config['base_dir'] + '/raws/' + record.name
        AwsS3Client().delete_s3_files(config['bucket'], s3_key)
    return True
def get_frame(request, project_id, dataset_id, candidate_id, frame):
    """Return JSON containing an image link and frame metadata for one frame.

    Raises PermissionDenied when the user lacks 'get_annotationwork' on the
    project, and UnknownStorageTypeError for unsupported storage backends.
    """
    user_id = AccountManager.get_id_by_username(request.user)
    if not Permission.hasPermission(user_id, 'get_annotationwork', project_id):
        raise PermissionDenied

    dataset = DatasetManager().get_dataset(user_id, dataset_id)
    original = OriginalManager().get_original(project_id, dataset['original_id'])
    storage = StorageSerializer().get_storage(project_id, original['storage_id'])
    storage_type = storage['storage_type']

    if storage_type == 'LOCAL_NFS':
        # Local files are served through a sub-route of this endpoint.
        image_link = request.build_absolute_uri(request.path) + 'image/'
    elif storage_type == 'AWS_S3':
        ext = __get_extension(candidate_id)
        s3_key = (dataset['file_path'] + candidate_id
                  + '_' + str(frame).zfill(6) + ext)
        image_link = AwsS3Client().get_s3_down_url(
            storage['storage_config']['bucket'], s3_key)
    else:
        raise UnknownStorageTypeError

    frame_info = DatasetFrameManager.get_dataset_frame(
        project_id, dataset_id, frame, storage_type)
    payload = {'image_link': image_link, 'frame': frame_info['frame']}
    return HttpResponse(status=200,
                        content=json.dumps(payload),
                        content_type='application/json')
def __init__(self, storage_type, storage_config, automan_config,
             k8s_config_path=None, ros_distrib='kinetic'):
    """Configure a RosbagAnalyzer job for the given storage backend.

    Serializes storage/automan configuration to compact JSON strings
    consumed by the analyzer job. `ros_distrib` is accepted for interface
    compatibility but is not stored here.

    Raises NotImplementedError for unsupported storage types.
    """
    super(RosbagAnalyzer, self).__init__(k8s_config_path)
    self.storage_type = storage_type
    if storage_type == 'LOCAL_NFS':
        self.mount_path = storage_config['mount_path']
        self.volume_name = storage_config['volume_name']
        self.claim_name = storage_config['claim_name']
        self.storage_info = json.dumps({'path': storage_config['path']},
                                       separators=(',', ':'))
    elif storage_type == 'AWS_S3':
        self.storage_info = json.dumps(
            {
                'bucket': storage_config['bucket'],
                'base_dir': storage_config['base_dir'],
                'target_url': AwsS3Client().get_s3_down_url(
                    storage_config['bucket'], storage_config['path']),
            },
            separators=(',', ':'))
        # Fix: removed a leftover debug print of storage_info that leaked
        # presigned URLs to stdout.
    else:
        raise NotImplementedError  # FIXME
    # Common to all supported storage types; hoisted out of the branches.
    self.automan_info = json.dumps(automan_config, separators=(',', ':'))
def get_url(self):
    """Return a presigned S3 PUT URL for this original's raw file.

    Raises UnknownStorageTypeError for any non-S3 storage type.
    """
    if self.storage['storage_type'] != 'AWS_S3':
        raise UnknownStorageTypeError
    config = self.storage['storage_config']
    # NOTE(review): `name` is not defined in this scope — presumably it
    # should be an attribute of self (or a module global); confirm against
    # the enclosing class before relying on this path.
    key = config['base_dir'] + '/' + 'raws' + '/' + name
    return AwsS3Client().get_s3_put_url(config['bucket'], key)
def original_file_exists(self, filename):
    """Return True if the original file exists in the configured storage.

    Raises NotImplementedError for unsupported storage types.
    """
    storage_type = self.storage['storage_type']
    if storage_type == 'LOCAL_NFS':
        return os.path.exists(self.get_original_filepath(filename))
    if storage_type == 'AWS_S3':
        bucket = self.storage['storage_config']['bucket']
        return AwsS3Client().check_s3_key_exists(bucket, filename)
    raise NotImplementedError  # FIXME
def delete_annotation(self, annotation_id, storage):
    """Delete an annotation row along with its archived label files."""
    archived = ArchivedLabelDataset.objects.filter(
        annotation_id=annotation_id)
    for entry in archived:
        # Normalize to exactly one slash between directory and file name.
        target = entry.file_path.rstrip('/') + '/' + entry.file_name
        if storage['storage_type'] == 'LOCAL_NFS':
            os.remove(target)
        elif storage['storage_type'] == 'AWS_S3':
            AwsS3Client().delete_s3_files(
                storage['storage_config']['bucket'], target)
    # NOTE(review): .first() may return None, which would raise
    # AttributeError below — sibling delete_* methods raise
    # ObjectDoesNotExist for this case; consider aligning.
    annotation = Annotation.objects.filter(id=annotation_id).first()
    annotation.delete()
def delete_project(self, project_id, user_id):
    """Delete a project row and every storage area attached to it.

    Raises ObjectDoesNotExist when no matching project row exists.
    """
    project = Projects.objects.filter(id=project_id).first()
    if project is None:
        raise ObjectDoesNotExist()
    for storage in StorageSerializer().get_storages(project_id):
        config = storage['storage_config']
        storage_type = storage['storage_type']
        if storage_type == 'LOCAL_NFS':
            base_path = config['mount_path'] + config['base_dir']
            # Tolerate an already-removed directory.
            if os.path.isdir(base_path):
                shutil.rmtree(base_path)
        elif storage_type == 'AWS_S3':
            AwsS3Client().delete_s3_files(
                config['bucket'], config['base_dir'] + '/')
    project.delete()
def delete_dataset(self, admin_id, dataset_id, storage):
    """Delete a dataset record, its generated files, and its annotations.

    Raises ObjectDoesNotExist when no matching dataset row exists.
    """
    record = LabelDataset.objects.filter(id=dataset_id).first()
    if record is None:
        raise ObjectDoesNotExist()

    # Drop the candidates once their original no longer exists.
    candidate_manager = CandidateManager()
    if not candidate_manager.is_exist_original(record.original):
        candidate_manager.delete_candidate(record.original)

    # Remove the generated dataset files (image, pcd).
    dataset_path = record.file_path
    if storage['storage_type'] == 'LOCAL_NFS':
        shutil.rmtree(dataset_path)
    elif storage['storage_type'] == 'AWS_S3':
        AwsS3Client().delete_s3_files(
            storage['storage_config']['bucket'], dataset_path)

    AnnotationManager().delete_annotations(dataset_id, storage)
    record.delete()
def download_archived_link(request, project_id, annotation_id):
    """Return a plain-text link for downloading an archived annotation.

    Raises UnknownStorageTypeError for unsupported storage backends.
    """
    user_id = AccountManager.get_id_by_username(request.user)

    # Walk annotation -> dataset -> original -> storage to find the backend.
    annotation_manager = AnnotationManager()
    dataset_id = annotation_manager.get_annotation(annotation_id)['dataset_id']
    dataset = DatasetManager().get_dataset(user_id, dataset_id)
    original = OriginalManager().get_original(project_id, dataset['original_id'])
    storage = StorageSerializer().get_storage(project_id, original['storage_id'])

    storage_type = storage['storage_type']
    if storage_type == 'LOCAL_NFS':
        # Local archives are served through a sub-route of this endpoint.
        link = request.build_absolute_uri(request.path) + 'local/'
    elif storage_type == 'AWS_S3':
        archive_path = annotation_manager.get_archive_path(annotation_id)
        link = AwsS3Client().get_s3_down_url(
            storage['storage_config']['bucket'], archive_path)
    else:
        raise UnknownStorageTypeError
    return HttpResponse(status=200, content=link, content_type='text/plain')
def __init__(self, storage_type, storage_config, automan_config,
             archive_config, k8s_config_path=None, ros_distrib='kinetic'):
    """Configure an AnnotationArchiver job for the given storage backend.

    Serializes storage/automan/archive configuration to compact JSON
    strings consumed by the archiver job.

    Raises NotImplementedError for unsupported storage types.
    """
    super(AnnotationArchiver, self).__init__(k8s_config_path)
    self.ros_distrib = ros_distrib
    self.storage_type = storage_type
    compact = (',', ':')
    if storage_type == 'LOCAL_NFS':
        self.mount_path = storage_config['mount_path']
        self.volume_name = storage_config['volume_name']
        self.claim_name = storage_config['claim_name']
        self.storage_info = json.dumps({}, separators=compact)
    elif storage_type == 'AWS_S3':
        self.storage_info = json.dumps(
            {
                'storage_id': storage_config['storage_id'],
                'target_url': AwsS3Client().get_s3_down_url(
                    storage_config['bucket'], storage_config['path']),
            },
            separators=compact)
    else:
        raise NotImplementedError  # FIXME
    # Common to all supported storage types; hoisted out of the branches.
    self.automan_info = json.dumps(automan_config, separators=compact)
    self.archive_info = json.dumps(archive_config, separators=compact)
def get_s3_presigned_url(self, bucket, key):
    """Return a presigned URL that allows uploading `key` into `bucket`."""
    client = AwsS3Client()
    return client.get_s3_put_url(bucket, key)