Example No. 1
def download_local_nfs_image(request, project_id, dataset_id, candidate_id,
                             frame):
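    # Serve one locally stored frame of a dataset candidate: JPEG for camera
    # images, PCD for point clouds.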
    username = request.user
    user_id = AccountManager.get_id_by_username(username)
    if not Permission.hasPermission(user_id, 'get_annotationwork', project_id):
        raise PermissionDenied
    dataset_manager = DatasetManager()
    dataset_dir = dataset_manager.get_dataset_file_path(user_id, dataset_id)

    original_manager = OriginalManager()
    candidate = original_manager.get_dataset_candidate(candidate_id)
    analyzed_info = json.loads(candidate['analyzed_info'])
    msg_type = analyzed_info['msg_type']
    if msg_type == 'sensor_msgs/Image':
        extension = '.jpg'
    elif msg_type == 'sensor_msgs/PointCloud2':
        extension = '.pcd'
    else:
        # Fail explicitly rather than leaving `extension` unbound for an
        # unexpected message type (the original would raise UnboundLocalError).
        raise ValidationError('unsupported msg_type: ' + str(msg_type))

    file_path = dataset_dir + candidate_id + '_' + str(frame).zfill(6) + extension
    # Read via a context manager so the file handle is closed promptly.
    with open(file_path, 'rb') as f:
        image = f.read()

    if msg_type == 'sensor_msgs/Image':
        return HttpResponse(image, content_type="image/jpeg")
    return HttpResponse(image, content_type="application/octet-stream")
Example No. 2
    def __get_raw_data_config(original_id, candidates):
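        # Build the raw-data config for an extractor job: map each selected
        # candidate's ROS topic name to its candidate id.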
        records = {}
        original_manager = OriginalManager()
        for candidate_id in candidates:
            candidate = original_manager.get_dataset_candidate(candidate_id)
            analyzed_info = json.loads(candidate['analyzed_info'])
            records[analyzed_info['topic_name']] = candidate_id

        raw_data_config = {
            'original_id': original_id,
            'candidates': candidates,
            'records': records,
        }
        return raw_data_config
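
For reference, a minimal sketch of the returned structure; the candidate ids and topic names below are made up, and the real values come from each candidate's analyzed_info:

# Illustrative only: ids and topic names are hypothetical.
raw_data_config = {
    'original_id': 12,
    'candidates': [34, 35],
    'records': {
        '/camera/image_raw': 34,   # a sensor_msgs/Image candidate
        '/velodyne_points': 35,    # a sensor_msgs/PointCloud2 candidate
    },
}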
Example No. 3
    def analyze(cls, user_id, project_id, original_id):
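        # Launch an analyzer job for an uploaded original. Only rosbag inputs
        # are supported; anything else is rejected.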
        project = ProjectManager().get_project(project_id, user_id)
        label_type = project['label_type']
        original = OriginalManager().get_original(project_id, original_id, status='uploaded')
        storage = StorageSerializer().get_storage(project_id, original['storage_id'])
        storage_config = copy.deepcopy(storage['storage_config'])
        original_path = StorageSerializer.get_original_path(
            storage['storage_type'], storage['storage_config'], original['name'])
        storage_config.update({'path': original_path})
        automan_config = cls.__get_automan_config(user_id)
        automan_config.update({
            'path': '/projects/' + project_id + '/originals/' + str(original_id) + '/',
            'label_type': label_type,
        })
        job_config = {
            'storage_type': storage['storage_type'],
            'storage_config': storage_config,
            'automan_config': automan_config
        }
        job_config_json = json.dumps(job_config)
        new_job = Job(
            job_type='analyzer',
            user_id=user_id,
            project_id=project_id,
            job_config=job_config_json)
        new_job.save()
        if original['file_type'] == 'rosbag':
            job = RosbagAnalyzer(**job_config)
            job.create(cls.__generate_job_name(new_job.id, 'analyzer'))
            res = job.run()
            return res
        else:
            raise ValidationError()
Example No. 4
    def extract(cls, user_id, project_id, original_id, candidates):
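        # Queue an extractor job that pulls the selected candidate topics out
        # of an analyzed rosbag into the dataset output directory.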
        original = OriginalManager().get_original(project_id, original_id, status='analyzed')
        storage = StorageSerializer().get_storage(project_id, original['storage_id'])
        storage_config = copy.deepcopy(storage['storage_config'])
        original_path = StorageSerializer.get_original_path(
            storage['storage_type'], storage['storage_config'], original['name'])
        storage_config.update({'path': original_path})
        output_dir = StorageSerializer.get_dataset_output_dir(
            storage['storage_type'], storage['storage_config'], original['name'], candidates)
        storage_config.update({'output_dir': output_dir})
        automan_config = cls.__get_automan_config(user_id)
        automan_config.update({'path': '/projects/' + project_id + '/datasets/'})
        raw_data_config = cls.__get_raw_data_config(project_id, original_id, candidates)
        job_config = {
            'storage_type': storage['storage_type'],
            'storage_config': storage_config,
            'automan_config': automan_config,
            'raw_data_config': raw_data_config
        }

        job_config_json = json.dumps(job_config)
        new_job = Job(
            job_type='extractor',
            user_id=user_id,
            project_id=project_id,
            job_config=job_config_json)
        new_job.save()

        if original['file_type'] == 'rosbag':
            job = RosbagExtractor(**job_config)
            job.create(cls.__generate_job_name(new_job.id, 'extractor'))
            res = job.run()
            return res
        else:
            raise ValidationError()
Example No. 5
    def archive(cls, user_id, project_id, dataset_id, original_id, annotation_id):
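        # Queue an archiver job that packages a dataset's annotation results
        # for download.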
        original = OriginalManager().get_original(project_id, original_id, status='analyzed')
        storage = StorageSerializer().get_storage(project_id, original['storage_id'])
        storage_config = copy.deepcopy(storage['storage_config'])

        automan_config = cls.__get_automan_config(user_id)
        automan_config.update({'path': '/projects/' + str(project_id) + '/annotations/' + str(annotation_id) + '/'})
        archive_config = cls.__get_archive_info(user_id, project_id, dataset_id, annotation_id, original_id)
        job_config = {
            'storage_type': storage['storage_type'],
            'storage_config': storage_config,
            'automan_config': automan_config,
            'archive_config': archive_config,
        }
        job_config_json = json.dumps(job_config)
        new_job = Job(
            job_type='archiver',
            user_id=user_id,
            project_id=project_id,
            job_config=job_config_json)
        new_job.save()
        job = AnnotationArchiver(**job_config)
        job.create(cls.__generate_job_name(new_job.id, 'archiver'))
        res = job.run()
        return res
Example No. 6
def get_frame(request, project_id, dataset_id, candidate_id, frame):
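    # Return JSON with a per-frame download link: a relative URL for
    # LOCAL_NFS storage or a presigned URL for AWS_S3.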
    username = request.user
    user_id = AccountManager.get_id_by_username(username)
    if not Permission.hasPermission(user_id, 'get_annotationwork', project_id):
        raise PermissionDenied

    dataset = DatasetManager().get_dataset(user_id, dataset_id)
    original = OriginalManager().get_original(project_id,
                                              dataset['original_id'])
    storage = StorageSerializer().get_storage(project_id,
                                              original['storage_id'])

    if storage['storage_type'] == 'LOCAL_NFS':
        image_link = request.build_absolute_uri(request.path) + 'image/'
    elif storage['storage_type'] == 'AWS_S3':
        ext = __get_extension(candidate_id)
        key = (dataset['file_path'] + candidate_id + '_' +
               str(frame).zfill(6) + ext)
        image_link = AwsS3Client().get_s3_down_url(
            storage['storage_config']['bucket'], key)
    else:
        raise UnknownStorageTypeError

    # Use a distinct name so the frame record does not shadow the incoming
    # frame number.
    frame_record = DatasetFrameManager.get_dataset_frame(
        project_id, dataset_id, frame, storage['storage_type'])

    content = {'image_link': image_link, 'frame': frame_record['frame']}
    return HttpResponse(status=200,
                        content=json.dumps(content),
                        content_type='application/json')
Example No. 7
def annotation(request, project_id, annotation_id):
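    # GET returns annotation metadata; POST registers an archived file
    # (apparently the callback path archiver jobs are configured with, cf.
    # the automan_config in the archive examples); any other method deletes
    # the annotation.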
    username = request.user
    user_id = AccountManager.get_id_by_username(username)
    annotation_manager = AnnotationManager()
    if request.method == 'GET':
        if not Permission.hasPermission(user_id, 'get_annotationwork',
                                        project_id):
            raise PermissionDenied
        contents = annotation_manager.get_annotation(annotation_id)
        return HttpResponse(content=json.dumps(contents),
                            status=200,
                            content_type='application/json')

    elif request.method == 'POST':
        file_path = request.data.get('file_path')
        file_name = request.data.get('file_name')
        annotation_manager.set_archive(annotation_id, file_path, file_name)
        return HttpResponse(status=201,
                            content=json.dumps({}),
                            content_type='application/json')

    else:
        if not Permission.hasPermission(user_id, 'delete_annotationwork',
                                        project_id):
            raise PermissionDenied
        dataset_id = annotation_manager.get_annotation(
            annotation_id)['dataset_id']
        original_id = DatasetManager().get_dataset(user_id,
                                                   dataset_id)['original_id']
        storage_id = OriginalManager().get_original(project_id,
                                                    original_id)['storage_id']
        storage = StorageSerializer().get_storage(project_id, storage_id)
        annotation_manager.delete_annotation(annotation_id, storage)
        return HttpResponse(status=204)
Example No. 8
    def destroy(self, request, project_id, dataset_id):
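        # Delete a dataset, including its files on the backing storage.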
        username = request.user
        dataset_manager = DatasetManager()
        user_id = AccountManager.get_id_by_username(username)
        if not Permission.hasPermission(user_id, 'delete_dataset', project_id):
            raise PermissionDenied
        # Reuse the manager created above instead of instantiating a second one.
        original_id = dataset_manager.get_dataset(user_id, dataset_id)['original_id']
        storage_id = OriginalManager().get_original(project_id, original_id)['storage_id']
        storage = StorageSerializer().get_storage(project_id, storage_id)
        dataset_manager.delete_dataset(user_id, dataset_id, storage)
        return HttpResponse(status=204)
Example No. 9
    def extract(cls, user_id, project_id, original_id, candidates, name):
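        # StorageManager-based variant of the extractor entry point; also
        # hands the job a presigned-upload endpoint for writing results back.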
        original = OriginalManager().get_original(project_id,
                                                  original_id,
                                                  status='analyzed')
        storage_manager = StorageManager(project_id, original['storage_id'])
        storage_config = copy.deepcopy(
            storage_manager.storage['storage_config'])
        original_path = storage_manager.get_original_filepath(original['name'])
        output_dir = storage_manager.get_dataset_dirname(
            original['name'], candidates)
        print('output_dirname: ' + output_dir)
        storage_config.update({
            'path': original_path,
            'output_dir': output_dir,
            'storage_id': original['storage_id']
        })
        automan_config = cls.__get_automan_config(user_id)
        automan_config.update({
            'path': '/projects/' + project_id + '/datasets/',
            'presigned': '/projects/' + project_id + '/storages/upload/'
        })
        raw_data_config = cls.__get_raw_data_config(project_id, original_id,
                                                    candidates, name)
        job_config = {
            'storage_type': storage_manager.storage['storage_type'],
            'storage_config': storage_config,
            'automan_config': automan_config,
            'raw_data_config': raw_data_config,
        }

        job_config_json = json.dumps(job_config)
        new_job = Job(job_type='extractor',
                      user_id=user_id,
                      project_id=project_id,
                      job_config=job_config_json)
        new_job.save()

        if original['file_type'] == 'rosbag':
            job = RosbagExtractor(**job_config)
            job.create(cls.__generate_job_name(new_job.id, 'extractor'))
            res = job.run(namespace=settings.JOB_NAMESPACE)
            return res
        else:
            raise ValidationError()
Example No. 10
def download_archived_link(request, project_id, annotation_id):
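    # Return a download link for an archived annotation: a relative link for
    # LOCAL_NFS or a presigned S3 URL for AWS_S3. Note that, unlike the other
    # views here, this one performs no explicit permission check.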
    username = request.user
    user_id = AccountManager.get_id_by_username(username)
    annotation_manager = AnnotationManager()
    dataset_id = annotation_manager.get_annotation(annotation_id)['dataset_id']
    original_id = DatasetManager().get_dataset(user_id,
                                               dataset_id)['original_id']
    storage_id = OriginalManager().get_original(project_id,
                                                original_id)['storage_id']
    storage = StorageSerializer().get_storage(project_id, storage_id)
    if storage['storage_type'] == 'LOCAL_NFS':
        content = request.build_absolute_uri(request.path) + 'local/'
    elif storage['storage_type'] == 'AWS_S3':
        archive_path = annotation_manager.get_archive_path(annotation_id)
        content = AwsS3Client().get_s3_down_url(
            storage['storage_config']['bucket'], archive_path)
    else:
        raise UnknownStorageTypeError
    return HttpResponse(status=200, content=content, content_type='text/plain')
Example No. 11
    def archive(cls, user_id, project_id, dataset_id, original_id,
                annotation_id, include_image: bool):
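        # Archiver variant taking an include_image flag (presumably whether
        # raw frames are bundled with the annotations) and a presigned upload
        # path for the job.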
        original = OriginalManager().get_original(project_id,
                                                  original_id,
                                                  status='analyzed')
        storage_manager = StorageManager(project_id, original['storage_id'])
        storage_config = copy.deepcopy(
            storage_manager.storage['storage_config'])
        original_path = storage_manager.get_original_filepath(original['name'])
        storage_config.update({
            'path': original_path,
            'storage_id': original['storage_id']
        })
        automan_config = cls.__get_automan_config(user_id)
        automan_config.update({
            'path': '/projects/' + str(project_id) + '/annotations/' +
                    str(annotation_id) + '/',
            'presigned': '/projects/' + str(project_id) + '/storages/upload/'
        })

        archive_config = cls.__get_archive_info(
            storage_manager.storage['storage_type'], user_id, project_id,
            dataset_id, annotation_id, original_id, include_image)
        job_config = {
            'storage_type': storage_manager.storage['storage_type'],
            'storage_config': storage_config,
            'automan_config': automan_config,
            'archive_config': archive_config,
        }
        job_config_json = json.dumps(job_config)
        new_job = Job(job_type='archiver',
                      user_id=user_id,
                      project_id=project_id,
                      job_config=job_config_json)
        new_job.save()
        job = AnnotationArchiver(**job_config)
        job.create(cls.__generate_job_name(new_job.id, 'archiver'))
        res = job.run(namespace=settings.JOB_NAMESPACE)
        return res
Example No. 12
    def test_get_originals(self):
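        # The test fixture is expected to contain exactly two rosbags.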
        original_manager = OriginalManager()
        rosbags = original_manager.get_originals(PROJECT_ID)
        self.assertEqual(len(rosbags), 2)
Example No. 13
def __get_extension(candidate_id):
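    # Map a candidate's analyzed ROS message type to a file extension:
    # PointCloud2 data is stored as .pcd, everything else as .jpg.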
    candidate = OriginalManager().get_dataset_candidate(candidate_id)
    msg_type = json.loads(candidate['analyzed_info'])['msg_type']
    if msg_type == 'sensor_msgs/PointCloud2':
        return '.pcd'
    return '.jpg'