コード例 #1
0
 def analyze(cls, user_id, project_id, original_id):
     """Create and launch an 'analyzer' job for an uploaded original.

     Builds the storage / automan job configuration, persists a Job row,
     then starts a RosbagAnalyzer. Only 'rosbag' originals are supported.

     Raises:
         ValidationError: if the original's file_type is not 'rosbag'.
     """
     project = ProjectManager().get_project(project_id, user_id)
     label_type = project['label_type']
     original = OriginalManager().get_original(project_id, original_id, status='uploaded')
     storage = StorageSerializer().get_storage(project_id, original['storage_id'])
     # Deep-copy so the keys added below do not mutate the stored config.
     storage_config = copy.deepcopy(storage['storage_config'])
     original_path = StorageSerializer.get_original_path(
         storage['storage_type'], storage['storage_config'], original['name'])
     storage_config.update({'path': original_path})
     automan_config = cls.__get_automan_config(user_id)
     # FIX: str(project_id) — consistent with archive(); avoids a TypeError
     # if the id arrives as an int.
     automan_config.update({
         'path': '/projects/' + str(project_id) + '/originals/' + str(original_id) + '/',
         'label_type': label_type})
     job_config = {
         'storage_type': storage['storage_type'],
         'storage_config': storage_config,
         'automan_config': automan_config
     }
     job_config_json = json.dumps(job_config)
     new_job = Job(
         job_type='analyzer',
         user_id=user_id,
         project_id=project_id,
         job_config=job_config_json)
     new_job.save()
     if original['file_type'] == 'rosbag':
         job = RosbagAnalyzer(**job_config)
         job.create(cls.__generate_job_name(new_job.id, 'analyzer'))
         res = job.run()
         return res
     else:
         raise ValidationError()
コード例 #2
0
ファイル: views.py プロジェクト: yokada/AutomanTools
    def create(self, request):
        """Create a project plus its klass-set and storage record; respond 201."""
        user_id = AccountManager.get_id_by_username(request.user)
        name = request.data.get('name', None)

        # Project record.
        project_data = {
            'name': name,
            'description': request.data.get('description', None),
            'label_type': request.data.get('label_type', None),
            'owner_id': user_id
        }
        serializer = ProjectSerializer(data=project_data)
        if not serializer.is_valid():
            raise ValidationError
        serializer.save()

        # Klass-set for the freshly created project (looked up by name).
        project_id = ProjectManager().get_project_id_by_name(name)
        klassset_manager = KlasssetManager()
        klassset_manager.set_klassset(project_id, user_id,
                                      request.data.get('klasses'))

        # Storage record attached to the project.
        storage_data = {
            'storage_type': request.data.get('storage_type', None),
            'storage_config': json.dumps(request.data.get('storage_config', None)),
            'project': project_id
        }
        storage = StorageSerializer(data=storage_data)
        if not storage.is_valid():
            raise ValidationError
        storage.save()

        return HttpResponse(status=201, content=json.dumps({}),
                            content_type='application/json')
コード例 #3
0
    def extract(cls, user_id, project_id, original_id, candidates):
        """Create and launch an 'extractor' job for an analyzed original.

        Builds storage, automan and raw-data configuration, persists a Job
        row, then starts a RosbagExtractor. Only 'rosbag' originals are
        supported.

        Raises:
            ValidationError: if the original's file_type is not 'rosbag'.
        """
        original = OriginalManager().get_original(project_id, original_id, status='analyzed')
        storage = StorageSerializer().get_storage(project_id, original['storage_id'])
        # Deep-copy so the keys added below do not mutate the stored config.
        storage_config = copy.deepcopy(storage['storage_config'])
        original_path = StorageSerializer.get_original_path(
            storage['storage_type'], storage['storage_config'], original['name'])
        storage_config.update({'path': original_path})
        output_dir = StorageSerializer.get_dataset_output_dir(
            storage['storage_type'], storage['storage_config'], original['name'], candidates)
        storage_config.update({'output_dir': output_dir})
        automan_config = cls.__get_automan_config(user_id)
        # FIX: str(project_id) — consistent with archive(); avoids a TypeError
        # if the id arrives as an int.
        automan_config.update({'path': '/projects/' + str(project_id) + '/datasets/'})
        raw_data_config = cls.__get_raw_data_config(project_id, original_id, candidates)
        job_config = {
            'storage_type': storage['storage_type'],
            'storage_config': storage_config,
            'automan_config': automan_config,
            'raw_data_config': raw_data_config
        }

        job_config_json = json.dumps(job_config)
        new_job = Job(
            job_type='extractor',
            user_id=user_id,
            project_id=project_id,
            job_config=job_config_json)
        new_job.save()

        if original['file_type'] == 'rosbag':
            job = RosbagExtractor(**job_config)
            job.create(cls.__generate_job_name(new_job.id, 'extractor'))
            res = job.run()
            return res
        else:
            raise ValidationError()
コード例 #4
0
    def save_file(self, project_id, original_id, file):
        """Append an uploaded chunk to the original's file on LOCAL_NFS storage.

        If the target directory cannot be created (e.g. a stale upload left
        it behind), the original is renamed with a timestamp suffix and a
        fresh directory is created instead.

        Raises:
            ValidationError: if the storage is not LOCAL_NFS.
            ObjectDoesNotExist: if the original row vanished mid-upload.
        """
        original = self.get_original(project_id,
                                     original_id,
                                     status='registered')
        storage = StorageSerializer().get_storage(project_id,
                                                  original['storage_id'])
        if storage['storage_type'] != 'LOCAL_NFS':
            raise ValidationError()
        dir_path = (storage['storage_config']['mount_path'] +
                    storage['storage_config']['base_dir'] + '/' +
                    original['name'] + '/raw/')  # FIXME: Rule Aggregation
        file_path = dir_path + file.name
        try:
            os.makedirs(dir_path)
        except Exception:
            original = Original.objects.filter(id=original_id).first()
            if original is None:
                raise ObjectDoesNotExist()
            file_name = file.name + '_' + datetime.now().strftime('%s')
            # BUGFIX: this expression was previously split across bare
            # statements (`+storage[...]` on its own line), so dir_path held
            # only mount_path and the unary '+' on a str raised TypeError.
            dir_path = (storage['storage_config']['mount_path'] +
                        storage['storage_config']['base_dir'] +
                        '/' + file_name + '/raw/')  # FIXME: Rule Aggregation
            os.makedirs(dir_path)
            original.name = file_name
            original.save()
            file_path = dir_path + file_name

        # write (append mode so successive chunked uploads accumulate)
        with open(file_path, 'ab') as destination:
            for chunk in file.chunks():
                destination.write(chunk)

        return {'files': [file.name]}
コード例 #5
0
    def archive(cls, user_id, project_id, dataset_id, original_id, annotation_id):
        """Create and launch an 'archiver' job for an analyzed original."""
        original = OriginalManager().get_original(
            project_id, original_id, status='analyzed')
        storage = StorageSerializer().get_storage(
            project_id, original['storage_id'])
        # Copy so the stored config is not mutated.
        storage_config = copy.deepcopy(storage['storage_config'])

        automan_config = cls.__get_automan_config(user_id)
        annotation_path = ('/projects/' + str(project_id) +
                           '/annotations/' + str(annotation_id) + '/')
        automan_config.update({'path': annotation_path})
        archive_config = cls.__get_archive_info(
            user_id, project_id, dataset_id, annotation_id, original_id)

        job_config = {
            'storage_type': storage['storage_type'],
            'storage_config': storage_config,
            'automan_config': automan_config,
            'archive_config': archive_config,
        }
        record = Job(
            job_type='archiver',
            user_id=user_id,
            project_id=project_id,
            job_config=json.dumps(job_config))
        record.save()

        archiver = AnnotationArchiver(**job_config)
        archiver.create(cls.__generate_job_name(record.id, 'archiver'))
        return archiver.run()
コード例 #6
0
ファイル: views.py プロジェクト: yokada/AutomanTools
def get_frame(request, project_id, dataset_id, candidate_id, frame):
    """Return JSON with an image link and the requested frame's metadata."""
    user_id = AccountManager.get_id_by_username(request.user)
    if not Permission.hasPermission(user_id, 'get_annotationwork', project_id):
        raise PermissionDenied

    dataset = DatasetManager().get_dataset(user_id, dataset_id)
    original = OriginalManager().get_original(project_id,
                                              dataset['original_id'])
    storage = StorageSerializer().get_storage(project_id,
                                              original['storage_id'])

    storage_type = storage['storage_type']
    if storage_type == 'LOCAL_NFS':
        image_link = request.build_absolute_uri(request.path) + 'image/'
    elif storage_type == 'AWS_S3':
        ext = __get_extension(candidate_id)
        key = (dataset['file_path'] + candidate_id + '_' +
               str(frame).zfill(6) + ext)
        image_link = AwsS3Client().get_s3_down_url(
            storage['storage_config']['bucket'], key)
    else:
        raise UnknownStorageTypeError

    # Rebinds `frame`: the numeric index becomes the frame record.
    frame = DatasetFrameManager.get_dataset_frame(
        project_id, dataset_id, frame, storage_type)

    content = {'image_link': image_link, 'frame': frame['frame']}
    return HttpResponse(status=200,
                        content=json.dumps(content),
                        content_type='application/json')
コード例 #7
0
ファイル: views.py プロジェクト: yokada/AutomanTools
def annotation(request, project_id, annotation_id):
    """GET / POST / other(delete) handler for a single annotation."""
    user_id = AccountManager.get_id_by_username(request.user)
    manager = AnnotationManager()

    if request.method == 'GET':
        if not Permission.hasPermission(user_id, 'get_annotationwork',
                                        project_id):
            raise PermissionDenied
        contents = manager.get_annotation(annotation_id)
        return HttpResponse(status=200,
                            content=json.dumps(contents),
                            content_type='application/json')

    if request.method == 'POST':
        # NOTE(review): no permission check on this branch — presumably an
        # internal archive callback; confirm that is intentional.
        manager.set_archive(annotation_id,
                            request.data.get('file_path'),
                            request.data.get('file_name'))
        return HttpResponse(status=201,
                            content=json.dumps({}),
                            content_type='application/json')

    # Every other method deletes the annotation.
    if not Permission.hasPermission(user_id, 'delete_annotationwork',
                                    project_id):
        raise PermissionDenied
    dataset_id = manager.get_annotation(annotation_id)['dataset_id']
    original_id = DatasetManager().get_dataset(user_id,
                                               dataset_id)['original_id']
    storage_id = OriginalManager().get_original(project_id,
                                                original_id)['storage_id']
    storage = StorageSerializer().get_storage(project_id, storage_id)
    manager.delete_annotation(annotation_id, storage)
    return HttpResponse(status=204)
コード例 #8
0
    def delete_rosbag(self, project_id, user_id, original_id):
        """Delete a rosbag original row and its stored files; return True."""
        rosbag = Original.objects.filter(project_id=project_id,
                                         id=original_id).first()
        if rosbag is None:
            raise ObjectDoesNotExist()

        storage = StorageSerializer().get_storage(project_id,
                                                  rosbag.storage_id)
        config = storage['storage_config']

        # Candidates are removed only when no dataset still references
        # this original.
        if DatasetManager().get_datasets_count_by_original(original_id) == 0:
            for candidate in DatasetCandidate.objects.filter(original=original_id):
                candidate.delete()

        rosbag.delete()
        storage_type = storage['storage_type']
        if storage_type == 'LOCAL_NFS':
            shutil.rmtree(config['mount_path'] + config['base_dir'] + '/' +
                          rosbag.name + '/')
        elif storage_type == 'AWS_S3':
            AwsS3Client().delete_s3_files(
                config['bucket'], config['base_dir'] + '/raws/' + rosbag.name)
        return True
コード例 #9
0
ファイル: views.py プロジェクト: yokada/AutomanTools
 def destroy(self, request, project_id, dataset_id):
     """Delete a dataset (and its storage artifacts) after a permission check.

     Returns:
         HttpResponse: 204 No Content on success.

     Raises:
         PermissionDenied: if the user lacks 'delete_dataset'.
     """
     username = request.user
     user_id = AccountManager.get_id_by_username(username)
     if not Permission.hasPermission(user_id, 'delete_dataset', project_id):
         raise PermissionDenied
     dataset_manager = DatasetManager()
     # FIX: reuse the manager built above (the original instantiated a
     # second DatasetManager here for no reason).
     original_id = dataset_manager.get_dataset(user_id,
                                               dataset_id)['original_id']
     storage_id = OriginalManager().get_original(project_id,
                                                 original_id)['storage_id']
     storage = StorageSerializer().get_storage(project_id, storage_id)
     dataset_manager.delete_dataset(user_id, dataset_id, storage)
     return HttpResponse(status=204)
コード例 #10
0
 def delete_project(self, project_id, user_id):
     """Delete a project and every file tree / S3 prefix its storages own."""
     content = Projects.objects.filter(id=project_id).first()
     if content is None:
         raise ObjectDoesNotExist()
     for storage in StorageSerializer().get_storages(project_id):
         config = storage['storage_config']
         storage_type = storage['storage_type']
         if storage_type == 'LOCAL_NFS':
             path = config['mount_path'] + config['base_dir']
             if os.path.isdir(path):
                 shutil.rmtree(path)
         elif storage_type == 'AWS_S3':
             AwsS3Client().delete_s3_files(
                 config['bucket'], config['base_dir'] + '/')
     content.delete()
コード例 #11
0
    def save_file(self, project_id, file):
        """Append an uploaded chunk to the original's file (LOCAL_NFS only)."""
        storage = StorageSerializer().get_storages(project_id)[0]
        if storage['storage_type'] != 'LOCAL_NFS':
            raise ValidationError()

        manager = StorageManager(project_id, storage['id'])
        os.makedirs(manager.original_dirname, exist_ok=True)
        target_path = manager.get_original_filepath(file.name)

        # Append mode so successive chunked uploads accumulate into one file.
        with open(target_path, 'ab') as out:
            for chunk in file.chunks():
                out.write(chunk)

        return {'files': [file.name]}
コード例 #12
0
    def delete_rosbag(self, project_id, user_id, original_id):
        """Delete a rosbag original row and its on-disk directory; return True."""
        rosbag = Original.objects.filter(project_id=project_id,
                                         id=original_id).first()
        if rosbag is None:
            raise ObjectDoesNotExist()

        storage = StorageSerializer().get_storage(project_id, rosbag.storage_id)
        config = storage['storage_config']
        # NOTE(review): assumes a mounted (LOCAL_NFS-style) storage config —
        # confirm no other storage types reach this code path.
        target_dir = (config['mount_path'] + config['base_dir'] +
                      '/' + rosbag.name + '/')

        # Candidates are removed only when no dataset references this original.
        if DatasetManager().get_datasets_count_by_original(original_id) == 0:
            for candidate in DatasetCandidate.objects.filter(original=original_id):
                candidate.delete()

        rosbag.delete()
        shutil.rmtree(target_dir)
        return True
コード例 #13
0
ファイル: views.py プロジェクト: yokada/AutomanTools
def download_archived_link(request, project_id, annotation_id):
    """Return a plain-text download link for an annotation's archive."""
    user_id = AccountManager.get_id_by_username(request.user)
    manager = AnnotationManager()
    dataset_id = manager.get_annotation(annotation_id)['dataset_id']
    original_id = DatasetManager().get_dataset(user_id,
                                               dataset_id)['original_id']
    storage_id = OriginalManager().get_original(project_id,
                                                original_id)['storage_id']
    storage = StorageSerializer().get_storage(project_id, storage_id)

    storage_type = storage['storage_type']
    if storage_type == 'LOCAL_NFS':
        content = request.build_absolute_uri(request.path) + 'local/'
    elif storage_type == 'AWS_S3':
        archive_path = manager.get_archive_path(annotation_id)
        content = AwsS3Client().get_s3_down_url(
            storage['storage_config']['bucket'], archive_path)
    else:
        raise UnknownStorageTypeError
    return HttpResponse(status=200, content=content, content_type='text/plain')