Example #1
    def add_task(self, task_fields: dict, files: dict, project_data: ProjectData = None):
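        # Create a Data record for files already uploaded to the server, create
        # a task bound to this project, and start task creation from them.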
        def split_name(file):
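            # Strip the data_root prefix, keeping only the path relative to it.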
            _, name = file.split(files['data_root'])
            return name

        data_serializer = DataSerializer(data={
            "server_files": files['media'],
            # TODO: the following fields should be replaced with proper input
            # values from the request in the future
            "use_cache": False,
            "use_zip_chunks": True,
            "image_quality": 70,
        })
        data_serializer.is_valid(raise_exception=True)
        db_data = data_serializer.save()
        db_task = TaskSerializer.create(None, {
            **task_fields,
            'data_id': db_data.id,
            'project_id': self.db_project.id
        })
        data = {k: v for k, v in data_serializer.data.items()}
        data['use_zip_chunks'] = data_serializer.validated_data['use_zip_chunks']
        data['use_cache'] = data_serializer.validated_data['use_cache']
        data['copy_data'] = data_serializer.validated_data['copy_data']
        data['server_files_path'] = files['data_root']
        data['stop_frame'] = None
        data['server_files'] = list(map(split_name, data['server_files']))

        create_task(db_task, data, isDatasetImport=True)
        self.db_tasks = models.Task.objects.filter(project__id=self.db_project.id).order_by('id')
        self.init_from_db()
        if project_data is not None:
            project_data.new_tasks.add(db_task.id)
            project_data.init()
Example #2
    def _import_task(self):
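        # Restore a task from a backup archive: recreate the Task and Data rows
        # from the manifest, unpack the archive, start task creation, and
        # replay the saved reviews, issues, and comments for every job.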
        def _create_comment(comment, db_issue):
            comment['issue'] = db_issue.id
            comment_serializer = CommentSerializer(data=comment)
            comment_serializer.is_valid(raise_exception=True)
            db_comment = comment_serializer.save()
            return db_comment

        def _create_issue(issue, db_review, db_job):
            issue['review'] = db_review.id
            issue['job'] = db_job.id
            comments = issue.pop('comments')

            issue_serializer = IssueSerializer(data=issue)
            issue_serializer.is_valid(raise_exception=True)
            db_issue = issue_serializer.save()

            for comment in comments:
                _create_comment(comment, db_issue)

            return db_issue

        def _create_review(review, db_job):
            review['job'] = db_job.id
            issues = review.pop('issues')

            review_serializer = ReviewSerializer(data=review)
            review_serializer.is_valid(raise_exception=True)
            db_review = review_serializer.save()

            for issue in issues:
                _create_issue(issue, db_review, db_job)

            return db_review

        data = self._manifest.pop('data')
        labels = self._manifest.pop('labels')
        jobs = self._manifest.pop('jobs')

        self._prepare_task_meta(self._manifest)
        self._manifest['segment_size'], self._manifest['overlap'] = \
            self._calculate_segment_size(jobs)
        self._manifest["owner_id"] = self._user_id

        self._db_task = models.Task.objects.create(**self._manifest)
        task_path = self._db_task.get_task_dirname()
        if os.path.isdir(task_path):
            shutil.rmtree(task_path)

        os.makedirs(self._db_task.get_task_logs_dirname())
        os.makedirs(self._db_task.get_task_artifacts_dirname())

        self._labels_mapping = self._create_labels(self._db_task, labels)

        self._prepare_data_meta(data)
        data_serializer = DataSerializer(data=data)
        data_serializer.is_valid(raise_exception=True)
        db_data = data_serializer.save()
        self._db_task.data = db_data
        self._db_task.save()

        data_path = self._db_task.data.get_upload_dirname()
        uploaded_files = []
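        # Unpack the archive: DATA_DIRNAME entries go to the data upload
        # directory (and are collected as client files), TASK_DIRNAME entries
        # to the task directory.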
        with ZipFile(self._filename, 'r') as input_file:
            for f in input_file.namelist():
                if f.startswith(self.DATA_DIRNAME + os.path.sep):
                    target_file = os.path.join(
                        data_path, os.path.relpath(f, self.DATA_DIRNAME))
                    self._prepare_dirs(target_file)
                    with open(target_file, "wb") as out:
                        out.write(input_file.read(f))
                    uploaded_files.append(os.path.relpath(
                        f, self.DATA_DIRNAME))
                elif f.startswith(self.TASK_DIRNAME + os.path.sep):
                    target_file = os.path.join(
                        task_path, os.path.relpath(f, self.TASK_DIRNAME))
                    self._prepare_dirs(target_file)
                    with open(target_file, "wb") as out:
                        out.write(input_file.read(f))

        # Compute this before rebinding `data`: once `data` is replaced by the
        # serializer output below, a key set on the old dict would be lost.
        use_zip_chunks = data.pop('chunk_type') == DataChoice.IMAGESET
        data = data_serializer.data
        data['use_zip_chunks'] = use_zip_chunks
        data['client_files'] = uploaded_files
        _create_thread(self._db_task.pk, data.copy(), True)
        db_data.start_frame = data['start_frame']
        db_data.stop_frame = data['stop_frame']
        db_data.frame_filter = data['frame_filter']
        db_data.storage = StorageChoice.LOCAL
        db_data.save(update_fields=[
            'start_frame', 'stop_frame', 'frame_filter', 'storage'
        ])

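        # Restore each job's saved status and replay its reviews.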
        for db_job, job in zip(self._get_db_jobs(), jobs):
            db_job.status = job['status']
            db_job.save()

            for review in job['reviews']:
                _create_review(review, db_job)
Example #3
    def serialize_data():
        # Nested helper: `self` and `self._db_data` are captured from the
        # enclosing method's scope. The exporter renames the model field
        # compressed_chunk_type to chunk_type for the backup manifest.
        data_serializer = DataSerializer(self._db_data)
        data = data_serializer.data
        data['chunk_type'] = data.pop('compressed_chunk_type')
        return self._prepare_data_meta(data)
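
Taken together with Examples #2 and #6, the rename above is the inverse of the import side's use_zip_chunks check. A minimal sketch of the round trip, assuming DataChoice.IMAGESET has the string value 'imageset' (an assumption, not shown in these snippets):

    # Export side (above): the model's compressed_chunk_type becomes the
    # manifest's chunk_type.
    exported = {'compressed_chunk_type': 'imageset'}
    exported['chunk_type'] = exported.pop('compressed_chunk_type')

    # Import side (Examples #2 and #6): use_zip_chunks is derived from it.
    use_zip_chunks = exported.pop('chunk_type') == 'imageset'  # DataChoice.IMAGESET assumed
    assert use_zip_chunks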
Example #4
    def get_request_client_files(self, request):
        # Validate the request payload with DataSerializer and return the
        # client_files entry from the validated data, if present.
        db_model = self.get_object()
        serializer = DataSerializer(db_model, data=request.data)
        serializer.is_valid(raise_exception=True)
        data = {k: v for k, v in serializer.validated_data.items()}
        return data.get('client_files', None)
Example #5
    def data(self, request, pk):
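        # POST: validate and attach new Data to the task, then start task
        # creation. GET: stream a chunk, a frame, or the preview image.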
        if request.method == 'POST':
            db_task = self.get_object()  # calls check_object_permissions as well
            serializer = DataSerializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            db_data = serializer.save()
            db_task.data = db_data
            db_task.save()
            data = {k: v for k, v in serializer.data.items()}
            data['use_zip_chunks'] = serializer.validated_data['use_zip_chunks']
            data['use_cache'] = serializer.validated_data['use_cache']
            if data['use_cache']:
                db_task.data.storage_method = StorageMethodChoice.CACHE
                db_task.data.save(update_fields=['storage_method'])

            # If stop_frame is 0, the task creation code cannot tell whether it
            # was specified by the user or is the default value from the
            # database, so pass None when the user did not provide it.
            if 'stop_frame' not in serializer.validated_data:
                data['stop_frame'] = None
            task.create(db_task.id, data)
            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        else:
            data_type = request.query_params.get('type', None)
            data_id = request.query_params.get('number', None)
            data_quality = request.query_params.get('quality', 'compressed')

            possible_data_type_values = ('chunk', 'frame', 'preview')
            possible_quality_values = ('compressed', 'original')

            if not data_type or data_type not in possible_data_type_values:
                return Response(
                    data='data type not specified or has wrong value',
                    status=status.HTTP_400_BAD_REQUEST)
            elif data_type in ('chunk', 'frame'):
                if not data_id:
                    return Response(data='number not specified',
                                    status=status.HTTP_400_BAD_REQUEST)
                elif data_quality not in possible_quality_values:
                    return Response(data='wrong quality value',
                                    status=status.HTTP_400_BAD_REQUEST)

            try:
                db_task = self.get_object()
                db_data = db_task.data
                frame_provider = FrameProvider(db_task.data)

                if data_type == 'chunk':
                    data_id = int(data_id)

                    data_quality = FrameProvider.Quality.COMPRESSED \
                        if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL

                    # TODO: handle av.FFmpegError
                    if settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE:
                        buff, mime_type = frame_provider.get_chunk(
                            data_id, data_quality)
                        return HttpResponse(buff.getvalue(),
                                            content_type=mime_type)

                    # Follow symlinks if the chunk is a link to a real image;
                    # otherwise mimetype detection inside sendfile works incorrectly.
                    path = os.path.realpath(
                        frame_provider.get_chunk(data_id, data_quality))
                    return sendfile(request, path)

                elif data_type == 'frame':
                    data_id = int(data_id)
                    data_quality = FrameProvider.Quality.COMPRESSED \
                        if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL
                    buf, mime = frame_provider.get_frame(data_id, data_quality)

                    return HttpResponse(buf.getvalue(), content_type=mime)

                elif data_type == 'preview':
                    return sendfile(request, frame_provider.get_preview())
                else:
                    return Response(
                        data='unknown data type {}.'.format(data_type),
                        status=status.HTTP_400_BAD_REQUEST)
            except APIException as e:
                return Response(data=e.default_detail, status=e.status_code)
            except Exception as e:
                msg = 'cannot get requested data type: {}, number: {}, quality: {}'.format(
                    data_type, data_id, data_quality)
                slogger.task[pk].error(msg, exc_info=True)
                return Response(data=msg + '\n' + str(e),
                                status=status.HTTP_400_BAD_REQUEST)
Example #6
    def _import_task(self):
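        # Backup import variant that also restores project and organization
        # bindings and supports an optional subdirectory inside the archive;
        # self._file may be a path or a file-like object.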
        def _write_data(zip_object):
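            # Unpack archive entries: data files go to the upload directory and
            # their relative names are returned for the client_files list; task
            # files go to the task directory.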
            data_path = self._db_task.data.get_upload_dirname()
            task_dirname = (os.path.join(self._subdir, self.TASK_DIRNAME)
                            if self._subdir else self.TASK_DIRNAME)
            data_dirname = (os.path.join(self._subdir, self.DATA_DIRNAME)
                            if self._subdir else self.DATA_DIRNAME)
            uploaded_files = []
            for f in zip_object.namelist():
                if f.endswith(os.path.sep):
                    continue
                if f.startswith(data_dirname + os.path.sep):
                    target_file = os.path.join(
                        data_path, os.path.relpath(f, data_dirname))
                    self._prepare_dirs(target_file)
                    with open(target_file, "wb") as out:
                        out.write(zip_object.read(f))
                    uploaded_files.append(os.path.relpath(f, data_dirname))
                elif f.startswith(task_dirname + os.path.sep):
                    target_file = os.path.join(
                        task_path, os.path.relpath(f, task_dirname))
                    self._prepare_dirs(target_file)
                    with open(target_file, "wb") as out:
                        out.write(zip_object.read(f))

            return uploaded_files

        data = self._manifest.pop('data')
        labels = self._manifest.pop('labels')
        jobs = self._manifest.pop('jobs')

        self._prepare_task_meta(self._manifest)
        self._manifest['segment_size'], self._manifest['overlap'] = \
            self._calculate_segment_size(jobs)
        self._manifest['owner_id'] = self._user_id
        self._manifest['project_id'] = self._project_id

        self._db_task = models.Task.objects.create(
            **self._manifest, organization_id=self._org_id)
        task_path = self._db_task.get_task_dirname()
        if os.path.isdir(task_path):
            shutil.rmtree(task_path)

        os.makedirs(self._db_task.get_task_logs_dirname())
        os.makedirs(self._db_task.get_task_artifacts_dirname())

        if not self._labels_mapping:
            self._labels_mapping = self._create_labels(db_task=self._db_task,
                                                       labels=labels)

        self._prepare_data_meta(data)
        data_serializer = DataSerializer(data=data)
        data_serializer.is_valid(raise_exception=True)
        db_data = data_serializer.save()
        self._db_task.data = db_data
        self._db_task.save()

        if isinstance(self._file, str):
            with ZipFile(self._file, 'r') as zf:
                uploaded_files = _write_data(zf)
        else:
            uploaded_files = _write_data(self._file)

        # As in Example #2, compute this before `data` is rebound to the
        # serializer output, otherwise the value would be lost.
        use_zip_chunks = data.pop('chunk_type') == DataChoice.IMAGESET
        data = data_serializer.data
        data['use_zip_chunks'] = use_zip_chunks
        data['client_files'] = uploaded_files
        _create_thread(self._db_task.pk, data.copy(), True)
        db_data.start_frame = data['start_frame']
        db_data.stop_frame = data['stop_frame']
        db_data.frame_filter = data['frame_filter']
        db_data.storage = StorageChoice.LOCAL
        db_data.save(update_fields=[
            'start_frame', 'stop_frame', 'frame_filter', 'storage'
        ])

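        # Restore each job's saved status from the manifest.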
        for db_job, job in zip(self._get_db_jobs(), jobs):
            db_job.status = job['status']
            db_job.save()