def predictions_to_annotations(project, queryset, **kwargs):
    """Convert stored model predictions on the given tasks into annotations.

    Each matching prediction becomes one annotation attributed to the
    requesting user and linked back to its source prediction via
    ``parent_prediction``.

    :param project: project the tasks belong to (unused here, kept for the
        action-handler interface)
    :param queryset: Task queryset to convert predictions for
    :param kwargs: must contain ``request``; ``request.data`` may carry an
        optional ``model_version`` to restrict which predictions are converted
    :return: dict with ``response_code`` and a human-readable ``detail``
    :raises rest_framework.exceptions.ValidationError: if the built
        annotation payloads fail serializer validation
    """
    request = kwargs['request']
    user = request.user
    model_version = request.data.get('model_version')

    # keep only tasks that actually have predictions
    queryset = queryset.filter(predictions__isnull=False)
    # skip predictions already converted (they have child annotations)
    predictions = Prediction.objects.filter(task__in=queryset, child_annotations__isnull=True)

    # model version filter
    if model_version is not None:
        predictions = predictions.filter(model_version=model_version)

    predictions_values = list(predictions.values_list(
        'result', 'model_version', 'task_id', 'id'
    ))

    # prepare annotation payloads; the per-row model_version is unused, and
    # the loop variable is named so it no longer shadows the filter value
    annotations = [
        {
            'result': result,
            'completed_by': user.pk,
            'task': task_id,
            'parent_prediction': prediction_id,
        }
        for result, _model_version, task_id, prediction_id in predictions_values
    ]

    count = len(annotations)
    # fixed typo in the original message: "converter" -> "converted"
    logger.debug(f'{count} predictions will be converted to annotations')
    annotation_ser = AnnotationSerializer(data=annotations, many=True)
    annotation_ser.is_valid(raise_exception=True)
    annotation_ser.save()

    return {'response_code': 200, 'detail': f'Created {count} annotations'}
# Example #2
    def post(self, request, *args, **kwargs):
        """Store a cancelled (skipped) annotation for the task in the URL.

        Validates the annotation payload, updates the annotator's last
        activity timestamp, saves the annotation with ``was_cancelled=True``
        and attaches it to the task.
        """
        # fetch the task, enforcing the change permission
        cancelled_task = get_object_with_permissions(self.request, Task,
                                                     self.kwargs['pk'],
                                                     'tasks.change_task')

        # validate the incoming annotation payload
        serializer = AnnotationSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)

        # record the annotator's most recent activity
        annotator = request.user
        annotator.activity_at = timezone.now()
        annotator.save()

        # persist the cancelled annotation, then attach it to the task
        saved_annotation = serializer.save(completed_by=annotator,
                                           was_cancelled=True,
                                           task=cancelled_task)
        cancelled_task.annotations.add(saved_annotation)
        cancelled_task.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
# Example #3
    def _scan_and_create_links(self, link_class):
        """Scan every key in this storage and create a Task (with its
        predictions and annotations) for each key not yet linked.

        :param link_class: storage-link model used to record which keys were
            already imported; must provide ``exists(key, storage)`` and
            ``create(task, key, storage)``
        :raises ValueError: if a key's content is not valid JSON, or if a
            task supplies ``predictions``/``annotations`` without ``data``

        Side effects: creates Task/link/prediction/annotation rows, updates
        ``last_sync``/``last_sync_count`` on the storage, and recalculates
        project task states at the end.
        """
        tasks_created = 0
        maximum_annotations = self.project.maximum_annotations
        # continue inner_id numbering after the project's current maximum
        task = self.project.tasks.order_by('-inner_id').first()
        max_inner_id = (task.inner_id + 1) if task else 1

        for key in self.iterkeys():
            logger.debug(f'Scanning key {key}')

            # skip if task already exists
            if link_class.exists(key, self):
                logger.debug(
                    f'{self.__class__.__name__} link {key} already exists')
                continue

            logger.debug(f'{self}: found new key {key}')
            try:
                data = self.get_data(key)
            except (UnicodeDecodeError, json.decoder.JSONDecodeError) as exc:
                logger.error(exc, exc_info=True)
                raise ValueError(
                    f'Error loading JSON from file "{key}".\nIf you\'re trying to import non-JSON data '
                    f'(images, audio, text, etc.), edit storage settings and enable '
                    f'"Treat every bucket object as a source file"')

            # predictions
            predictions = data.get('predictions', [])
            if predictions:
                if 'data' not in data:
                    raise ValueError(
                        'If you use "predictions" field in the task, '
                        'you must put "data" field in the task too')

            # annotations
            annotations = data.get('annotations', [])
            if annotations:
                if 'data' not in data:
                    raise ValueError(
                        'If you use "annotations" field in the task, '
                        'you must put "data" field in the task too')

            # unwrap the task payload when it is nested under "data"
            if 'data' in data and isinstance(data['data'], dict):
                data = data['data']

            # create the task, its link and all child objects atomically so a
            # failed import of one key leaves no partial rows behind
            with transaction.atomic():
                task = Task.objects.create(
                    data=data,
                    project=self.project,
                    overlap=maximum_annotations,
                    is_labeled=len(annotations) >= maximum_annotations,
                    inner_id=max_inner_id)
                max_inner_id += 1

                link_class.create(task, key, self)
                logger.debug(
                    f'Create {self.__class__.__name__} link with key={key} for task={task}'
                )
                tasks_created += 1

                # add predictions
                logger.debug(
                    f'Create {len(predictions)} predictions for task={task}')
                for prediction in predictions:
                    prediction['task'] = task.id
                prediction_ser = PredictionSerializer(data=predictions,
                                                      many=True)
                prediction_ser.is_valid(raise_exception=True)
                prediction_ser.save()

                # add annotations
                logger.debug(
                    f'Create {len(annotations)} annotations for task={task}')
                for annotation in annotations:
                    annotation['task'] = task.id
                annotation_ser = AnnotationSerializer(data=annotations,
                                                      many=True)
                annotation_ser.is_valid(raise_exception=True)
                annotation_ser.save()

        # record sync statistics on the storage itself
        self.last_sync = timezone.now()
        self.last_sync_count = tasks_created
        self.save()

        # new tasks were added, so recalculate the project's task states
        self.project.update_tasks_states(
            maximum_annotations_changed=False,
            overlap_cohort_percentage_changed=False,
            tasks_number_changed=True)