Example #1
class SubjectTasksSerializer(serializers.ModelSerializer):
    # Nest each Subject's related tasks using the simple task representation.
    tasks = TaskSimpleSerializer(many=True)

    class Meta:
        model = Subject
        fields = (
            'id',
            'name',
            'tasks',
        )
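
A minimal usage sketch for this serializer, assuming a Django project where Task declares ForeignKey(Subject, related_name='tasks'); the queryset and field values below are illustrative, not taken from the source:

# Hypothetical usage; assumes Task has ForeignKey(Subject, related_name='tasks').
subject = Subject.objects.prefetch_related('tasks').get(pk=1)  # prefetch avoids N+1 queries
data = SubjectTasksSerializer(subject).data
# data -> {'id': 1, 'name': 'Algebra', 'tasks': [<TaskSimpleSerializer output>, ...]}
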
Example #2
    def predict_many_tasks(self, tasks):
        # Refresh the backend connection state and bail out early if the
        # backend cannot serve predictions right now.
        self.update_state()
        if self.not_ready:
            logger.debug(f'ML backend {self} is not ready')
            return

        # Normalize a plain list of tasks into a queryset so the rest of the
        # method can treat both input types the same way.
        if isinstance(tasks, list):
            from tasks.models import Task
            tasks = Task.objects.filter(id__in=[task.id for task in tasks])

        tasks_ser = TaskSimpleSerializer(tasks, many=True).data
        ml_api_result = self.api.make_predictions(tasks_ser,
                                                  self.model_version,
                                                  self.project)
        if ml_api_result.is_error:
            logger.error(
                f'Prediction not created for project {self}: {ml_api_result.error_message}'
            )
            return

        responses = ml_api_result.response['results']

        if len(responses) == 0:
            logger.error(
                f'ML backend returned empty prediction for project {self}')
            return

        # The ML backend doesn't support batches of tasks: fall back to
        # predicting one task at a time.
        elif len(responses) == 1:
            logger.warning(
                f"ML backend '{self.title}' doesn't support batch processing of tasks, "
                f"switched to one-by-one task processing")
            for task in tasks:
                self.predict_one_task(task)
            return

        # Wrong number of results: log the mismatch, then pair up as many
        # entries as possible (the zip below truncates to the shorter list).
        elif len(responses) != len(tasks_ser):
            logger.error(
                f'ML backend returned {len(responses)} responses for {len(tasks_ser)} tasks'
            )

        predictions = []
        for task, response in zip(tasks_ser, responses):
            predictions.append({
                'task': task['id'],
                'result': response['result'],
                'score': response.get('score'),
                'model_version': self.model_version
            })
        # Validate and persist the whole batch inside a (conditionally)
        # atomic transaction.
        with conditional_atomic():
            prediction_ser = PredictionSerializer(data=predictions, many=True)
            prediction_ser.is_valid(raise_exception=True)
            prediction_ser.save()
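
To make the pairing step concrete, here is a standalone sketch of how tasks_ser and responses are zipped into prediction payloads; the IDs, result values, and model version string are all made up:

# Standalone sketch of the zip step above; every value here is illustrative.
tasks_ser = [{'id': 101}, {'id': 102}]
responses = [
    {'result': [{'value': {'choices': ['Positive']}}], 'score': 0.92},
    {'result': [{'value': {'choices': ['Negative']}}], 'score': 0.41},
]
predictions = [
    {
        'task': task['id'],
        'result': response['result'],
        'score': response.get('score'),
        'model_version': '2021-05-01',  # stands in for self.model_version
    }
    for task, response in zip(tasks_ser, responses)
]
assert predictions[0]['task'] == 101 and predictions[0]['score'] == 0.92
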
Example #3
    def interactive_annotating(self, task, context=None):
        result = {}
        if not self.is_interactive:
            result['errors'] = [
                "Model is not set to be used for interactive preannotations"
            ]
            return result

        tasks_ser = TaskSimpleSerializer([task], many=True).data
        ml_api_result = self.api.make_predictions(
            tasks=tasks_ser,
            model_version=self.model_version,
            project=self.project,
            context=context,
        )
        if ml_api_result.is_error:
            logger.warning(
                f'Prediction not created for project {self}: {ml_api_result.error_message}'
            )
            result['errors'] = [ml_api_result.error_message]
            return result

        # The backend must answer with a dict that contains a 'results' key.
        if not (isinstance(ml_api_result.response, dict)
                and 'results' in ml_api_result.response):
            logger.warning(
                f'ML backend returned an incorrect response, it must be a dict: {ml_api_result.response}'
            )
            result['errors'] = [
                'Incorrect response from ML service: '
                'the response must be a dict.'
            ]
            return result

        ml_results = ml_api_result.response.get('results', [None])
        if not isinstance(ml_results, list) or len(ml_results) < 1:
            logger.warning(
                f'ML backend has to return a list with at least 1 annotation but it returned: {ml_results}'
            )
            result['errors'] = [
                'Incorrect response from ML service: '
                'ML backend has to return a list with at least one result.'
            ]
            return result

        # A single task was sent, so only the first result is meaningful.
        result['data'] = ml_results[0]
        return result
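
A hypothetical call site, assuming `backend` is an instance of this model and `task` is an existing Task row; the context payload is illustrative and depends on what the labeling frontend sends:

# Hypothetical caller; 'backend' and 'task' are assumed to exist already.
result = backend.interactive_annotating(task, context={'region': {'x': 10, 'y': 20}})
if 'errors' in result:
    print('interactive preannotation failed:', result['errors'])
else:
    draft = result['data']  # the single prediction produced by the ML backend
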
Example #4
    def __predict_one_task(self, task):
        # Refresh the backend state and skip if it cannot serve predictions.
        self.update_state()
        if self.not_ready:
            logger.debug(f'ML backend {self} is not ready to predict {task}')
            return
        if task.predictions.filter(model_version=self.model_version).exists():
            # prediction already exists
            logger.info(
                f'Skip creating prediction with ML backend {self} for task {task}: model version '
                f'{self.model_version} is up-to-date')
            return
        ml_api = self.api

        task_ser = TaskSimpleSerializer(task).data
        ml_api_result = ml_api.make_predictions([task_ser], self.model_version,
                                                self.project)
        if ml_api_result.is_error:
            logger.warning(
                f'Prediction not created for project {self}: {ml_api_result.error_message}'
            )
            return
        results = ml_api_result.response['results']
        if len(results) == 0:
            logger.error(
                f'ML backend returned empty prediction for project {self}',
                extra={'sentry_skip': True})
            return
        # Only one task was sent, so take the first (and only) result.
        prediction_response = results[0]
        task_id = task_ser['id']
        r = prediction_response['result']
        score = prediction_response.get('score')
        # Persist the prediction inside a (conditionally) atomic transaction.
        with conditional_atomic():
            prediction = Prediction.objects.create(
                result=r,
                score=safe_float(score),
                model_version=self.model_version,
                task_id=task_id,
                cluster=prediction_response.get('cluster'),
                neighbors=prediction_response.get('neighbors'),
                mislabeling=safe_float(
                    prediction_response.get('mislabeling', 0)),
            )
            logger.debug(f'Prediction {prediction} created')

        return prediction
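
This method leans on a `safe_float` helper that is not shown in the snippet. A plausible sketch consistent with how it is called above (the real implementation may differ):

def safe_float(value, default=None):
    """Hypothetical sketch: coerce to float without raising, so malformed
    'score' or 'mislabeling' values from the backend can't break the save."""
    try:
        return float(value)
    except (TypeError, ValueError):
        return default
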