Example #1
from nltk.metrics.agreement import AnnotationTask


def agree_tags(delta, column):
    """
    egytokenes címkézési feladatokra számol egyetértést
    :param delta:  az összevetett adat
    :param column:  az az oszlop, amelyre egyetértést akarunk számolni
    :return:
    """
    by_field = reverse_tags(delta, column)  # helper defined elsewhere; expected to yield (coder, item, label) triples

    task = AnnotationTask(data=by_field)

    oa = task.avg_Ao()      # observed agreement
    s = task.S()            # Bennett, Alpert and Goldstein S (1954): all categories equally likely
    pi = task.pi()          # Scott pi (1955): single pooled label distribution
    kappa = task.kappa()    # Cohen kappa (1960): per-coder label distributions
    w_kappa = task.weighted_kappa()  # weighted kappa (Cohen 1968)
    alpha = task.alpha()    # Krippendorff alpha (1980)

    return oa, s, pi, kappa, w_kappa, alpha
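
A minimal usage sketch with hypothetical data: AnnotationTask expects (coder, item, label) triples, which is the shape reverse_tags() is assumed to produce, so the annotator names and labels below are invented purely for illustration.

from nltk.metrics.agreement import AnnotationTask

# Hypothetical (coder, item, label) triples for two annotators over two tokens.
triples = [
    ('coder1', 'token1', 'NOUN'), ('coder2', 'token1', 'NOUN'),
    ('coder1', 'token2', 'VERB'), ('coder2', 'token2', 'NOUN'),
]

task = AnnotationTask(data=triples)
oa, s, pi, kappa, w_kappa, alpha = (task.avg_Ao(), task.S(), task.pi(),
                                    task.kappa(), task.weighted_kappa(),
                                    task.alpha())
print(oa, kappa, alpha)
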
Example #2
# Standard-library and Django imports used below; LOGGER, COMMIT_TAG,
# APPRAISE_TASK_TYPE_CHOICES, APPRAISE_TASK_CACHE, _update_task_cache and the
# EvaluationTask/EvaluationItem/EvaluationResult models are defined elsewhere
# in the surrounding appraise module.
from collections import Counter

from django.shortcuts import get_object_or_404, redirect, render


def status_view(request, task_id=None):
    """
    Renders the evaluation tasks status page for staff users.
    """
    LOGGER.info('Rendering evaluation task overview for user "{0}".'.format(
        request.user.username))

    # Check if the user is a member of the WMT13 group.  If so, redirect to the wmt13 app.
    if request.user.groups.filter(name="WMT13").exists():
        LOGGER.info('Redirecting user "{0}" to WMT13 overview.'.format(
            request.user.username))
        return redirect('appraise.wmt13.views.overview')

    if task_id:
        task = get_object_or_404(EvaluationTask, task_id=task_id)

        headers = task.get_status_header()
        status = []

        for user in task.users.all():
            status.append((user.username, task.get_status_for_user(user)))

        scores = None
        result_data = []
        raw_result_data = Counter()
        users = list(task.users.all())

        for item in EvaluationItem.objects.filter(task=task):
            results = []
            for user in users:
                qset = EvaluationResult.objects.filter(user=user, item=item)
                if qset.exists():
                    category = str(qset[0].results)
                    results.append((user.id, item.id, category))
                    raw_result_data[qset[0].raw_result] += 1

            if len(results) == len(users):
                result_data.extend(results)

        _raw_results = []
        _keys = raw_result_data.keys()
        _total_results = float(sum(raw_result_data.values()))
        for key in sorted(_keys):
            value = raw_result_data[key]
            _raw_results.append((key, value, 100 * value / _total_results))

        try:
            # Computing inter-annotator agreement only makes sense for more
            # than one coder -- otherwise, we only display result_data...
            if len(users) > 1:
                # Check if we can safely use NLTK's AnnotationTask class.
                try:
                    from nltk.metrics.agreement import AnnotationTask
                    # Two coders labelling the same item identically must give
                    # perfect observed agreement; otherwise fall back below.
                    chk = AnnotationTask(data=[('b', '1', 'k'), ('a', '1', 'k')])
                    assert chk.avg_Ao() == 1.0

                except AssertionError:
                    LOGGER.debug('Fixing outdated version of AnnotationTask.')
                    from appraise.utils import AnnotationTask

                # We have to sort annotation data to prevent StopIteration errors.
                result_data.sort()
                annotation_task = AnnotationTask(result_data)

                scores = (annotation_task.alpha(), annotation_task.kappa(),
                          annotation_task.S(), annotation_task.pi())

        except ZeroDivisionError:
            scores = None

        except ImportError:
            scores = None

        dictionary = {
            'combined': task.get_status_for_users(),
            'commit_tag': COMMIT_TAG,
            'headers': headers,
            'scores': scores,
            'raw_results': _raw_results,
            'status': status,
            'task_id': task.task_id,
            'task_name': task.task_name,
            'title': 'Evaluation Task Status',
        }

        return render(request, 'evaluation/status_task.html', dictionary)

    else:
        evaluation_tasks = {}
        for task_type_id, task_type in APPRAISE_TASK_TYPE_CHOICES:
            # We collect a list of task descriptions for this task_type.
            evaluation_tasks[task_type] = []

            # Super users see all EvaluationTask items, even non-active ones.
            if request.user.is_superuser:
                _tasks = EvaluationTask.objects.filter(task_type=task_type_id)

            else:
                _tasks = EvaluationTask.objects.filter(task_type=task_type_id,
                                                       active=True)

            # Loop over the QuerySet and compute task description data.
            for _task in _tasks:
                if _task.task_id not in APPRAISE_TASK_CACHE:
                    APPRAISE_TASK_CACHE[_task.task_id] = {}

                _cache = APPRAISE_TASK_CACHE[_task.task_id]
                if request.user.username not in _cache:
                    _update_task_cache(_task, request.user)

                _task_data = _cache[request.user.username]

                # Append new task description to current task_type list.
                evaluation_tasks[task_type].append(_task_data)

            # If there are no task descriptions for this task_type, we skip it.
            if len(evaluation_tasks[task_type]) == 0:
                evaluation_tasks.pop(task_type)

        dictionary = {
            'active_page': "STATUS",
            'commit_tag': COMMIT_TAG,
            'evaluation_tasks': evaluation_tasks,
            'title': 'Evaluation Task Status',
        }

        return render(request, 'evaluation/status.html', dictionary)
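
For reference, a standalone sketch of the inter-annotator agreement computation this view performs, using hypothetical (user_id, item_id, category) triples in the same shape as result_data; the ids and category strings are invented for illustration.

from nltk.metrics.agreement import AnnotationTask

# Hypothetical result_data triples: two users judging two items.
result_data = [
    ('1', '101', 'A>B'), ('2', '101', 'A>B'),
    ('1', '102', 'A<B'), ('2', '102', 'A>B'),
]
result_data.sort()  # sorted input avoids StopIteration errors in older NLTK versions

annotation_task = AnnotationTask(result_data)
scores = (annotation_task.alpha(), annotation_task.kappa(),
          annotation_task.S(), annotation_task.pi())
print(scores)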