Example 1
def annotation_tool(request, image_id):
    """
    View for the annotation tool.
    """

    image = get_object_or_404(Image, id=image_id)
    source = image.source
    metadata = image.metadata


    # Image navigation history.
    # Establish default values for the history, first.
    nav_history_form = annotations_forms.AnnotationToolNavHistoryForm(
        initial=dict(back="[]", forward="[]", from_image_id=image_id)
    )
    nav_history = dict(
        form=nav_history_form,
        back=None,
        forward=None,
    )
    # The request is a POST if the user is navigating here from the
    # annotation tool on another image (next, back, or forward).
    # (It's a non-POST request if they came from a non-annotation-tool
    # page, or typed the URL directly.)
    if request.method == 'POST':
        nav_history_form_submitted = annotations_forms.AnnotationToolNavHistoryForm(
            request.POST)

        if nav_history_form_submitted.is_valid():
            # Nav history is a serialized list of image ids.
            # For example: "[258,259,268,109]"
            form_data = nav_history_form_submitted.cleaned_data
            back_submitted_list = json.loads(form_data['back'])
            forward_submitted_list = json.loads(form_data['forward'])
            from_image_id = form_data['from_image_id']

            # Construct new back and forward lists based on
            # where we're navigating.
            if request.POST.get('nav_next', None):
                back_list = back_submitted_list + [from_image_id]
                forward_list = []
            elif request.POST.get('nav_back', None):
                back_list = back_submitted_list[:-1]
                forward_list = [from_image_id] + forward_submitted_list
            else:  # 'nav_forward'
                back_list = back_submitted_list + [from_image_id]
                forward_list = forward_submitted_list[1:]

            # Cap how much history we carry over to the next page load.
            limit = 10
            nav_history_form = annotations_forms.AnnotationToolNavHistoryForm(
                initial=dict(
                    back=json.dumps(back_list[-limit:]),
                    forward=json.dumps(forward_list[:limit]),
                    from_image_id=image_id,
                )
            )

            if len(back_list) > 0:
                back = Image.objects.get(pk=back_list[-1])
            else:
                back = None
            if len(forward_list) > 0:
                forward = Image.objects.get(pk=forward_list[0])
            else:
                forward = None
            nav_history = dict(
                form=nav_history_form,
                back=back,
                forward=forward,
            )
        else:
            # The form is invalid for some reason.
            # Fail silently and fall back to the empty history set up above.
            pass
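
    # For reference: AnnotationToolNavHistoryForm is defined elsewhere in the
    # project. Judging from how it's used above, it presumably amounts to a
    # plain Django form with three hidden fields that carry the serialized
    # history and the current image id between page loads. A sketch (an
    # assumption, not the project's actual definition):
    #
    #     class AnnotationToolNavHistoryForm(forms.Form):
    #         back = forms.CharField(widget=forms.HiddenInput())
    #         forward = forms.CharField(widget=forms.HiddenInput())
    #         from_image_id = forms.IntegerField(widget=forms.HiddenInput())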


    # Get the settings object for this user.
    # If there is no such settings object, then create it.
    settings_obj, created = AnnotationToolSettings.objects.get_or_create(user=request.user)
    settings_form = AnnotationToolSettingsForm(instance=settings_obj)


    # Get all labels, ordered first by functional group, then by short code.
    labels = source.labelset.labels.all().order_by('group', 'code')
    # Get labels in the form {'code': <short code>, 'group': <functional group>, 'name': <full name>}.
    # Convert from a ValuesQuerySet to a list to make the structure JSON-serializable.
    labelValues = list(labels.values('code', 'group', 'name'))

    error_message = []
    # Get the machine's label probabilities, if applicable.
    if not settings_obj.show_machine_annotations:
        label_probabilities = None
    elif not image.status.annotatedByRobot:
        label_probabilities = None
    else:
        label_probabilities = task_utils.get_label_probabilities_for_image(image_id)
        # label_probabilities can still be None here if something goes wrong.
        # But if not None, apply Alleviate.
        if label_probabilities:
            annotations_utils.apply_alleviate(image_id, label_probabilities)
        else:
            error_message.append('Whoops! Could not read the label probabilities. Manual annotation still works.')


    # Get points and annotations.
    form = AnnotationForm(
        image=image,
        user=request.user,
        show_machine_annotations=settings_obj.show_machine_annotations
    )

    pointValues = Point.objects.filter(image=image).values(
        'point_number', 'row', 'column')
    annotationValues = Annotation.objects.filter(image=image).values(
        'point__point_number', 'label__name', 'label__code')

    # annotationsDict
    # keys: point numbers
    # values: dicts containing the values in pointValues and
    #         annotationValues (if the point has an annotation) above
    annotationsDict = dict()
    for p in pointValues:
        annotationsDict[p['point_number']] = p
    for a in annotationValues:
        annotationsDict[a['point__point_number']].update(a)

    # Get a list of the annotationsDict values (the keys are discarded)
    # Sort by point_number
    annotations = list(annotationsDict.values())
    annotations.sort(key=lambda x: x['point_number'])

    # Now we've gotten all the relevant points and annotations
    # from the database, in a list of dicts:
    # [{'point_number': 1, 'row': 294, 'column': 749,
    #   'label__name': 'Porites', 'label__code': 'Porit'},
    #  {'point_number': 2, ...},
    #  ...]
    # TODO: Are we even using anything besides row, column, and point_number?
    # If not, discard the annotation fields to avoid confusion.


    # Image tools form (brightness, contrast, etc.)
    image_options_form = AnnotationImageOptionsForm()


    # Image dimensions.
    IMAGE_AREA_WIDTH = 850
    IMAGE_AREA_HEIGHT = 650

    source_images = dict(full=dict(
        url=image.original_file.url,
        width=image.original_file.width,
        height=image.original_file.height,
    ))
    if image.original_width > IMAGE_AREA_WIDTH:
        # Scale to a fixed width; a height of 0 tells the thumbnailer to pick
        # the height that preserves the aspect ratio.
        thumbnail_dimensions = (IMAGE_AREA_WIDTH, 0)

        # Generate the thumbnail if it doesn't exist, and get the thumbnail's URL and dimensions.
        thumbnailer = get_thumbnailer(image.original_file)
        thumb = thumbnailer.get_thumbnail(dict(size=thumbnail_dimensions))
        source_images.update(dict(scaled=dict(
            url=thumb.url,
            width=thumb.width,
            height=thumb.height,
        )))


    # Get the next image to annotate.
    # This'll be the next image that needs annotation;
    # or if we're at the last image, wrap around to the first image.
    next_image_to_annotate = get_next_image(image, dict(status__annotatedByHuman=False))

    if next_image_to_annotate is None:
        next_image_to_annotate = get_first_image(
            image.source, dict(status__annotatedByHuman=False)
        )
        # Don't allow getting the current image as the next image to annotate.
        if next_image_to_annotate is not None and next_image_to_annotate.id == image.id:
            next_image_to_annotate = None


    # Record this access of the annotation tool page.
    access = AnnotationToolAccess(image=image, source=source, user=request.user)
    access.save()

    return render_to_response('annotations/annotation_tool.html', {
        'source': source,
        'image': image,
        'next_image_to_annotate': next_image_to_annotate,
        'nav_history': nav_history,
        'metadata': metadata,
        'labels': labelValues,
        'form': form,
        'settings_form': settings_form,
        'image_options_form': image_options_form,
        'annotations': annotations,
        'annotationsJSON': simplejson.dumps(annotations),
        'label_probabilities': label_probabilities,
        'IMAGE_AREA_WIDTH': IMAGE_AREA_WIDTH,
        'IMAGE_AREA_HEIGHT': IMAGE_AREA_HEIGHT,
        'source_images': source_images,
        'num_of_points': len(annotations),
        'num_of_annotations': len(annotationValues),
        'messages': error_message,
        },
        context_instance=RequestContext(request)
    )
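
The back/forward bookkeeping near the top of this view is easier to follow with concrete values. The sketch below replays the three navigation cases with plain Python lists and the same JSON round-trip as the view; the image ids are made up.

import json

# Hypothetical state posted from the tool while viewing image 268: the user
# previously visited 258 and 259, and has nothing in the "forward" history.
back_submitted_list = json.loads('[258, 259]')
forward_submitted_list = json.loads('[]')
from_image_id = 268

# 'nav_next' (jumping to a new image): push the current image onto the back
# list and clear the forward list.
back_list = back_submitted_list + [from_image_id]        # [258, 259, 268]
forward_list = []                                        # []

# 'nav_back': the destination is the last back entry; drop it from the back
# list and push the current image onto the front of the forward list.
back_list = back_submitted_list[:-1]                     # [258]
forward_list = [from_image_id] + forward_submitted_list  # [268]

# 'nav_forward': the destination is the first forward entry; drop it from the
# forward list and push the current image onto the back list.
back_list = back_submitted_list + [from_image_id]        # [258, 259, 268]
forward_list = forward_submitted_list[1:]                # []

# In every case, the new lists are re-serialized (capped at 10 entries each)
# into the hidden form fields for the next page load.
print(json.dumps(back_list[-10:]), json.dumps(forward_list[:10]))
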
Example 2
def annotation_tool(request, image_id):
    """
    View for the annotation tool.
    """

    image = get_object_or_404(Image, id=image_id)
    source = image.source
    metadata = image.metadata

    # Get all labels, ordered first by functional group, then by short code.
    labels = source.labelset.labels.all().order_by('group', 'code')
    # Get labels in the form {'code': <short code>, 'group': <functional group>, 'name': <full name>}.
    # Convert from a ValuesQuerySet to a list to make the structure JSON-serializable.
    labelValues = list(labels.values('code', 'group', 'name'))

    form = AnnotationForm(image=image, user=request.user)

    pointValues = Point.objects.filter(image=image).values(
        'point_number', 'row', 'column')
    annotationValues = Annotation.objects.filter(image=image).values(
        'point__point_number', 'label__name', 'label__code')

    # annotationsDict
    # keys: point numbers
    # values: dicts containing the values in pointValues and
    #         annotationValues (if the point has an annotation) above
    annotationsDict = dict()
    for p in pointValues:
        annotationsDict[p['point_number']] = p
    for a in annotationValues:
        annotationsDict[a['point__point_number']].update(a)

    # Get a list of the annotationsDict values (the keys are discarded)
    # Sort by point_number
    annotations = list(annotationsDict.values())
    annotations.sort(key=lambda x: x['point_number'])

    # Now we've gotten all the relevant points and annotations
    # from the database, in a list of dicts:
    # [{'point_number': 1, 'row': 294, 'column': 749,
    #   'label__name': 'Porites', 'label__code': 'Porit'},
    #  {'point_number': 2, ...},
    #  ...]
    # TODO: Are we even using anything besides row, column, and point_number?
    # If not, discard the annotation fields to avoid confusion.

    # Get the next and previous images that still need human annotation.
    need_human_anno_next = get_next_image(image, dict(status__annotatedByHuman=False))
    need_human_anno_prev = get_prev_image(image, dict(status__annotatedByHuman=False))

    # Get the settings object for this user.
    # If there is no such settings object, then create it.
    settings_obj, created = AnnotationToolSettings.objects.get_or_create(user=request.user)
    settings_form = AnnotationToolSettingsForm(instance=settings_obj)

    # Image tools form (brightness, contrast, etc.)
    image_options_form = AnnotationImageOptionsForm()

    IMAGE_AREA_WIDTH = 800
    IMAGE_AREA_HEIGHT = 600

    source_images = dict(full=dict(
        url=image.original_file.url,
        width=image.original_file.width,
        height=image.original_file.height,
    ))
    if image.original_width > IMAGE_AREA_WIDTH:
        # Scale to a fixed width; a height of 0 tells the thumbnailer to pick
        # the height that preserves the aspect ratio.
        thumbnail_dimensions = (IMAGE_AREA_WIDTH, 0)

        # Generate the thumbnail if it doesn't exist, and get the thumbnail's URL and dimensions.
        thumbnailer = get_thumbnailer(image.original_file)
        thumb = thumbnailer.get_thumbnail(dict(size=thumbnail_dimensions))
        source_images.update(dict(scaled=dict(
            url=thumb.url,
            width=thumb.width,
            height=thumb.height,
        )))

    # Record this access of the annotation tool page.
    access = AnnotationToolAccess(image=image, source=source, user=request.user)
    access.save()

    return render_to_response('annotations/annotation_tool.html', {
        'source': source,
        'image': image,
        'next_image': need_human_anno_next,
        'prev_image': need_human_anno_prev,
        'metadata': metadata,
        'labels': labelValues,
        'form': form,
        'settings_form': settings_form,
        'image_options_form': image_options_form,
        'annotations': annotations,
        'annotationsJSON': simplejson.dumps(annotations),
        'IMAGE_AREA_WIDTH': IMAGE_AREA_WIDTH,
        'IMAGE_AREA_HEIGHT': IMAGE_AREA_HEIGHT,
        'source_images': source_images,
        'num_of_points': len(annotations),
        'num_of_annotations': len(annotationValues),
        },
        context_instance=RequestContext(request)
    )
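
The point/annotation merge is the same in both examples. Here is a small self-contained sketch with made-up data; the dict keys mirror the .values() querysets in the views above.

# Rows shaped like the Point.objects.values(...) and
# Annotation.objects.values(...) results; the data itself is made up.
pointValues = [
    {'point_number': 1, 'row': 294, 'column': 749},
    {'point_number': 2, 'row': 110, 'column': 503},
]
annotationValues = [
    {'point__point_number': 1, 'label__name': 'Porites', 'label__code': 'Porit'},
]

# Index the point rows by point number, then fold in the annotation row for
# any point that has one (point 2 stays unannotated).
annotationsDict = {p['point_number']: dict(p) for p in pointValues}
for a in annotationValues:
    annotationsDict[a['point__point_number']].update(a)

annotations = sorted(annotationsDict.values(), key=lambda x: x['point_number'])
# annotations[0] -> point_number, row, column, label__name, label__code
# annotations[1] -> point_number, row, column only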