예제 #1
0
파일: utils.py 프로젝트: DevangS/CoralNet
def image_has_any_human_annotations(image):
    """
    Return True if the image has at least one human-made Annotation.
    Return False otherwise.

    "Human-made" means: not made by the robot user and not auto-accepted
    by the Alleviate user.

    :param image: the Image to check.
    """
    human_annotations = (
        Annotation.objects.filter(image=image)
        .exclude(user=get_robot_user())
        .exclude(user=get_alleviate_user())
    )
    # exists() lets the database stop at the first matching row, instead
    # of counting every row as count() > 0 would.
    return human_annotations.exists()
예제 #2
0
파일: utils.py 프로젝트: DevangS/CoralNet
def generate_points(img, usesourcemethod=True):
    """
    Generate annotation points for the Image img,
    and delete any points that had previously existed.

    Does nothing if the image already has human annotations,
    because we don't want to delete any human work.

    :param img: the Image to generate points for.
    :param usesourcemethod: if True, use the source's default point
        generation method; otherwise use the image's own method.
    :raises ValueError: if the image's annotation area type is neither
        percentages nor pixels.
    """

    # If there are any human annotations for this image,
    # abort point generation.
    # exists() asks the database for at most one row, instead of
    # evaluating and caching the whole queryset as bool() would.
    human_annotations = Annotation.objects.filter(image=img).exclude(user=get_robot_user()).exclude(user=get_alleviate_user())
    if human_annotations.exists():
        return

    # Find the annotation area, expressed in pixels.
    d = AnnotationAreaUtils.db_format_to_numbers(img.metadata.annotation_area)
    annoarea_type = d.pop('type')
    if annoarea_type == AnnotationAreaUtils.TYPE_PERCENTAGES:
        annoarea_dict = AnnotationAreaUtils.percentages_to_pixels(width=img.original_width, height=img.original_height, **d)
    elif annoarea_type == AnnotationAreaUtils.TYPE_PIXELS:
        annoarea_dict = d
    else:
        raise ValueError("Can't generate points with annotation area type '{0}'.".format(annoarea_type))

    # Pick the point generation method: the source-wide default, or
    # the image's own override.
    if usesourcemethod:
        point_gen_method = img.source.default_point_generation_method
    else:
        point_gen_method = img.point_generation_method

    new_points = calculate_points(
        img, annotation_area=annoarea_dict,
        **PointGen.db_to_args_format(point_gen_method)
    )

    # Delete old points for this image, if any.
    # NOTE(review): a single queryset .delete() would be one query
    # instead of one per point — safe only if Point has no custom
    # delete() or delete signals; confirm before switching.
    old_points = Point.objects.filter(image=img)
    for old_point in old_points:
        old_point.delete()

    # Save the newly calculated points.
    for new_point in new_points:
        Point(row=new_point['row'],
              column=new_point['column'],
              point_number=new_point['point_number'],
              image=img,
        ).save()

    # Update image status.
    # Make sure the image goes through the feature-making step again.
    status = img.status
    status.hasRandomPoints = True
    status.save()
예제 #3
0
파일: views.py 프로젝트: DevangS/CoralNet
def export_annotations(request, source_id):
    """
    Stream a CSV download of all annotations in the given source.

    One row per annotation: the source's location keys, then file name,
    photo date, annotation date, annotator, point row/column, and the
    label's name, short code, and functional group.

    If the 'robot' GET parameter is non-empty, robot annotations are
    included; otherwise they are excluded.
    """
    # The response object doubles as the csv writer's output stream;
    # mark it as a forced download.
    response = HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = 'attachment;filename="annotations.csv"'
    writer = csv.writer(response)

    source = get_object_or_404(Source, id=source_id)
    images = Image.objects.filter(source=source).select_related()

    if request.GET.get('robot', ''):
        all_annotations = Annotation.objects.filter(source=source)
    else:
        all_annotations = Annotation.objects.filter(source=source).exclude(user=get_robot_user())

    # Header: locKey1 .. locKeyN, then the per-annotation columns.
    writer.writerow(
        list(source.get_key_list()) + [
            'original_file_name', 'date_taken', 'date_annotated',
            'annotator', 'row', 'col', 'label', 'shortcode', 'func_group',
        ]
    )

    # Example row:
    # lter1 out10m line1-2 qu1 20100427 20110101 130 230 Porit ...
    for image in images:
        loc_keys = image.get_location_value_str_list_robust()
        file_name = str(image.metadata.name)
        photo_date = str(image.metadata.photo_date)
        image_annotations = all_annotations.filter(image=image).order_by('point').select_related()

        for anno in image_annotations:
            writer.writerow(
                list(loc_keys) + [
                    file_name,
                    photo_date,
                    str(anno.annotation_date),
                    str(anno.user),
                    str(anno.point.row),
                    str(anno.point.column),
                    str(anno.label.name),
                    str(anno.label.code),
                    str(anno.label.group),
                ]
            )

    return response
예제 #4
0
파일: utils.py 프로젝트: DevangS/CoralNet
def apply_alleviate(image_id, label_probabilities):
    """
    Apply alleviate to a particular image: auto-accept top machine suggestions
    based on the confidence threshold.

    :param image_id: id of the image.
    :param label_probabilities: the machine's assigned label
           probabilities for each point of the image.
    :return: nothing.
    """
    img = Image.objects.get(id=image_id)
    source = img.source
    robot = source.get_latest_robot()
    alleviate_meta = get_alleviate_meta(robot)

    # A threshold below 1 means alleviate is disabled for this source.
    if source.alleviate_threshold < 1:
        return
    if not alleviate_meta['ok']:
        return

    if source.alleviate_threshold == 100:
        # If the user wants 100% alleviation, we set the threshold to 0,
        # meaning that all points will be annotated.
        confidence_threshold = 0
    else:
        # This is a critical step in the alleviate logic: translate the
        # alleviate level to a classifier confidence threshold
        # between 0 and 1.
        confidence_threshold = alleviate_meta['score_translate'][source.alleviate_threshold]

    machine_annos = Annotation.objects.filter(image=img, user=get_robot_user())
    alleviate_was_applied = False

    for anno in machine_annos:
        pt_number = anno.point.point_number
        label_scores = label_probabilities[pt_number]
        # Only the best score matters, so max() is enough; the original
        # code sorted the whole list just to read the first element.
        top_confidence = max(label_scores, key=operator.itemgetter('score'))['score']

        if top_confidence >= confidence_threshold:
            # Save the annotation under the username Alleviate, so that it's no longer
            # a robot annotation.
            anno.user = get_alleviate_user()
            anno.save()
            alleviate_was_applied = True

    if alleviate_was_applied:
        # Are all points human annotated now?
        all_done = image_annotation_all_done(img)
        # Update image status, if needed
        if all_done:
            img.status.annotatedByHuman = True
            img.status.save()
예제 #5
0
파일: utils.py 프로젝트: DevangS/CoralNet
def image_annotation_all_done(image):
    """
    Return True if all of the image's annotation points are human annotated.
    Return False otherwise.
    Don't use the status field annotatedByHuman.  That field depends
    on this function, not the other way around!
    """
    annotations = Annotation.objects.filter(image=image)

    # All done when every point has an annotation and none of the
    # annotations were made by the robot user.  exists() lets the
    # database short-circuit, unlike count() == 0.
    return (annotations.count() == Point.objects.filter(image=image).count()
            and not annotations.filter(user=get_robot_user()).exists())
예제 #6
0
파일: tasks.py 프로젝트: koudev/CoralNet
def Classify(image_id):
    image = Image.objects.get(pk=image_id)

    # if annotated by Human, no need to re-classify
    if image.status.annotatedByHuman:
        print 'Classify: Image nr ' + str(image_id) + ' is annotated by the human operator, aborting'
        return

    # make sure that the previous step is complete
    if not image.status.featuresExtracted:
        print 'Classify: Features not extracted for image id {id}, can not proceed'.format(id = image_id)
        return

    # Get all robots for this source
    latestRobot = image.source.get_latest_robot()

    if latestRobot == None:
        print 'Classify: No robots exist for the source, {src}, of image id {id}. Aborting.'.format(src=image.source, id=image_id)
        return

    # Check if this image has been previously annotated by a robot.
    if (image.status.annotatedByRobot):
        # now, compare this version number to the latest_robot_annotator field for image.
        if (not (latestRobot.version > image.latest_robot_annotator.version)):
            print 'Image {id} is already annotated by the latest robot version, {ver}, for source, {src}'.format(id = image_id,  ver=latestRobot.version, src=image.source)
            return

    ####### EVERYTHING OK: START THE CLASSIFICATION ##########
    #update image status
    image.status.annotatedByRobot = True
    image.status.save()
    image.latest_robot_annotator = latestRobot
    image.save()

    print 'Start classify image id {id}'.format(id = image_id)
    #builds args for matlab script
    featureFile = os.path.join(FEATURES_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".dat")
    #get the source id for this file
    labelFile = os.path.join(CLASSIFY_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".txt")

    task_helpers.coralnet_classify(
        featureFile=featureFile,
        modelFile=latestRobot.path_to_model,
        labelFile=labelFile,
        logFile=CV_LOG,
        errorLogfile=CLASSIFY_ERROR_LOG,
    )

    #get algorithm user object
    user = get_robot_user()

    #open the labelFile and rowColFile to process labels
    rowColFile = os.path.join(FEATURES_DIR, str(image_id) + "_rowCol.txt")
    label_file = open(labelFile, 'r')
    row_file = open(rowColFile, 'r')

    for line in row_file: #words[0] is row, words[1] is column 
        words = line.split(',')

        #gets the label object based on the label id the algorithm specified
        label_id = label_file.readline()
        label_id.replace('\n', '')
        label = Label.objects.filter(id=label_id)

        #gets the point object(s) that have that row and column.
        #if there's more than one such point, add annotations to all of
        #these points the first time we see this row+col, and don't do
        #anything on subsequent times (filtering with annotation=None accomplishes this).
        points = Point.objects.filter(image=image, row=words[0], column=words[1], annotation=None)
        for point in points:
            #create the annotation object and save it
            Ann = Annotation.objects.filter(point=point, image=image)
            if ( len(Ann) > 0 and ( not is_robot_user(Ann[0].user) ) ): # if this is an imported or human, we don't want to overwrite it, so continue
                continue
            annotation = Annotation(image=image, label=label[0], point=point, user=user, robot_version=latestRobot, source=image.source)
            annotation.save()

    print 'Finished classification of image id {id}'.format(id = image_id)

    label_file.close()
    row_file.close()
예제 #7
0
파일: tasks.py 프로젝트: DevangS/CoralNet
    def helper_classify_does_not_overwrite_manual_annotations(self, annotator_user):
        """
        Helper function for the tests that follow.

        Trains a robot on the test source, uploads a fresh image,
        manually annotates its odd-numbered points as annotator_user,
        then runs classification and asserts that only the even-numbered
        (previously unannotated) points received robot annotations while
        the manual annotations are untouched.

        :param annotator_user: the User to attribute the manual
            annotations to.
        """


        # Take at least (min number for training) images.
        # Preprocess, feature extract, and add human annotations to
        # the features.
        for img in Image.objects.filter(source__pk=self.source_id):
            preprocess_image(img.id)
            make_features(img.id)
            self.add_human_annotations(img.id)
            add_labels_to_features(img.id)

        # Create a robot.
        result = train_robot(self.source_id)
        self.assertTrue(result == 1)

        # Upload a new image.
        img_id = self.upload_image('006_2012-06-28_color-grid-006.png')[0]

        # Preprocess and feature extract.
        preprocess_image(img_id)
        make_features(img_id)

        # Add annotations.
        source = Source.objects.get(pk=self.source_id)
        img = Image.objects.get(pk=img_id)
        points = Point.objects.filter(image=img)
        labels = source.labelset.labels.all()

        # For odd-numbered points, make an annotation by picking a
        # label randomly from the source's labelset.
        # Leave the even-numbered points alone.
        # (Assumption: the test source has at least 2 points per image)
        for pt in points:

            if pt.point_number % 2 == 0:
                continue

            label = random.choice(labels)
            anno = Annotation(
                point=pt,
                image=img,
                source=source,
                user=annotator_user,
                label=label,
            )
            anno.save()

        # NOTE(review): saves the status without setting any flags here —
        # presumably to persist a side effect of annotating; confirm
        # whether this save is actually needed.
        img.status.save()


        # Get those annotations (again, only odd-numbered points).
        num_points = Point.objects.filter(image__pk=img_id).count()
        manual_annotations = dict()

        for point_num in range(1, num_points+1, 2):

            label_id = Annotation.objects.get(image__pk=img_id, point__point_number=point_num).label.id
            manual_annotations[point_num] = label_id


        # Try to Classify.
        result = classify_image(img_id)

        # Shouldn't throw exception.
        self.assertTrue(result == 1)
        self.assertEqual(Image.objects.get(pk=img_id).status.annotatedByRobot, True)


        # Check the Annotations.
        for point_num in range(1, num_points+1):

            anno = Annotation.objects.get(image__pk=img_id, point__point_number=point_num)
            label_id = anno.label.id

            if point_num % 2 == 0:
                # Even; should be robot
                self.assertEqual(anno.user.id, get_robot_user().id)
            else:
                # Odd; should be manual (and same as before)
                self.assertEqual(label_id, manual_annotations[point_num])
                self.assertEqual(anno.user.id, annotator_user.id)

            # Optional per-point trace output for verbose test runs.
            if settings.UNIT_TEST_VERBOSITY >= 1:
                print "Point {num} | {username} | {label_id}".format(
                    num=point_num,
                    username=anno.user.username,
                    label_id=label_id,
                )
예제 #8
0
파일: views.py 프로젝트: DevangS/CoralNet
def export_statistics(request, source_id):
    """
    Stream a CSV download of per-image label coverage statistics for the
    given source.

    Each data row contains the image's location keys, photo date,
    annotation status, metadata fields, and the percent coverage of
    every label in the source's labelset on that image.

    If the 'robot' GET parameter is non-empty, robot annotations are
    counted; otherwise they are excluded.
    """
    # get the response object, this can be used as a stream.
    response = HttpResponse(mimetype='text/csv')
    # force download.
    response['Content-Disposition'] = 'attachment;filename="statistics.csv"'
    # the csv writer
    writer = csv.writer(response)

    source = get_object_or_404(Source, id=source_id)
    images = Image.objects.filter(source=source).select_related()
    if request.GET.get('robot', ''):
        all_annotations = Annotation.objects.filter(source=source).select_related()
    else:
        all_annotations = Annotation.objects.filter(source=source).exclude(user=get_robot_user()).select_related()

    labelset = get_object_or_404(LabelSet, source=source)
    labels = Label.objects.filter(labelset=labelset).order_by('name')

    #Adds table header which looks something as follows:
    #locKey1 locKey2 locKey3 locKey4 date label1 label2 label3 label4 .... labelEnd
    #Note: label1, label2, etc corresponds to the percent coverage of that label on
    #a per IMAGE basis, not per source
    header = []
    header.extend(source.get_key_list())
    header.append('date_taken')
    header.append('annotation_status')
    if images:
        # Metadata fields are the same for all images; use the first one.
        # (BUG FIX: the original indexed images[0] unconditionally and
        # raised IndexError for a source with no images.)
        header.extend(images[0].get_metadata_fields_for_export())
    for label in labels:
        header.append(str(label.name))
    writer.writerow(header)

    zeroed_labels_data = [0 for label in labels]
    #Adds data row which looks something as follows:
    #lter1 out10m line1-2 qu1 20100427  10.2 12.1 0 0 13.2
    for image in images:
        locKeys = image.get_location_value_str_list_robust()

        photo_date = str(image.metadata.photo_date)
        image_labels_data = []
        image_labels_data.extend(zeroed_labels_data)
        image_annotations = all_annotations.filter(image=image)
        total_annotations_count = image_annotations.count()

        for label_index, label in enumerate(labels):
            #Testing out this optimization to see if it's any faster
            label_annotations_count = all_annotations.filter(image=image, label=label).count()
            try:
                label_percent_coverage = (float(label_annotations_count)/total_annotations_count)*100
            except ZeroDivisionError:
                # Image has no annotations at all; report 0% coverage.
                label_percent_coverage = 0
            image_labels_data[label_index] = str(label_percent_coverage)

        row = []
        row.extend(locKeys)
        row.append(photo_date)
        if image.status.annotatedByHuman:
            row.append('verified_by_human')
        elif image.status.annotatedByRobot:
            row.append('not_verified_by_human')
        else:
            row.append('not_annotated')
        row.extend(image.get_metadata_values_for_export())
        row.extend(image_labels_data)
        writer.writerow(row)

    return response
예제 #9
0
파일: views.py 프로젝트: DevangS/CoralNet
def generate_statistics(request, source_id):
    """
    View that computes per-year percent-coverage statistics for the
    labels and/or label groups chosen in the StatisticsSearchForm.

    Renders the statistics template with a label table and a group
    table.  Each table row is [name, pct, count, pct, count, ...] with
    one (rounded pct, raw count) pair per year found in the image set.
    """
    errors = []
    years = []
    label_table = []
    group_table = []
    #graph = []

    #generate form to select images to compute statistics for
    source = get_object_or_404(Source, id=source_id)

    #get image search filters
    if request.GET:

        #form to select descriptors to sort images
        form = StatisticsSearchForm(source_id, request.GET)

        if form.is_valid():
            labels = form.cleaned_data['labels']
            groups = form.cleaned_data['groups']

            imageArgs = image_search_args_to_queryset_args(form.cleaned_data, source)

            #Check that the specified set of images and/or labels was found
            if not labels and not groups:
                errors.append("Sorry you didn't specify any labels or groups!")

            #if no errors found, get data needed to plot line graph with
            # coverage on the y axis, and year on the x axis
            if not errors:

                images = Image.objects.filter(source=source, **imageArgs).distinct().select_related()
                # Prefix the image filters with image__ so they can be
                # applied to Annotation querysets.
                patchArgs = dict([('image__'+k, imageArgs[k]) for k in imageArgs])

                #get all annotations for the source that contain the label
                if request.GET and request.GET.get('include_robot', ''):
                    all_annotations = Annotation.objects.filter(source=source, **patchArgs)
                else:
                    all_annotations = Annotation.objects.filter(source=source, **patchArgs).exclude(user=get_robot_user())


                #check that we found annotations
                if all_annotations:
                    #holds the data that gets passed to the graphing code
                    data = []

                    #Format computed data for the graph API to use
                    #TODO: pick easily distinguishable colours from
                    # http://search.cpan.org/~rokr/Color-Library-0.021/lib/Color/Library/Dictionary/WWW.pm
                    # and add them to bucket to be picked randomly
                    bucket = ['00FFFF','32CD32','A52A2A','DC143C','9370DB']
                    legends = []

                    #gets the years we have data for from the specified set of images
                    for image in images:
                        date = image.metadata.photo_date
                        if not date is None:
                            # list.count() used as a membership test;
                            # only add each year once.
                            if not years.count(date.year):
                               years.append(date.year)
                    years.sort()

                    for label in labels:
                        table_yearly_counts = []
                        graph_yearly_counts = []
                        #get yearly counts that become y values for the label's line
                        for year in years:
                            #get the most recent for each point for every label specified
                            total_year_annotations =  all_annotations.filter(image__metadata__photo_date__year=year)
                            total_year_annotations_count = total_year_annotations.count()
                            label_year_annotations_count = total_year_annotations.filter(label=label).count()

                            #add up # of annotations, divide by total annotations, and times 100 to get % coverage
                            # done the way it is b/c we need to cast either num or denom as float to get float result,
                            # convert to %, round, then truncate by casting to int
                            try:
                                percent_coverage = (float(label_year_annotations_count)/total_year_annotations_count)*100
                            except ZeroDivisionError:
                                percent_coverage = 0
                            table_yearly_counts.append(round(percent_coverage,2))
                            table_yearly_counts.append(label_year_annotations_count)
                            graph_yearly_counts.append(int(percent_coverage))

                        data.append(graph_yearly_counts)

                        #add label name to legends
                        name = Label.objects.get(id=int(label)).name
                        legends.append(str(name))

                        #create table row to display
                        table_row = [name]
                        table_row.extend(table_yearly_counts)
                        label_table.append(table_row)

                    # Same computation as the label loop above, but
                    # aggregated per label group.
                    for group in groups:
                        table_yearly_counts = []
                        graph_yearly_counts = []
                        #get yearly counts that become y values for the label's line
                        for year in years:
                            #get the most recent for each point for every label specified
                            total_year_annotations =  all_annotations.filter(image__metadata__photo_date__year=year)
                            total_year_annotations_count = total_year_annotations.count()
                            label_year_annotations_count = total_year_annotations.filter(label__group=group).count()

                            #add up # of annotations, divide by total annotations, and times 100 to get % coverage
                            # done the way it is b/c we need to cast either num or denom as float to get float result,
                            # convert to %, round, then truncate by casting to int
                            try:
                                percent_coverage = (float(label_year_annotations_count)/total_year_annotations_count)*100
                            except ZeroDivisionError:
                                percent_coverage = 0
                            table_yearly_counts.append(round(percent_coverage,2))
                            table_yearly_counts.append(label_year_annotations_count)
                            graph_yearly_counts.append(int(percent_coverage))

                        data.append(graph_yearly_counts)

                        #add label name to legends
                        name = LabelGroup.objects.get(id=int(group)).name
                        legends.append(str(name))

                        #create table row to display
                        table_row = [name]
                        table_row.extend(table_yearly_counts)
                        group_table.append(table_row)
                    """
                    #Create string of colors
                    colors_string = str(bucket[0: (len(labels)+len(groups))]).replace(' ', '').replace('[','').replace(']','').replace('\'', '')

                    #Create string of labels to put on legend
                    legends_string = str(legends).replace('[', '').replace(']','').replace(' ','').replace('\'', '').replace(',', '|')

                    #Get max y value and add 5 to it
                    max_y = max(map(max,data)) + 5

                    #Calculate new data proportional to max_y to scale graph
                    for elem in data:
                        elem[:] = [x*(100/max_y) for x in elem]

                    #Actually generate the graph now
                    graph = GChart('lc', data, encoding='text', chxt='x,y', chco=colors_string, chdl=legends_string)
                    #draw x axis values from lowest to highest year stepping by 1 year
                    graph.axes.range(0,min(years),max(years),1)
                    #draw y axis values from 0 to (max percent coverage + 5) stepping by 5
                    graph.axes.range(1,0,max_y,5)
                    #Define pixel size to draw graph
                    graph.size(400,400)
                    #Adds the title to the graph
                    graph.title('% Coverage over Years')
                    #Set the line thickness for each dataset
                    count = len(data)
                    while count > 0:
                        graph.line(3,0,0)
                        count -= 1
                    """
                else:
                    errors.append("No data found!")

        else:
            errors.append("Your specified search parameters were invalid!")

    else:
        form = StatisticsSearchForm(source_id)

    return render_to_response('visualization/statistics.html', {
        'errors': errors,
        'form': form,
        'source': source,
        'years': years,
        'label_table': label_table,
        'group_table': group_table
        },
        context_instance=RequestContext(request)
    )
예제 #10
0
파일: views.py 프로젝트: DevangS/CoralNet
def visualize_source(request, source_id):
    """
    View for browsing through a source's images.
    """
    ITEMS_PER_PAGE = 20

    errors = []
    source = get_object_or_404(Source, id=source_id)
    metadata_view_available = request.user.has_perm(Source.PermTypes.EDIT.code, source)


    # This is used to create a formset out of the metadataForm. I need to do this
    # in order to pass in the source id.
    # From http://stackoverflow.com/a/624013
    metadataFormSet = formset_factory(MetadataForm)
    metadataFormSet.form = staticmethod(curry(MetadataForm, source_id=source_id))
    # There is a separate form that controls the checkboxes.
    checkboxFormSet = formset_factory(CheckboxForm)


    # Based on submitted GET/POST data, find the following:
    # - Which view mode we're showing
    # - Which images we're showing (if image/meta mode), or
    #   filtering the patches by (if patch mode)
    # - Other parameters


    # Defaults

    # Search form to show on the page
    search_form = BrowseSearchForm(source_id, metadata_view_available)

    # GET args in url format - for constructing prev page/next page links
    urlArgsStr = ''

    # View mode to show the Browse page in
    page_view = 'images'
    # (Patch mode only) the label we want to see patches of
    label = ''
    # (Patch mode only) show patches annotated by human, by machine, or by either one
    annotated_by = 'human'

    image_specify_form = ImageSpecifyForm(
        dict(
            specify_method='search_keys',
            specify_str=json.dumps(dict()),  # No filters, just get all images
        ),
        source=source,
    )


    if request.POST and 'image_specify_form_from_upload' in request.POST:

        # Coming from the upload page.

        # Let the user edit the metadata of the images they just uploaded.
        page_view = 'metadata'

        # Make the search form's page_view field value accurate.
        search_form = BrowseSearchForm(
            source_id, metadata_view_available,
            initial=dict(page_view=page_view),
        )

        # We expect the upload page to properly fill in an ImageSpecifyForm
        # with the image ids.
        image_specify_form = ImageSpecifyForm(request.POST, source=source)

        # Defaults on everything else

    elif request.GET:

        # Search form submitted OR
        # a URL with GET parameters was entered.

        submitted_search_form = BrowseSearchForm(
            source_id,
            metadata_view_available,
            request.GET,
        )

        if submitted_search_form.is_valid():

            # Have the search form on the page start with the values
            # submitted in this search.
            search_form = submitted_search_form

            # Some search form parameters are used for things other than image
            # filtering. Get these parameters.
            #
            # If any of these parameters are u'', then that parameter wasn't
            # submitted. Don't let that '' override the default value.
            if search_form.cleaned_data['page_view'] != u'':
                page_view = search_form.cleaned_data['page_view']
            if search_form.cleaned_data['label'] != u'':
                label = search_form.cleaned_data['label']
            if search_form.cleaned_data['annotated_by'] != u'':
                annotated_by = search_form.cleaned_data['annotated_by']

            # We're going to serialize this cleaned_data and give it
            # to an ImageSpecifyForm. A bit of cleanup to do first though.

            # The Label object isn't serializable, so avoid an error by
            # popping off this key first.
            search_form.cleaned_data.pop('label')

            if page_view == 'patches':
                # If in patch mode, the image_status filter doesn't apply.
                # Pop off this filter to make sure the ImageSpecifyForm
                # doesn't use it.
                search_form.cleaned_data.pop('image_status')

            # Give the search form data to the ImageSpecifyForm. Note that
            # with the exception of the above parameters, any parameters
            # not used for filtering images will just be ignored and won't cause problems.
            image_specify_form = ImageSpecifyForm(
                dict(
                    specify_method='search_keys',
                    specify_str=json.dumps(search_form.cleaned_data),
                ),
                source=source,
            )

        else:

            messages.error(request, 'Error: invalid search parameters.')

            # Use the defaults

    else:

        # Just got to this page via a link, with no GET parameters in the URL.

        # Use the defaults
        pass


    image_results = []
    delete_form = None
    download_form = None

    if image_specify_form.is_valid():

        image_results = image_specify_form.get_images()

        # Create image delete and image download forms if they're supposed
        # to be displayed on the page.
        #
        # Give the forms the same stuff as the image specify form, so they
        # know which images to operate on.
        if page_view == 'images':

            if request.user.has_perm(Source.PermTypes.EDIT.code, source):

                delete_form = ImageBatchDeleteForm(
                    image_specify_form.data,
                    source=source,
                )

            if request.user.has_perm(Source.PermTypes.VIEW.code, source):

                download_form = ImageBatchDownloadForm(
                    image_specify_form.data,
                    source=source,
                )

    else:

        errors.append("Error: something went wrong with this image search.")


    # Get the search results (all of them, not just on this page).

    if page_view == 'images' or page_view == 'metadata':

        # We'll display the images on the page. Sort them by image id
        # (highest id first).
        all_items = image_results.order_by('-pk')

    else:  # patches

        # Get the annotations in the source that:
        # - contain the label
        # - meet the "annotated_by" constraint
        #
        # And get the annotations in random order.
        #
        # Performance consideration: image__in=<queryset> might be slow in
        # MySQL. If in doubt, test/benchmark it.
        # https://docs.djangoproject.com/en/1.5/ref/models/querysets/#in
        annotations = Annotation.objects.filter(source=source, label=label, image__in=image_results).order_by('?')

        if annotated_by == 'either':
            # No further filtering needed
            pass
        elif annotated_by == 'machine':
            annotations = annotations.filter(user=get_robot_user())
        else:  # 'human'
            annotations = annotations.exclude(user=get_robot_user())

        # Placeholder the image patches with the Annotation objects for now.
        # We'll actually get the patches when we know which page we're showing.
        all_items = annotations

    num_of_total_results = len(all_items)

    if num_of_total_results == 0:
        # No image results in this search
        errors.append("No image results.")


    # If we're in one of the paginated views, find out what the
    # results on this page are.

    page_results = None
    prev_page_link = None
    next_page_link = None

    if page_view == 'images' or page_view == 'patches':

        paginator = Paginator(all_items, ITEMS_PER_PAGE)

        # Make sure page request is an int. If not, deliver first page.
        try:
            page = int(request.GET.get('page', '1'))
        except ValueError:
            page = 1

        # If page request (9999) is out of range, deliver last page of results.
        try:
            page_results = paginator.page(page)
        except (EmptyPage, InvalidPage):
            page_results = paginator.page(paginator.num_pages)

        # If there are previous or next pages, construct links to them.
        # These links include GET parameters in the form of
        # ?param1=value1&param2=value2 etc.

        if page_results.has_previous():
            prev_page_query_args = request.GET.copy()
            prev_page_query_args.update(dict(page=page_results.previous_page_number()))
            prev_page_link = '?' + urllib.urlencode(prev_page_query_args)
        if page_results.has_next():
            next_page_query_args = request.GET.copy()
            next_page_query_args.update(dict(page=page_results.next_page_number()))
            next_page_link = '?' + urllib.urlencode(next_page_query_args)

        # Finalize the data-structure that holds this page's results.

        if page_view == 'patches':

            # Get an image-patch for each result on the page.
            # Earlier we placeholdered the image patches with the annotation objects,
            # so we're iterating over those annotations now.
            for index, annotation in enumerate(page_results.object_list):

                # TODO: don't hardcode the patch path
                # (this might also apply to the label_main view)
                patchPath = "data/annotations/" + str(annotation.id) + ".jpg"

                page_results.object_list[index] = dict(
                    fullImage=annotation.image,
                    patchPath=patchPath,
                    row=annotation.point.row,
                    col=annotation.point.column,
                    pointNum=annotation.point.point_number,
                )

                generate_patch_if_doesnt_exist(patchPath, annotation)

        else:  # 'images'

            for index, image_obj in enumerate(page_results.object_list):

                page_results.object_list[index] = dict(
                    image_obj=image_obj,
                )


    # If we're showing the metadata form (grid of fields), then prepare that.

    if page_view == 'metadata':

        # Get image statuses (needs annotation, etc.)

        statuses = []
        for image in all_items:
            statuses.append(image.get_annotation_status_str)

        # Initialize the form set with the existing metadata values.

        initValues = {
            'form-TOTAL_FORMS': '%s' % len(all_items),
            'form-INITIAL_FORMS': '%s' % len(all_items),
        }
        initValuesMetadata = initValues

        for i, image in enumerate(all_items):

            # Location keys
            keys = image.get_location_value_str_list()
            for j, key in enumerate(keys):
                initValuesMetadata['form-%s-key%s' % (i,j+1)] = key

            # Image id
            initValuesMetadata['form-%s-image_id' % i] = image.id

            # Other fields
            metadata_field_names = ['photo_date', 'height_in_cm', 'latitude',
                                    'longitude', 'depth', 'camera',
                                    'photographer', 'water_quality',
                                    'strobes', 'framing', 'balance']
            for metadata_field in metadata_field_names:
                formset_field_name = 'form-{num}-{field}'.format(num=i, field=metadata_field)
                initValuesMetadata[formset_field_name] = getattr(image.metadata, metadata_field)

        metadataForm = metadataFormSet(initValuesMetadata)
        checkboxForm = checkboxFormSet(initValues)
        metadataFormWithExtra = zip(metadataForm.forms, checkboxForm.forms, all_items, statuses)
        selectAllCheckbox = CheckboxForm()

    else:

        # Not showing the metadata view.

        metadataForm = None
        metadataFormWithExtra = None
        selectAllCheckbox = None


    # The destination page when you click on an image/patch thumbnail.
    if request.user.has_perm(Source.PermTypes.EDIT.code, source):
        thumbnail_dest_page = 'annotation_tool'
    else:
        thumbnail_dest_page = 'image_detail'


    return render_to_response('visualization/visualize_source.html', {
        'source': source,

        'searchForm': search_form,

        'errors': errors,
        'page_results': page_results,
        'num_of_total_results': num_of_total_results,
        'thumbnail_dest_page': thumbnail_dest_page,
        'prev_page_link': prev_page_link,
        'next_page_link': next_page_link,

        'delete_form': delete_form,
        'download_form': download_form,
        'has_delete_form': bool(delete_form),
        'has_download_form': False,
        # TODO: Uncomment this once downloading is implemented
        #'has_download_form': bool(download_form),

        'key_list': source.get_key_list(),
        'metadataForm': metadataForm,
        'selectAllForm': selectAllCheckbox,
        'metadataFormWithExtra': metadataFormWithExtra,

        'page_view': page_view,
        },
        context_instance=RequestContext(request)
    )
예제 #11
0
파일: tasks.py 프로젝트: DevangS/CoralNet
def classify_image(image_id):
    """
    Run the latest robot classifier on the image with the given id, then
    import the resulting label probabilities into the database as robot
    Annotations (one per point, taking the highest-probability label).

    Returns 1 if classification was skipped or completed successfully,
    0 if the classification backend reported an error.
    """
    image = Image.objects.get(pk=image_id)

    # Skip if a human already annotated this image, or if the
    # prerequisite feature-extraction step hasn't completed yet.
    if image.status.annotatedByHuman or not image.status.featuresExtracted:
        return 1

    # Get last robot for this source
    latestRobot = image.source.get_latest_robot()

    if latestRobot is None:
        return 1

    # Check if this image has been previously annotated by a robot.
    # If so, only re-classify when the latest robot is strictly newer
    # than the robot that last annotated this image.
    if image.status.annotatedByRobot:
        if latestRobot.version <= image.latest_robot_annotator.version:
            return 1

    ####### EVERYTHING OK: START THE CLASSIFICATION ##########
    logging.info('Classifying image{id} from source{sid}: {sname}'.format(id = image_id, sid = image.source_id, sname = image.source.name))

    # Build file-path args for the matlab classify script.
    featureFile = os.path.join(FEATURES_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".dat")
    labelFile = os.path.join(CLASSIFY_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".txt")

    task_helpers.coralnet_classify(
        featureFile=featureFile,
        modelFile=latestRobot.path_to_model,
        labelFile=labelFile,
        logFile=CV_LOG,
        errorLogfile=CLASSIFY_ERROR_LOG,
    )

    # The backend signals failure by creating the error log file.
    if os.path.isfile(CLASSIFY_ERROR_LOG):
        logging.info('ERROR classifying image{id} from source{sid}: {sname}'.format(id = image_id, sid = image.source_id, sname = image.source.name))
        mail_admins('CoralNet Backend Error', 'in Classify')
        return 0
    else:
        # Update image status: mark as robot-annotated and record
        # which robot version did the annotating.
        image.status.annotatedByRobot = True
        image.status.save()
        image.latest_robot_annotator = latestRobot
        image.save()

    ####### IMPORT CLASSIFICATION RESULT TO DATABASE ##########
    user = get_robot_user()

    # Get the label probabilities that we just generated
    label_probabilities = task_utils.get_label_probabilities_for_image(image_id)

    if len(label_probabilities) == 0:
        mail_admins('Classify error', 'Classification output for image{id} from source{sid}: {sname} was empty.'.format(id = image_id, sid = image.source_id, sname = image.source.name))

    # Go through each point and update/create the annotation as appropriate
    for point_number, probs in label_probabilities.iteritems():
        pt = Point.objects.get(image=image, point_number=point_number)

        # Pick the label with the highest probability score.
        probs_descending_order = sorted(probs, key=operator.itemgetter('score'), reverse=True)
        top_prob_label_code = probs_descending_order[0]['label']
        label = Label.objects.get(code=top_prob_label_code)

        # If there's an existing annotation for this point, get it.
        # Otherwise, create a new annotation.
        #
        # (Assumption: there's at most 1 Annotation per Point, never multiple.
        # If there are multiple, we'll get a MultipleObjectsReturned exception.)
        try:
            anno = Annotation.objects.get(image=image, point=pt)

        except Annotation.DoesNotExist:
            # No existing annotation. Create a new one.
            new_anno = Annotation(
                image=image, label=label, point=pt,
                user=user, robot_version=latestRobot, source=image.source
            )
            new_anno.save()

        else:
            # Got an existing annotation.
            if is_robot_user(anno.user):
                # It's an existing robot annotation. Update it as necessary.
                if anno.label.id != label.id:
                    anno.label = label
                    anno.robot_version = latestRobot
                    anno.save()

            # Else, it's an existing confirmed annotation, and we don't want
            # to overwrite it. So do nothing in this case.

    logging.info('Classified {npts} points in image{id} from source{sid}: {sname}'.format(npts = len(label_probabilities), id = image_id, sid = image.source_id, sname = image.source.name))
    return 1
예제 #12
0
파일: views.py 프로젝트: koudev/CoralNet
def visualize_source(request, source_id):
    """
    View for browsing through a source's images.

    Supports filtering via a search form, showing either whole-image
    thumbnails or per-label annotation patches, pagination, and batch
    deletion of the displayed images (admin permission required).
    """
    IMAGES_PER_PAGE = 20

    searchFormErrors = False
    source = get_object_or_404(Source, id=source_id)

    urlArgsStr = ''  # GET args in url format - for constructing prev page/next page links
    label = False
    imageArgs = dict()

    # Get image search filters, if any
    if request.GET:

        #form to select descriptors to sort images
        form = VisualizationSearchForm(source_id, request.GET)

        if form.is_valid():

            urlArgsStr = image_search_args_to_url_arg_str(form.cleaned_data)

            label = form.cleaned_data.pop('labels')
            imageArgs = image_search_args_to_queryset_args(form.cleaned_data, source)
        else:
            searchFormErrors = True

    else:
        form = VisualizationSearchForm(source_id)


    # Perform selected actions, if any, on the images previously shown.
    # Security check: to guard against forged POST data, make sure the
    # user actually has permission to the action form.
    if request.POST and request.user.has_perm(Source.PermTypes.ADMIN.code, source):

        actionForm = ImageBatchActionForm(request.POST)

        if actionForm.is_valid():
            if actionForm.cleaned_data['action'] == 'delete':
                actionFormImageArgs = simplejson.loads(actionForm.cleaned_data['searchKeys'])
                imagesToDelete = Image.objects.filter(source=source, **actionFormImageArgs)

                for img in imagesToDelete:
                    img.delete()

                # Note that img.delete() just removes the image from the
                # database.  But the image objects are still there in the
                # imagesToDelete list (they just don't have primary keys anymore).
                messages.success(request, 'The %d selected images have been deleted.' % len(imagesToDelete))
    else:
        actionForm = ImageBatchActionForm(initial={'searchKeys': simplejson.dumps(imageArgs)})


    # Get the search results (all of them, not just on this page)
    # (This must happen after processing the action form, so that
    # images can be deleted/modified before we get search results.)
    errors = []
    if searchFormErrors:
        errors.append("There were errors in the search parameters.")
        images = []
        showPatches = False
    else:
        #if user did not specify a label to generate patches for, assume they want to view whole images
        if not label:
            showPatches = False

            allSearchResults = Image.objects.filter(source=source, **imageArgs)
            if form.is_valid():

                try:
                    value = int(form.cleaned_data.pop('image_status'))
                except ValueError:
                    value = 0

                #All images wanted so just return
                if value:
                    #Else do check for robot classified source options
                    if source.enable_robot_classifier:
                        if value == 1:
                            allSearchResults = allSearchResults.filter(status__annotatedByHuman=False, status__annotatedByRobot=False)
                        elif value == 2:
                            allSearchResults = allSearchResults.filter(status__annotatedByHuman=False, status__annotatedByRobot=True)
                        else:
                            allSearchResults = allSearchResults.filter(status__annotatedByHuman=True)
                    #Else do check for only human annotated source options
                    else:
                        if value == 1:
                            allSearchResults = allSearchResults.filter(status__annotatedByHuman=False)
                        elif value == 2:
                            allSearchResults = allSearchResults.filter(status__annotatedByHuman=True)

            # Sort the images.
            # TODO: Stop duplicating this DB-specific extras query; put it in a separate function...
            # Also, despite the fact that we're dealing with images and not metadatas, selecting photo_date does indeed work.
            db_extra_select = {'metadata__year': 'YEAR(photo_date)'}  # YEAR() is MySQL only, PostgreSQL has different syntax

            sort_keys = ['metadata__'+k for k in (['year'] + source.get_value_field_list())]
            allSearchResults = allSearchResults.extra(select=db_extra_select)
            allSearchResults = allSearchResults.order_by(*sort_keys)

        else:
            #since user specified a label, generate patches to show instead of whole images
            showPatches = True
            patchArgs = dict([('image__'+k, imageArgs[k]) for k in imageArgs])

            #get all annotations for the source that contain the label
            try:
                annotator = int(form.cleaned_data.pop('annotator'))
            except ValueError:
                annotator = 2
            annotations = Annotation.objects.filter(source=source, label=label, **patchArgs).order_by('?')

            # Bug fix: QuerySet.filter()/exclude() return NEW querysets
            # rather than modifying in place, so the result must be
            # assigned back — otherwise the annotator filter is a no-op.
            if not annotator:
                annotations = annotations.exclude(user=get_robot_user())
            elif annotator == 1:
                annotations = annotations.filter(user=get_robot_user())

            # Placeholder the image patches with the annotation objects for now.
            # We'll actually get the patches when we know which page we're showing.
            allSearchResults = annotations

        if not allSearchResults:
            if request.GET:
                # No image results in this search
                errors.append("Sorry, no images matched your query")
            else:
                # No image results, and just got to the visualization page
                errors.append("Sorry, there are no images for this source yet. Please upload some.")

        paginator = Paginator(allSearchResults, IMAGES_PER_PAGE)

        # Make sure page request is an int. If not, deliver first page.
        try:
            page = int(request.GET.get('page', '1'))
        except ValueError:
            page = 1

        # If page request (9999) is out of range, deliver last page of results.
        try:
            images = paginator.page(page)
        except (EmptyPage, InvalidPage):
            images = paginator.page(paginator.num_pages)

        if showPatches:

            # Get an image-patch for each result on the page.
            # Earlier we placeholdered the image patches with the annotation objects,
            # so we're iterating over those annotations now.
            for index, annotation in enumerate(images.object_list):

                patchPath = "data/annotations/" + str(annotation.id) + ".jpg"

                images.object_list[index] = dict(
                    type="patches",
                    fullImage=annotation.image,
                    patchPath=patchPath,
                    row=annotation.point.row,
                    col=annotation.point.column,
                    pointNum=annotation.point.point_number,
                )

                generate_patch_if_doesnt_exist(patchPath, annotation)

        else:

            for index, image_obj in enumerate(images.object_list):

                images.object_list[index] = dict(
                    type="full_images",
                    image_obj=image_obj,
                )

    return render_to_response('visualization/visualize_source.html', {
        'errors': errors,
        'searchForm': form,
        'source': source,
        'images': images,
        'showPatches': showPatches,
        'searchParamsStr': urlArgsStr,
        'actionForm': actionForm,
        },
        context_instance=RequestContext(request)
    )
예제 #13
0
파일: views.py 프로젝트: DevangS/CoralNet
def label_main(request, label_id):
    """
    Main page for a particular label
    """
    label = get_object_or_404(Label, id=label_id)

    # All sources whose labelset includes this label, restricted to the
    # ones this user is allowed to see.
    sources_with_label = Source.objects.filter(labelset__labels=label).order_by('name')
    visible_sources_with_label = [
        source for source in sources_with_label
        if source.visible_to_user(request.user)
    ]

    # Tag each visible source as the user's own ('mine') or 'public',
    # then sort so the user's own sources come first.
    sources_of_user = Source.get_sources_of_user(request.user)

    source_types = [
        'mine' if source in sources_of_user else 'public'
        for source in visible_sources_with_label
    ]

    visible_sources_with_label = zip(source_types, visible_sources_with_label)
    visible_sources_with_label.sort(key=lambda pair: pair[0])  # 'mine' sorts before 'public'

    # Up to 5 random example annotations of this label, drawn from
    # public sources and excluding robot-made annotations.
    # TODO: don't hardcode the patch path
    example_annotations = Annotation.objects.filter(label=label, image__source__visibility=Source.VisibilityTypes.PUBLIC).exclude(user=get_robot_user()).order_by('?')[:5]

    patches = []
    for annotation in example_annotations:
        patches.append(dict(
            annotation=annotation,
            fullImage=annotation.image,
            source=annotation.image.source,
            patchPath="data/annotations/" + str(annotation.id) + ".jpg",
            row=annotation.point.row,
            col=annotation.point.column,
            pointNum=annotation.point.point_number,
        ))

    # Make sure the patch image files exist on disk before rendering.
    for patch in patches:
        generate_patch_if_doesnt_exist(patch['patchPath'], patch['annotation'])


    return render_to_response('annotations/label_main.html', {
        'label': label,
        'visible_sources_with_label': visible_sources_with_label,
        'patches': patches,
        },
        context_instance=RequestContext(request)
    )
예제 #14
0
파일: forms.py 프로젝트: DevangS/CoralNet
    def __init__(self, *args, **kwargs):
        image = kwargs.pop('image')
        user = kwargs.pop('user')
        show_machine_annotations = kwargs.pop('show_machine_annotations')
        super(AnnotationForm, self).__init__(*args, **kwargs)

        self.fields['image_id'] = CharField(
            widget=HiddenInput(),
            initial=str(image.id),
        )
        self.fields['user_id'] = CharField(
            widget=HiddenInput(),
            initial=str(user.id),
        )

        labelFieldMaxLength = Label._meta.get_field('code').max_length


        for point in Point.objects.filter(image=image).order_by('point_number'):

            try:
                if show_machine_annotations:
                    existingAnnotation = Annotation.objects.get(point=point)
                else:
                    existingAnnotation = Annotation.objects.exclude(user=get_robot_user()).get(point=point)
            except Annotation.DoesNotExist:
                existingAnnotation = None
            except MultipleObjectsReturned:
                existingAnnotation = None
                mail_admins('Multiple annotations returned for a point object', 'Multiple annotations returned for query: Annotations.objects.get(point=point) for Imageid:' + str(image.id) + ', pointid:' + str(point.id) + '. Please investigate.')

            if existingAnnotation:
                existingAnnoCode = existingAnnotation.label.code
                isRobotAnnotation = is_robot_user(existingAnnotation.user)
            else:
                existingAnnoCode = ''
                isRobotAnnotation = None

            pointNum = point.point_number

            # Create the text field for annotating a point with a label code.
            # label_1 for point 1, label_23 for point 23, etc.
            labelFieldName = 'label_' + str(pointNum)

            self.fields[labelFieldName] = CharField(
                widget=TextInput(attrs=dict(
                    size=6,
                    readonly='',
                )),
                max_length=labelFieldMaxLength,
                label=str(pointNum),
                required=False,
                initial=existingAnnoCode,
            )

            # Create a hidden field to indicate whether a point is robot-annotated or not.
            # robot_1 for point 1, robot_23 for point 23, etc.
            robotFieldName = 'robot_' + str(pointNum)

            self.fields[robotFieldName] = BooleanField(
                widget=HiddenInput(),
                required=False,
                initial=simplejson.dumps(isRobotAnnotation),
            )