def add_human_annotations(self, image_id, user=None):
    """
    Add human annotations to an image.
    :param user: The user who will be the annotator of these annotations.
        If not specified, defaults to the User of self.username.
    """
    source = Source.objects.get(pk=self.source_id)
    img = Image.objects.get(pk=image_id)
    points = Point.objects.filter(image=img)
    labels = source.labelset.labels.all()
    if user is None:
        user = User.objects.get(username=self.username)

    # For each point, pick a label randomly from the source's labelset.
    for pt in points:
        label = random.choice(labels)
        anno = Annotation(
            point=pt, image=img, source=source, user=user, label=label,
        )
        anno.save()
    img.status.annotatedByHuman = True
    img.status.save()
def annotation(request, session_id):
    # GET  => gather collections and render the UI.
    # POST => get annotation information and create a new annotation,
    #         or update an existing one.
    if not request.user.is_authenticated():
        return redirect('login')

    collections = Collection.objects.filter(creator=request.user).all()
    try:
        annotationsession = AnnotationSession.objects.get(pk=session_id)
        session_annotations = annotationsession.annotations.all().order_by(
            "datestamp")
    except AnnotationSession.DoesNotExist:
        return HttpResponse(status=404)

    if request.method == "GET":
        context = RequestContext(
            request, {
                'collections': collections,
                'annotationsession': annotationsession,
                'annotations': session_annotations
            })
        template = loader.get_template("annotations/annotate.html")
        return HttpResponse(template.render(context))
    elif request.method == "POST":
        # A new annotation or an old one?
        leftitem_id = request.POST.get("leftitem")
        rightitem_id = request.POST.get("rightitem")
        try:
            leftitem = Item.objects.get(pk=leftitem_id)
            rightitem = Item.objects.get(pk=rightitem_id)
        except Item.DoesNotExist:
            return HttpResponse(status=404)

        leftitem_state = request.POST.get("leftitem_state")
        rightitem_state = request.POST.get("rightitem_state")
        annotationtext = request.POST.get("annotationtext", u"")
        try:
            previous = Annotation.objects.get(leftitem=leftitem,
                                              rightitem=rightitem,
                                              session=annotationsession)
            previous.annotation = annotationtext
            previous.leftitem_state = leftitem_state
            previous.rightitem_state = rightitem_state
            previous.save()
            # Double quotes, so the response body is valid JSON.
            return HttpResponse('{{"annotation_id": "{0}"}}'.format(
                str(previous.id)), mimetype="application/json")
        except Annotation.DoesNotExist:
            new_anno = Annotation(leftitem=leftitem,
                                  rightitem=rightitem,
                                  session=annotationsession,
                                  leftitem_state=leftitem_state,
                                  rightitem_state=rightitem_state,
                                  creator=request.user)
            new_anno.save()
            return HttpResponse('{{"annotation_id": "{0}"}}'.format(
                str(new_anno.id)), mimetype="application/json")
def create_annotation(self, body='', paragraph=None):
    annotation = Annotation()
    annotation.content_type = ContentType.objects.get(model='blogcontent',
                                                      app_label="blogging")
    annotation.object_id = str(1)
    annotation.body = body
    annotation.paragraph = paragraph
    annotation.author = User.objects.get(id=1)
    annotation.save()
def update(request):
    """
    Update the database with new annotations from the AudioAnnotator
    Flash interface.

    The Flash interface will output a string that looks like:
    "1,62637,119960,test\n2,137802,175384,test2\n"
    Each new line is a new annotation. The first field is the annotation
    id, the second and third are the start and end times, in milliseconds,
    and the fourth is the label.
    """
    if request.method == "POST":
        annotations = request.POST.get('annotations', '')
        recording = Recording.objects.get(
            pk=request.POST.get('recording_id', ''))

        # Take the string of annotations from the AudioAnnotator and
        # parse it into annotations.
        for annotation in annotations.split("\n"):
            if annotation == '':
                break
            fields = annotation.split(",")
            ann_id = fields[0]
            ann_start_ms = int(fields[1])
            ann_end_ms = int(fields[2])
            ann_label = fields[3]
            # if ann_label is None:
            #     Annotation.delete(ann_id)
            #     break
            if ann_id == "0":
                # An id of "0" marks a brand-new annotation.
                ann = Annotation(start_time_ms=ann_start_ms,
                                 end_time_ms=ann_end_ms,
                                 label=ann_label,
                                 recording=recording)
                ann.save()
            else:
                ann = Annotation.objects.get(pk=ann_id)
                ann.start_time_ms = ann_start_ms
                ann.end_time_ms = ann_end_ms
                ann.label = ann_label
                print ann
                ann.save()

        # Return to the AudioAnnotator the latest collection of
        # annotations for this recording.
        annotations = Annotation.objects.all().filter(recording=recording)
        output = ""
        for annotation in annotations:
            output += annotation.to_string + "\n"
        logging.info("***output=" + output)
        return HttpResponse(output)
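# A standalone sketch of the wire format described in the docstring above;
# parse_annotation_lines is a hypothetical helper, not part of the view.
def parse_annotation_lines(raw):
    """Yield (id, start_ms, end_ms, label) tuples from the Flash output."""
    for line in raw.split("\n"):
        if line == '':
            break
        ann_id, start_ms, end_ms, label = line.split(",")
        yield ann_id, int(start_ms), int(end_ms), label

# list(parse_annotation_lines("1,62637,119960,test\n2,137802,175384,test2\n"))
# => [('1', 62637, 119960, 'test'), ('2', 137802, 175384, 'test2')]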
def create_annotation_boundingbox_conflicts(threshold=0.9):
    # Find all images with at least two bounding boxes in the same category.
    bb_results = Annotation.boundingbox_objects.values('image_id', 'category_id')\
        .annotate(category_id_count=Count('category_id'))\
        .filter(category_id_count__gte=2)
    for bb in bb_results:
        image = Image.objects.get(id=bb['image_id'])
        potentially_conflicted_bbs = image.annotation_set.filter(
            Q(category_id=bb['category_id']) & ~Q(x_min__isnull=True)).all()
        # Compare each item with the rest.
        for a, b in itertools.combinations(potentially_conflicted_bbs, 2):
            distance = Annotation.boundingbox_distance(
                {
                    'left': a.x_min,
                    'width': a.x_max - a.x_min,
                    'height': a.y_max - a.y_min,
                    'top': image.height - a.y_max,
                },
                {
                    'left': b.x_min,
                    'width': b.x_max - b.x_min,
                    'height': b.y_max - b.y_min,
                    'top': image.height - b.y_max,
                })
            if distance >= threshold:
                Conflict.objects.create(
                    reason=Conflict.REASON_AN_BB_DUP,
                    affected_ids=[a.id, b.id],
                    message=('overlapping distance %s is greater than %s.'
                             % (distance, threshold)))
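# Annotation.boundingbox_distance is not shown in this collection. Judging
# from the call site (a conflict is raised when the value is >= 0.9), it
# likely behaves like an overlap ratio such as intersection-over-union.
# A minimal sketch under that assumption, using the same
# {left, top, width, height} dicts as above; the iou name is hypothetical:
def iou(box_a, box_b):
    """Intersection-over-union of two left/top/width/height boxes."""
    x1 = max(box_a['left'], box_b['left'])
    y1 = max(box_a['top'], box_b['top'])
    x2 = min(box_a['left'] + box_a['width'], box_b['left'] + box_b['width'])
    y2 = min(box_a['top'] + box_a['height'], box_b['top'] + box_b['height'])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    union = (box_a['width'] * box_a['height']
             + box_b['width'] * box_b['height'] - inter)
    return inter / float(union) if union else 0.0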
def test_create_serializer_class(self):
    annotation = Annotation()
    annotation.content_type = ContentType.objects.get(model='blogcontent',
                                                      app_label="blogging")
    annotation.object_id = str(1)
    annotation.body = "This is a test annotation"
    annotation.paragraph = "1"
    annotation.author = User.objects.get(id=1)
    annotation.save()

    obj = AnnotationSerializer(annotation)
    # print(obj.data)
    json = JSONRenderer().render(obj.data)
def import_archived_annotations(source_id, anndict, with_labels=True):
    source = Source.objects.get(pk=source_id)  # Fetch the relevant source.
    imported_user = get_imported_user()  # The imported user.

    # Grab all images that have names in the .csv file.
    images = source.get_all_images().filter(
        metadata__name__in=list(anndict.keys()))

    for image in images:
        # Start by removing annotations and points for this image.
        for ann in Annotation.objects.filter(image=image):
            ann.delete()
        for point in Point.objects.filter(image=image):
            point.delete()

        # Next, set the image metadata to IMPORTED.
        image.point_generation_method = PointGen.args_to_db_format(
            point_generation_type=PointGen.Types.IMPORTED,
            imported_number_of_points=len(anndict[image.metadata.name])
        )
        image.save()
        image.status.hasRandomPoints = True
        image.status.annotatedByHuman = with_labels
        image.status.save()
        image.after_annotation_area_change()  # Set the backend status correctly.

        # Iterate over this image's annotations and save them.
        for (point_num, (row, col, code)) in enumerate(anndict[image.metadata.name]):
            # Save the Point in the database.
            point = Point(row=row, column=col, point_number=point_num + 1,
                          image=image)
            point.save()
            # And save the Annotation.
            if with_labels:
                label = Label.objects.filter(code=code)[0]
                annotation = Annotation(user=imported_user, point=point,
                                        image=image, label=label,
                                        source=source)
                annotation.save()
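# The anndict argument maps image (metadata) names to lists of
# (row, column, label_code) tuples, as the unpacking above assumes.
# The file name and label codes below are hypothetical:
anndict = {
    'Shore1_Reef5_2008.jpg': [(100, 150, 'CCA'), (220, 340, 'Sand')],
}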
def test_get_coords_from_segmentation_real(self):
    data = [
        [481.08, 2.98, 509.26, 38.74, 563.43, 56.07, 599.19, 84.24,
         615.44, 125.42, 640.0, 157.92, 640.0, 110.25, 637.11, 2.98,
         486.5, 1.9],
        [96.43, 88.58, 268.71, 5.15, 114.85, 0.81, 149.53, 49.57,
         100.77, 75.58, 89.93, 87.49],
        [539.59, 460.23, 633.86, 402.8, 633.86, 474.31, 535.26, 467.81],
        [539.59, 460.23, 633.86, 402.8, 633.86, 474.31, 535.26, 467.81],
        [539.59, 460.23, 633.86, 402.8, 633.86, 474.31, 535.26, 467.81],
    ]
    x_min, x_max, y_min, y_max = Annotation.get_coords_from_segmentation(data)
    self.assertEqual(x_min, 89.93)
    self.assertEqual(x_max, 640.0)
    self.assertEqual(y_min, 0.81)
    self.assertEqual(y_max, 474.31)
def post(self, request):
    result = json.loads(json.dumps(request.POST))
    annotation_object = Annotation()
    for _, _, q in self.qualities:
        # setattr is the idiomatic way to set a model field by name.
        setattr(annotation_object, q, result[q])
    annotation_object.audio_file = self.get_context_data().get(
        'audio_file_name')
    annotation_object.user = self.request.user
    annotation_object.description = result.get('description')
    annotation_object.save()
    # How many have you annotated?
    user_annotations = Annotation.objects.filter(user=self.request.user)
    return render(request, 'annotation_submit.html',
                  {'count': len(user_annotations)})
def test_get_coords_from_segmentation(self):
    x_min, x_max, y_min, y_max = Annotation.get_coords_from_segmentation(
        [10, 10, 20, 10, 20, 20, 10, 20])
    self.assertEqual(x_min, 10)
    self.assertEqual(x_max, 20)
    self.assertEqual(y_min, 10)
    self.assertEqual(y_max, 20)
def test_get_coords_from_segmentation_third_dimension(self):
    x_min, x_max, y_min, y_max = Annotation.get_coords_from_segmentation(
        [[[10, 10, 20, 10, 20, 20, 10, 20],
          [10, 10, 30, 10, 30, 30, 10, 30]]])
    self.assertEqual(x_min, 10)
    self.assertEqual(x_max, 30)
    self.assertEqual(y_min, 10)
    self.assertEqual(y_max, 30)
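# get_coords_from_segmentation itself is not part of this collection; the
# following is a minimal sketch consistent with the tests above, assuming
# coordinates alternate x, y within each flat list and that polygon lists
# may be nested to any depth. The standalone bounding_coords name is
# hypothetical; on the model it would be a staticmethod of Annotation.
def bounding_coords(segmentation):
    def flat_polygons(node):
        # Recurse until we reach a flat [x0, y0, x1, y1, ...] list.
        if node and isinstance(node[0], (list, tuple)):
            for child in node:
                for poly in flat_polygons(child):
                    yield poly
        else:
            yield node

    xs, ys = [], []
    for poly in flat_polygons(segmentation):
        xs.extend(poly[0::2])  # x values sit at even indices
        ys.extend(poly[1::2])  # y values sit at odd indices
    return min(xs), max(xs), min(ys), max(ys)

# bounding_coords([10, 10, 20, 10, 20, 20, 10, 20]) == (10, 20, 10, 20)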
def image_upload_process(imageFiles, imageOptionsForm, annotationOptionsForm,
                         source, currentUser, annoFile):
    """
    Helper method for the image upload view and the image+annotation
    import view.
    """
    uploadedImages = []
    duplicates = 0
    imagesUploaded = 0
    annotationsImported = 0
    importedUser = get_imported_user()

    dupeOption = imageOptionsForm.cleaned_data['skip_or_replace_duplicates']

    annotationData = None
    if annoFile:
        try:
            annotationData = annotations_file_to_python(annoFile, source)
        except FileContentError as errorDetail:
            return dict(error=True,
                        message='Error reading labels file %s. %s' % (annoFile.name, errorDetail),
                        )

    for imageFile in imageFiles:
        filename = imageFile.name
        metadataDict = None
        metadata = Metadata(height_in_cm=source.image_height_in_cm)

        if imageOptionsForm.cleaned_data['specify_metadata'] == 'filenames':
            try:
                metadataDict = filename_to_metadata(filename, source)
            # Filename parse error.
            # TODO: check for validity of the file type and contents, too.
            except (ValueError, StopIteration):
                return dict(error=True,
                            message='Upload failed - Error when parsing the filename %s for metadata.' % filename,
                            )

            # Detect duplicate images and handle them.
            dupe = find_dupe_image(source, **metadataDict)
            if dupe:
                duplicates += 1
                if dupeOption == 'skip':
                    # Skip uploading this file.
                    continue
                elif dupeOption == 'replace':
                    # Proceed uploading this file, and delete the dupe.
                    dupe.delete()

            # Set the metadata.
            valueDict = get_location_value_objs(source, metadataDict['values'],
                                                createNewValues=True)
            photoDate = datetime.date(year=int(metadataDict['year']),
                                      month=int(metadataDict['month']),
                                      day=int(metadataDict['day']))
            metadata.name = metadataDict['name']
            metadata.photo_date = photoDate
            for key, value in valueDict.iteritems():
                setattr(metadata, key, value)
        else:
            metadata.name = filename

        # Image + annotation import form.
        # Assumes we got the images' metadata (from filenames or otherwise).
        if annotationData:
            pointsOnlyOption = annotationOptionsForm.cleaned_data['points_only']

            # Use the location values and the year to build a string
            # identifier for the image, such as:
            # Shore1;Reef5;...;2008
            imageIdentifier = get_image_identifier(metadataDict['values'],
                                                   metadataDict['year'])

            # Use the identifier as the index into the annotation file's data.
            if not annotationData.has_key(imageIdentifier):
                return dict(error=True,
                            message='%s seems to have no annotations for the image file %s, which has the following keys:\n%s' % (
                                annoFile.name, imageFile.name,
                                imageIdentifier.replace(';', ' '))
                            )

            imageAnnotations = annotationData[imageIdentifier]

            status = ImageStatus()
            status.save()

            metadata.annotation_area = AnnotationAreaUtils.IMPORTED_STR
            metadata.save()

            img = Image(original_file=imageFile,
                        uploaded_by=currentUser,
                        point_generation_method=PointGen.args_to_db_format(
                            point_generation_type=PointGen.Types.IMPORTED,
                            imported_number_of_points=len(imageAnnotations)
                        ),
                        metadata=metadata,
                        source=source,
                        status=status,
                        )
            img.save()

            # Iterate over this image's annotations and save them.
            pointNum = 1
            for anno in imageAnnotations:
                # Save the Point in the database.
                point = Point(row=anno['row'], column=anno['col'],
                              point_number=pointNum, image=img)
                point.save()

                if not pointsOnlyOption:
                    label = Label.objects.filter(code=anno['label'])[0]
                    # Save the Annotation in the database, marking the
                    # annotations as imported.
                    annotation = Annotation(user=importedUser, point=point,
                                            image=img, label=label,
                                            source=source)
                    annotation.save()
                    annotationsImported += 1

                pointNum += 1

            img.status.hasRandomPoints = True
            if not pointsOnlyOption:
                img.status.annotatedByHuman = True
            img.status.save()

        # Image upload form, no annotations.
        else:
            status = ImageStatus()
            status.save()

            metadata.annotation_area = source.image_annotation_area
            metadata.save()

            # Save the image into the DB.
            img = Image(original_file=imageFile,
                        uploaded_by=currentUser,
                        point_generation_method=source.default_point_generation_method,
                        metadata=metadata,
                        source=source,
                        status=status,
                        )
            img.save()

            # Generate and save points.
            generate_points(img)

        # Up to 5 uploaded images will be shown upon successful upload.
        # Prepend to the list, so the most recent image comes first.
        uploadedImages.insert(0, img)
        if len(uploadedImages) > 5:
            uploadedImages = uploadedImages[:5]

        imagesUploaded += 1

    # Construct the success message.
    success_message = image_upload_success_message(
        num_images_uploaded=imagesUploaded,
        num_dupes=duplicates,
        dupe_option=dupeOption,
        num_annotations=annotationsImported,
    )

    return dict(error=False,
                uploadedImages=uploadedImages,
                message=success_message,
                )
def helper_classify_does_not_overwrite_manual_annotations(self, annotator_user):
    """
    Helper function for the tests that follow.
    """
    # Take at least (min number for training) images.
    # Preprocess, feature extract, and add human annotations to
    # the features.
    for img in Image.objects.filter(source__pk=self.source_id):
        preprocess_image(img.id)
        make_features(img.id)
        self.add_human_annotations(img.id)
        add_labels_to_features(img.id)

    # Create a robot.
    result = train_robot(self.source_id)
    self.assertTrue(result == 1)

    # Upload a new image.
    img_id = self.upload_image('006_2012-06-28_color-grid-006.png')[0]

    # Preprocess and feature extract.
    preprocess_image(img_id)
    make_features(img_id)

    # Add annotations.
    source = Source.objects.get(pk=self.source_id)
    img = Image.objects.get(pk=img_id)
    points = Point.objects.filter(image=img)
    labels = source.labelset.labels.all()

    # For odd-numbered points, make an annotation by picking a
    # label randomly from the source's labelset.
    # Leave the even-numbered points alone.
    # (Assumption: the test source has at least 2 points per image.)
    for pt in points:
        if pt.point_number % 2 == 0:
            continue
        label = random.choice(labels)
        anno = Annotation(
            point=pt, image=img, source=source, user=annotator_user,
            label=label,
        )
        anno.save()
    img.status.save()

    # Get those annotations (again, only odd-numbered points).
    num_points = Point.objects.filter(image__pk=img_id).count()
    manual_annotations = dict()

    for point_num in range(1, num_points + 1, 2):
        label_id = Annotation.objects.get(
            image__pk=img_id, point__point_number=point_num).label.id
        manual_annotations[point_num] = label_id

    # Try to Classify.
    result = classify_image(img_id)

    # Shouldn't throw an exception.
    self.assertTrue(result == 1)
    self.assertEqual(Image.objects.get(pk=img_id).status.annotatedByRobot,
                     True)

    # Check the Annotations.
    for point_num in range(1, num_points + 1):
        anno = Annotation.objects.get(
            image__pk=img_id, point__point_number=point_num)
        label_id = anno.label.id

        if point_num % 2 == 0:
            # Even; should be robot annotated.
            self.assertEqual(anno.user.id, get_robot_user().id)
        else:
            # Odd; should be manual (and the same as before).
            self.assertEqual(label_id, manual_annotations[point_num])
            self.assertEqual(anno.user.id, annotator_user.id)

        if settings.UNIT_TEST_VERBOSITY >= 1:
            print "Point {num} | {username} | {label_id}".format(
                num=point_num,
                username=anno.user.username,
                label_id=label_id,
            )
def ajax_save_annotations(request, annotationForm):
    """
    Called via Ajax from the annotation tool form, if the user clicked
    the "Save Annotations" button.

    Takes: the annotation form field names and values, serialized with
        jQuery's serializeArray()
    Does: saves the annotations in the database
    Returns: false if successful, an error string if there was a problem
    """
    # TODO: just use request.POST instead of the annotationForm parameter.
    formDict = dict([(d['name'], d['value']) for d in annotationForm])

    image = Image.objects.get(pk=formDict['image_id'])
    user = User.objects.get(pk=formDict['user_id'])
    source = image.source
    sourceLabels = source.labelset.labels.all()

    # Sanity checks.
    if user != request.user:
        return simplejson.dumps(dict(error="User id error"))
    if not user.has_perm(Source.PermTypes.EDIT.code, image.source):
        return simplejson.dumps(dict(error="Image id error"))

    # Get stuff from the DB in advance; this should save time.
    pointsList = list(Point.objects.filter(image=image))
    points = dict([(p.point_number, p) for p in pointsList])

    annotationsList = list(Annotation.objects.filter(image=image,
                                                     source=source))
    annotations = dict([(a.point_id, a) for a in annotationsList])

    for name, value in formDict.iteritems():
        if name.startswith('label_'):
            # Get this annotation's point.
            pointNum = name[len('label_'):]  # The part after 'label_'.
            point = points[int(pointNum)]

            # Does the form field have a non-human-confirmed robot
            # annotation?
            isFormRobotAnnotation = simplejson.loads(
                formDict['robot_' + pointNum])

            # Get the label that the form field value refers to.
            # Anticipate errors, even if we plan to check input with JS.
            labelCode = value
            if labelCode == '':
                label = None
            else:
                labels = Label.objects.filter(code=labelCode)
                if len(labels) == 0:
                    return simplejson.dumps(dict(
                        error="No label with code %s." % labelCode))

                label = labels[0]
                if label not in sourceLabels:
                    return simplejson.dumps(dict(
                        error="The labelset has no label with code %s." % labelCode))

            # An annotation of this point number exists in the database.
            if annotations.has_key(point.id):
                anno = annotations[point.id]

                # Label field is now blank.
                # We're not allowing label deletions, so don't do anything
                # in this case.
                if label is None:
                    pass

                # Label was robot annotated, and then the human user
                # confirmed or changed it.
                elif is_robot_user(anno.user) and not isFormRobotAnnotation:
                    anno.label = label
                    anno.user = user
                    anno.save()

                # Label was otherwise changed.
                elif label != anno.label:
                    anno.label = label
                    anno.user = user
                    anno.save()

            # No annotation of this point number in the database yet.
            else:
                if label is not None:
                    newAnno = Annotation(point=point, user=user, image=image,
                                         source=source, label=label)
                    newAnno.save()

    # Are all points human annotated?
    all_done = image_annotation_all_done(image)

    # Update the image status, if needed.
    if image.status.annotatedByHuman:
        image.after_completed_annotations_change()
    if image.status.annotatedByHuman != all_done:
        image.status.annotatedByHuman = all_done
        image.status.save()

    if all_done:
        # Need simplejson.dumps() to convert the Python True to a JS true.
        return simplejson.dumps(dict(all_done=True))
    else:
        return dict()
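# The serializeArray() payload the view above expects: a list of
# {'name': ..., 'value': ...} pairs carrying image_id, user_id, and paired
# label_N / robot_N fields per point. The values below are hypothetical:
annotationForm = [
    {'name': 'image_id', 'value': '42'},
    {'name': 'user_id', 'value': '7'},
    {'name': 'label_1', 'value': 'CCA'},
    {'name': 'robot_1', 'value': 'false'},
]
formDict = dict([(d['name'], d['value']) for d in annotationForm])
# formDict == {'image_id': '42', 'user_id': '7',
#              'label_1': 'CCA', 'robot_1': 'false'}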
def image_upload_process(imageFile, imageOptionsForm,
                         annotation_dict_id,
                         csv_dict_id, metadata_import_form_class,
                         annotation_options_form,
                         source, currentUser):

    is_uploading_points_or_annotations = annotation_options_form.cleaned_data['is_uploading_points_or_annotations']

    filename = imageFile.name
    metadata_dict = None
    metadata_obj = Metadata(height_in_cm=source.image_height_in_cm)

    if imageOptionsForm.cleaned_data['specify_metadata'] == 'filenames':

        filename_check_result = check_image_filename(filename, source)
        filename_status = filename_check_result['status']

        if filename_status == 'error':
            # This case should never happen if the pre-upload
            # status checking is doing its job, but just in case...
            return dict(
                status=filename_status,
                message=u"{m}".format(m=filename_check_result['message']),
                link=None,
                title=None,
            )

        # Set the metadata.
        metadata_dict = filename_check_result['metadata_dict']

        value_dict = get_location_value_objs(source, metadata_dict['values'],
                                             createNewValues=True)
        photo_date = datetime.date(
            year=int(metadata_dict['year']),
            month=int(metadata_dict['month']),
            day=int(metadata_dict['day'])
        )

        metadata_obj.name = metadata_dict['name']
        metadata_obj.photo_date = photo_date

        for key, value in value_dict.iteritems():
            setattr(metadata_obj, key, value)

    elif imageOptionsForm.cleaned_data['specify_metadata'] == 'csv':

        if not csv_dict_id:
            return dict(
                status='error',
                message=u"{m}".format(m="CSV file was not uploaded."),
                link=None,
                title=None,
            )

        csv_dict_filename = os.path.join(
            settings.SHELVED_ANNOTATIONS_DIR,
            'csv_source{source_id}_{dict_id}.db'.format(
                source_id=source.id,
                dict_id=csv_dict_id,
            ),
        )

        # Corner case: the specified shelved annotation file doesn't exist.
        # Perhaps the file was created a while ago and has been pruned since,
        # or perhaps there is a bug.
        if not os.path.isfile(csv_dict_filename):
            return dict(
                status='error',
                message="CSV file could not be found - if you provided the .csv file a while ago, maybe it just timed out. Please retry the upload.",
                link=None,
                title=None,
            )

        csv_dict = shelve.open(csv_dict_filename)

        # Index into the csv_dict with the filename. The str() is to handle
        # the case where the filename is a unicode object instead of a str;
        # unicode objects can't index into dicts.
        filename_str = str(filename)

        if filename_str in csv_dict:

            # There is CSV metadata for this file.

            metadata_dict = csv_dict[str(filename)]
            csv_dict.close()

            # The reason this uses metadata_import_form_class instead of
            # importing MetadataImportForm is that I'm too lazy to deal with
            # the circular-import implications of the latter solution right
            # now. -Stephen
            metadata_import_form = metadata_import_form_class(
                source.id, True, metadata_dict,
            )

            if not metadata_import_form.is_valid():
                return dict(
                    status='error',
                    message="Unknown error with the CSV metadata.",
                    link=None,
                    title=None,
                )

            fields = ['photo_date', 'value1', 'value2', 'value3', 'value4',
                      'value5', 'height_in_cm', 'latitude', 'longitude',
                      'depth', 'camera', 'photographer', 'water_quality',
                      'strobes', 'framing', 'balance']
            for field in fields:
                if not field in metadata_import_form.fields:
                    # A location value field that's not in this form.
                    continue

                value = metadata_import_form.cleaned_data[field]
                # Check for a non-empty value; we don't want empty values to
                # override default values that we've already set on the
                # metadata_obj.
                if value:
                    setattr(metadata_obj, field, value)

        else:

            # No CSV metadata for this file.

            csv_dict.close()

        metadata_obj.name = filename

    else:

        # Not specifying any metadata at upload time.
        metadata_obj.name = filename

    image_annotations = None
    has_points_or_annotations = False

    if is_uploading_points_or_annotations:

        # Corner case: somehow, we're uploading with points+annotations and
        # without a checked annotation file specified.
        # This probably indicates a bug.
        if not annotation_dict_id:
            return dict(
                status='error',
                message=u"{m}".format(m=str_consts.UPLOAD_ANNOTATIONS_ON_AND_NO_ANNOTATION_DICT_ERROR_STR),
                link=None,
                title=None,
            )

        annotation_dict_filename = os.path.join(
            settings.SHELVED_ANNOTATIONS_DIR,
            'source{source_id}_{dict_id}'.format(
                source_id=source.id,
                dict_id=annotation_dict_id,
            ),
        )

        # Corner case: the specified shelved annotation file doesn't exist.
        # Perhaps the file was created a while ago and has been pruned since,
        # or perhaps there is a bug.
        if not os.path.isfile(annotation_dict_filename):
            return dict(
                status='error',
                message="Annotations could not be found - if you provided the .txt file a while ago, maybe it just timed out. Please retry the upload.",
                link=None,
                title=None,
            )

        # Use the location values and the year to build a string identifier
        # for the image, such as:
        # Shore1;Reef5;...;2008
        # Convert to a string (instead of a unicode string) for the shelve
        # key lookup.
        image_identifier = str(get_image_identifier(metadata_dict['values'],
                                                    metadata_dict['year']))

        annotation_dict = shelve.open(annotation_dict_filename)

        if annotation_dict.has_key(image_identifier):
            image_annotations = annotation_dict[image_identifier]
            has_points_or_annotations = True

        annotation_dict.close()

    if has_points_or_annotations:

        # Image upload with points/annotations.

        is_uploading_annotations_not_just_points = annotation_options_form.cleaned_data['is_uploading_annotations_not_just_points']
        imported_user = get_imported_user()

        status = ImageStatus()
        status.save()

        metadata_obj.annotation_area = AnnotationAreaUtils.IMPORTED_STR
        metadata_obj.save()

        img = Image(
            original_file=imageFile,
            uploaded_by=currentUser,
            point_generation_method=PointGen.args_to_db_format(
                point_generation_type=PointGen.Types.IMPORTED,
                imported_number_of_points=len(image_annotations)
            ),
            metadata=metadata_obj,
            source=source,
            status=status,
        )
        img.save()

        # Iterate over this image's annotations and save them.
        point_num = 0
        for anno in image_annotations:

            # Save the Point in the database.
            point_num += 1
            point = Point(row=anno['row'], column=anno['col'],
                          point_number=point_num, image=img)
            point.save()

            if is_uploading_annotations_not_just_points:
                label = Label.objects.filter(code=anno['label'])[0]

                # Save the Annotation in the database, marking the
                # annotations as imported.
                annotation = Annotation(user=imported_user, point=point,
                                        image=img, label=label,
                                        source=source)
                annotation.save()

        img.status.hasRandomPoints = True
        if is_uploading_annotations_not_just_points:
            img.status.annotatedByHuman = True
        img.status.save()

    else:

        # Image upload, no points/annotations.

        image_status = ImageStatus()
        image_status.save()

        metadata_obj.annotation_area = source.image_annotation_area
        metadata_obj.save()

        # Save the image into the DB.
        img = Image(original_file=imageFile,
                    uploaded_by=currentUser,
                    point_generation_method=source.default_point_generation_method,
                    metadata=metadata_obj,
                    source=source,
                    status=image_status,
                    )
        img.save()

        # Generate and save points.
        generate_points(img)

    success_message = "Uploaded"

    return dict(
        status='ok',
        message=success_message,
        link=reverse('image_detail', args=[img.id]),
        title=img.get_image_element_title(),
        image_id=img.id,
    )
def test_get_coords_from_segmentation_another_real(self):
    '''some big data caused trouble, so we should take care'''
    data = [
        [[425.09, 315.19, 422.69, 307.98, 411.88, 302.58, 406.47, 301.37, 406.09, 306.68, 417.45, 311.23, 423.41, 315.2, 412.05, 313.78, 406.37, 307.25, 402.68, 303.27, 401.54, 299.01, 397.85, 300.15, 392.74, 297.31, 388.47, 297.59, 387.91, 299.3, 383.65, 295.04, 381.09, 297.88, 387.34, 303.84, 393.3, 302.71, 399.27, 304.69, 404.1, 306.4, 410.35, 313.22, 410.35, 314.07, 402.68, 313.22, 387.34, 310.66, 382.79, 303.84, 378.53, 299.3, 377.4, 296.17, 373.14, 295.6, 372.28, 299.01, 378.82, 301.28, 386.2, 310.66, 381.09, 310.66, 376.54, 307.53, 368.02, 301.0, 370.23, 299.11, 371.04, 296.15, 366.87, 293.19, 364.05, 293.06, 363.25, 296.55, 367.28, 299.51, 368.62, 302.73, 380.58, 310.93, 371.58, 305.69, 366.07, 301.12, 360.69, 296.28, 362.04, 293.19, 360.56, 291.18, 354.24, 291.18, 353.03, 288.36, 349.54, 288.09, 346.72, 288.89, 347.79, 291.04, 351.02, 293.6, 348.73, 297.36, 350.75, 298.43, 355.99, 296.82, 360.42, 297.23, 368.22, 303.67, 377.36, 309.72, 379.24, 310.93, 374.53, 309.32, 371.17, 309.99, 366.47, 308.11, 364.59, 307.17, 360.02, 308.65, 354.65, 305.56, 349.0, 300.32, 341.21, 293.19, 343.23, 292.39, 342.02, 289.03, 338.93, 287.69, 335.7, 288.36, 333.15, 288.22, 335.84, 290.91, 341.61, 294.94, 349.41, 301.66, 354.11, 306.23, 348.47, 305.15, 346.72, 304.62, 339.87, 298.17, 330.46, 289.97, 326.03, 286.21, 331.4, 287.55, 333.55, 287.55, 332.07, 284.46, 328.98, 283.39, 326.16, 283.92, 324.95, 284.86, 323.07, 282.98, 323.2, 280.16, 319.71, 277.88, 316.22, 278.28, 315.81, 281.24, 311.65, 281.37, 307.89, 284.46, 308.96, 286.48, 310.44, 287.01, 314.61, 285.54, 321.19, 285.54, 325.35, 288.09, 333.55, 294.67, 343.9, 304.08, 338.25, 303.94, 335.3, 301.26, 331.13, 298.97, 324.01, 290.91, 318.77, 285.94, 316.76, 286.34, 328.98, 298.3, 330.19, 299.91, 326.97, 299.38, 325.76, 298.43, 324.41, 299.91, 320.38, 296.55, 315.81, 291.72, 310.98, 287.28, 309.1, 286.88, 308.02, 285.0, 303.05, 281.24, 299.15, 277.34, 296.33, 274.11, 294.32, 272.9, 290.96, 273.44, 289.08, 275.86, 289.88, 278.28, 294.05, 276.94, 297.81, 279.22, 301.17, 281.64, 308.96, 288.22, 314.61, 293.46, 317.43, 295.88, 311.65, 295.88, 311.11, 294.27, 309.63, 294.27, 305.87, 291.58, 300.63, 286.07, 292.7, 278.41, 292.57, 277.88, 290.69, 278.28, 296.87, 284.06, 304.39, 291.58, 301.57, 291.31, 297.81, 291.18, 296.47, 291.85, 295.79, 289.43, 294.05, 289.57, 289.75, 286.48, 286.52, 284.33, 279.8, 278.55, 273.22, 273.17, 268.25, 268.34, 265.02, 265.25, 262.74, 263.1, 261.66, 261.75, 259.38, 263.36, 261.66, 266.59, 265.02, 269.95, 269.19, 271.96, 277.92, 279.76, 286.12, 285.8, 282.36, 285.8, 280.07, 285.8, 276.85, 282.71, 266.5, 273.31, 258.04, 266.32, 253.87, 261.89, 251.72, 259.87, 247.56, 258.39, 243.93, 259.87, 242.72, 261.89, 245.41, 264.04, 248.9, 263.9, 252.93, 262.83, 258.04, 267.53, 266.9, 275.32, 276.31, 283.52, 274.29, 283.92, 271.74, 281.77, 267.44, 279.76, 262.34, 278.68, 261.93, 277.34, 257.5, 274.92, 251.86, 270.62, 243.66, 263.36, 234.79, 255.97, 235.19, 254.23, 232.64, 251.54, 230.49, 251.41, 228.34, 249.12, 224.71, 248.99, 224.85, 253.29, 228.34, 255.17, 231.3, 256.38, 236.81, 259.74, 252.39, 272.77, 254.54, 274.92, 249.57, 274.38, 244.73, 273.31, 241.64, 274.52, 239.09, 271.7, 232.77, 265.78, 223.5, 257.72, 217.86, 251.81, 218.26, 249.12, 215.84, 247.78, 214.1, 247.91, 211.68, 245.9, 207.78, 246.03, 206.57, 243.88, 205.5, 243.34, 200.26, 248.05, 201.06, 248.18, 204.56, 246.97, 206.57, 246.97, 206.57, 248.05,
          201.87, 250.6, 202.41, 251.81, 205.77, 252.21, 209.53, 251.14, 212.75, 251.67, 214.64, 253.96, 216.52, 253.29, 224.44, 260.54, 240.3, 275.32, 238.02, 274.92, 231.43, 269.81, 231.7, 269.14, 226.59, 267.4, 223.64, 266.72, 222.29, 267.4, 218.67, 266.99, 214.64, 268.87, 209.26, 264.3, 206.84, 262.42, 204.69, 261.48, 204.56, 259.6, 201.87, 259.06, 199.72, 260.54, 189.51, 253.56, 185.61, 250.06, 182.92, 246.43, 179.43, 242.54, 177.01, 243.61, 176.21, 247.51, 179.43, 250.06, 175.53, 251.0, 172.44, 251.14, 166.93, 250.47, 160.08, 251.41, 158.33, 251.41, 152.29, 246.97, 143.96, 243.21, 145.03, 241.06, 147.32, 238.37, 149.47, 237.57, 149.87, 237.57, 148.12, 236.22, 146.38, 236.22, 144.09, 237.3, 142.34, 238.51, 141.27, 241.46, 139.79, 240.79, 139.93, 237.97, 142.34, 233.8, 143.55, 222.38, 145.03, 218.62, 147.72, 215.26, 147.05, 213.65, 145.17, 214.19, 142.08, 217.95, 140.6, 221.71, 139.52, 221.84, 139.39, 216.87, 138.18, 216.6, 137.24, 219.56, 136.43, 224.53, 134.82, 227.89, 133.61, 233.13, 131.46, 238.37, 130.92, 240.12, 126.89, 235.15, 122.73, 231.92, 118.56, 231.38, 113.99, 231.38, 110.63, 233.8, 109.69, 235.42, 109.42, 238.91, 107.14, 241.46, 104.72, 241.73, 100.69, 240.92, 99.48, 239.45, 95.18, 234.34, 95.32, 230.85, 98.81, 220.9, 101.23, 212.04, 99.62, 209.62, 97.87, 208.54, 95.05, 208.27, 92.76, 210.15, 91.69, 211.77, 87.52, 219.02, 86.45, 219.83, 80.0, 209.75, 79.25, 203.9, 76.34, 201.71, 70.87, 196.24, 66.4, 191.36, 64.5, 190.67, 62.6, 190.32, 59.67, 193.08, 58.81, 195.67, 60.36, 200.15, 66.4, 213.08, 64.38, 228.74, 62.98, 227.34, 57.83, 210.49, 52.8, 201.78, 47.65, 192.77, 44.43, 190.19, 39.93, 188.91, 36.71, 191.48, 35.42, 197.27, 39.61, 204.35, 41.86, 206.93, 38.32, 207.89, 37.35, 208.54, 36.07, 212.4, 38.32, 219.48, 44.76, 232.67, 38.32, 227.84, 34.78, 227.84, 39.93, 230.74, 44.43, 236.53, 41.22, 235.89, 38.0, 235.25, 36.07, 237.82, 40.25, 237.82, 43.79, 242.97, 40.57, 243.61, 36.39, 243.61, 33.49, 242.65, 32.53, 240.07, 30.92, 241.04, 34.78, 245.87, 37.03, 246.19, 32.85, 248.76, 29.31, 257.13, 30.27, 264.53, 32.13, 256.04, 33.85, 251.39, 39.03, 248.8, 42.82, 251.04, 46.62, 248.46, 49.03, 250.53, 46.44, 255.53, 44.55, 261.22, 45.24, 270.87, 48.68, 278.12, 52.65, 281.91, 50.89, 270.44, 50.89, 255.47, 59.78, 242.84, 70.07, 241.9, 79.43, 243.31, 80.83, 248.45, 78.49, 252.2, 82.49, 268.71, 77.98, 281.26, 76.05, 285.44, 75.41, 293.49, 72.83, 297.03, 70.26, 305.07, 66.15, 322.3, 63.0, 329.66, 67.77, 327.66, 73.79, 327.66, 82.56, 334.93, 85.82, 337.43, 88.33, 336.93, 93.59, 336.93, 103.62, 331.67, 108.14, 342.95, 109.14, 353.73, 116.09, 374.43, 109.54, 375.36, 98.31, 369.28, 90.82, 364.13, 89.89, 365.07, 96.44, 370.68, 110.94, 378.17, 118.43, 376.77, 119.75, 389.41, 126.57, 381.17, 134.23, 375.78, 137.08, 372.65, 133.1, 359.3, 129.41, 343.96, 121.45, 332.6, 123.44, 327.2, 131.39, 345.38, 136.79, 363.28, 136.51, 350.49, 138.78, 357.31, 141.9, 350.49, 137.36, 338.28, 124.58, 318.11, 123.44, 316.69, 126.0, 316.12, 136.51, 326.92, 146.17, 341.69, 148.15, 337.71, 140.2, 326.63, 129.97, 314.42, 127.42, 312.43, 134.8, 303.62, 138.5, 297.66, 137.95, 293.55, 137.29, 290.01, 136.4, 289.12, 137.07, 283.15, 136.18, 280.71, 141.05, 282.93, 147.91, 283.81, 153.67, 284.03, 158.09, 281.82, 166.51, 288.68, 174.92, 291.12, 185.76, 291.56, 191.29, 289.12, 199.04, 296.65, 208.78, 298.64, 215.2, 298.2, 219.18, 296.87, 225.6, 302.62, 232.68, 304.4, 240.21, 304.4, 243.08, 302.85, 249.95, 309.04, 254.59, 310.37, 261.68, 310.59, 263.45, 309.49, 275.57, 317.1, 288.37, 325.77, 292.09, 327.84, 293.43, 329.74, 296.53,
          328.85, 297.86, 330.63, 296.09, 334.83, 296.97, 336.6, 300.29, 335.05, 301.18, 341.91, 302.95, 345.23, 306.27, 343.68, 306.49, 337.27, 309.81, 341.91, 310.25, 348.33, 316.89, 346.34, 320.65, 349.66, 320.88, 352.76, 318.88, 354.97, 321.54, 359.62, 326.41, 356.52, 327.52, 349.44, 323.97, 344.35, 320.43, 341.91, 316.23, 342.13, 313.57, 337.71, 311.8, 334.61, 306.49, 331.73, 301.62, 325.09, 298.52, 320.67, 295.2, 318.23, 288.56, 318.89, 279.49, 314.91, 289.23, 318.01, 296.97, 317.79, 298.74, 319.56, 303.17, 322.44, 306.27, 323.54, 322.43, 341.03, 338.9, 359.21, 340.04, 362.9, 341.74, 366.03, 345.31, 366.79, 347.79, 373.4, 348.24, 365.64, 345.67, 360.81, 341.81, 358.56, 337.95, 357.92, 326.68, 343.76, 312.85, 328.31, 308.72, 324.18, 313.37, 324.18, 315.58, 327.06, 321.78, 328.38, 338.03, 346.3, 352.67, 360.2, 354.57, 361.93, 355.43, 365.03, 357.68, 366.41, 357.68, 370.2, 360.61, 376.58, 360.09, 371.41, 360.43, 368.82, 362.5, 366.58, 361.12, 361.75, 357.85, 360.03, 354.23, 359.51, 350.6, 356.23, 343.36, 348.99, 335.43, 341.4, 323.87, 327.95, 327.84, 327.26, 329.74, 329.33, 331.98, 329.68, 344.05, 342.27, 354.23, 353.3, 361.99, 361.24, 365.61, 366.93, 365.95, 371.07, 368.37, 373.31, 366.99, 378.48, 364.57, 380.55, 370.21, 377.58, 371.09, 371.82, 372.64, 370.27, 372.2, 367.17, 369.99, 364.96, 368.0, 363.85, 366.23, 363.85, 362.68, 358.54, 353.61, 349.25, 341.21, 335.97, 335.68, 331.1, 341.66, 330.21, 357.81, 347.25, 375.3, 366.29, 376.19, 368.94, 379.06, 370.49, 376.41, 373.15, 375.74, 375.14, 379.28, 377.13, 377.96, 382.0, 381.5, 373.81, 382.16, 368.5, 380.83, 364.74, 376.85, 363.85, 366.89, 353.67, 348.08, 334.2, 348.3, 332.42, 352.5, 331.54, 355.38, 334.42, 369.1, 349.02, 385.04, 366.51, 387.25, 370.05, 388.8, 373.15, 387.03, 377.58, 391.01, 372.49, 392.12, 369.17, 389.91, 365.62, 386.81, 364.96, 359.36, 335.08, 363.79, 332.65, 367.33, 337.07, 395.22, 367.39, 395.44, 370.27, 396.77, 370.94, 395.0, 375.14, 392.34, 378.9, 399.2, 375.81, 400.75, 371.6, 401.2, 367.62, 396.99, 365.62, 394.56, 364.07, 369.99, 336.19, 374.19, 335.97, 378.4, 337.52, 406.29, 364.52, 405.4, 366.29, 408.06, 369.17, 407.39, 372.26, 405.4, 376.03, 406.29, 377.35, 412.93, 370.05, 410.71, 364.3, 406.95, 362.75, 380.39, 336.63, 385.26, 334.86, 413.59, 364.74, 415.14, 366.73, 416.25, 372.71, 415.8, 374.7, 420.67, 370.94, 418.46, 364.52, 415.14, 362.3, 388.36, 336.41, 393.89, 335.97, 399.2, 339.29, 424.43, 362.97, 423.7, 366.33, 425.08, 368.23, 425.6, 363.06, 425.6, 360.47, 423.01, 360.3, 412.84, 350.12, 405.08, 343.4, 400.94, 339.26, 398.35, 336.84, 402.66, 336.84, 405.94, 337.36, 417.67, 349.26, 425.43, 356.85, 424.74, 353.57, 412.49, 341.33, 408.53, 337.02, 411.98, 336.84, 416.98, 341.16, 426.0, 348.57, 424.74, 344.09, 416.63, 337.53, 420.77, 336.5, 425.77, 336.33, 425.95, 319.43, 425.43, 314.6, 425.26, 314.6],
         [279.29, 269.61, 277.17, 269.59, 277.88, 267.57, 279.42, 266.51, 281.32, 267.22, 283.1, 267.1, 284.87, 268.17, 284.99, 270.3, 286.42, 271.96, 289.38, 274.45, 288.91, 275.52, 286.42, 273.38, 283.57, 271.37, 281.79, 271.25, 280.49, 270.18],
         [311.32, 280.03, 311.09, 278.25, 310.97, 275.88, 307.77, 274.57, 304.56, 274.57, 303.02, 276.35, 305.04, 278.25, 307.88, 279.08, 309.19, 280.5, 310.38, 282.52, 311.8, 281.21, 311.32, 279.43],
         [329.47, 356.59, 330.99, 363.26, 330.74, 367.02, 327.73, 370.53, 323.22, 374.54, 327.48, 373.54, 331.74, 369.52, 331.24, 378.05, 334.01, 367.4, 335.71, 361.71, 333.88, 357.45, 332.71, 354.32, 330.95, 353.74, 329.19, 356.47]]
    ]
    x_min, x_max, y_min, y_max = Annotation.get_coords_from_segmentation(data)
    self.assertTrue(x_min > 0)
    self.assertTrue(x_max > 0)
    self.assertTrue(y_min > 0)
    self.assertTrue(y_max > 0)
def create(self, validated_data):
    print "In create"
    print validated_data
    annotation = Annotation()
    annotation.author = validated_data.get('author')
    annotation.body = validated_data.get('body')
    annotation.content_type = validated_data.get('content_type')
    annotation.object_id = validated_data.get('object_id')
    annotation.paragraph = validated_data.get('paragraph')
    annotation.privacy = validated_data.get('privacy')
    annotation.privacy_override = validated_data.get('privacy_override',
                                                     False)

    # Get the row from ContentType which has this content_type.
    content_object = ContentType.objects.get_for_id(annotation.content_type.id)
    annotation.content_object = content_object.model_class().objects.get(
        id=annotation.object_id)
    print annotation.content_object
    annotation.save()

    print validated_data.get('shared_with')
    for user in validated_data.get('shared_with'):
        sharing = AnnotationShareMap(annotation=annotation, user=user)
        sharing.save()
    return annotation
def Classify(image_id):
    image = Image.objects.get(pk=image_id)

    # If annotated by a human, there is no need to re-classify.
    if image.status.annotatedByHuman:
        print 'Classify: Image nr ' + str(image_id) + ' is annotated by the human operator, aborting'
        return

    # Make sure that the previous step is complete.
    if not image.status.featuresExtracted:
        print 'Classify: Features not extracted for image id {id}, can not proceed'.format(id=image_id)
        return

    # Get the latest robot for this source.
    latestRobot = image.source.get_latest_robot()

    if latestRobot is None:
        print 'Classify: No robots exist for the source, {src}, of image id {id}. Aborting.'.format(src=image.source, id=image_id)
        return

    # Check if this image has been previously annotated by a robot.
    if image.status.annotatedByRobot:
        # Now, compare this version number to the latest_robot_annotator
        # field for the image.
        if not (latestRobot.version > image.latest_robot_annotator.version):
            print 'Image {id} is already annotated by the latest robot version, {ver}, for source, {src}'.format(id=image_id, ver=latestRobot.version, src=image.source)
            return

    ####### EVERYTHING OK: START THE CLASSIFICATION ##########
    # Update the image status.
    image.status.annotatedByRobot = True
    image.status.save()
    image.latest_robot_annotator = latestRobot
    image.save()

    print 'Start classify image id {id}'.format(id=image_id)

    # Build args for the matlab script.
    featureFile = os.path.join(FEATURES_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".dat")
    labelFile = os.path.join(CLASSIFY_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".txt")

    task_helpers.coralnet_classify(
        featureFile=featureFile,
        modelFile=latestRobot.path_to_model,
        labelFile=labelFile,
        logFile=CV_LOG,
        errorLogfile=CLASSIFY_ERROR_LOG,
    )

    # Get the algorithm user object.
    user = get_robot_user()

    # Open the labelFile and rowColFile to process the labels.
    rowColFile = os.path.join(FEATURES_DIR, str(image_id) + "_rowCol.txt")
    label_file = open(labelFile, 'r')
    row_file = open(rowColFile, 'r')

    for line in row_file:
        # words[0] is the row, words[1] is the column.
        words = line.split(',')

        # Get the label object based on the label id the algorithm specified.
        label_id = label_file.readline()
        label_id = label_id.replace('\n', '')  # replace() returns a new string.
        label = Label.objects.filter(id=label_id)

        # Get the point object(s) that have that row and column.
        # If there's more than one such point, add annotations to all of
        # these points the first time we see this row+col, and don't do
        # anything on subsequent times (filtering with annotation=None
        # accomplishes this).
        points = Point.objects.filter(image=image, row=words[0],
                                      column=words[1], annotation=None)

        for point in points:
            # Create the annotation object and save it.
            Ann = Annotation.objects.filter(point=point, image=image)
            if len(Ann) > 0 and not is_robot_user(Ann[0].user):
                # If this is an imported or human annotation, we don't want
                # to overwrite it, so continue.
                continue
            annotation = Annotation(image=image, label=label[0], point=point,
                                    user=user, robot_version=latestRobot,
                                    source=image.source)
            annotation.save()

    print 'Finished classification of image id {id}'.format(id=image_id)

    label_file.close()
    row_file.close()
def classify_image(image_id):
    image = Image.objects.get(pk=image_id)

    # If annotated by a human, or if the previous step is not complete,
    # there is nothing to do.
    if image.status.annotatedByHuman or not image.status.featuresExtracted:
        return 1

    # Get the last robot for this source.
    latestRobot = image.source.get_latest_robot()
    if latestRobot is None:
        return 1

    # Check if this image has been previously annotated by a robot.
    if image.status.annotatedByRobot:
        # Now, compare this version number to the latest_robot_annotator
        # field for the image.
        if not (latestRobot.version > image.latest_robot_annotator.version):
            return 1

    ####### EVERYTHING OK: START THE CLASSIFICATION ##########
    logging.info('Classifying image{id} from source{sid}: {sname}'.format(
        id=image_id, sid=image.source_id, sname=image.source.name))

    # Build args for the matlab script.
    featureFile = os.path.join(FEATURES_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".dat")
    labelFile = os.path.join(CLASSIFY_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".txt")

    task_helpers.coralnet_classify(
        featureFile=featureFile,
        modelFile=latestRobot.path_to_model,
        labelFile=labelFile,
        logFile=CV_LOG,
        errorLogfile=CLASSIFY_ERROR_LOG,
    )

    if os.path.isfile(CLASSIFY_ERROR_LOG):
        logging.info('ERROR classifying image{id} from source{sid}: {sname}'.format(
            id=image_id, sid=image.source_id, sname=image.source.name))
        mail_admins('CoralNet Backend Error', 'in Classify')
        return 0
    else:
        # Update the image status.
        image.status.annotatedByRobot = True
        image.status.save()
        image.latest_robot_annotator = latestRobot
        image.save()

    ####### IMPORT CLASSIFICATION RESULT TO DATABASE ##########
    user = get_robot_user()

    # Get the label probabilities that we just generated.
    label_probabilities = task_utils.get_label_probabilities_for_image(image_id)

    if len(label_probabilities) == 0:
        mail_admins('Classify error', 'Classification output for image{id} from source{sid}: {sname} was empty.'.format(
            id=image_id, sid=image.source_id, sname=image.source.name))

    # Go through each point and update/create the annotation as appropriate.
    for point_number, probs in label_probabilities.iteritems():
        pt = Point.objects.get(image=image, point_number=point_number)

        probs_descending_order = sorted(probs,
                                        key=operator.itemgetter('score'),
                                        reverse=True)
        top_prob_label_code = probs_descending_order[0]['label']
        label = Label.objects.get(code=top_prob_label_code)

        # If there's an existing annotation for this point, get it.
        # Otherwise, create a new annotation.
        #
        # (Assumption: there's at most 1 Annotation per Point, never
        # multiple. If there are multiple, we'll get a
        # MultipleObjectsReturned exception.)
        try:
            anno = Annotation.objects.get(image=image, point=pt)
        except Annotation.DoesNotExist:
            # No existing annotation. Create a new one.
            new_anno = Annotation(
                image=image, label=label, point=pt, user=user,
                robot_version=latestRobot, source=image.source
            )
            new_anno.save()
        else:
            # Got an existing annotation.
            if is_robot_user(anno.user):
                # It's an existing robot annotation. Update it as necessary.
                if anno.label.id != label.id:
                    anno.label = label
                    anno.robot_version = latestRobot
                    anno.save()
            # Else, it's an existing confirmed annotation, and we don't
            # want to overwrite it. So do nothing in this case.

    logging.info('Classified {npts} points in image{id} from source{sid}: {sname}'.format(
        npts=len(label_probabilities), id=image_id, sid=image.source_id,
        sname=image.source.name))

    return 1
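# The shape of label_probabilities, as the loop above assumes: one entry
# per point number, each a list of {'label': code, 'score': p} dicts.
# The label codes and scores below are hypothetical.
import operator

label_probabilities = {
    1: [{'label': 'CCA', 'score': 0.78}, {'label': 'Sand', 'score': 0.22}],
    2: [{'label': 'Sand', 'score': 0.64}, {'label': 'CCA', 'score': 0.36}],
}
for point_number, probs in sorted(label_probabilities.items()):
    top = sorted(probs, key=operator.itemgetter('score'), reverse=True)[0]
    print point_number, top['label']  # prints: 1 CCA, then 2 Sand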