def add_human_annotations(self, image_id, user=None):
    """
    Add human annotations to an image.
    :param user: The user who will be the annotator of these annotations.
        If not specified, default to the User of self.username.
    """
    source = Source.objects.get(pk=self.source_id)
    img = Image.objects.get(pk=image_id)
    points = Point.objects.filter(image=img)
    labels = source.labelset.labels.all()

    if user is None:
        user = User.objects.get(username=self.username)

    # For each point, pick a label randomly from the source's labelset.
    for pt in points:
        label = random.choice(labels)
        anno = Annotation(
            point=pt,
            image=img,
            source=source,
            user=user,
            label=label,
        )
        anno.save()

    img.status.annotatedByHuman = True
    img.status.save()
def create(self, validated_data):
    print "In create"
    print validated_data

    annotation = Annotation()
    annotation.author = validated_data.get('author')
    annotation.body = validated_data.get('body')
    annotation.content_type = validated_data.get('content_type')
    annotation.object_id = validated_data.get('object_id')
    annotation.paragraph = validated_data.get('paragraph')
    annotation.privacy = validated_data.get('privacy')
    annotation.privacy_override = validated_data.get('privacy_override', False)

    # Get the ContentType row matching content_type, then resolve the actual
    # annotated object through its model class.
    content_object = ContentType.objects.get_for_id(annotation.content_type.id)
    annotation.content_object = content_object.model_class().objects.get(
        id=annotation.object_id)
    print annotation.content_object

    annotation.save()

    print validated_data.get('shared_with')
    for user in validated_data.get('shared_with'):
        sharing = AnnotationShareMap(annotation=annotation, user=user)
        sharing.save()

    return annotation
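# Hedged sketch (illustration only, not part of the serializer above): the
# generic-lookup pattern that create() relies on, isolated into a helper. The
# helper name is made up; only ContentType.objects.get_for_id() and
# model_class() are real Django contenttypes APIs.
def resolve_content_object(content_type_id, object_id):
    from django.contrib.contenttypes.models import ContentType

    content_type = ContentType.objects.get_for_id(content_type_id)
    # model_class() returns the concrete model class; a normal ORM lookup then
    # fetches the instance the annotation points at.
    return content_type.model_class().objects.get(id=object_id)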
def annotation(request, session_id):
    # GET => gather collections and render UI
    # POST => get annotation information and create new/update annotation.
    if not request.user.is_authenticated():
        return redirect('login')

    collections = Collection.objects.filter(creator=request.user).all()
    try:
        annotationsession = AnnotationSession.objects.get(pk=session_id)
        session_annotations = annotationsession.annotations.all().order_by(
            "datestamp")
    except AnnotationSession.DoesNotExist:
        return HttpResponse(status=404)

    if request.method == "GET":
        context = RequestContext(
            request, {
                'collections': collections,
                'annotationsession': annotationsession,
                'annotations': session_annotations
            })
        template = loader.get_template("annotations/annotate.html")
        return HttpResponse(template.render(context))
    elif request.method == "POST":
        # A new annotation or an old one?
        leftitem_id = request.POST.get("leftitem")
        rightitem_id = request.POST.get("rightitem")
        try:
            leftitem = Item.objects.get(pk=leftitem_id)
            rightitem = Item.objects.get(pk=rightitem_id)
        except Item.DoesNotExist:
            print("Hmmm it says these don't exist")
            return HttpResponse(status=404)

        leftitem_state = request.POST.get("leftitem_state")
        rightitem_state = request.POST.get("rightitem_state")
        annotationtext = request.POST.get("annotationtext", u"")

        try:
            previous = Annotation.objects.get(leftitem=leftitem,
                                              rightitem=rightitem,
                                              session=annotationsession)
            previous.annotation = annotationtext
            previous.leftitem_state = leftitem_state
            previous.rightitem_state = rightitem_state
            previous.save()
            return HttpResponse("{{'annotation_id':'{0}'}}".format(
                str(previous.id)),
                mimetype="application/json")
        except Annotation.DoesNotExist:
            new_anno = Annotation(leftitem=leftitem,
                                  rightitem=rightitem,
                                  session=annotationsession,
                                  leftitem_state=leftitem_state,
                                  rightitem_state=rightitem_state,
                                  creator=request.user)
            new_anno.save()
            return HttpResponse("{{'annotation_id':'{0}'}}".format(
                str(new_anno.id)),
                mimetype="application/json")
def create_annotation(self, body='', paragraph=None):
    annotation = Annotation()
    annotation.content_type = ContentType.objects.get(model='blogcontent',
                                                      app_label="blogging")
    annotation.object_id = str(1)
    annotation.body = body
    annotation.paragraph = paragraph
    annotation.author = User.objects.get(id=1)
    annotation.save()
def update(request):
    """
    Update the database with new annotations from the AudioAnnotator
    Flash interface.

    The Flash interface will output a string that looks like:
    "1,62637,119960,test\n2,137802,175384,test2\n"
    Each new line is a new annotation. The first field is the annotation id,
    the second and third are the start and end times, in milliseconds, and
    the fourth is the label.
    """
    if request.method == "POST":
        annotations = request.POST.get('annotations', '')
        recording = Recording.objects.get(
            pk=request.POST.get('recording_id', ''))

        # Take the string of annotations from the AudioAnnotator and
        # parse it into annotations.
        for annotation in annotations.split("\n"):
            if annotation == '':
                break
            fields = annotation.split(",")
            ann_id = fields[0]
            ann_start_ms = int(fields[1])
            ann_end_ms = int(fields[2])
            ann_label = fields[3]

            # if (ann_label == None):
            #     Annotation.delete(ann_id)
            #     break

            if ann_id == "0":
                # An id of "0" marks a brand-new annotation.
                ann = Annotation(start_time_ms=ann_start_ms,
                                 end_time_ms=ann_end_ms,
                                 label=ann_label,
                                 recording=recording)
                ann.save()
            else:
                ann = Annotation.objects.get(pk=ann_id)
                ann.start_time_ms = ann_start_ms
                ann.end_time_ms = ann_end_ms
                ann.label = ann_label
                print ann
                ann.save()

        # Return back to the AudioAnnotator the latest collection of
        # annotations for this recording.
        annotations = Annotation.objects.all().filter(recording=recording)
        output = ""
        for annotation in annotations:
            output += annotation.to_string + "\n"
        logging.info("***output=" + output)
        return HttpResponse(output)
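# Hedged sketch (illustration only, not part of the view above): parsing the
# AudioAnnotator payload format documented in update()'s docstring into
# (id, start_ms, end_ms, label) tuples. The helper name is made up.
def parse_audioannotator_payload(raw):
    parsed = []
    for line in raw.split("\n"):
        if line == "":
            continue
        ann_id, start_ms, end_ms, label = line.split(",")
        parsed.append((ann_id, int(start_ms), int(end_ms), label))
    return parsed

# Example from the docstring:
# parse_audioannotator_payload("1,62637,119960,test\n2,137802,175384,test2\n")
# => [('1', 62637, 119960, 'test'), ('2', 137802, 175384, 'test2')]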
def test_create_serializer_class(self):
    annotation = Annotation()
    annotation.content_type = ContentType.objects.get(model='blogcontent',
                                                      app_label="blogging")
    annotation.object_id = str(1)
    annotation.body = "This is a test annotation"
    annotation.paragraph = "1"
    annotation.author = User.objects.get(id=1)
    annotation.save()

    obj = AnnotationSerializer(annotation)
    #print(obj.data)
    json = JSONRenderer().render(obj.data)
def post(self, request):
    result = json.loads(json.dumps(request.POST))

    annotation_object = Annotation()
    # Copy each rated quality from the submitted form onto the model.
    for _, _, q in self.qualities:
        setattr(annotation_object, q, result[q])
    annotation_object.audio_file = self.get_context_data().get(
        'audio_file_name')
    annotation_object.user = self.request.user
    annotation_object.description = result.get('description')
    annotation_object.save()

    # How many have you annotated?
    user_annotations = Annotation.objects.filter(user=self.request.user)
    return render(request, 'annotation_submit.html',
                  {'count': len(user_annotations)})
def import_archived_annotations(source_id, anndict, with_labels=True):

    source = Source.objects.get(pk=source_id)  # Fetch the relevant source.
    imported_user = get_imported_user()  # The "imported" user.

    # Grab all images whose names appear in the .csv file.
    images = source.get_all_images().filter(
        metadata__name__in=list(anndict.keys()))

    for image in images:
        # Start by removing annotations and points for this image.
        for ann in Annotation.objects.filter(image=image):
            ann.delete()
        for point in Point.objects.filter(image=image):
            point.delete()

        # Next, set image metadata to IMPORTED.
        image.point_generation_method = PointGen.args_to_db_format(
            point_generation_type=PointGen.Types.IMPORTED,
            imported_number_of_points=len(anndict[image.metadata.name])
        )
        image.save()
        image.status.hasRandomPoints = True
        image.status.annotatedByHuman = with_labels
        image.status.save()
        image.after_annotation_area_change()  # Set the backend status correctly.

        # Iterate over this image's annotations and save them.
        for (point_num, (row, col, code)) in enumerate(anndict[image.metadata.name]):

            # Save the Point in the database.
            point = Point(row=row, column=col, point_number=point_num + 1,
                          image=image)
            point.save()

            # ... and save the Annotation.
            if with_labels:
                label = Label.objects.filter(code=code)[0]
                annotation = Annotation(user=imported_user, point=point,
                                        image=image, label=label,
                                        source=source)
                annotation.save()
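# Hedged sketch (assumption for illustration): the shape of the anndict that
# import_archived_annotations() expects -- image names mapped to lists of
# (row, col, label-code) tuples, one tuple per point. The filenames and label
# codes below are invented.
example_anndict = {
    'reef_2008_001.jpg': [
        (150, 320, 'Porit'),   # row, column, label code for point 1
        (410, 95, 'CCA'),      # point 2
    ],
    'reef_2008_002.jpg': [
        (88, 640, 'Sand'),
    ],
}
# import_archived_annotations(source_id=1, anndict=example_anndict)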
def Classify(image_id):
    image = Image.objects.get(pk=image_id)

    # If annotated by Human, no need to re-classify.
    if image.status.annotatedByHuman:
        print 'Classify: Image nr ' + str(image_id) + ' is annotated by the human operator, aborting'
        return

    # Make sure that the previous step is complete.
    if not image.status.featuresExtracted:
        print 'Classify: Features not extracted for image id {id}, can not proceed'.format(id=image_id)
        return

    # Get all robots for this source.
    latestRobot = image.source.get_latest_robot()

    if latestRobot is None:
        print 'Classify: No robots exist for the source, {src}, of image id {id}. Aborting.'.format(src=image.source, id=image_id)
        return

    # Check if this image has been previously annotated by a robot.
    if image.status.annotatedByRobot:
        # Compare this version number to the latest_robot_annotator field for the image.
        if not (latestRobot.version > image.latest_robot_annotator.version):
            print 'Image {id} is already annotated by the latest robot version, {ver}, for source, {src}'.format(id=image_id, ver=latestRobot.version, src=image.source)
            return

    ####### EVERYTHING OK: START THE CLASSIFICATION ##########
    # Update image status.
    image.status.annotatedByRobot = True
    image.status.save()
    image.latest_robot_annotator = latestRobot
    image.save()

    print 'Start classify image id {id}'.format(id=image_id)

    # Build args for the matlab script.
    featureFile = os.path.join(FEATURES_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".dat")
    # Get the source id for this file.
    labelFile = os.path.join(CLASSIFY_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".txt")

    task_helpers.coralnet_classify(
        featureFile=featureFile,
        modelFile=latestRobot.path_to_model,
        labelFile=labelFile,
        logFile=CV_LOG,
        errorLogfile=CLASSIFY_ERROR_LOG,
    )

    # Get the algorithm user object.
    user = get_robot_user()

    # Open the labelFile and rowColFile to process labels.
    rowColFile = os.path.join(FEATURES_DIR, str(image_id) + "_rowCol.txt")
    label_file = open(labelFile, 'r')
    row_file = open(rowColFile, 'r')

    for line in row_file:
        # words[0] is row, words[1] is column.
        words = line.split(',')

        # Get the label object based on the label id the algorithm specified.
        label_id = label_file.readline()
        label_id = label_id.replace('\n', '')
        label = Label.objects.filter(id=label_id)

        # Get the point object(s) that have that row and column.
        # If there's more than one such point, add annotations to all of
        # these points the first time we see this row+col, and don't do
        # anything on subsequent times (filtering with annotation=None
        # accomplishes this).
        points = Point.objects.filter(image=image, row=words[0],
                                      column=words[1], annotation=None)

        for point in points:
            # Create the annotation object and save it.
            Ann = Annotation.objects.filter(point=point, image=image)
            if len(Ann) > 0 and not is_robot_user(Ann[0].user):
                # If this is an imported or human annotation, we don't want
                # to overwrite it, so continue.
                continue
            annotation = Annotation(image=image, label=label[0], point=point,
                                    user=user, robot_version=latestRobot,
                                    source=image.source)
            annotation.save()

    print 'Finished classification of image id {id}'.format(id=image_id)

    label_file.close()
    row_file.close()
def helper_classify_does_not_overwrite_manual_annotations(self, annotator_user):
    """
    Helper function for the tests that follow.
    """
    # Take at least (min number for training) images.
    # Preprocess, feature extract, and add human annotations to
    # the features.
    for img in Image.objects.filter(source__pk=self.source_id):
        preprocess_image(img.id)
        make_features(img.id)
        self.add_human_annotations(img.id)
        add_labels_to_features(img.id)

    # Create a robot.
    result = train_robot(self.source_id)
    self.assertTrue(result == 1)

    # Upload a new image.
    img_id = self.upload_image('006_2012-06-28_color-grid-006.png')[0]

    # Preprocess and feature extract.
    preprocess_image(img_id)
    make_features(img_id)

    # Add annotations.
    source = Source.objects.get(pk=self.source_id)
    img = Image.objects.get(pk=img_id)
    points = Point.objects.filter(image=img)
    labels = source.labelset.labels.all()

    # For odd-numbered points, make an annotation by picking a
    # label randomly from the source's labelset.
    # Leave the even-numbered points alone.
    # (Assumption: the test source has at least 2 points per image)
    for pt in points:
        if pt.point_number % 2 == 0:
            continue
        label = random.choice(labels)
        anno = Annotation(
            point=pt,
            image=img,
            source=source,
            user=annotator_user,
            label=label,
        )
        anno.save()
    img.status.save()

    # Get those annotations (again, only odd-numbered points).
    num_points = Point.objects.filter(image__pk=img_id).count()
    manual_annotations = dict()

    for point_num in range(1, num_points+1, 2):
        label_id = Annotation.objects.get(
            image__pk=img_id, point__point_number=point_num).label.id
        manual_annotations[point_num] = label_id

    # Try to Classify.
    result = classify_image(img_id)

    # Shouldn't throw exception.
    self.assertTrue(result == 1)
    self.assertEqual(Image.objects.get(pk=img_id).status.annotatedByRobot, True)

    # Check the Annotations.
    for point_num in range(1, num_points+1):
        anno = Annotation.objects.get(image__pk=img_id,
                                      point__point_number=point_num)
        label_id = anno.label.id

        if point_num % 2 == 0:
            # Even; should be robot.
            self.assertEqual(anno.user.id, get_robot_user().id)
        else:
            # Odd; should be manual (and same as before).
            self.assertEqual(label_id, manual_annotations[point_num])
            self.assertEqual(anno.user.id, annotator_user.id)

        if settings.UNIT_TEST_VERBOSITY >= 1:
            print "Point {num} | {username} | {label_id}".format(
                num=point_num,
                username=anno.user.username,
                label_id=label_id,
            )
def image_upload_process(imageFiles, imageOptionsForm, annotationOptionsForm,
                         source, currentUser, annoFile):
    """
    Helper method for the image upload view and the image+annotation
    import view.
    """
    uploadedImages = []
    duplicates = 0
    imagesUploaded = 0
    annotationsImported = 0
    importedUser = get_imported_user()

    dupeOption = imageOptionsForm.cleaned_data['skip_or_replace_duplicates']

    annotationData = None
    if annoFile:
        try:
            annotationData = annotations_file_to_python(annoFile, source)
        except FileContentError as errorDetail:
            return dict(error=True,
                        message='Error reading labels file %s. %s' % (annoFile.name, errorDetail),
                        )

    for imageFile in imageFiles:

        filename = imageFile.name
        metadataDict = None
        metadata = Metadata(height_in_cm=source.image_height_in_cm)

        if imageOptionsForm.cleaned_data['specify_metadata'] == 'filenames':

            try:
                metadataDict = filename_to_metadata(filename, source)

            # Filename parse error.
            # TODO: check for validity of the file type and contents, too.
            except (ValueError, StopIteration):
                return dict(error=True,
                            message='Upload failed - Error when parsing the filename %s for metadata.' % filename,
                            )

            # Detect duplicate images and handle them.
            dupe = find_dupe_image(source, **metadataDict)
            if dupe:
                duplicates += 1
                if dupeOption == 'skip':
                    # Skip uploading this file.
                    continue
                elif dupeOption == 'replace':
                    # Proceed uploading this file, and delete the dupe.
                    dupe.delete()

            # Set the metadata.
            valueDict = get_location_value_objs(source, metadataDict['values'],
                                                createNewValues=True)
            photoDate = datetime.date(year=int(metadataDict['year']),
                                      month=int(metadataDict['month']),
                                      day=int(metadataDict['day']))
            metadata.name = metadataDict['name']
            metadata.photo_date = photoDate
            for key, value in valueDict.iteritems():
                setattr(metadata, key, value)
        else:
            metadata.name = filename

        # Image + annotation import form.
        # Assumes we got the images' metadata (from filenames or otherwise).
        if annotationData:

            pointsOnlyOption = annotationOptionsForm.cleaned_data['points_only']

            # Use the location values and the year to build a string
            # identifier for the image, such as:
            # Shore1;Reef5;...;2008
            imageIdentifier = get_image_identifier(metadataDict['values'],
                                                   metadataDict['year'])

            # Use the identifier as the index into the annotation file's data.
            if not annotationData.has_key(imageIdentifier):
                return dict(error=True,
                            message='%s seems to have no annotations for the image file %s, which has the following keys:\n%s' % (
                                annoFile.name, imageFile.name,
                                imageIdentifier.replace(';', ' '))
                            )

            imageAnnotations = annotationData[imageIdentifier]

            status = ImageStatus()
            status.save()

            metadata.annotation_area = AnnotationAreaUtils.IMPORTED_STR
            metadata.save()

            img = Image(original_file=imageFile,
                        uploaded_by=currentUser,
                        point_generation_method=PointGen.args_to_db_format(
                            point_generation_type=PointGen.Types.IMPORTED,
                            imported_number_of_points=len(imageAnnotations)
                        ),
                        metadata=metadata,
                        source=source,
                        status=status,
                        )
            img.save()

            # Iterate over this image's annotations and save them.
            pointNum = 1
            for anno in imageAnnotations:

                # Save the Point in the database.
                point = Point(row=anno['row'], column=anno['col'],
                              point_number=pointNum, image=img)
                point.save()

                if not pointsOnlyOption:
                    label = Label.objects.filter(code=anno['label'])[0]

                    # Save the Annotation in the database, marking the
                    # annotations as imported.
                    annotation = Annotation(user=importedUser, point=point,
                                            image=img, label=label,
                                            source=source)
                    annotation.save()
                    annotationsImported += 1

                pointNum += 1

            img.status.hasRandomPoints = True
            if not pointsOnlyOption:
                img.status.annotatedByHuman = True
            img.status.save()

        # Image upload form, no annotations.
        else:
            status = ImageStatus()
            status.save()

            metadata.annotation_area = source.image_annotation_area
            metadata.save()

            # Save the image into the DB.
            img = Image(original_file=imageFile,
                        uploaded_by=currentUser,
                        point_generation_method=source.default_point_generation_method,
                        metadata=metadata,
                        source=source,
                        status=status,
                        )
            img.save()

            # Generate and save points.
            generate_points(img)

        # Up to 5 uploaded images will be shown upon successful upload.
        # Prepend to list, so most recent image comes first.
        uploadedImages.insert(0, img)
        if len(uploadedImages) > 5:
            uploadedImages = uploadedImages[:5]

        imagesUploaded += 1

    # Construct success message.
    success_message = image_upload_success_message(
        num_images_uploaded=imagesUploaded,
        num_dupes=duplicates,
        dupe_option=dupeOption,
        num_annotations=annotationsImported,
    )

    return dict(error=False,
                uploadedImages=uploadedImages,
                message=success_message,
                )
def classify_image(image_id):
    image = Image.objects.get(pk=image_id)

    # If annotated by Human, or if the previous step is not complete.
    if image.status.annotatedByHuman or not image.status.featuresExtracted:
        return 1

    # Get the last robot for this source.
    latestRobot = image.source.get_latest_robot()
    if latestRobot is None:
        return 1

    # Check if this image has been previously annotated by a robot.
    if image.status.annotatedByRobot:
        # Compare this version number to the latest_robot_annotator field
        # for the image.
        if not (latestRobot.version > image.latest_robot_annotator.version):
            return 1

    ####### EVERYTHING OK: START THE CLASSIFICATION ##########
    logging.info('Classifying image{id} from source{sid}: {sname}'.format(
        id=image_id, sid=image.source_id, sname=image.source.name))

    # Build args for the matlab script.
    featureFile = os.path.join(FEATURES_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".dat")
    labelFile = os.path.join(CLASSIFY_DIR, str(image_id) + "_" + image.get_process_date_short_str() + ".txt")

    task_helpers.coralnet_classify(
        featureFile=featureFile,
        modelFile=latestRobot.path_to_model,
        labelFile=labelFile,
        logFile=CV_LOG,
        errorLogfile=CLASSIFY_ERROR_LOG,
    )

    if os.path.isfile(CLASSIFY_ERROR_LOG):
        logging.info('ERROR classifying image{id} from source{sid}: {sname}'.format(
            id=image_id, sid=image.source_id, sname=image.source.name))
        mail_admins('CoralNet Backend Error', 'in Classify')
        return 0
    else:
        # Update image status.
        image.status.annotatedByRobot = True
        image.status.save()
        image.latest_robot_annotator = latestRobot
        image.save()

    ####### IMPORT CLASSIFICATION RESULT TO DATABASE ##########
    user = get_robot_user()

    # Get the label probabilities that we just generated.
    label_probabilities = task_utils.get_label_probabilities_for_image(image_id)

    if len(label_probabilities) == 0:
        mail_admins('Classify error',
                    'Classification output for image{id} from source{sid}: {sname} was empty.'.format(
                        id=image_id, sid=image.source_id, sname=image.source.name))

    # Go through each point and update/create the annotation as appropriate.
    for point_number, probs in label_probabilities.iteritems():
        pt = Point.objects.get(image=image, point_number=point_number)

        probs_descending_order = sorted(probs, key=operator.itemgetter('score'),
                                        reverse=True)
        top_prob_label_code = probs_descending_order[0]['label']
        label = Label.objects.get(code=top_prob_label_code)

        # If there's an existing annotation for this point, get it.
        # Otherwise, create a new annotation.
        #
        # (Assumption: there's at most 1 Annotation per Point, never multiple.
        # If there are multiple, we'll get a MultipleObjectsReturned exception.)
        try:
            anno = Annotation.objects.get(image=image, point=pt)
        except Annotation.DoesNotExist:
            # No existing annotation. Create a new one.
            new_anno = Annotation(
                image=image, label=label, point=pt,
                user=user, robot_version=latestRobot, source=image.source
            )
            new_anno.save()
        else:
            # Got an existing annotation.
            if is_robot_user(anno.user):
                # It's an existing robot annotation. Update it as necessary.
                if anno.label.id != label.id:
                    anno.label = label
                    anno.robot_version = latestRobot
                    anno.save()
            # Else, it's an existing confirmed annotation, and we don't want
            # to overwrite it. So do nothing in this case.

    logging.info('Classified {npts} points in image{id} from source{sid}: {sname}'.format(
        npts=len(label_probabilities), id=image_id, sid=image.source_id,
        sname=image.source.name))

    return 1
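# Hedged sketch (assumption for illustration): the shape of the
# label_probabilities dict that classify_image() iterates over -- point numbers
# mapped to lists of {'label': <code>, 'score': <probability>} dicts. The label
# codes and scores below are invented.
example_label_probabilities = {
    1: [{'label': 'CCA', 'score': 0.71}, {'label': 'Sand', 'score': 0.29}],
    2: [{'label': 'Porit', 'score': 0.55}, {'label': 'CCA', 'score': 0.45}],
}
# For each point, the highest-scoring entry ('CCA' for point 1, 'Porit' for
# point 2) would become the robot annotation, unless a confirmed annotation
# already exists for that point.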
def ajax_save_annotations(request, annotationForm):
    """
    Called via Ajax from the annotation tool form, if the user clicked
    the "Save Annotations" button.

    Takes: the annotation form field names and values, serialized with
      jQuery's serializeArray()
    Does: saves the annotations in the database
    Returns: false if successful, an error string if there was a problem
    """
    #TODO: just use request.POST instead of the annotationForm parameter
    formDict = dict([(d['name'], d['value']) for d in annotationForm])

    image = Image.objects.get(pk=formDict['image_id'])
    user = User.objects.get(pk=formDict['user_id'])
    source = image.source
    sourceLabels = source.labelset.labels.all()

    # Sanity checks.
    if user != request.user:
        return simplejson.dumps(dict(error="User id error"))
    if not user.has_perm(Source.PermTypes.EDIT.code, image.source):
        return simplejson.dumps(dict(error="Image id error"))

    # Get stuff from the DB in advance, should save time.
    pointsList = list(Point.objects.filter(image=image))
    points = dict([(p.point_number, p) for p in pointsList])

    annotationsList = list(Annotation.objects.filter(image=image, source=source))
    annotations = dict([(a.point_id, a) for a in annotationsList])

    for name, value in formDict.iteritems():

        if name.startswith('label_'):

            # Get this annotation's point.
            pointNum = name[len('label_'):]  # The part after 'label_'
            point = points[int(pointNum)]

            # Does the form field have a non-human-confirmed robot annotation?
            isFormRobotAnnotation = simplejson.loads(formDict['robot_' + pointNum])

            # Get the label that the form field value refers to.
            # Anticipate errors, even if we plan to check input with JS.
            labelCode = value
            if labelCode == '':
                label = None
            else:
                labels = Label.objects.filter(code=labelCode)
                if len(labels) == 0:
                    return simplejson.dumps(dict(error="No label with code %s." % labelCode))

                label = labels[0]
                if label not in sourceLabels:
                    return simplejson.dumps(dict(error="The labelset has no label with code %s." % labelCode))

            # An annotation of this point number exists in the database.
            if annotations.has_key(point.id):
                anno = annotations[point.id]

                # Label field is now blank.
                # We're not allowing label deletions, so don't do anything
                # in this case.
                if label is None:
                    pass

                # Label was robot annotated, and then the human user
                # confirmed or changed it.
                elif is_robot_user(anno.user) and not isFormRobotAnnotation:
                    anno.label = label
                    anno.user = user
                    anno.save()

                # Label was otherwise changed.
                elif label != anno.label:
                    anno.label = label
                    anno.user = user
                    anno.save()

            # No annotation of this point number in the database yet.
            else:
                if label is not None:
                    newAnno = Annotation(point=point, user=user, image=image,
                                         source=source, label=label)
                    newAnno.save()

    # Are all points human annotated?
    all_done = image_annotation_all_done(image)

    # Update image status, if needed.
    if image.status.annotatedByHuman:
        image.after_completed_annotations_change()
    if image.status.annotatedByHuman != all_done:
        image.status.annotatedByHuman = all_done
        image.status.save()

    if all_done:
        # Need simplejson.dumps() to convert the Python True to a JS true.
        return simplejson.dumps(dict(all_done=True))
    else:
        return dict()
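# Hedged sketch (illustration, not from the original code): the kind of
# serializeArray()-style payload ajax_save_annotations() expects -- a list of
# {'name': ..., 'value': ...} dicts with one 'label_N' / 'robot_N' pair per
# point. The ids and label codes below are made up.
example_annotation_form = [
    {'name': 'image_id', 'value': '42'},
    {'name': 'user_id', 'value': '7'},
    {'name': 'label_1', 'value': 'CCA'},    # label code chosen for point 1
    {'name': 'robot_1', 'value': 'false'},  # human-confirmed, not a robot guess
    {'name': 'label_2', 'value': ''},       # blank: leave point 2 unchanged
    {'name': 'robot_2', 'value': 'false'},
]
# formDict would then look like:
# {'image_id': '42', 'user_id': '7', 'label_1': 'CCA', 'robot_1': 'false', ...}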
def image_upload_process(imageFile, imageOptionsForm,
                         annotation_dict_id,
                         csv_dict_id, metadata_import_form_class,
                         annotation_options_form,
                         source, currentUser):

    is_uploading_points_or_annotations = annotation_options_form.cleaned_data['is_uploading_points_or_annotations']

    filename = imageFile.name
    metadata_dict = None
    metadata_obj = Metadata(height_in_cm=source.image_height_in_cm)

    if imageOptionsForm.cleaned_data['specify_metadata'] == 'filenames':

        filename_check_result = check_image_filename(filename, source)
        filename_status = filename_check_result['status']

        if filename_status == 'error':
            # This case should never happen if the pre-upload
            # status checking is doing its job, but just in case...
            return dict(
                status=filename_status,
                message=u"{m}".format(m=filename_check_result['message']),
                link=None,
                title=None,
            )

        # Set the metadata.
        metadata_dict = filename_check_result['metadata_dict']

        value_dict = get_location_value_objs(source, metadata_dict['values'],
                                             createNewValues=True)
        photo_date = datetime.date(
            year=int(metadata_dict['year']),
            month=int(metadata_dict['month']),
            day=int(metadata_dict['day'])
        )

        metadata_obj.name = metadata_dict['name']
        metadata_obj.photo_date = photo_date

        for key, value in value_dict.iteritems():
            setattr(metadata_obj, key, value)

    elif imageOptionsForm.cleaned_data['specify_metadata'] == 'csv':

        if not csv_dict_id:
            return dict(
                status='error',
                message=u"{m}".format(m="CSV file was not uploaded."),
                link=None,
                title=None,
            )

        csv_dict_filename = os.path.join(
            settings.SHELVED_ANNOTATIONS_DIR,
            'csv_source{source_id}_{dict_id}.db'.format(
                source_id=source.id,
                dict_id=csv_dict_id,
            ),
        )

        # Corner case: the specified shelved annotation file doesn't exist.
        # Perhaps the file was created a while ago and has been pruned since,
        # or perhaps there is a bug.
        if not os.path.isfile(csv_dict_filename):
            return dict(
                status='error',
                message="CSV file could not be found - if you provided the .csv file a while ago, maybe it just timed out. Please retry the upload.",
                link=None,
                title=None,
            )

        csv_dict = shelve.open(csv_dict_filename)

        # Index into the csv_dict with the filename. The str() is to handle
        # the case where the filename is a unicode object instead of a str;
        # unicode objects can't index into dicts.
        filename_str = str(filename)

        if filename_str in csv_dict:
            # There is CSV metadata for this file.
            metadata_dict = csv_dict[str(filename)]
            csv_dict.close()

            # The reason this uses metadata_import_form_class instead of
            # importing MetadataImportForm is that I'm too lazy to deal with
            # the circular-import implications of the latter solution right
            # now. -Stephen
            metadata_import_form = metadata_import_form_class(
                source.id, True, metadata_dict,
            )

            if not metadata_import_form.is_valid():
                return dict(
                    status='error',
                    message="Unknown error with the CSV metadata.",
                    link=None,
                    title=None,
                )

            fields = ['photo_date', 'value1', 'value2', 'value3', 'value4',
                      'value5', 'height_in_cm', 'latitude', 'longitude',
                      'depth', 'camera', 'photographer', 'water_quality',
                      'strobes', 'framing', 'balance']
            for field in fields:

                if not field in metadata_import_form.fields:
                    # A location value field that's not in this form.
                    continue

                value = metadata_import_form.cleaned_data[field]
                # Check for a non-empty value; don't want empty values to
                # override default values that we've already set on the
                # metadata_obj.
                if value:
                    setattr(metadata_obj, field, value)

        else:
            # No CSV metadata for this file.
            csv_dict.close()

        metadata_obj.name = filename

    else:
        # Not specifying any metadata at upload time.
        metadata_obj.name = filename

    image_annotations = None
    has_points_or_annotations = False

    if is_uploading_points_or_annotations:

        # Corner case: somehow, we're uploading with points+annotations and
        # without a checked annotation file specified. This probably
        # indicates a bug.
        if not annotation_dict_id:
            return dict(
                status='error',
                message=u"{m}".format(m=str_consts.UPLOAD_ANNOTATIONS_ON_AND_NO_ANNOTATION_DICT_ERROR_STR),
                link=None,
                title=None,
            )

        annotation_dict_filename = os.path.join(
            settings.SHELVED_ANNOTATIONS_DIR,
            'source{source_id}_{dict_id}'.format(
                source_id=source.id,
                dict_id=annotation_dict_id,
            ),
        )

        # Corner case: the specified shelved annotation file doesn't exist.
        # Perhaps the file was created a while ago and has been pruned since,
        # or perhaps there is a bug.
        if not os.path.isfile(annotation_dict_filename):
            return dict(
                status='error',
                message="Annotations could not be found - if you provided the .txt file a while ago, maybe it just timed out. Please retry the upload.",
                link=None,
                title=None,
            )

        # Use the location values and the year to build a string identifier
        # for the image, such as:
        # Shore1;Reef5;...;2008
        # Convert to a string (instead of a unicode string) for the shelve
        # key lookup.
        image_identifier = str(get_image_identifier(metadata_dict['values'],
                                                    metadata_dict['year']))

        annotation_dict = shelve.open(annotation_dict_filename)

        if annotation_dict.has_key(image_identifier):
            image_annotations = annotation_dict[image_identifier]
            has_points_or_annotations = True
        annotation_dict.close()

    if has_points_or_annotations:
        # Image upload with points/annotations.

        is_uploading_annotations_not_just_points = annotation_options_form.cleaned_data['is_uploading_annotations_not_just_points']
        imported_user = get_imported_user()

        status = ImageStatus()
        status.save()

        metadata_obj.annotation_area = AnnotationAreaUtils.IMPORTED_STR
        metadata_obj.save()

        img = Image(
            original_file=imageFile,
            uploaded_by=currentUser,
            point_generation_method=PointGen.args_to_db_format(
                point_generation_type=PointGen.Types.IMPORTED,
                imported_number_of_points=len(image_annotations)
            ),
            metadata=metadata_obj,
            source=source,
            status=status,
        )
        img.save()

        # Iterate over this image's annotations and save them.
        point_num = 0
        for anno in image_annotations:

            # Save the Point in the database.
            point_num += 1
            point = Point(row=anno['row'], column=anno['col'],
                          point_number=point_num, image=img)
            point.save()

            if is_uploading_annotations_not_just_points:
                label = Label.objects.filter(code=anno['label'])[0]

                # Save the Annotation in the database, marking the
                # annotations as imported.
                annotation = Annotation(user=imported_user, point=point,
                                        image=img, label=label, source=source)
                annotation.save()

        img.status.hasRandomPoints = True
        if is_uploading_annotations_not_just_points:
            img.status.annotatedByHuman = True
        img.status.save()

    else:
        # Image upload, no points/annotations.

        image_status = ImageStatus()
        image_status.save()

        metadata_obj.annotation_area = source.image_annotation_area
        metadata_obj.save()

        # Save the image into the DB.
        img = Image(
            original_file=imageFile,
            uploaded_by=currentUser,
            point_generation_method=source.default_point_generation_method,
            metadata=metadata_obj,
            source=source,
            status=image_status,
        )
        img.save()

        # Generate and save points.
        generate_points(img)

    success_message = "Uploaded"

    return dict(
        status='ok',
        message=success_message,
        link=reverse('image_detail', args=[img.id]),
        title=img.get_image_element_title(),
        image_id=img.id,
    )