def post(self, request, format=None):
    """Attach additional subject data (age group, gender, new subject) to a
    face annotation rectangle.

    Reads ``annotationId``, ``ageGroup``, ``gender`` and ``newSubjectId``
    from POST, links the rectangle's photo to the person album when not
    already linked, then delegates to ``self.add_subject_data``.
    """
    annotation_id = request.POST.get('annotationId', None)
    age = request.POST.get('ageGroup', None)
    gender = request.POST.get('gender', None)
    newSubjectId = request.POST.get('newSubjectId', None)

    person_album = None
    if newSubjectId and newSubjectId != '':
        person_album = Album.objects.filter(pk=newSubjectId).first()

    new_rectangle = FaceRecognitionRectangle.objects.get(pk=annotation_id)

    # Link the photo to the person album exactly once. .exists() asks the
    # database for presence only, instead of len(...) which materialises
    # every matching row just to count them.
    if person_album and not AlbumPhoto.objects.filter(
            photo=new_rectangle.photo, album=person_album).exists():
        album_photo = AlbumPhoto(album=person_album,
                                 photo=new_rectangle.photo,
                                 type=AlbumPhoto.FACE_TAGGED,
                                 profile=request.user.profile)
        album_photo.save()
        # Refresh the album's cached/derived fields after adding a photo.
        person_album.set_calculated_fields()
        person_album.save()

    additional_subject_data = AddAdditionalSubjectData(
        subject_rectangle_id=annotation_id,
        age=age,
        gender=gender,
        newSubjectId=newSubjectId)
    return self.add_subject_data(
        additional_subject_data=additional_subject_data, request=request)
def handle(self, *args, **options):
    """Backfill FACE_TAGGED AlbumPhoto links for face rectangles that
    already have a subject (consensus or AI guess).

    The original code duplicated the whole create-if-missing body for the
    two subject sources; both branches are identical apart from the album,
    so the album is picked once and the body written once.
    """
    rectangles = FaceRecognitionRectangle.objects.filter(
        Q(subject_consensus__isnull=False) | Q(subject_ai_guess__isnull=False))
    for rectangle in rectangles:
        # Consensus takes precedence over the AI guess, mirroring the
        # original if/elif ordering.
        album = rectangle.subject_consensus or rectangle.subject_ai_guess
        if album is None:
            continue
        existing_relation = AlbumPhoto.objects.filter(
            photo=rectangle.photo,
            album=album,
            type=AlbumPhoto.FACE_TAGGED).first()
        if not existing_relation:
            new_relation = AlbumPhoto(photo=rectangle.photo,
                                      album=album,
                                      type=AlbumPhoto.FACE_TAGGED)
            new_relation.save()
            print('New relation between %s and %s' %
                  (new_relation.photo.pk, new_relation.album.pk))
def _create_photos_from_xml_response(self, xml_response):
    # Import photos from a Finna XML response: every <docs> element that is
    # not already imported becomes a new Photo whose image is downloaded and
    # which is linked to self.album.
    for elem in xml_response:
        if elem.tag == "docs":
            if not self._resource_already_exists(elem):
                new_photo = Photo(
                    title=elem.find("title").text,
                    # NOTE(review): description is filled from "title_sort",
                    # not a dedicated description field — confirm against the
                    # Finna record schema.
                    description=elem.find("title_sort").text,
                    source=Source.objects.get(
                        description=elem.find('institution').text),
                    source_key=elem.find("identifier").text,
                    external_id=elem.find("identifier").text,
                    # main_date_str may be absent; getattr falls back to None
                    # instead of raising AttributeError on the missing element.
                    date_text=getattr(elem.find("main_date_str"), 'text', None),
                    author=elem.find("author").text,
                    source_url=elem.find("record_link").text,
                    licence=Licence.objects.filter(
                        url='https://creativecommons.org/about/pdm').first(
                    ))
                # Spoof a browser User-Agent: some image hosts reject the
                # default urllib agent.
                opener = build_opener()
                opener.addheaders = [(
                    "User-Agent",
                    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36"
                )]
                img_response = opener.open(elem.find("image_links").text)
                new_photo.image.save("finna.jpg",
                                     ContentFile(img_response.read()))
                new_photo.save()
                ap = AlbumPhoto(album=self.album, photo=new_photo)
                ap.save()
def handle(self, *args, **options):
    """Import public-domain (Flickr license 7) photos with the given tag
    into The British Library Metropolitan Improvements album.

    args[0] is the Flickr tag, args[1] the result page number.
    """
    translation.activate('en')
    tag = args[0]
    page = args[1]
    search_url = ('https://api.flickr.com/services/rest/?method=flickr.photos.search&api_key='
                  + settings.FLICKR_API_KEY + '&tags=' + tag
                  + '&is_commons=1&content_type=6&extras=license,original_format&format=json&nojsoncallback=1&page='
                  + page)
    # https://farm{farm-id}.staticflickr.com/{server-id}/{id}_{o-secret}_o.(jpg|gif|png)
    image_url_template = 'https://farm%s.staticflickr.com/%s/%s_%s_b.jpg'
    # https://www.flickr.com/photos/{user-id}/{photo-id}
    reference_url_template = 'https://www.flickr.com/photos/%s/%s'

    request = Request(search_url)
    response = urlopen(request)
    data = response.read()
    # Testing
    # data = open(ABSOLUTE_PROJECT_ROOT + '/ajapaik/home/management/commands/flickr_import_test.json', 'r').read()
    data = json.loads(data)

    # Fixed source/licence/album/area for this one-off import command.
    source = Source.objects.get(description='The British Library')
    licence = Licence.objects.get(name='No known copyright restrictions')
    album = Album.objects.get(
        name='The British Library Metropolitan Improvements')
    area = Area.objects.get(name='London')

    for photo in data['photos']['photo']:
        # '7' == "No known copyright restrictions" in the Flickr API.
        if photo['license'] == '7' and not self._resource_already_exists(
                photo['id']):
            new_photo = Photo(source=source,
                              source_url=reference_url_template %
                              (photo['owner'], photo['id']),
                              source_key=photo['id'],
                              date_text='1830',
                              licence=licence,
                              description=photo['title'],
                              area=area,
                              author='Shepherd, Thomas Hosmer')
            try:
                image_url = image_url_template % (
                    photo['farm'], photo['server'], photo['id'],
                    photo['secret'])
                # Spoof a browser User-Agent: some hosts reject urllib's default.
                opener = build_opener()
                opener.addheaders = [(
                    "User-Agent",
                    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36"
                )]
                img_response = opener.open(image_url)
                new_photo.image.save("tbl.jpg",
                                     ContentFile(img_response.read()))
                new_photo.save()
                ap = AlbumPhoto(album=album, photo=new_photo)
                ap.save()
            except Exception:
                # Best-effort import: skip photos whose image cannot be
                # fetched or saved. A bare "except:" would also swallow
                # SystemExit/KeyboardInterrupt, so catch Exception only.
                continue
def save_detected_face(new_rectangle_id, person_id, user_id, user_profile):
    """Link a detected face rectangle's photo to the person's album (once)
    and record the subject on the rectangle.

    Raises DoesNotExist if the rectangle or album id is unknown — callers
    are expected to pass ids of existing rows.
    """
    new_rectangle = FaceRecognitionRectangle.objects.get(pk=new_rectangle_id)
    person_album = Album.objects.get(pk=person_id)
    # .exists() asks the database for presence only, instead of len(...)
    # which materialises every matching row just to count them.
    if person_album and not AlbumPhoto.objects.filter(
            photo=new_rectangle.photo, album=person_album).exists():
        album_photo = AlbumPhoto(album=person_album,
                                 photo=new_rectangle.photo,
                                 type=AlbumPhoto.FACE_TAGGED,
                                 profile=user_profile)
        album_photo.save()
        # Refresh the album's cached/derived fields after adding a photo.
        person_album.set_calculated_fields()
        person_album.save()
    save_subject_object(person_album, new_rectangle, user_id, user_profile)
def finna_add_to_album(photo, target_album):
    """Ensure *photo* belongs to the curated album named *target_album*,
    creating the album (with this photo as cover) and the link as needed.

    A falsy/empty album name is a no-op.
    """
    if not target_album:
        return
    album = Album.objects.filter(name_en=target_album).first()
    if not album:
        album = Album(name_en=target_album,
                      atype=Album.CURATED,
                      is_public=True,
                      cover_photo=photo)
        album.save()
    # .exists() is enough here — the row itself was never used.
    if not AlbumPhoto.objects.filter(album=album, photo=photo).exists():
        ap = AlbumPhoto(album=album, photo=photo)
        ap.save()
        # update counts
        album.save()
def handle(self, *args, **options):
    """Import photos matching a Europeana search query into an album/area.

    args: [0] search query, [1] geoname (Area), [2] album name,
    [3] language code used to store the translated text fields.
    """
    self.query_url = "http://europeana.eu/api/v2/search.json"
    self.resource_url = "http://europeana.eu/api/v2/record"
    query = args[0]
    geoname = args[1]
    album_name = args[2]
    text_language = args[3]
    translation.activate('en')

    # Get-or-create the target area and album.
    try:
        area = Area.objects.get(name=geoname)
    except ObjectDoesNotExist:
        area = Area(name=geoname)
        area.save()
    try:
        album = Album.objects.get(name=album_name)
    except ObjectDoesNotExist:
        album = Album(name=album_name, atype=Album.COLLECTION,
                      is_public=True)
        album.save()

    translation.activate(text_language)
    query_result = self.query_europeana(query)
    item_count = int(query_result["itemsCount"])
    # xrange is Python 2-only and raises NameError on Python 3 (the rest of
    # this codebase uses urllib.request, i.e. Python 3) — use range.
    for i in range(item_count):
        item = query_result["items"][i]
        if "dataProvider" in item and "id" in item:
            if not self._resource_already_exists(item["dataProvider"][0],
                                                 item["id"]):
                new_photo = Photo(
                    area=area,
                    source=Source.objects.get(
                        description=item["dataProvider"][0]),
                    source_key=item["id"],
                    # NOTE(review): sibling importers pass a Licence instance
                    # here; a plain string may be wrong — confirm the model
                    # field type.
                    licence="Public domain")
                if "edmIsShownAt" in item:
                    new_photo.source_url = item["edmIsShownAt"][0]
                if "edmAgentLabel" in item:
                    new_photo.author = item["edmAgentLabel"][0]["def"]
                if "title" in item:
                    new_photo.description = item["title"][0]
                # Spoof a browser User-Agent: some hosts reject urllib's default.
                opener = build_opener()
                opener.addheaders = [(
                    "User-Agent",
                    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36"
                )]
                try:
                    img_response = opener.open(item["edmIsShownBy"][0])
                    new_photo.image.save("europeana.jpg",
                                         ContentFile(img_response.read()))
                    new_photo.save()
                    ap = AlbumPhoto(album=album, photo=new_photo)
                    ap.save()
                except Exception:
                    # Best-effort: records without a fetchable edmIsShownBy
                    # image are skipped. Bare "except:" would also swallow
                    # KeyboardInterrupt, so catch Exception only.
                    pass
def handle(self, *args, **options):
    """Import the CeriC Flickr photoset (CC-BY 2.0) into album pk=1089,
    carrying over tags and source geotags where present."""
    translation.activate('en')
    set_id = '72157652352869904'
    page = 1
    set_url = ('https://api.flickr.com/services/rest/?method=flickr.photosets.getPhotos&api_key='
               + settings.FLICKR_API_KEY + '&photoset_id=' + set_id
               + '&extras=license,owner_name,geo,tags&format=json&nojsoncallback=1&page='
               + str(page))
    # https://farm{farm-id}.staticflickr.com/{server-id}/{id}_{o-secret}_o.(jpg|gif|png)
    image_url_template = 'https://farm%s.staticflickr.com/%s/%s_%s_b.jpg'
    # https://www.flickr.com/photos/{user-id}/{photo-id}
    reference_url_template = 'https://www.flickr.com/photos/%s/%s'

    request = Request(set_url)
    response = urlopen(request)
    data = response.read()
    data = json.loads(data)

    source = Source.objects.filter(description='Flickr').first()
    if not source:
        source = Source(name='Flickr', description='Flickr')
        source.save()
    licence = Licence.objects.get(
        url='https://creativecommons.org/licenses/by/2.0/')
    album = Album.objects.get(pk=1089)

    for photo in data['photoset']['photo']:
        if not self._resource_already_exists(photo['id']):
            new_photo = Photo(
                source=source,
                source_url=(reference_url_template %
                            (photo['ownername'], photo['id'])).replace(
                                ' ', '_'),
                source_key=photo['id'],
                keywords=photo['tags'],
                licence=licence,
                description=photo['title'],
                author='CeriC')
            try:
                image_url = image_url_template % (
                    photo['farm'], photo['server'], photo['id'],
                    photo['secret'])
                # Spoof a browser User-Agent: some hosts reject urllib's default.
                opener = build_opener()
                opener.addheaders = [(
                    "User-Agent",
                    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36"
                )]
                img_response = opener.open(image_url)
                new_photo.image.save("ceric.jpg",
                                     ContentFile(img_response.read()))
                new_photo.save()
                if photo['latitude'] and photo['longitude']:
                    source_geotag = GeoTag(lat=photo['latitude'],
                                           lon=photo['longitude'],
                                           origin=GeoTag.SOURCE,
                                           type=GeoTag.SOURCE_GEOTAG,
                                           map_type=GeoTag.NO_MAP,
                                           photo=new_photo,
                                           is_correct=True,
                                           trustworthiness=0.07)
                    source_geotag.save()
                    new_photo.latest_geotag = source_geotag.created
                    new_photo.set_calculated_fields()
                    new_photo.save()
                ap = AlbumPhoto(album=album, photo=new_photo)
                ap.save()
                if not album.cover_photo:
                    album.cover_photo = new_photo
                    album.light_save()
            except Exception:
                # print "Problem loading image %s" % photo['id']
                # Best-effort import: skip photos whose image cannot be
                # fetched. Bare "except:" would also swallow
                # KeyboardInterrupt, so catch Exception only.
                continue
    album.save()
def handle(self, *args, **options):
    """Import a MUIS (Estonian museums) OAI-PMH set as Photos.

    Creates the Source for the museum on first run, downloads every LIDO
    record of the set that is not yet imported, extracts title/description,
    dating, locations and persons, and links the photos to the given albums
    plus per-person FACE_TAGGED albums.
    """
    logger = logging.getLogger(__name__)
    # Import sets
    muis_url = 'https://www.muis.ee/OAIService/OAIService'
    set_name = (options['set_name'])[0]
    # Set specs look like "museum:collection"; the museum acronym is the Source name.
    museum_name = set_name.split(':')[0]
    source = Source.objects.filter(name=museum_name).first()
    if source is None:
        # First import for this museum: look its human-readable name up from
        # the OAI ListSets response and create the Source.
        sets_url = muis_url + '?verb=ListSets'
        url_response = urllib.request.urlopen(sets_url)
        parser = ET.XMLParser(encoding="utf-8")
        tree = ET.fromstring(url_response.read(), parser=parser)
        ns = {'d': 'http://www.openarchives.org/OAI/2.0/'}
        sets = tree.findall('d:ListSets/d:set', ns)
        for s in sets:
            if s.find('d:setSpec', ns).text == museum_name:
                source_description = s.find('d:setName', ns).text
                source = Source(name=museum_name,
                                description=source_description)
                source.save()
        source = Source.objects.filter(name=museum_name).first()

    album_ids = (options['album_ids'])
    albums = Album.objects.filter(id__in=album_ids)
    all_person_album_ids_set = set()

    list_identifiers_url = muis_url + '?verb=ListRecords&set=' + set_name \
        + '&metadataPrefix=lido'
    url_response = urllib.request.urlopen(list_identifiers_url)
    parser = ET.XMLParser(encoding="utf-8")
    tree = ET.fromstring(url_response.read(), parser=parser)
    ns = {'d': 'http://www.openarchives.org/OAI/2.0/',
          'lido': 'http://www.lido-schema.org'}
    header = 'd:header/'
    records = tree.findall('d:ListRecords/d:record', ns)

    # Pre-built LIDO XPath prefixes.
    record = 'd:metadata/lido:lidoWrap/lido:lido/'
    object_identification_wrap = record + 'lido:descriptiveMetadata/lido:objectIdentificationWrap/'
    object_description_wraps = \
        object_identification_wrap + 'lido:objectDescriptionWrap/lido:objectDescriptionSet'
    title_wrap = object_identification_wrap + 'lido:titleWrap/'
    repository_wrap = object_identification_wrap + 'lido:repositoryWrap/'
    event_wrap = record + 'lido:descriptiveMetadata/lido:eventWrap/'
    record_wrap = record + 'lido:administrativeMetadata/lido:recordWrap/'
    resource_wrap = record + 'lido:administrativeMetadata/lido:resourceWrap/'
    actor_wrap = event_wrap + 'lido:eventSet/lido:event/lido:eventActor/'

    for rec in records:
        # photo must be bound before the try: the except handler references
        # it, and an early failure would otherwise raise NameError and mask
        # the original error.
        photo = None
        try:
            locations = []
            person_album_ids = []
            creation_date_earliest = None
            creation_date_latest = None
            date_prefix_earliest = None
            date_prefix_latest = None
            # Default the suffix flags like the re-sync command does, so
            # add_dating_to_photo() cannot hit unbound locals when a record
            # has neither events nor a textual dating.
            date_earliest_has_suffix = False
            date_latest_has_suffix = False

            external_id = rec.find(header + 'd:identifier', ns).text \
                if rec.find(header + 'd:identifier', ns) is not None \
                else None
            existing_photo = Photo.objects.filter(
                source=source, external_id=external_id).first()
            if existing_photo is not None:
                continue

            image_url = rec.find(resource_wrap + 'lido:resourceSet/lido:'
                                 + 'resourceRepresentation/lido:linkResource',
                                 ns).text \
                if rec.find(resource_wrap + 'lido:resourceSet/lido:'
                            + 'resourceRepresentation/lido:linkResource',
                            ns) is not None \
                else None
            image_extension = rec.find(resource_wrap + 'lido:resourceSet/lido:'
                                       + 'resourceRepresentation/lido:linkResource',
                                       ns).attrib['{' + ns['lido'] + '}formatResource'] \
                if rec.find(resource_wrap + 'lido:resourceSet/lido:'
                            + 'resourceRepresentation/lido:linkResource',
                            ns) is not None \
                else None
            source_url_find = rec.find(
                record_wrap + 'lido:recordInfoSet/lido:recordInfoLink', ns)
            source_url = source_url_find.text \
                if source_url_find is not None \
                else None
            identifier_find = rec.find(
                repository_wrap + 'lido:repositorySet/lido:workID', ns)
            identifier = identifier_find.text \
                if identifier_find is not None \
                else None
            if image_url is None:
                continue

            # Download the image into MEDIA_ROOT/uploads/.
            img_data = requests.get(image_url).content
            image_id = external_id.split(':')[-1]
            file_name = set_name + '_' + image_id + '.' + image_extension
            file_name = file_name.replace(':', '_')
            path = settings.MEDIA_ROOT + '/uploads/' + file_name
            with open(path, 'wb') as handler:
                handler.write(img_data)

            photo = Photo(
                image=path,
                source_key=identifier,
                source_url=source_url,
                external_id=external_id,
                source=source
            )
            # Record the sync time as an aware-UTC ISO string.
            dt = datetime.utcnow()
            photo.muis_update_time = dt.replace(
                tzinfo=timezone.utc).isoformat()
            photo.light_save()
            # Re-fetch and repoint the image field at the media-relative name.
            photo = Photo.objects.get(id=photo.id)
            photo.image.name = 'uploads/' + file_name

            title_find = rec.find(
                title_wrap + 'lido:titleSet/lido:appellationValue', ns)
            title = title_find.text \
                if title_find is not None \
                else None
            if title:
                photo = reset_modeltranslated_field(photo, 'title', title)
            dating = None
            photo, dating = set_text_fields_from_muis(
                photo, dating, rec, object_description_wraps, ns)
            photo.light_save()

            events = rec.findall(event_wrap + 'lido:eventSet/lido:event', ns)
            if events is not None and len(events) > 0:
                locations, \
                    creation_date_earliest, \
                    creation_date_latest, \
                    date_prefix_earliest, \
                    date_prefix_latest, \
                    date_earliest_has_suffix, \
                    date_latest_has_suffix, \
                    = extract_dating_from_event(
                        events,
                        locations,
                        creation_date_earliest,
                        creation_date_latest,
                        photo.latest_dating is not None or dating is not None,
                        ns
                    )
            # A textual dating overrides whatever the events produced.
            if dating is not None:
                creation_date_earliest, date_prefix_earliest, date_earliest_has_suffix = \
                    get_muis_date_and_prefix(dating, False)
                creation_date_latest, date_prefix_latest, date_latest_has_suffix = \
                    get_muis_date_and_prefix(dating, True)

            actors = rec.findall(actor_wrap + 'lido:actorInRole', ns)
            person_album_ids = add_person_albums(actors, person_album_ids, ns)
            photo.add_to_source_album()
            if locations != []:
                photo = add_geotag_from_address_to_photo(photo, locations)
            photo = add_dating_to_photo(
                photo,
                creation_date_earliest,
                creation_date_latest,
                date_prefix_earliest,
                date_prefix_latest,
                Dating,
                date_earliest_has_suffix,
                date_latest_has_suffix,
            )
            photo.light_save()

            # Link to the requested curated albums.
            for album in albums:
                if not album.cover_photo:
                    album.cover_photo = photo
                ap = AlbumPhoto(photo=photo, album=album,
                                type=AlbumPhoto.CURATED)
                ap.save()
            # Link to per-person albums discovered from the record's actors.
            person_albums = Album.objects.filter(id__in=person_album_ids)
            if person_albums is not None:
                for album in person_albums:
                    if not album.cover_photo:
                        album.cover_photo = photo
                    ap = AlbumPhoto(photo=photo, album=album,
                                    type=AlbumPhoto.FACE_TAGGED)
                    ap.save()
                    all_person_album_ids_set.add(album.id)
            photo.set_calculated_fields()
        except Exception as e:
            logger.exception(e)
            exception = ApplicationException(
                exception=traceback.format_exc(), photo=photo)
            exception.save()

    # Refresh derived fields on every touched album.
    for album in albums:
        album.set_calculated_fields()
        album.save()
    all_person_album_ids = list(all_person_album_ids_set)
    all_person_albums = Album.objects.filter(id__in=all_person_album_ids)
    if all_person_albums is not None:
        for person_album in all_person_albums:
            person_album.set_calculated_fields()
            person_album.save()
def handle(self, *args, **options):
    """Re-synchronise already-imported MUIS photos with their source records.

    For every Photo whose source_url points at www.muis.ee/museaal, re-fetch
    the LIDO record via OAI GetRecord and refresh title, text fields,
    dating, geotags and person-album links.
    """
    muis_url = 'https://www.muis.ee/OAIService/OAIService'
    all_person_album_ids_set = set()
    photos = Photo.objects.filter(
        source_url__contains='www.muis.ee/museaal')
    for photo in photos:
        try:
            parser = ET.XMLParser(encoding="utf-8")
            list_identifiers_url = muis_url + '?verb=GetRecord&identifier=' + photo.external_id \
                + '&metadataPrefix=lido'
            url_response = urllib.request.urlopen(list_identifiers_url)
            tree = ET.fromstring(url_response.read(), parser=parser)
            ns = {
                'd': 'http://www.openarchives.org/OAI/2.0/',
                'lido': 'http://www.lido-schema.org'
            }
            rec = tree.find('d:GetRecord/d:record', ns)

            # Pre-built LIDO XPath prefixes.
            record = 'd:metadata/lido:lidoWrap/lido:lido/'
            object_identification_wrap = record + 'lido:descriptiveMetadata/lido:objectIdentificationWrap/'
            object_description_wraps = \
                object_identification_wrap + 'lido:objectDescriptionWrap/lido:objectDescriptionSet'
            title_wrap = object_identification_wrap + 'lido:titleWrap/'
            event_wrap = record + 'lido:descriptiveMetadata/lido:eventWrap/'
            actor_wrap = event_wrap + 'lido:eventSet/lido:event/lido:eventActor/'

            person_album_ids = []
            title_find = rec.find(
                title_wrap + 'lido:titleSet/lido:appellationValue', ns)
            title = title_find.text \
                if title_find is not None \
                else None
            photo = reset_modeltranslated_field(photo, 'title', title)
            photo.light_save()
            dating = None
            photo, dating = set_text_fields_from_muis(
                photo, dating, rec, object_description_wraps, ns)
            photo.light_save()

            creation_date_earliest = None
            creation_date_latest = None
            date_prefix_earliest = None
            date_prefix_latest = None
            date_earliest_has_suffix = False
            date_latest_has_suffix = False
            location = []
            events = rec.findall(event_wrap + 'lido:eventSet/lido:event', ns)
            # A user-entered dating (profile is set) must not be overridden;
            # only refresh dates when no manual dating row exists.
            existing_dating = Dating.objects.filter(photo=photo,
                                                    profile=None).first()
            if events is not None and len(events) > 0:
                location, \
                    creation_date_earliest, \
                    creation_date_latest, \
                    date_prefix_earliest, \
                    date_prefix_latest, \
                    date_earliest_has_suffix, \
                    date_latest_has_suffix, \
                    = extract_dating_from_event(
                        events,
                        location,
                        creation_date_earliest,
                        creation_date_latest,
                        dating is not None and existing_dating is None,
                        ns
                    )
            if dating is not None and existing_dating is None:
                creation_date_earliest, date_prefix_earliest, date_earliest_has_suffix = \
                    get_muis_date_and_prefix(dating, False)
                creation_date_latest, date_prefix_latest, date_latest_has_suffix = \
                    get_muis_date_and_prefix(dating, True)

            actors = rec.findall(actor_wrap + 'lido:actorInRole', ns)
            person_album_ids = add_person_albums(actors, person_album_ids, ns)
            if location != []:
                photo = add_geotag_from_address_to_photo(photo, location)
            photo = add_dating_to_photo(photo, creation_date_earliest,
                                        creation_date_latest,
                                        date_prefix_earliest,
                                        date_prefix_latest, Dating,
                                        date_earliest_has_suffix,
                                        date_latest_has_suffix)
            # Record the sync time as an aware-UTC ISO string.
            dt = datetime.utcnow()
            photo.muis_update_time = dt.replace(
                tzinfo=timezone.utc).isoformat()
            photo.light_save()

            # Link to per-person albums discovered from the record's actors.
            person_albums = Album.objects.filter(id__in=person_album_ids)
            if person_albums is not None:
                for album in person_albums:
                    if not album.cover_photo:
                        album.cover_photo = photo
                    ap = AlbumPhoto(photo=photo, album=album,
                                    type=AlbumPhoto.FACE_TAGGED)
                    ap.save()
                    all_person_album_ids_set.add(album.id)
            photo.set_calculated_fields()
        except Exception:
            # Store the full traceback (consistent with the set-import
            # command) rather than the exception object, whose str() loses
            # the stack.
            exception = ApplicationException(
                exception=traceback.format_exc(), photo=photo)
            exception.save()

    # Refresh derived fields on every touched person album.
    all_person_album_ids = list(all_person_album_ids_set)
    all_person_albums = Album.objects.filter(id__in=all_person_album_ids)
    if all_person_albums is not None:
        for person_album in all_person_albums:
            person_album.set_calculated_fields()
            person_album.save()