import datetime


def create_new_photo(node_id, s3_path):
    # id is auto-incremented by the database
    photo = Photo.create(
        node=node_id,
        s3_path=s3_path,
        created_at=datetime.datetime.now(),
        updated_at=datetime.datetime.now(),
    )
    print(str(photo.id))
    return photo
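# Illustrative usage (sketch, not part of the original module): shows how
# create_new_photo might be called after an image has been uploaded. The
# node id and S3 key below are placeholders, and `Photo` is assumed to be an
# ORM model exposing a `create(...)` classmethod, as the function above implies.
def _example_create_photo():
    new_photo = create_new_photo(node_id=1, s3_path="s3://example-bucket/photos/0001.jpg")
    print("Created photo with id:", new_photo.id)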
def parse_file(file_name: str) -> list:
    with open(file_name) as file:
        lines = [line.rstrip('\n') for line in file]
    # drop the first (header) line; the remaining lines each describe one photo
    lines.pop(0)
    rows = [line.split(" ") for line in lines]
    photos = [{
        "index": idx,
        "orientation": 0 if row[0] == "H" else 1,
        "number_of_tags": int(row[1]),
        "tags": row[2:],
    } for idx, row in enumerate(rows)]
    photos = [
        Photo(index=photo["index"],
              orientation=photo["orientation"],
              numTags=photo["number_of_tags"],
              tags=photo["tags"])
        for photo in photos
    ]
    return photos
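# Illustrative usage (sketch, not original code): parse an input file and
# inspect the first photo. The file name is a placeholder, and the attribute
# names are assumed to match the constructor keywords used above.
def _example_parse_file():
    photos = parse_file("inputs/b_lovely_landscapes.txt")
    first = photos[0]
    print(first.orientation, first.numTags, first.tags)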
            else:
                current_slide = next_slide
                break

        # If no candidate slide was found, fall back to the remaining slide with
        # the smallest best_partner_2 / number_of_tags values.
        if not next_slide:
            current_slide = database.find_one_and_delete(
                collection=collection_name,
                mongo_query={},
                sort=[("best_partner_2", 1), ("number_of_tags", 1)])

        show.append(current_slide)

    total_points = get_points_of_show(show)

    slide_show = [
        Slide(photos=[Photo(index=index) for index in slide["photos"]])
        for slide in show
    ]
    output = "outputs/b_lovely_landscapes.txt"
    Slide.parse_output(slide_show, output)
    print("Total score: {}".format(total_points))
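# Hedged sketch (not the original implementation): get_points_of_show is
# assumed to sum the usual slideshow "interest factor" between consecutive
# slides, i.e. min(|common tags|, |tags only in A|, |tags only in B|). The
# helpers below only illustrate that metric, assuming each slide dict carries
# a "tags" collection.
def _example_transition_score(tags_a, tags_b):
    tags_a, tags_b = set(tags_a), set(tags_b)
    common = len(tags_a & tags_b)
    only_a = len(tags_a - tags_b)
    only_b = len(tags_b - tags_a)
    return min(common, only_a, only_b)


def _example_points_of_show(show):
    return sum(
        _example_transition_score(a["tags"], b["tags"])
        for a, b in zip(show, show[1:])
    )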
def get_album_details(set_id, user):
    # gets the list of pictures in the album, retrying once after a short
    # pause if the API call fails
    try:
        photosets = json.loads(
            flickrObj.photosets.getPhotos(
                photoset_id=set_id, user_id=user).decode(encoding='utf-8'))
    except Exception:
        time.sleep(5)
        photosets = json.loads(
            flickrObj.photosets.getPhotos(
                photoset_id=set_id, user_id=user).decode(encoding='utf-8'))

    # creates the album object we are analyzing and sets the album id, url, and name
    newalbum = Album(set_id, user_id=user)
    add_album_url(newalbum)
    newalbum.name = photosets['photoset']['title']

    # initializes the minimum and maximum posted/taken times as unix timestamps
    # (sentinels chosen so the first real photo always replaces them)
    mint = 1577840461  # ~01/01/2020
    maxt = 915152461   # ~01/01/1999
    minp = 1577840461
    maxp = 915152461

    # counts the number of pictures in the album
    album_size = 0
    # number of pictures of the species of interest in the album
    num_species = 0

    if len(photosets['photoset']['photo']) >= 500:
        print(photosets['photoset']["total"])

    # loops through each page of the photoset (including single-page albums),
    # creating a Photo object for each image
    count = 1
    for page in range(photosets['photoset']['pages']):
        photosets1 = json.loads(
            flickrObj.photosets.getPhotos(
                photoset_id=set_id, user_id=user,
                page=page + 1).decode(encoding='utf-8'))
        for j in photosets1['photoset']['photo']:
            # print(str(count) + " " + j['id'])
            count += 1
            try:
                photoInfo = json.loads(
                    flickrObj.photos.getInfo(photo_id=j['id']).decode(
                        encoding='utf-8'))
            except Exception:
                time.sleep(10)
                photoInfo = json.loads(
                    flickrObj.photos.getInfo(photo_id=j['id']).decode(
                        encoding='utf-8'))
            if "photo" not in photoInfo:
                continue

            newphoto = Photo(photo_info=photoInfo)
            newphoto.albumId = set_id
            newphoto.url = photoInfo['photo']['urls']['url'][0]['_content']
            newphoto.photo_description = photoInfo['photo']['description']['_content']
            if "location" in photoInfo["photo"]:
                photolocation = photoInfo['photo']['location']
                newphoto.photoLocationX = float(photolocation['latitude'])
                newphoto.photoLocationY = float(photolocation['longitude'])
                newphoto.location = (newphoto.photoLocationX, newphoto.photoLocationY)
            else:
                newphoto.location = 0

            # converts the taken time text into a unix timestamp
            taken = photoInfo['photo']['dates']['taken']
            taken = datetime.strptime(taken, '%Y-%m-%d %H:%M:%S')
            taken = int(time.mktime(taken.timetuple()))
            posted = int(photoInfo['photo']['dates']['posted'])

            # updates the max/min times if this photo is later/earlier respectively
            if taken < mint:
                mint = taken
            if taken > maxt:
                maxt = taken
            if posted < minp:
                minp = posted
            if posted > maxp:
                maxp = posted

            # adds the photo to the photo_list attribute of the album object
            newalbum.photo_list[j['id']] = newphoto
            album_size += 1
            # checks whether the picture is tagged with the species of interest
            # if j['id'] in photolist:
            #     num_species += 1

    # calculates the time range the album spans for posted/taken
    newalbum.time_range_posted = int(maxp) - int(minp)
    newalbum.time_range_taken = int(maxt) - int(mint)
    # updates the album size
    newalbum.size = album_size
    # ratio of species-of-interest photos to total photos in the album
    # newalbum.species_ratio = float(num_species) / float(album_size)
    return newalbum
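# Illustrative usage (sketch, not original code): fetch details for a single
# Flickr photoset. The set id and user nsid below are placeholders, and a
# module-level `flickrObj` client is assumed, as in the function above.
def _example_album_details():
    album = get_album_details(set_id="72157600000000000", user="12345678@N00")
    print(album.name, album.size, album.time_range_taken)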
def get_albums():
    # creates the Flickr API client
    flickrObj = flickrapi.FlickrAPI(key, secret, format="json")
    photolist = get_ids()  # list of photo ids returned from the Flickr search
    albumlist = {}  # {album id: album object}

    # loops through all the photos in the search
    for pid in photolist:
        # list of all set ids the photo belongs to
        all_contexts = json.loads(
            flickrObj.photos.getAllContexts(photo_id=pid).decode(
                encoding='utf-8'))
        if 'set' in all_contexts:
            sets = all_contexts["set"]
            # loops through all the sets that the photo is in
            for i in sets:
                set_id = i["id"]
                # only process albums that have not already been seen
                if set_id not in albumlist:
                    # gets the user id of the album owner
                    user = json.loads(
                        flickrObj.photos.getInfo(photo_id=pid).decode(
                            encoding='utf-8'))['photo']['owner']['nsid']
                    # gets the list of pictures in the album
                    photosets = json.loads(
                        flickrObj.photosets.getPhotos(
                            photoset_id=set_id, user_id=user).decode(encoding='utf-8'))
                    # creates the album object we are analyzing and sets the
                    # album id, album url, and album name
                    newalbum = Album(set_id, user_id=user)
                    add_album_url(newalbum)
                    add_album_name(newalbum)
                    # initializes the minimum and maximum posted/taken times from the first photo
                    first_posted = json.loads(
                        flickrObj.photos.getInfo(
                            photo_id=photosets['photoset']['photo'][0]['id']).decode(
                                encoding='utf-8'))['photo']['dates']['posted']
                    first_taken = json.loads(
                        flickrObj.photos.getInfo(
                            photo_id=photosets['photoset']['photo'][0]['id']).decode(
                                encoding='utf-8'))['photo']['dates']['taken']
                    # converts the text time into a unix timestamp
                    # (note: strftime("%s") is platform-dependent; it works on Linux)
                    mint = int(datetime.strptime(first_taken, '%Y-%m-%d %H:%M:%S').strftime("%s"))
                    maxt = int(datetime.strptime(first_taken, '%Y-%m-%d %H:%M:%S').strftime("%s"))
                    minp = int(first_posted)
                    maxp = int(first_posted)
                    # counts the number of pictures in the album
                    album_size = 0
                    # number of pictures of the species of interest in the album
                    num_species = 0
                    # loops through each picture in the photoset, creating a Photo object for each image
                    for j in photosets['photoset']['photo']:
                        newphoto = Photo(photoId=j['id'])
                        add_photo_url(newphoto)
                        add_photo_description(newphoto)
                        # add_photo_location(newphoto)
                        # TODO: make photo_list a list of Photo objects (append newphoto)
                        # instead of a list of photo ids, which it currently is (see line 177)
                        taken = json.loads(
                            flickrObj.photos.getInfo(photo_id=j['id']).decode(
                                encoding='utf-8'))['photo']['dates']['taken']
                        # converts the text time into a unix timestamp
                        taken = int(datetime.strptime(taken, '%Y-%m-%d %H:%M:%S').strftime("%s"))
                        posted = int(json.loads(
                            flickrObj.photos.getInfo(photo_id=j['id']).decode(
                                encoding='utf-8'))['photo']['dates']['posted'])
                        # updates the max/min times if this photo is later/earlier respectively
                        if taken < mint:
                            mint = taken
                        if taken > maxt:
                            maxt = taken
                        if posted < minp:
                            minp = posted
                        if posted > maxp:
                            maxp = posted
                        # adds the photo id to the photo_list attribute of the album object
                        newalbum.photo_list.append(j['id'])
                        album_size += 1
                        # checks whether the picture is tagged with the species of interest
                        if j['id'] in photolist:
                            num_species += 1
                    # calculates the time range the album spans for taken/posted
                    newalbum.time_range_posted = int(maxp) - int(minp)
                    newalbum.time_range_taken = int(maxt) - int(mint)
                    # updates the album size
                    newalbum.size = album_size
                    # ratio of species-of-interest photos to total photos in the album
                    newalbum.species_ratio = float(num_species) / float(album_size)
                    # adds the album to the albumlist
                    albumlist[newalbum.sid] = newalbum
    return albumlist
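# Illustrative usage (sketch, not original code): build the album map and
# print a short summary per album. Assumes `key` and `secret` are valid
# Flickr API credentials defined at module level, as the function above expects.
def _example_get_albums():
    albums = get_albums()
    for sid, album in albums.items():
        print(sid, album.size, round(album.species_ratio, 2))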