def flickr_auth():
    # Convert the frob passed back from Flickr's auth system into an auth token.
    # Flask view: validates that the Flickr account the user just authorized
    # matches the screen name chosen at registration, then commits or rolls
    # back the pending account accordingly.
    api_key = os.environ['PARAM1']
    api_secret = os.environ['PARAM2']
    flickr = FlickrAPI(api_key, api_secret, store_token = False)
    token = flickr.get_token(request.values['frob'])
    # Retrieve the Flickr screen name associated with the dude that just authenticated
    rsp = flickr.auth_checkToken(auth_token = token, format = 'xmlnode')
    flickr_screen_name = rsp.auth[0].user[0].attrib['username']
    # Pending user object stashed in the session during registration.
    user = session['tmp_flickr_user'];
    # Check if authenticated screen name matches screen name entered on account creation
    if flickr_screen_name.lower() == user.name.lower():
        user.flickr_auth = True
        db.session.add(user)
        db.session.commit()
        flash('You have successfully authenticated your Flickr account. Welcome.')
        return redirect(url_for('index')) # Send newly minted user to their Brickr landing page
    else:
        flash('Your chosen Screen Name does not match the Screen Name you logged into Flickr with! Try this one.')
        db.session.rollback()
        return redirect(url_for('users.register', next = oid.get_next_url(), do_flickr_auth = True, screen_name = flickr_screen_name, real_name = user.real_name, email = user.email)) # Send user back to profile creation page
    # NOTE(review): unreachable -- both branches above return; kept as a
    # belt-and-braces failure message.
    return 'FAILED to Authenticate with Flickr. FAILED. HARD. Call Remi.'
def get_urls(image_tag, MAX_COUNT):
    """Search Flickr for `image_tag` and write up to MAX_COUNT original-size
    photo URLs to '<image_tag>_urls.csv' in the current directory.

    Fixes: `photo.get('url_o')` returns None instead of raising when the
    original-size URL is unavailable, so the original try/bare-except never
    fired and None values were silently appended to the CSV. A bare `except:`
    also swallows KeyboardInterrupt/SystemExit.
    """
    flickr = FlickrAPI(key, secret)
    photos = flickr.walk(text=image_tag, tag_mode='all', tags=image_tag,
                         geo_context=2, extras='url_o', per_page=50,
                         sort='relevance')
    count = 0
    urls = []
    for photo in photos:
        if count < MAX_COUNT:
            count = count + 1
            print("Fetching url for image number {}".format(count))
            url = photo.get('url_o')
            if url is not None:
                urls.append(url)
            else:
                # No original-size URL available for this photo.
                print("Url for image number {} could not be fetched".format(count))
        else:
            print("Done fetching urls, fetched {} urls out of {}".format(
                len(urls), MAX_COUNT))
            break
    urls = pd.Series(urls)
    print("Writing out the urls in the current directory")
    urls.to_csv(image_tag + "_urls.csv")
    print("Done!!!")
def get_urls(search_text, image_tags, max_count):
    """Collect up to `max_count` medium-size (url_m) photo URLs for the
    given tags, with a tqdm progress bar."""
    flickr = FlickrAPI(key, secret)
    # API documentation: https://www.flickr.com/services/api/flickr.photos.search.html
    photo_stream = flickr.walk(
        text=image_tags,
        tag_mode='any',
        tags=image_tags,
        extras='url_m',  # url_o for original-res images (others: s, m, l)
        per_page=500,
        sort='relevance',
        content_type=1,
        media='photos')
    collected = []
    with tqdm(total=max_count) as progress:
        for index, photo in enumerate(photo_stream):
            if index >= max_count:
                break
            try:
                collected.append(photo.get('url_m'))
            except Exception:
                progress.write(
                    "URL for image number {} could not be fetched".format(
                        index))
            progress.update()
    return collected
def currentFlickrTitle():
    '''Return the title of the Flickr image currently showing in the browser.

    The function works through Apple Events and supports only the Safari
    browser. Returns a human-readable error string when the front page is
    not a Flickr image or the title element is absent.
    '''
    # Flickr parameters
    fuser = '******'
    key = 'Get key from Flickr'
    secret = 'Get secret from Flickr'
    # Get the image ID.
    try:
        imageID = currentFlickrID()
    except IndexError:
        return "Not a Flickr image"
    # Establish the connection with Flickr.
    flickr = FlickrAPI(api_key=key, secret=secret)
    # Get the title. (The unreachable `break` after `return` was removed.)
    etree = flickr.photos_getInfo(photo_id=imageID, format='etree')
    for i in etree[0]:
        if i.tag == 'title':
            return i.text
    # If the title wasn't found.
    return "Title not found"
def __init__(
    self,
    key,
    secret,
    httplib=None,
    dryrun=False,
    verbose=False,
    ):
    """Instantiates an Offlickr object

    An API key is needed, as well as an API secret"""

    self.__flickrAPIKey = key
    self.__flickrSecret = secret
    self.__httplib = httplib

    # Get authentication token
    # note we must explicitly select the xmlnode parser to be compatible with FlickrAPI 1.2
    self.fapi = FlickrAPI(self.__flickrAPIKey, self.__flickrSecret,
                          format='xmlnode')
    # Legacy two-step desktop auth flow (pre-OAuth flickrapi API).
    (token, frob) = self.fapi.get_token_part_one()
    if not token:
        # raw_input is the Python 2 builtin; this module targets Python 2.
        raw_input('Press ENTER after you authorized this program')
    self.fapi.get_token_part_two((token, frob))
    self.token = token
    # Verify the login and remember the authenticated user's Flickr id.
    test_login = self.fapi.test_login()
    uid = test_login.user[0]['id']
    self.flickrUserId = uid
    self.dryrun = dryrun
    self.verbose = verbose
def run(write_directory, exiv2_file_path, from_date):
    """Fetch all geotagged photos and write exiv2 script for them.

    Retrieve all geotagged photos taken since `from_date` to
    `write_directory` and write a set of exiv2(1) commands storing geotags
    in EXIF to `exiv2_file_path`.

    `from_date` is a string YYYY-MM-DD, `write_directory` and
    `exiv2_file_path` are valid directory and file names, respectively.
    """
    # NOTE(review): this handle is never closed; a `with` block would be safer.
    exiv2_file = open(exiv2_file_path, 'w')

    # Start flickring
    flickr = FlickrAPI(API_KEY, API_SECRET, format='etree')

    # Authorize -- legacy two-step desktop flow (Python 2 era flickrapi).
    (token, frob) = flickr.get_token_part_one(perms='read')
    if not token:
        raw_input('Press ENTER after you authorized this program')
    flickr.get_token_part_two((token, frob))

    print 'Retrieving list of geotagged photos taken since %s...' % from_date
    photos = flickr.photos_getWithGeoData(min_date_taken=from_date).getiterator('photo')

    # Retrieve photo to `write_directory`, write exiv2 commands to
    # scriptfile.
    for photo in photos:
        title, url, location = get_photo_data(flickr, photo)
        print 'Retrieving photo %s...' % title
        filename, headers = urlretrieve(url, os.path.join(write_directory, \
            os.path.basename(url)))
        write_location_commands(os.path.abspath(filename), location, exiv2_file)
def get_urls(image_tag, max_amount, key, secret):
    """Return up to `max_amount` original-size Flickr photo URLs matching
    `image_tag`.

    Fix: the exception handler was a bare `except:`, which also swallows
    KeyboardInterrupt/SystemExit; narrowed to `except Exception`.
    """
    # using the flickr api library to get the urls for returned images
    flickr = FlickrAPI(key, secret)
    images = flickr.walk(text=image_tag, tag_mode='all', tags=image_tag,
                         extras='url_o', per_page=50, sort='relevance')
    count = 0
    urls = []
    for image in images:
        if count < max_amount:
            count = count + 1
            print("Fetching url for image number {}".format(count))
            try:
                url = image.get('url_o')
                if url is not None:
                    urls.append(url)
                else:
                    print(
                        "Url for image number {} returned None".format(count))
            except Exception:
                print("Url for image number {} could not be fetched".format(
                    count))
        else:
            print("Done fetching urls, fetched {} urls out of {}".format(
                len(urls), max_amount))
            break
    return urls
def cb():
    # Callback: fetch the comment list for this photo from Flickr;
    # returns {} on any API failure. (Python 2 exception syntax.)
    fapi = FlickrAPI(config["api_key"], config["api_secret"])
    try:
        rsp = fapi.photos_comments_getList(apikey=config["api_key"],
                                           photo_id=self.image)
    except Exception, msg:
        log.debug("Exception getting comments: %s" % msg)
        return {}
def remove_from_group(self, token):
    # Remove this photo from the configured group pool; returns False on
    # API failure. (Python 2 exception syntax.)
    fapi = FlickrAPI(config["api_key"], config["api_secret"], token=token)
    try:
        rsp = fapi.groups_pools_remove(photo_id=self.image,
                                       group_id=config["group_id"])
    except Exception, msg:
        log.debug("Exception removing from group: %s" % (msg))
        return False
def get_urls(image_tag, max_count):
    """Search Flickr for `image_tag`, write up to `max_count` original-size
    URLs to '<image_tag>_urls.csv', and return that file name.

    Fixes: `photo.get('url_o')` returns None rather than raising, so the
    original bare-except handler (whose body was the no-op expression `url`)
    never did anything useful, and None could be appended to the output.
    Missing URLs are now skipped explicitly.
    """
    flickr = FlickrAPI(key, secret)
    photos = flickr.walk(text=image_tag, tag_mode='all', tags=image_tag,
                         extras='url_o', per_page=50, sort='relevance')
    count = 0
    urls = []
    for photo in photos:
        if count < max_count:
            count = count + 1
            url = photo.get('url_o')
            if url is not None:
                urls.append(url)
                print(url)
        else:
            break
    urls = pd.Series(urls)
    file_name = image_tag + "_urls.csv"
    urls.to_csv(file_name)
    return file_name
def currentFlickrTitle():
    '''Return the title of the Flickr image currently showing in the browser.

    The function works through Apple Events and supports only the Safari
    browser. Returns a human-readable error string when the front page is
    not a Flickr image or the title element is absent.
    '''
    # Flickr parameters
    fuser = '******'
    key = 'Get key from Flickr'
    secret = 'Get secret from Flickr'
    # Get the image ID.
    try:
        imageID = currentFlickrID()
    except IndexError:
        return "Not a Flickr image"
    # Establish the connection with Flickr.
    flickr = FlickrAPI(api_key=key, secret=secret)
    # Get the title. (The unreachable `break` after `return` was removed.)
    etree = flickr.photos_getInfo(photo_id = imageID, format = 'etree')
    for i in etree[0]:
        if i.tag == 'title':
            return i.text
    # If the title wasn't found.
    return "Title not found"
def _reload(self):
    # Here, we grab some stuff from the pool, filter it against what we already have in the
    # db table, and make sure that we've got at least 20 items.
    # (Python 2 exception syntax.)
    fapi = FlickrAPI(config['api_key'], config['api_secret'])
    bite_size = 20
    ct_checked = 0
    ct_added = 0
    ct_total = 200  # so long as this is > 0, we're ok.
    page = 1
    # Page through the group pool until a full "bite" has been added or the
    # pool is exhausted.
    while ct_added < bite_size and ct_checked < ct_total:
        log.debug('downloading, starting at: %d' % ct_checked)
        try:
            rsp = fapi.groups_pools_getphotos(apikey=config['api_key'],
                                              group_id=config['group_id'],
                                              page=page, per_page=100)
        except Exception, msg:
            log.debug(msg.args)
            return False
        log.debug(rsp)
        photos = rsp.find('photos')
        ct_checked += int(photos.get('perpage'))
        ct_total = int(photos.get('total'))
        log.debug('got photos %s'%photos)
        for photo in photos.getchildren():
            # photo_id, date_added == the tuple of a primary key. If it's there, then we skip it.
            image = photo.get('id')
            log.debug('checking %s'%image)
            dt = int(photo.get('dateadded'))
            if PseudoPing.get(image=image, dt=dt):
                log.debug('already in table')
                continue
            if Decision.get(image=image, fl_ok=True):
                log.debug('already decided good')
                continue
            if not ImageHistory.get(image=image, dt=dt):
                log.debug("adding image history entry, since one doesn't exist yet")
                #undone -- concurrency issue here?
                #undone -- check dup here?
                ih = ImageHistory()
                ih.image = image
                ih.dt = dt
                ih.save()
                ImageHistory.commit()
            p = PseudoPing()
            p.image = image
            p.dt = dt
            p.owner = photo.get('owner')
            p.secret = photo.get('secret')
            log.debug('saving')
            ct_added += 1
            p.save()
            if ct_added >= bite_size:
                # thinking pulling just a small bite at a time would be better -- keep things fresher.
                # rather than sometimes pulling in 100 at a shot.
                break
        page+=1
def get_urls_by_tag(image_tag, max_count=100, url_type='url_o', pickle_file=None):
    """get a number of urls for images by their tags

    Arguments:
        image_tag {[string]} -- [tag applied to search for relevant images]

    Keyword Arguments:
        max_count {int} -- [total number of urls returned] (default: {100})
        url_type {string} -- [type for the urls to be returned, see the top
            of the file for explanation of different url types]
        pickle_file {string} -- [optional path; when given, (urls, views)
            is pickled there]

    Returns:
        [urls] -- [an array of urls (of size max_count), each of which can
            be used to download an image.]
        [views] -- [an array of integers (of size max_count), each of which
            is number of views that the image has]
    """
    flickr = FlickrAPI(FLICKR_KEY, FLICKR_SECRET)
    photos = flickr.walk(text=image_tag, tag_mode='all',
                         extras=','.join([url_type, 'views']),
                         per_page=50, sort='relevance')
    t_prev = time.time()
    count = 0
    urls = []
    views = []
    for photo in photos:
        # Periodic throughput report every TIME_THRESHOLD urls.
        if count % TIME_THRESHOLD == 0 and count != 0:
            print("{} urls downloaded in the past {:.3f} s".format(
                TIME_THRESHOLD, time.time() - t_prev))
            t_prev = time.time()
        if count >= max_count:
            print("all {} photo urls have been saved".format(count))
            break
        try:
            url = photo.get(url_type)
            if url is None:
                # note: count is intentionally NOT incremented on skip
                print('failed to fetch url for image {} '.format(count))
                continue
            urls.append(url)
            views.append(photo.get('views'))
        except Exception:  # narrowed from a bare `except:`
            print('url for image number {} cannot be fetched'.format(count))
        # update the count
        count += 1
    # cast views into integers
    views = [int(i) for i in views]
    if pickle_file is not None:
        with open(pickle_file, 'wb') as handle:
            pickle.dump((urls, views), handle,
                        protocol=pickle.HIGHEST_PROTOCOL)
        print("All photo urls have been saved to pickle file {}".format(
            pickle_file))
    return urls, views
def currentFlickrURL(kind, format = ""):
    '''Return a URL for the Flickr image currently showing in the browser.

    The string parameter "kind" can be either "Short" or one of the standard
    Flickr image sizes: "Original", "Large", "Medium 640", "Medium", "Small",
    "Thumbnail", or "Square". If it's Short, the function will return a
    flic.kr URL for the image page. If it's one of the others, the function
    will return the URL of the image of that size, if available. With
    format='md' a Markdown image-link snippet is returned instead of the
    bare URL. The function works through Apple Events and supports only the
    Safari and Chrome browsers.

    Fix: the `break` statements directly after `return` were unreachable
    dead code and have been removed.
    '''
    # Flickr parameters
    fuser = '******'
    key = 'Get key from Flickr'
    secret = 'Get secret from Flickr'
    # Make sure we're asking for a legitimate kind.
    kind = kind.capitalize()
    kinds = ["Short", "Original", "Large", "Medium 640", "Medium", "Small",
             "Thumbnail", "Square"]
    if kind not in kinds:
        return "Not a legitimate kind of URL"
    # Get the image ID.
    try:
        imageID = currentFlickrID()
    except IndexError:
        return "Not a Flickr image"
    # Establish the connection with Flickr.
    flickr = FlickrAPI(api_key=key, secret=secret)
    # Get the URL.
    if kind == "Short":
        return shorturl.url(photo_id = imageID)
    else:
        esizes = flickr.photos_getSizes(photo_id = imageID, format = 'etree')
        if format == '':
            for i in esizes[0]:
                if i.attrib['label'] == kind:
                    return i.attrib['source']
            # If the size wasn't found.
            return "Size not found"
        elif format == 'md':
            einfo = flickr.photos_getInfo(photo_id = imageID, format = 'etree')
            photourl = einfo.find('photo/urls/url').text
            phototitle = einfo.find('photo/title').text
            if not phototitle:
                phototitle = "Untitled"
            for i in esizes[0]:
                if i.attrib['label'] == kind:
                    jpgurl = i.attrib['source']
                    return "[![" + phototitle + "](" + jpgurl + ")](" + photourl + ")"
            # If the size wasn't found.
            return "Size not found"
def test_authenticate_fail(self):
    # Exercise the legacy two-step token flow with bogus credentials and
    # assert that Flickr rejects the API key.
    flickr = FlickrAPI(FAKEKEY, FAKESECRET)
    try:
        (token, frob) = flickr.get_token_part_one(perms='write')
        if not token:
            # raw_input => Python 2 builtin.
            raw_input("Press ENTER after you authorized this program")
        flickr.get_token_part_two((token, frob))
    except FlickrError as e:
        # NOTE(review): indexing an exception (e[0]) only works on Python 2.
        self.assertEqual(e[0], u'Error: 100: Invalid API Key (Key not found)')
def cb():
    # Callback: fetch this photo's metadata from Flickr; clears the cached
    # "dirty" flag first and returns {} on any API failure.
    # (Python 2 exception syntax.)
    self.fl_dirty = False
    fapi = FlickrAPI(config["api_key"], config["api_secret"])
    try:
        rsp = fapi.photos_getInfo(apikey=config["api_key"],
                                  photo_id=self.image,
                                  secret=self.secret)
    except Exception, msg:
        log.debug("Exception getting image info: %s" % msg)
        return {}
def get_photos(image_tag):
    """Return a Flickr photo walker over public photos matching `image_tag`,
    most relevant first, with URLs for every size listed in SIZES."""
    wanted_sizes = ','.join(SIZES)
    api = FlickrAPI(KEY, SECRET)
    return api.walk(text=image_tag,
                    extras=wanted_sizes,
                    privacy_filter=1,
                    per_page=50,
                    sort='relevance')
def get_urls(image_tag, max_count, ignore_ids=None):
    """Collect up to `max_count` large (url_l) Flickr URLs for `image_tag`,
    skipping any photo id already present in `ignore_ids`, and write both the
    URLs and the updated id list to timestamped CSV files.

    Fixes: mutable default argument (ignore_ids=[]) was shared across calls;
    `== None` replaced with `is None`; the URL was fetched twice per photo;
    bare `except:` removed (photo.get() does not raise for missing keys).
    """
    if ignore_ids is None:
        ignore_ids = []
    extra_url = "url_l"
    flickr = FlickrAPI(key, secret)
    photos = flickr.walk(
        text=image_tag,
        tag_mode='all',
        tags=image_tag,
        extras=extra_url,            # see url_ options above
        per_page=50,
        content_type=1,              # photos only
        is_commons=True,             # no licence restrictions
        orientation='landscape',
        dimension_search_mode='min',
        height='640',
        width='640',
        sort='relevance')
    count = 0
    urls = []
    for photo in photos:
        url = photo.get(extra_url)
        if url is None:
            continue
        photo_id = url_to_id(url)
        if photo_id in ignore_ids:
            continue
        if count >= max_count:
            print("Done fetching urls, fetched {} urls out of {}".format(
                len(urls), max_count))
            break
        print("Fetching url for image number {}".format(count))
        print("Retrieved url: {}".format(url))
        if url == '':
            print("Url could not be fetched")
        else:
            urls.append(url)
            ignore_ids.append(photo_id)
            count += 1
    # urls to csv
    urls = pd.Series(urls)
    now = datetime.now().strftime('%Y-%m-%d_%H%M%S')
    csv_filename = image_tag + "_urls_" + now + ".csv"
    print("Writing out the urls to ", csv_filename)
    urls.to_csv(csv_filename)
    # ignore ids to csv
    ids = pd.Series(ignore_ids)
    ids_filename = image_tag + "_ids_" + now + ".csv"
    print("Writing out the ids to ", ids_filename)
    ids.to_csv(ids_filename)
    print("Done!!!")
def collect_imgs(tag):
    """Return a Flickr walker over public photos matching `tag`, sorted by
    relevance, including URLs for every size in IMG_SIZES."""
    size_list = ','.join(IMG_SIZES)
    client = FlickrAPI(KEY, SECRET)
    return client.walk(text=tag,
                       extras=size_list,
                       privacy_filter=1,
                       per_page=500,
                       sort='relevance')
def cb(self):
    # this is the callback point for the flickr auth.
    # we'll get a parameter of ?frob=123412341234
    # we call flickr.auth.getToken with the frob, and get
    # xml with the username, the token, and permissions.
    fapi = FlickrAPI(config['api_key'], config['api_secret'])
    frob = request.params.get('frob')
    if not frob:
        return "Invalid Response"
    rsp = fapi.auth_getToken(frob=frob)
    auth = rsp.find('auth')
    if not auth:
        return "invalid response from get token"
    username = auth.find('user').get('username').encode('ascii','ignore')
    token = auth.find('token').text
    nsid = auth.find('user').get('nsid')
    if not (username and token):
        return "Invalid Response from getToken"
    # Look the user up by nsid first (stable id), then fall back to the
    # username, creating a fresh account if neither matches.
    user = model.User.get_byNsid(nsid)
    if not user:
        user = model.User.get_byName(username)
        if not user:
            user = model.User()
            user.username = username
            user.nsid = auth.find('user').get('nsid')
            user.make_secret()
            user.save()
        user.nsid = nsid
        user.commit()
    else:
        # people can change usernames, nsids are static.
        if user.username != username:
            user.username=username
            user.commit()
    # Populate the session; only moderators get the token cached.
    session['user'] = username
    session['nsid'] = nsid
    session['mod'] = user.check_mod(token)
    if session['mod']:
        session['token'] = token
    session.save()
    # Send user back to the page he originally wanted to get to
    if session.get('path_before_login'):
        path = session['path_before_login']
        del(session['path_before_login'])
        redirect(url(path))
    else:
        if session.get('mod'):
            redirect(url('/ping/index'))
        redirect(url('/profile/bookmarklet'))
def tag(self, tags, token):
    # Add tags (a list or a comma-separated string) to this photo on
    # Flickr; returns False on failure. (Python 2 idioms: comma except
    # syntax, type() comparison.)
    fapi = FlickrAPI(config["api_key"], config["api_secret"], token=token)
    if type(tags) == type([]):
        tags = ",".join(tags)
    try:
        rsp = fapi.photos_addTags(photo_id=self.image, tags=tags)
    except Exception, msg:
        log.debug("Exception adding tags (%s) to %s: %s" % (tags, self.image, msg))
        return False
def get_photos(image_tag):
    """Build and return a Flickr photo walker for `image_tag`.

    Searches image titles and tags; only public photos are returned, most
    relevant first, with the URL of each size listed in SIZES attached.
    """
    client = FlickrAPI(KEY, SECRET)
    return client.walk(
        text=image_tag,          # it will search by image title and image tags
        extras=','.join(SIZES),  # get the urls for each size we want
        privacy_filter=1,        # search only for public photos
        per_page=50,
        sort='relevance')        # we want what we are looking for to appear first
def get_photos(image_tag):
    """Return a relevance-sorted Flickr walker over public photos that
    match `image_tag`, carrying URLs for every size in SIZES."""
    size_extras = ','.join(SIZES)
    session = FlickrAPI(KEY, SECRET)
    result = session.walk(
        text=image_tag,
        extras=size_extras,  # get the url for the original size image
        privacy_filter=1,    # search only for public photos
        per_page=50,
        sort='relevance')
    return result
def __init__(self, tags, max_num, num_core):
    """Remember the crawl settings and build the Flickr API client."""
    self.tags = tags
    self.max_num = max_num
    self.num_core = num_core
    # module-level `key`/`secret` hold the API credentials
    self.flickr = FlickrAPI(key, secret)
    banner = f'Using flickr API\nTargets: {self.tags}\nMax num of image: {self.max_num}'
    print(banner)
def get_flickr(API_KEY, SECRET, TOKEN=None):
    # Build a FlickrAPI client. Without a token, run the legacy two-step
    # desktop auth flow requesting write permissions (raw_input => this
    # targets Python 2); with a token, construct the client directly.
    if TOKEN is None:
        flickr = FlickrAPI(API_KEY, SECRET)
        (token, frob) = flickr.get_token_part_one(perms='write')
        if not token:
            raw_input("Press ENTER after you authorized this program")
        flickr.get_token_part_two((token, frob))
    else:
        flickr = FlickrAPI(api_key=API_KEY, secret=SECRET, token=TOKEN)
    return flickr
def cb():
    # Callback: ask Flickr for this photo's context within the configured
    # group pool. A ": 2:" error code in the message means "photo not in
    # pool" and maps to {'in-pool': False}; any other failure returns {}.
    # (Python 2 exception syntax.)
    fapi = FlickrAPI(config["api_key"], config["api_secret"])
    try:
        rsp = fapi.groups_pools_getContext(
            apikey=config["api_key"],
            photo_id=self.image,
            group_id=config["group_id"]
            )
    except Exception, msg:
        log.debug(msg.args)
        if ": 2:" in msg.args[0]:
            return {"in-pool": False}
        return {}
def __init__(self, api_key, secret=None, token=None, store_token=False, cache=False, **kwargs): FlickrAPI.__init__(self, api_key, secret=secret, token=token, store_token=store_token, cache=cache, **kwargs) # Thread-local HTTPConnection, see __flickr_call self.thr = ThreadLocal() if token: return (token, frob) = self.get_token_part_one(perms='read') if not token: print "A browser window should have opened asking you to authorise this program." raw_input("Press ENTER when you have done so... ") self.get_token_part_two((token, frob))
def get_images(image_tag='honeybees on flowers', n_images=10):
    """Return a Flickr walker over photos matching `image_tag`, with the
    URL extras listed in SIZES attached to each result."""
    wanted_sizes = ','.join(SIZES)
    api = FlickrAPI(API_KEY['key'], API_KEY['secret'])
    # https://www.flickr.com/services/api/explore/?method=flickr.imgs.licenses.getInfo
    license = ()
    walker = api.walk(
        text=image_tag,      # http://www.flickr.com/services/api/flickr.imgs.search.html
        extras=wanted_sizes, # get urls for acceptable sizes
        per_page=500,        # 1-500
        license=license,
        sort='relevance')
    return walker
def search_image(image_name):
    """Search Flickr for `image_name` and return a validated image URL,
    or False when the URL check fails."""
    size_extras = ','.join(SIZES)
    api = FlickrAPI(KEY, SECRET)
    photo = api.walk(text=image_name,
                     privacy_filter=1,
                     extras=size_extras,
                     sort='relevance',
                     per_page=5)
    img_url = url(photo)
    # Preserve the original loose comparison against False.
    if check_url(img_url) == False:
        return False
    return img_url
def currentFlickrURL(kind):
    '''Return a URL for the Flickr image currently showing in the browser.

    The string parameter "kind" can be either "Short" or one of the standard
    Flickr image sizes: "Original", "Large", "Medium 800", "Medium 640",
    "Medium", "Small 320", "Small", "Thumbnail", "Large Square", or "Square".
    If it's Short, the function will return a flic.kr URL for the image page.
    If it's one of the others, the function will return the URL of the image
    of that size, if available. The function works through Apple Events and
    supports only the Safari browser.

    Fix: the `break` after `return` was unreachable dead code; removed.
    '''
    # Flickr parameters
    fuser = '******'
    key = 'Get key from Flickr'
    secret = 'Get secret from Flickr'
    # Make sure we're asking for a legitimate kind.
    kind = ' '.join([x.capitalize() for x in kind.split()])
    kinds = ["Short", "Original", "Large", "Medium 800", "Medium 640",
             "Medium", "Small 320", "Small", "Thumbnail", "Large Square",
             "Square"]
    if kind not in kinds:
        return "Not a legitimate kind of URL"
    # Get the image ID.
    try:
        imageID = currentFlickrID()
    except IndexError:
        return "Not a Flickr image"
    # Establish the connection with Flickr.
    flickr = FlickrAPI(api_key=key, secret=secret)
    # Get the URL.
    if kind == "Short":
        return shorturl.url(photo_id = imageID)
    else:
        etree = flickr.photos_getSizes(photo_id = imageID, format = 'etree')
        for i in etree[0]:
            if i.attrib['label'] == kind:
                return i.attrib['source']
        # If the size wasn't found.
        return "Size not found"
def currentFlickrURL(kind):
    '''Return a URL for the Flickr image currently showing in the browser.

    The string parameter "kind" can be either "Short" or one of the standard
    Flickr image sizes: "Original", "Large", "Medium 800", "Medium 640",
    "Medium", "Small 320", "Small", "Thumbnail", "Large Square", or "Square".
    If it's Short, the function will return a flic.kr URL for the image page.
    If it's one of the others, the function will return the URL of the image
    of that size, if available. The function works through Apple Events and
    supports only the Safari browser.

    Fix: the `break` after `return` was unreachable dead code; removed.
    '''
    # Flickr parameters
    fuser = '******'
    key = 'Get key from Flickr'
    secret = 'Get secret from Flickr'
    # Make sure we're asking for a legitimate kind.
    kind = ' '.join([x.capitalize() for x in kind.split()])
    kinds = [
        "Short", "Original", "Large", "Medium 800", "Medium 640", "Medium",
        "Small 320", "Small", "Thumbnail", "Large Square", "Square"
    ]
    if kind not in kinds:
        return "Not a legitimate kind of URL"
    # Get the image ID.
    try:
        imageID = currentFlickrID()
    except IndexError:
        return "Not a Flickr image"
    # Establish the connection with Flickr.
    flickr = FlickrAPI(api_key=key, secret=secret)
    # Get the URL.
    if kind == "Short":
        return shorturl.url(photo_id=imageID)
    else:
        etree = flickr.photos_getSizes(photo_id=imageID, format='etree')
        for i in etree[0]:
            if i.attrib['label'] == kind:
                return i.attrib['source']
        # If the size wasn't found.
        return "Size not found"
def sync_photo(photo):
    # Pull EXIF + info for `photo` from Flickr, copy the interesting fields
    # onto the local model, then re-sync its tag set. Returns the (possibly
    # partially updated) photo. (Python 2 print statements.)
    flickr = FlickrAPI(API_KEY)
    infos = flickr.photos_getInfo(photo_id=photo.flickr_id).find('photo')
    exifs = flickr.photos_getExif(photo_id=photo.flickr_id).find('photo').findall('exif')
    print "\tSyncing Photo: %s" % smart_str(photo.title)
    # Map the EXIF entries we care about onto model fields.
    for exif in exifs:
        if exif.attrib['label'] == 'Aperture' and exif.attrib['tag'] == 'FNumber':
            photo.aperture = exif.find('clean').text
        if exif.attrib['label'] == 'Model' and exif.attrib['tag'] == 'Model':
            photo.camera = exif.find('raw').text
        if exif.attrib['label'] == 'Exposure' and exif.attrib['tag'] == 'ExposureTime':
            photo.exposure = exif.find('raw').text
        if exif.attrib['label'] == 'ISO Speed' and exif.attrib['tag'] == 'ISO':
            photo.iso = exif.find('raw').text
        if exif.attrib['label'] == 'Lens' and exif.attrib['tag'] == 'Lens':
            photo.lens = exif.find('raw').text
    photo.posted_date = datetime.fromtimestamp(float(infos.find('dates').attrib['posted']))
    photo.description = infos.find('description').text
    tags = infos.find('tags').findall('tag')
    photo.tags.clear()  # clear all previous tags if present
    #Save photo prior saving the many to many relationship with tags
    try:
        photo.save()
    except:
        print '\t\tFail to Save Photo: %s' % smart_str(photo.title)
        return photo
    for tag in tags:
        # Tag primary keys are capped at 31 characters.
        tag_id = tag.text[0:31]
        print '\t\tFound tag: %s' % tag_id
        try:
            t = Tag.objects.get(pk=tag_id)
            photo.tags.add(t)
        except:
            # Tag doesn't exist yet -- create it, then attach.
            t = Tag(name=tag.text, raw=tag.attrib['raw'])
            t.save()
            photo.tags.add(t)
    #print '[Flickr] Exif for %s: %s, %s, %s' % (photo.title, photo.lens, photo.iso, photo.posted_date)
    return photo
def get_urls(search='honeybees on flowers', n=10, download=False):
    """Collect up to `n` photo URLs for `search`; optionally download them.

    When `download` is True images are saved under ./images/<folder>/ where
    <folder> is `search` with its last word dropped.

    Fixes: bare `except:` narrowed to `except Exception` (a bare except also
    swallows KeyboardInterrupt); the local `dir` renamed to `save_dir` so it
    no longer shadows the builtin.
    """
    t = time.time()
    flickr = FlickrAPI(key, secret)
    photos = flickr.walk(
        text=search,  # http://www.flickr.com/services/api/flickr.photos.search.html
        extras='url_z',
        per_page=100,
        sort='relevance')
    if download:
        folder_name = ' '.join(search.split(' ')[:-1])
        save_dir = os.getcwd() + os.sep + 'images' + os.sep + folder_name + os.sep  # save directory
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
    urls = []
    for i, photo in enumerate(photos):
        if i == n:
            break
        try:
            # construct url https://www.flickr.com/services/api/misc.urls.html
            url = photo.get('url_o')  # original size
            if url is None:
                url = 'https://farm%s.staticflickr.com/%s/%s_%s_b.jpg' % \
                    (photo.get('farm'), photo.get('server'), photo.get('id'),
                     photo.get('secret'))  # large size
            # download
            if download:
                download_uri(url, save_dir)
            urls.append(url)
            print('%g/%g %s' % (i, n, url))
        except Exception:
            print('%g/%g error...' % (i, n))
    print('Done. (%.1fs)' % (time.time() - t) +
          ('\nAll images saved to %s' % save_dir if download else ''))
def __getattr__(self, attrib):
    # Wrap every FlickrAPI method in a retry loop: transient failures
    # (Flickr error codes 0/112, network/parse errors) set `err` and loop;
    # other Flickr errors are logged (unless listed in args['code_ignore'])
    # and re-raised. (Python 2 exception syntax.)
    handler = FlickrAPI.__getattr__(self, attrib)

    def wrapper(**args):
        i = 0
        code_ignore = args.get("code_ignore", [])
        while True:
            err = None
            try:
                return handler(**args)
            except FlickrError, e:
                code = FlickrError_code(e)

                if code == 0 or code == 112: # FIXME LOW only when "unknown" is returned as the method called
                    err = e
                else:
                    # NOTE(review): nesting reconstructed from a mangled
                    # paste -- codes in code_ignore skip the warning but the
                    # error is still re-raised. Confirm against upstream.
                    if code not in code_ignore:
                        LOG.warning("SafeFlickrAPI: ABORT %s due to %r" % (repr_call(attrib, **args), e))
                    raise
            except (URLError, IOError, ImproperConnectionState, HTTPException), e:
                err = e
            except ExpatError, e:
                LOG.warning("SafeFlickrAPI: ABORT %s due to %r" % (repr_call(attrib, **args), e))
                raise
def get_images(self, character,savedir="downloads"):
    """Fetch images from Flickr through the API and save them locally.

    Images are written to ./<savedir>/flickr/<character>/.

    Args:
        character: search keyword (e.g. "jigglypuff" or "kirby")
        savedir: destination root folder name (default "downloads")
    """
    from flickrapi import FlickrAPI
    from urllib.request import urlretrieve
    import os, time, sys
    pause_seconds = 1
    target_dir = savedir + "/flickr/" + character
    try:
        os.makedirs(target_dir)
    except FileExistsError:
        pass
    api = FlickrAPI(self.key, self.secret, format="parsed-json")
    search_result = api.photos.search(
        text=character,
        per_page=500,
        media="photos",
        sort="relevance",
        safe_search=1
    )
    for entry in search_result["photos"]["photo"]:
        source = "https://live.staticflickr.com/{0}/{1}_{2}.jpg".format(
            entry["server"], entry["id"], entry["secret"])
        destination = target_dir + "/" + entry["id"] + ".jpg"
        # Skip files we already downloaded.
        if os.path.exists(destination):
            continue
        urlretrieve(source, destination)
        time.sleep(pause_seconds)
def __init__(self, browserName):
    # Authorize with Flickr through the named browser and remember the
    # auth token; exits the whole process on failure.
    # (Python 2 print statements.)
    self.fapi = FlickrAPI(flickrAPIKey, flickrSecret)
    self.user_id = ""
    # proceed with auth
    # TODO use auth.checkToken function if available,
    # and wait after opening browser.
    print "Authorizing with flickr..."
    log.info("authorizing with flickr...")
    try:
        self.authtoken = self.fapi.getToken(browser=browserName)
    except:
        print ("Can't retrieve token from browser %s" % browserName)
        print ("\tIf you're behind a proxy server,"
               " first set http_proxy environment variable.")
        print "\tPlease close all your browser windows, and try again"
        log.error(format_exc())
        log.error("can't retrieve token from browser %s", browserName)
        sys.exit(-1)
    if self.authtoken == None:
        print "Unable to authorize (reason unknown)"
        log.error('not able to authorize; exiting')
        sys.exit(-1)
    #Add some authorization checks here(?)
    print "Authorization complete."
    log.info('authorization complete')
def download(self, search_term):
    """Search Flickr for `search_term`, run AWS Rekognition label detection
    on each medium-size result, and write an HTML report to report.html.

    Fix: the local variable `list` shadowed the builtin; renamed.
    NOTE(review): self.report is reassigned each iteration -- presumably
    generate_report accumulates internally; confirm before relying on it.
    """
    flickr = FlickrAPI(self.API_KEY, self.SHARED_SECRET, format='parsed-json')
    search_result = flickr.photos.search(text=search_term, per_page=5,
                                         extras='url_m')
    photos = search_result['photos']
    for photo in photos['photo']:
        if 'url_m' not in photo:
            continue
        url = photo['url_m']
        tfile = TemporaryFile()
        req = requests.get(url, stream=True)
        with tfile:
            tfile.write(req.content)
            tfile.seek(0)
            client = boto3.client('rekognition')
            response = client.detect_labels(Image={'Bytes': tfile.read()},
                                            MinConfidence=50)
            self.report = self.generate_report(photo['title'],
                                               response['Labels'], url)
    self.report += "</body></html>"
    with open('report.html', 'w') as report_html:
        report_html.write(self.report)
def go_download(keyword, dir):
    """Download up to 300 Flickr photos matching `keyword` into ./image/<dir>.

    Fix: the download loop's bare `except:` also caught KeyboardInterrupt/
    SystemExit; narrowed to `except Exception`. (Comments translated from
    Korean.)
    """
    # Build the destination directory.
    savedir = "./image/" + dir
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    # Download via the API --- (*4)
    flickr = FlickrAPI(key, secret, format='parsed-json')
    res = flickr.photos.search(
        text=keyword,       # search keyword
        per_page=300,       # number of results to fetch
        media='photos',     # photos only
        sort="relevance",   # most relevant to the keyword first
        safe_search=1,      # safe search
        extras='url_q, license')
    # Inspect the result
    photos = res['photos']
    pprint(photos)
    try:
        # Download one image at a time --- (*5)
        for i, photo in enumerate(photos['photo']):
            url_q = photo['url_q']
            filepath = savedir + '/' + photo['id'] + '.jpg'
            if os.path.exists(filepath):
                continue
            print(str(i + 1) + ":download=", url_q)
            urlretrieve(url_q, filepath)
            time.sleep(wait_time)
    except Exception:
        import traceback
        traceback.print_exc()
def download(keyword, savedir, api_key, api_secret, max_count=300, size_type="q"):
    """Download up to `max_count` Flickr photos matching `keyword` into
    `savedir`.

    `size_type` selects the Flickr size suffix (e.g. "q" -> url_q
    thumbnails); `max_count` is clamped to Flickr's 500-per-page limit.
    Fix: bare `except:` narrowed to `except Exception` so Ctrl-C aborts.
    """
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    if max_count > 500:
        max_count = 500  # Flickr caps per_page at 500
    url_type = "url_" + size_type
    flickr = FlickrAPI(api_key, api_secret, format='parsed-json')
    res = flickr.photos.search(
        text=keyword,
        per_page=max_count,
        media='photos',
        sort="relevance",
        safe_search=1,
        extras=url_type+',license')
    # print result
    photos = res['photos']
    try:
        # download photos
        for i, photo in enumerate(photos['photo']):
            url = photo[url_type]
            filepath = savedir + '/' + photo['id'] + '.jpg'  # destination path
            if os.path.exists(filepath):
                continue
            print(str(i + 1) + ":download=" + url)
            urlretrieve(url, filepath)
            time.sleep(1)
    except Exception:
        import traceback
        traceback.print_exc()
def go_download(keyword, dir):
    """Download up to 300 Flickr photos matching `keyword` into ./image/<dir>.

    Fix: the download loop's bare `except:` also caught KeyboardInterrupt/
    SystemExit; narrowed to `except Exception`. (Comments translated from
    Japanese.)
    """
    # Decide the image save path
    savedir = "./image/" + dir
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    # Download via the API --- (*4)
    flickr = FlickrAPI(key, secret, format='parsed-json')
    res = flickr.photos.search(
        text=keyword,       # search keyword
        per_page=300,       # number of results to fetch
        media='photos',     # photos only
        sort="relevance",   # most relevant to the keyword first
        safe_search=1,      # safe search
        extras='url_q, license')
    # Inspect the search result
    photos = res['photos']
    pprint(photos)
    try:
        # Download images one at a time --- (*5)
        for i, photo in enumerate(photos['photo']):
            url_q = photo['url_q']
            filepath = savedir + '/' + photo['id'] + '.jpg'
            if os.path.exists(filepath):
                continue
            print(str(i + 1) + ":download=", url_q)
            urlretrieve(url_q, filepath)
            time.sleep(wait_time)
    except Exception:
        import traceback
        traceback.print_exc()
def go_download(keyword, dir):
    """Download up to 300 Flickr photos matching `keyword` into ./image/<dir>.

    Fixes: (1) `os._exists` is a private os-module helper that checks names
    in os.__dict__, not filesystem paths -- it always returned False here,
    so mkdir raised when the directory already existed; replaced with
    os.path.exists. (2) The search text was hard-coded to "soba", silently
    ignoring the `keyword` parameter. (3) The bare `except:` was narrowed
    to `except Exception`. (Comments translated from Japanese.)
    """
    # Image save path
    savedir = "./image/" + dir
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    # Download via the API
    flickr = FlickrAPI(key, secret, format="parsed-json")
    res = flickr.photos.search(
        text=keyword,
        per_page=300,
        media="photos",
        sort="relevance",
        safe_search=1,
        extras="url_q,license")
    # Inspect the search result
    photos = res["photos"]
    pprint(photos)
    try:
        # Download images one at a time
        for i, photo in enumerate(photos["photo"]):
            url_q = photo["url_q"]
            filepath = savedir + "/" + photo["id"] + ".jpg"
            if os.path.exists(filepath):
                continue
            print(str(i + 1) + ":download=", url_q)
            urlretrieve(url_q, filepath)
            time.sleep(wait_time)
    except Exception:
        import traceback
        traceback.print_exc()
def fetch_images(search):
    """Search Flickr for `search`, center-crop each result to a square,
    resize it to 333x333, and save it as a JPEG under the global `path`."""
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    response = flickr.photos.search(text=search, per_page=200,
                                    sort='relevance', page=9,
                                    privacy_filter=1)
    photo = response['photos']['photo']
    size = (333, 333)
    count = 0
    for i in range(len(photo)):
        meta = photo[i]
        url = ("https://farm" + str(meta['farm']) + ".staticflickr.com/" +
               meta['server'] + "/" + str(meta['id']) + "_" +
               meta['secret'] + ".jpg")
        reply = requests.get(url)
        img = Image.open(BytesIO(reply.content))
        img_width, img_height = img.size
        # Square side is limited by the smaller dimension.
        side = min(img_width, img_height)
        crop_height = side
        crop_width = side
        if crop_width > 332:
            left = (img_width - crop_width) // 2
            upper = (img_height - crop_height) // 2
            right = (img_width + crop_width) // 2
            lower = (img_height + crop_height) // 2
            square = img.crop((left, upper, right, lower)).resize(size)
            name = (path + 'z_flickr_chinesisch_' + str(1800 + i))
            square.save(name, 'JPEG')
        # helps us see how many pictures were processed (as it might take a while)
        if count % 100 == 0:
            print('processed so far...' + str(count))
        count += 1
def main(argv):
    """
    Query Flickr via the sharkPulse account and write the results to
    output.csv, reporting progress on stderr.

    # flickr auth information:
    # flickrAPIKey - key
    # flickrSecret - secret
    """
    # NOTE(review): credentials are hard-coded; consider moving them to
    # environment variables or a config file.
    flickrAPIKey = "7bf4ce840f517255cce4295b2c753b63"  # API key of sharkPulse account
    flickrSecret = "e44408e82cb422fc"
    path = "output.csv"
    fapi = FlickrAPI(flickrAPIKey, flickrSecret)  # shared "secret"
    rsp = query_flickr(fapi, flickrAPIKey, flickrSecret)
    photos = list(rsp[0])
    total_images = len(photos)
    sys.stderr.write("Found {0} images\n".format(total_images))
    # (removed: unused `i = getattr(rsp, 'photos', None)` local)
    output_csv(path, photos)
    # `current_image_num` is a module-level counter maintained elsewhere;
    # declare it `global` here only if it must be reset between calls.
    sys.stderr.write("Showing {0} out of {1} images...\n".format(
        str(current_image_num), total_images))
def go_download(keyword, dir):
    """Download up to 300 Flickr photos matching *keyword* into ./image/<dir>.

    Uses module-level ``key``/``secret`` credentials and sleeps ``wait_time``
    seconds between downloads.
    """
    # Path where the images are saved.
    savedir = "./image/" + dir
    # BUG FIX: os._exists() is a private os-module helper that does NOT test
    # the filesystem, so the original check never worked as intended.
    if not os.path.exists(savedir):
        os.makedirs(savedir)  # create the directory (and any missing parent)
    # Download via the Flickr API: set key, secret and response format.
    flickr = FlickrAPI(key, secret, format="parsed-json")
    res = flickr.photos.search(  # search parameters
        # BUG FIX: the search text was hard-coded to "pasta", silently
        # ignoring the *keyword* argument.
        text=keyword,
        per_page=300,
        media="photos",
        sort="relevance",   # order by relevance
        safe_search=1,      # 1 = safe content only
        extras="url_q,license")  # extra fields: download URL and license
    # Inspect the search result.
    photos = res["photos"]
    pprint(photos)
    try:
        # Download the images one at a time.
        for i, photo in enumerate(photos["photo"]):
            url_q = photo["url_q"]
            filepath = savedir + "/" + photo["id"] + ".jpg"
            if os.path.exists(filepath):
                continue  # skip files we already have
            print(str(i + 1) + ":download=", url_q)
            urlretrieve(url_q, filepath)
            time.sleep(wait_time)
    except:
        # Best effort: print the stack trace and carry on.
        import traceback
        traceback.print_exc()
def search_flickr_sorted(searchpage, taglist):
    """
    Searches flickr for photos with specific tags

    :param searchpage: the result-page to return from the search, starts at 1
    :param taglist: list of tags(strings) seperate by comma
    :return: list where every element is a link(string) to a flickr photo,
        sorted by view count descending; empty list if the API call fails
    """
    flickr = FlickrAPI(config["flickr"]["apikey"], '', format='parsed-json')
    photolist = []
    try:
        photoset = flickr.photos.search(tags=taglist, sort='relevance',
                                        tag_mode='all', media="photos",
                                        extras='views', per_page=100,
                                        page=searchpage)
    except FlickrError as e:
        # BUG FIX: e.code is an int, so the original string concatenation
        # raised TypeError; and execution then continued with photoset=None,
        # crashing on the subscript below. Report and bail out instead.
        print('Flickr API error: {}'.format(e.code))
        return []
    for photo in photoset['photos']['photo']:
        photolist.append(["http://flic.kr/p/" + shorturl.encode(photo['id']),
                          int(photo['views'])])
    # Most-viewed photos first.
    photolist.sort(key=lambda x: x[1], reverse=True)
    return [i[0] for i in photolist]
def go_download(keyword, dir):
    """Download up to 300 Flickr photos matching *keyword* into ./images/<dir>.

    Uses module-level ``key``/``secret`` credentials and sleeps ``wait_time``
    seconds between downloads.
    """
    # Path where the images are saved.
    savedir = "./images/" + dir
    # BUG FIX: os._exists() is a private os-module helper that does NOT test
    # the filesystem, so the original check never worked as intended.
    if not os.path.exists(savedir):
        os.makedirs(savedir)  # create the directory (and any missing parent)
    # Download via the Flickr API.
    flickr = FlickrAPI(key, secret, format="parsed-json")
    res = flickr.photos.search(
        text=keyword,       # search word, supplied by the caller
        per_page=300,       # number of images to fetch
        media="photos",     # kind of data to search (photos)
        sort="relevance",   # result ordering (by relevance)
        safe_search=1,      # hide unsafe content
        extras="url_q,license"
    )  # extra fields to fetch (url_q -> image address, license -> license info)
    # Inspect the search result.
    photos = res["photos"]
    pprint(photos)
    try:
        # Download the images one at a time.
        for i, photo in enumerate(photos["photo"]):
            url_q = photo["url_q"]
            filepath = savedir + "/" + photo["id"] + ".jpg"
            if os.path.exists(filepath):
                continue  # skip files we already have
            print(str(i + 1) + ":download=", url_q)
            urlretrieve(url_q, filepath)
            time.sleep(wait_time)
    except:
        # Best effort: print the stack trace and carry on.
        import traceback
        traceback.print_exc()
def __init__(self, key, secret, uid, httplib=None, dryrun=False, verbose=False):
    """Instantiates an unflickr object

    An API key is needed, as well as an API secret and a user id.

    :param key: Flickr API key
    :param secret: Flickr API shared secret
    :param uid: Flickr user id this instance operates on
    :param httplib: optional HTTP library replacement (stored for later use)
    :param dryrun: stored flag; presumably suppresses destructive operations
        elsewhere in the class — TODO confirm
    :param verbose: stored flag; presumably enables chatty output — TODO confirm
    """
    self.flickr_api_key = key
    self.flickr_secret = secret
    self.httplib = httplib
    # Get authentication token
    # note we must explicitly select the xmlnode parser to be compatible
    # with FlickrAPI 1.2
    self.fapi = FlickrAPI(self.flickr_api_key, self.flickr_secret,
                          format='xmlnode')
    # Legacy two-part token flow; raw_input implies Python 2.
    (token, frob) = self.fapi.get_token_part_one()
    if not token:
        # First run: the user must authorize the app in a browser first.
        raw_input('Press ENTER after you authorized this program')
    self.fapi.get_token_part_two((token, frob))
    self.token = token
    self.flickr_user_id = uid
    self.dryrun = dryrun
    self.verbose = verbose
def ask_text(request, keyword):
    """Django view: search Flickr for *keyword* and render the matching
    image URLs with the text_input_page.html template.

    :param request: Django HttpRequest
    :param keyword: search text passed to the Flickr API
    """
    FLICKR_PUBLIC = '62fc03e54fbf6e29d42cb66de850a66b'
    FLICKR_SECRET = '5247f1bd66733dd9'
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    searched_photos = flickr.photos.search(text=keyword)
    photos_container = searched_photos['photos']['photo']
    list_of_urls = []
    for photo in photos_container:
        farm_id = photo['farm']
        server_id = photo['server']
        photo_id = photo['id']
        secret = photo['secret']
        # BUG FIX: the original used `is not 0`, an identity check against an
        # int literal (implementation-dependent, SyntaxWarning on 3.8+).
        if farm_id != 0 and server_id != 0:
            url = 'https://farm' + str(farm_id) + '.staticflickr.com/' + str(
                server_id) + '/' + str(photo_id) + '_' + str(secret) + '.jpg'
            list_of_urls += [url]
    # Wrap each URL in a dict for the template context.
    url_dict = [{'url': url} for url in list_of_urls]
    context = {'url_dict': url_dict, 'keyword': keyword}
    return render(request, 'text_input_app/text_input_page.html', context)
def fetch_from_flickr(self, keyword, api_key, api_secret, number_links=50):
    """ Fetch image links from Flickr.

    :param keyword: search text
    :param api_key: Flickr API key
    :param api_secret: Flickr API secret
    :param number_links: maximum number of photo URLs to return
    :return: list of photo URLs, at most ``number_links`` long
    """
    from flickrapi import FlickrAPI  # we import flickr API only if needed
    # BUG FIX: number_links == 0 caused a ZeroDivisionError below.
    if number_links <= 0:
        return []
    # calculate number of pages (Flickr serves at most 200 items per page):
    items_per_page = min(number_links, 200)
    pages_nbr = int(ceil(number_links / items_per_page))
    links = []
    print("Carwling Flickr Search...")
    flickr = FlickrAPI(api_key, api_secret)
    # Fetch page by page (Flickr pages are 1-based). This folds the
    # original duplicated first-page / next-pages code into one loop.
    for page in range(1, pages_nbr + 1):
        response = flickr.photos_search(api_key=api_key, text=keyword,
                                        per_page=items_per_page,
                                        media='photos', page=page,
                                        sort='relevance')
        images = [im for im in list(response.iter()) if im.tag == 'photo']
        for photo in images:
            links.append(
                "https://farm{0}.staticflickr.com/{1}/{2}_{3}.jpg".format(
                    photo.get('farm'), photo.get('server'),
                    photo.get('id'), photo.get('secret')))
        print("\r >> ", len(links), " links extracted...", end="")
    # BUG FIX: the original promised to "reduce the number of images if too
    # much" but returned everything; cap the result at number_links.
    return links[:number_links]
def sync_set(photoset): flickr = FlickrAPI(API_KEY) request = flickr.photosets_getPhotos(photoset_id=photoset.flickr_id,extras='last_update') photos = request.find('photoset').findall('photo') print "Syncing set: %s" % (smart_str(photoset.title)) updated = False for photo in reversed(photos): photo_id = photo.attrib['id'] try: p = Photo.objects.get(flickr_id = photo_id) updated_date = datetime.fromtimestamp(float(photo.attrib['lastupdate'])) if p.updated_date < updated_date: p.title = photo.attrib['title'] p.updated_date = updated_date sync_photo(p) except: p = Photo( flickr_id = photo_id, server = int(photo.attrib['server']), secret = photo.attrib['secret'], title = photo.attrib['title'], farm = int(photo.attrib['farm']), added_date = datetime.now(), updated_date= datetime.fromtimestamp(float(photo.attrib['lastupdate'])), set = photoset ) sync_photo(p) updated = True return updated
def register():
    # Only anonymous visitors that arrived through the OpenID flow may
    # register; everyone else bounces back to where they came from.
    if g.user is not None or 'openid' not in session:
        return redirect(oid.get_next_url())
    # Registration form
    form = RegisterForm(request.form)
    if 'openid' in session:
        # OpenID users authenticate externally, so no password is required.
        form.password.validators = []
        form.confirm.validators = []
    if form.validate_on_submit():
        # Create a user instance not yet stored in the database
        user = User(form.screen_name.data, form.email.data, form.real_name.data)
        if 'openid' in session:
            user.openid = session['openid']
        else:
            user.password = generate_password_hash(form.password.data)
        if form.flickr_auth.data:
            # User selected to authorize their screen name as a flickr account.
            # Park the unsaved user in the session; it is committed later,
            # after Flickr confirms the screen name.
            session['tmp_flickr_user'] = user;
            api_key = os.environ['PARAM1']
            api_secret = os.environ['PARAM2']
            flickr = FlickrAPI(api_key, api_secret, store_token = False)
            # Redirect to Flickr's web login/authorization page.
            login_url = flickr.web_login_url('read')
            return redirect(login_url)
        else:
            # Insert the record in our data base and commit it
            db.session.add(user)
            db.session.commit()
            # Log the user in, as he now has an ID
            session['user_id'] = user.id
            flash('Thank you for registering with Brickr!')
            return redirect(url_for('index'))  # Send newly minted user to their Brickr landing page
    # GET request or failed validation: (re-)render the registration form.
    return render_template('users/register.html', form = form, next = oid.get_next_url())
def get_flickr(config_file):
    """Return an authenticated FlickrAPI handle built from *config_file*.

    Reads api_key/api_secret from the [Flickr] section and, when no cached
    token with read permission exists, runs the out-of-band OAuth flow,
    prompting the user for the verifier code on stdin.
    (Python 2 code: ConfigParser / unicode / raw_input.)
    """
    config = ConfigParser.ConfigParser()
    config.read(config_file)
    api_key = config.get('Flickr', 'api_key')
    api_secret = config.get('Flickr', 'api_secret')
    # cache=True lets flickrapi reuse a previously stored token.
    api = FlickrAPI(api_key, api_secret, cache=True)
    if not api.token_valid(perms=u'read'):
        # No valid cached token: request one and have the user authorize it
        # in a browser ('oob' = out-of-band, verifier code typed on stdin).
        api.get_request_token(oauth_callback='oob')
        authorize_url = api.auth_url(perms=u'read')
        verifier = unicode(raw_input('Verifier code for %s:' % authorize_url))
        api.get_access_token(verifier)
    return api
def _setup(self, opts):
    """Initialise Flickr credentials, the progress UI, and an authenticated
    API handle.

    :param opts: options object; only the optional ``gui`` attribute is read.
    """
    # flickr auth information:
    self.flickrAPIKey = os.environ['FLICKR_BATCHR_KEY']
    self.flickrSecret = os.environ['FLICKR_BATCHR_SECRET']
    # getattr with a default replaces the original try/except AttributeError
    # dance (the author's comment even asked for exactly this).
    gui = getattr(opts, 'gui', False)
    self.progress = progressBar("Flickr Shell", indeterminate=True, gui=gui)
    self.progress.update("Logging In...")
    # make a new FlickrAPI instance
    self.fapi = FlickrAPI(self.flickrAPIKey, self.flickrSecret)
    # do the whole whatever-it-takes to get a valid token:
    self.token = self.fapi.getToken(browser="Firefox")
def __init__(self):
    """
    Initializes the link between this app and Flickr API using stored
    configuration values.

    Due to the way the Flickr API authorizes, the first time a set of
    credentials are used on a given system, this must be initialized within
    a context that allows a browser window to open and username and
    password to be entered.
    """
    # Credentials and app name come from the "FlickrCommunicator" config section.
    config_dict = utils.read_config_dict("FlickrCommunicator")
    self.flickr = FlickrAPI(config_dict['api_key'], config_dict['api_secret'])
    self.app_name = config_dict['app_name']
    # Legacy two-part token flow (flickrapi < 2.0); raw_input implies Python 2.
    (token, frob) = self.flickr.get_token_part_one(perms='write')
    if not token:
        # First run on this machine: wait until the user has authorized the
        # app in the browser window that the library opened.
        raw_input("Press ENTER after you authorized this program")
    self.flickr.get_token_part_two((token, frob))
class FwiktrFlickrRetriever(FwiktrServiceManager):
    """Service manager that picks a photo from Flickr for a given tag list.

    (Python 2 code: note the print statements in GetNewPicture.)
    """

    # Candidate tag-selection strategies tried when picking a photo.
    transformList = [FwiktrFlickrFuckItSelectionTransform(), FwiktrFlickrFullANDSelectionTransform()]

    def __init__(self):
        # _pic_info holds the metadata of the most recently picked photo.
        self._pic_info = []
        FwiktrServiceManager.__init__(self)
        self.name = "Flickr"

    def _SetupService(self):
        # Build the API handle from the configured key/secret.
        self._fapi = FlickrAPI(self._GetOption('flickr_api_key'), self._GetOption('flickr_api_secret'))

    def GetPictureXML(self):
        # NOTE(review): returns the module-level template unformatted;
        # _GetPictureSpecificData below is the filled-in variant.
        return flickr_info_xml

    def GetPictureData(self):
        # Title is HTML-escaped for embedding; info is the formatted XML.
        return {'picture_title':cgi.escape(self._pic_info['title']), 'picture_info':self._GetPictureSpecificData()}

    def _GetPictureSpecificData(self):
        # Fill the XML template with the picked photo's identifying fields.
        return flickr_info_xml % {'flickr_server':self._pic_info['server'], 'flickr_farm':self._pic_info['farm'], 'flickr_photo_id':self._pic_info['id'], 'flickr_secret':self._pic_info['secret'], 'flickr_owner_id':self._pic_info['owner']}

    def GetNewPicture(self, tag_list):
        """Pick a random photo matching tag_list; returns True on success.

        First tries an AND search over all tags; if that yields nothing,
        falls back to an ANY (OR) search. Any exception is swallowed and
        reported as failure (best-effort by design).
        """
        try:
            # Flickr limits tag counts; cull overly long tag lists first.
            if len(tag_list) > 20:
                culler = FwiktrFlickrTagCullTransform()
                tag_list = culler.RunTransform(tag_list)
            tag_string = ','.join(tag_list)
            if(tag_string == ""): return False
            # Strategy 1: require ALL tags to match.
            pic = FwiktrFlickrFullANDSelectionTransform()
            rsp = self._fapi.photos_search(api_key=self._GetOption('flickr_api_key'),tags=tag_string,tag_mode='all')
            self._fapi.testFailure(rsp)
            print rsp.photos[0]['total']
            if(int(rsp.photos[0]['total']) == 0):
                # Strategy 2: fall back to matching ANY tag.
                pic = FwiktrFlickrFuckItSelectionTransform()
                rsp = self._fapi.photos_search(api_key=self._GetOption('flickr_api_key'),tags=tag_string,tag_mode='any')
                print rsp.photos[0]['total']
                self._fapi.testFailure(rsp)
                if(int(rsp.photos[0]['total']) == 0): return False
            # Pick a random photo from the first result page.
            rand_index = random.randint(0, min(int(rsp.photos[0]['perpage']), int(rsp.photos[0]['total'])))
            self._pic_info = rsp.photos[0].photo[rand_index]
            pic.RunTransform({'total':rsp.photos[0]['total'],'picked':rand_index})
            return True
        except:
            # Best-effort: any error simply means "no picture this time".
            return False
except Exception as error: log += 'ERROR: there was an exception while querying' + '\n' log += str(error) + '\n' print (log) continue successfulQuery = True return response if __name__ == "__main__": flickr_api_key = config.flickrAPIKey flickr_per_page = str(config.maxPhotosPerPage) flickrAPI = FlickrAPI(config.flickrAPIKey, config.flickrSecret) response = flickrAPI.photos_search( api_key=flickr_api_key, sort='interestingness-desc', ispublic='1', media='photos', per_page=flickr_per_page, page='1', text='love', extras = config.photo_extras, min_upload_date='1300822535', max_upload_date='1300882535') count = 0 for photo in response[0]: image = photo.attrib print (extract_url(image))
bild.paste(logo, pos) bild.save(Datei) t = splitext(basename(Datei))[0] PiCam.stop_preview() PiCam.preview_fullscreen= False PiCam.start_preview() print "Auf Flicker hochladen..." response=flickr.upload(filename=Datei, title=t, is_public=1, format='etree') photoID = response.find('photoid').text photoURL = 'http://www.flickr.com/photos/%s/%s/' % (flickruser, photoID) print "Fertig" time.sleep(1) flickr = FlickrAPI(key, secret) (token, frob) = flickr.get_token_part_one(perms='write') if not token: raw_input("Bitte Anwendung im Browser autorisieren und dann ENTER druecken") flickr.get_token_part_two((token, frob)) PiCam = picamera.PiCamera() PiCam.preview_fullscreen= False PiCam.resolution=(1024, 768) PiCam.preview_window = (1, 1 , 800, 600) PiCam.start_preview() IO.setwarnings(False) IO.setmode(IO.BCM) Taster = 23
#!/usr/bin/env python from xml.etree import ElementTree as ET import logging import webbrowser logging.basicConfig(level=logging.INFO) from flickrapi import FlickrAPI class keys: apikey = u'a233c66549c9fb5e40a68c1ae156b370' apisecret = u'03fbb3ea705fe096' print('Creating FlickrAPI object') flickr = FlickrAPI(keys.apikey, keys.apisecret) # ------------------------------------------------------------------------------ print('Step 1: authenticate') if not flickr.token_valid(perms='read'): # Get a request token flickr.get_request_token(oauth_callback='oob') # Open a browser at the authentication URL. Do this however # you want, as long as the user visits that URL. authorize_url = flickr.auth_url(perms='read') webbrowser.open_new_tab(authorize_url) # Get the verifier code from the user. Do this however you # want, as long as the user gives the application the code.