def InitFlickr():
    """Create the global Flickr client and resolve the configured username.

    Side effects: sets the module globals ``flickr``, ``userid`` and
    ``username``.  If the lookup fails for any reason (user not found,
    non-"ok" response status, or a FlickrError), ``userid`` and
    ``username`` end up as None.
    """
    global flickr
    global userid
    global username
    Log("Creating Flickr Client")
    flickr = flickrapi.FlickrAPI(FLICKR_API_KEY, cache=True)
    flickr.cache = flickrapi.SimpleCache(timeout=300, max_entries=200)
    Log("Finding Flickr user '%s'" % Prefs.Get(PLUGIN_PREF_USERNAME))
    # Initialize up front so a non-"ok" response status cannot leave the
    # globals unassigned/stale (the original only set them on some paths).
    userid = None
    username = None
    try:
        response = flickr.people_findByUsername(
            username=Prefs.Get(PLUGIN_PREF_USERNAME))
        Log("Response Status: %s" % response.attrib['stat'])
        if response.attrib["stat"] == "ok":
            Log("User found")
            user = response.find("user")
            # Only accept the match when the <username> element has text.
            if user.find("username").text is not None:
                userid = user.attrib["id"]
                username = user.find("username").text
    except FlickrError as message:  # 'as' form: valid on Python 2.6+ and 3.x
        Log("Flickr Error: %s" % message)
        userid = None
        username = None
def __init__(self, user_id):
    """Build a JSON-format Flickr client with a small cache, remember the
    target user id, then immediately load that user's photos."""
    client = flickrapi.FlickrAPI(
        FLICKR_API_KEY,
        FLICKR_API_SECRET,
        format='json',
        store_token=False,
    )
    client.cache = flickrapi.SimpleCache(timeout=300, max_entries=200)
    self.flickr = client
    self.user_id = user_id
    self.loadPhotos()
def test_max_entries(self):
    """Storing more than max_entries items triggers a cull down to size."""
    limit = 90
    cache = flickrapi.SimpleCache(max_entries=limit)
    for idx in six.moves.range(100):
        cache.set('key-%03d' % idx, 'value')
    # A cull removes max_entries / cull_frequency items once the limit
    # is exceeded, so exactly that many of the 100 stores are gone.
    culled = float(limit) / cache.cull_frequency
    self.assertEqual(100 - culled, len(cache))
def links_from_flickr(topic):
    """Yield image URLs from a Flickr relevance search for *topic*.

    For each photo found, the largest available size (in SIZES preference
    order) is yielded.  Photos offering none of the preferred sizes count
    as misses; after 100 misses the generator stops.  Network/API errors
    are printed and end the stream (best effort).
    """
    # NOTE(review): hard-coded API credentials — these should live in
    # configuration or environment variables, not in source control.
    KEY = '2a6eeafdc1f3e6f648d5ae1e17793666'
    SECRET = '8c89b7612bcb2675'
    SIZES = ["url_o", "url_k", "url_h", "url_l", "url_c"]  # in order of preference
    # Size reference:
    # - url_o:  Original      (4520 x 3229)
    # - url_k:  Large 2048    (2048 x 1463)
    # - url_h:  Large 1600    (1600 x 1143)
    # - url_l:  Large 1024    (1024 x 732)
    # - url_c:  Medium 800    (800 x 572)
    # - url_z:  Medium 640    (640 x 457)
    # - url_m:  Medium 500    (500 x 357)
    # - url_n:  Small 320     (320 x 229)
    # - url_s:  Small 240     (240 x 171)
    # - url_t:  Thumbnail     (100 x 71)
    # - url_q:  Square 150    (150 x 150)
    # - url_sq: Square 75     (75 x 75)
    extras = ','.join(SIZES)
    flickr = flickrapi.FlickrAPI(KEY, SECRET, cache=True)
    week = 60 * 60 * 24 * 7
    flickr.cache = flickrapi.SimpleCache(timeout=week, max_entries=99999)
    photos = flickr.walk(
        text=topic,            # search by image title and image tags
        extras=extras,         # request the urls for each size we want
        privacy_filter=1,      # search only for public photos
        per_page=50,
        sort='relevance')      # most relevant results first
    misses = 0
    try:
        for photo in photos:
            # Try sizes largest-first and yield the first one available.
            for size in SIZES:
                url = photo.get(size)
                if url:
                    yield url
                    break
            else:
                # No preferred size on this photo; give up after 100 such.
                misses += 1
                if misses >= 100:
                    return
    except Exception as e:
        # Best-effort: any API/iteration failure just ends the stream.
        print(e)
def test_cache_write(self):
    '''tests that the call result is written to cache'''
    photo_id = '2333478006'
    cache_key = (
        'api_key=%s'
        '&photo_id=%s'
        '&method=flickr.photos.getInfo'
        '&format=rest' % (key, photo_id)
    )
    api = flickrapi.FlickrAPI(key, store_token=False, format='rest')
    api.cache = flickrapi.SimpleCache()
    # Cache starts empty; the call below should populate it.
    self.assertEqual(0, len(api.cache))
    info = api.photos_getInfo(photo_id=photo_id)
    self.assertEqual(info, api.cache.get(cache_key))
def test_cache_read(self):
    '''Tests that cached data is returned if available'''
    photo_id = '2333478006'
    cache_key = (
        'api_key=%s'
        '&photo_id=%s'
        '&method=flickr.photos.getInfo'
        '&format=rest' % (key, photo_id)
    )
    faked_value = "FAKED_VALUE"
    api = flickrapi.FlickrAPI(key, store_token=False, format='rest')
    api.cache = flickrapi.SimpleCache()
    # Pre-seed the cache; the API call must return this value verbatim
    # instead of hitting the network.
    api.cache.set(cache_key, faked_value)
    info = api.photos_getInfo(photo_id=photo_id)
    self.assertEqual(faked_value, info)
# NOTE(review): this chunk begins mid-function — the `def` that owns the
# `try`/`return md_file` below is outside this view, and the final `with`
# statement's body is also cut off.  Reformatted only; tokens unchanged.
    try:
        # Look up all available sizes for photo id `i`, then pick the
        # "Medium" rendition's source URL.
        img = flickr.photos.getSizes(photo_id=i)
        copy_img = img
        image_url = [i['source'] for i in copy_img['sizes']['size'] if i['label'] == "Medium"][0]
        # Prepend a markdown image tag, then record the joined text.
        f.insert(0, "![](" + image_url + ")")
        print("\n\n".join(f))
        md_file.append("\n\n".join(f))
    except (ConnectionError, flickrapi.exceptions.FlickrError) as e:
        # Best-effort: skip photos whose sizes can't be fetched.
        pass
    return md_file


if __name__ == "__main__":
    # Initialize Flickr API with cache
    flickr = flickrapi.FlickrAPI(api_key, api_secret, format='parsed-json')
    flickr.cache = flickrapi.SimpleCache(timeout=300, max_entries=200)
    # Get random chunks of five lines
    five_chunks = create_five_chunks()
    # Do the next bit 3 times for a high enough word count
    all_photo_ids = []
    for n in range(1, 4):
        photo_ids = get_photo_ids(n)
        all_photo_ids.extend(photo_ids)
    # Put it all together!
    md_file = match_with_text(all_photo_ids, five_chunks)
    # NOTE(review): the body of this `with` block continues past this chunk.
    with open('shakes_summary.md', 'w') as f:
def test_delete(self):
    """A deleted key must no longer be present in the cache."""
    cache = flickrapi.SimpleCache()
    name = 'abc'
    cache.set(name, 'def')
    cache.delete(name)
    self.assertFalse(name in cache)
def test_expire(self):
    """An entry vanishes once its timeout (1s here) has elapsed."""
    entry_key = 'abc'
    cache = flickrapi.SimpleCache(timeout=1)
    cache.set(entry_key, 'def')
    # Sleep slightly past the timeout so the entry is guaranteed stale.
    time.sleep(1.1)
    self.assertFalse(entry_key in cache)
def test_store_retrieve(self):
    """A stored value comes back unchanged via get()."""
    cache = flickrapi.SimpleCache()
    stored_key, stored_value = 'abc', 'def'
    cache.set(stored_key, stored_value)
    self.assertEqual(stored_value, cache.get(stored_key))