def place_details(self, params):
    third_party_id = params['pids']
    if third_party_id:
        self.third_party_id = third_party_id[0]
    license_included = '0,4,5,6,7,8,9,10'
    extras = 'url_o, views, date_upload, date_taken, url_l'
    flickr = FlickrAPI(self.flickr_public, self.flickr_secret, format='parsed-json')
    data = flickr.photos.search(place_id=self.third_party_id, format='parsed-json',
                                extras=extras, license=license_included)
    cats = flickr.places.getInfo(place_id=self.third_party_id)
    longitude = cats['place'].get('longitude')
    latitude = cats['place'].get('latitude')
    name = cats['place'].get('name')
    keys = [('web', lambda obj: "Not Provided"),
            ('description', lambda obj: "Not Provided"),
            ('rating', lambda obj: 0),
            ('twitter_url', lambda obj: "Not Provided"),
            ('phone', lambda obj: None),
            ('name', lambda obj: name),
            ('types', lambda obj: None),
            ('lat', lambda obj: latitude),
            ('lon', lambda obj: longitude),
            ('third_party_id', lambda obj: self.third_party_id),
            ('pictures', self._map_pictures),
            ('third_party_provider', lambda obj: self.provider_name)]
    result = map_dict(keys, data)
    return [result]
def places(self, params):
    lat = params['lat']
    lon = params['lon']
    logger.info('{} - Searching places'.format(self.provider_name.title()))
    total_results = None
    places = []
    # Need to precalculate the offsets
    for i in range(MAX_REQUESTS_PER_PROVIDER):
        if total_results is None:
            flickr = FlickrAPI(self.flickr_public, self.flickr_secret, format='parsed-json')
            place_details = flickr.places.findByLatLon(lat=lat, lon=lon)
            venues = place_details['places']['place']
            places += map(self._map, venues)
            # only fetch once until offset pagination is implemented
            total_results = len(places)
    for item in places[:2]:
        parameters = {'pids': [item['third_party_id']]}
        item['place_details'] = self.place_details(parameters)
    return places
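# A minimal usage sketch for the two provider methods above. The
# FlickrProvider class name and the coordinates are assumptions for
# illustration; only the places()/place_details() behavior comes from the
# snippets themselves.
provider = FlickrProvider()  # hypothetical provider class holding the methods above
nearby = provider.places({'lat': 51.5074, 'lon': -0.1278})
for place in nearby[:2]:
    # the first two places also carry the detail payload attached above
    print(place['third_party_id'], place['place_details'])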
def currentFlickrTitle():
    '''Return the title of the Flickr image currently showing in the
    browser. The function works through Apple Events and supports only
    the Safari and Chrome browsers.'''
    # Flickr parameters
    fuser = '******'
    key = 'Get key from Flickr'
    secret = 'Get secret from Flickr'
    # Get the image ID.
    try:
        imageID = currentFlickrID()
    except IndexError:
        return "Not a Flickr image"
    # Establish the connection with Flickr.
    flickr = FlickrAPI(api_key=key, secret=secret)
    # Get the title.
    etree = flickr.photos_getInfo(photo_id=imageID, format='etree')
    for i in etree[0]:
        if i.tag == 'title':
            return i.text
    # If the title wasn't found.
    return "Title not found"
def get_images(self, character, savedir="downloads"):
    """Fetch images from Flickr via the API and save them locally.

    Images are saved under ./savedir/flickr/character.

    Args:
        character (str): search keyword, e.g. "jigglypuff" or "kirby"
        savedir (str): destination folder name; defaults to "downloads"
    """
    from flickrapi import FlickrAPI
    from urllib.request import urlretrieve
    import os, time, sys

    wait_time = 1
    savepath = savedir + "/flickr/" + character
    try:
        os.makedirs(savepath)
    except FileExistsError:
        pass
    flickr = FlickrAPI(self.key, self.secret, format="parsed-json")
    result = flickr.photos.search(
        text=character,
        per_page=500,
        media="photos",
        sort="relevance",
        safe_search=1
    )
    for item in result["photos"]["photo"]:
        url = "https://live.staticflickr.com/{0}/{1}_{2}.jpg".format(
            item["server"], item["id"], item["secret"])
        filepath = savepath + "/" + item["id"] + ".jpg"
        if os.path.exists(filepath):
            continue
        urlretrieve(url, filepath)
        time.sleep(wait_time)
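# A usage sketch for get_images, assuming it is a method of a small
# downloader class that stores the Flickr credentials on self.key and
# self.secret; the FlickrDownloader name and credential values are
# placeholders, not part of the original snippet.
class FlickrDownloader:
    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

    get_images = get_images  # bind the function defined above as a method

downloader = FlickrDownloader("YOUR_KEY", "YOUR_SECRET")
downloader.get_images("kirby")
# images land in ./downloads/flickr/kirby/<photo_id>.jpg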
def go_download(keyword, dir):
    # Set the save path
    savedir = "./face_dataset/" + dir
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    # 4. Download via the API
    flickr = FlickrAPI(key, secret, format='parsed-json')
    res = flickr.photos.search(
        text=keyword,        # search keyword
        per_page=300,        # number of results to fetch
        media='photos',      # search photos only
        sort="relevance",    # order by relevance to the keyword
        safe_search=1,       # safe search
        extras='url_n, license')
    # Check the results
    photos = res['photos']
    pprint(photos)
    try:
        # Download one image at a time -- (5)
        for i, photo in enumerate(photos['photo']):
            url_n = photo['url_n']
            filepath = savedir + '/' + photo['id'] + '.jpg'
            if os.path.exists(filepath):
                continue
            print(str(i + 1) + ":download=", url_n)
            urlretrieve(url_n, filepath)
            time.sleep(wait_time)
    except:
        import traceback
        traceback.print_exc()
def go_download(keyword, dir):
    # Decide where to save the images
    savedir = "./image/" + dir
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    # Download via the API --- (*4)
    flickr = FlickrAPI(key, secret, format='parsed-json')
    res = flickr.photos.search(
        text=keyword,        # search keyword
        per_page=300,        # number of results to fetch
        media='photos',      # search photos only
        sort="relevance",    # order by relevance to the keyword
        safe_search=1,       # safe search
        extras='url_q, license')
    # Check the search results
    photos = res['photos']
    pprint(photos)
    try:
        # Download one image at a time --- (*5)
        for i, photo in enumerate(photos['photo']):
            url_q = photo['url_q']
            filepath = savedir + '/' + photo['id'] + '.jpg'
            if os.path.exists(filepath):
                continue
            print(str(i + 1) + ":download=", url_q)
            urlretrieve(url_q, filepath)
            time.sleep(wait_time)
    except:
        import traceback
        traceback.print_exc()
def go_download(keyword, dir):
    # Save path for the images
    savedir = "./image/" + dir
    if not os.path.exists(savedir):
        os.mkdir(savedir)
    # Download via the API
    flickr = FlickrAPI(key, secret, format="parsed-json")
    res = flickr.photos.search(
        text=keyword,
        per_page=300,
        media="photos",
        sort="relevance",
        safe_search=1,
        extras="url_q,license")
    # Check the search results
    photos = res["photos"]
    pprint(photos)
    try:
        # Download one image at a time
        for i, photo in enumerate(photos["photo"]):
            url_q = photo["url_q"]
            filepath = savedir + "/" + photo["id"] + ".jpg"
            if os.path.exists(filepath):
                continue
            print(str(i + 1) + ":download=", url_q)
            urlretrieve(url_q, filepath)
            time.sleep(wait_time)
    except:
        import traceback
        traceback.print_exc()
def text():
    # Use GET to obtain the photo id and keyword
    photoID = request.args.get('photoID')
    keyword = request.args.get('keyword')
    # Use the Flickr API to obtain the image's static url
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    fetchData = flickr.photos.getSizes(photo_id=photoID)
    # Index 6 is the Medium photo option
    sourceURL = fetchData["sizes"]["size"][6]["source"]
    # Use a timestamp to prevent caching
    timeStamp = str(datetime.datetime.now().microsecond)
    path = 'static/images/' + photoID + timeStamp + '.jpg'
    # Download the image
    urllib.request.urlretrieve(sourceURL, path)
    # Find the size of the image and limit the text input length
    image = Image.open(path)
    width, height = image.size
    limit = int(width / 45)
    data = {
        "photoID": photoID,
        "keyword": keyword,
        "path": path,
        "timeStamp": timeStamp,
        "limit": limit
    }
    return render_template("text.html", **data)
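# The view above uses Flask's request object and render_template, so it
# presumably lives in a Flask app; a minimal registration sketch, where the
# app object and the '/text' route path are assumptions rather than part of
# the snippet:
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/text', 'text', text)
# requested as e.g. /text?photoID=<id>&keyword=<word>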
def get_urls(image_tag, MAX_COUNT):
    flickr = FlickrAPI(key, secret)
    photos = flickr.walk(text=image_tag,
                         tag_mode='all',
                         tags=image_tag,
                         extras='url_s',
                         per_page=100,
                         sort='relevance')
    count = 0
    urls = []
    for photo in photos:
        if count < MAX_COUNT:
            print("Fetching url for image number {}".format(count))
            try:
                url = photo.get('url_s').strip()
                if url == "":
                    continue
                urls.append(url)
            except:
                print("Url for image number {} could not be fetched".format(count))
            count = count + 1
        else:
            print("Done fetching urls, fetched {} urls out of {}".format(len(urls), MAX_COUNT))
            break
    urls = pd.Series(urls)
    print("Writing out the urls in the current directory")
    urls.to_csv(image_tag + "_urls.csv")
    print("Done!!!")
def get_urls_by_tag(image_tag, max_count=100, url_type='url_o', pickle_file=None):
    """Get a number of urls for images by their tags.

    Arguments:
        image_tag {string} -- tag used to search for relevant images

    Keyword Arguments:
        max_count {int} -- total number of urls returned (default: {100})
        url_type {string} -- type of the urls to be returned; see the top
            of the file for an explanation of the different url types
        pickle_file {string} -- optional path; when given, (urls, views)
            is pickled there

    Returns:
        [urls] -- an array of urls (of size max_count), each of which can
            be used to download an image
        [views] -- an array of integers (of size max_count), each of which
            is the number of views the image has
    """
    flickr = FlickrAPI(FLICKR_KEY, FLICKR_SECRET)
    photos = flickr.walk(text=image_tag,
                         tag_mode='all',
                         extras=','.join([url_type, 'views']),
                         per_page=50,
                         sort='relevance')
    t_prev = time.time()
    count = 0
    urls = []
    views = []
    for photo in photos:
        if count % TIME_THRESHOLD == 0 and count != 0:
            print("{} urls downloaded in the past {:.3f} s".format(
                TIME_THRESHOLD, time.time() - t_prev))
            t_prev = time.time()
        if count >= max_count:
            print("all {} photo urls have been saved".format(count))
            break
        try:
            url = photo.get(url_type)
            if url is None:
                print('failed to fetch url for image {}'.format(count))
                continue
            urls.append(url)
            views.append(photo.get('views'))
        except:
            print('url for image number {} cannot be fetched'.format(count))
        # update the count
        count += 1
    # cast views into integers
    views = [int(i) for i in views]
    if pickle_file is not None:
        with open(pickle_file, 'wb') as handle:
            pickle.dump((urls, views), handle, protocol=pickle.HIGHEST_PROTOCOL)
        print("All photo urls have been saved to pickle file {}".format(pickle_file))
    return urls, views
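# A usage sketch for get_urls_by_tag: fetch a few urls with view counts,
# pickle them, and read them back; the tag and file name are arbitrary
# examples, not taken from the original module.
urls, views = get_urls_by_tag('sunset', max_count=10, url_type='url_o',
                              pickle_file='sunset_urls.pickle')
with open('sunset_urls.pickle', 'rb') as handle:
    saved_urls, saved_views = pickle.load(handle)
print(len(saved_urls), max(saved_views))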
def _getImageUrl(self, rawUrl):
    flickr = FlickrAPI(config.FLICKR_PUBLIC, config.FLICKR_SECRET, format='json')
    # info = flickr.photos.getInfo(photoId='6337093927')
    # print(info)
    sets = flickr.photosets.getList(user_id='rubund')
    print(sets)
def test_authenticate_fail(self):
    flickr = FlickrAPI(FAKEKEY, FAKESECRET)
    try:
        (token, frob) = flickr.get_token_part_one(perms='write')
        if not token:
            raw_input("Press ENTER after you authorized this program")
        flickr.get_token_part_two((token, frob))
    except FlickrError as e:
        self.assertEqual(e[0], u'Error: 100: Invalid API Key (Key not found)')
def search_image(keyword):
    flickr = FlickrAPI(key, secret, format='parsed-json')
    res = flickr.photos.search(text=keyword,
                               per_page=image_count,
                               media='photos',
                               sort="relevance",
                               safe_search=1,
                               extras='url_q,license')
    return res
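# search_image reads key, secret, and image_count from module scope; a
# minimal sketch of the globals it assumes (values are placeholders):
key = 'YOUR_FLICKR_KEY'
secret = 'YOUR_FLICKR_SECRET'
image_count = 50

res = search_image('mountain')
for photo in res['photos']['photo']:
    print(photo.get('url_q'))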
def get_location_image(location):
    """Return a photo from Flickr, searching by location name."""
    flickr = FlickrAPI(os.environ['FLICKR_KEY'], os.environ['FLICKR_SECRET'],
                       format='parsed-json')
    photo_info = flickr.photos.search(text=location, per_page=1, sort='relevance')
    photo_dict = photo_info['photos']['photo'][0]
    photo_url = ('https://farm' + str(photo_dict['farm']) + '.staticflickr.com/' +
                 photo_dict['server'] + '/' + photo_dict['id'] + '_' +
                 photo_dict['secret'] + '.jpg')
    return photo_url
def get_photos(image_tag):
    extras = ','.join(SIZES)
    flickr = FlickrAPI(KEY, SECRET)
    photos = flickr.walk(text=image_tag,
                         extras=extras,
                         privacy_filter=1,
                         per_page=50,
                         sort='relevance')
    return photos
def __init__(self, api_key, secret, response_format='json', store_token=False):
    self.flickr_api_client = FlickrAPI(api_key=api_key,
                                       secret=secret,
                                       format=response_format,
                                       store_token=store_token)
def collect_imgs(tag):
    extras = ','.join(IMG_SIZES)
    flickr = FlickrAPI(KEY, SECRET)
    imgs = flickr.walk(text=tag,
                       extras=extras,
                       privacy_filter=1,
                       per_page=500,
                       sort='relevance')
    return imgs
def search_api():
    saida_flickr = '/tmp/flickr_output.txt'
    api_file = '/home/lenonr/Dropbox/Arquivos/Flickr/keys'

    # Read the API key and secret from the keys file
    f = open(api_file, 'r')
    list_of_lines = f.read().splitlines()
    api_key = str(list_of_lines[0])
    api_secret = str(list_of_lines[1])
    f.close()

    # Set up the search
    flickr = FlickrAPI(api_key, api_secret, format='parsed-json')
    loop = 10
    array = [
        'Ariane Arianespace', 'Soyuz Rocket', 'Delta ULA', 'Atlas ULA',
        'NewShepard BlueOrigin', 'SpaceX Falcon', 'Electron RocketLab',
        'PSLV ISRO', 'STS NASA', 'Spacewalk', 'Starship', 'Perseverance'
    ]
    with open(saida_flickr, "w") as text_file:
        for x in range(0, len(array)):
            for y in range(0, loop):
                try:
                    extras = 'url_o'
                    space = flickr.photos.search(text=array[x], per_page=loop, extras=extras)
                    photos = space['photos']
                    title = photos['photo'][y]['title']
                    url = photos['photo'][y][extras]
                except KeyError:
                    # Fall back to the medium-size url when the original is unavailable
                    extras = 'url_m'
                    space = flickr.photos.search(text=array[x], per_page=loop, extras=extras)
                    photos = space['photos']
                    title = photos['photo'][y]['title']
                    url = photos['photo'][y][extras]
                title_url = (title + ': ' + url)
                text_file.write("%s\n" % title_url)
def get_urls(image_tag, max_count, ignore_ids=[]):
    extra_url = "url_l"
    flickr = FlickrAPI(key, secret)
    photos = flickr.walk(
        text=image_tag,
        tag_mode='all',
        tags=image_tag,
        extras=extra_url,      # see the url_ options above
        per_page=50,
        content_type=1,        # photos only
        is_commons=True,       # no licence restrictions
        orientation='landscape',
        dimension_search_mode='min',
        height='640',
        width='640',
        sort='relevance')
    count = 0
    urls = []
    for photo in photos:
        url = photo.get(extra_url)
        if url is None:
            continue
        photo_id = url_to_id(url)
        if photo_id not in ignore_ids:
            if count < max_count:
                print("Fetching url for image number {}".format(count))
                print("Retrieved url: {}".format(url))
                if url == '':
                    print("Url could not be fetched")
                else:
                    urls.append(url)
                    ignore_ids.append(photo_id)
                    count += 1
            else:
                print("Done fetching urls, fetched {} urls out of {}".format(
                    len(urls), max_count))
                break
    # urls to csv
    urls = pd.Series(urls)
    now = datetime.now().strftime('%Y-%m-%d_%H%M%S')
    csv_filename = image_tag + "_urls_" + now + ".csv"
    print("Writing out the urls to ", csv_filename)
    urls.to_csv(csv_filename)
    # ignore ids to csv
    ids = pd.Series(ignore_ids)
    ids_filename = image_tag + "_ids_" + now + ".csv"
    print("Writing out the ids to ", ids_filename)
    ids.to_csv(ids_filename)
    print("Done!!!")
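# Because get_urls appends each new photo id to the ignore_ids list in
# place, passing the same list to successive calls makes later batches
# skip photos collected earlier; the tag is an arbitrary example.
seen = []
get_urls('lighthouse', 100, ignore_ids=seen)  # first batch populates `seen`
get_urls('lighthouse', 100, ignore_ids=seen)  # second batch skips those ids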
def get_photos():
    extras = ','.join(config.sizes)
    flickr = FlickrAPI(config.api_key, config.api_secret, cache=True)
    photos = flickr.interestingness.getList(
        extras=extras,                    # get the urls for each size we want
        privacy_filter=1,                 # search only for public photos
        per_page=config.max_image_pull,
        sort='relevance')
    return photos[0]
def init_flickrapi(request):
    token = request.session.get('token')
    f = FlickrAPI(settings.FLICKR_KEY,
                  settings.FLICKR_SECRET,
                  token=token,
                  store_token=False,
                  format='parsed-json')
    return f
def __init__(self, tags, band, album):
    self._tags, self.tags = tags, tags
    self.band = band
    self.album = album
    self.available_fonts = [
        font for font in os.listdir('./fonts')
        if font.lower().endswith('ttf')
    ]
    self.flickr = FlickrAPI(FLICKR_API_KEY, FLICKR_API_SECRET)
    self.clarifai = ClarifaiApi(CLARIFAI_API_KEY, CLARIFAI_API_SECRET)
def __init__(self, tags, max_num, num_core):
    self.tags = tags
    self.max_num = max_num
    self.num_core = num_core
    self.flickr = FlickrAPI(key, secret)
    print(f'Using flickr API\nTargets: {self.tags}\nMax num of images: {self.max_num}')
def get_photos(image_tag):
    extras = ','.join(SIZES)
    flickr = FlickrAPI(KEY, SECRET)
    photos = flickr.walk(
        text=image_tag,
        extras=extras,       # get the url for the original size image
        privacy_filter=1,    # search only for public photos
        per_page=50,
        sort='relevance')
    return photos
def get_flickr_client():
    """Util function to get a Flickr API client.

    :return: flickr_client
    """
    flickr_client = FlickrAPI(
        api_key=os.getenv('FLICKR_API_KEY'),
        secret=os.getenv('FLICKR_API_SECRET'),
        format='parsed-json'
    )
    return flickr_client
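# A usage sketch for get_flickr_client, assuming FLICKR_API_KEY and
# FLICKR_API_SECRET are exported in the environment before the process
# starts; the search text is an arbitrary example.
client = get_flickr_client()
resp = client.photos.search(text='aurora', per_page=5)
print(resp['photos']['total'])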
def get_photos(image_tag):
    extras = ','.join(SIZES)
    flickr = FlickrAPI(KEY, SECRET)
    photos = flickr.walk(
        text=image_tag,      # searches by image title and image tags
        extras=extras,       # get the urls for each size we want
        privacy_filter=1,    # search only for public photos
        per_page=50,
        sort='relevance')    # most relevant results first
    return photos
def print_by_tags(tag):
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    extras = 'url_sq'
    dossier = flickr.photos.search(tags=tag, per_page=1, extras=extras, sort='relevance')
    photos = dossier['photos']
    return photos['photo'][0]['url_sq']
def get(tags):
    flickr = FlickrAPI(config.FLICKR_PUBLIC, config.FLICKR_SECRET, format='parsed-json')
    rnd = 1
    raw = flickr.photos.search(tags=tags[0:1],
                               text=tags[1],
                               tag_mode='all',
                               per_page=rnd,
                               extras=config.extras)
    return raw['photos']['photo'][rnd - 1]['url_m']
def get_flickr_image(self, search_str):
    # extras = 'url_sq,url_t,url_s,url_q,url_m,url_n,url_z,url_c,url_l,url_o'
    extras = 'url_sq,url_t,url_s,url_q,url_m'
    random.seed()
    FLICKR = FlickrAPI(self.flickr_public, self.flickr_secret, format='parsed-json')
    data = FLICKR.photos.search(text=search_str,
                                per_page=1,
                                sort='interestingness-desc',
                                safe_search=3)
    if data['stat'] == 'ok':
        photo_data = FLICKR.photos.search(
            text=search_str,
            per_page=1,
            page=random.randint(1, min(int(data['photos']['total']), 1000)),
            extras=extras,
            sort='interestingness-desc')
        if photo_data['photos']['photo']:
            # Return the largest size available, falling back to smaller ones
            for extra in extras.split(',')[::-1]:
                if extra in photo_data['photos']['photo'][0]:
                    return photo_data['photos']['photo'][0][extra]
def searchimage(query):
    flickr = FlickrAPI(FLICKR_PUBLIC, FLICKR_SECRET, format='parsed-json')
    extras = 'url_o, url_t'
    info = flickr.photos.search(text=query, per_page=1, extras=extras)
    try:
        photos = info['photos']['photo'][0]['url_o']
    except Exception:
        try:
            photos = info['photos']['photo'][0]['url_t']
        except Exception:
            return None
    return photos
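# searchimage prefers the original-size url and falls back to the
# thumbnail, returning None when neither is present; the query here is an
# arbitrary example.
url = searchimage('red panda')
print(url if url is not None else 'no image found')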