def pexels_parse_resp(subject):
    """
    From Pexels API resp, collect the top 4 images from results.

    :param subject: The subject to be used for the image search or
        type(None). If None, random photos are fetched.
    :rtype images: A list containing data on the fetched images.
    :except AttributeError: Occurs when resp fails to fetch images and
        the entries cannot be parsed.  (Fixed docstring typo
        "AttributeErrror".)
    """
    py_pexel = PyPexels(api_key=PEXELS_API_KEY)
    if subject is not None:
        resp = py_pexel.search(query=subject, per_page=4)
    else:
        resp = py_pexel.random(per_page=4)
    images = []
    try:
        # Accessing resp.entries raises AttributeError on a failed fetch,
        # which is handled below.  The enumerate() counter was unused, so
        # a plain loop is sufficient.
        for item in resp.entries:
            image_info = {
                "author_name": item.photographer,
                "full_image": item.src["original"],
                "image_id": item.id,
                "author_profile": item.photographer_url,
            }
            images.append(image_info)
        return images
    except AttributeError as err:
        handle_err(
            f"Failed to parse pexels resp object: {err}\nCheck that your API_KEYs are setup correctly."
        )
def test_search(self):
    """Replay a stored 'red flower' search response via the `responses`
    mock and exercise the page properties and entry fields.

    Fixes: Python 2 print statements (a SyntaxError under Python 3) are
    converted to print() calls, and the fixture file is closed via a
    context manager instead of being leaked.
    """
    index = 'redflower'
    resource_filepath = self.store_mapping[index]
    with open(resource_filepath) as fixture:
        stored_response = json.loads(fixture.read())
    responses.add(
        responses.GET,
        '{}/{}{}'.format(API_ROOT, API_VERSION, stored_response.get('_url')),
        # _url contains only the short path like /popular?page=2&per_page=5
        json=stored_response.get('body'),
        status=stored_response.get('status_code'),
        content_type='application/json',
        adding_headers=stored_response.get('headers'),
        match_querystring=True,
    )
    py_pexels = PyPexels(api_key=api_key)
    search_results_page = py_pexels.search(query='red flower', page=2, per_page=5)
    # Page properties
    print(search_results_page.page)
    print(search_results_page.per_page)
    print(search_results_page.has_next)
    print(search_results_page.has_previous)
    print(search_results_page.link_self)
    print(search_results_page.link_first)
    print(search_results_page.link_last)
    print(search_results_page.link_next)
    print(search_results_page.link_previous)
    # Entries
    for photo in search_results_page.entries:
        print(photo.id, photo.photographer, photo.width, photo.height, photo.url)
        print(photo.src)
def execute_search(key, string, pages, format):
    """Walk every result page for `string`, recording image URLs.

    Each URL (the `format` rendition from the photo's `src` mapping) is
    printed and appended to the module-level `search_urls` list.

    :param key: Pexels API key.
    :param string: search query.
    :param pages: number of results per page.
    :param format: key into each photo's `src` mapping (e.g. 'original').
    """
    client = PyPexels(api_key=key)
    page = client.search(query=string, per_page=pages)
    while True:
        for image in page.entries:
            url = image.src.get(format)
            search_urls.append(url)
            print(url)
        if not page.has_next:
            break
        page = page.get_next_page()
def img(term='Puppies'):
    """Search Pexels for `term` and record up to 5 photo ids and CDN
    links in the module-level `id` and `links` lists.

    NOTE(review): `id` shadows the builtin -- presumably a module-level
    list defined elsewhere in this file; confirm against the caller.

    Fix: the original `while i < 5` wrapper looped forever when the
    search returned zero entries, and re-appended duplicate entries when
    it returned fewer than 5.  Each entry is now processed exactly once,
    capped at 5.
    """
    py_pexels = PyPexels(api_key=pexels_token)
    search_results = py_pexels.search(query=term, per_page=5)
    for i, photo in enumerate(search_results.entries):
        if i >= 5:
            break
        id.append(photo.id)
        l = f"https://images.pexels.com/photos/{photo.id}/pexels-photo-{photo.id}.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=650&w=940"
        links.append(l)
class PyPexelApi:
    """Thin wrapper around PyPexels that returns Photo value objects."""

    # shared default; replaced per-instance in __init__
    pypexels = None

    def __init__(self, pypexels_api_key=None):
        self.pypexels = PyPexels(api_key=pypexels_api_key)

    def get_single_random_photo(self):
        """Return one random photo wrapped in a Photo object."""
        random_photo = self.pypexels.random(per_page=1)
        for photo in random_photo.entries:
            photo_url = str(photo.src['large'])
            # NOTE(review): unlike the other methods this encodes the
            # photographer name to bytes -- kept as-is for compatibility.
            photo_photographer_name = photo.photographer.encode('utf-8')
            photo_photographer_url = str(photo.photographer_url)
            photo_id = str(photo.id)
            photo = Photo(photo_url, photo_photographer_name,
                          photo_photographer_url, photo_id)
            return photo

    def get_photos_by_search(self, search_term=None, limit=None):
        """Return up to `limit` Photo objects matching `search_term`."""
        list_photos = []
        search_results_page = self.pypexels.search(query=search_term, per_page=limit)
        for pexel_photo in search_results_page.entries:
            photo_url = str(pexel_photo.src['large'])
            photo_photographer_name = str(pexel_photo.photographer)
            photo_photographer_url = str(pexel_photo.photographer_url)
            photo_id = str(pexel_photo.id)
            photo = Photo(photo_url, photo_photographer_name,
                          photo_photographer_url, photo_id)
            # Fix: the original called `list.append(photo)` -- the builtin
            # `list` type, not `list_photos` -- which raises TypeError and
            # could never populate the result.
            list_photos.append(photo)
        return list_photos

    def get_popular_photos(self, limit=None):
        """Return up to `limit` Photo objects from the popular feed."""
        list_photos = []
        popular_photos_page = self.pypexels.popular(per_page=limit)
        for pexel_photo in popular_photos_page.entries:
            photo_url = str(pexel_photo.src['large'])
            photo_photographer_name = str(pexel_photo.photographer)
            photo_photographer_url = str(pexel_photo.photographer_url)
            photo_id = str(pexel_photo.id)
            photo = Photo(photo_url, photo_photographer_name,
                          photo_photographer_url, photo_id)
            list_photos.append(photo)
        return list_photos
def execute_search(key, string, pages, format, progress_function):
    """Collect image URLs for `string` into the module-level
    `search_urls` list, clearing any previous results first.

    `progress_function` is invoked with 0 before the crawl starts and
    with the string 'finished' when every page has been visited.
    """
    if search_urls:
        search_urls.clear()
    client = PyPexels(api_key=key)
    results = client.search(query=string, per_page=pages)
    progress_function(0)
    while True:
        for image in results.entries:
            url = image.src.get(format)
            search_urls.append(url)
            print(url)
        if not results.has_next:
            break
        results = results.get_next_page()
    progress_function('finished')
def searchPhotos(searchTerm="", amountPerPage=10, AmountOfPages=5, AmountOfPagesToSkip=5):
    """Collect [url, photographer] pairs from Pexels search results.

    Skips the first AmountOfPagesToSkip pages, then gathers up to
    AmountOfPages pages of results, excluding photos credited to
    "Pixabay".

    :return: list of [original-size URL, photographer name] pairs.
    """
    theurls = []
    py_pexel = PyPexels(api_key=api_key)
    search_results = py_pexel.search(query=searchTerm, per_page=amountPerPage)
    cc = 0
    c2 = 0
    c3 = 0
    tradeLog("Searching for: ", searchTerm)
    while search_results.has_next and c2 < AmountOfPages:
        if c3 > AmountOfPagesToSkip:
            for photo in search_results.entries:
                if photo.photographer != "Pixabay":
                    theurls.append(
                        [photo.src.get("original"), photo.photographer])
                    cc = cc + 1
            c2 = c2 + 1
        # Fix: the original discarded get_next_page()'s return value, so
        # every iteration re-read the same first page.
        search_results = search_results.get_next_page()
        c3 = c3 + 1
    return theurls
# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app_search.log', level=logging.DEBUG)

# pypexels logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger(PyPexels.logger_name).setLevel(logging.DEBUG)

# add a headers to the log
logger.debug(80 * '=')
# Fix: use the `logger` object consistently (was logging.debug, which
# targets the same root logger but broke the surrounding convention).
logger.debug('Testing PyPexels.search()')
logger.debug(80 * '=')

# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

# Start with a search query, maximize number of items per page
# Note: this will run until all results pages have been visited,
# unless a connection error occurs.
# Typically the API hourly limit gets hit during this
search_results_page = py_pexel.search(query='red flowers', per_page=40)
while True:
    for photo in search_results_page.entries:
        print((photo.id, photo.photographer, photo.url))
    if not search_results_page.has_next:
        break
    search_results_page = search_results_page.get_next_page()
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 24 20:07:32 2019

@author: Jacky
"""
from pypexels import PyPexels

api_key = ''  # Insert API key here

# instantiate PyPexels object
py_pexels = PyPexels(api_key=api_key)

# Walk every page of the 'dog' search, printing one line per photo.
page = py_pexels.search(query='dog', per_page=40)
while True:
    for photo in page.entries:
        print(photo.id, photo.photographer, photo.url)
    if not page.has_next:
        break
    page = page.get_next_page()
"""pexels api.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1vA-xqWtQfoFyj-5j7ikFK-bNmJDE28hI """ !pip install -q pexels_api !pip install -q PyPexels from pypexels import PyPexels # add your api key api_key = '' # instantiate PyPexels object py_pexel = PyPexels(api_key=api_key) # create a list for the results empty = [] search_results = py_pexel.search(query='bears', per_page=6) while True: for photo in search_results.entries: print(photo.id, photo.photographer, photo.url) empty.append(photo.url) print(empty) if not search_results.has_next: break search_results = search_results.get_next_page()
def download_image(url, root_dir, filename):
    """Stream the image at `url` to a file inside `root_dir`.

    NOTE(review): the `filename` parameter is immediately clobbered by
    the hard-coded name "(unknown).jpg" (an f-string with no
    placeholders), so every download overwrites the same file --
    confirm whether the caller's filename was meant to be used.
    """
    # Browser-style UA; presumably to avoid the server rejecting
    # non-browser clients -- TODO confirm.
    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
    }
    response = requests.get(url, stream=True, headers=headers)
    filename = f"(unknown).jpg"
    filepath = Path(root_dir) / filename
    with open(filepath, 'wb') as file:
        # write in 10 KB chunks so the whole image is never held in memory
        for chunk in response.iter_content(10000):
            file.write(chunk)


# Script: fetch one page of 'funny dogs' results, save each image and a
# JSON metadata record per photo.
search_results = py_pexels.search(query='funny dogs', per_page=10)
for _ in range(1):
    for photo in search_results.entries:
        print(photo.id, photo.photographer, photo.url)
        image_url = f"https://www.pexels.com/photo/{photo.id}/download"
        dest_dir = "./data"
        download_image(image_url, dest_dir, str(photo.id))
        metadata_file = f"./metadata/{photo.id}.json"
        with open(metadata_file, "w") as file:
            # NOTE(review): snippet is truncated here in the source view;
            # the json.dump call below is incomplete as received.
            json.dump(
                {
                    "download_url": image_url,
                    "id": photo.id,
                    "photo_url": photo.url,
                    "photographer": photo.photographer
from __future__ import division from pypexels import PyPexels import json api_key = '563492ad6f9170000100000121984210cc7b4f57a4fdb0d06251dec5' # instantiate PyPexels object py_pexels = PyPexels(api_key=api_key) result_photos = [] nature_photos = py_pexels.search(query='nature', per_page=40, page=200) count = 0 fh = open('photos.json', 'w') while nature_photos.has_next: try: print 'page number:', nature_photos.page for photo in nature_photos.entries: #print photo.src.get('medium') height = photo.height width = photo.width #print 'size demision', width/height if (width / height > 1.4) and (width / height < 1.6): mediumPhoto = photo.src.get('medium') #print mediumPhoto result_photos.append(mediumPhoto) fh.write(mediumPhoto) fh.write('\n') count = count + 1 else: #print 'size does not match to 1.5'