import random
import time
import urllib.request

from pypexels import PyPexels


def getFromPexels():
    api_key = 'useyourownapi'
    py_pexels = PyPexels(api_key=api_key)
    listback = []
    popular_photos = py_pexels.popular(page=random.randrange(0, 202, 2))
    for photo in popular_photos.entries:
        landsc = photo.src["landscape"]
        if landsc != "":  # use equality, not identity ("is not"), for string comparison
            listback.append(photo.src["landscape"])
            # print(listback)
        # else:
        #     popular_photos = popular_photos.get_next_page()
    for i in range(len(listback)):
        try:
            print(listback[i])
            opener = urllib.request.Request(listback[i])
            opener.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0')
            fx = open('/home/devnull/Pictures/background/background.jpg', 'wb')
            fx.write(urllib.request.urlopen(opener).read())
            fx.close()
            # listback.remove(listback[i])
            time.sleep(660)
        except IndexError:
            print("List Index Out of Range")
            break
def test_curated(self):
    index = 'curated'
    resource_filepath = self.store_mapping[index]
    stored_response = json.loads(open(resource_filepath).read())
    responses.add(
        responses.GET,
        'https://api.pexels.com/v1/curated?page=2&per_page=5',
        json=stored_response,
        status=200,
        content_type='application/json',
        adding_headers={'Authorization': api_key},
        match_querystring=True,
    )
    py_pexels = PyPexels(api_key=api_key)
    curated_results_page = py_pexels.curated(page=2, per_page=5)
    # Page properties
    print(curated_results_page.page)
    print(curated_results_page.per_page)
    print(curated_results_page.has_next)
    print(curated_results_page.has_previous)
    print(curated_results_page.link_self)
    print(curated_results_page.link_first)
    print(curated_results_page.link_last)
    print(curated_results_page.link_next)
    print(curated_results_page.link_previous)
    # Entries
    for photo in curated_results_page.entries:
        print(photo.id, photo.photographer, photo.width, photo.height, photo.url)
        print(photo.src)
def test_search(self):
    index = 'redflowervideo'
    resource_filepath = self.store_mapping[index]
    stored_response = json.loads(open(resource_filepath).read())
    responses.add(
        responses.GET,
        '{}{}'.format(API_ROOT, stored_response.get('_url')),
        # _url contains only the short path, e.g. /popular?page=2&per_page=5
        json=stored_response.get('body'),
        status=stored_response.get('status_code'),
        content_type='application/json',
        adding_headers=stored_response.get('headers'),
        match_querystring=True,
    )
    py_pexels = PyPexels(api_key=api_key)
    search_results_page = py_pexels.videos_search(query='red flower', page=2, per_page=5)
    # Page properties
    print(search_results_page.page)
    print(search_results_page.per_page)
    print(search_results_page.has_next)
    print(search_results_page.has_previous)
    print(search_results_page.link_self)
    print(search_results_page.link_first)
    print(search_results_page.link_last)
    print(search_results_page.link_next)
    print(search_results_page.link_previous)
    # Entries
    for video in search_results_page.entries:
        print(video.id, video.user, video.width, video.height, video.url)
        print(video.video_files)
def pexels_parse_resp(subject):
    """
    From a Pexels API resp, collect the top 4 images from the results.

    :param subject: The subject to be used for the image search, or None.
        If None, random photos are fetched.
    :rtype images: A list containing data on the fetched images.
    :except AttributeError: Occurs when resp fails to fetch images and
        enumerate cannot parse resp.
    """
    py_pexel = PyPexels(api_key=PEXELS_API_KEY)
    if subject is not None:
        resp = py_pexel.search(query=subject, per_page=4)
    else:
        resp = py_pexel.random(per_page=4)
    images = []
    try:
        for num, item in enumerate(resp.entries, 1):
            image_info = {
                "author_name": item.photographer,
                "full_image": item.src["original"],
                "image_id": item.id,
                "author_profile": item.photographer_url,
            }
            images.append(image_info)
        return images
    except AttributeError as err:
        handle_err(
            f"Failed to parse pexels resp object: {err}\n"
            f"Check that your API_KEYs are setup correctly."
        )
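# A minimal usage sketch for pexels_parse_resp() above (an illustration, not
# part of the original source); PEXELS_API_KEY and handle_err are assumed to
# be defined in the surrounding module.
images = pexels_parse_resp("mountains")
if images:
    for info in images:
        # Each dict carries the photographer credit plus the original-size URL.
        print(info["author_name"], info["full_image"])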
def test_popular(self):
    index = 'popular'
    resource_filepath = self.store_mapping[index]
    stored_response = json.loads(open(resource_filepath).read())
    responses.add(
        responses.GET,
        '{}/{}{}'.format(API_ROOT, API_VERSION, stored_response.get('_url')),
        # _url contains only the short path, e.g. /popular?page=2&per_page=5
        json=stored_response.get('body'),
        status=stored_response.get('status_code'),
        content_type='application/json',
        adding_headers=stored_response.get('headers'),
        match_querystring=True,
    )
    py_pexels = PyPexels(api_key=api_key)
    popular_results_page = py_pexels.popular(page=2, per_page=5)
    # Page properties
    print(popular_results_page.page)
    print(popular_results_page.per_page)
    print(popular_results_page.has_next)
    print(popular_results_page.has_previous)
    print(popular_results_page.link_self)
    print(popular_results_page.link_first)
    print(popular_results_page.link_last)
    print(popular_results_page.link_next)
    print(popular_results_page.link_previous)
    # Entries
    for photo in popular_results_page.entries:
        print(photo.id, photo.photographer, photo.width, photo.height, photo.url)
        print(photo.src)
def execute_search(key, string, pages, format):
    # `search_urls` is assumed to be a module-level list defined elsewhere.
    pexels_instance = PyPexels(api_key=key)
    search_results = pexels_instance.search(query=string, per_page=pages)
    while True:
        for image in search_results.entries:
            search_urls.append(image.src.get(format))
            print(image.src.get(format))
        if not search_results.has_next:
            break
        search_results = search_results.get_next_page()
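# Hypothetical driver for execute_search() above: search_urls is the
# module-level list the function appends to, and the key/format values
# here are placeholders.
search_urls = []
execute_search('YOUR_API_KEY', 'mountains', 40, 'large')
print(len(search_urls), 'URLs collected')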
def img(term='Puppies'):
    # `id` and `links` are assumed to be module-level lists defined elsewhere
    # (note that `id` shadows the Python builtin of the same name).
    py_pexels = PyPexels(api_key=pexels_token)
    search_results = py_pexels.search(query=term, per_page=5)
    i = 0
    while i < 5:
        for photo in search_results.entries:
            id.append(photo.id)
            l = f"https://images.pexels.com/photos/{photo.id}/pexels-photo-{photo.id}.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=650&w=940"
            links.append(l)
            i += 1
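# A possible setup for img() above (assumed, since the module that defines
# the globals is not shown): pexels_token is a placeholder, and `id` and
# `links` are the module-level lists the function appends to.
id = []      # photo IDs collected by img(); note this shadows the builtin
links = []   # direct image URLs collected by img()
img('Kittens')
for photo_id, url in zip(id, links):
    print(photo_id, url)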
class PyPexelApi:
    pypexels = None

    def __init__(self, pypexels_api_key=None):
        self.pypexels = PyPexels(api_key=pypexels_api_key)

    def get_single_random_photo(self):
        random_photo = self.pypexels.random(per_page=1)
        for photo in random_photo.entries:
            photo_url = str(photo.src['large'])
            photo_photographer_name = photo.photographer.encode('utf-8')
            photo_photographer_url = str(photo.photographer_url)
            photo_id = str(photo.id)
            photo = Photo(photo_url, photo_photographer_name, photo_photographer_url, photo_id)
            return photo

    def get_photos_by_search(self, search_term=None, limit=None):
        list_photos = []
        search_results_page = self.pypexels.search(query=search_term, per_page=limit)
        for pexel_photo in search_results_page.entries:
            photo_url = str(pexel_photo.src['large'])
            photo_photographer_name = str(pexel_photo.photographer)
            photo_photographer_url = str(pexel_photo.photographer_url)
            photo_id = str(pexel_photo.id)
            photo = Photo(photo_url, photo_photographer_name, photo_photographer_url, photo_id)
            list_photos.append(photo)  # was `list.append(photo)`, which never filled the result
        return list_photos

    def get_popular_photos(self, limit=None):
        list_photos = []
        popular_photos_page = self.pypexels.popular(per_page=limit)
        for pexel_photo in popular_photos_page.entries:
            photo_url = str(pexel_photo.src['large'])
            photo_photographer_name = str(pexel_photo.photographer)
            photo_photographer_url = str(pexel_photo.photographer_url)
            photo_id = str(pexel_photo.id)
            photo = Photo(photo_url, photo_photographer_name, photo_photographer_url, photo_id)
            list_photos.append(photo)
        return list_photos
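# Hypothetical usage of PyPexelApi above; the Photo class and the API key
# are assumed to be provided elsewhere in the project.
api = PyPexelApi(pypexels_api_key='YOUR_API_KEY')
photos = api.get_photos_by_search(search_term='forest', limit=5)
print(len(photos), 'photos fetched')
print(api.get_single_random_photo())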
def execute_search(key, string, pages, format, progress_function):
    if len(search_urls) > 0:
        search_urls.clear()
    pexels_instance = PyPexels(api_key=key)
    search_results = pexels_instance.search(query=string, per_page=pages)
    progress_function(0)
    while True:
        for image in search_results.entries:
            search_urls.append(image.src.get(format))
            print(image.src.get(format))
        if not search_results.has_next:
            break
        search_results = search_results.get_next_page()
    progress_function('finished')
def searchPhotos(searchTerm="", amountPerPage=10, AmountOfPages=5, AmountOfPagesToSkip=5):
    theurls = []
    py_pexel = PyPexels(api_key=api_key)
    search_results = py_pexel.search(query=searchTerm, per_page=amountPerPage)
    cc = 0
    c2 = 0
    c3 = 0
    tradeLog("Searching for: ", searchTerm)
    while search_results.has_next and c2 < AmountOfPages:
        if c3 > AmountOfPagesToSkip:
            for photo in search_results.entries:
                if photo.photographer != "Pixabay":
                    theurls.append([photo.src.get("original"), photo.photographer])
                    cc = cc + 1
            c2 = c2 + 1
        # Assign the result: get_next_page() returns a new page object rather
        # than advancing in place, so the original un-assigned call looped forever.
        search_results = search_results.get_next_page()
        c3 = c3 + 1
    return theurls
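# Hypothetical call to searchPhotos() above; api_key and tradeLog are
# assumed to be defined in the surrounding module.
urls = searchPhotos(searchTerm='sunset', amountPerPage=40,
                    AmountOfPages=3, AmountOfPagesToSkip=0)
for url, photographer in urls:
    print(photographer, url)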
def __init__(self, vname=''):
    self.py_pexel = PyPexels(api_key=api_key)
    self.latestIDs = []
    self.vname = vname
import json
import time
from pathlib import Path
import os

import requests
from pypexels import PyPexels
from azure.storage.blob import BlobClient

api_key = '563492ad6f91700001000001aeb006d415ed4a22bda755bf22efc602'

# instantiate PyPexels object
py_pexels = PyPexels(api_key=api_key)


def download_image(url, root_dir, filename):
    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
    }
    response = requests.get(url, stream=True, headers=headers)
    filename = f"{filename}.jpg"
    filepath = Path(root_dir) / filename
    with open(filepath, 'wb') as file:
        for chunk in response.iter_content(10000):
            file.write(chunk)


search_results = py_pexels.search(query='funny dogs', per_page=10)
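# The snippet stops right after the search call; a plausible continuation
# (an assumption, following the pagination pattern used elsewhere in these
# examples) feeds each result into download_image() above.
for photo in search_results.entries:
    download_image(photo.src['large'], root_dir='.', filename=str(photo.id))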
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 24 20:07:32 2019

@author: Jacky
"""
from pypexels import PyPexels

api_key = ''  # Insert API key here

# instantiate PyPexels object
py_pexels = PyPexels(api_key=api_key)

search_results = py_pexels.search(query='dog', per_page=40)
while True:
    for photo in search_results.entries:
        print(photo.id, photo.photographer, photo.url)
    if not search_results.has_next:
        break
    search_results = search_results.get_next_page()
def pullvideos(wordlist):
    api_key = '@@@@@@@@@@'
    py_pexel = PyPexels(api_key=api_key)
    novid = False
    video_links = []
    length = len(wordlist)

    def search_vids(word, flag, topicused, multiplevids):
        # search for vids
        print(word)
        if multiplevids == True:
            search_videos_page = py_pexel.videos_search(query=word, per_page=5, min_duration=6, max_duration=30)
            links = []
            for video in search_videos_page.entries:
                for x in video.video_files:
                    link = str(x.get("link"))
                    links.append(link)
            for link in links:
                if link not in video_links:
                    video_links.append(link)
            multiplevids = False
            return video_links
        else:
            search_videos_page = py_pexel.videos_search(query=word, per_page=1, min_duration=6, max_duration=30)
            for video in search_videos_page.entries:
                for x in video.video_files:
                    # get link of video from json output >> dictionary
                    link = str(x.get("link"))
                    if link not in video_links:
                        video_links.append(link)
                return video_links
            else:
                # for-else: runs when the search returned no entries,
                # so fall back to the topic word instead
                if flag == 1 and topicused == True:
                    topic = wordlist[length - 3]
                    multiplevids = True
                    search_vids(topic, flag, topicused, multiplevids)
                else:
                    topic = wordlist[length - 3]
                    flag = 1
                    topicused = True
                    search_vids(topic, flag, topicused, multiplevids)
                return video_links

    # loop through word list and search for vids
    multiplevids = False
    flag = 0
    topicused = False
    for word in wordlist:
        video_links = search_vids(word, flag, topicused, multiplevids)
    video_links = list(set(video_links))
    return video_links
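# Hypothetical call to pullvideos() above; per its fallback logic, the
# third-from-last entry of the word list acts as the backup "topic".
links = pullvideos(['sunrise', 'ocean', 'waves', 'nature', 'intro', 'outro'])
print(len(links), 'unique video links')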
# Comment: What's new in revision 1
# ###############################################################################
import logging
import os

from pypexels import PyPexels

api_key = os.environ.get('API_KEY', None) or 'DUMMY_API_KEY'

# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app_random.log', level=logging.DEBUG)

# pypexels logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger(PyPexels.logger_name).setLevel(logging.DEBUG)

# add a header to the log
logger.debug(80 * '=')
logging.debug('Testing PyPexels.random()')
logger.debug(80 * '=')

# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

random_photos_page = py_pexel.random(per_page=7)
for photo in random_photos_page.entries:
    print(photo.id, photo.photographer, photo.url)
def __init__(self, pypexels_api_key=None):
    self.pypexels = PyPexels(api_key=pypexels_api_key)
# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app_popular.log', level=logging.DEBUG)

# pypexels logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger(PyPexels.logger_name).setLevel(logging.DEBUG)

# add a header to the log
logger.debug(80 * '=')
logging.debug('Testing PyPexels.popular()')
logger.debug(80 * '=')

# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

# Start with the generic collection, maximize number of items per page
# Note: this will run until all popular photos have been visited,
#       unless a connection error occurs.
#       Typically the API hourly limit gets hit during this
#
popular_photos_page = py_pexel.popular(per_page=40)
while True:
    for photo in popular_photos_page.entries:
        print((photo.id, photo.photographer, photo.url))
    if not popular_photos_page.has_next:
        break
    popular_photos_page = popular_photos_page.get_next_page()
# ###############################################################################
import logging
import os

from pypexels import PyPexels

api_key = os.environ.get('API_KEY', None) or 'DUMMY_API_KEY'

# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app_single_video.log', level=logging.DEBUG)

# pypexels logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger(PyPexels.logger_name).setLevel(logging.DEBUG)

# add a header to the log
logger.debug(80 * '=')
logging.debug('Testing PyPexels.single_video()')
logger.debug(80 * '=')

# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

# Retrieve a single video, known by its ID
video = py_pexel.single_video(video_id=1448735)
print(video.id, video.user.get('name'), video.url)
print(video.get_attribution('txt'))
print(video.get_attribution('html'))
# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app_search.log', level=logging.DEBUG)

# pypexels logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger(PyPexels.logger_name).setLevel(logging.DEBUG)

# add a header to the log
logger.debug(80 * '=')
logging.debug('Testing PyPexels.search()')
logger.debug(80 * '=')

# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

# Start with a search query, maximize number of items per page
# Note: this will run until all results pages have been visited,
#       unless a connection error occurs.
#       Typically the API hourly limit gets hit during this
#
search_results_page = py_pexel.search(query='red flowers', per_page=40)
while True:
    for photo in search_results_page.entries:
        print((photo.id, photo.photographer, photo.url))
    if not search_results_page.has_next:
        break
    search_results_page = search_results_page.get_next_page()
class PexelPusher:
    def __init__(self, vname=''):
        self.py_pexel = PyPexels(api_key=api_key)
        self.latestIDs = []
        self.vname = vname

    def searchVideos(self, query='', qSize=10):
        selected_videos = []
        search_videos_page = self.py_pexel.videos_search(query=query, per_page=qSize)
        while True:
            for video in search_videos_page.entries:
                self.latestIDs.append(video.id)
                if video.width / video.height >= 1.33:
                    image = video.image.split('?')
                    selected_videos.append(image[0])
            if not search_videos_page.has_next:
                break
            search_videos_page = search_videos_page.get_next_page()
        return selected_videos

    def getThumbs(self, querylist, qSize):
        nlist = 0
        root = '.\\cacheimages\\'
        if self.vname == '':
            print('Please name your video first, champion.')
            return
        for query in querylist:
            video_list = self.searchVideos(query, qSize)
            nlist = '{:04d}'.format(querylist.index(query) + 1)
            cache_folder = '.\\cacheimages\\'
            if not os.path.exists(cache_folder):
                os.mkdir(cache_folder)
            random.shuffle(video_list)
            for imageurl in video_list:
                r = requests.get(imageurl)
                filename = cache_folder + os.path.split(imageurl)[1]
                with open(filename, 'wb') as outfile:
                    outfile.write(r.content)
                image = Image.open(filename)
                image.thumbnail((400, 400))
                image.save(filename)
        self.makeContactSheet()

    def makeContactSheet(self):
        root = '.\\cacheimages\\'
        vname = self.vname
        images = []
        for root, dirs, files in os.walk(root):
            # print(root)
            targetPath = os.listdir(os.path.join(root))
            random.shuffle(targetPath)
            for file in targetPath:
                images.append(os.path.join(root, file))
        outfile = os.path.join('.\\media\\cs_{0}.jpg'.format(vname))
        montaner.generate_montage(images, outfile)
        self.saveVideoIDs()
        img = Image.open(outfile)
        img.show()
        shutil.rmtree(root)

    def saveVideoIDs(self):
        file = '.\\modules\\config\\{0}_IDs.json'.format(self.vname)
        if os.path.exists(file):
            os.remove(file)
        tobiasj.saveThis(self.latestIDs, file)

    def downloadVideos(self):
        file = '.\\modules\\config\\{0}_IDs.json'.format(self.vname)
        ids = tobiasj.loadThis(file)
        namelist = []
        root = '.\\media\\videos\\{0}\\'.format(self.vname)
        if not os.path.exists(root):
            os.mkdir(root)
        for videoid in ids:
            video = self.py_pexel.single_video(video_id=videoid)
            for each in video.video_files:
                if 'hd' in each['quality']:
                    link = each['link']
                    name = 'ID{0}_{1}'.format(videoid, video.user.get('name'))
                    if name not in namelist:
                        namelist.append(name)
                    filename = root + '{0}.mp4'.format(videoid)
                    if not os.path.isfile(filename):
                        print('Downloading this video: {0}'.format(filename))
                        r = requests.get(link)
                        with open(filename, 'wb') as outfile:
                            outfile.write(r.content)
                    else:
                        print('Skipping this video because it already exists: {0}'.format(filename))
        tobiasj.saveThis(namelist, '.\\modules\\config\\{0}.credits.json'.format(self.vname))
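# A hypothetical end-to-end run of PexelPusher above; api_key, montaner,
# and tobiasj are assumed to be provided by the surrounding modules.
pusher = PexelPusher(vname='my_video')
pusher.getThumbs(['ocean', 'forest'], qSize=10)  # builds the contact sheet and saves IDs
pusher.downloadVideos()                          # downloads the saved IDs in HD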
def do_stuff(infinite=True):
    db = DB(host=DB_HOST, port=DB_PORT, user=DB_USER, passwd=DB_PASS, name=DB_NAME)
    rmq = Rmq(host=RMQ_HOST, port=RMQ_PORT, user=RMQ_USER, passw=RMQ_PASS)
    pexel = PyPexels(api_key=API)
    log.info('started')
    banned = False
    while True:
        try:
            log.info('Attempting to get photos!')
            entries = set(pexel.random(per_page=PER_PAGE).entries)
            banned = False
        except PexelsError:
            word = "Still" if banned else "Got"
            log.warning(f'{word} banned on pexels, waiting 5 min.')
            banned = True
            for _ in range(6):
                rmq.connection.process_data_events()
                sleep(50)
            continue
        rejected_pics = db.seen_pictures
        for photo in entries:
            source = photo.src["original"]
            if source in rejected_pics:
                log.info(f'Already seen this({source}) picture!')
                continue
            db.add_seen_pic(source)
            pic = PictureValid(service="Pexels",
                               download_url=photo.src["original"],
                               preview_url=photo.src["large"],
                               source_url=photo.url,
                               height=int(photo.height),
                               width=int(photo.width))
            if photo.height > photo.width:
                log.info('not adding this pic because height > width, for now')
                log.debug(f'height = {photo.height}\nwidth={photo.width}')
                continue
            log.info(f'Adding {pic}!')
            rmq.channel.basic_publish(exchange='',
                                      routing_key='check_out',
                                      body=pic.json(),
                                      properties=rmq.durable)
        if not infinite:
            return
        int_range = range(0, INTERVAL, 20)
        for _ in int_range:
            how_many = len(list(int_range))
            rmq.connection.process_data_events()
            sleep(INTERVAL / how_many)
# ###############################################################################
import logging
import os

from pypexels import PyPexels

api_key = os.environ.get('API_KEY', None) or 'DUMMY_API_KEY'

# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app_single_photo.log', level=logging.DEBUG)

# pypexels logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger(PyPexels.logger_name).setLevel(logging.DEBUG)

# add a header to the log
logger.debug(80 * '=')
logging.debug('Testing PyPexels.single_photo()')
logger.debug(80 * '=')

# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

# Retrieve a single photo, known by its ID
photo = py_pexel.single_photo(photo_id=415071)
print(photo.id, photo.photographer, photo.url)
print(photo.get_attribution('txt'))
print(photo.get_attribution('html'))
# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app_video_search.log', level=logging.DEBUG)

# pypexels logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger(PyPexels.logger_name).setLevel(logging.DEBUG)

# add a header to the log
logger.debug(80 * '=')
logging.debug('Testing PyPexels.videos_search()')
logger.debug(80 * '=')

# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

# Start with the search results, maximize number of items per page
# Note: this would run until all search results have been visited,
#       unless a connection error occurs.
#       Typically the API hourly limit gets hit during this
#
# Note2: the video API is not currently (May 2020) producing next_page/prev_page links
#        so this example will not be able to keep walking forward
#
search_videos_page = py_pexel.videos_search(query="red+flower", per_page=40)
while True:
    for video in search_videos_page.entries:
        print(video.id, video.user.get('name'), video.url)
    # No get_next_page() call here: without next-page links (see Note2),
    # has_next is False and the loop exits after the first page.
    if not search_videos_page.has_next:
        break
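# A possible workaround sketch for the missing next-page links mentioned in
# Note2 above (an assumption, not part of the original example): request
# successive pages explicitly via the `page` parameter instead of walking links.
for page in range(1, 4):
    page_results = py_pexel.videos_search(query="red+flower", per_page=40, page=page)
    for video in page_results.entries:
        print(video.id, video.user.get('name'), video.url)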
    # Need to complete download, and try next time again manually
    print('....')
    print('Completed download')
    # time.sleep(30)
    print('....')
    return total_downloaded


# api_key from Cao Liang
# api_key = '563492ad6f917000010000018cd4391b02e24cb683cd90ac888d7189'
# api_key from Jacky
api_key = '563492ad6f917000010000015dee3b636ab44279b6f1edf055de1fe0'

# instantiate PyPexels object
py_pexels = PyPexels(api_key=api_key)

# search_target = 'domestic cat'
search_target = 'bird'
# Each time only allow maximum 200 images to be downloaded
total_images = 200
# Existing downloaded image files
exiting_num_images = 1200

print("{0} - Try to download {1} images for '{2}'".format(
    show_time(), total_images, search_target))
print("---------")

num_images = complete_download_task(py_pexels, search_target, total_images,
                                    ignore_num_images=exiting_num_images)
"""pexels api.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1vA-xqWtQfoFyj-5j7ikFK-bNmJDE28hI """ !pip install -q pexels_api !pip install -q PyPexels from pypexels import PyPexels # add your api key api_key = '' # instantiate PyPexels object py_pexel = PyPexels(api_key=api_key) # create a list for the results empty = [] search_results = py_pexel.search(query='bears', per_page=6) while True: for photo in search_results.entries: print(photo.id, photo.photographer, photo.url) empty.append(photo.url) print(empty) if not search_results.has_next: break search_results = search_results.get_next_page()
from __future__ import division

from pypexels import PyPexels
import json

api_key = '563492ad6f9170000100000121984210cc7b4f57a4fdb0d06251dec5'

# instantiate PyPexels object
py_pexels = PyPexels(api_key=api_key)

result_photos = []
nature_photos = py_pexels.search(query='nature', per_page=40, page=200)
count = 0
fh = open('photos.json', 'w')
while nature_photos.has_next:
    try:
        print 'page number:', nature_photos.page
        for photo in nature_photos.entries:
            # print photo.src.get('medium')
            height = photo.height
            width = photo.width
            # print 'size dimension', width / height
            if (width / height > 1.4) and (width / height < 1.6):
                mediumPhoto = photo.src.get('medium')
                # print mediumPhoto
                result_photos.append(mediumPhoto)
                fh.write(mediumPhoto)
                fh.write('\n')
                count = count + 1
            else:
                # print 'size does not match to 1.5'
from pypexels import PyPexels
from os import chdir
import urllib.request as req
import requests

chdir("imgs")

api_key = '563492ad6f9170000100000133e1d38b49754e5ebcf024313a5fdbc2'

# instantiate PyPexels object
py_pexels = PyPexels(api_key=api_key)

for i in range(100):
    popular_photos = py_pexels.random(per_page=10)
    for photo in popular_photos.entries:
        print(photo.id, photo.photographer, photo.url)
        img_data = requests.get(photo.src['medium']).content
        with open('{0}.jpeg'.format(photo.id), 'wb') as handler:
            handler.write(img_data)
        break  # note: stops after the first photo of the batch...
    break  # ...and after the first batch, so only one image is saved
# Initialize app logging
logger = logging.getLogger()
logging.basicConfig(filename='app_video_popular.log', level=logging.DEBUG)

# pypexels logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger(PyPexels.logger_name).setLevel(logging.DEBUG)

# add a header to the log
logger.debug(80 * '=')
logging.debug('Testing PyPexels.videos_popular()')
logger.debug(80 * '=')

# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

# Start with the generic collection, maximize number of items per page
# Note: this would run until all popular videos have been visited,
#       unless a connection error occurs.
#       Typically the API hourly limit gets hit during this
#
# Note2: the video API is not currently (May 2020) producing next_page/prev_page links
#        so this example will not be able to keep walking forward
#
popular_videos_page = py_pexel.videos_popular(per_page=40)
while True:
    for video in popular_videos_page.entries:
        print(video.id, video.user.get('name'), video.url)
    # No get_next_page() call here: without next-page links (see Note2),
    # has_next is False and the loop exits after the first page.
    if not popular_videos_page.has_next:
        break
from pypexels import PyPexels
import requests
from PIL import Image
import os
from multiprocessing import Pool

API_KEY = '563492ad6f917000010000014db666f070d64bec826d5746cf55d29d'
# OUTPUT_DIR = 'E:\\user\\Pictures\\!Wallpapers\\stock'
OUTPUT_DIR = os.path.abspath(__file__)

px = PyPexels(api_key=API_KEY)


def get_image(img_url):
    # open
    img = Image.open(requests.get(img_url, stream=True).raw)
    # determine filename
    filename = img_url.split('/')[-1]
    # join with the output directory (the original discarded this result,
    # which silently saved files to the current working directory)
    filename = os.path.join(OUTPUT_DIR, filename)
    # save the image
    img.save(filename)
    # print success message
    print(f"{filename} saved.")


if __name__ == '__main__':
    popular_photos = px.curated(per_page=30)
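# A plausible continuation of the truncated __main__ block above (an
# assumption suggested by the otherwise unused Pool import): fan the
# downloads out over a small worker pool.
if __name__ == '__main__':
    popular_photos = px.curated(per_page=30)
    urls = [photo.src['medium'] for photo in popular_photos.entries]
    with Pool(4) as pool:
        pool.map(get_image, urls)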