def dog_api_key():
    url = 'https://api.pexels.com/v1'
    MY_API_KEY = '563492ad6f917000010000014a3f9bf8a7724c9daabaf113ac5bd748'
    # headers = {'Authorization': MY_API_KEY}
    # parameters = {'format': 'json', 'page': 1, 'per_page': 10}
    dog = API(MY_API_KEY)
    dog.search('dog', page=1, results_per_page=1)
    dog_photo = dog.get_entries()[0].small
    return dog_photo
def get_photo(appliance):
    """Return an image URL for the appliance.
    Prefer the hand-picked dictionary of URLs below; otherwise query the
    Pexels API, falling back to a generic image if the search fails."""
    photo_dict = {
        "Wall AC": "https://i0.wp.com/homeairguides.com/wp-content/uploads/2020/02/best-through-the-wall-air-conditioners.jpg",
        "Central AC": "https://www.polspam.org/wp-content/uploads/2017/05/1.jpg",
        "Range Oven": "https://kitchenaid-h.assetsadobe.com/is/image/content/dam/business-unit/whirlpool/en-us/marketing-content/site-assets/page-content/ranges-sclp/ssb-removal/RangeSCLP_Masthead_Mobile_P180285_1z.jpg",
        "Waterbed Heater": "https://i5.walmartimages.com/asr/d6e84bf5-c480-4dde-b817-3bde1c7fa67b_1.e8fbe6f081dafe88251edf4a03a4f478.jpeg",
        "Televisions": "https://cnet3.cbsistatic.com/img/5AYR9m4W6Uu7GkTCa1TP9pN7AfQ=/868x488/2019/10/17/c272379f-4a92-4a77-9432-16f23703ea22/03-vizio-v-series-2019-v556-g1-v605-g3.jpg",
        'Analog, <40"': "https://cnet4.cbsistatic.com/img/99Yw213RfOcFayHaMojxDzlD_u0=/756x567/2012/02/28/861e600b-cc2e-11e2-9a4a-0291187b029a/old-polish-tv_1.jpg",
        'Analog, >40"': "https://cnet4.cbsistatic.com/img/99Yw213RfOcFayHaMojxDzlD_u0=/756x567/2012/02/28/861e600b-cc2e-11e2-9a4a-0291187b029a/old-polish-tv_1.jpg",
        'Digital, ED/HD TV, <40"': "https://scene7.samsclub.com/is/image/samsclub/0088727636930_A?wid=280&hei=280",
        'Digital, ED/HD TV, >40"': "https://scene7.samsclub.com/is/image/samsclub/0088727636930_A?wid=280&hei=280",
        'Set-top Boxes': "https://appliance-standards.org/sites/default/files/set-top_box.jpg",
        'DVD/VCR': 'https://pisces.bbystatic.com/image2/BestBuy_US/images/products/4790/4790684_ra.jpg',
        'Dehumidifier': 'https://images.allergybuyersclub.com/img/TC-DE-PD7P-1-500.jpg',
        'Space Heater': 'https://images-na.ssl-images-amazon.com/images/I/71eohyKh2bL._AC_SX522_.jpg',
        'Water Heater-Family of 4': 'https://mobileimages.lowes.com/product/converted/035505/035505002440.jpg',
        'Portable Spa': 'https://www.diamondspas.com/wp-content/uploads/2018/07/MG_7601-Edit_Side_wood_bubbles_steps_72dpi_9X13.png',
        'Rechargeable Power Tool': 'https://i5.walmartimages.com/asr/2538ea03-9d15-4b68-89fa-c5f99f8f323a_1.8a29d0cf88517ba9881e157b2963fd6e.jpeg',
        'Well Pump': 'https://mpop-prod-hls-primary.s3.amazonaws.com/jones-services/img/1568475825-jones-services-1562082569Well-tank.jpg',
        'Aquarium Equipment': 'https://lh3.googleusercontent.com/proxy/xn0hkva7QPlevGbs6v-zrJnNJB1_Oi8RNkGCb2UwFTJV4U4ngjKVcbSdn79zKR-2x4rytMNp6wtq8nJe9x9NvwiwaQyTqiPw4Gqed1-LwQUQ9qRCkNkK8g',
        'Dryer (Auto-regular)': 'https://images.homedepot-static.com/productImages/ef33d316-3fa6-4e90-8d52-1fc65baee64c/svn/white-lg-electronics-electric-dryers-dle3500w-64_1000.jpg'
    }
    if photo_dict.get(appliance):
        image_url = photo_dict[appliance]
    else:
        api = API(PEXELS_API_KEY)
        try:
            # api.search() returns the raw JSON response as a dict
            search = api.search(appliance, results_per_page=1)
            image_url = search["photos"][0]["src"]["original"]
        except Exception:
            # No results (or an unexpected response): use a generic fallback image
            image_url = "https://images.unsplash.com/photo-1519626504899-7a03a8a9ab51"
    return image_url
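# Example calls of get_photo() above. PEXELS_API_KEY is an assumed module-level
# name, as in the function body; this is a sketch, not part of the original.
from pexels_api import API

PEXELS_API_KEY = 'your-api-key'  # assumption: set your own key

print(get_photo('Space Heater'))   # served from the fallback dictionary
print(get_photo('refrigerator'))   # not in the dictionary, so queries the API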
def main(args):
    sleep = get_sleep(args.sleep)
    api = API(PEXELS_API_KEY)
    query = args.query
    page = 1
    counter = 0
    photos_dict = {}
    # Step 1: collect URLs and metadata
    while page <= PAGE_LIMIT:
        api.search(query, page=page, results_per_page=RESULTS_PER_PAGE)
        photos = api.get_entries()
        for photo in tqdm.tqdm(photos):
            # Grab the raw photo dict behind the Photo wrapper (name-mangled attribute)
            photos_dict[photo.id] = vars(photo)['_Photo__photo']
            counter += 1
        if not api.has_next_page:
            break
        page += 1
        sleep()
    print(f"Finished at page: {page}")
    print(f"Images processed: {counter}")
    # Step 2: download
    if photos_dict:
        os.makedirs(args.path, exist_ok=True)
        # Save the metadata dict
        with open(os.path.join(args.path, f'{query}.json'), 'w') as fout:
            json.dump(photos_dict, fout)
        for val in tqdm.tqdm(photos_dict.values()):
            url = val['src'][args.resolution]
            fname = os.path.basename(val['src']['original'])
            image_path = os.path.join(args.path, fname)
            if not os.path.isfile(image_path):  # skip if already downloaded
                response = requests.get(url, stream=True)
                with open(image_path, 'wb') as outfile:
                    outfile.write(response.content)
            else:
                print(f"File exists: {image_path}")
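# A minimal sketch of the module-level wiring that main(args) above expects;
# every name below (the constants, get_sleep(), the CLI flags) is an assumption
# inferred from the function body, not the original code.
import argparse
import json
import os
import time

import requests
import tqdm
from pexels_api import API

PEXELS_API_KEY = os.environ.get('PEXELS_API_KEY')  # assumed key source
PAGE_LIMIT = 10          # assumed page cap
RESULTS_PER_PAGE = 80    # Pexels returns at most 80 results per page

def get_sleep(seconds):
    # Assumed helper: returns a callable that pauses between page requests
    return lambda: time.sleep(seconds)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Bulk-download Pexels search results')
    parser.add_argument('query')
    parser.add_argument('--path', default='photos')
    parser.add_argument('--resolution', default='original',
                        choices=['original', 'large2x', 'large', 'medium', 'small', 'tiny'])
    parser.add_argument('--sleep', type=float, default=1.0)
    main(parser.parse_args())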
def index():
    heading = "Photography Agency"
    subheading = "Our work"
    # Create API objects with your Pexels API key
    fashion_api = API(PEXELS_API_KEY)
    building_api = API(PEXELS_API_KEY)
    # Search twenty 'fashion' and twenty 'building' photos
    fashion_api.search('fashion', page=1, results_per_page=20)
    building_api.search('building', page=1, results_per_page=20)
    # Get photo entries
    fashion = fashion_api.get_entries()
    building = building_api.get_entries()
    return render_template('index.html', heading=heading, subheading=subheading,
                           fashion=fashion, building=building)
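# index() above assumes a Flask app context; a minimal, assumed setup
# (names inferred from the function body, not part of the original snippet):
from flask import Flask, render_template
from pexels_api import API

PEXELS_API_KEY = 'your-api-key'  # assumption: set your own key
app = Flask(__name__)
app.add_url_rule('/', 'index', index)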
def download_images():
    api = API(PEXELS_API_KEY)
    # Note: Pexels caps per_page at 80, so this returns at most 80 entries
    api.search(keyword, page=1, results_per_page=100)
    photos = api.get_entries()
    links = []
    for photo in photos:
        links.append(photo.medium)
    print(f'Downloading {len(links)} images....')
    # Fetch each URL and write the image to a file
    for i, link in enumerate(links):
        response = requests.get(link)
        image_name = SAVE_FOLDER + '/' + keyword + str(i + 1) + '.jpg'
        with open(image_name, 'wb') as raw_img:
            raw_img.write(response.content)
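# download_images() relies on module-level names that aren't shown above; a
# minimal, assumed setup (names inferred from the function body):
import os
import requests
from pexels_api import API

PEXELS_API_KEY = 'your-api-key'   # assumption: set your own key
keyword = 'kitten'                # assumption: the search term
SAVE_FOLDER = 'images'            # assumption: output directory

os.makedirs(SAVE_FOLDER, exist_ok=True)
download_images()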
import json
import random as rn

from pexels_api import API

def get_pic(topic):
    with open(r"files\pexel_key.json") as key:
        api_key = json.load(key)['key']
    api = API(api_key)
    pics = api.search(topic, results_per_page=40)
    # Guard against fewer results than one full page (randint(1, 0) would raise)
    num_pages = max(1, pics['total_results'] // pics['per_page'])
    # Request the same page size so the page count computed above stays valid
    api.search(topic, page=rn.randint(1, num_pages), results_per_page=40)
    # Keep photos with a landscape aspect ratio between 1.4 and 1.9
    wallpapers = filter(lambda x: 1.4 < x.width / x.height < 1.9, api.get_entries())
    return rn.choice(list(wallpapers)).original
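# Example call (assumes files\pexel_key.json contains {"key": "<your Pexels key>"}):
print(get_pic("mountains"))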
from pexels_api import API  # pip install pexels_api
import os
import requests

def downloadImage(url, fotoid, arama):
    img_data = requests.get(url).content
    # Save to local disk under Download/<search term>/
    with open(f"Download/{arama}/{fotoid}.jpeg", "wb") as img:
        img.write(img_data)

PEXELS_API_KEY = 'your key'  # Your API key goes here.
api = API(PEXELS_API_KEY)
search = input("Search something to download: ")  # Search term for images
if not os.path.exists(f"Download/{search}"):  # Don't create the folder if it already exists.
    os.mkdir(f"Download/{search}")
arama = api.search(search)
while True:
    photos = api.get_entries()
    dosyalar = os.listdir(f"Download/{search}")  # List already-downloaded files so downloads can resume
    for photo in photos:
        if not os.path.exists(f"Download/{search}/{photo.id}.jpeg"):  # If the photo exists, don't download it again
            downloadImage(photo.original, photo.id, search)
    if not api.has_next_page:
        break
    api.search_next_page()  # advance to the next page of results
print("{}: Must be hole number".format(required_args[1])) print(usage) exit() if total_photos < 1: print("{}: Minimum value 1".format(total_photos)) print(usage) exit() if len(required_args) == 3: if os.path.isdir(required_args[2]): path = required_args[2] else: print("{}: Must be a directory".format(required_args[2])) print(usage) exit() # Create api object api = API(API_KEY) # Search photos print("Searching: {}".format(query)) per_page = total_photos if total_photos < 80 else 80 api.search(query, per_page) print("Total results: {}".format(api.total_results)) # Check if there are photos if not api.json["photos"]: exit() # If there aren't enough photos assign new total_photos if total_photos > api.total_results: total_photos = api.total_results print("Not enough photos, downloading {} photos instead".format(total_photos)) # Create directory if does not exists path = os.path.join(path, query.replace(" ", "-")) if not os.path.isdir(path): os.mkdir(path)
from pexels_api import API

PEXELS_API_KEY = '563492ad6f91700001000001de8ec7b23dc3431798ac1d6b9d945791'
api = API(PEXELS_API_KEY)
api.search('models', page=1, results_per_page=20)
photos = api.get_entries()
for photo in photos:
    print('Photographer: ', photo.photographer)
    print('Photo url: ', photo.url)
    print('Photo original size: ', photo.original)
from pexels_api import API

PEXELS_API_KEY = '563492ad6f91700001000001f15d07838df1429bb172cf9081a1f0cc'
# Create API object
px = API(PEXELS_API_KEY)

def search_photo(query='', results_per_page=9):
    '''
    Search photos on Pexels matching the query string and
    return a list of image URLs (landscape size).
    '''
    px.search(query=query, results_per_page=results_per_page, page=1)
    photos = px.get_entries()
    photos_link = []
    for photo in photos:
        photos_link.append(photo.landscape)
    return photos_link

def main():
    p = search_photo('sadness')
    print(p)

if __name__ == "__main__":
    main()
from pexels_api import API import os # Init api object with your Pexels API key API_KEY = os.environ.get("PEXELS_API_KEY") api = API(API_KEY) # Search 'koala' photos api.search("koala") print("Total results: ", api.total_results) # Loop all the pages while True: # Get all photos in the page photos = api.get_entries() # For each photo print its properties for photo in photos: print("-----------------------------------------------") print("Photo id: ", photo.id) print("Photo width: ", photo.width) print("Photo height: ", photo.height) print("Photo url: ", photo.url) print("Photographer: ", photo.photographer) print("Photo description: ", photo.description) print("Photo extension: ", photo.extension) print("Photo sizes:") print("\toriginal: ", photo.original) print("\tcompressed: ", photo.compressed) print("\tlarge2x: ", photo.large2x) print("\tlarge: ", photo.large) print("\tmedium: ", photo.medium) print("\tsmall: ", photo.small) print("\ttiny: ", photo.tiny) print("\tportrait: ", photo.portrait)
from rest_framework import status
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework import generics
from rest_framework.parsers import JSONParser
from django.shortcuts import render  # needed by Index.get below
from .models import PikifyUser, Image
from .serializers import PikifyUserSerializer, ImageSerializer
import json
from pexels_api import API

PEXELS_API_KEY = '563492ad6f917000010000015fed1bf3240b45b783e929fb372a40eb'
api = API(PEXELS_API_KEY)

class Index(APIView):
    """
    GET: view index page
    """
    renderer_classes = [TemplateHTMLRenderer]
    template_name = 'pikify/index.html'

    def get(self, request, format=None):
        # Render the index page with the sign-up form
        return render(request, 'pikify/index.html')

class SignUp(APIView):
    """
    GET: View sign up page
    """
from app.models import db, User
from werkzeug.security import generate_password_hash
from faker import Faker
from pexels_api import API
import os

fake = Faker()
api = API(os.environ.get('PEXEL_API_KEY'))
api.search('person', page=1, results_per_page=20)
photos = api.get_entries()
for photo in photos:
    print(photo.original)

def seed_more_users():
    # Create seven fake users, each with a Pexels photo as profile picture
    users = [User(
        name=fake.name(),
        email=fake.email(),
        username=fake.user_name(),
        hashed_password=generate_password_hash('pass'),
        biography=fake.sentence(),
        profilePicture=photos[i].small) for i in range(7)]
    for user in users:
        db.session.add(user)
    db.session.commit()

def undo_more_users():
    db.session.execute('TRUNCATE users CASCADE;')
def get_pexels_session():
    creds = settings.pexels_auth()
    api = API(creds["pexels_key"])
    return api
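# Example use of get_pexels_session() above; a sketch assuming
# settings.pexels_auth() is configured, with a search call that mirrors the
# other snippets here:
api = get_pexels_session()
api.search('coffee', page=1, results_per_page=5)
for photo in api.get_entries():
    print(photo.url)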
from pexels_api import API import os # Init api object with your Pexels API key API_KEY = os.environ.get("PEXELS_API_KEY") api = API(API_KEY) # Search curated photos in the fifth page api.curated(results_per_page=10, page=5) # Loop backwards while True: # Get all photos in the page photos = api.get_entries() # For each photo print its properties for photo in photos: print("-----------------------------------------------") print("Photo id: ", photo.id) print("Photo width: ", photo.width) print("Photo height: ", photo.height) print("Photo url: ", photo.url) print("Photographer: ", photo.photographer) print("Photo description: ", photo.description) print("Photo extension: ", photo.extension) print("Photo sizes:") print("\toriginal: ", photo.original) print("\tcompressed: ", photo.compressed) print("\tlarge2x: ", photo.large2x) print("\tlarge: ", photo.large) print("\tmedium: ", photo.medium) print("\tsmall: ", photo.small) print("\ttiny: ", photo.tiny) print("\tportrait: ", photo.portrait) print("\tlandscape: ", photo.landscape)
def get_main_image():
    # Download a base image from the pexels.com API
    # and store it as report_base.png

    # Default image URL
    img_url = 'https://i.imgur.com/wOSnnYI.png'
    img_dir = 'tasks/steem_japan'
    base_img_file = 'report_base.png'
    overlay_img_file = 'report_overlay.png'
    main_img_file = 'report_main.png'

    # Set a user agent so urllib requests are not rejected
    user_agent_list = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64)',
        'AppleWebKit/537.36 (KHTML, like Gecko)',
        'Chrome/92.0.4515.101 Safari/537.36'
    ]
    user_agent = ' '.join(user_agent_list)
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-Agent', user_agent)]
    urllib.request.install_opener(opener)
    # base_image_url = "https://loremflickr.com/610/427/japan"

    # Pick a random page number
    page_number = random.randint(1, 80)

    # Search for a Japan image via the Pexels API
    PEXELS_API = API(PEXELS_API_KEY)
    PEXELS_API.search('japan', page=page_number, results_per_page=10)
    photos = PEXELS_API.get_entries()

    # Pick a random photo
    photos = [random.choice(photos)]
    base_image_url = ''
    image_params = '?auto=compress&cs=tinysrgb&fit=crop&h=427&w=610'
    for photo in photos:
        base_image_url = photo.landscape.split('?')[0]
        base_image_url += image_params

    # Retrieve the image to disk
    urllib.request.urlretrieve(base_image_url, f'{img_dir}/{base_img_file}')

    # Open the base image
    base_image = Image.open(rf"{img_dir}/{base_img_file}")
    # Open the overlay image
    overlay_image = Image.open(rf"{img_dir}/{overlay_img_file}").convert("RGBA")
    # Paste overlay_image on top of the base image at coordinates (x, y)
    base_image.paste(overlay_image, (5, 150), mask=overlay_image)
    # Save the composited image
    base_image.save(f'{img_dir}/{main_img_file}')

    # Upload the image to Steem and get the uploaded image URL
    result = IU.upload(f'{img_dir}/{main_img_file}', USERNAME)
    img_url = result['url']

    return {'url': img_url, 'src': base_image_url}
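# get_main_image() above relies on module-level names that are not shown; an
# assumed sketch of what they would look like (IU's interface is inferred only
# from the IU.upload(path, username) call above, and is not defined here):
import random
import urllib.request

from PIL import Image
from pexels_api import API

PEXELS_API_KEY = 'your-api-key'   # assumption: set your own key
USERNAME = 'steem-user'           # assumption: the Steem account to upload as
# IU: an image-uploader client assumed to expose upload(path, username) -> {'url': ...}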