def test_search(self):
    index = 'redflowervideo'
    resource_filepath = self.store_mapping[index]
    stored_response = json.loads(open(resource_filepath).read())
    responses.add(
        responses.GET,
        '{}{}'.format(API_ROOT, stored_response.get('_url')),
        # _url contains only the short path like /popular?page=2&per_page=5
        json=stored_response.get('body'),
        status=stored_response.get('status_code'),
        content_type='application/json',
        adding_headers=stored_response.get('headers'),
        match_querystring=True,
    )

    py_pexels = PyPexels(api_key=api_key)
    search_results_page = py_pexels.videos_search(query='red flower', page=2, per_page=5)

    # Page properties
    print(search_results_page.page)
    print(search_results_page.per_page)
    print(search_results_page.has_next)
    print(search_results_page.has_previous)
    print(search_results_page.link_self)
    print(search_results_page.link_first)
    print(search_results_page.link_last)
    print(search_results_page.link_next)
    print(search_results_page.link_previous)

    # Entries
    for video in search_results_page.entries:
        print(video.id, video.user, video.width, video.height, video.url)
        print(video.video_files)
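
# The test above replays a canned response through the `responses` mock instead
# of hitting the live API. The fixture file only needs to expose the keys the
# test reads back: `_url`, `body`, `status_code` and `headers`. A hypothetical
# fixture could be generated like this (the path, header and body values are
# illustrative assumptions, not the project's real resource file):
import json

fixture = {
    '_url': '/videos/search?query=red+flower&page=2&per_page=5',  # short path only; API_ROOT is prepended by the test
    'status_code': 200,
    'headers': {'X-Ratelimit-Remaining': '19999'},  # illustrative header
    'body': {'page': 2, 'per_page': 5, 'total_results': 0, 'videos': []},
}
with open('resources/resource__videos_search_redflowervideo.json', 'w') as fh:
    json.dump(fixture, fh, indent=2)
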
import logging

from pypexels import PyPexels

api_key = 'YOUR_API_KEY'  # set your Pexels API key here or load it from the environment

# Log to file; the root logger is used for this example's own messages
logger = logging.getLogger()
logging.basicConfig(filename='app_video_search.log', level=logging.DEBUG)

# pypexels logger defaults to level logging.ERROR
# If you need to change that, use getLogger/setLevel
# on the module logger, like this:
logging.getLogger(PyPexels.logger_name).setLevel(logging.DEBUG)

# add a header to the log
logger.debug(80 * '=')
logging.debug('Testing PyPexels.videos_search()')
logger.debug(80 * '=')

# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

# Start with the search results, maximize number of items per page
# Note: this will run until all matching videos have been visited,
#       unless a connection error occurs.
#       Typically the API hourly limit gets hit during this
#
# Note2: the video API is not currently (May 2020) producing next_page/prev_page links
#        so this example will not be able to keep walking forward
#
search_videos_page = py_pexel.videos_search(query="red+flower", per_page=40)
while True:
    for video in search_videos_page.entries:
        print(video.id, video.user.get('name'), video.url)
    if not search_videos_page.has_next:
        break
    search_videos_page = search_videos_page.get_next_page()
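
# Because of Note2 above, get_next_page() may have no link to follow for video
# searches. A possible fallback (a sketch, not part of the library's documented
# behaviour) is to request each page explicitly through the `page` parameter,
# which videos_search() already accepts, and stop once a page comes back empty:
page = 1
while True:
    results = py_pexel.videos_search(query="red+flower", page=page, per_page=40)
    entries = list(results.entries)
    if not entries:
        break
    for video in entries:
        print(video.id, video.user.get('name'), video.url)
    page += 1
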
class PexelPusher:
    def __init__(self, vname=''):
        self.py_pexel = PyPexels(api_key=api_key)
        self.latestIDs = []
        self.vname = vname

    def searchVideos(self, query='', qSize=10):
        # Walk the search results, remember every video id seen, and collect the
        # preview-image URLs of roughly landscape videos (aspect ratio >= 4:3).
        selected_videos = []
        search_videos_page = self.py_pexel.videos_search(query=query, per_page=qSize)
        while True:
            for video in search_videos_page.entries:
                self.latestIDs.append(video.id)
                if video.width / video.height >= 1.33:
                    # strip the query string from the preview image URL
                    image = video.image.split('?')
                    selected_videos.append(image[0])
            if not search_videos_page.has_next:
                break
            search_videos_page = search_videos_page.get_next_page()
        return selected_videos

    def getThumbs(self, querylist, qSize):
        # Download the preview images for every query, shrink them to thumbnails,
        # then build a contact sheet from the cached files.
        if self.vname == '':
            print('Please name your video first, champion.')
            return
        cache_folder = '.\\cacheimages\\'
        if not os.path.exists(cache_folder):
            os.mkdir(cache_folder)
        for query in querylist:
            video_list = self.searchVideos(query, qSize)
            random.shuffle(video_list)
            for imageurl in video_list:
                r = requests.get(imageurl)
                filename = cache_folder + os.path.split(imageurl)[1]
                with open(filename, 'wb') as outfile:
                    outfile.write(r.content)
                # shrink the cached image to a 400x400 thumbnail in place
                image = Image.open(filename)
                image.thumbnail((400, 400))
                image.save(filename)
        self.makeContactSheet()

    def makeContactSheet(self):
        # Assemble the cached thumbnails into a single montage, save the collected
        # video ids, show the result and clean up the cache folder.
        cache_root = '.\\cacheimages\\'
        vname = self.vname
        images = []
        for root, dirs, files in os.walk(cache_root):
            targetPath = os.listdir(root)
            random.shuffle(targetPath)
            for file in targetPath:
                images.append(os.path.join(root, file))
        outfile = os.path.join('.\\media\\cs_{0}.jpg'.format(vname))
        montaner.generate_montage(images, outfile)
        self.saveVideoIDs()
        img = Image.open(outfile)
        img.show()
        shutil.rmtree(cache_root)

    def saveVideoIDs(self):
        # Persist the ids gathered by searchVideos(), replacing any previous file.
        file = '.\\modules\\config\\{0}_IDs.json'.format(self.vname)
        if os.path.exists(file):
            os.remove(file)
        tobiasj.saveThis(self.latestIDs, file)

    def downloadVideos(self):
        # Fetch the HD rendition of every saved video id and record author credits.
        file = '.\\modules\\config\\{0}_IDs.json'.format(self.vname)
        ids = tobiasj.loadThis(file)
        namelist = []
        root = '.\\media\\videos\\{0}\\'.format(self.vname)
        if not os.path.exists(root):
            os.mkdir(root)
        for videoid in ids:
            video = self.py_pexel.single_video(video_id=videoid)
            for each in video.video_files:
                if 'hd' in each['quality']:
                    link = each['link']
                    name = 'ID{0}_{1}'.format(videoid, video.user.get('name'))
                    if name not in namelist:
                        namelist.append(name)
                    filename = root + '{0}.mp4'.format(videoid)
                    if not os.path.isfile(filename):
                        print('Downloading this video: {0}'.format(filename))
                        r = requests.get(link)
                        with open(filename, 'wb') as outfile:
                            outfile.write(r.content)
                    else:
                        print('Skipping this video because it already exists: {0}'.format(filename))
        tobiasj.saveThis(namelist, '.\\modules\\config\\{0}.credits.json'.format(self.vname))
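
# A minimal driver for the class above (a sketch; `montaner`, `tobiasj`,
# `api_key` and the query strings are assumptions supplied by the surrounding
# project, and the cache/media folders must match the paths used above):
pusher = PexelPusher(vname='red_flower_montage')
pusher.getThumbs(['red flower', 'poppy field'], qSize=15)  # builds the contact sheet and saves the video ids
pusher.downloadVideos()                                    # downloads the HD files for those ids and writes the credits file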