"""Report response length, status, and URL for a set of sites.

Uses the gen_from_urls generator so each response is processed lazily
instead of materializing a list of requests up front.
"""

from url_utils import gen_from_urls

urls = ('https://www.facebook.com',
        'https://www.cnn.com',
        'https://www.twitter.com')

# gen_from_urls yields one (content_length, status_code, url) tuple per URL.
for resp_len, status, url in gen_from_urls(urls):
    print(resp_len, '->', status, url)
"""Print response length, status, and URL for each site in the tuple."""

from url_utils import gen_from_urls

urls = ('http://www.oreilly.com', 'http://baidu.com', 'http://music.163.com')

# Consume the generator function directly in a for loop; each item
# unpacks as (response_length, status_code, url).
for resp_len, status, url in gen_from_urls(urls):
    print(resp_len, '->', status, '->', url)
#!/usr/bin/env python3
# -*- mode: python; coding: utf-8 -*-
# file: generator.py
# Created: <2019/02/14 21:37:25>
# Last Modified: <2019/02/17 15:40:21>
"""Report length/status per URL, then map each URL to its content size."""

import pprint

from url_utils import gen_from_urls

urls = ('http://headfirstlabs.com', 'http://oreilly.com', 'http://twitter.com')

# Fetch each URL exactly once and keep the (size, status, url) tuples;
# the original iterated gen_from_urls twice, issuing a redundant second
# round of HTTP requests for the dict comprehension below.
results = list(gen_from_urls(urls))

for resp_len, status, url in results:
    print(resp_len, '->', status, '->', url)

# Dict comprehension: url -> content length (status is ignored via _).
urls_res = {url: size for size, _, url in results}
pprint.pprint(urls_res)
"""Generate a dictionary of URLs and response content lengths."""
# NOTE: the docstring must precede the imports to actually become the
# module docstring; the original placed it after them, where it was a
# discarded bare-string expression.

from pprint import pprint

from url_utils import gen_from_urls

urls = ('http://google.com', 'http://twitter.com', 'http://youtube.com')

# Fetch each URL exactly once; the original iterated gen_from_urls a
# second time for the dict comprehension, re-requesting every URL.
results = list(gen_from_urls(urls))

# Example of the function's full use: print length -> status -> url.
for response_length, status, url in results:
    print(response_length, '->', status, '->', url)
print('-------')

# Dictionary with key = url and value = length of the response content.
urls_length = {url: size for size, _, url in results}
pprint(urls_length)