import web_crawler

# add_all_to_index and build_index are assumed to come from the project's
# index module (not shown in this listing).


def crawl_add(seed, depth, index):
    # Crawl from seed down to the given depth, then merge the pages
    # into an existing index.
    urls = web_crawler.crawl_web(seed, depth)
    add_all_to_index(urls, index)


def crawl_build(seed, depth):
    # Crawl from seed down to the given depth and build a fresh index.
    urls = web_crawler.crawl_web(seed, depth)
    return build_index(urls)
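A minimal sketch of how these helpers might be called; the URLs and depth are taken from the scripts below, while the exact shape of the index returned by build_index and consumed by add_all_to_index is an assumption, not confirmed by the source:

    # Build a fresh index from one seed (values borrowed from the scripts below):
    index = crawl_build('https://wikidocs.net/book/1', 2)
    # Merge a second crawl into the existing index:
    crawl_add('http://docs.sphinx-users.jp/contents.html', 2, index)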
# -*- coding: utf-8 -*-
import argparse

from web_crawler import crawl_web

if __name__ == '__main__':
    # Crawl to depth 2 from a seed URL passed on the command line:
    #   python <this script> SEED_URL
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('seed_url')
    args = arg_parser.parse_args()
    crawl_web(args.seed_url, 2)
# -*- coding: utf-8 -*-
__author__ = 'masashi'

from web_crawler import crawl_web

if __name__ == '__main__':
    crawl_web('http://docs.sphinx-users.jp/contents.html', 2)
# -*- coding: utf-8 -*-
from web_crawler import crawl_web

if __name__ == '__main__':
    crawl_web('https://wikidocs.net/book/1', 2)