"""Driver script: crawl a site's base URL and record product links to a CSV.

Uses the project-local ``Crawler`` class: it is pointed at the base URL,
a CSV output file is created, the base URL is marked as visited, and every
valid link found on the base page is stored for later visiting.
"""

from crawler import Crawler
import concurrent.futures  # NOTE(review): imported but unused here — presumably for later parallel crawling; confirm before removing

# Base URL of the site to crawl.
url = "http://www.epocacosmeticos.com.br"

# Name of the CSV file that will receive the scraped products.
title = "products.csv"

# Counter: how many products have been stored so far.
products = 0

# 1 - Create the crawler instance.
c = Crawler()

# 2 - Point it at the base URL.
c.setUrl(url)

# 3 - Create the CSV output file.
c.createCsv(title)

# 4 - Record the base URL as already visited.
c.keepUrl(url)

# 5 - Collect every valid link found on the base page.
#     NOTE(review): isValidUrl/keepUrl semantics come from the project's
#     Crawler class — assumed to filter duplicates/foreign hosts; confirm.
for link in c.getAllLinks(url):
    if c.isValidUrl(link):
        c.keepUrl(link)

print("Running...\n")  # fixed typo: was "Runing...\n"

# Counter: how many pages have been visited so far.
i = 0