def getThanks(url):
    """Fetch *url* and return the text of its '#openList' element.

    Fix vs. original: the parsed value was computed but never returned,
    so every caller received None.

    NOTE(review): this definition is immediately shadowed by the
    getThanks(volNumber) overload below — confirm whether it is still
    needed, Python keeps only the last definition.

    :param url: full page URL to crawl.
    :return: text content of the '#openList' element ('' if unset).
    :raises Exception: re-raises whatever the crawler/parser raises.
    """
    value = ''
    try:
        data = httpClient.crawlerResource(url, "GET", None)
        value = Parser.getElementText(data, "#openList")
    except Exception:
        # Re-raise unchanged; kept as an explicit hook for future logging.
        raise
    return value
def getThanks(volNumber):
    """Return the numeric 'thanks' count shown in '#openList' for a vol.

    Builds the luoo.net page URL from *volNumber*, crawls it, and extracts
    the first run of digits from the '#openList' element text.

    Fixes vs. original:
      - the result is now returned (it was computed and then dropped);
      - the regex captures the whole digit run: '(\\d)+' kept only the
        LAST digit in group(1), r'(\\d+)' keeps them all.

    :param volNumber: vol number, appended to the music URL.
    :return: the digits as a string, e.g. '123'.
    :raises Exception: re-raised from the crawler/parser; AttributeError
        if the element text contains no digits (same as the original).
    """
    value = ''
    url = 'http://www.luoo.net/music/' + str(volNumber)
    try:
        data = httpClient.crawlerResource(url, "GET", None)
        value = Parser.getElementText(data, "#openList")
        m = re.search(r"(\d+)", value)
        value = m.group(1)
    except Exception:
        # Re-raise unchanged; kept as an explicit hook for future logging.
        raise
    return value
def getPic(volNumber): #初始化目录 initdir(volNumber) url = 'http://www.luoo.net/music/'+str(volNumber) pics = [] tasks = [] try: data = httpClient.crawlerResource(url, "GET", None) imgs = Parser.getElements(data, "li.track-item", "a[data-img]") title = Parser.getElementText(data, "span.vol-title").strip() print 'Album title:', title print 'There are ', len(imgs), ' pictures need to be downloaded' i = 1 for img in imgs: imgurl = Parser.getElementAttr(img, 'a', "data-img") pic = httpClient.crawlerResource(imgurl, "GET", None) filepath = "./static/vol."+str(volNumber)+"/pic/"+str(i)+".jpg" def t(): if not os.path.exists(filepath): fileUtil.saveByteFile(filepath, pic) t = threading.Thread(target = t) t.start() i= i +1 print 'END' except Exception, e: raise