Example #1
def getMaxPictureCount(volNumber):
	# Prepare the local directory for this volume, then fetch its album page.
	initdir(volNumber)
	url = 'http://www.luoo.net/music/' + str(volNumber)

	imgs = []
	try:
		data = httpClient.crawlerResource(url, "GET", None)
		# Every track item exposes its cover image URL in the a[data-img] attribute.
		imgs = Parser.getElements(data, "li.track-item", "a[data-img]")
	except Exception, e:
		raise

	# Return how many pictures the volume page lists.
	return len(imgs)
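A minimal usage sketch, assuming the project helpers are importable; the vol number 886 is only an illustrative value:

if __name__ == '__main__':
	count = getMaxPictureCount(886)  # 886 is a hypothetical volume number
	print 'vol.886 lists', count, 'pictures'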
Example #2
def getPic(url, volNumber):
	try:
		data = httpClient.crawlerResource(url, "GET", None)
		imgs = Parser.getElements(data, "li.track-item", "a[data-img]")

		# Download each track's cover image and save it as <index>.jpg
		# under ./luoo/<volNumber>/pic/.
		for i, img in enumerate(imgs, 1):
			imgurl = Parser.getElementAttr(img, 'a', "data-img")
			pic = httpClient.crawlerResource(imgurl, "GET", None)
			fileUtil.saveByteFile("./luoo/" + str(volNumber) + "/pic/" + str(i) + ".jpg", pic)

	except Exception, e:
		raise
Example #3
import os
import threading

def getPic(volNumber):
	# Initialize the local directory for this volume
	# (initdir, httpClient, Parser and fileUtil are project helpers assumed to be imported elsewhere).
	initdir(volNumber)

	url = 'http://www.luoo.net/music/' + str(volNumber)

	try:
		data = httpClient.crawlerResource(url, "GET", None)
		imgs = Parser.getElements(data, "li.track-item", "a[data-img]")
		title = Parser.getElementText(data, "span.vol-title").strip()

		print 'Album title:', title
		print 'There are', len(imgs), 'pictures to download'

		for i, img in enumerate(imgs, 1):
			imgurl = Parser.getElementAttr(img, 'a', "data-img")
			pic = httpClient.crawlerResource(imgurl, "GET", None)

			filepath = "./static/vol." + str(volNumber) + "/pic/" + str(i) + ".jpg"

			# Bind filepath and pic as default arguments so each thread keeps its
			# own values; a plain closure would read whatever the loop variables
			# hold by the time the thread actually runs.
			def save(filepath=filepath, pic=pic):
				if not os.path.exists(filepath):
					fileUtil.saveByteFile(filepath, pic)

			threading.Thread(target=save).start()

		print 'END'

	except Exception, e:
		raise
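A minimal driver sketch for this threaded variant, assuming the snippets live in the same module; the vol numbers below are only illustrative:

if __name__ == '__main__':
	for vol in (886, 887, 888):  # hypothetical volume numbers
		getPic(vol)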