def update(self):
    self.lastUpdate = datetime.now()
    for linenumber, bus in self.lines.items():
        bus.reset()
    html = self.downloader.get(self.url)
    for line in xpath.search(html, '//table//tr'):
        counter = 0
        time = ''
        delay = ''
        destination = ''
        linenumber = ''
        details = ''
        for item in xpath.search(line, '/td'):
            if counter == 0:
                hour, minutes = xpath.get(item, '/span').strip().split(':')
                time = Time(hour, minutes)
                delay = xpath.get(
                    item, '/span[@class="block exclamation bold mts"]'
                ).strip().split(':')
                if len(delay) > 1:
                    delay = Time(delay[0], delay[1], "delay")
                else:
                    delay = Time(0, delay[0], "delay")
            elif counter == 1:
                destination = item.strip()
            elif counter == 2:
                linenumber = item.strip()
            elif counter == 4:
                details = re.sub('<span.*</span>', '', item).strip()
            counter += 1
        if delay != '':
            if linenumber + destination in self.lines:
                self.lines[linenumber + destination].find(time).delay = delay
def get(self, url):
    html = self.downloadPage(url)
    data = xpath.get(html, '//span[@class="new_price"]//')
    data = self.cleanUp(data)
    if data == '':
        data = xpath.get(html, '//span[@class="regular_price"]//')
        data = self.cleanUp(data)
    return self.toInt(data)
def extract(url):
    '''
    Function that extracts product info from websites listed in the csv file.
    It takes the url as an argument.
    '''
    try:
        url = url.encode('utf-8')
        D = download.Download()
        try:
            # Joining absolute path so that the function can be called from anywhere
            xpath_input_file = open(os.path.join(os.path.dirname(__file__), 'webpage_xpath.csv'), 'rb')
        except IOError:
            # Checking for IO exceptions, i.e. whether the file exists or not
            print("An error occurred while reading the csv file, check your directory again")
            sys.exit()
        reader = csv.reader(xpath_input_file)
        row = list(reader)
        item_info = {}
        for r in range(0, len(row)):
            if url.find(row[r][0]) >= 0 and url.find(row[r][4]) >= 0:
                # Checks whether the given url is valid;
                # fails when the url belongs to a listed site but is not a product url
                xpath1 = row[r][1]
                xpath2 = row[r][2]
                xpath3 = row[r][3]
                html = D.get(url)  # Webpage downloads after validation
                item_info['name'] = xpath.get(html, '%s//text()' % xpath1).strip()
                item_info['price'] = xpath.get(html, '%s//text()' % xpath2)
                item_info['image'] = xpath.get(html, '%s' % xpath3).strip()
                return item_info
            else:
                continue
        if item_info == {}:
            raise InvalidurlError("Enter a valid product url")
            sys.exit()
        xpath_input_file.close()
        sys.exit()
    except KeyboardInterrupt:
        raise
        sys.exit()
def scrapeBaramDom():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')
    now = datetime.now()
    down = Downloader('http://www.baramdom.com/')
    content = down.get_content()
    html = unicode(content)
    p = xpath.get(html, '//div[@class="box post"]')
    linkovi = xpath.search(p, '//div[@class="content"]')
    ads = []
    for l in linkovi:
        link = "http://www.baramdom.com" + xpath.get(l, '//div[@class="post-title"]/h2/a/@href')
        title = xpath.get(l, '//div[@class="post-title"]/h2/a')
        imageUrl = xpath.get(l, '//a[@class="grouped"]/img/@src')
        if imageUrl == "":
            imageUrl = "http://www.baramdom.com/img/apartment_noimage.png"
        else:
            imageUrl = "http://www.baramdom.com" + imageUrl
        download = Downloader(link)
        cont = download.get_content()
        cont = unicode(cont)
        description = xpath.get(cont, '//p[@class="post_add_desc"]')
        description = description.strip()
        category = u"Недвижнини"
        ost = xpath.get(l, '//p[@class="add-title"]')
        ost = ost.strip()
        ost = ost.split(" во ")
        region = ost[1]
        country = u"Македонија"
        k = ost[0]
        k = k.split("ам ")
        subcategory = k[1]
        price = xpath.get(cont, '//div[@class="post-add"]/p[@class="last"]').strip()
        price = price.split(" ")
        if len(price) == 3:
            value = "/"
            currency = "/"
        else:
            value = price[0]
            currency = price[1]
            if currency == "Euro.":
                currency = "EUR"
            elif currency == u"Ден.":
                currency = "MKD"
        date = xpath.get(l, '//div[@class="fl"]')
        date = date.strip()
        date = date.split(">")
        date = date[1]
        date = date.strip()
        date = date.split(" ")
        date = date[0]
        date = date.split("-")
        date = date[2] + "-" + date[1] + "-" + date[0]
        ad = Ad(link, title, imageUrl, description, category, subcategory,
                value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)

#print scrapeBaramDom()
def parse_html2(html):
    infos = []
    for i in html.split("<td></td></tr>"):
        ms = xpath.search(i, r"//span[@class='address-tag']")
        txhash = common.normalize(ms[0]) if len(ms) > 0 else ''
        fm = common.normalize(ms[1]) if len(ms) > 1 else ''
        too = common.normalize(ms[2]) if len(ms) > 2 else ''
        age = xpath.get(i, r"//span[@rel='tooltip']/@title")
        quantity = common.regex_get(i, r'>([\d\.\,]+)</td>$')
        direction = common.normalize(xpath.get(i, r'//span[@class="label\slabel.+"]'))
        if txhash:
            info = '"' + '","'.join([txhash, age, fm, direction, too, quantity]) + '"'
            infos.append(info)
    return infos
def get(self, url):
    html = self.downloadPage(url)
    data = xpath.get(html, '//div[@class="our_price"]//')
    parser = JumboHTMLParser()
    parser.feed(data)
    data = parser.price
    data = self.cleanUp(data)
    return self.toInt(data)
def holders_parse(html, i):
    infos = []
    h = xpath.get(html, r'//table[@class="table"]', remove=None)
    for k in xpath.search(h, r'//tr', remove=None):
        if '</td><td>' in k:
            ms = [common.normalize(m) for m in xpath.search(k, r'//td')]
            infos.append('"' + '","'.join(ms) + '"')
    return infos
def get(self, url):
    html = self.downloadPage(url)
    data = xpath.get(html, '//p[@class="special-price"]//')
    parser = AlShopHTMLParser()
    parser.feed(data)
    data = parser.price
    data = self.cleanUp(data)
    return self.toInt(data)
def get(self, url):
    html = self.downloadPage(url)
    # data = xpath.get(html, '//div[@class="price-holder xlarg-price"]//', remove=('span', '/span'))
    data = xpath.get(html, '//h3[@class="price"]//')
    parser = SouqHTMLParser()
    parser.feed(data)
    price = self.cleanUp(parser.price)
    return self.toInt(price)
def scrapePC(gamename, con=None):
    search = gamename
    search = search.replace(" ", "+")
    search = search.lower()
    if con:
        console = con
        # console = console.replace("{}".format(console), "\"{}\"".format(console))
    else:
        console = ""
    if search != "":
        duckduckgo = "https://www.duckduckgo.com/html/?q=!ducky+{}+site%3Apricecharting.com+t%3A\"{}\"+prices".format(console, search)
    else:
        duckduckgo = "https://www.duckduckgo.com/html/?q=!ducky+{}+site%3Apricecharting.com+t%3A{}+prices".format(console, search)
    PC = urllib2.Request(duckduckgo)
    PC.add_header('User-Agent', chrome)
    url = urllib2.urlopen(PC)
    time.sleep(1)
    infopage = url.read()
    redir = url.geturl()
    ddg = re.compile("duckduckgo")
    gm = re.compile("game")
    if ddg.match(redir):
        nopc = 1
        return nopc
    if "game" not in redir:
        nopc = 1
        return nopc
    gamedata = []
    gameinfo = xpath.get(infopage, '//h1[@id="product_name"]')  # Get the game's title
    gametitle = gameinfo.replace("Prices", "")
    gametitle = gametitle.strip(" \t\n\r")
    gamedata.append(gametitle)
    gamedata.append(redir)
    release = xpath.get(infopage, '//span[@class="date"]')  # Get the game's release date
    release = release.strip(" \t\n\r")
    gamedata.append(release)
    upc = xpath.get(infopage, '//span[@class="attribute"]/span')  # Get the UPC code
    upc = upc.strip(" \t\n\r")
    gamedata.append(upc)
    lp = xpath.get(infopage, '//td[@id="used_price"]/span')  # Get the loose price
    lp = lp.strip(" \t\n\r")
    gamedata.append(lp)
    cib = xpath.get(infopage, '//td[@id="complete_price"]/span')  # Get the CIB price
    cib = cib.strip(" \t\n\r")
    gamedata.append(cib)
    np = xpath.get(infopage, '//td[@id="new_price"]/span')  # Get the new price
    np = np.strip(" \t\n\r")
    gamedata.append(np)
    c = xpath.get(infopage, '//h2[@class="chart_title"]/a')  # Get the formatted console name
    c = c.strip(" \t\n\r")
    gamedata.append(c)
    return gamedata
def get(self, url):
    html = self.downloadPage(url)
    data = xpath.get(
        html, '//div[@class="item equalize-capacity-button-height selection"]//')
    parser = AppleHTMLParser()
    parser.feed(data)
    data = self.cleanUp(parser.price)
    return self.toInt(data)
def scrapeOglasiRs():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')
    now = datetime.now()
    down = Downloader('http://www.oglasi.rs/pretraga/0/0/')
    content = down.get_content()
    html = unicode(content)
    linkovi = xpath.search(html, '//li[@class="clearfix"]')
    ads = []
    for l in linkovi:
        link = xpath.get(l, '//a[@class="ogl_id"]/@href')
        title = xpath.get(l, '//h2/a[@class="ogl_id"].text()')
        imageUrl = "http://oglasi.rs" + xpath.get(l, '//a[@class="ogl_id"]/img/@src')
        price = xpath.get(l, '//div[@class="ad-price"]/h3')
        datum = xpath.get(l, '//div[@class="right-side"]/div/p/strong')
        datum = datum.split(".")
        date = datum[2] + "-" + datum[1] + "-" + datum[0]
        price = price.split(" ")
        price[0] = price[0].replace(".", "")
        currency = price[1]
        value = price[0]
        value = value.split(",")
        value = value[0]
        download = Downloader(link)
        ad = download.get_content()
        ad = unicode(ad)
        description = xpath.search(ad, '//div[@class="description"]/p')
        description = description[1].strip()
        category = "/"
        subcategory = "/"
        loc = xpath.search(ad, '//div[@class="description"]/ul[@class="clearfix"]')
        lo = xpath.search(loc[0], '//li')
        region = lo[1]
        region = region.split("(")
        region = region[0]
        region = region.strip()
        country = u"Србија"
        ad = Ad(link, title, imageUrl, description, category, subcategory,
                value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)

#print scrapeOglasiRs()
class Render(QWebPage):
    def __init__(self, url):
        self.app = QApplication(sys.argv)
        QWebPage.__init__(self)
        self.loadFinished.connect(self._loadFinished)
        self.mainFrame().load(QUrl(url))
        self.app.exec_()

    def _loadFinished(self, result):
        self.frame = self.mainFrame()
        self.app.quit()


r = Render(url)
html = r.frame.toHtml()
try:
    # Joining absolute path so that the script can be used inside an app
    f = open(os.path.join(os.path.dirname(__file__), 'webpage_xpath.csv'), 'rb')
except IOError:
    # Checking for Input/Output exceptions, i.e. whether the file exists or not
    print('An error occurred while reading the csv file, check your directory again')
    sys.exit()
reader = csv.reader(f)
row = list(reader)
item = {}
for r in range(0, len(row)):
    if url.find(row[r][0]) >= 0 and url.find(row[r][4]) >= 0:
        xpath1 = row[r][1]
        xpath2 = row[r][2]
        xpath3 = row[r][3]
        item['name'] = xpath.get(html, '%s//text()' % xpath1)
        item['price'] = xpath.get(html, '%s//text()' % xpath2)
        item['image'] = xpath.get(html, '%s' % xpath3)
        break
    else:
        continue
if item == {}:
    print "Invalid url given"
    sys.exit()
def mal(mal_title, mal_id=False):
    cookies = {
        "incap_ses_224_81958": "P6tYbUr7VH9V6shgudAbA1g5FVYAAAAAyt7eDF9npLc6I7roc0UIEQ=="
    }
    response = requests.get(
        "http://myanimelist.net/api/anime/search.xml",
        params={'q': mal_title},
        cookies=cookies,
        auth=("zodman1", "zxczxc"),
        headers={
            'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36'
        })
    content = response.content
    if mal_id is not False:
        for e in xpath.search(content, "//entry"):
            if mal_id in e:
                content = e
                break
    tqdm.write("%s %s" % ((mal_title, ), mal_id))
    id = xpath.get(content, "//id")
    title = xpath.get(content, "//title")
    title_en = xpath.get(content, "//english")
    type_ = xpath.get(content, "//type")
    synonyms = xpath.get(content, "//synonyms")
    status = xpath.get(content, "//status")
    synopsys = translate(xpath.get(content, "//synopsis"), "es")
    img = xpath.get(content, "//image")
    episodes = xpath.get(content, "//episodes")
    resumen = synopsys.replace("<br />", " ").replace("\n\r", "")
    resumen = translate(resumen, 'es')
    status = translate(status, 'es')
    assert id != "", mal_title
    data = dict(title=title, title_en=title_en, type=type_, status=status,
                resumen=resumen, img=img, episodes=episodes,
                synonyms=synonyms, id=id, synopsys=synopsys)
    return MalResult(**data)
def getDescription(link, path):
    try:
        reload(sys)
        sys.setdefaultencoding('utf-8')
        down = Downloader(fixurl(link))
        html = down.get_content()
        # print html
        html = unicode(html)
        description = xpath.get(html, path)
        return description
    except:
        pass
def parse(html, page):
    infos = []
    for i in html.split("<td></td></tr>"):
        ms = xpath.search(i, r"//a[@target='_parent']")
        txhash = ms[0] if len(ms) > 0 else ''
        fm = ms[1] if len(ms) > 1 else ''
        too = ms[2] if len(ms) > 2 else ''
        age = xpath.get(i, r"//span[@rel='tooltip']/@title")
        quantity = common.regex_get(i, r'>([\d\.\,]+)</td>$')
        info = '"' + '","'.join([txhash, age, fm, too, quantity]) + '"'
        infos.append(info)
    return infos
def get_earliest_crawl(website):
    """Return the datetime of the earliest crawl by archive.org for this website
    """
    url = 'http://web-beta.archive.org/web/*/' + website
    html = D.get(url)
    earliest_crawl_url = xpath.get(html, '//div[@id="wbMeta"]/p/a[2]/@href')
    try:
        earliest_crawl = earliest_crawl_url.split('/')[2]
    except IndexError:
        # unable to parse the date so assume just current data
        ts = datetime.datetime.now()
    else:
        ts = datetime.datetime.strptime(earliest_crawl, '%Y%m%d%H%M%S')
    return ts
def scrapeNedviznostiMakedonija():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')
    now = datetime.now()
    down = Downloader('http://www.nedviznostimakedonija.com.mk/Default.aspx?search=1')
    content = down.get_content()
    html = unicode(content)
    linkovi = xpath.search(html, '//div[@class="boxesResultNewTop"]')
    ads = []
    for l in linkovi:
        link = "http://www.nedviznostimakedonija.com.mk/" + xpath.get(
            l, '//a[@class="subjectLook nobackim"]/@href')
        title = xpath.get(l, '//a[@class="subjectLook nobackim"]').strip()
        imageUrl = "http://www.nedviznostimakedonija.com.mk/" + xpath.get(
            l, '//a[@class="nobackim"]/img/@src')
        download = Downloader(link)
        cont = download.get_content()
        cont = unicode(cont)
        description = xpath.get(
            cont, '//span[@id="Body1_DetailControl1_FormView1_Label5"]')
        category = u"Недвижнини"
        subcategory = "/"
        price = xpath.get(
            l, '//div[@style="float:right; color:#1b5474; font-size:14px; font-weight:bold;"]/span')
        price = price.split(" ")
        price[0] = price[0].replace(".", "")
        if price[1] == "€":
            price[1] = "EUR"
        else:
            price[1] = "MKD"
        value = price[0]
        currency = price[1]
        region = xpath.get(
            cont, '//span[@id="Body1_DetailControl1_FormView1_cityDescriptionLabel"]')
        country = u"Македонија"
        date = xpath.get(
            cont, '//span[@id="Body1_DetailControl1_FormView1_LabelDate"]')
        date = date.split(".")
        date = date[2] + "-" + date[1] + "-" + date[0]
        ad = Ad(link, title, imageUrl, description, category, subcategory,
                value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)

#print scrapeNedviznostiMakedonija()
def scrapeVipMarket5():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')
    now = datetime.now()
    down = Downloader('http://www.vipmarket5.mk/search/')
    content = down.get_content()
    html = unicode(content)
    linkovi = xpath.search(html, '//tr[@class="frame_content"]')
    ads = []
    for l in linkovi:
        link = "http://www.vipmarket5.mk" + xpath.get(l, '//div[@style="width:365px; height:90%; margin-top:10px;"]/b/a/@href')
        title = xpath.get(l, '//div[@style="width:365px; height:90%; margin-top:10px;"]/b/a')
        imageUrl = xpath.get(l, '//div[@style="overflow:hidden; width:150px; height: 146px; margin: 5px;"]/a/img/@src')
        download = Downloader(link)
        cont = download.get_content()
        cont = unicode(cont)
        description = xpath.get(cont, '//div[@class="feature"]/p').strip()
        if description == "":
            description = "/"
        # ATTENTION: the site provides no categories
        category = "/"
        subcategory = "/"
        price = xpath.get(l, '//div[@style="margin-top:5px; margin-left:10px;height:155px; overflow:hidden;"]/h4/a')
        if price == u"Цена:По договор":
            value = "/"
            currency = "/"
        else:
            price = price.split(":")
            price = price[1]
            price = price.split(" ")
            value = price[0]
            if price[1] == "€":
                currency = "EUR"
            elif price[1] == "ден.":
                currency = "MKD"
        date = xpath.get(l, '//b[@style="font-weight:bold;"]')
        date = date.split(": ")
        date = date[1]
        date = date.split(".")
        date = date[2] + "-" + date[1] + "-" + date[0]
        country = u"Македонија"
        region = xpath.get(cont, '//div[@style="float:left; width: 140px; overflow:hidden; font-family: Tahoma,Geneva,sans-serif; font-weight:bold"]')
        if region == "":
            region = "/"
        ad = Ad(link, title, imageUrl, description, category, subcategory,
                value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)

#print scrapeVipMarket5()
def upload_to_nyaa(torrent_file, url_info, description):
    req = requests.Session()
    url = URL_LOGIN
    login_url = url % "login"
    data_login = dict(login=settings.NYAA_USER, password=settings.NYAA_PASSWORD,
                      method=1, submit="Submit")
    res = req.post(login_url, data=data_login)
    if res.status_code != 200:
        raise NyaaException("status code error %s" % res.status_code)
    if "Login failed" in res.text:
        raise NyaaException("login failed")
    if "id" not in res.cookies and "pw" not in res.cookies:
        raise NyaaException("login cookies not set")
    upload_url = url % "upload"
    desc = description  # % dict(anime=anime, episodio=episodio)
    data = {
        "catid": "1_38",
        "info": url_info,
        "description": desc,
        "rules": 1,
        'submit': "Upload",
        "remake": "0",
        "anonymous": "0",
        "hidden": "1"
    }
    files = {'torrent': open(torrent_file, "rb")}
    res = req.post(upload_url, data, files=files)
    res_text = res.text
    if "Upload failed" in res_text:
        error = xpath.get(res_text, "//[@class='error']/text()")
        return NyaaException(error)
    reg = re.compile('tid=([\d].+?)\">View your torrent.')
    match = reg.search(res.text)
    if match:
        r = match.groups()[0]
        url = URL_SUCCESS % r
        return url
    return True
def search_animenetwork(title):
    base_url = "http://cdn.animenewsnetwork.com/encyclopedia/api.xml"
    params = {'anime': "~" + title}
    response = requests.get(base_url, params=params)
    animes = xpath.search(response.content, "//anime")
    l = []
    for i in animes:
        id = xpath.search(i, "./@id").pop()
        images = xpath.search(i, "//info/img/@src")
        summary = xpath.get(i, "//info[@type='Plot Summary']")
        genres = xpath.search(i, "//info[@type='Genres']")
        openings = xpath.search(i, "//info[@type='Opening Theme']")
        endings = xpath.search(i, "//info[@type='Ending Theme']")
        d = {'summary': summary, 'images': images, 'genres': genres,
             'openings': openings, 'endings': endings, 'id': id}
        l.append(d)
    return l
def mal_search(mal_title, mal_id=False):
    cookies = {"incap_ses_224_81958": "P6tYbUr7VH9V6shgudAbA1g5FVYAAAAAyt7eDF9npLc6I7roc0UIEQ=="}
    response = requests.get(
        "http://myanimelist.net/api/anime/search.xml",
        params={'q': mal_title},
        cookies=cookies,
        auth=("zodman1", "zxczxc"),
        headers={'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36'}
    )
    content = response.content
    if mal_id is not False:
        for e in xpath.search(content, "//entry"):
            if mal_id in e:
                content = xpath.get(e, "//anime/entry")
                break
    else:
        content = xpath.get(content, "//anime/entry")
    english_title = xpath.get(content, '//english')
    title = xpath.get(content, '//title')
    synonyms = xpath.get(content, '//synonyms')
    id = xpath.get(content, "//id")
    return {'title': title, 'english_title': english_title,
            'synonyms': synonyms, 'id': id}
from webscraping import download, xpath

D = download.Download()
url = 'http://uslawfirms.co'
html1 = D.get(url)
html2 = D.archive_get(url)
for html in (html1, html2):
    print xpath.get(html, '//title')
seen_urls = set()  # track which article URLs have already been seen, to prevent duplicates
D = download.Download()
# iterate each of the categories
for category_link in ('/developer/knowledge-base?page=%d', '/developer/articles?page=%d'):
    # iterate the pages of a category
    for page in itertools.count():
        category_html = D.get(urlparse.urljoin(DOMAIN, category_link % page))
        article_links = xpath.search(category_html, '//div[@class="morelink"]/a/@href')
        num_new_articles = 0
        for article_link in article_links:
            # scrape each article
            url = urlparse.urljoin(DOMAIN, article_link)
            if url not in seen_urls:
                num_new_articles += 1
                seen_urls.add(url)
                html = D.get(url)
                title = xpath.get(html, '//div[@class="feed-header-wrap"]/h2')
                num_reads = xpath.get(html, '//li[@class="statistics_counter last"]/span').replace
                row = title, num_reads, url
                writer.writerow(row)
        if num_new_articles == 0:
            break  # have found all articles for this category
def scrapeMobileBg():
    # cp1251 support
    reload(sys)
    sys.setdefaultencoding('cp1251')
    now = datetime.now()
    down = Downloader('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71wxzy&f1=1')
    #http://www.mobile.bg/71ydeh
    #http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71wxzy&f1=1
    content = down.get_content()
    html = unicode(content)
    linkovi = xpath.search(html, '//form[@name="search"]/table[@class="tablereset"]')
    linkovi = linkovi[3:len(linkovi) - 4]
    links = []
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71xw69&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71xwi1&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71xwr0&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71xx7g&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71xxjy&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71xzyr&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y06e&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y0dk&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y0q6&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y16v&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y1ep&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y2ih&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y2x5&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y34p&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y3ex&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y3wj&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y449&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y4wz&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y5qh&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y5yv&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y6az&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y6kg&f1=1')
    links.append('http://www.mobile.bg/pcgi/mobile.cgi?act=3&slink=71y6qz&f1=1')
    for link in links:
        dole = Downloader(link)
        content = dole.get_content()
        html = unicode(content)
        lin = xpath.search(html, '//form[@name="search"]/table[@class="tablereset"]')
        lin = lin[3:len(lin) - 4]
        for li in lin:
            linkovi.append(li)
            linkot = xpath.get(li, '//td[@class="valgtop"]/a[@class="mmm"]/@href')
    ads = []
    for l in linkovi:
        link = xpath.get(l, '//td[@class="valgtop"]/a[@class="mmm"]/@href')
        title = xpath.get(l, '//td[@class="valgtop"]/a[@class="mmm"]').strip()
        imageUrl = xpath.get(l, '//a[@class="photoLink"]/img/@src')
        download = Downloader(link)
        cont = download.get_content()
        cont = unicode(cont)
        description = xpath.get(cont, '//td[@style="font-size:13px;"]').strip()
        description = description.split("<a href")
        description = description[0]
        if description == "» ":
            description = "/"
        else:
            description = description[0:len(description) - 19]
            description = description.replace("\"", "")
        category = u"Возила"
        subcategory = "/"
        price = xpath.get(l, '//span[@class="price"]').strip()
        if price == u"Договаряне":
            value = "/"
            currency = "/"
        else:
            price = price.split(" ")
            if len(price) == 2:
                value = price[0]
                currency = price[1]
            elif len(price) == 3:
                currency = price[2]
                value = price[0] + price[1]
            else:
                currency = price[3]
                value = price[0] + price[1] + price[2]
            if currency == "лв.":
                currency = "BGN"
        region = xpath.get(cont, '//td[@style="padding:10px"]').strip()
        region = region.split("Регион: ")
        region = region[1]
        region = region.split(" ")
        region = region[0]
        region = region.replace("<a", "").strip()
        date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
        country = u"Бугарија"
        ad = Ad(link, title, imageUrl, description, category, subcategory,
                value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)

#print scrapeMobileBg()
from webscraping import download, xpath

engine = download.Download()
html = engine.get('http://code.google.com/p/webscraping')
project_title = xpath.get(html, '//div[@id="pname"]/a/span')
labels = xpath.get(html, '/a[@class="label"]')
        self.app.exec_()

    def _loadFinished(self, result):
        self.frame = self.mainFrame()
        self.app.quit()


search_term = sys.argv[1]
yr = int(sys.argv[2])
url = 'http://query.nytimes.com/search/sitesearch/?action=click&contentCollection&region=TopBar&WT.nav=searchWidget&module=SearchSubmit&pgtype=Homepage#/' + search_term + '/from' + str(yr) + '0101to' + str(yr + 1) + '0101/'
r = Render(url)
result = str(r.frame.toHtml().toAscii())
results = xpath.get(result, '//div[@id="totalResultsCount"]/p/text()')
with open('results.out', 'a') as f:
    f.write("{:}\t{:}\t{:}\n".format(search_term, yr, results.split()[3]))

# from lxml import html
# def scrape(url, html_):
#     formatted_result = str(html_.toAscii())
#     tree = html.fromstring(formatted_result)
#     results = tree.xpath('//div[@id="totalResultsCount"]/p/text()')
#     print results
# QString should be converted to string before processed by lxml
# formatted_result = str(result.toAscii())
def scrapeReklama5():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')
    down = Downloader('https://www.reklama5.mk/Search')
    html = down.get_content()
    html = unicode(html)
    requestedWebPageUrl = 'https://www.reklama5.mk'
    adverts = xpath.search(html, '//div[@class="OglasResults"]')
    ads = []
    for advert in adverts:
        link = requestedWebPageUrl + xpath.get(advert, '//a[@class="SearchAdTitle"]/@href')
        title = xpath.get(advert, '//a[@class="SearchAdTitle"].text()').strip().replace("\"", "")
        description = getDescription(
            link, '//div[@class="oglasTitle"]/p[@class="oglasTitle"]').strip().replace("\"", "")
        subcategory = "/"
        imageUrl = xpath.get(advert, '//img[@class="thumbnail thumbs"]/@src')
        if imageUrl == "/Content/images/noImage2.jpg":
            imageUrl = requestedWebPageUrl + imageUrl
        price = xpath.get(advert, '//div[@class="text-left text-success"]')
        price = re.sub('\s+', ' ', price).strip()
        price = price.split(" ")
        if price[0] == "По":
            price[0] = "/"
        if price[1] == "Договор":
            price[1] = "/"
        value = price[0]
        currency = price[1]
        if currency == "€":
            currency = "EUR"
        if currency == u"МКД":
            currency = "MKD"
        region = xpath.get(advert, '//p[@class="clear-margin"]')
        region = region.split(">")
        region = region[0].strip()
        country = u"Македонија"
        date = xpath.get(advert, '//div[@class="text-center clear-padding adDate"]')
        date = re.sub('\s+', ' ', date).strip()
        time = xpath.get(advert, '//div[@class="text-center clear-padding adDate"]')
        time = re.sub('\s+', ' ', time).strip()
        if date.split()[0] == u"Денес" and time.split()[0]:
            date = datetime.now()
            datum = str(date.year) + "-" + str(date.month) + "-" + str(date.day)
            vreme = time.split(" ")[1]
            p = datum + " " + vreme
            date = p
        category = xpath.get(advert, '//p[@class="adCategoryName"]/a')
        ad = Ad(link, title, imageUrl, description, category, subcategory,
                value, currency, region, date, country)
        #print link, title, imageUrl, description, category, subcategory, value, currency, region, date
        ads.append(ad)
    return adsToJson(ads)
def filterContent(url, filterXpath):
    renderedWebPage = SinglePageRenderer(url)
    html = renderedWebPage.frame.toHtml()
    html = unicode(html)
    # print html
    return re.sub('\s+', ' ', xpath.get(html, filterXpath)).strip()
def scrapePobarajOglasi():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')
    now = datetime.now()
    down = Downloader('http://www.pobaraj.com.mk/lista_na_oglasi/all/1')
    content = down.get_content()
    html = unicode(content)
    site = xpath.get(html, '//ul[@class="lista_na_oglasi"]')
    linkovi = xpath.search(site, '//li')
    ads = []
    for l in linkovi:
        link = "http://www.pobaraj.com.mk" + xpath.get(l, '//a[@class="title"]/@href')
        title = xpath.get(l, '//a[@class="title"]')
        imageUrl = xpath.get(l, '//a[@class="photo"]/img/@src')
        download = Downloader(link)
        cont = download.get_content()
        cont = unicode(cont)
        description = xpath.get(cont, '//div[@class="oglas_prikaz_opis"]').strip()
        if description == "":
            description = "/"
        kategorii = xpath.search(cont, '//a[@class="pateka"]')
        category = kategorii[1]
        if len(kategorii) > 2:
            subcategory = kategorii[2]
        else:
            subcategory = "/"
        price = xpath.get(l, '//div[@class="price"]').strip()
        price = price.split("<div ")
        price = price[0].strip()
        price = price.split("Цена: ")
        price = price[1]
        if price == u"по договор":
            value = "/"
            currency = "/"
        else:
            price = price.split(" ")
            value = price[0]
            if price[1] == u"денари":
                currency = "MKD"
            elif price[1] == u"евра":
                currency = "EUR"
            else:
                currency = price[1]
        region = xpath.get(cont, '//div[@class="oglas_prikaz_left"]').strip()
        region = region.split("Град:<")
        region = region[1]
        region = region.split("<b class")
        region = region[0]
        region = region.split("b>")
        region = region[1]
        region = region.strip()
        country = u"Македонија"
        datum = xpath.get(l, '//div[@class="oglas_date"]').strip()
        datum = datum.split(": ")
        datum = datum[1]
        datum = datum.split(", ")
        vreme = datum[1]
        datum = datum[0]
        if datum == u"Денес":
            date = str(now.year) + "-" + str(now.month) + "-" + str(now.day) + " " + vreme
        elif datum == u"Вчера":
            da = datetime.now() - timedelta(days=1)
            date = str(da.year) + "-" + str(da.month) + "-" + str(da.day) + " " + vreme
        else:
            datum = datum.split(" ")
            if datum[1] == "Јан":
                datum = str(now.year) + "-1-" + datum[0]
            elif datum[1] == "Фев":
                datum = str(now.year) + "-2-" + datum[0]
            elif datum[1] == "Мар":
                datum = str(now.year) + "-3-" + datum[0]
            elif datum[1] == "Апр":
                datum = str(now.year) + "-4-" + datum[0]
            elif datum[1] == "Мај":
                datum = str(now.year) + "-5-" + datum[0]
            elif datum[1] == "Јун":
                datum = str(now.year) + "-6-" + datum[0]
            elif datum[1] == "Јул":
                datum = str(now.year) + "-7-" + datum[0]
            elif datum[1] == "Авг":
                datum = str(now.year) + "-8-" + datum[0]
            elif datum[1] == "Сеп":
                datum = str(now.year) + "-9-" + datum[0]
            elif datum[1] == "Окт":
                datum = str(now.year) + "-10-" + datum[0]
            elif datum[1] == "Ное":
                datum = str(now.year) + "-11-" + datum[0]
            elif datum[1] == "Дек":
                datum = str(now.year) + "-12-" + datum[0]
            date = datum + " " + vreme
        ad = Ad(link, title, imageUrl, description, category, subcategory,
                value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)

#print scrapePobarajOglasi()
def download_nmr(self, cas):
    start_time = int(time.time())
    logging.info(u"抓取CAS:%s核磁共振数据", cas)  # log: fetching NMR data for this CAS number
    try:
        ie = self.startup_ie()
        hwd_ie = winGuiAuto.findTopWindows("Windows Internet Explorer")
        url = settings.APP_PATH + 'predictor.htm'
        ie.navigate(url)
        # "安全警告" is the title of the IE "Security Warning" dialog
        hwnd = winGuiAuto.findTopWindows(u"安全警告".encode('gbk'))
        if hwnd:
            self.close_alter_window(elapsed_seconds=0)
        get_data_link = ie.elementFind('a', 'name', 'getresultdata')
        ie.elementClick(get_data_link)
        counter = 0
        while True:
            counter += 1
            text_tableformat_value = ie.textAreaGetValue('tableformat', 'value')
            if text_tableformat_value:
                btn_get_pdf = ie.elementFind('input', 'value', 'Get PDF')
                ie.elementClick(btn_get_pdf)
                if HIDE_IE_WINDOW:
                    ie._ie.Visible = 0
                time.sleep(1)
                break
            elif counter > 9:
                # no NMR data available for this CAS number
                raise Exception(u'CAS号无核磁数据', 555)
                break
            else:
                time.sleep(1)
        html = ie.outerHTML()
        if ('action=/cheminfo/servlet/org.cheminfo.hook.appli.HookServlet' in html
                or 'action=http://www.nmrdb.com/cheminfo/servlet/org.cheminfo.hook.appli.HookServlet' in html):
            soup = BeautifulSoup(html)
            para_molfile = xpath.get(html, '//TEXTAREA[@name=molfile]').strip()
            # para_molfile = soup.find('TEXTAREA', {'name': 'molfile'})
            para_ethylvinylether = xpath.get(html, '//TEXTAREA[@name=ethylvinylether]').strip()
            para_assignment = xpath.get(html, '//TEXTAREA[@name=assignment]').strip()
            # para_assignment = soup.find('TEXTAREA', {'name': 'assignment'})
            para_tableformat = xpath.get(html, '//TEXTAREA[@name=tableformat]').strip()
            # para_tableformat = soup.find('TEXTAREA', {'name': 'tableformat'})
            para_url = xpath.get(html, '//input[@name=url]/@value').replace('&amp;', '&').strip()
            para_url = soup.find('input', {'name': 'url'})['value']
            para_xmlString = xpath.get(html, '//INPUT[@name=xmlString]/@value').strip()
            para_xmlString = soup.find("input", {'name': 'xmlString'})['value']
            if not para_xmlString:
                para_xmlString = common.regex_get(html, r'<INPUT value="([^"]+)"\s*type=hidden name=xmlString>', normalized=False)
            if not para_xmlString:
                para_xmlString = common.regex_get(html, r"<INPUT value='([^']+)'\s*type=hidden name=xmlString>", normalized=False)
            # para_xmlString = common.unescape(para_xmlString).replace('&quot;', '"')
            para_resolution = xpath.get(html, '//input[@name=resolution]/@value').strip()
            para_rotate = xpath.get(html, '//input[@name=rotate]/@value').strip()
            get_pdf_url = 'http://www.nmrdb.org/cheminfo/servlet/org.cheminfo.hook.appli.HookServlet'
            post_data2 = {}
            post_data2['molfile'] = para_molfile
            # post_data2['ethylvinylether'] = para_ethylvinylether
            post_data2['assignment'] = para_assignment
            post_data2['tableformat'] = para_tableformat
            post_data2['width'] = '800'
            post_data2['height'] = '600'
            post_data2['url'] = para_url
            post_data2['xmlString'] = para_xmlString
            post_data2['resolution'] = para_resolution
            post_data2['rotate'] = para_rotate
            post_data2['options'] = ''
            post_data2['format'] = 'pdf'
            days = datetime.datetime.now().strftime('%Y-%m-%d')
            save_path = settings.SAVE_PATH + days + '/'
            if not os.path.exists(save_path):
                os.mkdir(save_path)
            du = DownUtil2()
            du.downfile(get_pdf_url, post_data2, save_path, cas + '.pdf')
    except Exception, e:
        raise e
def scrapeAvtooglasi():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')
    now = datetime.now()
    down = Downloader('http://www.avtooglasi.com.mk/rezultati/show/?vid=0&orderby=0')
    content = down.get_content()
    html = unicode(content)
    sliki = xpath.search(html, '//div[@class="resultLeft"]')
    ostanato = xpath.search(html, '//div[@class="oglasInfoTopContent"]')
    ceni = xpath.search(html, '//a[@class="btn btn-info btn-xs oglasInfoAdditionalPrice"]')
    link = {}
    title = {}
    imageUrl = {}
    description = {}
    category = {}
    subcategory = {}
    value = {}
    currency = {}
    region = {}
    date = {}
    i = 0
    ads = []
    for slika in sliki:
        imageUrl[i] = xpath.search(slika, '//a[@class="thumbnail resultImg"]/img/@src')[0]
        i = i + 1
    i = 0
    for cena in ceni:
        price = xpath.get(cena, '//span/span').strip()
        price = price.split(" ")
        if len(price) > 1:
            if price[0] == "По":
                price[0] = "/"
            if price[1] == "договор":
                price[1] = "/"
            value[i] = price[0]
            currency[i] = price[1]
            if currency[i] == "€":
                currency[i] = "EUR"
        i = i + 1
    i = 0
    for advert in ostanato:
        link[i] = xpath.get(advert, '//a[@class="resultMainLink"]/@href')
        title[i] = xpath.get(advert, '//a[@class="resultMainLink"]/span').strip().replace("\"", "")
        path = xpath.search(getDescription(link[i], '//div[@class="centerC"]'), '/div/div[@class="padded"]')
        description[i] = path[1]
        subcategory[i] = "/"
        category[i] = u"Возила"
        dodatok = xpath.get(advert, '//span[@class="oglasInfoAdditionalInfo"]')
        dodatok = dodatok.split(" | ")
        region[i] = dodatok[0]
        country = u"Македонија"
        description[i] = dodatok[1] + u" година, " + dodatok[2] + ", " + dodatok[3] + ", " + dodatok[4] + ", " + dodatok[5] + ", " + description[i]
        description[i] = description[i].strip().replace("\"", "")
        date[i] = ""
        #print description[i]
        datum = dodatok[6].strip()
        datum = datum.split(" ")
        if datum[0] == "Денес":
            datum[0] = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
            date[i] = datum[0] + " " + datum[2]
        elif datum[0] == "Вчера":
            da = datetime.now() - timedelta(days=1)
            datum[0] = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
            date[i] = datum[0] + " " + datum[2]
        elif datum[0] == "пред":
            if datum[2] == "дена":
                da = datetime.now() - timedelta(days=int(datum[1]))
                datum[0] = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
                date[i] = datum[0]
            else:
                if datum[1] == "1":
                    da = datetime.now() - timedelta(days=30)
                    datum[0] = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
                    date[i] = datum[0]
                else:
                    da = datetime.now() - timedelta(days=60)
                    datum[0] = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
                    date[i] = datum[0]
        else:
            date[i] = datum[0] + " " + datum[1]
        #print date[i]
        i = i + 1
    for i in link:
        ad = Ad(link[i], title[i], imageUrl[i], description[i], category[i],
                subcategory[i], value[i], currency[i], region[i], date[i], country)
        ads.append(ad)
    return adsToJson(ads)

# print scrapeAvtooglasi()
def scrapeMobile24():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')
    now = datetime.now()
    #http://www.mobile24.mk/avtomobili/
    down = Downloader('http://www.mobile24.mk/avtomobili/')
    content = down.get_content()
    html = unicode(content)
    linkovi = xpath.search(html, '//tr[@class="t0"]')
    lin = xpath.search(html, '//tr[@class="t1"]')
    for l in lin:
        linkovi.append(l)
    #http://www.mobile24.mk/motocikli/
    down = Downloader('http://www.mobile24.mk/motocikli/')
    content = down.get_content()
    html = unicode(content)
    linko = xpath.search(html, '//tr[@class="t0"]')
    lin = xpath.search(html, '//tr[@class="t1"]')
    for l in lin:
        linkovi.append(l)
    for l in linko:
        linkovi.append(l)
    #http://www.mobile24.mk/kombinja/
    down = Downloader('http://www.mobile24.mk/kombinja/')
    content = down.get_content()
    html = unicode(content)
    linko = xpath.search(html, '//tr[@class="t0"]')
    lin = xpath.search(html, '//tr[@class="t1"]')
    for l in lin:
        linkovi.append(l)
    for l in linko:
        linkovi.append(l)
    #http://www.mobile24.mk/kamioni/
    down = Downloader('http://www.mobile24.mk/kamioni/')
    content = down.get_content()
    html = unicode(content)
    linko = xpath.search(html, '//tr[@class="t0"]')
    lin = xpath.search(html, '//tr[@class="t1"]')
    for l in lin:
        linkovi.append(l)
    for l in linko:
        linkovi.append(l)
    #http://www.mobile24.mk/prikolki/
    down = Downloader('http://www.mobile24.mk/prikolki/')
    content = down.get_content()
    html = unicode(content)
    linko = xpath.search(html, '//tr[@class="t0"]')
    lin = xpath.search(html, '//tr[@class="t1"]')
    for l in lin:
        linkovi.append(l)
    for l in linko:
        linkovi.append(l)
    #http://www.mobile24.mk/avtobusi/
    down = Downloader('http://www.mobile24.mk/avtobusi/')
    content = down.get_content()
    html = unicode(content)
    linko = xpath.search(html, '//tr[@class="t0"]')
    lin = xpath.search(html, '//tr[@class="t1"]')
    for l in lin:
        linkovi.append(l)
    for l in linko:
        linkovi.append(l)
    #http://www.mobile24.mk/gumiiavtodelovi/
    down = Downloader('http://www.mobile24.mk/gumiiavtodelovi/')
    content = down.get_content()
    html = unicode(content)
    linko = xpath.search(html, '//tr[@class="t0"]')
    lin = xpath.search(html, '//tr[@class="t1"]')
    for l in lin:
        linkovi.append(l)
    for l in linko:
        linkovi.append(l)
    ads = []
    for l in linkovi:
        link = xpath.get(l, '//a[@class="listing-title"]/@href')
        title = xpath.get(l, '//a[@class="listing-title"]/b')
        imageUrl = xpath.get(l, '//td[@class="image"]/a/img/@src')
        download = Downloader(link)
        cont = download.get_content()
        cont = unicode(cont)
        desc = xpath.search(
            cont, '//div[@class="item-left"]/div[@class="fieldset rounded4"]/div')
        if len(desc) == 4:
            description = desc[1]
        else:
            description = desc[0]
        category = u"Возила"
        subcategory = "/"
        price = xpath.get(l, '//td[@class="price"].text()')
        value = xpath.get(l, '//td[@class="price"]/span')
        value = value.replace(",", "")
        price = price.split("span>")
        price = price[2]
        price = price.split("<")
        price = price[0]
        currency = price
        if currency == u"денари":
            currency = "MKD"
        if value == u"По договор":
            value = "/"
            currency = "/"
        region = xpath.get(l, '//span[@class="city"]')
        date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
        country = u"Македонија"
        ad = Ad(link, title, imageUrl, description, category, subcategory,
                value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)

# print scrapeMobile24()
def scrapeKoli():
    reload(sys)
    sys.setdefaultencoding('utf-8')
    down = Downloader('http://koli.com.mk/polovni_lista.aspx')
    html = down.get_content()
    html = unicode(html)
    requestedWebPageUrl = 'http://koli.com.mk/polovni_lista.aspx'
    adverts = xpath.search(html, '//table[@id="dlRezultati"]')
    ads = []
    links = xpath.search(html, '//a[@class="linkovi_desno_golemi"]/@href')
    da = datetime.now()
    for l in links:
        link = "http://koli.com.mk/" + l
        d = Downloader(link)
        ad = d.get_content()
        ad = unicode(ad)
        description = u"Опрема: " + xpath.get(ad, '//span[@id="lblOprema"]') + " \nOpis: " + xpath.get(ad, '//span[@id="lblOpis"]')
        title = xpath.get(ad, '//span[@id="lblMarkaModel"].text()').strip()
        imageUrl = 'http://koli.com.mk/' + xpath.get(ad, '//img[@id="slika"]/@src')
        subcategory = "/"
        category = u"Возила"
        region = xpath.get(ad, '//span[@id="lblGrad"].text()')
        country = u"Македонија"
        value = xpath.get(ad, '//span[@id="lblMomentalnaCena"]').strip()
        currency = "EUR"
        date = ""
        d = xpath.get(ad, '//span[@id="lblDenovi"]').strip()
        d = d.split(" ")
        if len(d) == 1:
            if d[0] == u"минута":
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
            elif d[0] == u"час":
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
            elif d[0] == u"ден":
                da = datetime.now() - timedelta(days=1)
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
            elif d[0] == u"месец":
                da = datetime.now() - timedelta(days=30)
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
            elif d[0] == u"секунда":
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
        else:
            if d[1] == u"месеци":
                da = datetime.now() - timedelta(days=int(d[0]) * 30)
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
            elif d[1] == u"дена":
                da = datetime.now() - timedelta(days=int(d[0]))
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
            elif d[1] == u"минути":
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
            elif d[1] == u"часа":
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
            elif d[1] == u"секунди":
                date = str(da.year) + "-" + str(da.month) + "-" + str(da.day)
        ad = Ad(link, title, imageUrl, description, category, subcategory,
                value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)
def scrapeKupujemProdajem():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')
    now = datetime.now()
    ads = []
    try:
        down = Downloader(
            'http://www.kupujemprodajem.com/search.php?action=list&data[category_id]=&data[group_id]=&data[location_id]=&data[keywords]=&submit[search]=Tra%C5%BEi')
        content = down.get_content()
        html = unicode(content)
        link = ""
        title = ""
        imageUrl = ""
        description = "/"
        category = "/"
        subcategory = "/"
        value = "/"
        currency = "/"
        region = "/"
        date = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
        linkovi = xpath.search(html, '//div[@class="item clearfix"]')
        highlighted = xpath.search(html, '//div[@class="item clearfix adHighlighted"]')
        for h in highlighted:
            linkovi.append(h)
        for l in linkovi:
            try:
                link = "http://www.kupujemprodajem.com/" + xpath.get(l, '//a[@class="adName"]/@href')
                title = xpath.get(l, '//a[@class="adName"]')
                region = xpath.get(l, '//section[@class="locationSec"]').strip()
                region = region.split(" | ")
                region = region[0]
                price = xpath.get(l, '//span[@class="adPrice"]')
                price = price.split(" ")
                if len(price) == 2:
                    value = price[0]
                    value = value.replace(".", "")
                    value = value.split(",")
                    value = value[0]
                    currency = price[1]
                else:
                    value = "/"
                    currency = "/"
                if currency == "€":
                    currency = "EUR"
                elif currency == "din":
                    currency = "DIN"
                down = Downloader(link)
                content = down.get_content()
                category = xpath.get(content, '//a[@class="crumbs"]')
                description = xpath.get(l, '//section[@class="nameSec"]/p[@class="adDescription"]')
                category = category.split("|")
                category = category[0]
                category = category.strip()
                imageUrl = xpath.get(content, '//div[@class="adThumbnailHolder"]/a/img/@src')
                imageUrl = imageUrl.replace("//", "/")
                imageUrl = imageUrl[1::]
                if imageUrl == "":
                    imageUrl = "/"
                description = description.replace("...<p>", "")
                description = description.strip()
                country = u"Србија"
                ad = Ad(link, title, imageUrl, description, category, subcategory,
                        value, currency, region, date, country)
                ads.append(ad)
            except:
                pass
    except:
        pass
    return adsToJson(ads)

#print scrapeKupujemProdajem()