def getInfo(urls): for url in urls: html = getURL("http://www.fshare.vn/check_link.php", post={'action': "check_link", 'arrlinks': url}, decode=True) yield parseFileInfo(FshareVn, url, html)
def getInfo(urls):
    for url in urls:
        h = getURL(url, just_header=True)
        m = re.search(r'Location: (.+)\r\n', h)
        if m and not re.match(FilefactoryCom.__pattern__, m.group(1)):  #: It's a direct link! Skipping
            yield (url, 0, 3, url)
        else:  #: It's a standard html page
            yield parseFileInfo(FilefactoryCom, url, getURL(url))
def getInfo(urls): h = getRequest() h.c.setopt(pycurl.HTTPHEADER, ["Accept: text/html", "User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0"]) for url in urls: html = h.load(url, decode=True) yield parseFileInfo(MegaRapidCz, url, html)
def getInfo(urls): h = getRequest() h.c.setopt(pycurl.HTTPHEADER, [ "Accept: text/html", "User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0" ]) for url in urls: html = h.load(url, decode=True) yield parseFileInfo(MegaRapidCz, url, html)
def getInfo(urls): for url in urls: html = getURL("http://www.fshare.vn/check_link.php", post={ 'action': "check_link", 'arrlinks': url }, decode=True) yield parseFileInfo(FshareVn, url, html)