Example #1
def getInfo(urls):
    for url in urls:
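        #: query fshare.vn's link-checker endpoint and parse its response like a regular file page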
        html = getURL("http://www.fshare.vn/check_link.php",
                      post={'action': "check_link", 'arrlinks': url},
                      decode=True)

        yield parseFileInfo(FshareVn, url, html)
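All of these snippets are getInfo helpers from pyLoad 0.4.x hoster plugins; each lives in the module that defines the plugin class it references (FshareVn, CloudzerNet, and so on), so the class names need no import. The helper functions they call do, though; a plausible preamble, with module paths assumed from the 0.4.x layout rather than taken from this page, would be:

import re

from pycurl import HTTPHEADER

from module.network.RequestFactory import getRequest, getURL
from module.plugins.internal.SimpleHoster import parseFileInfo, replace_patterns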
Example #2
def getInfo(urls):
    for url in urls:
        header = getURL(url, just_header=True)
        if 'Location: http://cloudzer.net/404' in header:
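            #: pyLoad info tuples are (name, size, status, url); status 1 marks the link offline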
            file_info = (url, 0, 1, url)
        else:
            file_info = parseFileInfo(CloudzerNet, url, getURL(url, decode=True))
        yield file_info
Example #3
def getInfo(urls):
    for url in urls:
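        #: checkHTMLHeader is a helper defined in the MediafireCom plugin module;
        #: a truthy status means the header check alone settled the link's state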
        location, status = checkHTMLHeader(url)
        if status:
            file_info = (url, 0, status, url)
        else:
            file_info = parseFileInfo(MediafireCom, url, getURL(url, decode=True))
        yield file_info
Example #4
def getInfo(urls):
    for url in urls:
        h = getURL(url, just_header=True)
        m = re.search(r'Location: (.+)\r\n', h)
        if m and not re.match(FilefactoryCom.__pattern__, m.group(1)):  #: It's a direct link! Skipping
            yield (url, 0, 3, url)  #: status 3 = unknown; pass the link through untested
        else:  #: It's a standard html page
            yield parseFileInfo(FilefactoryCom, url, getURL(url))
Example #5
def getInfo(urls):
    result = []

    for url in urls:
        file_id = re.search(StahnuTo.__pattern__, url).group(3)
        html = getURL("http://stahnu.to/?file=" + file_id, decode=True)
        result.append(parseFileInfo(StahnuTo, url, html))

    #: pyLoad's info thread accepts a list of info tuples per yield as well as single tuples
    yield result
Example #6
def getInfo(urls):
    h = getRequest()
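    #: h.c is the raw pycurl handle behind pyLoad's request object, so pycurl options can be set on it directly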
    h.c.setopt(HTTPHEADER,
               ["Accept: text/html",
                "User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0"])
    for url in urls:
        html = h.load(url, decode=True)
        file_info = parseFileInfo(ShareRapidCom, replace_patterns(url, ShareRapidCom.FILE_URL_REPLACEMENTS), html)
        yield file_info
Example #7
def getInfo(urls):
    h = getRequest()
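    #: send browser-like Accept and User-Agent headers with every request made on this handle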
    h.c.setopt(HTTPHEADER,
               ["Accept: text/html",
                "User-Agent: Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0"])

    for url in urls:
        html = h.load(url, decode=True)
        yield parseFileInfo(MegaRapidCz, url, html)
Example #8
def getInfo(urls):
    result = []

    for url in urls:
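        #: toInfoPage is a helper in the CzshareCom plugin module; URLs it
        #: cannot map to an info page are silently skipped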
        info_url = toInfoPage(url)
        if info_url:
            file_info = parseFileInfo(CzshareCom, url,
                                      getURL(info_url, decode=True))
            result.append(file_info)

    yield result
Example #9
def getInfo(urls):
    for url in urls:
        info = checkFile(url)
        if "filename" in info:
            yield info['name'], info['size'], info['status'], url
        else:        
            file_info = (url, 0, 3, url)
            h = getRequest()
            try:
                h.c.setopt(HTTPHEADER, ["Accept: text/html"])
                html = h.load(url, cookies = True, decode = True)
                file_info = parseFileInfo(ShareRapidCom, url, html) 
            finally:
                h.close()
                yield file_info
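For completeness, here is a minimal sketch of how one of these generators can be consumed. pyLoad's info thread performs the same tuple-or-list flattening internally; collect_file_info is a hypothetical name used only for illustration:

def collect_file_info(get_info, urls):
    #: Flatten the yields: a generator may yield one (name, size, status, url)
    #: tuple at a time, or a whole list of them at once (see Example #5)
    results = []
    for chunk in get_info(urls):
        results.extend(chunk if isinstance(chunk, list) else [chunk])
    return results

#: e.g. collect_file_info(getInfo, ["http://www.fshare.vn/file/..."])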