Example #1
def getInfo(urls):
    # DDLStorage API Documentation:
    # http://www.ddlstorage.com/cgi-bin/api_req.cgi?req_type=doc
    ids = dict()
    for url in urls:
        m = re.search(DdlstorageCom.__pattern__, url)
        ids[m.group('ID')] = url

    for chunk in chunks(ids.keys(), 5):
        api = getURL('http://www.ddlstorage.com/cgi-bin/api_req.cgi',
                     post={'req_type': 'file_info_free',
                           'client_id': 53472,
                           'file_code': ','.join(chunk),
                           'sign': md5('file_info_free%d%s%s' % (53472, ','.join(chunk),
                                                                 '25JcpU2dPOKg8E2OEoRqMSRu068r0Cv3')).hexdigest()})
        api = api.replace('<pre>', '').replace('</pre>', '')
        api = json_loads(api)

        result = list()
        for el in api:
            if el['status'] == 'online':
                result.append((el['file_name'], int(el['file_size']), 2, ids[el['file_code']]))
            else:
                result.append((ids[el['file_code']], 0, 1, ids[el['file_code']]))
        yield result
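
Every example here leans on a chunks() helper to batch URLs before querying the hoster API. pyLoad ships its own implementation; a minimal standalone sketch with the same behavior (list in, list slices out, so the chunk[i] indexing seen below keeps working) would be:

def chunks(seq, size):
    # Yield successive slices of at most `size` items from `seq`.
    for i in range(0, len(seq), size):
        yield seq[i:i + size]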
Example #2
def getInfo(urls):
    api_url_base = "http://api.share-online.biz/linkcheck.php"

    for chunk in chunks(urls, 90):
        api_param_file = {"links": "\n".join(x.replace("http://www.share-online.biz/dl/", "").rstrip("/") for x in chunk)}  # API only supports old-style links
        src = getURL(api_url_base, post=api_param_file)
        result = []
        for i, res in enumerate(src.split("\n")):
            if not res:
                continue
            fields = res.split(";")

            if fields[1] == "OK":
                status = 2
            elif fields[1] in ("DELETED", "NOT FOUND"):
                status = 1
            else:
                status = 3

            result.append((fields[2], int(fields[3]), status, chunk[i]))
        yield result
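
The share-online linkcheck endpoint returns one semicolon-separated record per link; judging from the indices used above, each line parses as id;status;name;size. A tiny illustration of that contract (the sample line is invented):

# Hypothetical response line, shaped after the indices the example reads.
sample = "ABC123;OK;some_file.rar;104857600"
fields = sample.split(";")
assert fields[1] == "OK" and fields[2] == "some_file.rar" and int(fields[3]) == 104857600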
Example #3
def getInfo(urls):
    for chunk in chunks(urls, 80):
        result = []

        api = getAPIData(chunk)

        for data in api.itervalues():
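            # data tuple layout, inferred from the indices used below:
            # [0] = status, [1] = size, [2] = name, [4] = original url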
            if data[0] == "online":
                result.append((html_unescape(data[2]), data[1], 2, data[4]))

            elif data[0] == "offline":
                result.append((data[4], 0, 1, data[4]))

        yield result
Example #4
def getInfo(urls):
    ##  returns list of tuples (name, size (in bytes), status (see FileDatabase), url)

    apiurl = "http://api.netload.in/info.php"
    id_regex = re.compile(NetloadIn.__pattern__)
    urls_per_query = 80

    for chunk in chunks(urls, urls_per_query):
        ids = ""
        for url in chunk:
            match = id_regex.search(url)
            if match:
                ids = ids + match.group('ID') + ";"

        api = getURL(apiurl,
                     get={'auth'   : "Zf9SnQh9WiReEsb18akjvQGqT0I830e8",
                          'bz'     : 1,
                          'md5'    : 1,
                          'file_id': ids},
                     decode=True)

        if api is None or len(api) < 10:
            # getInfo is a module-level function, so there is no `self` to log through
            print "Netload prefetch: failed"
            return

        if api.find("unknown_auth") >= 0:
            print "Netload prefetch: Outdated auth code"
            return

        result = []

        for i, r in enumerate(api.splitlines()):
            try:
                tmp = r.split(";")
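                # response line layout, inferred from the indices below:
                # tmp[1] = name, tmp[2] = size, tmp[3] = status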

                try:
                    size = int(tmp[2])
                except Exception:
                    size = 0

                result.append((tmp[1], size, 2 if tmp[3] == "online" else 1, chunk[i]))

            except Exception:
                self.logDebug("Error while processing response: %s" % r)

        yield result
Example #5
def getInfo(urls):
    api_url_base = "http://api.hotfile.com/"
    
    for chunk in chunks(urls, 90):
        api_param_file = {"action":"checklinks","links": ",".join(chunk),"fields":"id,status,name,size"} #api only supports old style links
        src = getURL(api_url_base, post=api_param_file)
        result = []
        for i, res in enumerate(src.split("\n")):
            if not res:
                continue
            fields = res.split(",")
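            # fields come back as id,status,name,size -- the order requested
            # via the "fields" parameter above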
            
            if fields[1] in ("1", "2"):
                status = 2
            else:
                status = 1
                
            result.append((fields[2], int(fields[3]), status, chunk[i]))
        yield result
Example #6
def getInfo(urls):
    result = []
    for chunk in chunks(urls, 10):
        for url in chunk:
            src = getURL(url)
            if r'<div class="errorMessage mb10">' in src:
                result.append((url, 0, 1, url))
            elif r'Page cannot be displayed' in src:
                result.append((url, 0, 1, url))
            else:
                try:
                    url_pattern = r'<a href="(.+?)" onclick="return Act\(this, \'dlink\', event\)">(.+?)</a>'
                    file_name = re.search(url_pattern, src).group(2)
                    result.append((file_name, 0, 2, url))
                except Exception:
                    pass

        # status 1=OFFLINE, 2=OK, 3=UNKNOWN
        # result.append((#name,#size,#status,#url))
        yield result
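
The status legend in the comment above (1 = OFFLINE, 2 = OK, 3 = UNKNOWN) is shared by every plugin in this list; giving the bare integers names makes the result tuples self-documenting. The constant names below are illustrative, not part of the pyLoad API:

OFFLINE, ONLINE, UNKNOWN = 1, 2, 3  # values taken from the legend above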
Example #7
def getInfo(urls):
    for chunk in chunks(urls, 20):
        result = []
        ids = dict()
        for url in chunk:
            id = getId(url)
            if id:
                ids[id] = url
            else:
                result.append((None, 0, 1, url))

        if len(ids) > 0:
            check_url = "http://api.wupload.com/link?method=getInfo&format=json&ids=" + ",".join(ids.keys())
            response = json_loads(getURL(check_url).decode("utf8", "ignore"))
            for item in response["FSApi_Link"]["getInfo"]["response"]["links"]:
                if item["status"] != "AVAILABLE":
                    result.append((None, 0, 1, ids[str(item["id"])]))
                else:
                    result.append((unquote(item["filename"]), item["size"], 2, ids[str(item["id"])]))
        yield result
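
The nested lookups above imply the shape of the wupload JSON response; an illustrative reconstruction (field values invented, structure taken from the keys the code reads):

sample_response = {"FSApi_Link": {"getInfo": {"response": {"links": [
    {"id": 12345, "status": "AVAILABLE", "filename": "file.zip", "size": 1048576},
]}}}}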
Example #8
def getInfo(urls):
    ##  returns list of tuples (name, size (in bytes), status (see FileDatabase), url)

    apiurl = "http://api.netload.in/info.php?auth=Zf9SnQh9WiReEsb18akjvQGqT0I830e8&bz=1&md5=1&file_id="
    id_regex = re.compile(NetloadIn.__pattern__)
    urls_per_query = 80

    for chunk in chunks(urls, urls_per_query):
        ids = ""
        for url in chunk:
            match = id_regex.search(url)
            if match:
                ids = ids + match.group(1) + ";"

        api = getURL(apiurl + ids, decode=True)

        if api is None or len(api) < 10:
            print "Netload prefetch: failed"
            return
        if api.find("unknown_auth") >= 0:
            print "Netload prefetch: Outdated auth code"
            return

        result = []

        for i, r in enumerate(api.splitlines()):
            try:
                tmp = r.split(";")
                try:
                    size = int(tmp[2])
                except Exception:
                    size = 0
                result.append((tmp[1], size, 2 if tmp[3] == "online" else 1, chunk[i]))
            except Exception:
                print "Netload prefetch: Error while processing response: %s" % r

        yield result
Example #9
def getInfo(urls):
    for chunk in chunks(urls, 100):
        yield checkFile(UploadStationCom, chunk)
Example #10
def getInfo(urls):
    for chunk in chunks(urls, 100):
        yield checkFile(FilefactoryCom, chunk)
Example #11
def getInfo(urls):
    for chunk in chunks(urls, 100):
        yield checkFile(FilejungleCom, chunk)
Example #12
def getInfo(urls):
    for chunk in chunks(urls, 100):
        yield checkFile(FileserveCom, chunk)
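
Taken together, every plugin implements the same contract: getInfo() receives a list of URLs and yields one result list per batch, each entry a (name, size in bytes, status, url) tuple with status 1 = OFFLINE, 2 = OK, 3 = UNKNOWN. A hedged skeleton for a new hoster plugin, where query_hoster_api is a hypothetical stand-in for the hoster's real link checker:

def getInfo(urls):
    # Yields lists of (name, size_in_bytes, status, url) tuples;
    # status: 1 = OFFLINE, 2 = OK, 3 = UNKNOWN (see the examples above).
    for chunk in chunks(urls, 50):  # batch size is hoster-specific
        result = []
        for url in chunk:
            # query_hoster_api is hypothetical; replace with the real API call.
            name, size, online = query_hoster_api(url)
            result.append((name or url, size, 2 if online else 1, url))
        yield result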