Exemplo n.º 1
0
def get_info(urls):
    """Yield file info tuples for the given FileFactory urls.

    For each url a header-only request is issued; when the ``Location``
    header points at a target that does NOT match the hoster's url
    pattern, the url is treated as a direct download link and reported
    as-is without fetching the page.
    """
    for url in urls:
        h = get_url(url, just_header=True)
        m = re.search(r'Location: (.+)\r\n', h)
        #: BUGFIX: re.match(pattern, string) — the original call had the
        #: arguments swapped, matching the Location value as a regex
        #: against the plugin pattern string.
        if m and not re.match(FilefactoryCom.__pattern__, m.group(1)):  #: It's a direct link! Skipping
            yield (url, 0, 3, url)
        else:  #: It's a standard html page
            yield parse_fileInfo(FilefactoryCom, url, get_url(url))
Exemplo n.º 2
0
def get_info(urls):
    """Yield file info for FileFactory urls.

    A HEAD-style request is made first; when the Location header target
    does not match the hoster's url pattern the url is a direct download
    link and is reported without fetching the html page.
    """
    for url in urls:
        h = get_url(url, just_header=True)
        m = re.search(r'Location: (.+)\r\n', h)

        #: BUGFIX: re.match() takes (pattern, string); the original call
        #: passed them in swapped order.
        if m and not re.match(
                FilefactoryCom.__pattern__,
                m.group(1)):  #: It's a direct link! Skipping
            yield (url, 0, 7, url)
        else:
            #: It's a standard html page
            yield parse_fileInfo(FilefactoryCom, url, get_url(url))
    def get_info(cls, url="", html=""):
        """Resolve ``url`` by following up to 10 redirects and build a file-info dict.

        NOTE(review): this snippet appears truncated — there is no
        ``return info`` (nor a for-``else``) in view, so the resolved
        ``info`` dict is never handed back here.
        """
        redirect = url
        for i in xrange(10):
            try:
                #: Parse "Name: value" response headers into a dict (whole header block lower-cased).
                headers = dict(re.findall(r"(?P<name>.+?): (?P<value>.+?)\r?\n", get_url(redirect, just_header=True).lower()))
                if 'location' in headers and headers['location']:
                    #: Still redirecting — follow the Location header on the next pass.
                    redirect = headers['location']
                else:
                    if 'content-type' in headers and headers['content-type'] == "application/octet-stream":
                        #: NOTE(review): headers.get('content-disposition') can be None
                        #: here, making the `in` test raise TypeError (swallowed by the
                        #: broad except below).
                        if "filename=" in headers.get('content-disposition'):
                            name = dict(_i.split("=") for _i in map(str.strip, headers['content-disposition'].split(";"))[1:])['filename'].strip("\"'")
                        else:
                            name = url

                        info = {'name'  : name,
                                'size'  : long(headers.get('content-length')),  #: Python 2 `long`
                                'status': 3,
                                'url'   : url}

                    else:
                        #: Not a direct download — defer to the generic parent parser.
                        info = super(OneFichierCom, cls).get_info(url, html)

                    break

            except Exception, e:  #: Python 2 except syntax; `e.message` is Python 2 only
                info = {'status' : 8,
                        'error'  : e.message}
Exemplo n.º 4
0
def get_info(urls):
    """Check Dailymotion urls via the public REST API and collect info tuples."""
    pattern  = re.compile(DailymotionCom.__pattern__)
    api_url  = "https://api.dailymotion.com/video/%s"
    params   = {'fields': "access_error,status,title"}

    info_list = []
    for url in urls:
        video_id = pattern.match(url).group('ID')
        data = json_loads(get_url(api_url % video_id, get=params))

        if "title" in data:
            name = data['title'] + ".mp4"
        else:
            name = url

        #: An explicit error (or an access_error flag) means offline.
        if "error" in data or data['access_error']:
            state = "offline"
        elif data['status'] in ("ready", "published"):
            state = "online"
        elif data['status'] in ("waiting", "processing"):
            state = "temp. offline"
        else:
            state = "offline"

        info_list.append((name, 0, statusMap[state], url))

    return info_list
Exemplo n.º 5
0
    def get_info(cls, url="", html=""):
        """Combine ``api_info`` with an on-demand page fetch to classify ``url``.

        NOTE(review): snippet appears truncated — the fetched ``html`` is
        never used below and nothing is returned in view.
        """
        info   = cls.api_info(url)
        online = True if info['status'] == 2 else False

        try:
            info['pattern'] = re.match(cls.__pattern__, url).groupdict()  #: Pattern groups will be saved here

        except Exception:
            #: No pattern match (or empty url) — keep an empty pattern dict.
            info['pattern'] = {}

        if not html and not online:
            if not url:
                info['error']  = "missing url"
                info['status'] = 1

            elif info['status'] == 3:
                #: Status "unknown" — fetch the page so callers can parse it.
                try:
                    html = get_url(url, cookies=cls.COOKIES, decode=cls.TEXT_ENCODING)

                except BadHeader, e:  #: Python 2 except syntax
                    info['error'] = "%d: %s" % (e.code, e.content)

                    if e.code == 404:
                        info['status'] = 1

                    elif e.code == 503:
                        info['status'] = 6

                except Exception:
                    pass
Exemplo n.º 6
0
 def api_response(cls, method, **kwargs):
     """Call the JSON API: ``method`` is packed as key ``a``; an optional ``sid`` goes out as a GET parameter."""
     session_id = kwargs.pop('sid', None)
     kwargs['a'] = method
     query = {} if session_id is None else {'sid': session_id}
     raw = get_url(cls.API_URL, get=query, post=json.dumps([kwargs]))
     return json.loads(raw)
Exemplo n.º 7
0
    def api_info(cls, url):
        """Query uploaded.net's filemultiple API for one file, retrying up to 5 times."""
        info = {}
        file_id = re.match(cls.__pattern__, url).group('ID')

        for _ in range(5):
            res = get_url("http://uploaded.net/api/filemultiple",
                          get={'apikey': cls.API_KEY, 'id_0': file_id})

            if res == "can't find request":
                #: API not ready yet — back off and retry.
                time.sleep(3)
                continue

            fields = res.split(",", 4)
            if fields[0] == "online":
                info['name']   = fields[4].strip()
                info['size']   = fields[2]
                info['status'] = 2
                info['sha1']   = fields[3]
            else:
                info['status'] = 1
            break

        return info
Exemplo n.º 8
0
def get_info(urls):
    """Resolve Dailymotion video status for each url via the REST API."""
    out = []
    id_re = re.compile(DailymotionCom.__pattern__)
    fields = {'fields': "access_error,status,title"}

    for url in urls:
        video_id = id_re.match(url).group('ID')
        data = json.loads(
            get_url("https://api.dailymotion.com/video/%s" % video_id,
                    get=fields))

        name = (data['title'] + ".mp4") if "title" in data else url

        if "error" in data or data['access_error']:
            state = "offline"
        else:
            state = data['status']
            if state in ("ready", "published"):
                state = "online"
            elif state in ("waiting", "processing"):
                state = "temp. offline"
            else:
                state = "offline"

        out.append((name, 0, statusMap[state], url))

    return out
Exemplo n.º 9
0
    def handle_free(self, pyfile):
        """Request a free download link from the webshare.cz API and store it in ``self.link``."""
        token = self.account.get_data('wst') if self.account else None

        api_data = get_url("https://webshare.cz/api/file_link/",
                           post={'ident': self.info['pattern']['ID'],
                                 'wst': token})

        self.log_debug("API data: " + api_data)

        link_m = re.search('<link>(.+)</link>', api_data)
        if link_m is not None:
            self.link = link_m.group(1)
Exemplo n.º 10
0
def get_info(urls):
    """Classify StreamCz urls as offline (status 1) or online (status 2).

    NOTE(review): despite building ``result`` inside the loop, the
    function ends with ``yield result`` — it is therefore a generator
    that yields the complete list exactly once.  Sibling implementations
    in this file use ``return result``; confirm which contract callers
    actually expect before changing it.
    """
    result = []

    for url in urls:

        html = get_url(url)
        if re.search(StreamCz.OFFLINE_PATTERN, html):
            #: File offline
            result.append((url, 0, 1, url))
        else:
            result.append((url, 0, 2, url))
    yield result
Exemplo n.º 11
0
    def api_info(cls, url):
        """Decrypt megadysk.pl page state and extract name / size / download url.

        Status codes: 8 = parse error, 6 = maintenance, 1 = deleted/missing.
        """
        info = {}

        page = get_url(url)
        enc_m = re.search(r"window\['.*?'\]\s*=\s*\"(.*?)\"", page)
        if enc_m is None:
            info['status'] = 8
            info['error'] = _("Encrypted info pattern not found")
            return info

        #: The XOR key is embedded in the site's bundled javascript.
        js = get_url("https://megadysk.pl/dist/index.js")
        key_m = re.search(r't.ISK\s*=\s*"(\w+)"', js)
        if key_m is None:
            info['status'] = 8
            info['error'] = _("Encryption key pattern not found")
            return info

        decrypted = xor_decrypt(enc_m.group(1), key_m.group(1))
        state = json.loads(unquote(decrypted))
        app = state['app']

        if app['maintenance']:
            info['status'] = 6
            return info

        downloader = app['downloader']
        if downloader is None or downloader['file']['deleted']:
            info['status'] = 1
            return info

        file_data = downloader['file']
        info['name'] = file_data['name']
        info['size'] = file_data['size']
        info['download_url'] = downloader['url']

        return info
Exemplo n.º 12
0
    def handle_free(self, pyfile):
        """Fetch a free-download link from webshare.cz and remember it on ``self``."""
        wst_token = self.account.get_data('wst') if self.account else None

        response = get_url("https://webshare.cz/api/file_link/",
                           post={'ident': self.info['pattern']['ID'],
                                 'wst': wst_token})

        self.log_debug("API data: " + response)

        match = re.search('<link>(.+)</link>', response)
        if match is not None:
            self.link = match.group(1)
Exemplo n.º 13
0
    def api_info(cls, url):
        """Check a webshare.cz file via the file_info API endpoint."""
        file_id = re.match(cls.__pattern__, url).group('ID')
        res = get_url("https://webshare.cz/api/file_info/",
                      post={'ident': file_id, 'wst': ""})

        #: A non-OK xml status means the file is gone.
        if re.search(r'<status>OK', res):
            return {'status': 2,
                    'name'  : re.search(r'<name>(.+?)<', res).group(1),
                    'size'  : re.search(r'<size>(.+?)<', res).group(1)}
        return {'status': 1}
Exemplo n.º 14
0
 def _get_info(self, url):
     """Post ``url`` to the bulk link checker and parse the result table rows."""
     page = get_url(self.URLS[1], post={'urls': url})
     results = []
     for row in re.finditer(self.LINKCHECK_TR, page, re.S):
         #: Best effort per row — skip anything that fails to parse.
         try:
             cells = re.findall(self.LINKCHECK_TD, row.group(1))
             if cells:
                 name = cells[1] if cells[1] != '--' else cells[0]
                 size = parse_size(cells[2]) if cells[2] != '--' else 0
                 status = 2 if cells[3].startswith('Available') else 1
                 results.append((name, size, status, cells[0]))
         except Exception:
             continue
     return results
Exemplo n.º 15
0
    def api_info(cls, url):
        """Look a file up on nitroflare's getFileInfo v2 API."""
        file_id = re.search(cls.__pattern__, url).group('ID')
        raw = get_url("https://nitroflare.com/api/v2/getFileInfo",
                      get={'files': file_id},
                      decode=True)
        data = json.loads(raw)

        info = {}
        if data['type'] == 'success':
            entry = data['result']['files'][file_id]
            info['status'] = 2 if entry['status'] == 'online' else 1
            info['name'] = entry['name']
            info['size'] = entry['size']  #: In bytes

        return info
Exemplo n.º 16
0
def check_file(plugin, urls):
    """Run the plugin's bulk link checker over ``urls``.

    All urls are posted at once to ``plugin.URLS[1]``; the result table
    (``LINKCHECK_TR`` rows / ``LINKCHECK_TD`` cells) is parsed into
    ``(name, size, status, url)`` tuples — status 2 when the row reports
    'Available', else 1.

    BUGFIX: the original bound the exception to an unused ``e`` with
    Python-2-only syntax and never returned ``file_info``; the sibling
    ``_get_info`` implementation in this file returns the list, so this
    one now does too.
    """
    html = get_url(plugin.URLS[1], post={'urls': "\n".join(urls)})

    file_info = []
    for li in re.finditer(plugin.LINKCHECK_TR, html, re.S):
        try:
            cols = re.findall(plugin.LINKCHECK_TD, li.group(1))
            if cols:
                file_info.append((
                    cols[1] if cols[1] != '--' else cols[0],
                    parse_size(cols[2]) if cols[2] != '--' else 0,
                    2 if cols[3].startswith('Available') else 1,
                    cols[0]))
        except Exception:  #: best effort per row — skip malformed entries
            continue

    return file_info
Exemplo n.º 17
0
    def api_info(cls, url):
        """Query nitroflare's v2 getFileInfo endpoint for one file."""
        info = {}
        fid = re.search(cls.__pattern__, url).group('ID')

        response = json.loads(
            get_url("https://nitroflare.com/api/v2/getFileInfo",
                    get={'files': fid},
                    decode=True))

        if response['type'] == 'success':
            record = response['result']['files'][fid]
            info.update({
                'status': 2 if record['status'] == 'online' else 1,
                'name': record['name'],
                'size': record['size'],  #: In bytes
            })

        return info
Exemplo n.º 18
0
    def api_info(cls, url):
        """Extend the parent's info dict with webshare.cz file_info API results."""
        info = super(WebshareCz, cls).api_info(url)

        info['pattern'] = re.match(cls.__pattern__, url).groupdict()

        response = get_url("https://webshare.cz/api/file_info/",
                           post={'ident': info['pattern']['ID'], 'wst': ""})

        if re.search(r'<status>OK', response):
            info['status'] = 2
            info['name'] = re.search(r'<name>(.+?)<', response).group(1)
            info['size'] = re.search(r'<size>(.+?)<', response).group(1)
        else:
            info['status'] = 1

        return info
Exemplo n.º 19
0
    def api_info(cls, url):
        """Classify a webshare.cz link as online / offline / error via its xml API."""
        ident = re.match(cls.__pattern__, url).group('ID')
        response = get_url("https://webshare.cz/api/file_info/",
                           post={'ident': ident, 'wst': ""})

        info = {}
        if re.search(r'<status>OK', response):
            info['status'] = 2
            info['name'] = re.search(r'<name>(.+?)<', response).group(1)
            info['size'] = re.search(r'<size>(.+?)<', response).group(1)
        elif re.search(r'<status>FATAL', response):
            info['status'] = 1
        else:
            #: Neither OK nor FATAL in the reply — report a parse failure.
            info['status'] = 8
            info['error'] = _("Could not find required xml data")

        return info
Exemplo n.º 20
0
    def get_info(cls, url="", html=""):
        """Merge parent info with ``api_info`` and fetch the page when needed.

        NOTE(review): snippet appears truncated — the fetched ``html`` is
        never used below and nothing is returned in view.
        """
        info = super(SimpleHoster, cls).get_info(url)
        info.update(cls.api_info(url))

        if not html and info['status'] != 2:
            if not url:
                info['error']  = "missing url"
                info['status'] = 1

            elif info['status'] in (3, 7):
                #: Status unknown / direct — fetch the page for later parsing.
                try:
                    html = get_url(url, cookies=cls.COOKIES, decode=cls.TEXT_ENCODING)

                except BadHeader, e:  #: Python 2 except syntax
                    info['error'] = "%d: %s" % (e.code, e.content)

                except Exception:
                    pass
Exemplo n.º 21
0
    def get_info(cls, url="", html=""):
        """Follow up to 10 redirects for ``url`` and build a file-info dict.

        A direct ``application/octet-stream`` response yields status 7
        with the name taken from Content-Disposition when present;
        anything else is delegated to the parent ``get_info``.  More than
        10 redirects, or any exception, produces status 8.
        """
        redirect = url
        for i in range(10):
            try:
                headers = dict((k.lower(), v) for k, v in re.findall(
                    r'(?P<name>.+?): (?P<value>.+?)\r?\n',
                    get_url(redirect, just_header=True)))
                if headers.get('location'):
                    #: Still redirecting — follow the Location header.
                    redirect = headers['location']
                    continue

                if headers.get('content-type') == "application/octet-stream":
                    #: BUGFIX: default to "" so a missing Content-Disposition
                    #: header does not raise TypeError on the `in` test.
                    disposition = headers.get('content-disposition', '') or ''
                    if "filename=" in disposition:
                        params = dict(
                            _i.split("=") for _i in map(
                                str.strip, disposition.split(";"))[1:])
                        name = params['filename'].strip("\"'")
                    else:
                        name = url

                    info = {
                        'name': name,
                        'size': int(headers.get('content-length')),
                        'status': 7,
                        'url': url,
                    }

                else:
                    info = super(OneFichierCom, cls).get_info(url, html)

                break

            except Exception as e:
                print(format_exc())
                #: BUGFIX: `e.message` does not exist on Python 3 exceptions.
                info = {'status': 8, 'error': str(e)}
                break

        else:
            info = {'status': 8, 'error': _("Too many redirects")}

        return info
Exemplo n.º 22
0
    def get_info(cls, url="", html=""):
        """Resolve crypter status: parent info + api_info, then page-based checks.

        Fetches the page only when the url is not yet known online; then
        applies OFFLINE / TEMP_OFFLINE / NAME pattern checks in order and
        finally derives ``info['name']`` from the 'N' pattern group.
        """
        info = super(SimpleCrypter, cls).get_info(url)

        info.update(cls.api_info(url))

        if not html and info['status'] != 2:
            if not url:
                info['error'] = "missing url"
                info['status'] = 1

            elif info['status'] in (3, 7):
                #: Status unknown / direct — fetch the page for pattern checks below.
                try:
                    html = get_url(url,
                                   cookies=cls.COOKIES,
                                   decode=cls.TEXT_ENCODING)

                except BadHeader as e:
                    info['error'] = "%d: %s" % (e.code, e.content)

                except Exception:
                    pass

        if html:
            #: Order matters: offline beats temp-offline beats name match.
            if cls.OFFLINE_PATTERN and re.search(cls.OFFLINE_PATTERN,
                                                 html) is not None:
                info['status'] = 1

            elif cls.TEMP_OFFLINE_PATTERN and re.search(
                    cls.TEMP_OFFLINE_PATTERN, html) is not None:
                info['status'] = 6

            elif cls.NAME_PATTERN:
                m = re.search(cls.NAME_PATTERN, html)
                if m is not None:
                    info['status'] = 2
                    info['pattern'].update(m.groupdict())

        #: NOTE(review): assumes info['pattern'] was set by the parent
        #: get_info / api_info — KeyError otherwise; confirm upstream.
        if 'N' in info['pattern']:
            name = replace_patterns(info['pattern']['N'],
                                    cls.NAME_REPLACEMENTS)
            info['name'] = parse_name(name)

        return info
    def api_info(cls, url):
        """Query the uploaded.net filemultiple API, retrying up to 5 times."""
        info = {}
        file_id = re.match(cls.__pattern__, url).group('ID')

        for _ in xrange(5):
            res = get_url("http://uploaded.net/api/filemultiple",
                          get={'apikey': cls.API_KEY, 'id_0': file_id})

            if res == "can't find request":
                #: API not ready — back off before the next attempt.
                time.sleep(3)
                continue

            parts = res.split(",", 4)
            if parts[0] == "online":
                info.update({'name': parts[4].strip(),
                             'size': parts[2],
                             'status': 2})
            else:
                info['status'] = 1
            break

        return info
Exemplo n.º 24
0
    def api_info(cls, url):
        """Retry the uploaded.net filemultiple lookup up to five times."""
        info = {}

        for _ in xrange(5):
            reply = get_url(
                "http://uploaded.net/api/filemultiple",
                get={"apikey": cls.API_KEY,
                     "id_0": re.match(cls.__pattern__, url).group("ID")},
            )

            if reply == "can't find request":
                #: Not ready yet — wait and retry.
                time.sleep(3)
                continue

            parts = reply.split(",", 4)
            if parts[0] == "online":
                info["name"] = parts[4].strip()
                info["size"] = parts[2]
                info["status"] = 2
            else:
                info["status"] = 1
            break

        return info
Exemplo n.º 25
0
    def api_info(cls, url):
        """Probe the webshare.cz file_info endpoint and map its xml status."""
        ident = re.match(cls.__pattern__, url).group('ID')
        xml = get_url("https://webshare.cz/api/file_info/",
                      post={'ident': ident, 'wst': ""})

        if re.search(r'<status>OK', xml):
            return {'status': 2,
                    'name': re.search(r'<name>(.+?)<', xml).group(1),
                    'size': re.search(r'<size>(.+?)<', xml).group(1)}
        if re.search(r'<status>FATAL', xml):
            return {'status': 1}
        #: Neither OK nor FATAL — parse failure.
        return {'status': 8, 'error': _("Could not find required xml data")}
    def api_info(cls, url):
        """Check a share-online.biz link via the semicolon-separated linkcheck API."""
        file_id = re.match(cls.__pattern__, url).group("ID")
        fields = get_url("http://api.share-online.biz/linkcheck.php",
                         get={'md5': "1", 'links': file_id}).split(";")

        info = {}
        #: Short replies simply leave info incomplete.
        try:
            state = fields[1]
            if state == "OK":
                info['fileid'] = fields[0]
                info['status'] = 2
                info['name']   = fields[2]
                info['size']   = fields[3]  #: In bytes
                info['md5']    = fields[4].strip().lower().replace("\n\n", "")  #: md5

            elif state in ("DELETED", "NOTFOUND"):
                info['status'] = 1

        except IndexError:
            pass

        return info
Exemplo n.º 27
0
def get_info(urls):
    """Probe urls in chunks of 10 and yield the accumulated (name, size, status, url) list per chunk."""
    result = []
    for chunk in chunks(urls, 10):
        for url in chunk:
            page = get_url(url)
            offline = (r'<div class="errorMessage mb10">' in page
                       or r'Page cannot be displayed' in page)
            if offline:
                result.append((url, 0, 1, url))
            else:
                #: Best effort name extraction from the download anchor.
                try:
                    url_pattern = '<a href="(.+?)" onclick="return Act\(this\, \'dlink\'\, event\)">(.+?)</a>'
                    anchor = re.search(url_pattern, page).group(0)
                    name = anchor.split(', event)">')[1].split('</a>')[0]
                    result.append((name, 0, 2, url))
                except Exception:
                    pass

        #: status 1=OFFLINE, 2=OK, 3=UNKNOWN
        #: result.append((#name,#size,#status,#url))
        yield result
Exemplo n.º 28
0
    def get_info(cls, url="", html=""):
        """Merge parent info with ``api_info`` and fetch the page on demand.

        NOTE(review): snippet appears truncated — the fetched ``html`` is
        never used below and nothing is returned in view.
        """
        info = super(SimpleHoster, cls).get_info(url)
        info.update(cls.api_info(url))

        if not html and info['status'] != 2:
            if not url:
                info['error'] = "missing url"
                info['status'] = 1

            elif info['status'] in (3, 7):
                #: Status unknown / direct — fetch the page for later parsing.
                try:
                    html = get_url(url,
                                   cookies=cls.COOKIES,
                                   decode=cls.TEXT_ENCODING)

                except BadHeader, e:  #: Python 2 except syntax
                    info['error'] = "%d: %s" % (e.code, e.content)

                except Exception:
                    pass
Exemplo n.º 29
0
    def api_info(cls, url):
        """linkcheck.php lookup for share-online.biz; short replies leave info partial."""
        info = {}
        raw = get_url("http://api.share-online.biz/linkcheck.php",
                      get={'md5': "1",
                           'links': re.match(cls.__pattern__, url).group("ID")})
        parts = raw.split(";")

        try:
            if parts[1] == "OK":
                info.update({'fileid': parts[0],
                             'status': 2,
                             'name': parts[2],
                             'size': parts[3],  #: In bytes
                             'md5': parts[4].strip().lower().replace("\n\n", "")})  #: md5

            elif parts[1] in ("DELETED", "NOTFOUND"):
                info['status'] = 1

        except IndexError:
            pass

        return info
Exemplo n.º 30
0
 def api_response(cls, method, **kwargs):
     """POST ``kwargs`` as a JSON body to ``API_URL + method`` and decode the JSON reply."""
     raw = get_url(cls.API_URL + method, post=json.dumps(kwargs))
     return json.loads(raw)
Exemplo n.º 31
0
 def api_response(cls, method, **kwargs):
     """Invoke API ``method``: arguments are packed as {'a': method, ...} inside a one-element JSON list."""
     kwargs['a'] = method
     payload = json.dumps([kwargs])
     return json.loads(get_url(cls.API_URL, post=payload))
Exemplo n.º 32
0
def get_info(urls):
    """Yield parsed file info for each fshare.vn url via its link checker."""
    for link in urls:
        page = get_url("http://www.fshare.vn/check_link.php",
                       post={'action': "check_link", 'arrlinks': link})
        yield parse_fileInfo(FshareVn, link, page)
Exemplo n.º 33
0
 def api_response(cls, method, **kwargs):
     """POST ``kwargs`` to ``API_URL + method + "/"`` and return the raw response body."""
     endpoint = cls.API_URL + method + "/"
     return get_url(endpoint, post=kwargs)
Exemplo n.º 34
0
 def _load_json(cls, uri):
     """Fetch ``API_URL + uri`` and decode the body as JSON."""
     raw = get_url(cls.API_URL + uri)
     return json.loads(raw)
Exemplo n.º 35
0
 def _load_json(cls, uri):
     """GET the API resource at ``uri`` (relative to API_URL) and parse it as JSON."""
     body = get_url(cls.API_URL + uri)
     return json.loads(body)
Exemplo n.º 36
0
def api_response(url):
    """Ask the letitbit JSON API for download info about ``url``."""
    payload = ["yw7XQy2v9", ["download/info", {"link": url}]]
    raw = get_url("http://api.letitbit.net/json",
                  post={"r": json.dumps(payload)})
    return json.loads(raw)
Exemplo n.º 37
0
 def api_respond(cls, subdomain, method, args=None):
     """POST ``args`` to ``API_URL % subdomain + method`` and decode the JSON reply.

     BUGFIX: the mutable default argument ``args={}`` is replaced with a
     ``None`` sentinel (behavior unchanged: an empty dict is posted when
     no args are given, and the default can no longer be shared/mutated
     across calls).
     """
     return json.loads(get_url(cls.API_URL % subdomain + method,
                               post={} if args is None else args))
Exemplo n.º 38
0
 def api_response(cls, file_id, method, **kwargs):
     """GET ``API_URL + "/file/" + method`` with ``file_id`` merged into the query as ``file``."""
     params = dict(kwargs, file=file_id)
     return json.loads(get_url(cls.API_URL + "/file/" + method, get=params))