Esempio n. 1
0
def get_info(urls):
    """Yield (name, size, status, url) tuples for filefactory urls.

    A redirect target that does not match the plugin's URL pattern is
    treated as a direct link and yielded with status 7; anything else is
    parsed as a normal HTML file page via parse_file_info().
    """
    for url in urls:
        h = get_url(url, just_header=True)
        m = re.search(r"Location: (.+)\r\n", h)

        # BUG FIX: re.match(pattern, string) — the original call had the
        # arguments swapped, using the redirect URL as the regex pattern.
        if m and not re.match(
                FilefactoryCom.__pattern__,
                m.group(1)):  #: : It's a direct link! Skipping
            yield (url, 0, 7, url)
        else:
            #: It's a standard html page
            yield parse_file_info(FilefactoryCom, url, get_url(url))
Esempio n. 2
0
def get_info(urls):
    """Yield (name, size, status, url) tuples for filefactory urls.

    Redirects that do not match the plugin pattern are direct links
    (status 7); other urls are parsed as standard HTML pages.
    """
    for url in urls:
        h = get_url(url, just_header=True)
        m = re.search(r"Location: (.+)\r\n", h)

        # BUG FIX: re.match takes (pattern, string); the original call
        # passed the redirect target as the pattern.
        if m and not re.match(
            FilefactoryCom.__pattern__, m.group(1)
        ):  #: : It's a direct link! Skipping
            yield (url, 0, 7, url)
        else:
            #: It's a standard html page
            yield parse_file_info(FilefactoryCom, url, get_url(url))
Esempio n. 3
0
    def api_info(cls, url):
        """Query the uploaded.net multi-file API for info on *url*.

        Retries up to five times while the API reports it cannot find
        the request, sleeping 3 seconds between attempts.
        """
        # The file ID is constant across retries, so extract it once.
        file_id = re.match(cls.__pattern__, url).group("ID")
        info = {}

        for _attempt in range(5):
            response = get_url(
                "http://uploaded.net/api/filemultiple",
                get={"apikey": cls.API_KEY, "id_0": file_id},
            )

            if response == "can't find request":
                time.sleep(3)
                continue

            fields = response.split(",", 4)
            if fields[0] == "online":
                info["name"] = fields[4].strip()
                info["size"] = fields[2]
                info["status"] = 2
                info["sha1"] = fields[3]
            else:
                info["status"] = 1
            break

        return info
Esempio n. 4
0
    def api_info(cls, url):
        """Fetch file status from the uploaded.net API (up to 5 attempts)."""
        info = {}

        attempts = 5
        while attempts > 0:
            attempts -= 1
            html = get_url(
                "http://uploaded.net/api/filemultiple",
                get={
                    "apikey": cls.API_KEY,
                    "id_0": re.match(cls.__pattern__, url).group("ID"),
                },
            )

            # Transient API miss: wait and retry.
            if html == "can't find request":
                time.sleep(3)
                continue

            api = html.split(",", 4)
            if api[0] != "online":
                info["status"] = 1
            else:
                info.update(
                    {
                        "name": api[4].strip(),
                        "size": api[2],
                        "status": 2,
                        "sha1": api[3],
                    }
                )
            break

        return info
Esempio n. 5
0
def get_info(urls):
    """Return a list of (name, size, status, url) tuples for Dailymotion urls.

    Queries the Dailymotion REST API per url and maps the API's status
    string to a numeric code via status_map.
    """
    result = []
    _re = re.compile(DailymotionCom.__pattern__)
    apiurl = "https://api.dailymotion.com/video/{}"
    request = {"fields": "access_error,status,title"}

    for url in urls:
        # Renamed from `id` to avoid shadowing the builtin.
        video_id = _re.match(url).group("ID")
        html = get_url(apiurl.format(video_id), get=request)
        info = json.loads(html)

        name = info["title"] + ".mp4" if "title" in info else url

        if "error" in info or info["access_error"]:
            status = "offline"

        else:
            status = info["status"]

            if status in ("ready", "published"):
                status = "online"

            elif status in ("waiting", "processing"):
                status = "temp. offline"

            else:
                status = "offline"

        result.append((name, 0, status_map[status], url))

    return result
Esempio n. 6
0
    def api_info(cls, url):
        """Check a share-online.biz link via the linkcheck API."""
        info = {}
        file_id = re.match(cls.__pattern__, url).group("ID")
        response = get_url(
            "http://api.share-online.biz/linkcheck.php",
            get={"md5": "1", "links": file_id},
        )
        field = response.split(";")
        try:
            status_flag = field[1]
            if status_flag == "OK":
                info["fileid"] = field[0]
                info["status"] = 2
                info["name"] = field[2]
                info["size"] = field[3]  #: In bytes
                info["md5"] = field[4].strip().lower().replace("\n\n", "")  #: md5
            elif status_flag in ("DELETED", "NOTFOUND"):
                info["status"] = 1
        except IndexError:
            # Short/unexpected responses yield whatever was collected so far.
            pass

        return info
Esempio n. 7
0
def get_info(urls):
    """Return (name, size, status, url) tuples for Dailymotion video urls.

    Each url is resolved through the Dailymotion API; the API status
    string is translated to a numeric status via status_map.
    """
    result = []
    _re = re.compile(DailymotionCom.__pattern__)
    apiurl = "https://api.dailymotion.com/video/{}"
    request = {"fields": "access_error,status,title"}

    for url in urls:
        # `video_id` instead of `id`: do not shadow the builtin.
        video_id = _re.match(url).group("ID")
        html = get_url(apiurl.format(video_id), get=request)
        info = json.loads(html)

        name = info["title"] + ".mp4" if "title" in info else url

        if "error" in info or info["access_error"]:
            status = "offline"

        else:
            status = info["status"]

            if status in ("ready", "published"):
                status = "online"

            elif status in ("waiting", "processing"):
                status = "temp. offline"

            else:
                status = "offline"

        result.append((name, 0, status_map[status], url))

    return result
Esempio n. 8
0
 def api_response(cls, method, **kwargs):
     """POST a JSON-encoded API call (method name in field "a") and decode the reply."""
     kwargs["a"] = method
     sid = kwargs.pop("sid", None)
     params = {} if sid is None else {"sid": sid}
     payload = json.dumps([kwargs])
     raw = get_url(cls.API_URL, get=params, post=payload)
     return json.loads(raw)
Esempio n. 9
0
 def api_response(cls, method, **kwargs):
     """Call the JSON API; an optional "sid" kwarg becomes a query parameter."""
     kwargs["a"] = method
     sid = kwargs.pop("sid", None)
     query = {"sid": sid} if sid is not None else {}
     return json.loads(get_url(cls.API_URL, get=query, post=json.dumps([kwargs])))
Esempio n. 10
0
    def api_info(cls, url):
        """Fetch name/size/status from the API's file-info endpoint."""
        file_id = re.match(cls.__pattern__, url).group("ID")
        file_info = json.loads(get_url(f"{cls.API_URL}/file/{file_id}/info"))

        if file_info["success"] is False:
            return {"status": 1}

        return {"name": file_info["name"], "size": file_info["size"], "status": 2}
Esempio n. 11
0
    def api_info(cls, url):
        """Decrypt megadysk.pl's embedded page state and return a status dict."""
        info = {}

        page = get_url(url)
        m = re.search(r"window\['.*?'\]\s*=\s*\"(.*?)\"", page)
        if m is None:
            info["status"] = 8
            info["error"] = "Encrypted info pattern not found"
            return info
        ciphertext = m.group(1)

        # The XOR key lives in the site's bundled javascript.
        script = get_url("https://megadysk.pl/dist/index.js")
        m = re.search(r't.ISK\s*=\s*"(\w+)"', script)
        if m is None:
            info["status"] = 8
            info["error"] = "Encryption key pattern not found"
            return info
        xor_key = m.group(1)

        decrypted = xor_decrypt(ciphertext, xor_key)
        data = json.loads(urllib.parse.unquote(decrypted))

        app = data["app"]
        if app["maintenance"]:
            info["status"] = 6
            return info

        downloader = app["downloader"]
        if downloader is None or downloader["file"]["deleted"]:
            info["status"] = 1
            return info

        info["name"] = downloader["file"]["name"]
        info["size"] = downloader["file"]["size"]
        info["download_url"] = downloader["url"]

        return info
Esempio n. 12
0
    def api_info(cls, url):
        """Return file info decoded from megadysk.pl's obfuscated page data."""
        result = {}

        main_page = get_url(url)
        enc_match = re.search(r"window\['.*?'\]\s*=\s*\"(.*?)\"", main_page)
        if enc_match is None:
            result["status"] = 8
            result["error"] = "Encrypted info pattern not found"
            return result

        # Recover the XOR key from the site's javascript bundle.
        js_source = get_url("https://megadysk.pl/dist/index.js")
        key_match = re.search(r't.ISK\s*=\s*"(\w+)"', js_source)
        if key_match is None:
            result["status"] = 8
            result["error"] = "Encryption key pattern not found"
            return result

        plaintext = xor_decrypt(enc_match.group(1), key_match.group(1))
        state = json.loads(urllib.parse.unquote(plaintext))

        if state["app"]["maintenance"]:
            result["status"] = 6
            return result

        dl = state["app"]["downloader"]
        if dl is None or dl["file"]["deleted"]:
            result["status"] = 1
            return result

        result["name"] = dl["file"]["name"]
        result["size"] = dl["file"]["size"]
        result["download_url"] = dl["url"]

        return result
Esempio n. 13
0
    def api_info(cls, url):
        """Look up a cript.to folder via the v1 folder/info API."""
        folder_id = re.match(cls.__pattern__, url).group('ID')
        response = json.loads(
            get_url("https://cript.to/api/v1/folder/info", get={'id': folder_id})
        )

        if response["status"] == "error":
            return {"status": 8, "error": response["message"]}

        return {"status": 2, "name": response["data"]["name"]}
Esempio n. 14
0
 def _get_info(self, url):
     """Scrape the link-check page and return (name, size, status, url) rows."""
     html = get_url(self.URLS[1], post={"urls": url})
     results = []
     for row in re.finditer(self.LINKCHECK_TR, html, re.S):
         try:
             cols = re.findall(self.LINKCHECK_TD, row.group(1))
             if not cols:
                 continue
             # "--" marks a missing cell; fall back to the url column.
             name = cols[1] if cols[1] != "--" else cols[0]
             size = parse.bytesize(cols[2]) if cols[2] != "--" else 0
             status = 2 if cols[3].startswith("Available") else 1
             results.append((name, size, status, cols[0]))
         except Exception:
             # Malformed rows are skipped silently, as before.
             continue
     return results
Esempio n. 15
0
    def get_info(cls, url="", html=""):
        """Resolve *url* through up to 10 redirects and return a file-info dict.

        A final response served as application/octet-stream is treated as a
        direct download: the name comes from the Content-Disposition header
        (falling back to the url) and status 7 is set.  Any other response
        is delegated to the parent class.  Status 8 is returned on error or
        when the redirect limit is exceeded.
        """
        redirect = url
        for _ in range(10):
            try:
                headers = {
                    k.lower(): v
                    for k, v in re.findall(
                        r"(?P<name>.+?): (?P<value>.+?)\r?\n",
                        get_url(redirect, just_header=True),
                    )
                }
                if "location" in headers and headers["location"]:
                    redirect = headers["location"]

                else:
                    if headers.get("content-type") == "application/octet-stream":
                        # BUG FIX: default to "" / 0 so a missing header does
                        # not raise TypeError (previously misreported as
                        # status 8 by the broad except below).
                        if "filename=" in headers.get("content-disposition", ""):
                            _name = dict(
                                i.strip().split("=")
                                for i in headers["content-disposition"].split(";")[1:]
                            )
                            name = _name["filename"].strip("\"'")
                        else:
                            name = url

                        info = {
                            "name": name,
                            "size": int(headers.get("content-length", 0)),
                            "status": 7,
                            "url": url,
                        }

                    else:
                        info = super(OneFichierCom, cls).get_info(url, html)

                    break

            except Exception as exc:
                print(format_exc())
                info = {"status": 8, "error": exc}
                break

        else:
            info = {"status": 8, "error": "Too many redirects"}

        return info
Esempio n. 16
0
    def get_info(cls, url="", html=""):
        """Follow up to 10 redirects for *url* and build a file-info dict.

        Octet-stream responses are direct downloads (status 7, name taken
        from Content-Disposition when present); other responses fall back
        to the parent class.  Errors and redirect-limit overruns yield
        status 8.
        """
        redirect = url
        for _ in range(10):
            try:
                headers = {
                    k.lower(): v
                    for k, v in re.findall(
                        r"(?P<name>.+?): (?P<value>.+?)\r?\n",
                        get_url(redirect, just_header=True),
                    )
                }
                if "location" in headers and headers["location"]:
                    redirect = headers["location"]

                else:
                    if headers.get("content-type") == "application/octet-stream":
                        # BUG FIX: supply defaults so absent headers do not
                        # raise TypeError and get misreported as status 8.
                        if "filename=" in headers.get("content-disposition", ""):
                            _name = dict(
                                i.strip().split("=")
                                for i in headers["content-disposition"].split(";")[1:]
                            )
                            name = _name["filename"].strip("\"'")
                        else:
                            name = url

                        info = {
                            "name": name,
                            "size": int(headers.get("content-length", 0)),
                            "status": 7,
                            "url": url,
                        }

                    else:
                        info = super(OneFichierCom, cls).get_info(url, html)

                    break

            except Exception as exc:
                print(format_exc())
                info = {"status": 8, "error": exc}
                break

        else:
            info = {"status": 8, "error": "Too many redirects"}

        return info
Esempio n. 17
0
    def api_info(cls, url):
        """Query nitroflare's getFileInfo API for name/size/status."""
        info = {}
        file_id = re.search(cls.__pattern__, url).group("ID")

        response = get_url(
            "https://nitroflare.com/api/v2/getFileInfo",
            get={"files": file_id},
            decode=True,
        )
        data = json.loads(response)

        if data["type"] == "success":
            entry = data["result"]["files"][file_id]
            info["status"] = 2 if entry["status"] == "online" else 1
            info["name"] = entry["name"]
            info["size"] = entry["size"]  #: In bytes

        return info
Esempio n. 18
0
    def get_info(cls, url="", html=""):
        """Build a file-info dict for *url*, merging API data with page scraping.

        Starts from the parent class's info dict, overlays cls.api_info(),
        and only fetches/parses the page when the API did not already
        report status 2 ("online" in this codebase's status scheme —
        TODO confirm against the status constants).
        """
        info = super(SimpleDecrypter, cls).get_info(url)

        info.update(cls.api_info(url))

        # Scrape only when no html was supplied and the API result was
        # inconclusive (anything other than 2).
        if not html and info["status"] != 2:
            if not url:
                info["error"] = "missing url"
                info["status"] = 1

            elif info["status"] in (3, 7):
                try:
                    html = get_url(url,
                                   cookies=cls.COOKIES,
                                   decode=cls.TEXT_ENCODING)

                except BadHeader as exc:
                    info["error"] = "{}: {}".format(exc.code, exc.content)

                except Exception:
                    # Best-effort fetch: other failures leave html empty
                    # and keep the API-provided status.
                    pass

        # Pattern-based classification: offline beats temp-offline beats
        # name extraction (first match wins).
        if html:
            if cls.OFFLINE_PATTERN and re.search(cls.OFFLINE_PATTERN,
                                                 html) is not None:
                info["status"] = 1

            elif (cls.TEMP_OFFLINE_PATTERN
                  and re.search(cls.TEMP_OFFLINE_PATTERN, html) is not None):
                info["status"] = 6

            elif cls.NAME_PATTERN:
                m = re.search(cls.NAME_PATTERN, html)
                if m is not None:
                    info["status"] = 2
                    info["pattern"].update(m.groupdict())

        # "N" is the name capture group accumulated in info["pattern"].
        if "N" in info["pattern"]:
            name = replace_patterns(info["pattern"]["N"],
                                    cls.NAME_REPLACEMENTS)
            info["name"] = parse_name(name)

        return info
Esempio n. 19
0
    def api_info(cls, url):
        """Fetch name/size/status via nitroflare's get_file_info API."""
        info = {}
        file_id = re.search(cls.__pattern__, url).group("ID")

        raw = get_url(
            "https://nitroflare.com/api/v2/get_file_info",
            get={"files": file_id},
            decode=True,
        )
        payload = json.loads(raw)

        if payload["type"] == "success":
            fileinfo = payload["result"]["files"][file_id]
            info["status"] = 2 if fileinfo["status"] == "online" else 1
            info["name"] = fileinfo["name"]
            info["size"] = fileinfo["size"]  #: In bytes

        return info
Esempio n. 20
0
    def get_info(cls, url="", html=""):
        """Build a file-info dict for *url*, layering API data over defaults.

        The parent class's info dict is refined by cls.api_info(); the
        page itself is fetched and scraped only when the API did not
        already report status 2 (online — TODO confirm status constants).
        """
        info = super(SimpleDecrypter, cls).get_info(url)

        info.update(cls.api_info(url))

        # Only fetch the page when no html was given and the API result
        # was inconclusive.
        if not html and info["status"] != 2:
            if not url:
                info["error"] = "missing url"
                info["status"] = 1

            elif info["status"] in (3, 7):
                try:
                    html = get_url(url, cookies=cls.COOKIES, decode=cls.TEXT_ENCODING)

                except BadHeader as exc:
                    info["error"] = "{}: {}".format(exc.code, exc.content)

                except Exception:
                    # Best-effort fetch: silently keep the API status.
                    pass

        # Classify by page content: offline > temp-offline > name match.
        if html:
            if cls.OFFLINE_PATTERN and re.search(cls.OFFLINE_PATTERN, html) is not None:
                info["status"] = 1

            elif (
                cls.TEMP_OFFLINE_PATTERN
                and re.search(cls.TEMP_OFFLINE_PATTERN, html) is not None
            ):
                info["status"] = 6

            elif cls.NAME_PATTERN:
                m = re.search(cls.NAME_PATTERN, html)
                if m is not None:
                    info["status"] = 2
                    info["pattern"].update(m.groupdict())

        # "N" is the captured file-name group; normalize it for display.
        if "N" in info["pattern"]:
            name = replace_patterns(info["pattern"]["N"], cls.NAME_REPLACEMENTS)
            info["name"] = parse_name(name)

        return info
Esempio n. 21
0
    def api_info(cls, url):
        """Return file info from share-online.biz's linkcheck endpoint."""
        info = {}
        response = get_url(
            "http://api.share-online.biz/linkcheck.php",
            get={"md5": "1", "links": re.match(cls.__pattern__, url).group("ID")},
        )
        parts = response.split(";")
        try:
            if parts[1] == "OK":
                info.update(
                    {
                        "fileid": parts[0],
                        "status": 2,
                        "name": parts[2],
                        "size": parts[3],  #: In bytes
                        "md5": parts[4].strip().lower().replace("\n\n", ""),  #: md5
                    }
                )
            elif parts[1] in ("DELETED", "NOTFOUND"):
                info["status"] = 1
        except IndexError:
            # Truncated responses: return whatever was gathered.
            pass

        return info
Esempio n. 22
0
    def get_info(cls, url="", html=""):
        """Build a file-info dict for *url* by combining API data and page scraping.

        Starts from the parent class defaults, overlays cls.api_info(),
        fetches the page only when needed, then extracts name/size/hash
        from the class's regex patterns into info["pattern"].
        """
        info = super(SimpleDownloader, cls).get_info(url)
        info.update(cls.api_info(url))

        # Fetch the page only when no html was supplied and the API did
        # not already settle the status.
        if not html and info["status"] != 2:
            if not url:
                info["error"] = "missing url"
                info["status"] = 1

            elif info["status"] in (3, 7):
                try:
                    html = get_url(url, cookies=cls.COOKIES, decode=cls.TEXT_ENCODING)

                except BadHeader as exc:
                    info["error"] = "{}: {}".format(exc.code, exc.content)

                except Exception:
                    # Best-effort fetch; failures keep the API status.
                    pass

        if html:
            if cls.OFFLINE_PATTERN and re.search(cls.OFFLINE_PATTERN, html) is not None:
                info["status"] = 1

            elif (
                cls.TEMP_OFFLINE_PATTERN
                and re.search(cls.TEMP_OFFLINE_PATTERN, html) is not None
            ):
                info["status"] = 6

            else:
                # Try each metadata pattern; any successful match marks
                # the file status 2 via the for/else below.
                for pattern in (
                    "INFO_PATTERN",
                    "NAME_PATTERN",
                    "SIZE_PATTERN",
                    "HASHSUM_PATTERN",
                ):
                    try:
                        attr = getattr(cls, pattern)
                        pdict = re.search(attr, html).groupdict()

                        # NOTE(review): this condition is always True — the
                        # generator yields only True values, and all() of an
                        # empty iterable is True too.  The intent was likely
                        # all(k not in info["pattern"] for k in pdict); left
                        # unchanged to preserve current behavior.
                        if all(True for k in pdict if k not in info["pattern"]):
                            info["pattern"].update(pdict)

                    except Exception:
                        # Missing attribute or no regex match: try the next.
                        continue

                    else:
                        info["status"] = 2

        # "N": captured name; normalize before display.
        if "N" in info["pattern"]:
            name = replace_patterns(info["pattern"]["N"], cls.NAME_REPLACEMENTS)
            info["name"] = parse_name(name)

        # "S": captured size (optionally with unit group "U").
        if "S" in info["pattern"]:
            size = replace_patterns(
                info["pattern"]["S"] + info["pattern"]["U"]
                if "U" in info["pattern"]
                else info["pattern"]["S"],
                cls.SIZE_REPLACEMENTS,
            )
            info["size"] = parse_size(size)

        elif isinstance(info["size"], str):
            unit = info["units"] if "units" in info else ""
            info["size"] = parse_size(info["size"], unit)

        # "H"/"D": hash type and digest captured from HASHSUM_PATTERN.
        if "H" in info["pattern"]:
            hash_type = info["pattern"]["H"].strip("-").upper()
            info["hash"][hash_type] = info["pattern"]["D"]

        return info
Esempio n. 23
0
 def api_request(cls, method, **kwargs):
     """POST kwargs to <API_URL><method>/ and return the decoded JSON reply."""
     endpoint = cls.API_URL + method + "/"
     return json.loads(get_url(endpoint, post=kwargs))
Esempio n. 24
0
 def api_response(cls, method, **kwargs):
     """POST kwargs as a JSON body to <API_URL><method>; return parsed JSON."""
     raw = get_url(cls.API_URL + method, post=json.dumps(kwargs))
     return json.loads(raw)
Esempio n. 25
0
 def api_response(cls, file_id, method, **kwargs):
     """GET <API_URL>/file/<method> for *file_id*; return the decoded JSON."""
     kwargs["file"] = file_id
     endpoint = cls.API_URL + "/file/" + method
     return json.loads(get_url(endpoint, get=kwargs))
Esempio n. 26
0
    def get_info(cls, url="", html=""):
        """Assemble a file-info dict for *url* from API data and page patterns.

        Parent-class defaults are overlaid by cls.api_info(); the page is
        fetched lazily, then the class regex patterns populate
        info["pattern"] with name ("N"), size ("S"/"U") and hash
        ("H"/"D") capture groups.
        """
        info = super(SimpleDownloader, cls).get_info(url)
        info.update(cls.api_info(url))

        # Fetch the page only when needed (no html given and the API did
        # not already resolve the status).
        if not html and info["status"] != 2:
            if not url:
                info["error"] = "missing url"
                info["status"] = 1

            elif info["status"] in (3, 7):
                try:
                    html = get_url(url,
                                   cookies=cls.COOKIES,
                                   decode=cls.TEXT_ENCODING)

                except BadHeader as exc:
                    info["error"] = "{}: {}".format(exc.code, exc.content)

                except Exception:
                    # Best-effort fetch; keep the API-provided status.
                    pass

        if html:
            if cls.OFFLINE_PATTERN and re.search(cls.OFFLINE_PATTERN,
                                                 html) is not None:
                info["status"] = 1

            elif (cls.TEMP_OFFLINE_PATTERN
                  and re.search(cls.TEMP_OFFLINE_PATTERN, html) is not None):
                info["status"] = 6

            else:
                # Any pattern that matches flips the status to 2 via the
                # try/except/else below.
                for pattern in (
                        "INFO_PATTERN",
                        "NAME_PATTERN",
                        "SIZE_PATTERN",
                        "HASHSUM_PATTERN",
                ):
                    try:
                        attr = getattr(cls, pattern)
                        pdict = re.search(attr, html).groupdict()

                        # NOTE(review): always-True condition — the generator
                        # yields only True values and all([]) is True.  The
                        # intended check was probably
                        # all(k not in info["pattern"] for k in pdict).
                        # Left as-is to preserve behavior.
                        if all(True for k in pdict
                               if k not in info["pattern"]):
                            info["pattern"].update(pdict)

                    except Exception:
                        # Pattern missing on cls or no match: try the next.
                        continue

                    else:
                        info["status"] = 2

        # "N": captured file name, normalized for display.
        if "N" in info["pattern"]:
            name = replace_patterns(info["pattern"]["N"],
                                    cls.NAME_REPLACEMENTS)
            info["name"] = parse_name(name)

        # "S" (+ optional unit "U"): captured size string → bytes.
        if "S" in info["pattern"]:
            size = replace_patterns(
                info["pattern"]["S"] + info["pattern"]["U"]
                if "U" in info["pattern"] else info["pattern"]["S"],
                cls.SIZE_REPLACEMENTS,
            )
            info["size"] = parse.bytesize(size)

        elif isinstance(info["size"], str):
            unit = info["units"] if "units" in info else ""
            info["size"] = parse.bytesize(info["size"], unit)

        # "H"/"D": hash type and digest from HASHSUM_PATTERN.
        if "H" in info["pattern"]:
            hash_type = info["pattern"]["H"].strip("-").upper()
            info["hash"][hash_type] = info["pattern"]["D"]

        return info
Esempio n. 27
0
 def api_response(cls, method, **kwargs):
     """Call API endpoint *method* with a JSON-encoded body; decode the reply."""
     return json.loads(get_url(cls.API_URL + method, post=json.dumps(kwargs)))
Esempio n. 28
0
 def api_response(cls, method, **kwargs):
     """Wrap kwargs (with "a" = method) in a one-element JSON list and POST it."""
     kwargs["a"] = method
     payload = json.dumps([kwargs])
     return json.loads(get_url(cls.API_URL, post=payload))
Esempio n. 29
0
 def api_response(cls, method, **kwargs):
     """POST kwargs to <API_URL><method>/ and return the raw response body."""
     endpoint = cls.API_URL + method + "/"
     return get_url(endpoint, post=kwargs)
Esempio n. 30
0
 def api_response(cls, method, **kwargs):
     """Issue a POST to the endpoint for *method*; no JSON decoding is applied."""
     target = "".join((cls.API_URL, method, "/"))
     return get_url(target, post=kwargs)
Esempio n. 31
0
 def api_response(cls, method, **kwargs):
     """GET the endpoint for *method*, adding the class API key; decode JSON."""
     kwargs["key"] = cls.API_KEY
     return json.loads(get_url(cls.API_URL + method, get=kwargs))