Example #1
async def add_movie_to_db():
    data = await request.get_json()
    mgf = os.environ.get("mgf")
    if data.get("token") != mgf:
        return Response(json.dumps({"error": "bad token"}), content_type=json_ctype)
    tp = data.get("type")
    if tp == "add":
        name = data.get("movie")
        thumb = data.get("thumb")
        url, alt1, alt2 = data.get("movie-urls") or (None, None, None)
        subs = b""
        if not all([name, thumb, url]):
            return Response(json.dumps({"error": "Invalid Values"}))
        thumb = upload(thumb)["secure_url"]
        data_tuple = (name, url, alt1, alt2, thumb, subs)
        add_to_db(data_tuple, db, movieData)
        return Response(json.dumps({"success": "ok"}), content_type=json_ctype)
    if tp == "edit":
        mid = data.get("mid")
        url, alt1, alt2 = data.get("movie-urls") or (None, None, None)
        movie = movieData.query.filter_by(mid=mid).first()
        if not movie:
            return Response(
                json.dumps({"error": "not exists"}), content_type=json_ctype
            )
        if not any([url, alt1, alt2]):
            return Response(
                json.dumps({"error": "invalid URLS"}), content_type=json_ctype
            )
        if url:
            movie.url = url
        if alt1:
            movie.alt1 = alt1
        if alt2:
            movie.alt2 = alt2
        db.session.commit()
        return Response(json.dumps({"success": "ok"}), content_type=json_ctype)
Example #2
def get_(url: str, v: bool = True, n: int = 1, season: int = 0) -> None:
    ua = random.choice([
        "Mozilla/5.0 (Windows; U; Windows NT 10.0; en-US) AppleWebKit/604.1.38 (KHTML, like Gecko) Chrome/68.0.3325.162",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0_2 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A421 Safari/604.1",
        "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM1.171019.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Mobile Safari/537.36",
        "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0",
        "Mozilla/5.0 (Linux; Android 7.0; SM-G892A Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/60.0.3112.107 Mobile Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    ])
    print(colors.OKBLUE + "[debug]" + colors.ENDC + "Fetching:\n", url)
    basic_headers = {
        "Accept-Encoding": "gzip,deflate",
        "User-Agent": ua,
        "Upgrade-Insecure-Requests": "1",
        "dnt": "1",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    }
    sess = requests.Session()
    to_screen(
        [colors.OKBLUE + "[debug]" + colors.ENDC + "Using Standard Headers:"],
        v)
    dict_print(basic_headers, v=v)
    page = sess.get(url, headers=basic_headers, allow_redirects=True)
    to_screen(
        [colors.OKBLUE + "[debug]" + colors.ENDC + "Page URL:", page.url], v)
    soup = bs(page.text, "html5lib")
    to_screen(
        ["\n" + colors.OKBLUE + "[debug]" + colors.ENDC + "Finding Title"], v)
    title = soup.find("input", attrs={"name": "movies_title"}).attrs["value"]
    to_screen([colors.OKBLUE + "[debug]" + colors.ENDC + "Found:", title], v)
    to_screen([colors.OKBLUE + "[debug]" + colors.ENDC + "Finding Thumbnail"],
              v)
    thumbnail = soup.find("input", attrs={"name": "phimimg"}).attrs["value"]
    to_screen([colors.OKBLUE + "[debug]" + colors.ENDC + "Found", thumbnail],
              v)
    if thumbnail.startswith("/"):
        if thumbnail.startswith("//"):
            thumbnail = "https:" + thumbnail
        else:
            thumbnail = input("fix the thumbnail:")
    url_ = page.url
    to_screen([
        colors.OKBLUE + "[debug]" + colors.ENDC + "Adding Referer to headers"
    ], v)
    basic_headers = {**basic_headers, "Referer": url_}
    to_screen(
        [
            colors.OKBLUE + "[debug]" + colors.ENDC +
            "Adding X-Requested-With to headers"
        ],
        v,
    )
    basic_headers = {**basic_headers, "X-Requested-With": "XMLHttpRequest"}
    parsed_url = urlp_(url_)
    origin = "https://" + parsed_url.netloc
    host = origin + "/"
    to_screen(
        [colors.OKBLUE + "[debug]" + colors.ENDC + "Adding Origin to headers"],
        v)
    basic_headers = {**basic_headers, "Origin": origin}
    div = (soup.find(attrs={"id": "ip_episode"})
           or soup.find(attrs={"id": "list-eps"})
           or soup.find(attrs={"class": "pas-list"}))
    to_screen(
        [colors.OKBLUE + "[debug]" + colors.ENDC + "Finding Ipplayer Configs"],
        v)
    if div is None:
        raise Exception("Could Not Find Ipplayer Configs")
    tags = div.findChildren(attrs={"data-next": True})
    number_of_eps = len(tags)
    episode_data = {}
    for i in range(n, number_of_eps + 1):
        attrs_ = tags[i - 1]
        to_screen(
            [
                colors.OKBLUE + "[debug]" + colors.ENDC +
                "Fetching Config URLs for season-%s episode number-%s" %
                (str(season), str(i))
            ],
            v,
        )
        data_headers = {
            "keyurl": "%d" % (i),
            "postid": "server",
            "phimid": attrs_.attrs["data-film"],
        }
        to_screen([
            "\n" + colors.OKBLUE + "[debug]" + colors.ENDC + "Sending Config:"
        ], v)
        dict_print(data_headers, v)
        toparse = sess.post(host + "index.php",
                            data=data_headers,
                            cookies=page.cookies)
        parsed_data = bs(toparse.text, "html.parser")
        tgs = parsed_data.find_all("a")
        data = []
        for t in tgs:
            to_send = t.attrs
            if "netu.tv" in t.text.lower() or "hqq.tv" in t.text.lower():
                print(colors.FAIL + "[info]" + colors.ENDC +
                      "Stopping Execution,found a malacious website")
                continue
            to_screen(
                [
                    colors.OKBLUE + "[debug]" + colors.ENDC +
                    "Working with the configs of",
                    t.string,
                ],
                v,
            )
            to_screen(
                [colors.OKBLUE + "[debug]" + colors.ENDC + "Found Configs:"],
                v)
            dict_print(t.attrs, v)
            sleep(1)
            a = sess.post(
                host + "ip.file/swf/plugins/ipplugins.php",
                headers=basic_headers,
                data={
                    "ipplugins": 1,
                    "ip_film": to_send["data-film"],
                    "ip_server": to_send["data-server"],
                    "ip_name": to_send["data-name"],
                    "fix": "null",
                },
            )
            b = json.loads(a.text)
            sleep(1)
            to_screen([colors.OKBLUE + "[debug]" + colors.ENDC + "Received:"],
                      v)
            dict_print(b, v)
            reqdata = urlencode({
                "u": b["s"],
                "w": "100%25",
                "h": "500",
                "s": to_send["data-server"],
                "n": "0",
            })
            print(colors.OKBLUE +
                  "[debug]Sending a GET request with parameters to:" +
                  colors.ENDC + host + "ip.file/swf/ipplayer/ipplayer.php")
            # The site appears to lean on Cloudflare's caching (a GET with
            # query parameters) instead of processing the data on every request.
            ret = sess.get(
                host + "ip.file/swf/ipplayer/ipplayer.php?" + reqdata,
                cookies=page.cookies,
                headers=basic_headers,
            )
            res = json.loads(ret.text)
            to_screen(
                [colors.OKBLUE + "[debug]" + colors.ENDC + "Received Data:"],
                v)
            dict_print(res, v)

            url_rec = res.get("data")
            if url_rec:
                if "netu.tv" in url_rec or "hqq.tv" in url_rec:
                    # Garbage website..mines crypto and stuff ,terrible hosting
                    #  videos are usually deleted and other stuff
                    # stay away from this abomination of video hosting
                    print(colors.FAIL + "[info]" + colors.ENDC +
                          "Stopping Execution,found a malacious website")
                    continue
                if url_rec.startswith("//"):
                    url_rec = "https:" + url_rec
                data.append(url_rec.replace("http://", "https://"))
            else:
                print(colors.BOLD + "[info]" + colors.ENDC +
                      "Skipping: no URL received")
                continue
        while len(data) > 3:
            p_print(data)
            dt_n = input(
                colors.BOLD + "[info]" + colors.ENDC +
                "Enter the number of the url to remove from the List:")
            data.pop(int(dt_n) - 1)
        if len(data) < 3:
            nones = [None] * (3 - len(data))
            data += nones
        if all(s is None for s in data):
            data = []
            print(
                colors.BOLD + "[info]" + colors.ENDC + "no urls for episode:",
                i)
            a = input("Enter URL1:") or None
            b = input("Enter URL2:")
            if len(b) < 5:
                b = None
            c = input("Enter URL3:")
            if len(c) < 5:
                c = None
            data = [a, b, c]
        episode_data[i] = data
        to_screen([colors.OKBLUE + "[debug]" + colors.ENDC + "Episode Data"],
                  v)
        dict_print(episode_data, v)
    to_screen(
        [
            colors.OKBLUE + "[debug]" + colors.ENDC +
            "Fetching Thumbnail and uploading to cdn"
        ],
        v,
    )
    image = upload.upload(thumbnail).get("secure_url")
    to_screen([
        colors.OKBLUE + "[debug]" + colors.ENDC + "Secure URL of Image:", image
    ], v)
    base_template = (title, image, season, episode_data)
    yn = "y"
    if yn == "y":
        print(colors.BOLD + "[info]" + colors.ENDC + "Adding to database:")
        print(dbmanage.add_to_db(base_template, db, tvData))
    else:
        print(colors.BOLD + "[info]" + colors.ENDC + "Returning Values Only")
    print("Done")
Example #3
def get_(url, v=True, dbinst=None, movieDatInst=None):
    # The stored url is the plain URL base64-encoded, ROT13-translated and
    # reversed; undo those steps in reverse order and append the play flag.
    url = (
        base64.b64decode(codecs.encode(url[::-1], "rot13").encode()).decode()
        + "?play=1"
    )
    ua = "Mozilla/5.0 (Windows; U; Windows NT 10.0; en-US) AppleWebKit/604.1.38 (KHTML, like Gecko) Chrome/68.0.3325.162"
    print("[debug]Fetching:\n", url)
    basic_headers = {
        "User-Agent": ua,
        "Upgrade-Insecure-Requests": "1",
        "dnt": "1",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    }
    sess = requests.Session()
    to_screen(["[debug]Using Standard Headers:", basic_headers], v)
    page = sess.get(url, headers=basic_headers, allow_redirects=True)
    to_screen(["[debug]Page URL:", page.url], v)
    to_screen(["[debug]Cookie Jar For %s:%s\n" % (page.url, dict(page.cookies))], v)
    soup = bs(page.text, "html.parser")
    url_ = page.url
    to_screen(["[debug]Adding Referer to headers"], v)
    basic_headers = {**basic_headers, "Referer": url_}
    to_screen(["[debug]Adding X-Requested-With to headers"], v)
    basic_headers = {**basic_headers, "X-Requested-With": "XMLHttpRequest"}
    parsed_url = urlp_(url_)
    host = "https://" + parsed_url.netloc + "/"
    div = soup.find(attrs={"data-id": "1"})
    to_screen(["[debug]Finding Ipplayer Configs"], v)
    if div is None:
        raise Exception("Could Not Find Ipplayer Configs")
    tags = div.findChildren(attrs={"data-film": True})
    data = []
    subtitles = None
    for t in tags:
        to_screen(["[debug]Working with the configs of", t.string], v)
        to_send = t.attrs
        to_screen(["[debug]Found Configs:", t.attrs], v)
        to_screen(["[debug]Sleeping for 2 seconds"], v)
        sleep(1)
        a = sess.post(
            host + "ip.file/swf/plugins/ipplugins.php",
            headers=basic_headers,
            data={
                "ipplugins": 1,
                "ip_film": to_send["data-film"],
                "ip_server": to_send["data-server"],
                "ip_name": to_send["data-name"],
                "fix": "null",
            },
        )
        b = json.loads(a.text)
        sleep(1)
        to_screen(["[debug]Recieved:", b], v)
        subtitles = b.get("c")
        if isinstance(subtitles, list) and len(subtitles):
            subtitles = subtitles[0].get("file")
        ret = sess.post(
            host + "ip.file/swf/ipplayer/ipplayer.php",
            cookies=page.cookies,
            headers=basic_headers,
            data={
                "u": b["s"],
                "w": "100%25",
                "h": "500",
                "s": to_send["data-server"],
                "n": "0",
            },
        )
        res = json.loads(ret.text)
        to_screen(["[debug]Cookie Jar For %s:%s\n" % (ret.url, dict(ret.cookies))], v)
        to_screen(["[debug]Recieved Data:", res], v)
        data.append(res.get("data"))
    to_screen(["\n[debug]Finding Title"], v)
    title = soup.find("input", attrs={"name": "movies_title"}).attrs["value"]
    to_screen(["[debug]Found:", title], v)
    to_screen(["[debug]Finding Thumbnail"], v)
    thumbnail = soup.find("input", attrs={"name": "phimimg"}).attrs["value"]
    to_screen(["[debug]Found", thumbnail], v)
    to_screen(["[debug]Fetching Thumbnail and uploading to cdn"], v)
    data = list(filter(val_url, data))
    while len(data) > 3:
        p_print(data)
        dt_n = input("[info]Enter the number of the url to remove from the List:")
        data.pop(int(dt_n) - 1)
    if len(data) < 3:
        nones = [None] * (3 - len(data))
        data += nones
    image = upload.upload(thumbnail).get("secure_url")
    to_screen(["[debug]Secure URL of Image:", image], v)
    data_dict = {"title": title, "thumbnail": image, "urls": data}
    db_m_tuple = (
        data_dict["title"],
        *data_dict["urls"],
        data_dict["thumbnail"],
        sess.get(
            f"https://{parsed_url.netloc}{subtitles}",
            headers=basic_headers,
            cookies=page.cookies,
        ).text.encode()
        if subtitles
        else b"",
    )
    print("create data tuple")
    print(dbmanage.add_to_db(db_m_tuple, dbinst, movieDatInst))
    return json.dumps(data_dict)
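
For reference, a small round-trip sketch of the URL obfuscation this function undoes, using only the standard base64 and codecs modules; the sample URL is illustrative:

# Round-trip of the obfuscation reversed at the top of get_():
# plain -> base64 -> ROT13 -> reversed, then decoded back again.
import base64
import codecs

plain = "https://example.com/movie/123"
obfuscated = codecs.encode(base64.b64encode(plain.encode()).decode(), "rot13")[::-1]
decoded = base64.b64decode(codecs.encode(obfuscated[::-1], "rot13").encode()).decode()
assert decoded == plain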
Example #4
def get_(url, v=True, n=1, season=0):
    ua = random.choice([
        "Mozilla/5.0 (Windows; U; Windows NT 10.0; en-US) AppleWebKit/604.1.38 (KHTML, like Gecko) Chrome/68.0.3325.162",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0_2 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A421 Safari/604.1",
        "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM1.171019.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.137 Mobile Safari/537.36",
        "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
    ])
    print("[debug]Fetching:\n", url)
    basic_headers = {
        "User-Agent": ua,
        "Upgrade-Insecure-Requests": "1",
        "dnt": "1",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    }
    sess = requests.Session()
    to_screen(["[debug]Using Standard Headers:"], v)
    dict_print(basic_headers, v=v)
    page = sess.get(url, headers=basic_headers, allow_redirects=True)
    to_screen(["[debug]Page URL:", page.url], v)
    to_screen(["[debug]Cookie Jar For %s:" % (page.url)], v)
    dict_print(dict(page.cookies), v)
    soup = bs(page.text, "html.parser")
    to_screen(["\n[debug]Finding Title"], v)
    title = re.search(r"this\.page\.title\s*?=\s*?'(?P<id>.*?)';",
                      page.text).group("id")
    to_screen(["[debug]Found:", title], v)
    to_screen(["[debug]Finding Thumbnail"], v)
    thumbnail = soup.find("meta", attrs={
        "property": "og:image"
    }).attrs.get("content")
    to_screen(["[debug]Found", thumbnail], v)
    if thumbnail.startswith("/"):
        if thumbnail.startswith("//"):
            thumbnail = "https:" + thumbnail
        else:
            thumbnail = input("fix the thumbnail:")
    next_episode = True
    episode_data = {}
    i = 1
    while next_episode:
        data = []
        urls = soup.find_all(attrs={"episode-data": str(i)})
        if not urls:
            break
        for ep in urls:
            data.append(
                ep.attrs.get("player-data").replace("http://", "https://"))
        if len(data) < 3:
            nones = [None] * (3 - len(data))
            data += nones
        p_print(data)
        dt_n = input(
            "[info]Enter the numbers of the urls to remove, separated by spaces:"
        )
        dt_n = dt_n.split()
        print(dt_n)
        to_remove = []
        for num in dt_n:
            to_remove.append(data[int(num) - 1])
        # NOTE: set subtraction also deduplicates and does not preserve order.
        data = list(set(data) - set(to_remove))
        if len(data) < 3:
            nones = [None] * (3 - len(data))
            data += nones
        p_print(data)
        episode_data = {**episode_data, i: data}
        i += 1
        to_screen(["[debug]Episode Data"], v)
        dict_print(episode_data, v)
    to_screen(["[debug]Fetching Thumbnail and uploading to cdn"], v)
    image = upload.upload(thumbnail).get("secure_url")
    to_screen(["[debug]Secure URL of Image:", image], v)
    base_template = (title, image, season, episode_data)
    yn = "y"
    if yn == "y":
        print("[info]Adding to database:")
        print(base_template)
        print(dbmanage.add_to_db(base_template))
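
Examples #2, #3, and #4 all repeat the same pad-or-trim-to-three-URLs dance inline. A hedged refactoring sketch; the helper name normalize_urls is my own, and it reuses the p_print helper the examples already assume:

# Hypothetical helper capturing the repeated pad/trim pattern.
def normalize_urls(data, slots=3):
    """Interactively trim above `slots` entries, then pad with None up to it."""
    while len(data) > slots:
        p_print(data)
        dt_n = input("[info]Enter the number of the url to remove from the List:")
        data.pop(int(dt_n) - 1)
    return data + [None] * (slots - len(data))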