Example #1
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    b64_str = re.search(r"window\.atob\('(.*)'\)", html.text).group(1)
    de_str = base64.b64decode(b64_str).decode("utf-8")
    return [de_str]
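All examples on this page call a few project helpers (header_random_agent, http_get, parse_url) whose definitions are not shown, and assume the usual imports (re, base64, BeautifulSoup, requests). A minimal sketch of what those helpers might look like, assuming plain requests and urllib3; the real implementations may differ:

import random
import requests
from urllib3.util import parse_url  # Url object with .scheme/.netloc/.path

# Hypothetical pool; the real project likely ships a longer list.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
    "Mozilla/5.0 (X11; Linux x86_64)",
]

def header_random_agent():
    # Fresh headers dict with a randomly chosen User-Agent.
    return {"User-Agent": random.choice(USER_AGENTS)}

def http_get(url, **kwargs):
    # Thin wrapper around requests.get; the examples pass headers=,
    # cookies= and params= straight through.
    return requests.get(url, timeout=10, **kwargs)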
Example #2
def get_all_sources(key):
    headers = header_random_agent()
    headers.update({
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
    })
    url = "%s%s/" % (ROOT_URL, key)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    rows = soup.find_all(class_="MuiTableRow-root jss21 MuiTableRow-hover")

    sources = []
    for r in rows:
        url = r.get("href")
        columns = list(r.children)

        streamer = columns[0].getText().strip()
        channel = columns[2].getText().strip()
        lang = columns[4].getText().strip()

        sources.append({
            "streamer": streamer,
            "channel": channel,
            "lang": lang,
            "url": url
        })
    return sources
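Note that "jss21" in the class string above is a generated JSS class name and is likely to change between site builds; matching only the stable Material-UI classes, for example with a CSS selector, would be more robust:

# Hypothetical alternative: ignore the volatile jss* class.
rows = soup.select("tr.MuiTableRow-root.MuiTableRow-hover")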
Example #3
def get_all_sources(key):
    headers = header_random_agent()
    headers.update({
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
    })
    url = "%s%s/" % (EVENT_URL, key)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    table = soup.find(class_="table-streams")
    table_body = table.find("tbody")
    rows = table_body.find_all("tr")
    sources = []
    for r in rows:
        try:
            streamer_info = r.find("th")
            url = streamer_info.find("a").get("href")
            streamer_name = streamer_info.find(
                class_="media-body").getText().strip()

            columns = r.find_all("td")
            quality = columns[4].getText().strip()
            channel_name = columns[0].getText().strip()
            lang = columns[1].getText().strip()
            sources.append({
                "streamer": streamer_name,
                "quality": quality,
                "channel": channel_name,
                "lang": lang,
                "url": url
            })
        except Exception:
            # Skip rows that do not match the expected structure.
            pass
    return sources
Example #4
def get_urls(url, referer=''):
    headers = header_random_agent()
    if referer != '':
        headers.update({"Referer": referer})
    parsed_url = parse_url(url)
    html = http_get(url, headers=headers)
    return search_and_format(html.text)
Example #5
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    iframe_url = "%s://%s/%s" % (p_url.scheme, p_url.netloc, iframe.get("src"))
    return generic_m3u8_searcher.get_urls(iframe_url)
Example #6
def nth_iframe_get_urls(url, nth_iframe=0):
    headers = header_random_agent()
    cookies = {}
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe_url = soup.find_all("iframe")[nth_iframe].get("src")
    return get_urls(iframe_url)
Example #7
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    html = http_get(iframe.get("src"), headers=headers)
    b64_str = re.search(r"window\.atob\('(.*)'\)", html.text).group(1)
    de_str = base64.b64decode(b64_str).decode("utf-8") 
    return [de_str]
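The window.atob('...') pattern used in Examples #1 and #7 is a common obfuscation: the page embeds the stream URL as a Base64 literal for the browser to decode client-side, so the scraper decodes it itself. A quick round trip with a made-up payload:

import base64
import re

# Hypothetical page fragment; real pages embed the URL the same way.
sample = "<script>var s = window.atob('aHR0cHM6Ly9leGFtcGxlLmNvbS9saXZlLm0zdTg=');</script>"
b64_str = re.search(r"window\.atob\('(.*)'\)", sample).group(1)
print(base64.b64decode(b64_str).decode("utf-8"))  # https://example.com/live.m3u8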
Example #8
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_url, headers=headers)
    rSI = algo(html.text)
    return [rSI]
Example #9
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    iframe_url = iframe.get("src")
    if iframe_url.startswith("//"):
        iframe_url = "https:{}".format(iframe_url)
    return generic_m3u8_searcher.get_urls(iframe_url)
Example #10
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    iframe_url = iframe.get("src")
    html = http_get(iframe_url, headers=headers)
    m3u8 = re.search(r"source: \'(.*)\'", html.text).group(1)
    return [m3u8]
Example #11
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f"http:{f_iframe_1_url}", headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    source = re.search(r"source:'(.*?)',", html.text).group(1)
    return [source]
Example #12
def get_urls(url):
    headers = header_random_agent()
    cookies = {}
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_1_url, headers=headers, cookies=cookies)
    return [wstreamto(html.text)]
Example #13
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    first_iframe_url = soup.find("iframe").get("src")
    f_url = parse_url(first_iframe_url)
    m3u8_page_url = "%s://%s%s" % (p_url.scheme, BASE_STREAM, f_url.path)
    headers.update({"Referer": BASE_STREAM_REF})
    html = http_get(m3u8_page_url, headers=headers)
    urls = generic_m3u8_searcher.search(html.text)
    return urls
Example #14
def get_urls(url):
    headers = header_random_agent()
    parsed_url = parse_url(url)
    html = http_get(url, headers=headers)
    urls = search(html.text)
    formatted = []
    for u in urls:
        if u.startswith("//"):
            formatted.append("%s:%s" % (parsed_url.scheme, u))
        else:
            formatted.append(u)
    no_duplicates = list(dict.fromkeys(formatted))
    return no_duplicates
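The search helper used here (exposed elsewhere as generic_m3u8_searcher.search) is not shown on this page. A minimal sketch, assuming it simply regex-scans the HTML for .m3u8 URLs:

import re

# Hypothetical implementation: match http(s) and protocol-relative
# URLs pointing at an .m3u8 playlist.
M3U8_RE = re.compile(r"(?:https?:)?//[^\s'\"]+?\.m3u8[^\s'\"]*")

def search(text):
    return M3U8_RE.findall(text)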
Example #15
def get_urls(url):
    urls = []
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_1_url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    try:
        source1 = re.search(r"file\": \"(.*?)\"", html.text).group(1)
        urls.append(source1)
    except Exception:
        # No "file": "..." source found in the embed page.
        pass
    return urls
Example #16
def get_urls(url):
    header = header_random_agent()
    cookie = None
    # Retry: the Sucuri challenge page may need several fetches before
    # it yields the cookie required to reach the real content.
    for i in range(5):
        html = http_get(url, headers=header)
        cookie = get_sucuri_cookie(html.text)
        if cookie is not None:
            break
        time.sleep(random.uniform(0.5, 2))
    if cookie is None:
        return []
    cookies_jar = requests.cookies.RequestsCookieJar()
    cookies_jar.set(cookie["name"], cookie["value"], path=cookie["path"])
    html = http_get(url, headers=header, cookies=cookies_jar)
    urls = generic_m3u8_searcher.search(html.text)
    return urls
Example #17
def get_urls(url):
    p_url = parse_url(url)
    headers = header_random_agent()
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    angel = re.search(r"angelthump\.com/(.*?)/embed", html.text)
    headers.update({
        "Referer": url,
        "Origin": url,
    })
    if angel:
        angel_id = angel.group(1)
        return [M3U8_URL % angel_id]
    else:
        xyz = soup.find(allowfullscreen="true")
        xyz_url = "%s:%s" % (p_url.scheme, xyz.get("src"))
        html = http_get(xyz_url, headers=headers)
        return xyzembed(html.text)
Example #18
def get_all_sources(key):
    headers = header_random_agent()
    headers.update({
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
    })
    url = "%s%s/" % (EVENT_URL, key)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    rows = soup.find_all("tr")
    if len(rows) == 0:
        return []
    sources = []
    for r in rows:
        try:
            columns = r.find_all("td")
            if len(columns) == 3:
                streamer = columns[0].getText().strip()
                quality = columns[2].string
                channel = columns[1]
                channel_name = channel.string
                url = channel.find("a").get("href")
                sources.append({
                    "streamer": streamer,
                    "channel": channel_name,
                    "url": url
                })
            else:
                streamer = columns[0].getText().strip()
                quality = columns[1].string
                channel = columns[2]
                channel_name = channel.string
                url = channel.find("a").get("href")
                lang = columns[5].string
                sources.append({
                    "streamer": streamer,
                    "quality": quality,
                    "channel": channel_name,
                    "lang": lang,
                    "url": url
                })
        except Exception:
            # Rows without the expected column layout are skipped.
            pass
    return sources
Example #19
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    vidgstream = re.search(r'var vidgstream = \"(.*)\"', html.text).group(1)
    params = {
        "idgstream": vidgstream,
        "serverid": "",
    }
    headers.update({
        "Referer": url,
        "Origin": url,
        "Accept-Encoding": "compress"
    })
    resp = http_get(HLS_URL, params=params, headers=headers)
    data = resp.json()  # avoid shadowing the json module
    raw_url = data["rawUrl"]
    # rawUrl can be the literal string "null" rather than JSON null.
    if raw_url == 'null':
        return []
    return [raw_url]
Example #20
def dubzalgo(url, nth_iframe=0):
    r"""Decode the obfuscated stream URL hidden in the nth iframe.

    Method (pseudocode):
        rSI: str = ""
        tlc: list of Base64 tokens scraped from the page
        mn:  int offset scraped from the page (e.g. 61751400)
        for each s in tlc:
            b64 = base64.b64decode(s).decode("utf-8")
            digits = re.sub(r'\D', '', b64)
            rSI += chr(int(digits) - mn)
        search_and_format(rSI)
    """
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe_url = soup.find_all("iframe")[nth_iframe].get("src")
    headers.update({"Referer": url})
    html = http_get(iframe_url, headers=headers)
    text = html.text

    # Pull out the JS array of Base64 tokens and the magic offset.
    regex = r" = \[(.*)\]"
    rSI = ""
    tlc = re.search(regex, text, re.MULTILINE | re.DOTALL).group(1)
    tlc = re.sub(r'\s', '', tlc)
    tlc = tlc.split(",")
    tlc = list(map(lambda x: x.strip('"'), tlc))
    mn = re.search(r"\)\) - (\d+)\);", text).group(1).strip()
    mn = int(mn)
    for s in tlc:
        # Each token decodes to text whose digits, minus the offset,
        # give one character code of the hidden URL.
        b64 = base64.b64decode(s).decode("utf-8")
        digits = re.sub(r'\D', '', b64)
        if digits:
            rSI += chr(int(digits) - mn)

    return search_and_format(rSI)
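To make the decode loop concrete, here is one token worked by hand, using a hypothetical Base64 token and the offset 61751400 mentioned in the docstring:

import base64
import re

token = "Wno2MTc1MTUwNFp6"                         # decodes to "Zz61751504Zz"
digits = re.sub(r"\D", "", base64.b64decode(token).decode("utf-8"))  # "61751504"
print(chr(int(digits) - 61751400))                 # 61751504 - 61751400 = 104 -> 'h'

Repeated over the whole token list, this rebuilds the hidden URL one character at a time.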
Example #21
def get_urls(url):
    headers = header_random_agent()
    cookies = {}
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_1_url, headers=headers, cookies=cookies)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_2_url = soup.find("iframe").get("src")
    html = http_get(f_iframe_2_url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_3_url = soup.find("iframe").get("src")
    headers.update({"Referer": f_iframe_3_url})
    html = http_get(f_iframe_3_url, headers=headers)
    b64_str = re.search(r"window\.atob\(\"(.*)\"\)", html.text).group(1)
    de_str = base64.b64decode(b64_str).decode("utf-8")
    return [de_str]
Example #22
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    m3u8 = re.search(r"source: \"(.*)\"", html.text).group(1)
    return [m3u8]