def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    # Grab the first embedded player iframe and rebuild its URL on the stream host.
    first_iframe_url = soup.find("iframe").get("src")
    f_url = parse_url(first_iframe_url)
    m3u8_page_url = "%s://%s%s" % (p_url.scheme, BASE_STREAM, f_url.path)
    headers.update({"Referer": BASE_STREAM_REF})
    html = http_get(m3u8_page_url, headers=headers)
    urls = generic_m3u8_searcher.search(html.text)
    return urls
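generic_m3u8_searcher is not shown in these examples; a minimal sketch of its search() helper, assuming it simply regex-scans a page for .m3u8 links, could look like the following (the pattern is an assumption, not the real implementation):

import re

def search(text):
    # Collect absolute and protocol-relative URLs ending in .m3u8 (plus any query string).
    return re.findall(r"(?:https?:)?//[^\s'\"<>]+?\.m3u8[^\s'\"<>]*", text)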
Example #2
def get_urls(url, referer=''):
    headers = header_random_agent()
    if referer:
        headers.update({"Referer": referer})
    parsed_url = parse_url(url)
    html = http_get(url, headers=headers)
    return search_and_format(html.text)
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    # The stream URL is embedded as a base64 string passed to window.atob.
    b64_str = re.search(r"window\.atob\('(.*)'\)", html.text).group(1)
    de_str = base64.b64decode(b64_str).decode("utf-8")
    return [de_str]
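For illustration, a page containing window.atob('aHR0cHM6Ly9leGFtcGxlLmNvbS9saXZlLm0zdTg=') would yield the made-up URL below:

import base64, re

page = "player.src(window.atob('aHR0cHM6Ly9leGFtcGxlLmNvbS9saXZlLm0zdTg='));"  # hypothetical snippet
payload = re.search(r"window\.atob\('(.*)'\)", page).group(1)
print(base64.b64decode(payload).decode("utf-8"))  # https://example.com/live.m3u8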
Example #4
def nth_iframe_get_urls(url, nth_iframe=0):
    headers = header_random_agent()
    cookies = {}
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe_url = soup.find_all("iframe")[nth_iframe].get("src")
    return get_urls(iframe_url)
Example #5
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    html = http_get(iframe.get("src"), headers=headers)
    b64_str = re.search(r"window\.atob\('(.*)'\)", html.text).group(1)
    de_str = base64.b64decode(b64_str).decode("utf-8")
    return [de_str]
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    iframe_url = iframe.get("src")
    if iframe_url.startswith("//"):
        # Protocol-relative embed URL; assume https.
        iframe_url = "https:{}".format(iframe_url)
    return generic_m3u8_searcher.get_urls(iframe_url)
def get_urls(url):
    headers = header_random_agent()
    parsed_url = parse_url(url)
    html = http_get(url, headers=headers)
    urls = search(html.text)
    formatted = []
    for u in urls:
        if u.startswith("//"):
            formatted.append("%s:%s" % (parsed_url.scheme, u))
        else:
            formatted.append(u)
    no_duplicates = list(dict.fromkeys(formatted))
    return no_duplicates
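The final dict.fromkeys() pass drops duplicates while keeping the first occurrence, since Python 3.7+ dicts preserve insertion order; a quick illustration with made-up URLs:

list(dict.fromkeys(["https://a.example/x.m3u8", "https://a.example/x.m3u8", "//b.example/y.m3u8"]))
# -> ['https://a.example/x.m3u8', '//b.example/y.m3u8']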
def get_urls(url):
    headers = header_random_agent()
    cookies = {}
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    # Follow the nested player iframes, forwarding cookies and the Referer header.
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_1_url, headers=headers, cookies=cookies)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    headers.update({"Referer": f_iframe_1_url})
    f_iframe_2_url = soup.find("iframe").get("src")
    html = http_get(f_iframe_2_url, headers=headers)
    return [wstreamto(html.text)]
def get_urls(url):
    p_url = parse_url(url)
    headers = header_random_agent()
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    angel = re.search(r"angelthump\.com/(.*?)/embed", html.text)
    headers.update({
        "Referer": url,
        "Origin": url,
    })
    if angel:
        angel_id = angel.group(1)
        return [M3U8_URL % angel_id]
    else:
        xyz = soup.find(allowfullscreen="true")
        xyz_url = "%s:%s" % (p_url.scheme, xyz.get("src"))
        html = http_get(xyz_url, headers=headers)
        return xyzembed(html.text)
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    # The player stores a stream id in "var vidgstream"; it is passed back to HLS_URL to resolve the raw stream.
    vidgstream = re.search(r'var vidgstream = \"(.*)\"', html.text).group(1)
    params = {
        "idgstream": vidgstream,
        "serverid": "",
    }
    headers.update({
        "Referer": url,
        "Origin": url,
        "Accept-Encoding": "compress"
    })
    resp = http_get(HLS_URL, params=params, headers=headers)
    data = resp.json()
    raw_url = data["rawUrl"]
    # Treat both a JSON null and the literal string "null" as "no stream available".
    if not raw_url or raw_url == 'null':
        return []
    return [raw_url]
Example #11
def dubzalgo(url, nth_iframe=0):
    """
method:
nth iframe
var rSI : string = ""
var tlc : [string]
var mn : int
for each s in tlc:
    b64 = base64.b64decode(s).decode("utf-8")
    str = re.sub('\D', '', b64)
    str_n = int(str)
    str_n -= 61751400
    rSI += chr(str_n)
search_and_format(rSI)
"""
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe_url = soup.find_all("iframe")[nth_iframe].get("src")
    headers.update({"Referer": url})
    html = http_get(iframe_url, headers=headers)
    text = html.text

    # The embed page defines an array of base64 tokens (tlc) and an integer offset (mn).
    regex = r" = \[(.*)\]"
    rSI = ""
    tlc = re.search(regex, text, re.MULTILINE | re.DOTALL).group(1)
    tlc = re.sub(r'\s', '', tlc)
    tlc = tlc.split(",")
    tlc = list(map(lambda x: x.strip('"'), tlc))
    mn = re.search(r"\)\) - (\d+)\);", text).group(1).strip()
    mn = int(mn)
    for s in tlc:
        b64 = base64.b64decode(s).decode("utf-8")
        digits = re.sub(r'\D', '', b64)
        if digits:
            rSI += chr(int(digits) - mn)

    return search_and_format(rSI)
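A worked instance of dubzalgo's decode loop, using the 61751400 offset from the docstring and a token constructed for illustration:

import base64, re

token = "NjE3NTE1MDQ="  # made-up sample; decodes to "61751504"
digits = re.sub(r"\D", "", base64.b64decode(token).decode("utf-8"))
print(chr(int(digits) - 61751400))  # 104 -> 'h'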
def get_urls(url):
    headers = header_random_agent()
    cookies = {}
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_1_url, headers=headers, cookies=cookies)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_2_url = soup.find("iframe").get("src")
    html = http_get(f_iframe_2_url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_3_url = soup.find("iframe").get("src")
    headers.update({"Referer": f_iframe_3_url})
    html = http_get(f_iframe_3_url, headers=headers)
    b64_str = re.search(r"window\.atob\(\"(.*)\"\)", html.text).group(1)
    de_str = base64.b64decode(b64_str).decode("utf-8")
    return [de_str]
Example #13
def get_urls(url):
    urls = []
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_1_url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    try:
        source1 = re.search(r"source: '(.*?)',", html.text).group(1)
        urls.append(source1)
    except Exception:
        pass
    headers.update({"Referer": f_iframe_1_url})
    try:
        f_iframe_2_url = soup.find("iframe").get("src")
        html = http_get(f_iframe_2_url, headers=headers)
        source2 = re.search(r"source: \"(.*?)\",", html.text).group(1)
        urls.append(source2)
    except Exception:
        pass
    return urls