Example No. 1
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    html = http_get(iframe.get("src"), headers=headers)
    b64_str = re.search(r"window\.atob\('(.*)'\)", html.text).group(1)
    de_str = base64.b64decode(b64_str).decode("utf-8") 
    return [de_str]
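These scraper examples lean on a few helpers that are never shown: header_random_agent, http_get, and parse_url, plus imports such as re, base64, and BeautifulSoup. A minimal sketch of what they might look like, assuming http_get is a thin wrapper over requests.get and parse_url behaves like urllib.parse.urlparse (the real helpers may differ):

import random
import requests
from urllib.parse import urlparse

# Any realistic pool of desktop User-Agent strings will do here.
_USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0",
]

def header_random_agent():
    # Headers dict with a randomly chosen User-Agent.
    return {"User-Agent": random.choice(_USER_AGENTS)}

def http_get(url, **kwargs):
    # Thin wrapper over requests.get; the examples only use .text, .content,
    # .cookies, .json() and .iter_content() on the returned response.
    return requests.get(url, timeout=30, **kwargs)

def parse_url(url):
    # Provides the .scheme, .netloc and .path attributes the examples rely on.
    return urlparse(url)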
Example No. 2
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    iframe_url = iframe.get("src")
    html = http_get(iframe_url, headers=headers)
    m3u8 = re.search(r"source: \'(.*)\'", html.text).group(1)
    return [m3u8]
Example No. 3
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_url, headers=headers)
    rSI = algo(html.text)
    return [rSI]
Example No. 4
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f"http:{f_iframe_1_url}", headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    source = re.search(r"source:'(.*?)',", html.text).group(1)
    return [source]
Example No. 5
def get_urls(url):
    headers = header_random_agent()
    cookies = {}
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_1_url, headers=headers, cookies=cookies)
    return [wstreamto(html.text)]
Example No. 6
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    first_iframe_url = soup.find("iframe").get("src")
    f_url = parse_url(first_iframe_url)
    m3u8_page_url = "%s://%s%s" % (p_url.scheme, BASE_STREAM, f_url.path)
    headers.update({"Referer": BASE_STREAM_REF})
    html = http_get(m3u8_page_url, headers=headers)
    urls = generic_m3u8_searcher.search(html.text)
    return urls
Example No. 7
def get_all_sources(key):
    headers = header_random_agent()
    headers.update({
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
    })
    url = "%s%s/" % (EVENT_URL, key)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    table = soup.find(class_="table-streams")
    table_body = table.find("tbody")
    rows = table_body.find_all("tr")
    all = []
    for r in rows:
        try:
            streamer_info = r.find("th")
            url = streamer_info.find("a").get("href")
            streamer_name = streamer_info.find(
                class_="media-body").getText().strip()

            columns = r.find_all("td")
            quality = columns[4].getText().strip()
            channel_name = columns[0].getText().strip()
            lang = columns[1].getText().strip()
            all.append({
                "streamer": streamer_name,
                "quality": quality,
                "channel": channel_name,
                "lang": lang,
                "url": url
            })
        except:
            pass
    return all
Example No. 8
def get_all_sources(key):
    headers = header_random_agent()
    headers.update({
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
    })
    url = "%s%s/" % (ROOT_URL, key)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    rows = soup.find_all(class_="MuiTableRow-root jss21 MuiTableRow-hover")

    all = []
    for r in rows:
        url = r.get("href")
        columns = list(r.children)

        streamer = columns[0].getText().strip()
        channel = columns[2].getText().strip()
        lang = columns[4].getText().strip()

        all.append({
            "streamer": streamer,
            "channel": channel,
            "lang": lang,
            "url": url
        })
    return all
Example No. 9
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    b64_str = re.search(r"window\.atob\('(.*)'\)", html.text).group(1)
    de_str = base64.b64decode(b64_str).decode("utf-8")
    return [de_str]
Example No. 10
def get_urls(url, referer = ''):
    headers = header_random_agent()
    if referer != '':
        headers.update({"Referer": referer})
    parsed_url = parse_url(url)
    html = http_get(url, headers=headers)
    return search_and_format(html.text)
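search_and_format is not defined in these snippets either. Judging from Example No. 25 below, it scans the page source for stream URLs and absolutizes protocol-relative ones; a hypothetical minimal version:

import re

def search_and_format(text, scheme="https"):
    # Hypothetical helper: find quoted .m3u8 URLs and prefix protocol-relative
    # "//host/..." forms with a scheme, mirroring the pattern in Example No. 25.
    urls = re.findall(r"""["']([^"']+?\.m3u8[^"']*)["']""", text)
    formatted = [u if not u.startswith("//") else "{}:{}".format(scheme, u) for u in urls]
    return list(dict.fromkeys(formatted))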
Example No. 11
def get_all_events():
    html = http_get(ROOT_URL)
    soup = BeautifulSoup(html.text, "html.parser")
    container = soup.find(class_="timeline-left")
    els = container.find_all("div")

    all = []
    league = None
    match = {}
    for el in els:
        classes = el.get("class")
        is_league = "timeline-breaker" in classes
        is_details = "timeline-item" in classes
        is_time = "timeline-start-time" in classes

        if is_league:
            league = el.getText().strip()
        elif is_details:
            match = parse_match(el)
        elif is_time:
            time = el.getText().strip()
            match["name"] += " [{}]".format(time)
            match["league"] = league
            all.append(match)

    return all
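get_all_events depends on a parse_match helper that is not included. Only its return shape matters here: a dict with at least a "name" key that later gets the start time and league attached. A plausible stand-in (the real site markup may differ):

def parse_match(el):
    # Hypothetical parser for a "timeline-item" element; assumes it holds the
    # match title as text and (optionally) a link to the event page.
    link = el.find("a")
    return {
        "name": el.getText().strip(),
        "url": link.get("href") if link else None,
    }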
Example No. 12
def get_urls(url):
    header = header_random_agent()
    cookie = None
    for i in range(5):
        html = http_get(url, headers=header)
        cookie = get_sucuri_cookie(html.text)
        if cookie is not None:
            break
        time.sleep(random.uniform(0.5, 2))
    if cookie is None:
        return []
    cookies_jar = requests.cookies.RequestsCookieJar()
    cookies_jar.set(cookie["name"], cookie["value"], path=cookie["path"])
    html = http_get(url, headers=header, cookies=cookies_jar)
    urls = generic_m3u8_searcher.search(html.text)
    return urls
Example No. 13
def get_urls(url):
    urls = []
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_1_url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    try:
        source1 = re.search(r"file\": \"(.*?)\"", html.text).group(1)
        urls.append(source1)
    except:
        pass
    return urls
Example No. 14
    def get_target_system_image(self, target: Target, out_dir: str,
                                progress: Progress):
        # https://api.foundries.io/projects/<factory>/lmp/builds/<build-numb>/runs/<machine>/<image-name>-<machine>.wic.gz

        image_base_url = target['custom']['origUri'] if 'origUri' in target[
            'custom'] else target['custom']['uri']
        image_machine = target['custom']['hardwareIds'][0]
        image_filename = target['custom']['image-file']

        base_url = image_base_url.replace('https://ci.foundries.io',
                                          self.api_base_url)
        image_url = os.path.join(base_url, 'runs', image_machine,
                                 image_filename)
        os_release_url = os.path.join(base_url, 'runs', image_machine,
                                      'os-release')

        image_file_path = os.path.join(out_dir, image_filename)
        extracted_image_file_path = (image_file_path[:-len('.gz')]
                                     if image_file_path.endswith('.gz')
                                     else image_file_path)

        p = Progress(2, progress)

        if not os.path.exists(extracted_image_file_path):
            logger.info(
                'Downloading Target system image...; Target: {}, image: {}'.
                format(target.name, image_filename))

            image_resp = http_get(image_url, headers=self._auth_headers)
            with open(image_file_path, 'wb') as image_file:
                for data_chunk in image_resp.iter_content(chunk_size=65536):
                    image_file.write(data_chunk)
            p.tick()

            logger.info(
                'Extracting Target system image: {}'.format(image_file_path))
            subprocess.check_call(['gunzip', '-f', image_file_path])
            p.tick()
        else:
            logger.info(
                'Target system image has been already downloaded: {}'.format(
                    extracted_image_file_path))

        release_resp = requests.get(os_release_url, headers=self._auth_headers)
        if release_resp.ok:
            try:
                release_info = self.Release.parse(
                    dict([
                        line.split('=')
                        for line in release_resp.content.decode().splitlines()
                    ]))
            except Exception as exc:
                logger.error(
                    'Failed to parse a received information about LmP release: '
                    + str(exc))
                release_info = self.Release(0, '')  # or just `raise` ???
        else:
            release_info = self.Release(0, '')
            logger.info('Missing info about LmP release.')

        return extracted_image_file_path, release_info
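The Progress helper used above is not shown. From the call sites (Progress(2, progress) and p.tick()) it appears to count completed sub-steps against a declared total; a rough stand-in, with the parent forwarding being purely an assumption:

class Progress:
    # Minimal stand-in inferred from usage; the real class is not shown.
    def __init__(self, total, parent=None):
        self.total = total
        self.done = 0
        self.parent = parent

    def tick(self):
        self.done += 1
        print("progress: {}/{}".format(self.done, self.total))
        if self.parent:
            self.parent.tick()  # assumption: nested progress rolls up to the parent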
Example No. 15
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    iframe_url = "%s://%s/%s" % (p_url.scheme, p_url.netloc, iframe.get("src"))
    return generic_m3u8_searcher.get_urls(iframe_url)
Example No. 16
def nth_iframe_get_urls(url, nth_iframe = 0):
    headers = header_random_agent()
    cookies = {}
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe_url = soup.find_all("iframe")[nth_iframe].get("src")
    return get_urls(iframe_url)
Example No. 17
def get_urls(url):
    p_url = parse_url(url)
    headers = header_random_agent()
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    angel = re.search(r"angelthump\.com/(.*?)/embed", html.text)
    headers.update({
        "Referer": url,
        "Origin": url,
    })
    if angel:
        angel_id = angel.group(1)
        return [M3U8_URL % angel_id]
    else:
        xyz = soup.find(allowfullscreen="true")
        xyz_url = "%s:%s" % (p_url.scheme, xyz.get("src"))
        html = http_get(xyz_url, headers=headers)
        return xyzembed(html.text)
Example No. 18
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe = soup.find("iframe")
    iframe_url = iframe.get("src")
    if iframe_url.startswith("//"):
        iframe_url = "https:{}".format(iframe_url)
    return generic_m3u8_searcher.get_urls(iframe_url)
Example No. 19
    def pull_manifest(self, uri):
        registry_jwt_token = self.__get_registry_jwt_token(uri.repo, uri.app)
        manifest_url = '{}/v2/{}/{}/manifests/{}'.format(self.registry_url, uri.repo, uri.app, uri.digest)
        manifest_resp = http_get(manifest_url,
                                 headers={'authorization': 'bearer {}'.format(registry_jwt_token['token']),
                                          'accept': 'application/vnd.oci.image.manifest.v1+json'})
        rec_hash = hashlib.sha256(manifest_resp.content).hexdigest()
        if rec_hash != uri.hash:
            raise Exception("Incorrect manifest hash; expected: {}, received: {}".format(uri.hash, rec_hash))
        return manifest_resp.content
Example No. 20
def get_urls(url):
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    vidgstream = re.search(r'var vidgstream = \"(.*)\"', html.text).group(1)
    params = {
        "idgstream": vidgstream,
        "serverid": "",
    }
    headers.update({
        "Referer": url,
        "Origin": url,
        "Accept-Encoding": "compress"
    })
    resp = http_get(HLS_URL, params=params, headers=headers)
    json = resp.json()
    rawUrl = json["rawUrl"]
    if rawUrl == 'null':
        return []
    return [rawUrl]
Example No. 21
    def __get_registry_jwt_token(self, repo, app):
        user_pass = '{}:{}'.format('ci-script-client', self._token)
        headers = {'Authorization': 'Basic ' + base64.b64encode(user_pass.encode()).decode()}

        params = {
            'service': 'registry',
            'scope': 'repository:{}/{}:pull'.format(repo, app)
        }

        token_req = http_get(self.auth_endpoint, headers=headers, params=params)
        return token_req.json()
Example No. 22
    def _get_targets(self):
        target_resp = http_get(self.targets_endpoint,
                               headers=self._auth_headers)
        resp = target_resp.json()
        # A temporary workaround to switch from old format (a TUF compliant signed targets) to a new
        # format (a simple dictionary of targets).  Will be removed after an ota-lite change.
        targets = resp.get('signed', {}).get('targets', None)
        if targets is None:
            targets = resp
        # end of workaround
        return targets
Example No. 23
def dubzalgo(url, nth_iframe=0):
    """
method:
nth iframe
var rSI : string = ""
var tlc : [string]
var mn : int
for each s in tlc:
    b64 = base64.b64decode(s).decode("utf-8")
    str = re.sub('\D', '', b64)
    str_n = int(str)
    str_n -= 61751400
    rSI += chr(str_n)
search_and_format(rSI)
"""
    headers = header_random_agent()
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    iframe_url = soup.find_all("iframe")[nth_iframe].get("src")
    headers.update({"Referer": url})
    html = http_get(iframe_url, headers=headers)
    text = html.text

    regex = r" = \[(.*)\]"
    rSI = ""
    tlc = re.search(regex, text, re.MULTILINE | re.DOTALL).group(1)
    tlc = re.sub(r'\s', '', tlc)
    tlc = tlc.split(",")
    tlc = list(map(lambda x: x.strip('"'), tlc))
    mn = re.search(r"\)\) - (\d+)\);", text).group(1).strip()
    mn = int(mn)
    for s in tlc:
        b64 = base64.b64decode(s).decode("utf-8")
        str = re.sub(r'\D', '', b64)
        if (str):
            str_n = int(str)
            str_n -= mn
            rSI += chr(str_n)

    return search_and_format(rSI)
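A tiny self-contained check of the decode step the docstring describes, using a synthetic chunk instead of real page data: a character code is offset by mn, wrapped in junk letters, and base64-encoded, then recovered by stripping non-digits and subtracting the offset.

import base64
import re

mn = 61751400
# Obfuscate the letter "h" (code 104) the way the page does.
chunk = base64.b64encode("x{}y".format(104 + mn).encode()).decode()
digits = re.sub(r"\D", "", base64.b64decode(chunk).decode("utf-8"))
assert chr(int(digits) - mn) == "h"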
Example No. 24
    def download_manifest(self, image_uri):
        repo, app, digest = self.parse_image_uri(image_uri)
        registry_jwt_token = self.__get_registry_jwt_token(repo, app)
        manifest_url = '{}/v2/{}/{}/manifests/{}'.format(
            self.registry_url, repo, app, digest)
        manifest_resp = http_get(
            manifest_url,
            headers={
                'authorization':
                'bearer {}'.format(registry_jwt_token['token']),
                'accept': 'application/vnd.oci.image.manifest.v1+json'
            })
        return json.loads(manifest_resp.content)
Example No. 25
def get_urls(url):
    headers = header_random_agent()
    parsed_url = parse_url(url)
    html = http_get(url, headers=headers)
    urls = search(html.text)
    formatted = []
    for u in urls:
        if u.startswith("//"):
            formatted.append("%s:%s" % (parsed_url.scheme, u))
        else:
            formatted.append(u)
    no_duplicates = list(dict.fromkeys(formatted))
    return no_duplicates
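The search helper above (and the generic_m3u8_searcher.search used in several earlier examples) is not shown. A minimal guess is a regex scan for quoted .m3u8 URLs in the page source; the real implementation may catch more patterns:

import re

M3U8_RE = re.compile(r"""["']([^"']+?\.m3u8[^"']*)["']""")

def search(text):
    # Return every quoted URL containing ".m3u8" found in the page source.
    return M3U8_RE.findall(text)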
Example No. 26
def get_urls(url):
    headers = header_random_agent()
    cookies = {}
    p_url = parse_url(url)
    html = http_get(url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_1_url = soup.find("iframe").get("src")
    headers.update({"Referer": url})
    html = http_get(f_iframe_1_url, headers=headers, cookies=cookies)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_2_url = soup.find("iframe").get("src")
    html = http_get(f_iframe_2_url, headers=headers)
    cookies.update(html.cookies)
    soup = BeautifulSoup(html.text, 'html.parser')
    f_iframe_3_url = soup.find("iframe").get("src")
    headers.update({"Referer": f_iframe_3_url})
    html = http_get(f_iframe_3_url, headers=headers)
    b64_str = re.search(r"window\.atob\(\"(.*)\"\)", html.text).group(1)
    de_str = base64.b64decode(b64_str).decode("utf-8")
    return [de_str]
Example No. 27
    def pull_layer(self, image_uri, layer_digest, token=None):
        if not token and image_uri.factory:
            registry_jwt_token = self.__get_registry_jwt_token(image_uri.factory, image_uri.app)
            token = registry_jwt_token['token']

        layer_url = '{}/v2/{}/blobs/{}'.format(self.registry_url, image_uri.name, layer_digest)
        archive_resp = http_get(layer_url, headers={'authorization': 'bearer {}'.format(token)})
        layer_hash = layer_digest[len('sha256:'):]
        rec_hash = hashlib.sha256(archive_resp.content).hexdigest()
        if rec_hash != layer_hash:
            raise Exception("Incorrect layer blob hash; expected: {}, received: {}".format(layer_hash, rec_hash))

        return archive_resp.content
Example No. 28
def get_all_events():
    html = http_get(ROOT_URL)
    soup = BeautifulSoup(html.text, "html.parser")
    schedules = soup.find_all(class_="responsive-table-wrap")
    captions = soup.find_all(class_="table-caption")
    all = []
    for (caption, schedule) in zip(captions, schedules):
        league = caption.getText().strip()
        table_body = schedule.find("tbody")
        matches = table_body.find_all("tr")
        for match in matches:
            parsed_match = parse_match(match)
            parsed_match["league"] = league
            all.append(parsed_match)
    return all
Example No. 29
def get_all_sources(key):
    headers = header_random_agent()
    headers.update({
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
    })
    url = "%s%s/" % (EVENT_URL, key)
    html = http_get(url, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    rows = soup.find_all("tr")
    if len(rows) == 0:
        return []
    all = []
    for r in rows:
        try:
            columns = r.find_all("td")
            if len(columns) == 3:
                streamer = columns[0].getText().strip()
                quality = columns[2].string
                channel = columns[1]
                channel_name = channel.string
                url = channel.find("a").get("href")
                all.append({
                    "streamer": streamer,
                    "channel": channel_name,
                    "url": url
                })
            else:
                streamer = columns[0].getText().strip()
                quality = columns[1].string
                channel = columns[2]
                channel_name = channel.string
                url = channel.find("a").get("href")
                lang = columns[5].string
                all.append({
                    "streamer": streamer,
                    "quality": quality,
                    "channel": channel_name,
                    "lang": lang,
                    "url": url
                })
        except:
            pass
    return all
Example No. 30
    def download_layers(self, image_uri, manifest=None):
        if not manifest:
            manifest = self.download_manifest(image_uri)

        repo, app, digest = self.parse_image_uri(image_uri)
        registry_jwt_token = self.__get_registry_jwt_token(repo, app)

        layer_archives = []
        for layer in manifest['layers']:
            layer_url = '{}/v2/{}/{}/blobs/{}'.format(self.registry_url, repo,
                                                      app, layer['digest'])
            archive_resp = http_get(layer_url,
                                    headers={
                                        'authorization':
                                        'bearer {}'.format(
                                            registry_jwt_token['token'])
                                    })
            layer_archives.append(archive_resp.content)
        return layer_archives