Example no. 1
 def resolve(self, url, headers):
     if "vidnext" in url:
         page = net.http(url, headers=headers)
         tree = htmlement.fromstring(page)
         iframe = tree.find(".//iframe[@id='embedvideo_main']")
         if iframe is not None:
             headers["referer"] = url
             url = iframe.get("src")
         else:
             up = parse.urlparse(url)
             jsq = dict(parse.parse_qsl(up.query))
             jsurl = "https://%s/ajax.php" % up.netloc
             js = json.loads(
                 net.http(jsurl,
                          params=jsq,
                          referer=url,
                          headers={"x-requested-with": "XMLHttpRequest"}))
             for k in ["source", "source_bk"]:
                 for vid in js.get(k, []):
                     yield net.tokodiurl(vid["file"],
                                         headers={"referer": url})
     up = parse.urlparse(url)
     if "movcloud.net" in url:
         vid = up.path.split("/")[-1]
         jsurl = "https://api.%s/stream/%s" % (up.netloc, vid)
         js = json.loads(net.http(jsurl, referer=url))
         for vid in sorted(js["data"]["sources"],
                           key=itemgetter("height"),
                           reverse=True):
             yield net.tokodiurl(vid["file"], headers={"referer": url})
     else:
         raise Exception("unknown url:%s" % url)
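Note: a minimal, stdlib-only sketch of the query-forwarding step used in the vidnext branch above; the /ajax.php path comes from the snippet, while the example URL and function name are made up for illustration.

from urllib import parse

def ajax_endpoint(embed_url):
    # Reuse the embed page's query string against /ajax.php on the same host,
    # exactly as the resolver above does before calling net.http.
    up = parse.urlparse(embed_url)
    return "https://%s/ajax.php" % up.netloc, dict(parse.parse_qsl(up.query))

# hypothetical URL, for illustration only
url, params = ajax_endpoint(
    "https://host.example/streaming.php?id=MTIz&title=x")
# url    -> 'https://host.example/ajax.php'
# params -> {'id': 'MTIz', 'title': 'x'}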
Example no. 2
def itermedias(chfilter, isadaptive=True):
    if chfilter:
        found = False
        for selcukurl, url, chname in iteratechannels():
            if chfilter == normalize(chname):
                found = True
                break
        if found:
            links = url.split("#")
            up = parse.urlparse(links[0])
            chdict = dict(parse.parse_qsl(up.query))
            if "id" in chdict:
                subpage = net.http(links[0], referer=selcukurl, cache=None)
                olmusmu = re.findall(rgx1, subpage)
                if len(olmusmu) == 2:
                    # olmusmu = [parse.unquote(x) for x in olmusmu]
                    keslan = re.search(rgx6, subpage)
                    kourl = "%s://%s/%s" % (up.scheme, up.netloc,
                                            keslan.group(1))
                    kopage = net.http(kourl, referer=selcukurl, cache=None)
                    bases = [
                        base64.b64decode(x).decode()
                        for x in re.findall(rgx4, kopage)
                    ]
                    _radardom = bases.pop(-1)
                    selcukdom = bases.pop(-1)
                    for base in bases:
                        media = "https://" + base + selcukdom + "/i/" + olmusmu[
                            1] + "/" + chdict[
                                "id"] + "/playlist.m3u8" + olmusmu[0]
                        yield net.hlsurl(media,
                                         headers={"referer": url},
                                         adaptive=isadaptive)
Example no. 3
def itermedias(chid, chids=None, adaptive=True):
    if not chids:
        chids = [chid]
    for chid in chids:
        url = domain + chid
        up = parse.urlparse(url)
        chid = up.path.split("/")[-1]
        subpage = htmlement.fromstring(net.http(url, referer=domain))
        embedlink = subpage.find(".//iframe").get("src")
        embedpage = htmlement.fromstring(net.http(embedlink, referer=url))
        script = embedpage.find(".//script[@id='v']")
        jsurl = "%s://%s/embed/%s" % (up.scheme, up.netloc, chid)
        data = {"e": 1, "id": script.get("data-i")}
        scode = net.http(jsurl,
                         referer=embedlink,
                         data=data,
                         headers={"x-requested-with": "XMLHttpRequest"},
                         method="POST")
        url = None
        scode = scode.replace("-", "+")
        scode = scode.replace("_", "/")
        scode = scode[::-1]
        for suffix in ["", "=", "=="]:
            try:
                url = base64.b64decode(scode + suffix)
                break
            except Exception:
                continue
        if url:
            url = url.decode()
            yield net.hlsurl(url,
                             headers={"referer": domain},
                             adaptive=adaptive)
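Note: the loop above retries base64 padding suffixes until one decodes; an equivalent stdlib-only sketch (the function name is illustrative) undoes the URL-safe alphabet, the reversal and the missing padding in one step.

import base64

def decode_reversed_b64(scode):
    # Undo the URL-safe alphabet and the reversal used by the embed page,
    # then pad to a multiple of 4 so b64decode accepts it directly.
    s = scode.replace("-", "+").replace("_", "/")[::-1]
    return base64.b64decode(s + "=" * (-len(s) % 4)).decode()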
Example no. 4
def iterprogrammes(chname=None, chid=None):
    link = None
    if chid:
        link = "%s%s-yayin-akisi" % (domain, chid)
    elif chname:
        link = find(chname)
    if link:
        subpage = net.http(link, referer=domain, cache=5)
        apilink = re.search("kanal_detay\:\s?(?:\"|\')(.+?)(?:\"|\')", subpage)
        dslug = re.search("data-slug\=\s?(?:\"|\')(.+?)(?:\"|\')", subpage)
        if apilink and dslug:
            js = json.loads(
                net.http(apilink.group(1) + dslug.group(1), referer=domain))
            for i in range(len(js["content"])):
                try:
                    nextstart = todate(js["content"][i + 1]["brod_start"])
                except IndexError:
                    nextstart = None
                start = todate(js["content"][i]["brod_start"])
                end = todate(js["content"][i]["brod_end"])
                if nextstart is not None and (end is None or
                                              (end is not None
                                               and end <= start)):
                    end = nextstart
                if start and end:
                    yield programme(js["content"][i]["name"],
                                    start,
                                    end,
                                    categories=[
                                        js["content"][i]["type"],
                                        js["content"][i]["type2"]
                                    ])
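Note: the index juggling above exists only to borrow the next programme's start time when an end time is missing or inconsistent; a compact sketch of the same pairing idea over plain dicts (field names are illustrative):

def with_end_times(entries):
    # Walk consecutive pairs; the successor's start becomes the fallback end.
    for cur, nxt in zip(entries, entries[1:] + [None]):
        end = cur.get("end")
        if nxt is not None and (end is None or end <= cur["start"]):
            end = nxt["start"]
        if end:
            yield cur["name"], cur["start"], end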
Example no. 5
def itermedias(ctvcid, ctvcids=None):
    if not ctvcids:
        ctvcids = [ctvcid]
    for ctvcid in ctvcids:
        u = domain + "/" + ctvcid
        iframe1 = htmlement.fromstring(net.http(
            u, referer=domain)).find(".//iframe").get("src")
        iframe2 = htmlement.fromstring(net.http(
            iframe1, referer=u)).find(".//iframe").get("src")
        src = net.http(iframe2, referer=iframe1)
        media = re.search(
            "file[\s\t]*?\:[\s\t]*?atob\((?:\"|\')(.+?)(?:\"|\')\)", src)
        if media:
            yield net.hlsurl(base64.b64decode(media.group(1)).decode(),
                             headers={"referer": domain})
        else:
            for script in htmlement.fromstring(src).iterfind(".//script"):
                if script.get("src") and "yayin" in script.get("src"):
                    scriptsrc = net.http(script.get("src"), referer=domain)
                    key = re.search(rgxkey, scriptsrc)
                    if key:
                        for link in re.findall(rgxlink, scriptsrc):
                            if "anahtar" in link:
                                link = net.absurl(link, script.get("src"))
                                yield net.hlsurl(link + key.group(1),
                                                 headers={"referer": domain})
                                break
Example no. 6
def iteratechannels():
    xpage = htmlement.fromstring(net.http(domain))
    for ch in iterpage(xpage):
        yield ch
    pagination = xpage.findall(".//ul[@id='sayfalama']/.//a")
    lastpage = pagination[-1].get("href").split("/")[-1]
    for i in range(2, int(lastpage) + 1):
        xpage = htmlement.fromstring(net.http(domain + "/p/%s" % i))
        for ch in iterpage(xpage):
            yield ch
Example no. 7
def iteratechannels():
    entrypage = net.http(girisurl, cache=10)
    url = htmlement.fromstring(entrypage).findall(
        ".//div[@class='sites']/.//a")[0].get("href")
    xpage = htmlement.fromstring(net.http(url, cache=10))
    links = xpage.findall(".//div[@class='channels']/.//div[@id='tab5']/.//a")
    for link in links:
        chname = tools.elementsrc(link.find(".//div[@class='name']"),
                                  exclude=[link.find(".//b")]).strip()
        yield url, link.get("data-url"), chname
Example no. 8
 def resolve(self, url, headers):
     resp = net.http(url, headers=headers, text=False, stream=True)
     up = urlparse(resp.url)
     vid = up.path.split("/")[-1]
     api_url = up.scheme + "://" + up.netloc + "/api/source/" + vid
     data = {"r": "", "d": up.netloc}
     jsdata = net.http(api_url, referer=resp.url, data=data, method="POST")
     for data in sorted(json.loads(jsdata)["data"],
                        key=sorter,
                        reverse=True):
         yield data["file"]
Example no. 9
 def resolve(self, url, headers):
     if "vidlink.org" in url:
         postid = re.search("postID\s?\=\s?(?:\'|\")(.+?)(?:\'|\")", net.http(url, headers=headers))
         up = urlparse(url)
         jsurl = "https://%s/embed/info?postID=%s" % (up.netloc, postid.group(1))
         js = json.loads(net.http(jsurl, referer=url))
         url = js["embed_urls"]
     if "ronemo.com" in url:
         up = urlparse(url)
         jsurl = "https://%s/api/video/get-link?idVid=%s" % (up.netloc, up.path.split("/")[-1])
         js = json.loads(net.http(jsurl, referer=url))
         yield net.tokodiurl("https://hls.ronemo.com/%s" % js["link"], headers={"referer": url})
Example no. 10
def itermedias(chlink, chlinks=None):
    if not chlinks:
        chlinks = [chlink]
    for chlink in chlinks:
        url = domain + chlink
        page = htmlement.fromstring(net.http(url, referer=domain))
        iurl = page.find(".//iframe").get("src")
        ipage = net.http(iurl, referer=url)
        wise = unwise.unwise(*re.findall(rgx, ipage)[0])
        wise = unwise.unwise(*re.findall(rgx, wise)[0])
        wise = unwise.unwise(*re.findall(rgx, wise)[1])
        media = re.search(rgx2, wise.replace("\\", "")).group(1)
        yield net.hlsurl(media, headers={"referer": domain + "/"})
Example no. 11
 def get(self):
     u = "%s/ing/%s" % (domain, self.ysid)
     p = net.http(u, referer=domain)
     iframeu = htmlement.fromstring(p).find(".//iframe").get("src")
     iframep = net.http(iframeu, referer=u)
     m3path = re.search("atob\((?:\"|\')(.+?)(?:\"|\')\)", iframep).group(1)
     for suffix in ["", "=", "=="]:
         try:
             yield net.hlsurl(net.absurl(
                 base64.b64decode(m3path + suffix).decode(), iframeu),
                              headers={"Referer": iframeu})
             break
         except Exception:
             pass
Example no. 12
 def test_thumbnail(self):
     if self.thumbnail:
         self.assertFalse(self.channel.icon is None)
         self.assertFalse(self.channel.icon == "DefaultFolder.png")
         if self.channel.icon.startswith("http"):
             self.assertFalse(
                 net.http(self.channel.icon, method="HEAD") is None)
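Note: net.http(..., method="HEAD") is the project's wrapper; a stdlib-only sketch of the same reachability check used by the test (a best-effort assumption, not the project's implementation):

from urllib import request

def icon_reachable(url, timeout=10):
    # HEAD the thumbnail; any response below 400 (after redirects) counts.
    try:
        req = request.Request(url, method="HEAD")
        with request.urlopen(req, timeout=timeout) as resp:
            return resp.status < 400
    except Exception:
        return False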
Example no. 13
def iterprogrammes(channame):
    prename = predate = None
    for i in range(len(suffixes)):
        pagex = htmlement.fromstring(net.http(url % (channame, suffixes[i])))
        curtxt = pagex.find(".//a[%d]/div[@class='day-date']" % (i + 1)).text
        m1 = re.search("([0-9]+)\s(.+)", curtxt)
        curd = int(m1.group(1))
        curm = trmonmap[m1.group(2).lower().strip()]
        for li in pagex.iterfind(".//div[@class='container']/div/ul/li"):
            ptime = li.find(".//strong")
            pname = li.find(".//p")
            if ptime is not None and pname is not None:
                phour, pmin = ptime.text.split(":")
                phour = int(phour)
                pmin = int(pmin)
                pname = pname.text.strip()
                if pname == "-":
                    continue
                pdate = datetime.datetime(day=curd,
                                          month=curm,
                                          year=now.year,
                                          hour=phour,
                                          minute=pmin,
                                          tzinfo=trtz)
                if prename:
                    yield programme(prename, predate, pdate)
                prename = pname
                predate = pdate
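Note: `trmonmap`, `trtz` and `now` above are globals of the surrounding module; a self-contained sketch of the date parsing they enable (the month map and the fixed +03:00 offset are assumptions about those globals):

import datetime
import re

trmonmap = {"ocak": 1, "şubat": 2, "mart": 3, "nisan": 4, "mayıs": 5,
            "haziran": 6, "temmuz": 7, "ağustos": 8, "eylül": 9,
            "ekim": 10, "kasım": 11, "aralık": 12}
trtz = datetime.timezone(datetime.timedelta(hours=3))

def parse_schedule_time(day_header, hhmm, year):
    # "12 Ocak" + "21:45" -> timezone-aware datetime in the Turkish timezone.
    m = re.search(r"([0-9]+)\s+(.+)", day_header)
    hour, minute = (int(x) for x in hhmm.split(":"))
    return datetime.datetime(year=year,
                             month=trmonmap[m.group(2).lower().strip()],
                             day=int(m.group(1)),
                             hour=hour, minute=minute, tzinfo=trtz)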
Example no. 14
def iterprogrammes(polsatname):
    prevargs = {}
    for pagenum in [1, 2]:
        page = json.loads(
            net.http(epgu + "page%s" % pagenum, cache=None, encoding="utf-8"))
        for channel in page["channels"]:
            if channel["title"] == polsatname:
                for prog in channel["programs"]:
                    cur_start = datetime.fromtimestamp(prog["emissionDate"] /
                                                       1000)
                    if prevargs:
                        kwargs = {"end": cur_start}
                        kwargs.update(prevargs)
                        yield programme(**kwargs)
                    desc = None
                    for k in "description", "preview", "progDescription":
                        desc = prog.get(k)
                        if desc:
                            break
                    prevargs = {
                        "title": prog["title"],
                        "start": cur_start,
                        "icon": prog.get("miniImageUrl"),
                        "desc": desc
                    }
Example no. 15
def iterprogrammes():
    u = "https://www.ssport.tv/yayin-akisi"
    pagex = htmlement.fromstring(net.http(u))
    prename = predate = None
    for day in pagex.iterfind('.//ul[@id="switcher-day-s-sport-2"]/li'):
        datadate = day.get("data-date")
        if datadate is not None:
            curmatch = re.search("([0-9]+)\s(.+?)\s", datadate)
            curd = int(curmatch.group(1))
            curm = trmonmap[curmatch.group(2).lower().strip()]
            for prog in day.iterfind("./ul/li"):
                pdate = prog.find(".//time")
                pname = prog.find(".//h3")
                if pdate is not None and pname is not None:
                    phour, pmin = pdate.get("datetime").split(":")
                    pdate = datetime.datetime(day=curd,
                                              month=curm,
                                              year=now.year,
                                              hour=int(phour),
                                              minute=int(pmin),
                                              tzinfo=trtz)
                    pname = pname.text.strip()
                    if prename:
                        yield programme(prename, predate, pdate)
                    prename = pname
                    predate = pdate
Example no. 16
def itermedias(youtube_chanid, youtube_stream, youtube_sindex):
    try:
        u = "https://m.youtube.com/%s/videos?view=2&flow=list&live_view=501&" % youtube_chanid
        page = net.http(u, useragent=ua, headers={"Cookie": COOKIE})
        try:
            js = json.loads(
                re.search('<div id="initial-data"><!-- (.+?) -->',
                          page).group(1))
        except AttributeError:
            t = re.search("ytInitialData = '(.+?)'", page).group(1)
            js = json.loads(t.encode("utf-8").decode("unicode-escape"))
        streams = js["contents"]["singleColumnBrowseResultsRenderer"]["tabs"][
            1]["tabRenderer"]["content"]["sectionListRenderer"]["contents"][0][
                "itemSectionRenderer"]["contents"]
        sindex = None

        if youtube_stream:
            for sindex, stream in enumerate(streams):
                if youtube_stream(stream):
                    break

        if not sindex and youtube_sindex:
            sindex = youtube_sindex

        if sindex is None:
            sindex = 0
        vid = streams[sindex]["compactVideoRenderer"]["videoId"]
        # icon = js["metadata"]["channelMetadataRenderer"]["avatar"]["thumbnails"][0]["url"]
    except Exception:
        print(traceback.format_exc())
        return
    page = net.http("https://m.youtube.com/watch?v=%s" % vid,
                    useragent=ua,
                    headers={"Cookie": COOKIE})
    pconfig1 = re.search('ytInitialPlayerConfig = (\{.+?\})\;', page)
    if pconfig1:
        js = json.loads(pconfig1.group(1))
        response = json.loads(js["args"]["player_response"])
    else:
        response = json.loads(
            re.search('ytInitialPlayerResponse\s*?\=\s*?(\{.+?\})\;',
                      page).group(1))
    # dash = response["streamingData"].get("dashManifestUrl")
    # if dash:
    #     yield net.mpdurl(dash)
    yield net.hlsurl(response["streamingData"]["hlsManifestUrl"])
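Note: a minimal sketch of the player-response extraction this example relies on; YouTube's inline markup changes regularly, so treat the regex as best-effort rather than a stable API.

import json
import re

def player_response(page):
    # Pull the inline ytInitialPlayerResponse object out of the watch page.
    m = re.search(r"ytInitialPlayerResponse\s*=\s*(\{.+?\})\s*;", page)
    return json.loads(m.group(1)) if m else None

# hls = player_response(page)["streamingData"]["hlsManifestUrl"]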
Example no. 17
def find(chname):
    xpage = htmlement.fromstring(net.http("%s/tv-kanallari" % domain,
                                          cache=60))
    for channel in xpage.iterfind(".//a[@class='channel-card']"):
        div = channel.find(".//div[@class='name']")
        if div is not None and normalize(
                tools.elementsrc(div)) == normalize(chname):
            return channel.get("href")
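Note: `normalize` is a project helper; a plausible stand-in (an assumption, not the project's exact rule) folds accents, lowercases and drops non-alphanumerics so "TRT 1" and "trt1" compare equal.

import re
import unicodedata

def normalize(name):
    # Fold accents to ASCII, lowercase, keep only letters and digits.
    folded = unicodedata.normalize("NFKD", name)
    folded = folded.encode("ascii", "ignore").decode()
    return re.sub(r"[^a-z0-9]+", "", folded.lower())

assert normalize("TRT 1") == normalize("trt1")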
Example no. 18
 def resolve(self, url, headers):
     page = net.http(url, headers=headers)
     if packer.detect(page):
         page = packer.unpack(page)
     for vid in re.findall("file\s*?\:\s*?(?:'|\")(.+?)(?:'|\")", page):
         if vid.endswith(".m3u8") or vid.endswith(".mp4"):
             headers = {"referer": url}
             yield net.tokodiurl(vid, headers=headers)
Example no. 19
def mobile_itermedias(chid, isadaptive=True):
    # https://app.selcuksportsappltf.com/app/belgesel.json
    mdom = "https://app.selcuksportsappltf.com/app/"
    jsu = "%skanal/%s.json" % (mdom, chid)
    js = net.http(jsu, json=True)
    for result in js.get("results", []):
        m3u = result.get("m3u8_url")
        if m3u:
            yield net.hlsurl(m3u, adaptive=isadaptive)
Example no. 20
 def _recorder_thread(timerid, url, fname, startts, endts):
     segmentsdone = []
     hasstarted = False
     stopflag = False
     manifest = re.findall(
         "(http\:\/\/localhost.+)", net.http(url, cache=None))[0]
     fnamenorm = re.sub("[^0-9a-zA-Z]+", "_", fname) + ".ts"
     fullname = os.path.join(cfg.pvrtemp, fnamenorm)
     while True:
         if time.time() > endts or stopflag:
             if fname in iptv.activerecords:
                 iptv.activerecords.pop(fname)
                 tools.copy(fullname, cfg.pvrlocation + "/" + fnamenorm)
                 os.remove(fullname)
                 gui.notify("Recording Finished", "%s is recorded" % fname)
             break
         if not hasstarted:
             gui.notify("Recording Started", "%s is being recorded" % fname)
             hasstarted = True
             iptv.activerecords[fname] = timerid
         lastmanifest = time.time()
         parsed = parser.parse(net.http(manifest, cache=None))
         targetduration = parsed.get("targetduration", 1)
         for segment in parsed.get("segments", []):
             if segment["uri"] not in segmentsdone:  # TO-DO: this can be faster
                 segmentsdone.append(segment["uri"])
                 segmentts = segment.get("current_program_date_time")
                 if segmentts:
                     segmentts = datetime.timestamp(segmentts)  # TO-DO: make sure this is UTC
                     if segmentts < startts:
                         continue
                     elif segmentts > endts:
                         stopflag = True
                         break
                 try:
                     segdata = net.http(segment["uri"], text=False, cache=None).content
                     with open(fullname, "ab") as f:  # TODO: Windows?
                         f.write(segdata)
                 except Exception:
                     # bad segment, skip it
                     print(traceback.format_exc())
             while True:
                 if (time.time() - lastmanifest) > targetduration:
                     break
                 time.sleep(0.5)
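Note: two details from the recorder above, restated as a stdlib-only sketch: a set makes the segment de-duplication that the "TO-DO: this can be faster" comment worries about O(1), and the refresh pause simply waits out one targetduration since the last manifest fetch. The parsed-playlist shape is assumed to match what the project's parser returns.

import time

def new_segments(parsed, seen):
    # Yield only segments not downloaded yet; `seen` is a set of URIs.
    for segment in parsed.get("segments", []):
        if segment["uri"] not in seen:
            seen.add(segment["uri"])
            yield segment

def wait_for_refresh(last_fetch, parsed):
    # Sleep until one target duration has passed since the manifest fetch.
    remaining = parsed.get("targetduration", 1) - (time.time() - last_fetch)
    if remaining > 0:
        time.sleep(remaining)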
Example no. 21
def itermedias(dadyid=None, dadyname=None):
    if not dadyid:
        u = getchanurl(dadyname)
    else:
        u = "%s/embed/stream-%s.php" % (dom, dadyid)
    iframeu = htmlement.fromstring(net.http(u)).find(".//iframe").get("src")
    iframe = net.http(iframeu, referer=u)
    iframeu2 = re.search("iframe\s*?src=(?:\'|\")(.+?)(?:\'|\")",
                         iframe).group(1)
    iframe = net.http(iframeu2, referer=iframeu)
    src = re.findall(mrgx, iframe)
    ref = parse.urlparse(iframeu2)
    ref = "%s://%s/" % (ref.scheme, ref.netloc)
    yield net.hlsurl(src[-1],
                     headers={
                         "Referer": ref,
                         "User-Agent": ua
                     },
                     adaptive=False)
Example no. 22
def itermedias(chid):
    iframeurl = "%s/bm/vid.php?id=%s" % (domain, chid)
    iframesrc = net.http(iframeurl, referer=domain)
    mpd = re.search('var src = "(.+?)"', iframesrc)
    mpdlic = re.search('var myWV = "(.+?)"', iframesrc)
    headers = {"Referer": iframeurl}
    if mpd and mpdlic:
        mpd = net.absurl(base64.b64decode(mpd.group(1)).decode(), iframeurl)
        mpdlic = net.absurl(
            base64.b64decode(mpdlic.group(1)).decode(), iframeurl)
        m = net.mpdurl(mpd, headers, mpdlic, headers.copy())
        yield m
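Note: `net.absurl` and `net.mpdurl` are project wrappers; the decode-and-resolve step itself needs only the standard library (a sketch, with the same variable roles as above):

import base64
from urllib.parse import urljoin

def decode_absolute(b64value, base_url):
    # The page stores the MPD and license URLs base64-encoded and possibly
    # relative; decode, then resolve against the iframe URL.
    return urljoin(base_url, base64.b64decode(b64value).decode())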
Example no. 23
 def resolve(self, url, headers):
     iframe = htmlement.fromstring(net.http(
         url, headers=headers)).find(".//iframe").get("src")
     url2 = self.domain + iframe
     page = net.http(url2, referer=url)
     vidpage = re.search('var\s*?query\s*?=\s*?(?:\'|")(.+?)(?:\'|")\;',
                         page).group(1)
     resp = net.http(self.domain + "/watching" + vidpage,
                     referer=url2,
                     text=False)
     url3 = resp.url.replace("/v/", "/api/source/")
     headers = {"X-Requested-With": "XMLHttpRequest", "referer": resp.url}
     data = {"d": parse.urlparse(url3).netloc, "r": url2}
     js = json.loads(
         net.http(url3, headers=headers, data=data, method="POST"))
     if not js["success"]:
         print("VIDSRC ERROR: %s, %s" % (js["data"], url))
         return
     for vid in js["data"]:
         yield net.tokodiurl(vid["file"], headers={"referer": resp.url})
Example no. 24
 def resolve(self, url, headers):
     if url.endswith(".m3u8"):
         yield net.tokodiurl(url, headers=headers)
     else:
         url = url.replace("/streaming.php", "/ajax.php")
         url = url.replace("/load.php", "/ajax.php")
         headers = {"x-requested-with": "XMLHttpRequest"}
         jpage = net.http(url, headers=headers, referer=self.domain)
         js = json.loads(jpage)
         for src in js["source"]:
             yield net.tokodiurl(src["file"],
                                 headers={"referer": self.domain})
Example no. 25
    def getfile(self):
        location = gui.select("Location", ["Local", "Remote"], multi=False)
        if location == 0:
            path = gui.browse(1, "Select File")
            try:
                with open(path, "r") as f:
                    return f.read()
            except Exception:
                pass
        elif location == 1:
            conf, url = gui.keyboard()
            if conf:
                try:
                    cfg = net.http(url)
                except Exception as e:
                    gui.warn(str(e), "%s url can't be reached" % url)
                    return
Example no. 26
def iterprogrammes(tvid):
    u = "%s/api/stationAirings?stationId=%s" % (domain, tvid)
    for p in json.loads(net.http(u, referer=domain)):
        start = datetime.strptime(p["startTime"],
                                  "%Y-%m-%dT%H:%MZ").replace(tzinfo=UTC)
        end = datetime.strptime(p["endTime"],
                                "%Y-%m-%dT%H:%MZ").replace(tzinfo=UTC)
        img = p["program"].get("preferredImage")
        if img:
            img = img.get("uri")
        if img:
            img = net.absurl(img, imgdomain)
        yield programme(p["program"]["title"],
                        start,
                        end,
                        desc=p["program"].get(
                            "longDescription",
                            p["program"].get("shortDescription", None)),
                        icon=img)
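Note: `UTC` above is a timezone constant from the surrounding module; the same parse with only the standard library looks like this (a sketch, the sample value in the comment is illustrative):

from datetime import datetime, timezone

def parse_airing_time(value):
    # "2024-01-05T18:30Z" -> timezone-aware UTC datetime.
    parsed = datetime.strptime(value, "%Y-%m-%dT%H:%MZ")
    return parsed.replace(tzinfo=timezone.utc)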
Example no. 27
    def download(self,
                 url,
                 params=None,
                 data=None,
                 headers=None,
                 timeout=None,
                 json=None,
                 method="GET",
                 referer=None,
                 useragent=None,
                 encoding=None,
                 verify=None,
                 proxies=None):

        if not (headers and "user-agent" in [x.lower()
                                             for x in headers] or useragent):
            useragent = self._container.useragent
        if not timeout:
            timeout = self._container.httptimeout
        ret = net.http(url, params, data, headers, timeout, json, method,
                       referer, useragent, encoding, verify, proxies)
        return ret
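Note: the guard above checks header names case-insensitively before falling back to the container's default user agent; the same check isolated as a tiny sketch (the function name is illustrative):

def has_user_agent(headers, useragent):
    # True if a UA was passed explicitly or is present under any casing
    # ("User-Agent", "user-agent", ...) among the header names.
    return bool(useragent) or bool(
        headers and any(k.lower() == "user-agent" for k in headers))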
Example no. 28
    def request(self, *args, **kwargs):
        ''' Helper method to make an http query. This method is good enough
        for the vast majority of your needs, including cookie handlers and
        get/post requests; if you need more advanced queries you can also
        use requests or your own implementation.

        Params:
            u: url of the request
            query: dict carrying the url request arguments
            data: dict carrying the values to be posted
            referer: referer for the request header
            binary: bool flag that determines if the returned data should be
                encoded text if set False, or a urllib2.response object
                if set True

        Returns:
            str/urllib2.response: response
        '''
        if "headers" not in kwargs:
            kwargs["headers"] = {"User-agent": self._ua}
        else:
            kwargs["headers"]["User-agent"] = self._ua
        return net.http(*args, **kwargs)
Example no. 29
def getchanurl(chname):
    for a in htmlement.fromstring(
            net.http("%s/24-hours-channels.php" % dom,
                     cache=60 * 24)).iterfind(".//table/.//tr/td/a"):
        if a.find(".//i").tail.replace(" ", "").lower().strip() == chname:
            return net.absurl(a.get("href"), dom)
Example no. 30
 def resolve(self, url, headers):
     for vid in re.finditer("file\s?\:\s?(?:\'|\")(.+?)(?:\'|\")",
                            net.http(url, headers=headers)):
         yield net.tokodiurl(vid.group(1), headers={"referer": url})