Code example #1
File: source_utils.py Project: po50on/cherry-dev
def is_anime(content, type, type_id):
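    # True when the genres Trakt reports for this item include 'anime' or 'animation'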
    try:
        r = trakt.getGenre(content, type, type_id)
        return 'anime' in r or 'animation' in r
    except:
        log_exception()
        return False
Code example #2
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         result = self.session.get(url).content
         result = result.decode('utf-8')
         h = HTMLParser()
         result = h.unescape(result)
         result = client.parseDOM(result, 'table', attrs={'class': 'table table-bordered'})
         result = client.parseDOM(result, 'tr')
         for item in result:
             try:
                 tabela = client.parseDOM(item, 'td')
                 info = self.get_lang_by_type(tabela[1])
                 quality = tabela[2]
                 if 'wysoka' in quality.lower():  # "wysoka" = Polish for "high"
                     quality = 'HD'
                 else:
                     quality = 'SD'
                 try:
                     video_link = str(client.parseDOM(tabela[0], 'a', ret='href')[0])
                     valid, host = source_utils.is_host_valid(video_link, hostDict)
                     sources.append(
                         {'source': host, 'quality': quality, 'language': info[0], 'url': video_link,
                          'info': info[1], 'direct': False,
                          'debridonly': False})
                 except:
                     continue
             except:
                 continue
         return sources
     except:
         log_exception()
         return sources
Code example #3
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                url = urlparse.urljoin(self.base_link, self.search_link)
                url = url % urllib.quote(str(title).replace(" ", "+"))
                result = client.request(url)
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'col-sm-4'})

                for item in result:
                    link = str(client.parseDOM(item, 'a', ret='href')[0])
                    nazwa = str(client.parseDOM(item, 'a', ret='title')[0])
                    name = cleantitle.normalize(cleantitle.getsearch(nazwa))
                    name = name.replace("  ", " ")
                    title = title.replace("  ", " ")
                    words = title.split(" ")
                    if self.contains_all_words(name,
                                               words) and str(year) in link:
                        return link
        except Exception:
            log_exception()
            return
Code example #4
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            import base64
            import json

            result = client.request(url)
            result = result.decode('utf-8')
            h = HTMLParser()
            result = h.unescape(result)
            tabela = client.parseDOM(result, 'div', attrs={'class': "link-to-video"})
            # items = client.parseDOM(tabela, 'tr')
            for item in tabela:
                try:
                    jezyk = client.parseDOM(item, 'span')[0].replace('<b>', '').replace("</b>", '')
                    jezyk, wersja = self.get_lang_by_type(jezyk)
                    # quality = client.parseDOM(item, 'td')[2]
                    # each link sits base64-encoded in this item's data-iframe attribute
                    link = json.loads(client.parseDOM(item, 'a', ret='data-iframe')[0].decode('base64'))['src']
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    sources.append(
                        {'source': host, 'quality': 'SD', 'language': jezyk, 'url': link, 'info': wersja,
                         'direct': False,
                         'debridonly': False})
                except:
                    pass
            return sources
        except:
            log_exception()
            return sources
Code example #5
def work(items, start, end):
    for item in items[start:end]:
        try:
            h = HTMLParser()
            item = h.unescape(item)
            link = str(client.parseDOM(item, 'a', ret='href')[0])
            title = str(client.parseDOM(item, 'a')[0])
            if title == "Gry": continue  # skip the "Gry" ("Games") category
            try:
                plot = str(client.parseDOM(item, 'a', ret='title')[0])
            except:
                log_exception()
                plot = ""
            fanart, banner, thumb = Tvdb_Scraper(title)
            addon.addDir(title,
                         link,
                         mode=11,
                         banner=banner,
                         thumb=thumb,
                         fanart=fanart,
                         plot=plot,
                         genre="Bajka")  # "Bajka" = Polish for "fairy tale" / cartoon
        except Exception:
            log_exception()
            print('error with item')
Code example #6
 def search(self, titles, season, episode):
     try:
         for title in titles:
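             # Polish log text: "Wyszukiwanie serialu po tytule" = "searching for the series by title",
             # "Znalazlem odcinek" = "found the episode", "Znalazlem serial pod linkiem" = "found the series at link"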
             log("FanFilm.IITVX Wyszukiwanie serialu po tytule: %s" % title)
             headers = {
                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
                 'Referer': self.base_link
             }
             data = {'text': title}
             result = self.session.post(self.search_link, data=data, headers=headers).content
             if result is None:
                 continue
             # build the SxxExx episode tag with zero-padded season and episode numbers
             query = 'S%02dE%02d' % (int(season), int(episode))
             result = client.parseDOM(result, 'div', attrs={'class': 'episodes-list'})
             results = client.parseDOM(result, 'li')
             for result in results:
                 test = client.parseDOM(result, 'span')[1]
                 if query == str(test):
                     log("FanFilm.IITVX Znalazlem odcinek: %s" % query)
                     link = client.parseDOM(result, 'a', ret='href')[0]
                     log("FanFilm.IITVX Znalazlem serial pod linkiem: %s" % link)
                     return link
     except Exception:
         log_exception()
         return
Code example #7
    def search(self, title, localtitle, year, is_movie_search):
        try:

            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                url = self.search_link + str(title)
                result = client.request(url)
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result, 'div', attrs={'class': 'row'})

                for item in result:
                    try:
                        link = str(client.parseDOM(item, 'a', ret='href')[0])
                        if link.startswith('//'):
                            link = "https:" + link
                        nazwa = str(client.parseDOM(item, 'img', ret='alt')[0])
                        name = cleantitle.normalize(cleantitle.getsearch(nazwa))
                        rok = link  # the release year appears in the link URL, so match against it
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        if self.contains_all_words(name, words) and str(year) in rok:
                            return link
                    except:
                        continue
        except Exception:
            log_exception()
            return
Code example #8
File: main.py Project: po50on/cherry-dev
def get_epsiode_link(sess, data):
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'www.animezone.pl',
        'Referer': str(url).replace("http://", "http://www."),
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
    }

    # the statistics beacon appears to be required before the site will serve episode links
    verify = sess.get('http://animezone.pl/images/statistics.gif',
                      headers=headers)
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if '*' not in i.domains]
    # flatten the per-resolver domain lists, lowercase them, and de-duplicate preserving order
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]

    headers = {
        'Host': 'www.animezone.pl',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Accept': '*/*',
        'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
        'Referer': str(url).replace("http://", "http://www."),
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    data = {'data': data}
    response = sess.post(str(url).replace("http://", "https://www."),
                         headers=headers,
                         data=data).content
    try:
        link = client.parseDOM(response, 'a', ret='href')[0]
    except:
        link = client.parseDOM(response, 'iframe', ret='src')[0]

    if not link:
        raise InvalidLink('No link')
    if str(link).startswith('//'):
        link = "http:" + str(link)  # prepend the scheme to protocol-relative links
    try:

        valid, host = source_utils.is_host_valid(str(link), hostDict)
    except Exception as e:
        log_exception()
        raise InvalidLink('Exception {!r}'.format(e))
    if not valid:
        raise InvalidLink('Invalid host')
    return host, link
Code example #9
def ListowanieMeczy():
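    # "ListowanieMeczy" = "listing matches": builds one directory entry per scraped match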
    try:
        url = params['url']
        result = client.request(url)
        result_images = client.parseDOM(result,
                                        'article',
                                        attrs={'class': 'post hentry'})
        result = client.parseDOM(result,
                                 'h2',
                                 attrs={'class': 'post-title entry-title'})

        images = client.parseDOM(result_images, 'a', ret='content')
        links = client.parseDOM(result, 'a', ret='href')
        titles = client.parseDOM(result, 'a', ret='title')
        for item in zip(links, titles, images):
            link = str(item[0])
            title = str(item[1])
            image = str(item[2])
            addon.addDir(title,
                         link,
                         mode='ListowanieLinkow',
                         icon=image,
                         fanart=_default_background)
    except:
        log_exception()
Code example #10
File: main.py Project: po50on/cherry-dev
def Rankingi(counter):
    url = params['url']
    r = client.request(url)

    result = client.parseDOM(
        r,
        'table',
        attrs={
            'class': 'table table-bordered table-striped table-hover ranking'
        })
    linki = client.parseDOM(result, 'a', ret='href')
    nazwy = client.parseDOM(result, 'a')
    n = 1
    try:
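        # indexes with `counter` rather than the loop variable; the bare except
        # below also swallows the eventual IndexError that ends the iteration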
        for link in linki:
            linki[counter] = 'http://animezone.pl' + linki[counter]
            addon.addDir(str(n) + ". " + str(nazwy[counter]).replace(
                "<mark>", "").replace("</mark>", ""),
                         linki[counter],
                         mode=4)
            counter += 1
            n += 1
    except:
        log_exception()
Code example #11
File: iitvx.py Project: po50on/cherry-dev
 def search(self, titles, season, episode):
     try:
         for title in titles:
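             # Polish log text: "Wyszukiwanie serialu po tytule" = "searching for the series by title"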
             log("FanFilm.IITVX Wyszukiwanie serialu po tytule: %s" % title)
             headers = {
                 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
                 'Referer': 'http://iitvx.pl/'
             }
             data = {'text': title}
             result = client.request(self.search_link, post=data, headers=headers)
             if result is None:
                 continue
             # build the SxxExx episode tag with zero-padded season and episode numbers
             query = 'S%02dE%02d' % (int(season), int(episode))
             result = client.parseDOM(result, 'div', attrs={'class':'episodes-list'})
             results = client.parseDOM(result, 'li')
             for result in results:
                 test = client.parseDOM(result, 'span')[1]
                 if query == str(test):
                     log("FanFilm.IITVX Znalazlem odcinek: %s" % query)
                     link = client.parseDOM(result, 'a', ret='href')[0]
                     log("FanFilm.IITVX Znalazlem serial pod linkiem: %s" % link)
                     return link
     except Exception:
         log_exception()
         return
Code example #12
def _basic_request(url, headers=None, post=None, timeout='30', limit=None):
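    # minimal urllib2 (Python 2) request helper: optional headers, POST body, timeout, and size limit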
    try:
        request = urllib2.Request(url, data=post)
        _add_request_header(request, headers or {})
        response = urllib2.urlopen(request, timeout=int(timeout))
        return _get_result(response, limit)
    except:
        log_exception()
Code example #13
 def Link(self, url, s):
     wynik = ''  # "wynik" = Polish for "result"
     try:
         link = self.getVideoLink(url, s)
         wynik = link[0]['url']
     except:
         log_exception()
     return str(wynik)
Code example #14
File: source_utils.py Project: po50on/cherry-dev
def get_size(url):
    try:
        size = client.request(url, output='file_size')
        if size == '0': size = False
        size = convert_size(size)
        return size
    except:
        log_exception()
        return False
Code example #15
File: source_utils.py Project: po50on/cherry-dev
def strip_domain(url):
    try:
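        # keep only the path component ("/..."), dropping any scheme and domain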
        if url.lower().startswith('http') or url.startswith('/'):
            url = re.findall('(?://.+?|)(/.+)', url)[0]
        url = client.replaceHTMLCodes(url)
        url = url.encode('utf-8')
        return url
    except:
        log_exception()
Code example #16
def wyciaganieLinku():
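    # "wyciaganieLinku" = "extracting the link": returns the embedded iframe's src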
    try:
        url = urllib.unquote_plus(params['url'])
        r = client.request(url)
        video_url = client.parseDOM(r, 'iframe', ret='src')[0]
        return video_url
    except:
        log_exception()
        return
Code example #17
 def ListaKanalow(self):
     try:
         s, channelList = self.getList()
     except:
         log_exception()
         return ''
     if len(channelList) < 2:
         return ''
     else:
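         # success returns a (session, JSON string) pair, unlike the '' returned on failure above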
         return s, json.dumps(channelList)
Code example #18
def WyciaganieLinku():
    try:
        url = params['url']
        r = client.request(url)
        video_url = client.parseDOM(r, 'iframe', ret='src')[0]
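        # convert the embed URL into a plain YouTube watch URL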
        video_url = video_url.split("?")[0].replace(
            '/embed/', '/watch?v=').replace('www.youtube', 'youtube')
        return video_url
    except:
        log_exception()
        return
Code example #19
File: source_utils.py Project: po50on/cherry-dev
def aliases_to_array(aliases, filter=None):
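    # `filter`, a country code or list of codes, restricts which aliases' titles are returned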
    try:
        if not filter:
            filter = []
        if isinstance(filter, str):
            filter = [filter]

        return [x.get('title') for x in aliases if not filter or x.get('country') in filter]
    except:
        log_exception()
        return []
Code example #20
    def search(self, title, localtitle, year=''):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))
            self.login()
            for title in titles:

                data = {
                    'type': '1',
                    'search': title + ' ' + year + ' (avi|mkv|mp4)'
                }

                # this POST appears to register the search server-side; the response body itself is unused
                self.session.post('https://tb7.pl/mojekonto/szukaj',
                                  data=data).content

                headers = {
                    'Connection': 'keep-alive',
                    'Cache-Control': 'max-age=0',
                    'Origin': 'https://tb7.pl',
                    'Upgrade-Insecure-Requests': '1',
                    'DNT': '1',
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.66 Mobile Safari/537.36',
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
                    'Referer': 'https://tb7.pl/mojekonto/szukaj/1',
                    'Accept-Language': 'pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7',
                }

                data = {'sort': 'size'}

                # the first POST appears to apply the size sort server-side; the second returns the sorted list
                self.session.post('https://tb7.pl/mojekonto/szukaj/1',
                                  headers=headers,
                                  data=data)
                r = self.session.post('https://tb7.pl/mojekonto/szukaj/1',
                                      headers=headers,
                                      data=data).content

                rows = client.parseDOM(r, 'tr')

                if rows:
                    cookies = self.session.cookies
                    cookies = "; ".join(
                        [str(x) + "=" + str(y) for x, y in cookies.items()])
                    cache.cache_insert('tb7_cookie', cookies)
                    return rows
        except Exception:
            log_exception()
            return
Code example #21
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         result = client.request(url)
         tabela = client.parseDOM(result,
                                  'table',
                                  attrs={'class':
                                         'table table-bordered'})[0]
         tabela = client.parseDOM(tabela, 'tr')
         for item in tabela:
             if 'fa fa-sort' in item:
                 continue
             lang, info = self.get_lang_by_type(str(item))
             url = str(client.parseDOM(item, 'a', ret='href')[0])
             valid, host = source_utils.is_host_valid(url, hostDict)
             if not valid: continue
             if "Wysoka" in item:
                 sources.append({
                     'source': host,
                     'quality': 'HD',
                     'language': lang,
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
             elif "Średnia" in item:
                 sources.append({
                     'source': host,
                     'quality': 'SD',
                     'language': lang,
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
             elif "Niska" in item:
                 sources.append({
                     'source': host,
                     'quality': 'SD',
                     'language': lang,
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         log_exception()
         return sources
Code example #22
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))
            titles.append(title)
            titles.append(localtitle)
            for title in titles:
                try:
                    url = self.search_link + str(title)
                    result = self.session.get(url).content
                    result = result.decode('utf-8')
                    h = HTMLParser()
                    result = h.unescape(result)
                    result = client.parseDOM(result,
                                             'div',
                                             attrs={'class': 'card-body p-2'})

                    for item in result:
                        try:
                            nazwa = re.findall("""Film online: (.*?)\"""",
                                               item)[0]
                            try:
                                nazwa = re.findall(""">(.*?)<""", nazwa)[0]
                            except:
                                pass
                            name = cleantitle.normalize(
                                cleantitle.getsearch(nazwa))
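                            # "Rok wydania filmu online" = "year the film was released online"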
                            rok = re.findall(
                                """Rok wydania filmu online\".*>(.*?)<""",
                                item)[0]
                            item = str(item).replace(
                                "<span style='color:red'>",
                                "").replace("</span>", "")
                            link = re.findall("""href=\"(.*?)\"""", item)[0]
                            if link.startswith('//'):
                                link = "https:" + link
                            name = name.replace("  ", " ")
                            title = title.replace("  ", " ")
                            words = name.split(" ")
                            if self.contains_all_words(
                                    title, words) and str(year) in rok:
                                return link
                        except:
                            continue
                except:
                    continue
        except Exception:
            log_exception()
            return
Code example #23
    def getVideoLink(self, url, s):
        log("WizjaTvApi.getVideoLink")
        urlsTab = []

        data = s.get(url).content
        data = client.parseDOM(data, 'iframe', ret='src')
        log("WizjaTvApi." + str(data))
        for url in data:
            HTTP_HEADER = dict(self.HTTP_HEADER)
            HTTP_HEADER.update({'Referer': url})
            params = dict(self.http_params)
            params['header'] = HTTP_HEADER

            tries = 0
            while tries < 2:
                tries += 1

                if 'porter' in url or 'player' in url:
                    tmp = s.get("http://wizja.tv/" + url).text
                    videoUrl = re.search('src: "(.*?)"', tmp)
                    try:
                        videoUrl = videoUrl.group(1)
                        videoUrl = urllib.unquote(videoUrl).decode('utf8')
                    except:
                        log_exception()
                        videoUrl = ''
                    killUrl = re.search("""<a href="(.*?)" target="_top">Z""",
                                        tmp)
                    try:
                        killUrl = killUrl.group(1)
                        killUrl = urllib.unquote(killUrl).decode('utf8')
                    except:
                        log_exception()
                        killUrl = ''
                    if videoUrl != '':
                        urlTab = re.search(
                            """rtmp:\/\/([^\/]+?)\/([^\/]+?)\/([^\/]+?)\?(.+?)&streamType""",
                            str(videoUrl))
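                        # assemble a Kodi-playable rtmp:// URL (app, playpath, swf, and page params) from the matched groups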
                        xbmc_rtmp = 'rtmp://' + urlTab.group(1) + '/' + urlTab.group(
                            2) + '?' + urlTab.group(4) + \
                                    ' app=' + urlTab.group(2) + '?' + urlTab.group(4) + \
                                    ' playpath=' + urlTab.group(3) + '?' + urlTab.group(4) + \
                                    ' swfVfy=1 flashver=LNX\\25,0,0,12 timeout=25 ' \
                                    'swfUrl=https://wizja.tv/player/StrobeMediaPlayback_v5.swf live=0 ' \
                                    'pageUrl=https://wizja.tv/' + str(url).replace("porter.php?ch", "watch.php?id")
                        urlsTab.append({'name': 'rtmp', 'url': xbmc_rtmp})
                    else:
                        s.get("http://wizja.tv/" + killUrl)
                        continue
                break
        return urlsTab
Code example #24
def PlayMedia(link):
    import resolveurl
    try:
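        # Polish UI text: "Odtwarzanie" = "Playback", "Odpalanie linku..." = "starting the link...",
        # "Błąd odpalania linku!" = "error starting the link!"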
        pDialog = xbmcgui.DialogProgress()
        pDialog.create('Odtwarzanie', 'Odpalanie linku...')
        url = resolveurl.resolve(link)
        if url is False:
            raise ValueError('Nie udało się wyciągnąć linku')
        pDialog.close()
        xbmc.Player().play(str(url))
    except Exception as e:
        pDialog.close()
        xbmcgui.Dialog().ok('Error', 'Błąd odpalania linku! %s' % e)
        log_exception()
Code example #25
 def sources(self, url, hostDict, hostprDict):
     try:
         # data = {'login': self.user_name, 'password': self.user_pass}
         # result = self.session.post('https://zalukaj.com/account.php', headers={'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}, data=data)
         headers = {
             'Cookie':
             '__cfduid=d61b42b729455a590ff291892cb688ea11546349293; PHPSESSID=7u6cbc5pagnhqfm84jgjhg9hc2; __PHPSESSIDS=de81fa674b436a948cb337b7f4d2fa3898bd308c'
         }
         sources = []
         if url is None:
             return sources
         if url.startswith('//'):  # the None check must run before calling startswith
             url = "https:" + url
         result = self.session.get(url, headers=headers).content
         link = "https://zalukaj.com" + str(
             client.parseDOM(result, 'iframe', ret='src')[0]) + "&x=1"
         details = str(
             client.parseDOM(result, 'div', attrs={'class': 'details'})[0])
         lang, info = self.get_lang_by_type(str(details))
         result = self.session.get(link, headers=headers).content
         try:
             url = str(client.parseDOM(result, 'source', ret='src')[0])
             valid, host = source_utils.is_host_valid(url, hostDict)
             sources.append({
                 'source': host,
                 'quality': 'HD',
                 'language': lang,
                 'url': url,
                 'info': info,
                 'direct': True,
                 'debridonly': False
             })
             return sources
         except:
             url = str(client.parseDOM(result, 'iframe', ret='src')[0])
             valid, host = source_utils.is_host_valid(url, hostDict)
             sources.append({
                 'source': host,
                 'quality': 'HD',
                 'language': lang,
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': False
             })
             return sources
     except:
         log_exception()
         return sources
Code example #26
File: source_utils.py Project: po50on/cherry-dev
def is_host_valid(url, domains):
    try:
        host = __top_domain(url)
        hosts = [domain.lower() for domain in domains if host and host in domain.lower()]

        if hosts and '.' not in host:
            host = hosts[0]
        if hosts and any([h for h in ['google', 'picasa', 'blogspot'] if h in host]):
            host = 'gvideo'
        if hosts and any([h for h in ['akamaized','ocloud'] if h in host]):
            host = 'CDN'
        return any(hosts), host
    except:
        log_exception()
        return False, ''
Code example #27
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            data = {
                'fid_name': url[0],
                'sezon': url[1],
                'odcinek': url[2],
                'title': url[0]
            }

            result = requests.post(
                'http://178.19.110.218/forumserialeco/skrypt/szukaj3.php',
                data=data).content
            result = result.decode('utf-8')
            h = HTMLParser()
            result = h.unescape(result)
            if result:
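                # "wersja" = Polish for "version" (the language/dub variant shown next to each link)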
                wersja = re.findall("""wersja: <b>(.*?)<\/b>""", result)
                id = re.findall("""url='(.*?)'""", result)
                for item in zip(wersja, id):
                    try:
                        if item[1]:
                            info = self.get_lang_by_type(item[0])
                            content = client.request(
                                "http://seriale.co/frame.php?src=" + item[1])
                            video_link = str(
                                client.parseDOM(content, 'iframe',
                                                ret='src')[0])
                            valid, host = source_utils.is_host_valid(
                                video_link, hostDict)
                            if valid:
                                sources.append({
                                    'source': host,
                                    'quality': 'SD',
                                    'language': info[0],
                                    'url': video_link,
                                    'info': info[1],
                                    'direct': False,
                                    'debridonly': False
                                })
                            else:
                                continue
                    except:
                        continue
                return sources
        except:
            log_exception()
            return sources
Code example #28
 def sources(self, url, hostDict, hostprDict):
     try:
         # import pydevd
         # pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)
         sources = []
         result = self.session.get(url).content
         result = result.decode('utf-8')
         h = HTMLParser()
         result = h.unescape(result)
         result = client.parseDOM(result,
                                  'div',
                                  attrs={'class': 'tabela_wiersz mb-1'})
         # the 'tabela_text' spans come from the whole result, so one parse serves every row
         test = client.parseDOM(result,
                                'span',
                                attrs={'class': 'tabela_text'})
         for counter, item in enumerate(result):
             try:
                 info = test[(2 + (3 * counter))]
                 info = self.get_lang_by_type(info)
                 quality = test[(1 + (3 * counter))]
                 quality = source_utils.check_sd_url(quality)
                 try:
                     id = re.findall("""ShowMovie\('(.*?)'\)""", item)[0]
                 except:
                     id = re.findall("""ShowSer\('(.*?)'\)""", item)[0]
                 try:
                     host = re.findall("""<\/i> (.*?)<\/span>""", item)[0]
                     if 'serial' in url:
                         id = id + '/s'
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': info[0],
                         'url': id,
                         'info': info[1],
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     continue
             except Exception as e:
                 print(e)
                 continue
         return sources
     except Exception as e:
         print(e)
         log_exception()
         return sources
Code example #29
def ListowanieLinkow():
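    # "ListowanieLinkow" = "listing links": adds one playable entry per scraped link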
    try:
        url = params['url']
        result = client.request(url)
        nazwy = client.parseDOM(result, 'a', attrs={'class': 'link-iframe'})
        result = client.parseDOM(result, 'div', attrs={'dir': 'ltr'})[2]
        linki = client.parseDOM(result, 'a', ret='href')[1:len(nazwy) + 1]
        for item in zip(linki, nazwy):
            link = str(item[0])
            title = str(item[1])
            addon.addLink(title,
                          link,
                          mode='OdpalanieLinku',
                          fanart=_default_background)
    except:
        log_exception()
Code example #30
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            url = self.base_link + url
            result = client.request(url)
            title = client.parseDOM(result,
                                    'span',
                                    attrs={'style': 'margin-right: 3px;'})[0]
            lang, info = self.get_lang_by_type(title)
            valid, host = source_utils.is_host_valid(url, hostDict)
            if not valid: return sources
            if "?wersja=1080p" in result:
                sources.append({
                    'source': host,
                    'quality': '1080p',
                    'language': lang,
                    'url': url + "?wersja=1080p",
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            if "?wersja=720p" in result:
                sources.append({
                    'source': host,
                    'quality': 'HD',
                    'language': lang,
                    'url': url + "?wersja=720p",
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            if "?wersja=480p" in result:
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': lang,
                    'url': url + "?wersja=480p",
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            log_exception()
            return sources