Example #1
    def __search(self, titles, year, content):
        try:

            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]  # normalised candidate titles (original + aliases)

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'tab-content clearfix'})

            if content == 'movies':
                r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
            else:
                r = client.parseDOM(r, 'div', attrs={'id': 'series'})

            data = dom_parser.parse_dom(r, 'figcaption')

            for i in data:
                title = i[0]['title']
                title = cleantitle.get(title)
                if title in t:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])
                else:
                    url = dom_parser.parse_dom(i, 'a', req='href')
                    data = client.request(url[0][0]['href'])
                    data = re.findall('<h1><a.+?">(.+?)\((\d{4})\).*?</a></h1>', data, re.DOTALL)[0]
                    if titles[0] in data[0] and year == data[1]: return source_utils.strip_domain(url[0][0]['href'])

            return
        except:
            return
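Note: Example #1, like most snippets in this listing, ends with a bare "except: return" that silently discards every failure. A minimal sketch of a more debuggable pattern, using only the standard library (traceback is not imported by the original scrapers):

    import traceback

    def __search(self, titles, year, content):
        try:
            ...  # scraping logic as above
        except Exception:
            traceback.print_exc()  # surface the parse failure instead of silently returning None
            return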
Example #2
    def __search(self, titles):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'nag'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item-video'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'entry-title'})
            r = dom_parser.parse_dom(r, 'a', req='href')

            for i in r:
                title = i[1]
                title = re.sub('\*(?:.*?)\*', '', title)  # strip *...* annotations before cleaning
                title = cleantitle.get(title)
                if title in t:
                    return source_utils.strip_domain(i[0]['href'])

            return
        except:
            return
Example #3
    def __search(self, search_link, imdb, titles):
        try:
            query = search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'big-list'})
            r = dom_parser.parse_dom(r, 'table', attrs={'class': 'row'})
            r = dom_parser.parse_dom(r, 'td', attrs={'class': 'list-name'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [i.attrs['href'] for i in r if i and cleantitle.get(i.content) in t][0]

            url = source_utils.strip_domain(r)

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'a', attrs={'href': re.compile('.*/tt\d+.*')}, req='href')
            r = [re.findall('.+?(tt\d+).*?', i.attrs['href']) for i in r]
            r = [i[0] for i in r if i]

            return url if imdb in r else None
        except:
            return
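The IMDb cross-check at the end of Example #3 hinges on re.findall('.+?(tt\d+).*?', href) pulling the tt-id out of each link. A quick demonstration (the href value is made up):

    >>> import re
    >>> re.findall('.+?(tt\d+).*?', '/redirect/tt0133093/')
    ['tt0133093']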
Example #4
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            tvshowtitle = data['tvshowtitle']
            localtvshowtitle = data['localtvshowtitle']
            aliases = source_utils.aliases_to_array(eval(data['aliases']))

            url = self.__search([localtvshowtitle] + aliases, data['year'], season)
            if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + aliases, data['year'], season)
            if not url: return

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = dom_parser.parse_dom(r, 'ul', attrs={'class': ['list-inline', 'list-film']})
            r = dom_parser.parse_dom(r, 'li')
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content) for i in r if i]
            r = [(i[0], i[1] if re.compile("^(\d+)$").match(i[1]) else '0') for i in r]
            r = [i[0] for i in r if int(i[1]) == int(episode)][0]

            return source_utils.strip_domain(r)
        except:
            return
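Example #4 re-materialises the alias list with eval(data['aliases']), which will execute any expression smuggled into the url parameter. Assuming the aliases are serialized as a plain Python literal, ast.literal_eval is a safer drop-in (ast is not imported by the original):

    import ast

    aliases = source_utils.aliases_to_array(ast.literal_eval(data['aliases']))  # parses literals only, never executes code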
Example #5
    def __search(self, titles, type, year, season=0, episode=False):
        try:
            years = [str(year), str(int(year) + 1), str(int(year) - 1)]
            years = ['&veroeffentlichung[]=%s' % i for i in years]

            query = self.search_link % (type, urllib.quote_plus(cleantitle.query(titles[0])))
            query += ''.join(years)
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]

            r = self.__proceed_search(query)
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and int(i[2]) == int(season)][0]

            url = source_utils.strip_domain(r)
            if episode:
                r = client.request(urlparse.urljoin(self.base_link, url))
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'season-list'})
                r = dom_parser.parse_dom(r, 'li')
                r = dom_parser.parse_dom(r, 'a', req='href')
                r = [i.attrs['href'] for i in r if i and int(i.content) == int(episode)][0]

                url = source_utils.strip_domain(r)
            return url
        except:
            return
Example #6
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'hosterSiteVideo'})
            r = dom_parser.parse_dom(r, 'li', attrs={'data-lang-key': re.compile('[13]')})
            r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'h4'), 'subbed' if i.attrs['data-lang-key'] == '3' else '') for i in r]
            r = [(i[0][0].attrs['href'], i[1][0].content.lower(), i[2]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?)\s*<br\s*/?>(.+?)$', i[1], re.DOTALL), i[2]) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '', i[3]) for i in r]
            r = [(i[0], i[1], 'HD' if 'hosterhdvideo' in i[2] else 'SD', i[3]) for i in r]

            for link, host, quality, info in r:
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid: continue

                sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #7
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'entries'})

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i.content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #8
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'article')
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'entry-content'})

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i.content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i)
                    if 'google' in i and not urls and directstream.googletag(i): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                    else: host = hoster; direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
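The google/ok.ru/vk dispatch in Example #8 reappears almost verbatim in Examples #29 and #35. A sketch of the same decision table factored into a helper; only the directstream calls already used above are assumed, and the helper name is invented for illustration:

    def _direct_urls(link, fallback_host):
        # return (host, direct, url dicts) for a raw embed link
        if 'google' in link:
            urls = directstream.google(link)
            if not urls and directstream.googletag(link):
                urls = [{'quality': directstream.googletag(link)[0]['quality'], 'url': link}]
            return 'gvideo', True, urls
        if 'ok.ru' in link:
            return 'vk', True, directstream.odnoklassniki(link)
        if 'vk.com' in link:
            return 'vk', True, directstream.vk(link)
        return fallback_host, False, [{'quality': 'SD', 'url': link}]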
Example #9
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data.get('url')
            episode = int(data.get('episode', 1))

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = {'': dom_parser.parse_dom(r, 'div', attrs={'id': 'gerdub'}), 'subbed': dom_parser.parse_dom(r, 'div', attrs={'id': 'gersub'})}

            for info, data in r.iteritems():
                data = dom_parser.parse_dom(data, 'tr')
                data = [dom_parser.parse_dom(i, 'a', req='href') for i in data if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})]
                data = [(link.attrs['href'], dom_parser.parse_dom(link.content, 'img', req='src')) for i in data for link in i]
                data = [(i[0], i[1][0].attrs['src']) for i in data if i[1]]
                data = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in data]
                data = [(i[0], i[1][0]) for i in data if i[1]]

                for link, hoster in data:
                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
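The two-line parse_qs/dict idiom opening Example #9 (and most sources() methods here) just flattens each query value list to its first element. Assuming keys are not repeated, urlparse.parse_qsl yields the same dict in one step (for duplicated keys it would keep the last value rather than the first):

    data = dict(urlparse.parse_qsl(url))  # 'url=/foo&episode=3' -> {'url': '/foo', 'episode': '3'}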
Example #10
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if not url: return sources
            html = client.request(url)
            html = html.replace('\\"', '"')

            sources = self.get_hoster_list(html, sources, hostDict)

            links = dom_parser.parse_dom(html, 'tr', attrs={'id': 'tablemoviesindex2'})

            for i in links:
                try:
                    host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt']
                    host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                    host = host.encode('utf-8')

                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid: continue

                    url = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href']
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')
                    selected_url = client.request(url)
                    links = re.compile('href="(.+?)"', re.DOTALL).findall(selected_url)
                    for link in links:
                        if host.lower() in link.lower():
                            sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources
Example #11
File: movie2z.py Project: mpie/repo
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'mainmenu'})
            r = dom_parser.parse_dom(r, 'li')

            for i in r:
                i = dom_parser.parse_dom(i, 'a')
                i = i[0][0]['href']
                i = client.request(i)
                i = dom_parser.parse_dom(i, 'select', attrs={'id': 'selecthost'})
                i = dom_parser.parse_dom(i, 'option')

                for x in i:
                    hoster = re.search('^\S*', x[1]).group().lower()
                    url = x[0]['value']

                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split('&nbsp;')[0]
            quality, info = source_utils.get_release_quality(quality)

            r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
            r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}), dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id')) for i in r]
            r = [(re.sub(' hd$', '', i[0][0].content.lower()), [x.attrs['data-id'] for x in i[1]]) for i in r if i[0] and i[1]]

            for hoster, id in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'info': ' | '.join(info + ['' if len(id) == 1 else 'multi-part']), 'url': id, 'direct': False, 'debridonly': False, 'checkquality': True})

            return sources
        except:
            return sources
Example #13
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])
            scraper = cfscrape.create_scraper()
            data = scraper.get(query).content
            #data = client.request(query, referer=self.base_link)
            data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
            r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
            r = zip(dom_parser.parse_dom(r, 'a'), dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))

            url = []
            for i in range(len(r)):
                title = cleantitle.get(r[i][0][1])
                title = re.sub('(\d+p|4k|3d|hd|season\d+)', '', title)
                y = r[i][1][1]
                link = r[i][0][0]['href']
                if 'season' in title: continue
                if t == title and y == year:
                    if 'season' in link:
                        url.append(source_utils.strip_domain(link))
                        return url[0]
                    else: url.append(source_utils.strip_domain(link))

            return url
        except:
            return
Example #14
    def __search(self, titles, year, season='0'):
        try:
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            post = {'story': utils.uni2cp(titles[0]), 'titleonly': 3, 'do': 'search', 'subaction': 'search', 'search_start': 1, 'full_search': 0, 'result_from': 1}
            html = client.request(self.base_link, post=post)

            html = html.decode('cp1251').encode('utf-8')

            r = dom_parser.parse_dom(html, 'div', attrs={'id': re.compile('news-id-\d+')})
            r = [(i.attrs['id'], dom_parser.parse_dom(i, 'a', req='href')) for i in r]
            r = [(re.sub('[^\d]+', '', i[0]), dom_parser.parse_dom(i[1], 'img', req='title')) for i in r]
            r = [(i[0], i[1][0].attrs['title'], '') for i in r if i[1]]
            r = [(i[0], i[1], i[2], re.findall(u'(.+?)\s+(\d+)\s+(?:сезон)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1]), i[3]) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0', i[3]) for i in r]
            r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]
            r = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('.*/%s-' % r)}, req='href')[0].attrs['href']

            return source_utils.strip_domain(r)
        except:
            return
Example #15
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data['url']
            episode = data.get('episode')

            r = client.request(urlparse.urljoin(self.base_link, url))

            if episode:
                rel = dom_parser.parse_dom(r, 'a', attrs={'class': 'fstab', 'title': re.compile('Episode %s$' % episode)}, req='data-rel')
                rel = [dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['data-rel']}) for i in rel]
                rel = [i[0].content for i in rel if i]
                r = ' '.join(rel)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': re.compile('s?elink')})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [i.attrs['href'] for i in r]

            for h_url in r:
                valid, hoster = source_utils.is_host_valid(h_url, hostDict)
                if not valid: continue

                sources.append({'source': hoster, 'quality': 'SD', 'language': 'fr', 'url': h_url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #16
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'tab-plot_german'})
            r = dom_parser.parse_dom(r, 'tbody')
            r = dom_parser.parse_dom(r, 'tr')

            for i in r:
                m = re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1])
                if m and m.group().strip():
                    hoster = m.group().strip()
                    link = re.search('(?<=href=\")(.*?)(?=\")', i[1]).group()
                    rel = re.search('(?<=oddCell qualityCell">)(\n.*?)(?=<\/td>)', i[1]).group().strip()
                    quality, info = source_utils.get_release_quality(rel)
                    if not quality:
                        quality = 'SD'

                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #17
    def __search(self, titles, year, imdb):
        try:
            query = self.search_link % (urllib.quote_plus(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie_cell'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'bottom'}), dom_parser.parse_dom(i, 'div', attrs={'class': 'year'})) for i in r]
            r = [(dom_parser.parse_dom(i[0], 'a', req=['href', 'title']), re.findall('[(](\d{4})[)]', i[1][0].content)) for i in r if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1].lower(), i[2]) for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t]

            url = None
            if len(r) > 1:
                for i in r:
                    data = client.request(urlparse.urljoin(self.base_link, i))
                    data = dom_parser.parse_dom(data, 'a', attrs={'name': re.compile('.*/tt\d+.*')}, req='name')
                    data = [re.findall('.+?(tt\d+).*?', d.attrs['name']) for d in data]
                    data = [d[0] for d in data if len(d) > 0 and d[0] == imdb]

                    if len(data) >= 1:
                        url = i
            else:
                url = r[0]

            if url:
                return source_utils.strip_domain(url)
        except:
            return
Example #18
    def __search(self, titles, year, content):
        try:
            t = [cleantitle.get(i) for i in set(titles) if i]

            c = client.request(urlparse.urljoin(self.base_link, self.year_link % int(year)), output='cookie')

            p = urllib.urlencode({'search': cleantitle.query(titles[0])})
            c = client.request(urlparse.urljoin(self.base_link, self.search_link), cookie=c, post=p, output='cookie')
            r = client.request(urlparse.urljoin(self.base_link, self.type_link % content), cookie=c, post=p)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'content'})
            r = dom_parser.parse_dom(r, 'tr')
            r = [dom_parser.parse_dom(i, 'td') for i in r]
            r = [dom_parser.parse_dom(i, 'a', req='href') for i in r]

            r = [(i[0].attrs['href'], i[0].content, i[1].content) for i in r if i]
            x = []
            for i in r:
                if re.search('(?<=<i>\().*$', i[1]):
                    x.append((i[0], re.search('(.*?)(?=\s<)', i[1]).group(), re.search('(?<=<i>\().*$', i[1]).group(), i[2]))
                else:
                    x.append((i[0], i[1], i[1], i[2]))
            r = [i[0] for i in x if (cleantitle.get(i[1]) in t or cleantitle.get(i[2]) in t) and i[3] == year][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #19
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = r.replace('\\"', '"')

            links = dom_parser.parse_dom(r, 'tr', attrs={'id': 'tablemoviesindex2'})

            for i in links:
                try:
                    host = dom_parser.parse_dom(i, 'img', req='alt')[0].attrs['alt']
                    host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                    host = host.encode('utf-8')

                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid: continue

                    url = dom_parser.parse_dom(i, 'a', req='href')[0].attrs['href']
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
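The alt-attribute normalisation in Example #19, host.split()[0].rsplit('.', 1)[0].strip().lower(), reduces a hoster label to its bare name. With a made-up alt value:

    >>> 'NowVideo.sx HD'.split()[0].rsplit('.', 1)[0].strip().lower()
    'nowvideo'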
Example #20
    def __search(self, titles, year, season='0'):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            post = {'story': utils.uni2cp(titles[0]), 'titleonly': 3, 'do': 'search', 'subaction': 'search', 'search_start': 1, 'full_search': 0, 'result_from': 1}
            r = client.request(url, post=post)

            r = r.decode('cp1251').encode('utf-8')

            r = dom_parser.parse_dom(r, 'table', attrs={'class': 'eBlock'})
            r = [(dom_parser.parse_dom(i, 'div', attrs={'class': 'eTitle'}), dom_parser.parse_dom(i[1], 'a', attrs={'href': re.compile('.*\d+_goda/')})) for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a', req='href'), [x.content for x in i[1] if re.match('\d{4}', x.content)][0] if i[1] else '0') for i in r if i[0]]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1]) for i in r if i[0]]
            r = [(i[0], i[1], i[2], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r if i]
            r = [(i[0], i[3][0][0] if i[3] else i[1], i[2]) for i in r]
            r = [(i[0], i[1], i[2], re.findall(u'(.+?)\s+(\d+)\s+(?:сезон)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #21
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'player'})
            r = dom_parser.parse_dom(r, 'iframe', req='src')
            r = client.request(r[0][0]['src'])
            r = dom_parser.parse_dom(r, 'a', attrs={'class': 'play_container'}, req='href')
            r = client.request(r[0][0]['href'])
            url = self.get_link % (re.search('(?<=var id = \")(.*?)(?=\")', r).group(), re.search('(?<=var links = \")(.*?)(?=\")', r).group())
            r = client.request(url)
            r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'articleList'})
            r = dom_parser.parse_dom(r, 'a')

            for i in r:
                if 'http' in i[0]['href']:
                    link = i[0]['href']
                elif 'http' in i[0]['onclick']:
                    link = re.search('http(.*?)(?=\")', i[0]['onclick']).group()
                else:
                    continue

                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid: continue

                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': link, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #22
    def __search(self, titles, year, episode='0'):
        try:
            title = titles[0]
            if int(episode) > 0: title += ' episode %s' % episode

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(urlparse.urljoin(self.base_link, self.search_link) % urllib.quote_plus(cleantitle.query(title)))

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'entries'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'post'})
            r = dom_parser.parse_dom(r, 'h3', attrs={'class': 'title'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:episode)\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(episode) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(episode)][0]

            return source_utils.strip_domain(r)
        except:
            return
Example #23
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            season = data.get('season')
            episode = data.get('episode')

            if season and episode:
                r = urllib.urlencode({'imdbid': data['imdb'], 'language': 'de', 'season': season, 'episode': episode})
                r = client.request(urlparse.urljoin(self.base_link, self.hoster_link), XHR=True, post=r)
            else:
                r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'linkbox'})[0].content
            r = re.compile('(<a.+?/a>)', re.DOTALL).findall(r)
            r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'img', attrs={'class': re.compile('.*linkbutton')}, req='class')) for i in r]
            r = [(i[0][0].attrs['href'], i[1][0].attrs['class'].lower()) for i in r if i[0] and i[1]]
            r = [(i[0].strip(), 'HD' if i[1].startswith('hd') else 'SD') for i in r]

            for url, quality in r:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue

                sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #24
    def __search(self, titles, year, season='0'):
        try:
            query = self.search_link % (urllib.quote_plus(titles[0]))
            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'list_movies'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item_movie'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'tit'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:\s*-?\s*(?:season|s))\s*(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]

            return source_utils.strip_domain(r)
        except:
            return
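The transform chains in Examples #14, #20, #22 and #24 all peel "(year)" and "season N" decorations off result titles with the same findall patterns. The season split at the heart of Example #24, run on a made-up title:

    >>> import re
    >>> re.findall('(.+?)\s+(?:\s*-?\s*(?:season|s))\s*(\d+)', 'some show season 2')
    [('some show', '2')]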
Example #25
    def __proceed_search(self, query):
        r = client.request(query)
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ml-items'})
        r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ml-item'})
        r = dom_parser.parse_dom(r, 'a', attrs={'class': 'ml-name'}, req='href')
        r = [(i.attrs['href'], re.sub('<.+?>|</.+?>', '', i.content).strip()) for i in r if i]
        r = [(i[0], i[1], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1].lower())) for i in r]
        r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
        return r
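__proceed_search returns (href, cleaned title, season) triples; Example #5 filters them by title and season before stripping the domain. A sketch of that calling pattern (wanted_season is an assumed local):

    results = self.__proceed_search(query)
    link = [i[0] for i in results if cleantitle.get(i[1]) in t and int(i[2]) == int(wanted_season)][0]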
Example #26
File: filmix.py Project: mpie/repo
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data.get('url')
            season = data.get('season')
            episode = data.get('episode')
            abs_episode = 0

            if season and episode:
                abs_episode = str(tvmaze.tvMaze().episodeAbsoluteNumber(data.get('tvdb'), int(season), int(episode)))

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = r.decode('cp1251').encode('utf-8')

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'players'}, req='data-player')
            r = [(i.attrs['data-player'], dom_parser.parse_dom(i, 'a', req='href')) for i in r]
            r = [(i[0], i[1][0].attrs['href']) for i in r if i[1]]

            for post_id, play_url in r:
                i = client.request(play_url, referer=url, output='extended')

                headers = i[3]
                headers.update({'Cookie': i[2].get('Set-Cookie')})

                i = client.request(urlparse.urljoin(self.base_link, self.player_link), post={'post_id': post_id}, headers=headers, referer=play_url, XHR=True)
                i = json.loads(i).get('message', {}).get('translations', {}).get('flash', {})

                for title, link in i.iteritems():
                    try:
                        link = self.decode_direct_media_url(link)

                        if link.endswith('.txt'):
                            link = self.decode_direct_media_url(client.request(link))
                            link = json.loads(link).get('playlist', [])
                            link = [i.get('playlist', []) for i in link]
                            link = [x.get('file') for i in link for x in i if (x.get('season') == season and x.get('serieId') == episode) or (x.get('season') == '0' and x.get('serieId') == abs_episode)][0]

                        urls = [(source_utils.label_to_quality(q), self.format_direct_link(link, q)) for q in self.get_qualitys(link)]
                        urls = [{'quality': x[0], 'url': x[1]} for x in urls if x[0] in ['SD', 'HD']]  # filter premium

                        for i in urls: sources.append({'source': 'CDN', 'quality': i['quality'], 'info': title, 'language': 'ru', 'url': i['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Example #27
    def resolve(self, url):
        try:
            if self.base_link in url:
                r = client.request(url)
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'cupe'})
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'reloading'})
                url = dom_parser.parse_dom(r, 'a', req='href')[0].attrs['href']

            return url
        except:
            return
Example #28
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title).replace('-', '+'))
            r = client.request(url, cookie='check=2')
            m = dom_parser.parse_dom(r, 'div', attrs={'class': 'masonry'})
            m = dom_parser.parse_dom(m, 'a', req='href')
            m = [i.attrs['href'] for i in m if i.content == title]
            url = urlparse.urljoin(self.base_link, m[0])
            return url
        except:
            return
Example #29
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'mediaplayer'})
            r = [i.attrs['src'] for i in dom_parser.parse_dom(r, 'iframe', req='src')]

            for i in r:
                try:
                    if 'vidnow.' in i:
                        i = client.request(i, referer=url)

                        gdata = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', i, re.DOTALL)]
                        gdata += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', i, re.DOTALL)]
                        gdata = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in gdata]

                        for u, q in gdata:
                            try:
                                tag = directstream.googletag(u)

                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'de', 'url': u, 'direct': True,'debridonly': False})
                            except:
                                pass

                        i = dom_parser.parse_dom(i, 'div', attrs={'id': 'myElement'})
                        i = dom_parser.parse_dom(i, 'iframe', req='src')[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i)
                    if 'google' in i and not urls and directstream.googletag(i): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                    else: direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #30
    def resolve(self, url):
        try:
            url = client.request(urlparse.urljoin(self.base_link, url), output='geturl')
            if self.base_link not in url:
                return url

            r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
            return dom_parser.parse_dom(r, 'iframe', req='src')[0].attrs['src']
        except:
            return
Example #31
    def resolve(self, url):
        r = client.request(url)
        r = dom_parser.parse_dom(r, 'div', {'class': 'link_under_video'})
        r = dom_parser.parse_dom(r, 'a', req='href')
        return r[0].attrs['href']
Example #32
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            cookie = self.__get_premium_cookie()

            r = client.request(url, mobile=True, cookie=cookie)

            query = urlparse.urljoin(self.base_link, self.part_link)
            id = re.compile('var\s*video_id\s*=\s*"(\d+)"').findall(r)[0]

            p = dom_parser.parse_dom(r,
                                     'a',
                                     attrs={
                                         'class': 'changePart',
                                         'data-part': re.compile('\d+p')
                                     },
                                     req='data-part')

            for i in p:
                i = i.attrs['data-part']

                p = urllib.urlencode({
                    'video_id': id,
                    'part_name': i,
                    'page': '0'
                })
                p = client.request(query,
                                   cookie=cookie,
                                   mobile=True,
                                   XHR=True,
                                   post=p,
                                   referer=url)

                p = json.loads(p)
                p = p.get('part_count', 0)

                for part_count in range(0, p):
                    try:
                        r = urllib.urlencode({
                            'video_id': id,
                            'part_name': i,
                            'page': part_count
                        })
                        r = client.request(query,
                                           cookie=cookie,
                                           mobile=True,
                                           XHR=True,
                                           post=r,
                                           referer=url)

                        r = json.loads(r)
                        r = r.get('part', {})

                        s = r.get('source', '')
                        url = r.get('code', '')

                        if s == 'url' and 'http' not in url:
                            url = self.__decode_hash(url)
                        elif s == 'other':
                            url = dom_parser.parse_dom(url,
                                                       'iframe',
                                                       req='src')
                            if len(url) < 1: continue
                            url = url[0].attrs['src']
                            if '/old/seframer.php' in url:
                                url = self.__get_old_url(url)

                            if 'keepup' in url:
                                print url
                                # needs to be fixed (keepup.gq)
                            elif self.domains[0] in url:
                                url = re.search('(?<=id=).*$', url).group()
                                url = 'https://drive.google.com/file/d/' + url

                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue

                        if i in ['720p', 'HD']: quali = 'HD'
                        elif i in ['1080p', '1440p']: quali = i
                        elif i in ['2160p']: quali = '4K'
                        else: quali = 'SD'

                        urls, host, direct = source_utils.check_directstreams(
                            url, host, quali)

                        for i in urls:
                            sources.append({
                                'source': host,
                                'quality': i['quality'],
                                'language': 'de',
                                'url': i['url'],
                                'direct': direct,
                                'debridonly': False
                            })
                    except:
                        pass

            return sources
        except:
            return sources
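The part-name ladder near the end of Example #32 maps player part labels onto addon quality tags. Read as a lookup table (the dict name is illustrative, not from the source):

    QUALITY_BY_PART = {'720p': 'HD', 'HD': 'HD', '1080p': '1080p', '1440p': '1440p', '2160p': '4K'}

    quali = QUALITY_BY_PART.get(i, 'SD')  # anything unlisted falls back to SD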
Example #33
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = data.get('url')
            episode = int(data.get('episode', 1))

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'streams'})

            rels = dom_parser.parse_dom(r, 'ul', attrs={'class': 'nav'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = dom_parser.parse_dom(
                rels,
                'a',
                attrs={'href': re.compile('#stream_\d*')},
                req='href')
            rels = [(re.findall('stream_(\d+)', i.attrs['href']),
                     re.findall('flag-(\w{2})', i.content)) for i in rels if i]
            rels = [(i[0][0], ['subbed'] if i[1][0] != 'de' else [])
                    for i in rels if i[0] and 'de' in i[1]]

            for id, info in rels:
                rel = dom_parser.parse_dom(r,
                                           'div',
                                           attrs={'id': 'stream_%s' % id})
                rel = [(dom_parser.parse_dom(
                    i, 'div', attrs={'id': 'streams_episodes_%s' % id}),
                        dom_parser.parse_dom(i, 'tr')) for i in rel]
                rel = [(i[0][0].content,
                        [x for x in i[1] if 'fa-desktop' in x.content])
                       for i in rel if i[0] and i[1]]
                rel = [(i[0], dom_parser.parse_dom(i[1][0].content, 'td'))
                       for i in rel if i[1]]
                rel = [(i[0], re.findall('\d{3,4}x(\d{3,4})$',
                                         i[1][0].content)) for i in rel
                       if i[1]]
                rel = [(i[0], source_utils.label_to_quality(i[1][0]))
                       for i in rel if len(i[1]) > 0]

                for html, quality in rel:
                    try:
                        s = dom_parser.parse_dom(
                            html,
                            'a',
                            attrs={
                                'href':
                                re.compile('#streams_episodes_%s_\d+' % id)
                            })
                        s = [(dom_parser.parse_dom(
                            i,
                            'div',
                            attrs={'data-loop': re.compile('\d+')},
                            req='data-loop'), dom_parser.parse_dom(i, 'span'))
                             for i in s]
                        s = [(i[0][0].attrs['data-loop'], [
                            x.content for x in i[1] if '<strong' in x.content
                        ]) for i in s if i[0]]
                        s = [(i[0],
                              re.findall('<.+?>(\d+)</.+?> (.+?)$', i[1][0]))
                             for i in s if len(i[1]) > 0]
                        s = [(i[0], i[1][0]) for i in s if len(i[1]) > 0]
                        s = [(i[0], int(i[1][0]),
                              re.findall('Episode (\d+):',
                                         i[1][1], re.IGNORECASE)) for i in s
                             if len(i[1]) > 1]
                        s = [(i[0], i[1],
                              int(i[2][0]) if len(i[2]) > 0 else -1)
                             for i in s]
                        s = [(i[0], i[2] if i[2] >= 0 else i[1]) for i in s]
                        s = [i[0] for i in s if i[1] == episode][0]

                        enc = dom_parser.parse_dom(
                            html,
                            'div',
                            attrs={
                                'id':
                                re.compile('streams_episodes_%s_%s' % (id, s))
                            },
                            req='data-enc')[0].attrs['data-enc']

                        hosters = dom_parser.parse_dom(
                            html,
                            'a',
                            attrs={
                                'href':
                                re.compile('#streams_episodes_%s_%s' % (id, s))
                            })
                        hosters = [
                            dom_parser.parse_dom(i, 'i', req='class')
                            for i in hosters
                        ]
                        hosters = [
                            re.findall('hoster-(\w+)',
                                       ' '.join([x.attrs['class'] for x in i]))
                            for i in hosters if i
                        ][0]
                        hosters = [(source_utils.is_host_valid(
                            re.sub('(co|to|net|pw|sx|tv|moe|ws|icon)$', '', i),
                            hostDict), i) for i in hosters]
                        hosters = [(i[0][1], i[1]) for i in hosters
                                   if i[0] and i[0][0]]

                        info = ' | '.join(info)

                        for source, hoster in hosters:
                            sources.append({
                                'source': source,
                                'quality': quality,
                                'language': 'de',
                                'url': [enc, hoster],
                                'info': info,
                                'direct': False,
                                'debridonly': False,
                                'checkquality': True
                            })
                    except:
                        pass

            return sources
        except:
            return sources
Example #34
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'tab-pane'})
            r = dom_parser.parse_dom(r, 'iframe', req='src')
            r = [i.attrs['src'] for i in r]

            for i in r:
                try:
                    if 'drama4u' in i or 'k-vid' in i:
                        r = client.request(i, referer=url)
                        r = re.findall(
                            '''var\s*source\s*=\s*\[({.*?})\]\s*;''', r)[0]

                        i = [(match[1], match[0]) for match in re.findall(
                            '''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''',
                            r, re.DOTALL)]
                        i += [(match[0], match[1]) for match in re.findall(
                            '''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''',
                            r, re.DOTALL)]
                        r = [(x[0].replace('\/', '/'),
                              source_utils.label_to_quality(x[1])) for x in i]

                        for u, q in list(set(r)):
                            try:
                                tag = directstream.googletag(u)
                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                            except:
                                pass
                    else:
                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid: continue

                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'ko',
                            'url': i,
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example #35
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'article')
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'entry-content'})

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i.content for i in r]))
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'iframe', req='src')
            ]
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'source', req='src')
            ]

            for i in links:
                try:
                    valid, hoster = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    if 'google' in i:
                        host = 'gvideo'
                        direct = True
                        urls = directstream.google(i)
                    if 'google' in i and not urls and directstream.googletag(i):
                        host = 'gvideo'
                        direct = True
                        urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i:
                        host = 'vk'
                        direct = True
                        urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i:
                        host = 'vk'
                        direct = True
                        urls = directstream.vk(i)
                    else:
                        host = hoster
                        direct = False
                        urls = [{
                            'quality': 'SD',
                            'url': i
                        }]

                    for x in urls:
                        sources.append({
                            'source': host,
                            'quality': x['quality'],
                            'language': 'ko',
                            'url': x['url'],
                            'direct': direct,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example #36
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = data.get('url')
            season = data.get('season')
            episode = data.get('episode')
            abs_episode = 0

            if season and episode:
                abs_episode = str(tvmaze.tvMaze().episodeAbsoluteNumber(
                    data.get('tvdb'), int(season), int(episode)))

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = r.decode('cp1251').encode('utf-8')

            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'players'},
                                     req='data-player')
            r = [(i.attrs['data-player'],
                  dom_parser.parse_dom(i, 'a', req='href')) for i in r]
            r = [(i[0], i[1][0].attrs['href']) for i in r if i[1]]

            for post_id, play_url in r:
                i = client.request(play_url, referer=url, output='extended')

                headers = i[3]
                headers.update({'Cookie': i[2].get('Set-Cookie')})

                i = client.request(urlparse.urljoin(self.base_link,
                                                    self.player_link),
                                   post={'post_id': post_id},
                                   headers=headers,
                                   referer=play_url,
                                   XHR=True)
                i = json.loads(i).get('message', {}).get('translations',
                                                         {}).get('flash', {})

                for title, link in i.iteritems():
                    try:
                        link = self.decode_direct_media_url(link)

                        if link.endswith('.txt'):
                            link = self.decode_direct_media_url(
                                client.request(link))
                            link = json.loads(link).get('playlist', [])
                            link = [i.get('playlist', []) for i in link]
                            link = [
                                x.get('file') for i in link for x in i
                                if (x.get('season') == season
                                    and x.get('serieId') == episode) or (
                                        x.get('season') == '0'
                                        and x.get('serieId') == abs_episode)
                            ][0]

                        urls = [(source_utils.label_to_quality(q),
                                 self.format_direct_link(link, q))
                                for q in self.get_qualitys(link)]
                        urls = [{'quality': x[0], 'url': x[1]}
                                for x in urls if x[0] in ['SD', 'HD']]  # filter premium

                        for i in urls:
                            sources.append({
                                'source': 'CDN',
                                'quality': i['quality'],
                                'info': title,
                                'language': 'ru',
                                'url': i['url'],
                                'direct': True,
                                'debridonly': False
                            })
                    except:
                        pass

            return sources
        except:
            return sources
Example #37
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            content = self.scraper.get(url).content
            cookies = requests.utils.dict_from_cookiejar(self.scraper.cookies)

            content_id = re.findall(
                '\d+',
                dom_parser.parse_dom(content, 'body')[0].attrs['onload'])[0]

            link = self.hoster_link % ('Filme' if 'film' in url else 'Serien')
            if 'film' in url:
                params = self.getParams(content_id, cookies)
            else:
                temp = re.findall('.*staffel\/(\d+).*?(\d+)', url)[0]
                if not self.isEpisodeAvailable(content_id, url, cookies,
                                               temp[0], temp[1]):
                    return sources
                params = self.getParams(content_id,
                                        cookies,
                                        s=temp[0],
                                        e=temp[1])

            content = cache.get(self.scraper.post,
                                4,
                                link,
                                headers=self.getHeader(url),
                                data=params).content

            links = dom_parser.parse_dom(content, 'li')
            links = [(i.attrs['title'], i.attrs['onclick'],
                      dom_parser.parse_dom(i, 'img')[0].attrs['title'],
                      re.findall('/(\d+)', dom_parser.parse_dom(
                          i, 'div', attrs={'class': 'col2'})[0].content)[0])
                     for i in links]

            for hoster, params, quality, mirrorcount in links:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                url_dict = self.get_url_dict(params, url, 'film' in url)
                quality = source_utils.get_release_quality(quality)[0]
                for i in range(1, int(mirrorcount) + 1):
                    url_dict['zm'] = unicode(i)
                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': url_dict.copy(),
                        'direct': False,
                        'debridonly': False,
                        'checkquality': False
                    })

            if len(sources) == 0:
                raise Exception()
            return sources
        except:
            source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
            return sources
Example #38
0
    def __search(self, title, year, season=0, episode=False):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(title)))
            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(title)
            y = [str(year), str(int(year) + 1), str(int(year) - 1), '0']

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'container'})
            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'ml-item-content'})

            f = []
            for i in r:
                _url = dom_parser.parse_dom(i,
                                            'a',
                                            attrs={'class': 'ml-image'},
                                            req='href')[0].attrs['href']

                _title = re.sub('<.+?>|</.+?>', '',
                                dom_parser.parse_dom(i, 'h6')[0].content).strip()
                try:
                    _title = re.search('(.*?)\s(?:staf+el|s)\s*(\d+)', _title,
                                       re.I).group(1)
                except:
                    pass

                _season = '0'

                _year = re.findall(
                    'calendar.+?>.+?(\d{4})', ''.join([
                        x.content for x in dom_parser.parse_dom(
                            i, 'ul', attrs={'class': 'item-params'})
                    ]))
                _year = _year[0] if len(_year) > 0 else '0'

                if season > 0:
                    s = dom_parser.parse_dom(i,
                                             'span',
                                             attrs={'class': 'season-label'})
                    s = dom_parser.parse_dom(s,
                                             'span',
                                             attrs={'class': 'el-num'})
                    if s: _season = s[0].content.strip()

                if t == cleantitle.get(_title) and _year in y and int(
                        _season) == int(season):
                    f.append((_url, _year))
            r = sorted(f, key=lambda i: int(i[1]),
                       reverse=True)  # with year > no year
            r = [i[0] for i in r if i[0]][0]

            url = source_utils.strip_domain(r)
            if episode:
                r = client.request(urlparse.urljoin(self.base_link, url))
                r = dom_parser.parse_dom(r,
                                         'div',
                                         attrs={'class': 'season-list'})
                r = dom_parser.parse_dom(r, 'li')
                r = dom_parser.parse_dom(r, 'a', req='href')
                r = [(i.attrs['href'], i.content) for i in r]
                r = [i[0] for i in r if i[1] and int(i[1]) == int(episode)][0]
                url = source_utils.strip_domain(r)
            return url
        except:
            return
Example #39
0
    def __search(self, imdb, titles, year):
        try:
            q = self.search_link % urllib.quote_plus(
                cleantitle.query(titles[0]))
            q = urlparse.urljoin(self.base_link, q)

            t = [cleantitle.get(i) for i in set(titles) if i]
            y = [str(year), str(int(year) + 1), str(int(year) - 1), '0']

            r = client.request(q)

            r = dom_parser.parse_dom(
                r, 'tr', attrs={'id': re.compile('coverPreview.+?')})
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i,
                                       'div',
                                       attrs={'style': re.compile('.+?')}),
                  dom_parser.parse_dom(i, 'img', req='src')) for i in r]
            r = [(i[0][0].attrs['href'].strip(), i[0][0].content.strip(), i[1],
                  i[2]) for i in r if i[0] and i[2]]
            r = [(i[0], i[1], [
                x.content for x in i[2]
                if x.content.isdigit() and len(x.content) == 4
            ], i[3]) for i in r]
            r = [(i[0], i[1], i[2][0] if i[2] else '0', i[3]) for i in r]
            r = [
                i for i in r if any('us_ger_' in x.attrs['src'] for x in i[3])
            ]
            r = [(i[0], i[1], i[2], [
                re.findall('(\d+)', x.attrs['src']) for x in i[3]
                if 'smileys' in x.attrs['src']
            ]) for i in r]
            r = [(i[0], i[1], i[2], [x[0] for x in i[3] if x]) for i in r]
            r = [(i[0], i[1], i[2], int(i[3][0]) if i[3] else 0) for i in r]
            r = sorted(r, key=lambda x: x[3])[::-1]
            r = [(i[0], i[1], i[2], re.findall('\((.+?)\)$', i[1])) for i in r]
            r = [(i[0], i[1], i[2]) for i in r if not i[3]]
            r = [i for i in r if i[2] in y]
            r = sorted(r, key=lambda i: int(i[2]),
                       reverse=True)  # with year > no year

            r = [(client.replaceHTMLCodes(i[0]), i[1], i[2]) for i in r]

            match = [
                i[0] for i in r if cleantitle.get(i[1]) in t and year == i[2]
            ]

            match2 = [i[0] for i in r]
            match2 = [x for y, x in enumerate(match2) if x not in match2[:y]]
            if not match2: return

            for i in match2[:5]:
                try:
                    if match:
                        url = match[0]
                        break
                    r = client.request(urlparse.urljoin(self.base_link, i))
                    r = re.findall('(tt\d+)', r)
                    if imdb in r:
                        url = i
                        break
                except:
                    pass

            return source_utils.strip_domain(url)
        except:
            return
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))

            links = dom_parser.parse_dom(r, 'table')
            links = [
                i.content for i in links if dom_parser.parse_dom(
                    i, 'span', attrs={'class': re.compile('linkSearch(-a)?')})
            ]
            links = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(links))
            links = [
                dom_parser.parse_dom(i, 'a', req='href') for i in links
                if re.findall('(.+?)\s*\(\d+\)\s*<', i)
            ]
            links = [i[0].attrs['href'] for i in links if i]

            url = re.sub('/streams-\d+', '', url)

            for link in links:
                if '/englisch/' in link: continue

                if link != url:
                    r = client.request(urlparse.urljoin(self.base_link, link))

                quality = 'SD'
                info = []

                detail = dom_parser.parse_dom(r,
                                              'th',
                                              attrs={'class': 'thlink'})
                detail = [
                    dom_parser.parse_dom(i, 'a', req='href') for i in detail
                ]
                detail = [(i[0].attrs['href'],
                           i[0].content.replace('&#9654;', '').strip())
                          for i in detail if i]

                if detail:
                    quality, info = source_utils.get_release_quality(
                        detail[0][1])
                    r = client.request(
                        urlparse.urljoin(self.base_link, detail[0][0]))

                r = dom_parser.parse_dom(r, 'table')
                r = [
                    dom_parser.parse_dom(i, 'a', req=['href', 'title'])
                    for i in r if not dom_parser.parse_dom(i, 'table')
                ]
                r = [(l.attrs['href'], l.attrs['title']) for i in r for l in i
                     if l.attrs['title']]

                info = ' | '.join(info)

                for stream_link, hoster in r:
                    valid, hoster = source_utils.is_host_valid(
                        hoster, hostDict)
                    if not valid: continue

                    direct = False

                    if hoster.lower() == 'gvideo':
                        direct = True

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': stream_link,
                        'info': info,
                        'direct': direct,
                        'debridonly': False,
                        'checkquality': True
                    })

            return sources
        except:
            source_faultlog.logFault(__name__, source_faultlog.tagScrape)
            return sources
Example #41
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']),
                                       int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|\.|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = client.parseDOM(r, 'div', attrs={'class': 'blocks'})[0]
            r = client.parseDOM(r, 'div', attrs={'id': 'post.+?'})
            r = [
                re.findall(
                    '<a href="(.+?)" rel=".+?" title="Permanent Link: (.+?)"',
                    i, re.DOTALL) for i in r
            ]

            hostDict = hostprDict + hostDict

            items = []

            for item in r:
                try:
                    t = item[0][1]
                    t = re.sub('(\[.*?\])|(<.+?>)', '', t)
                    t1 = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '', t)

                    if not cleantitle.get(t1) == cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        t)[-1].upper()

                    if not y == hdlr: raise Exception()

                    data = client.request(item[0][0])
                    data = client.parseDOM(
                        data,
                        'div',
                        attrs={'class': 'post-content clear-block'})[0]
                    data = dom_parser.parse_dom(data, 'a', req='href')

                    u = [(t, i.attrs['href']) for i in data]
                    items += u

                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    quality, info = source_utils.get_release_quality(
                        name, item[1])

                    url = item[1]
                    if 'https://www.extmatrix.com/files/' not in url:
                        raise Exception()
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('---Crazyhdsource Testing - Exception: \n' +
                          str(failure))
            return sources
Example #42
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, output='extended')

            headers = r[3]
            headers.update({
                'Cookie': r[2].get('Set-Cookie'),
                'Referer': self.base_link
            })
            r = r[0]

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i,
                                          'a',
                                          attrs={'class': 'options'},
                                          req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i[0].content for i in r]))
            links += [
                l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                    i, 'iframe', attrs={'class': 'metaframe'}, req='src')
            ]
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'source', req='src')
            ]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if '/play/' in i: i = urlparse.urljoin(self.base_link, i)

                    if self.domains[0] in i:
                        i = client.request(i, headers=headers, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try:
                                i += jsunpack.unpack(
                                    base64.decodestring(
                                        re.sub('"\s*\+\s*"', '',
                                               x))).replace('\\', '')
                            except:
                                pass

                        for x in re.findall('(eval\s*\(function.*?)</script>',
                                            i, re.DOTALL):
                            try:
                                i += jsunpack.unpack(x).replace('\\', '')
                            except:
                                pass

                        links = [(match[0], match[1]) for match in re.findall(
                            '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                            i, re.DOTALL)]
                        links = [(x[0].replace('\/', '/'),
                                  source_utils.label_to_quality(x[1]))
                                 for x in links if '/no-video.mp4' not in x[0]]

                        doc_links = [
                            directstream.google(
                                'https://drive.google.com/file/d/%s/view' %
                                match)
                            for match in re.findall(
                                '''file:\s*["'](?:[^"']+youtu.be/([^"']+))''',
                                i, re.DOTALL)
                        ]
                        doc_links = [(u['url'], u['quality'])
                                     for x in doc_links if x for u in x]
                        links += doc_links

                        for url, quality in links:
                            if self.base_link in url:
                                url = url + '|Referer=' + self.base_link

                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'de',
                                'url': url,
                                'direct': True,
                                'debridonly': False
                            })
                    else:
                        try:
                            # temporary workaround until urlresolver handles
                            # these googleapis docid urls directly
                            did = re.findall(
                                'youtube.googleapis.com.*?docid=(\w+)', i)
                            if did:
                                i = 'https://drive.google.com/file/d/%s/view' % did[0]

                            valid, host = source_utils.is_host_valid(
                                i, hostDict)
                            if not valid: continue

                            urls, host, direct = source_utils.check_directstreams(
                                i, host)

                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'de',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            episode = data.get('episode')

            r = client.request(url)

            aj = self.__get_ajax_object(r)

            b = dom_parser.parse_dom(r,
                                     'img',
                                     attrs={'class': 'dgvaup'},
                                     req='data-img')[0].attrs['data-img']

            if episode:
                r = dom_parser.parse_dom(r,
                                         'a',
                                         attrs={
                                             'class': 'btn-stream-ep',
                                             'data-episode': episode
                                         },
                                         req=['data-episode', 'data-server'])
            else:
                r = dom_parser.parse_dom(r, 'div', attrs={'id': 'lang-de'})
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie'})
                r = dom_parser.parse_dom(r,
                                         'a',
                                         attrs={'class': 'btn-stream'},
                                         req=['data-episode', 'data-server'])

            r = [(i.attrs['data-episode'], i.attrs['data-server']) for i in r]

            for epi, server in r:
                try:
                    x = {
                        'action': aj.get('load_episodes'),
                        'episode': epi,
                        'pid': aj.get('postid'),
                        'server': server,
                        'nonce': aj.get('nonce'),
                        'b': b
                    }
                    x = client.request(aj.get('ajax_url'),
                                       post=x,
                                       XHR=True,
                                       referer=url)
                    x = json.loads(x)

                    q = source_utils.label_to_quality(x.get('q'))
                    x = json.loads(base64.decodestring(x.get('u')))

                    u = source_utils.evp_decode(x.get('ct'),
                                                base64.decodestring(b),
                                                x.get('s').decode("hex"))
                    u = u.replace('\/', '/').strip('"')

                    valid, host = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue

                    sources.append({
                        'source': host,
                        'quality': q,
                        'language': 'de',
                        'url': u,
                        'direct': False,
                        'debridonly': False,
                        'checkquality': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            c = client.request(url, output='cookie')
            result = client.request(url)

            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result, 'div', attrs={'class': 'item'}, req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page}, cookie=c)
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'): url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

                    if '.asp' not in url: continue

                    result = client.request(url, cookie=c)

                    try:
                        url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                        url = url.replace('https://href.li/?', '')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            if host == 'gvideo':
                                ginfo = directstream.google(url)
                                for g in ginfo: sources.append({'source': host, 'quality': g['quality'], 'language': 'en', 'url': g['url'], 'direct': True, 'debridonly': False})
                            else: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                    except: pass

                    captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                    if not captions: continue

                    matches = [(match[0], match[1]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''', result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]), x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result if not i[1].endswith('.vtt')]

                    for quality, url in result: sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #45
0
    def movie(self, imdb, title, localtitle, aliases, year):
        '''
        Takes movie information and returns a set of name-value pairs, encoded
        as url params. These params include ts (a unique identifier, used to
        grab sources) and a list of source ids

        Keyword arguments:

        imdb -- string - imdb movie id
        title -- string - name of the movie
        localtitle -- string - regional title of the movie
        aliases -- list - alternative titles for the movie
        year -- string - year the movie was released

        Returns:

        url -- string - url encoded params

        '''
        try:
            clean_title = cleantitle.geturl(title).replace('-', '+')
            query = (self.search_path % clean_title)
            url = urlparse.urljoin(self.base_link, query)

            search_response = client.request(url)

            r = client.parseDOM(search_response,
                                'div',
                                attrs={'class': 'row movie-list'})[0]

            r = dom_parser.parse_dom(r, 'a', req='href')
            url = [(i.attrs['href']) for i in r
                   if cleantitle.get(title) in cleantitle.get(i.content)][0]

            r = client.request(url)
            quality = client.parseDOM(r, 'span', attrs={'class': 'quality'})[0]
            r = client.parseDOM(r, 'div', attrs={'class': 'mt row'})[0]
            sources_list = []
            try:
                if client.parseDOM(r, 'div', ret='data-streamgo')[0]:
                    sources_list.append(
                        'https://streamgo.me/player/%s' %
                        client.parseDOM(r, 'div', ret='data-streamgo')[0])
            except Exception:
                pass
            try:
                if client.parseDOM(r, 'div', ret='data-server_openload')[0]:
                    sources_list.append(
                        'https://openload.co/embed/%s' % client.parseDOM(
                            r, 'div', ret='data-server_openload')[0])
            except Exception:
                pass
            data = {
                'imdb': imdb,
                'title': title,
                'localtitle': localtitle,
                'year': year,
                'quality': quality,
                'sources': sources_list
            }
            url = urllib.urlencode(data)

            return url

        except:
            log_utils.log(
                '>>>> %s TRACE <<<<\n%s' %
                (__file__.upper().split('\\')[-1].split('.')[0],
                 traceback.format_exc()), log_utils.LOGDEBUG)
            return
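
A caller consumes the string returned by movie() by decoding the url params
before fetching streams. Below is a minimal usage sketch, not part of the
scraper class, assuming url holds the return value above; note that
urllib.urlencode serializes the sources list with str(), so it round-trips as
the repr of a list and needs ast.literal_eval to become a list again:

    import ast
    import urlparse

    # hypothetical consumer of the params built by movie() above
    data = urlparse.parse_qs(url)
    data = dict((k, v[0]) for k, v in data.items())  # flatten single values

    print data.get('imdb'), data.get('quality')
    # 'sources' was urlencoded without doseq, so it arrives as the repr
    # of a list and must be parsed back into one
    sources_list = ast.literal_eval(data.get('sources', '[]'))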
Example #46
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', {'class': 'repro'})

            r = dom_parser.parse_dom(r[0].content, 'iframe', req='src')
            f = r[0].attrs['src']

            r = client.request(f)
            r = dom_parser.parse_dom(r, 'div', {'id': 'botones'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], urlparse.urlparse(i.attrs['href']).netloc) for i in r]

            links = []

            for u, h in r:
                if 'pelispedia' not in h:
                    valid, host = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue

                    links.append({'source': host, 'quality': 'SD', 'url': u, 'direct': False})
                    continue

                result = client.request(u, headers={'Referer': f}, timeout='10')

                try:
                    if 'pelispedia' in h: raise Exception()

                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)
                    url = [i[0] for i in url if '720' in i[1]][0]

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': False})
                except:
                    pass

                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)

                    for i in url:
                        try:
                            links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True})
                        except:
                            pass
                except:
                    pass

                try:
                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(self.base_link, '/gkphp_flv/plugins/gkpluginsphp.php')
                    url = client.request(url, post=post, XHR=True, referer=u, timeout='10')
                    url = json.loads(url)['link']

                    links.append({'source': 'gvideo', 'quality': 'HD', 'url': url, 'direct': True})
                except:
                    pass

                try:
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({'sou': 'pic', 'fv': '25', 'url': post})

                    url = client.request(self.protect_link, post=post, XHR=True, timeout='10')
                    url = json.loads(url)[0]['url']

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': True})
                except:
                    pass

                try:
                    if not jsunpack.detect(result): raise Exception()

                    result = jsunpack.unpack(result)
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*.*?\'(.+?)\'', url)
                    for i in url:
                        try:
                            i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10')
                            links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i,
                                          'direct': True})
                        except:
                            pass
                except:
                    pass

                try:
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    token = 'eyJjdCI6InZGS3QySm9KRWRwU0k4SzZoZHZKL2c9PSIsIml2IjoiNDRkNmMwMWE0ZjVkODk4YThlYmE2MzU0NDliYzQ5YWEiLCJzIjoiNWU4MGUwN2UwMjMxNDYxOCJ9'
                    post = urllib.urlencode({'sou': 'pic', 'fv': '0', 'url': post, 'token': token})

                    url = client.request(self.protect_link, post=post, XHR=True, timeout='10')
                    js = json.loads(url)
                    url = [i['url'] for i in js]
                    for i in url:
                        try:
                            i = client.request(i, headers={'Referer': f}, output='geturl', timeout='10')
                            links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True})
                        except:
                            pass
                except:
                    pass

            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'language': 'en', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

            return sources
        except:
            return sources
Example #47
0
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        '''
        Takes episode information, finds the ts and list of sources, encodes
        them as name-value pairs, and returns a string of url params

        Keyword arguments:

        url -- string - url params
        imdb -- string - imdb tv show id
        tvdb -- string - tvdb tv show id
        title -- string - episode title
        premiered -- string - date the episode aired (format: year-month-day)
        season -- string - the episode's season
        episode -- string - the episode number

        Returns:

        url -- string - url encoded params

        '''
        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            clean_title = cleantitle.geturl(data['tvshowtitle']).replace(
                '-', '+')
            query = (self.search_path % clean_title)
            url = urlparse.urljoin(self.base_link, query)

            search_response = client.request(url)

            r = client.parseDOM(search_response,
                                'div',
                                attrs={'class': 'row movie-list'})[0]

            r = dom_parser.parse_dom(r, 'a', req='href')
            url = [(i.attrs['href']) for i in r if '%s - Season %01d' %
                   (data['tvshowtitle'], int(season)) in i.content][0]

            r = client.request(url)
            r = client.parseDOM(r, 'div', attrs={'id': 'player'})[0]

            url = client.parseDOM(r, 'a', ret='href')[0]
            film_response = client.request(url)

            servers = client.parseDOM(film_response,
                                      'div',
                                      attrs={'id': 'servers'})[0]
            r = dom_parser.parse_dom(servers, 'a', req='title')

            url = [i for i in r
                   if 'Episode %02d' % int(episode) in i.attrs['title']]
            sources_list = []

            for i in url:
                try:
                    if i.attrs['data-streamgo']:
                        sources_list.append('https://streamgo.me/player/%s' %
                                            i.attrs['data-streamgo'])
                except Exception:
                    pass
                try:
                    if i.attrs['data-openload']:
                        sources_list.append('https://openload.co/embed/%s' %
                                            i.attrs['data-openload'])
                except Exception:
                    pass
            quality = client.parseDOM(film_response,
                                      'span',
                                      attrs={'class': 'quality'})[0]

            data.update({
                'title': title,
                'premiered': premiered,
                'season': season,
                'episode': episode,
                'quality': quality,
                'sources': sources_list
            })

            url = urllib.urlencode(data)

            return url

        except:
            log_utils.log(
                '>>>> %s TRACE <<<<\n%s' %
                (__file__.upper().split('\\')[-1].split('.')[0],
                 traceback.format_exc()), log_utils.LOGDEBUG)
            return
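
episode() works the same way from the caller's side: the incoming url already
carries the show params, and the returned string adds episode-level data. A
minimal round-trip sketch, assuming a scraper instance named s; the imdb/tvdb
ids and titles are placeholders:

    import urllib
    import urlparse

    # hypothetical round trip: show params in, episode params out
    show_url = urllib.urlencode({'tvshowtitle': 'Some Show'})  # assumed input shape
    ep_url = s.episode(show_url, 'tt0000000', '000000', 'Pilot',
                       '2017-01-01', '1', '1')
    if ep_url:
        params = urlparse.parse_qs(ep_url)
        print params.get('quality'), params.get('sources')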
Example #48
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'watch_video'})
            r = [
                i.attrs['data-src']
                for i in dom_parser.parse_dom(r, 'iframe', req='data-src')
            ]

            for i in r:
                try:
                    if 'k-vid' in i:
                        i = client.request(i, referer=url)
                        i = dom_parser.parse_dom(
                            i, 'div', attrs={'class': 'videocontent'})

                        gvid = dom_parser.parse_dom(i, 'source', req='src')
                        gvid = [
                            (g.attrs['src'],
                             g.attrs['label'] if 'label' in g.attrs else 'SD')
                            for g in gvid
                        ]
                        gvid = [(x[0], source_utils.label_to_quality(x[1]))
                                for x in gvid if x[0] != 'auto']

                        for u, q in gvid:
                            try:
                                tag = directstream.googletag(u)
                                if tag:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': tag[0].get('quality', 'SD'),
                                        'language': 'ko',
                                        'url': u,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                else:
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': q,
                                        'language': 'ko',
                                        'url': u,
                                        'direct': True,
                                        'debridonly': False
                                    })
                            except:
                                pass

                        i = dom_parser.parse_dom(i,
                                                 'iframe',
                                                 attrs={'id': 'embedvideo'},
                                                 req='src')[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'ko',
                        'url': i,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i,
                                          'a',
                                          attrs={'class': 'options'},
                                          req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
            r = [(re.findall('link"?\s*:\s*"(.+?)"',
                             ''.join([x.content for x in i])),
                  dom_parser.parse_dom(i,
                                       'iframe',
                                       attrs={'class': 'metaframe'},
                                       req='src')) for i in r]
            r = [
                i[0][0] if i[0] else i[1][0].attrs['src'] for i in r
                if i[0] or i[1]
            ]

            for i in r:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)
                    if not i.startswith('http'): i = self.__decode_hash(i)
                    if 'play.seriesever' in i:
                        i = client.request(i)
                        i = dom_parser.parse_dom(i, 'iframe', req='src')
                        if len(i) < 1: continue
                        i = i[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls, host, direct = source_utils.check_directstreams(
                        i, host)

                    for x in urls:
                        sources.append({
                            'source': host,
                            'quality': x['quality'],
                            'language': 'de',
                            'url': x['url'],
                            'direct': direct,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example #50
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i,
                                          'a',
                                          attrs={'class': 'options'},
                                          req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i[0].content for i in r]))
            links += [
                l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                    i, 'iframe', attrs={'class': 'metaframe'}, req='src')
            ]
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'source', req='src')
            ]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if self.domains[0] in i:
                        i = client.request(i, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try:
                                i += jsunpack.unpack(
                                    base64.decodestring(
                                        re.sub('"\s*\+\s*"', '', x)))
                            except:
                                pass

                        s = re.compile('(eval\(function.*?)</script>',
                                       re.DOTALL).findall(i)

                        for x in s:
                            try:
                                i += jsunpack.unpack(x)
                            except:
                                pass

                        i = [(match[0], match[1]) for match in re.findall(
                            '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                            i, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'),
                              source_utils.label_to_quality(x[1])) for x in i]

                        for url, quality in i:
                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'de',
                                'url': url,
                                'direct': True,
                                'debridonly': False
                            })
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(
                                i, hostDict)
                            if not valid: continue

                            urls = []
                            if 'google' in i:
                                host = 'gvideo'
                                direct = True
                                urls = directstream.google(i)
                                # fall back to the googletag quality probe when
                                # directstream.google returns nothing
                                if not urls and directstream.googletag(i):
                                    urls = [{
                                        'quality': directstream.googletag(i)[0]['quality'],
                                        'url': i
                                    }]
                            elif 'ok.ru' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.vk(i)
                            else:
                                direct = False
                                urls = [{'quality': 'SD', 'url': i}]

                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'de',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Example #51
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url', ''))
            imdb = data.get('imdb')
            season = data.get('season')
            episode = data.get('episode')

            if season and episode and imdb:
                r = urllib.urlencode({
                    'val': 's%se%s' % (season, episode),
                    'IMDB': imdb
                })
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.episode_link),
                                   XHR=True,
                                   post=r)
            else:
                r = client.request(url)

            l = dom_parser.parse_dom(r, 'select', attrs={'id': 'sel_sprache'})
            l = dom_parser.parse_dom(l, 'option', req='id')

            r = [(dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['id']}))
                 for i in l if i.attrs['id'] == 'deutsch']
            r = [(i[0], dom_parser.parse_dom(i[0], 'option', req='id'))
                 for i in r]
            r = [(id.attrs['id'],
                  dom_parser.parse_dom(content,
                                       'div',
                                       attrs={'id': id.attrs['id']}))
                 for content, ids in r for id in ids]
            r = [(re.findall('hd(\d{3,4})',
                             i[0]), dom_parser.parse_dom(i[1], 'a',
                                                         req='href'))
                 for i in r if i[1]]
            r = [(i[0][0] if i[0] else '0', [x.attrs['href'] for x in i[1]])
                 for i in r if i[1]]
            r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]

            for quality, urls in r:
                for link in urls:
                    try:
                        data = urlparse.parse_qs(urlparse.urlparse(link).query,
                                                 keep_blank_values=True)

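                        # some links carry the real target base64-encoded in the 'm' query parameter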
                        if 'm' in data:
                            data = data.get('m')[0]
                            link = base64.b64decode(data)

                        link = link.strip()

                        valid, host = source_utils.is_host_valid(
                            link, hostDict)
                        if not valid: continue

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'de',
                            'url': link,
                            'direct': False,
                            'debridonly': False,
                            'checkquality': True
                        })
                    except:
                        pass

            return sources
        except:
            return sources
Example #52
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = dom_parser.parse_dom(rels,
                                        'a',
                                        attrs={'class': 'options'},
                                        req='href')
            rels = [i.attrs['href'][1:] for i in rels]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

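            # collect candidate links from inline JS ('link'/'file' keys),
            # metaframe iframes and plain <source> tags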
            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i[0].content for i in r]))
            links += [
                l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                    i, 'iframe', attrs={'class': 'metaframe'}, req='src')
            ]
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'source', req='src')
            ]

            for i in set(links):
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if 'videoapi.io' in i:
                        i = client.request(i, referer=url)

                        match = re.findall('videoApiPlayer\((.*?)\);', i)
                        if match:
                            i = client.request(
                                'https://videoapi.io/api/getlink/actionEmbed',
                                post=json.loads(match[0]),
                                XHR=True)
                            i = json.loads(i).get('sources', [])
                            i = [x.get('file', '').replace('\/', '/') for x in i]

                            for x in i:
                                gtag = directstream.googletag(x)
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': gtag[0]['quality'] if gtag else 'SD',
                                    'language': 'ko',
                                    'url': x,
                                    'direct': True,
                                    'debridonly': False
                                })
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(
                                i, hostDict)
                            if not valid: continue

                            urls = []
                            if 'google' in i:
                                host = 'gvideo'
                                direct = True
                                urls = directstream.google(i)
                                # keep the googletag fallback inside the google branch,
                                # so the final else cannot overwrite direct links with SD
                                if not urls and directstream.googletag(i):
                                    urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                            elif 'ok.ru' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i:
                                host = 'vk'
                                direct = True
                                urls = directstream.vk(i)
                            else:
                                direct = False
                                urls = [{'quality': 'SD', 'url': i}]

                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'ko',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Example #53
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            # if (self.user != '' and self.password != ''): #raise Exception()

            # login = urlparse.urljoin(self.base_link, '/login.html')

            # post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})

            # cookie = client.request(login, post=post, output='cookie', close=False)

            # r = client.request(login, post=post, cookie=cookie, output='extended')

            # headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            # else:
            # headers = {}

            headers = {'User-Agent': client.randomagent()}
            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                year = data['year']

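                # keep only results whose cleaned title matches the requested one
                # and return the first matching page link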
                def searchname(r):
                    r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                    r = [
                        i for i in r
                        if cleantitle.get(title) == cleantitle.get(i[1])
                    ]
                    r = [] if r == [] else [i[0] for i in r][0]
                    return r

                if 'tvshowtitle' in data:
                    link = urlparse.urljoin(
                        self.base_link, 'tvshow-%s.html' % title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(
                        r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i))
                             for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                            r = [(i,
                                  re.findall('watch-tvshow-(.+?)-\d+\.html',
                                             i)) for i in r]
                            r = searchname(r)
                            if r != []: break
                else:
                    link = urlparse.urljoin(
                        self.base_link, 'movies-%s.html' % title[0].upper())
                    r = client.request(link, headers=headers)
                    pages = dom_parser.parse_dom(
                        r, 'span', attrs={'class': 'break-pagination-2'})
                    pages = dom_parser.parse_dom(pages, 'a', req='href')
                    pages = [(i.attrs['href']) for i in pages]
                    if pages == []:
                        r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                        r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i))
                             for i in r]
                        r = searchname(r)
                    else:
                        for page in pages:
                            log_utils.log('movie search: scanning pagination pages', log_utils.LOGNOTICE)
                            link = urlparse.urljoin(self.base_link, page)
                            r = client.request(link, headers=headers)
                            r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                            r = [(i,
                                  re.findall('watch-movie-(.+?)-\d+\.html', i))
                                 for i in r]
                            r = searchname(r)
                            if r != []: break

                # leaving old search in, in case streamlord re-enables searching on the site
                # query = urlparse.urljoin(self.base_link, self.search_link)

                # post = urllib.urlencode({'searchapi2': title})

                # r = client.request(query, post=post, headers=headers)

                # if 'tvshowtitle' in data:
                # r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                # r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i)) for i in r]
                # else:
                # r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                # r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i)) for i in r]

                # r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                # r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]
                # r = [i[0] for i in r][0]

                u = urlparse.urljoin(self.base_link, r)
                # retry a few times; the site intermittently answers with a 'failed' page
                for i in range(3):
                    r = client.request(u, headers=headers)
                    if 'failed' not in r: break

                if 'season' in data and 'episode' in data:
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [
                        i for i in r if '-s%02de%02d-' %
                        (int(data['season']),
                         int(data['episode'])) in i.lower()
                    ][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                # 'post' is only defined by the disabled login flow above, so request without it
                r = client.request(r, headers=headers)

            quality = 'HD' if '-movie-' in r else 'SD'

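            # the stream URL is hidden behind a small JS obfuscation: the 'file'
            # value is built by a function that concatenates an array literal,
            # a JS variable and the text of a hidden <span>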
            try:
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall(
                    '\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)',
                    u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urllib.urlencode(headers)
            except:
                try:
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    url = re.findall(
                        r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',
                        r, re.DOTALL)[0][1]

            sources.append({
                'source': 'cdn',
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': True,
                'debridonly': False,
                'autoplay': True
            })

            return sources
        except:
            return sources
Example #54
0
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []
			if url is None: return sources

			req = urlparse.urljoin(self.base_link, url)

			# the site is flaky, so retry the request a few times with a short timeout
			for i in range(4):
				result = client.request(req, timeout=3)
				if result is not None: break

			dom = dom_parser.parse_dom(result, 'div', attrs={'class': 'links', 'id': 'noSubs'})
			result = dom[0].content
			links = re.compile('<i class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch', re.DOTALL).findall(result)
			random.shuffle(links)

			# if a debrid service is enabled, move debrid-resolvable hosts to the front
			if debrid.status():
				debrid_links = []
				for pair in links:
					for r in debrid.debrid_resolvers:
						if r.valid_url('', pair[0].strip()): debrid_links.append(pair)
				links = debrid_links + links

			hostDict = hostDict + hostprDict

			# stop probing once enough sources are found or too many requests were made
			conns = 0
			for pair in links:
				if conns > self.max_conns and len(sources) > self.min_srcs: break

				host = pair[0].strip()
				link = pair[1]

				valid, host = source_utils.is_host_valid(host, hostDict)
				if not valid: continue

				link = urlparse.urljoin(self.base_link, link)
				for i in range(2):
					result = client.request(link, timeout=3)
					conns += 1
					if result is not None: break

				try:
					link = re.compile('href="([^"]+)"\s+class="action-btn').findall(result)[0]
				except:
					continue

				try:
					u_q, host, direct = source_utils.check_directstreams(link, host)
				except:
					continue

				link, quality = u_q[0]['url'], u_q[0]['quality']

				sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': direct, 'debridonly': False})

			return sources
		except:
			failure = traceback.format_exc()
			log_utils.log('WATCHSERIES - Exception: \n' + str(failure))
			return sources
Example #55
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            for item_id, episode, content_type in self.__get_episode(data.get('url'), data.get('episode')):
                stream_link = urlparse.urljoin(self.base_link, '/watch/%s/%s/%s' % (item_id, episode, content_type))
                info = 'subbed' if content_type.endswith('sub') else ''
                r = client.request(stream_link)
                r = dom_parser.parse_dom(r, 'script')
                r = ' '.join([i.content for i in r if i.content])
                r = json.loads(re.findall('var\s*streams\s*=\s*(\[.*?\])\s*;', r)[0])
                # each entry is a URL template ('replace') whose '#' placeholder is filled with 'code'
                r = [(i.get('replace'), i.get('code')) for i in r]
                r = [(i[0].replace('#', i[1])) for i in r if i[0] and i[1]]
                for stream_link in r:
                    if stream_link.startswith('/'):
                        stream_link = 'http:%s' % stream_link
                    if self.domains[0] in stream_link:
                        # request the flash player variant, which lists plain file/width pairs
                        stream_link = client.request(stream_link, cookie=urllib.urlencode({'proxerstream_player': 'flash'}))
                        i = [(match[0], match[1]) for match in re.findall(
                            '''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*width\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''',
                            stream_link, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]
                        for url, quality in i:
                            sources.append({
                                'source': 'cdn',
                                'quality': quality,
                                'language': 'de',
                                'url': url,
                                'info': info,
                                'direct': True,
                                'debridonly': False
                            })
                    else:
                        valid, host = source_utils.is_host_valid(stream_link, hostDict)
                        if not valid: continue
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'de',
                            'url': stream_link,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
            return sources
        except:
            return sources
Example #56
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            mid = re.findall('-(\d+)', url)[-1]

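            # fetch the hoster list for this media id via XHR, then read the
            # quality badge from the info popup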
            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = client.request(u, headers=headers, XHR=True)
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
                u = urlparse.urljoin(self.base_link, self.info_link % mid)
                quality = client.request(u, headers=headers)
                quality = dom_parser.parse_dom(quality, 'div', attrs={'class': 'jtip-quality'})[0].content
                if quality == "HD":
                    quality = "720p"
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            if eid[1] != '6':
                                url = urlparse.urljoin(
                                    self.base_link, self.embed_link % eid[0])
                                link = client.request(url)
                                link = json.loads(link)['src']
                                valid, host = source_utils.is_host_valid(
                                    link, hostDict)
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'info': [],
                                    'direct': False,
                                    'debridonly': False
                                })
                            else:
                                url = urlparse.urljoin(
                                    self.base_link,
                                    self.token_link % (eid[0], mid))
                                script = client.request(url)
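                                # data-server 6 hides its stream behind an obfuscated
                                # token script; recover the x/y request parameters from it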
                                if '$_$' in script:
                                    params = self.uncensored1(script)
                                elif script.startswith(
                                        '[]') and script.endswith('()'):
                                    params = self.uncensored2(script)
                                elif '_x=' in script:
                                    x = re.search('''_x=['"]([^"']+)''',
                                                  script).group(1)
                                    y = re.search('''_y=['"]([^"']+)''',
                                                  script).group(1)
                                    params = {'x': x, 'y': y}
                                else:
                                    raise Exception()

                                u = urlparse.urljoin(
                                    self.base_link, self.source_link %
                                    (eid[0], params['x'], params['y']))
                                r = client.request(u, XHR=True)
                                url = json.loads(r)['playlist'][0]['sources']
                                url = [i['file'] for i in url if 'file' in i]
                                url = [directstream.googletag(i) for i in url]
                                url = [i[0] for i in url if i]

                                for s in url:
                                    if 'lh3.googleusercontent.com' in s['url']:
                                        s['url'] = directstream.googleredirect(
                                            s['url'])

                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': s['quality'],
                                        'language': 'en',
                                        'url': s['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Example #57
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'Module'})
            r = [(r,
                  dom_parser.parse_dom(
                      r,
                      'a',
                      attrs={
                          'href': re.compile('[^\'"]*xrel_search_query[^\'"]*')
                      },
                      req='href'))]
            r = [(i[0], i[1][0].attrs['href'] if i[1] else '') for i in r]

            rels = dom_parser.parse_dom(
                r[0][0],
                'a',
                attrs={'href': re.compile('[^\'"]*ReleaseList[^\'"]*')},
                req='href')
            if rels and len(rels) > 1:
                r = []
                for rel in rels:
                    relData = client.request(
                        urlparse.urljoin(self.base_link, rel.attrs['href']))
                    relData = dom_parser.parse_dom(
                        relData, 'table', attrs={'class': 'release-list'})
                    relData = dom_parser.parse_dom(relData,
                                                   'tr',
                                                   attrs={'class': 'row'})
                    relData = [
                        (dom_parser.parse_dom(
                            i,
                            'td',
                            attrs={
                                'class': re.compile('[^\'"]*list-name[^\'"]*')
                            }),
                         dom_parser.parse_dom(i,
                                              'img',
                                              attrs={'class': 'countryflag'},
                                              req='alt'),
                         dom_parser.parse_dom(i,
                                              'td',
                                              attrs={'class':
                                                     'release-types'}))
                        for i in relData
                    ]
                    relData = [(i[0][0].content, i[1][0].attrs['alt'].lower(),
                                i[2][0].content) for i in relData
                               if i[0] and i[1] and i[2]]
                    relData = [(i[0], i[2]) for i in relData
                               if i[1] == 'deutsch']
                    relData = [(i[0],
                                dom_parser.parse_dom(
                                    i[1],
                                    'img',
                                    attrs={'class': 'release-type-stream'}))
                               for i in relData]
                    relData = [i[0] for i in relData if i[1]]
                    #relData = dom_parser.parse_dom(relData, 'a', req='href')[:3]
                    relData = dom_parser.parse_dom(relData, 'a', req='href')

                    for i in relData:
                        i = client.request(
                            urlparse.urljoin(self.base_link, i.attrs['href']))
                        i = dom_parser.parse_dom(i,
                                                 'div',
                                                 attrs={'id': 'Module'})
                        i = [(i,
                              dom_parser.parse_dom(
                                  i,
                                  'a',
                                  attrs={
                                      'href':
                                      re.compile(
                                          '[^\'"]*xrel_search_query[^\'"]*')
                                  },
                                  req='href'))]
                        r += [(x[0], x[1][0].attrs['href'] if x[1] else '')
                              for x in i]

            r = [(dom_parser.parse_dom(i[0],
                                       'div',
                                       attrs={'id':
                                              'ModuleReleaseDownloads'}), i[1])
                 for i in r]
            r = [(dom_parser.parse_dom(
                i[0][0],
                'a',
                attrs={'class': re.compile('.*-stream.*')},
                req='href'), i[1]) for i in r if len(i[0]) > 0]

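            # the xrel_search_query parameter carries the release name,
            # which drives quality and info detection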
            for items, rel in r:
                rel = urlparse.urlparse(rel).query
                rel = urlparse.parse_qs(rel)['xrel_search_query'][0]

                quality, info = source_utils.get_release_quality(rel)

                items = [(i.attrs['href'], i.content) for i in items]
                items = [(i[0], dom_parser.parse_dom(i[1], 'img', req='src'))
                         for i in items]
                items = [(i[0], i[1][0].attrs['src']) for i in items if i[1]]
                items = [(i[0], re.findall('.+/(.+\.\w+)\.\w+', i[1]))
                         for i in items]
                items = [(i[0], i[1][0]) for i in items if i[1]]

                info = ' | '.join(info)

                for link, hoster in items:
                    valid, hoster = source_utils.is_host_valid(
                        hoster, hostDict)
                    if not valid: continue

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': False,
                        'checkquality': True
                    })

            return sources
        except:
            return sources
Example #58
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
            result = client.request(url, timeout=10)

            dom = dom_parser.parse_dom(result, 'a', req='data-video')
            urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video'] for i in dom]

            for url in urls:
                dom = []
                if 'vidnode.net' in url:
                    result = client.request(url, timeout=10)
                    dom = dom_parser.parse_dom(result, 'source', req=['src', 'label'])
                    dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'], i.attrs['label']) for i in dom if i]
                elif 'openload' in url or 'streamango.com' in url:
                    # both hosts expose the real file the same way: resolve the
                    # <base> href, follow the embed link and read the ifleID variable
                    result = client.request(url, timeout=10)
                    base = re.findall('<base href="([^"]+)">', result)[0]
                    hostDict += [base]
                    dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                    dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                    dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]

                if dom:
                    try:
                        for r in dom:
                            valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                            if not valid: continue
                            quality = source_utils.label_to_quality(r[1])
                            urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                            for x in urls:
                                # only direct links can be sized; previously 'size' was
                                # read before assignment on non-direct links
                                size = source_utils.get_size(x['url']) if direct else None
                                if size: sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False, 'info': size})
                                else: sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    except: pass
                else:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    try:
                        url.decode('utf-8')
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Example #59
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'player'})
            r = [
                i.attrs['src']
                for i in dom_parser.parse_dom(r, 'iframe', req='src')
            ]

            for i in r:
                try:
                    if 'vidnow.' in i:
                        i = client.request(i, referer=url)

                        gdata = [(match[1], match[0]) for match in re.findall(
                            '''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''',
                            i, re.DOTALL)]
                        gdata += [(match[0], match[1]) for match in re.findall(
                            '''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''',
                            i, re.DOTALL)]
                        gdata = [(x[0].replace('\/', '/'),
                                  source_utils.label_to_quality(x[1]))
                                 for x in gdata]

                        for u, q in gdata:
                            try:
                                tag = directstream.googletag(u)

                                if tag:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': tag[0].get('quality', 'SD'),
                                        'language': 'de',
                                        'url': u,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                else:
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': q,
                                        'language': 'de',
                                        'url': u,
                                        'direct': True,
                                        'debridonly': False
                                    })
                            except:
                                pass

                        i = dom_parser.parse_dom(i,
                                                 'div',
                                                 attrs={'id': 'myElement'})
                        i = dom_parser.parse_dom(i, 'iframe',
                                                 req='src')[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    if 'google' in i:
                        host = 'gvideo'
                        direct = True
                        urls = directstream.google(i)
                        # keep the googletag fallback inside the google branch,
                        # so the final else cannot overwrite direct links with SD
                        if not urls and directstream.googletag(i):
                            urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i:
                        host = 'vk'
                        direct = True
                        urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i:
                        host = 'vk'
                        direct = True
                        urls = directstream.vk(i)
                    else:
                        direct = False
                        urls = [{'quality': 'SD', 'url': i}]

                    for x in urls:
                        sources.append({
                            'source': host,
                            'quality': x['quality'],
                            'language': 'de',
                            'url': x['url'],
                            'direct': direct,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example #60
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            hostDict.append('fastload.co')  # seems like the internal host

            url = urlparse.urljoin(self.base_link, url)
            url = url.replace('-online.html', '.html')

            r = client.request(url)
            r = dom_parser.parse_dom(r,
                                     'img',
                                     attrs={'class': 'info-poster-img'},
                                     req=['data-id', 'data-name'])[0]
            id = r.attrs['data-id']
            n = r.attrs['data-name']

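            # the poster's data attributes supply the film id and name for the server-list XHR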
            r = client.request(urlparse.urljoin(self.base_link,
                                                self.server_link),
                               post={
                                   'film_id': id,
                                   'n': n,
                                   'epid': 0
                               },
                               referer=url)
            r = json.loads(r).get('list', '')

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'server-film'})
            r = dom_parser.parse_dom(r,
                                     'a',
                                     attrs={'class': 'btn'},
                                     req='data-id')

            for i in r:
                try:
                    l = client.request(urlparse.urljoin(
                        self.base_link, self.episode_link),
                                       post={'epid': i.attrs['data-id']},
                                       referer=url)
                    l = json.loads(l).get('link', {})

                    l = zip(l.get('l', []), l.get('q', []))
                    l = [(link[0], re.sub('[^\d]+', '', link[1]))
                         for link in l]

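                    # bucket each link into a single quality tier based on its numeric label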
                    links = [(x[0], '4K') for x in l if int(x[1]) >= 2160]
                    links += [(x[0], '1440p') for x in l if 1440 <= int(x[1]) < 2160]
                    links += [(x[0], '1080p') for x in l if 1080 <= int(x[1]) < 1440]
                    links += [(x[0], 'HD') for x in l if 720 <= int(x[1]) < 1080]
                    links += [(x[0], 'SD') for x in l if int(x[1]) < 720]

                    for link, quality in links:
                        valid, host = source_utils.is_host_valid(
                            link, hostDict)
                        if not valid: continue

                        if directstream.googletag(link):
                            host = 'gvideo'
                            direct = True
                        elif 'fastload.co' in link:
                            direct = True
                        else:
                            direct = False

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'de',
                            'url': link,
                            'direct': direct,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources