def sources(self, url, hostDict, hostprDict):
    """Scrape hoster links for *url* and return a list of source dicts.

    Fetches the page, unescapes the player markup stored in the JS
    ``vicode`` variable, collects every iframe src and keeps those that
    point at a known hoster.
    """
    sources = []

    try:
        if not url:
            return sources

        html = client.request(urlparse.urljoin(self.base_link, url))
        # the player markup is stored escaped inside a JS string variable
        vicode = re.findall('''vicode\s*=\s*["'](.*?)["'];''',
                            html)[0].decode('string_escape')
        frames = [i.attrs['src'] for i in
                  dom_parser.parse_dom(vicode, 'iframe', req='src')]

        for link in frames:
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                continue

            sources.append({
                'source': host,
                'quality': 'SD',
                'language': 'de',
                'url': link,
                'direct': False,
                'debridonly': False,
                'checkquality': True
            })

        return sources
    except:
        return sources
 def sources(self, url, hostDict, hostprDict):
     """Scrape iframe embeds from *url* (via cfscrape) as sources.

     Fixes over the original:
       * removed a stray Python-2 debug ``print`` statement
       * ``quality`` now defaults to 'SD' when the page shows no
         quality badge (previously an unbound-name error aborted the
         scrape inside the bare except)
       * exception paths return the list collected so far instead of
         ``None`` — callers iterate the result, like every other
         scraper in this file
       * hosters rejected by ``is_host_valid`` are skipped, matching
         the convention used by the sibling scrapers
     """
     sources = []
     try:
         scraper = cfscrape.create_scraper()
         content = scraper.get(url).content

         quality = 'SD'  # default when no quality badge is present
         for tag in re.compile('class="quality">(.+?)<').findall(content):
             # the last badge on the page wins (original loop semantics)
             quality = '1080p' if 'HD' in tag else 'SD'

         for link in re.compile('<iframe src="(.+?)"').findall(content):
             valid, host = source_utils.is_host_valid(link, hostDict)
             if not valid:
                 continue
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': 'en',
                 'url': link,
                 'direct': False,
                 'debridonly': False
             })
     except Exception:
         pass
     return sources
# Example #3
    def sources(self, url, hostDict, hostprDict):
        """Collect German stream sources from the site's player widget.

        Extracts the numeric video id from *url*, loads the player page
        and walks every hoster block, emitting one source per episode
        link with the quality taken from the link's title attribute.
        """
        sources = []

        try:
            if not url:
                return sources

            # force the German variant of the page
            url = url.replace('/en/', '/de/')

            # the numeric id sits between a '/' and a '-' in the URL path
            video_id = re.search('(?<=\/)(\d*?)(?=-)', url).group()
            if not video_id:
                return sources

            # load player
            query = self.get_player % (video_id)
            query = urlparse.urljoin(self.base_link, query)
            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'le-server'})

            # for each hoster
            for i in r:
                hoster = dom_parser.parse_dom(i,
                                              'div',
                                              attrs={'class': 'les-title'})
                hoster = dom_parser.parse_dom(hoster, 'strong')
                hoster = hoster[0][1]

                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                links = dom_parser.parse_dom(i,
                                             'a',
                                             attrs={'class': 'ep-item'})

                # for each link
                for i in links:
                    # quality precedence from the title: 1080p > HD > SD
                    if '1080p' in i[0]['title']:
                        quality = '1080p'
                    elif 'HD' in i[0]['title']:
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    url = i[0]['id']
                    if not url: continue

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': url,
                        'direct': False,
                        'debridonly': False,
                        'checkquality': True
                    })

            return sources
        except:
            return sources
# Example #4
    def sources(self, url, hostDict, hostprDict):
        """Collect SD hoster links reachable from the site's main menu.

        Walks every '#mainmenu' entry, loads the linked page and reads
        its '#selecthost' box, emitting one source per known hoster.
        """
        sources = []

        try:
            if not url:
                return sources

            page = client.request(urlparse.urljoin(self.base_link, url))

            menu = dom_parser.parse_dom(page, 'ul', attrs={'id': 'mainmenu'})
            entries = dom_parser.parse_dom(menu, 'li')

            for entry in entries:
                anchor = dom_parser.parse_dom(entry, 'a')
                sub_page = client.request(anchor[0][0]['href'])
                select = dom_parser.parse_dom(sub_page, 'select',
                                              attrs={'id': 'selecthost'})
                options = dom_parser.parse_dom(select, 'option')

                for option in options:
                    # option text starts with the hoster name
                    hoster = re.search('^\S*', option[1]).group().lower()
                    url = option[0]['value']

                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid:
                        continue

                    sources.append({'source': hoster, 'quality': 'SD',
                                    'language': 'de', 'url': url,
                                    'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
# Example #5
    def sources(self, url, hostDict, hostprDict):
        """Scrape the 'noSubs' link table; return up to five sources.

        Retries the page request a few times, then follows each watch
        link and resolves its action button through
        ``check_directstreams``.
        """
        try:
            sources = []

            if url is None:
                return sources
            url = urlparse.urljoin(self.base_link, url)

            result = None
            for _ in range(3):
                result = client.request(url, timeout=10)
                if result is not None:
                    break

            dom = dom_parser.parse_dom(result, 'div',
                                       attrs={'class': 'links',
                                              'id': 'noSubs'})
            result = dom[0].content

            links = re.compile(
                '<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',
                re.DOTALL).findall(result)
            for link in links[:5]:
                try:
                    page_url = urlparse.urljoin(self.base_link, link[1])
                    detail = None
                    for _ in range(2):
                        detail = client.request(page_url, timeout=3)
                        if detail is not None:
                            break
                    r = re.compile(
                        'href="([^"]+)"\s+class="action-btn').findall(detail)[0]
                    valid, hoster = source_utils.is_host_valid(r, hostDict)
                    if not valid:
                        continue
                    urls, host, direct = source_utils.check_directstreams(r, hoster)
                    for entry in urls:
                        sources.append({'source': host,
                                        'quality': entry['quality'],
                                        'language': 'en',
                                        'url': entry['url'],
                                        'direct': direct,
                                        'debridonly': False})

                except:
                    pass

            return sources
        except:
            return sources
# Example #6
    def sources(self, url, hostDict, hostprDict):
        """Look up German streams by IMDb id and return SD sources.

        POSTs the numeric IMDb id to ``self.request_link`` and expands
        the JSON 'links' map into one source per link id.
        """
        sources = []

        try:
            if url is None:
                return sources

            params = urlparse.parse_qs(url)
            params = dict([(k, params[k][0]) if params[k] else (k, '')
                           for k in params])
            post = urllib.urlencode(
                {'ID': re.sub('[^0-9]', '', str(params['imdb'])),
                 'lang': 'de'})

            response = client.request(
                urlparse.urljoin(self.base_link, self.request_link),
                post=post, XHR=True)
            response = json.loads(response)
            # KeyError here (no 'links') falls through to the except path
            links_map = response['links']
            entries = [(name, links_map[name][0], links_map[name][1:])
                       for name in links_map]

            for hoster, quli, links in entries:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue

                for link in links:
                    try:
                        sources.append({'source': hoster, 'quality': 'SD',
                                        'language': 'de',
                                        'url': self.out_link % link,
                                        'direct': False,
                                        'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
# Example #7
    def sources(self, url, hostDict, hostprDict):
        """Parse the release banner and current stream links of a page.

        Quality/info come from the '#release_text' span; each
        'currentStreamLinks' entry yields one source whose url is the
        list of data-id values ('multi-part' when more than one).
        """
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            # release name precedes the first '&nbsp;' in the banner span
            quality = dom_parser.parse_dom(r, 'span', attrs={'id': 'release_text'})[0].content.split('&nbsp;')[0]
            quality, info = source_utils.get_release_quality(quality)

            r = dom_parser.parse_dom(r, 'ul', attrs={'class': 'currentStreamLinks'})
            # pair each hoster-name paragraph with its stream anchors
            r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}), dom_parser.parse_dom(i, 'a', attrs={'class': 'stream-src'}, req='data-id')) for i in r]
            # strip a trailing ' hd' marker from the name; keep all data-ids
            r = [(re.sub(' hd$', '', i[0][0].content.lower()), [x.attrs['data-id'] for x in i[1]]) for i in r if i[0] and i[1]]

            for hoster, id in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'info': ' | '.join(info + ['' if len(id) == 1 else 'multi-part']), 'url': id, 'direct': False, 'debridonly': False, 'checkquality': True})

            return sources
        except:
            return sources
# Example #8
    def sources(self, url, hostDict, hostprDict):
        """Resolve sources for one episode from the '#streams' tab widget.

        The page groups streams into language tabs; only tabs whose flag
        is German ('de') are kept, and non-'de' entries are labelled
        'subbed'.  For the requested episode the markup yields an encoded
        token ('data-enc') plus a hoster list; the pair becomes the url.
        """
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data.get('url')
            episode = int(data.get('episode', 1))

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'streams'})

            # language tabs -> (stream id, ['subbed'] for non-'de' flags)
            rels = dom_parser.parse_dom(r, 'ul', attrs={'class': 'nav'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = dom_parser.parse_dom(rels, 'a', attrs={'href': re.compile('#stream_\d*')}, req='href')
            rels = [(re.findall('stream_(\d+)', i.attrs['href']), re.findall('flag-(\w{2})', i.content)) for i in rels if i]
            rels = [(i[0][0], ['subbed'] if i[1][0] != 'de' else []) for i in rels if i[0] and 'de' in i[1]]

            for id, info in rels:
                # per-tab table rows; quality is read from the WxH label
                rel = dom_parser.parse_dom(r, 'div', attrs={'id': 'stream_%s' % id})
                rel = [(dom_parser.parse_dom(i, 'div', attrs={'id': 'streams_episodes_%s' % id}), dom_parser.parse_dom(i, 'tr')) for i in rel]
                rel = [(i[0][0].content, [x for x in i[1] if 'fa-desktop' in x.content]) for i in rel if i[0] and i[1]]
                rel = [(i[0], dom_parser.parse_dom(i[1][0].content, 'td')) for i in rel if i[1]]
                rel = [(i[0], re.findall('\d{3,4}x(\d{3,4})$', i[1][0].content)) for i in rel if i[1]]
                rel = [(i[0], source_utils.label_to_quality(i[1][0])) for i in rel if len(i[1]) > 0]

                for html, quality in rel:
                    try:
                        # find the loop index belonging to the wanted episode
                        s = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_\d+' % id)})
                        s = [(dom_parser.parse_dom(i, 'div', attrs={'data-loop': re.compile('\d+')}, req='data-loop'), dom_parser.parse_dom(i, 'span')) for i in s]
                        s = [(i[0][0].attrs['data-loop'], [x.content for x in i[1] if '<strong' in x.content]) for i in s if i[0]]
                        s = [(i[0], re.findall('<.+?>(\d+)</.+?> (.+?)$', i[1][0])) for i in s if len(i[1]) > 0]
                        s = [(i[0], i[1][0]) for i in s if len(i[1]) > 0]
                        # NOTE(review): re.IGNORECASE here is a 4th tuple
                        # element, not a flags argument to findall — looks
                        # like a misplaced parenthesis; confirm before fixing.
                        s = [(i[0], int(i[1][0]), re.findall('Episode (\d+):', i[1][1]), re.IGNORECASE) for i in s if len(i[1]) > 1]
                        s = [(i[0], i[1], int(i[2][0]) if len(i[2]) > 0 else -1) for i in s]
                        s = [(i[0], i[2] if i[2] >= 0 else i[1]) for i in s]
                        s = [i[0] for i in s if i[1] == episode][0]

                        # encoded stream token for this episode block
                        enc = dom_parser.parse_dom(html, 'div', attrs={'id': re.compile('streams_episodes_%s_%s' % (id, s))}, req='data-enc')[0].attrs['data-enc']

                        # hoster names are 'hoster-<name>' css classes on icons
                        hosters = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_%s' % (id, s))})
                        hosters = [dom_parser.parse_dom(i, 'i', req='class') for i in hosters]
                        hosters = [re.findall('hoster-(\w+)', ' '.join([x.attrs['class'] for x in i])) for i in hosters if i][0]
                        hosters = [(source_utils.is_host_valid(re.sub('(co|to|net|pw|sx|tv|moe|ws|icon)$', '', i), hostDict), i) for i in hosters]
                        hosters = [(i[0][1], i[1]) for i in hosters if i[0] and i[0][0]]

                        info = ' | '.join(info)

                        for source, hoster in hosters:
                            sources.append({'source': source, 'quality': quality, 'language': 'de', 'url': [enc, hoster], 'info': info, 'direct': False, 'debridonly': False, 'checkquality': True})
                    except:
                        pass

            return sources
        except:
            return sources
# Example #9
    def sources(self, url, hostDict, hostprDict):
        """Scrape the 'linkbox' for German hoster links (SD or HD).

        Episodes are fetched via an XHR POST to ``self.hoster_link``;
        movies use the page itself.  A link counts as HD when its
        'linkbutton' image class starts with 'hd'.
        """
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            season = data.get('season')
            episode = data.get('episode')

            if season and episode:
                # episode link lists are served by an XHR endpoint
                r = urllib.urlencode({
                    'imdbid': data['imdb'],
                    'language': 'de',
                    'season': season,
                    'episode': episode
                })
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.hoster_link),
                                   XHR=True,
                                   post=r)
            else:
                r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'class':
                                                      'linkbox'})[0].content
            r = re.compile('(<a.+?/a>)', re.DOTALL).findall(r)
            # pair each anchor with its 'linkbutton' image (quality marker)
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(
                      i,
                      'img',
                      attrs={'class': re.compile('.*linkbutton')},
                      req='class')) for i in r]
            r = [(i[0][0].attrs['href'], i[1][0].attrs['class'].lower())
                 for i in r if i[0] and i[1]]
            # class names beginning with 'hd' mark HD buttons
            r = [(i[0].strip(), 'HD' if i[1].startswith('hd') else 'SD')
                 for i in r]

            for url, quli in r:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': quli,
                    'language': 'de',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
# Example #10
    def sources(self, url, hostDict, hostprDict):
        """Return episode sources from the dubbed and subbed tables.

        The page carries two tables: '#gerdub' (German audio) and
        '#gersub' (German subtitles).  Rows containing a link for the
        requested episode are kept; the hoster is inferred from the
        file name of the link's icon.
        """
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = data.get('url')
            episode = int(data.get('episode', 1))

            r = client.request(urlparse.urljoin(self.base_link, url))
            # info label -> table ('' = dubbed, 'subbed' = subtitles)
            r = {
                '': dom_parser.parse_dom(r, 'div', attrs={'id': 'gerdub'}),
                'subbed': dom_parser.parse_dom(r,
                                               'div',
                                               attrs={'id': 'gersub'})
            }

            # NOTE: dict.iteritems() — this module targets Python 2
            for info, data in r.iteritems():
                data = dom_parser.parse_dom(data, 'tr')
                # keep only rows holding a link whose id is this episode
                data = [
                    dom_parser.parse_dom(i, 'a', req='href') for i in data
                    if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})
                ]
                data = [(link.attrs['href'],
                         dom_parser.parse_dom(link.content, 'img', req='src'))
                        for i in data for link in i]
                data = [(i[0], i[1][0].attrs['src']) for i in data if i[1]]
                # hoster name is the icon's file name without extension
                data = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in data]
                data = [(i[0], i[1][0]) for i in data if i[1]]

                for link, hoster in data:
                    valid, hoster = source_utils.is_host_valid(
                        hoster, hostDict)
                    if not valid: continue

                    sources.append({
                        'source': hoster,
                        'quality': 'SD',
                        'language': 'de',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })

            return sources
        except:
            return sources
# Example #11
    def sources(self, url, hostDict, hostprDict):
        """Read release quality from the description; list '#links' rows.

        Quality/info come from a 'release:' line inside the item
        description (default 'SD').  Each '#links' table row yields one
        source whose hoster name is taken from its second cell.
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            # look for a 'release: <name>' line inside the description
            rel = dom_parser.parse_dom(r, 'div', attrs={'id': 'info'})
            rel = dom_parser.parse_dom(rel,
                                       'div',
                                       attrs={'itemprop': 'description'})
            rel = dom_parser.parse_dom(rel, 'p')
            rel = [re.sub('<.+?>|</.+?>', '', i.content) for i in rel]
            rel = [re.findall('release:\s*(.*)', i, re.I) for i in rel]
            rel = [source_utils.get_release_quality(i[0]) for i in rel if i]
            quality, info = (rel[0]) if rel else ('SD', [])

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'links'})
            r = dom_parser.parse_dom(r, 'table')
            r = dom_parser.parse_dom(r, 'tr', attrs={'id': re.compile('\d+')})
            r = [dom_parser.parse_dom(i, 'td') for i in r]
            # (link cell, hoster name stripped of markup)
            r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1].content).strip())
                 for i in r if len(r) >= 1]
            r = [(dom_parser.parse_dom(i[0], 'a', req='href'), i[1])
                 for i in r]
            r = [(i[0][0].attrs['href'], i[1]) for i in r if i[0]]

            info = ' | '.join(info)

            for link, hoster in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False,
                    'checkquality': True
                })

            return sources
        except:
            return sources
# Example #12
    def sources(self, url, hostDict, hostprDict):
        """Scrape the links table inside '#ko-bind' into sources.

        Each table row carries a quality cell (<td>HD</td> marks HD)
        and a name cell whose 'data-bind' attribute holds the link.

        Fix: the parsed ``quality`` value is now used in the source
        dict — the original computed HD/SD per row and then appended a
        hard-coded 'SD', discarding the result.
        """
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'ko-bind'})
            r = dom_parser.parse_dom(r,
                                     'table',
                                     attrs={'class': 'links-table'})
            r = dom_parser.parse_dom(r, 'tbody')
            r = dom_parser.parse_dom(r, 'tr')

            for i in r:
                # a bare <td>HD</td> cell marks an HD release
                if re.search('(?<=<td>)(HD)(?=</td>)', i[1]):
                    quality = 'HD'
                else:
                    quality = 'SD'

                x = dom_parser.parse_dom(i,
                                         'td',
                                         attrs={'class': 'name'},
                                         req='data-bind')

                # hoster name is the text after the last '>' in the cell
                hoster = re.search("(?<=>).*$", x[0][1])
                hoster = hoster.group().lower()

                # stream url is embedded in the knockout data-bind string
                url = re.search("http(.*?)(?=')", x[0][0]['data-bind'])
                url = url.group()

                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    # FIX: was hard-coded 'SD', ignoring the parsed value
                    'quality': quality,
                    'language': 'de',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
# Example #13
    def sources(self, url, hostDict, hostprDict):
        """Return German download sources from the '#downloads' table."""
        sources = []

        try:
            if not url:
                return sources

            html = client.request(urlparse.urljoin(self.base_link, url))

            table = dom_parser.parse_dom(html, 'div',
                                         attrs={'id': 'downloads'})
            table = dom_parser.parse_dom(table, 'table')
            table = dom_parser.parse_dom(table, 'tbody')
            rows = dom_parser.parse_dom(table, 'tr')

            for row in rows:
                # only rows flagged as German releases are of interest
                if not re.search('German', row[1]):
                    continue

                hoster = re.search('(?<=domain=)(.*?)(?=\")', row[1])
                hoster = hoster.group().lower()

                valid, hoster = source_utils.is_host_valid(
                    hoster, hostDict)
                if not valid:
                    continue

                link = re.search('(?<=links/)(.*?)(?=/)', row[1]).group()

                quality = 'HD' if re.search('<td>HD</td>', row[1]) else 'SD'

                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'url': self.__get_link(link),
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        """Resolve hoster link ids for a movie or a single episode.

        Episode requests POST to ``self.get_episodes``; everything else
        goes through ``self.get_links``.  Every returned id becomes one
        SD source built via ``self.out_link``.
        """
        sources = []

        try:
            if not url:
                return sources

            params = urlparse.parse_qs(url)
            params = dict([(k, params[k][0]) if params[k] else (k, '')
                           for k in params])
            id = params.get('id')
            season = params.get('season')
            episode = params.get('episode')

            if season and episode:
                response = client.request(
                    urlparse.urljoin(self.base_link, self.get_episodes),
                    post={
                        'series_id': id,
                        'mlang': 'de',
                        'season': season,
                        'episode': episode
                    })
                entries = json.loads(response).get('episode_links', [])
                pairs = [([entry.get('id')], entry.get('hostername'))
                         for entry in entries]
            else:
                params.update({'lang': 'de'})
                response = client.request(
                    urlparse.urljoin(self.base_link, self.get_links),
                    post=params)
                entries = json.loads(response).get('links', [])
                pairs = [(entry.get('ids'), entry.get('hoster'))
                         for entry in entries]

            for link_ids, hoster in pairs:
                valid, host = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue

                for link_id in link_ids:
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'de',
                        'url': self.out_link % (link_id, hoster),
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        """Follow the embedded player chain and list article links.

        The '#player' iframe leads to a 'play_container' page whose
        'id'/'links' JS variables feed ``self.get_link``; the resulting
        '#articleList' anchors become SD sources.
        """
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'player'})
            r = dom_parser.parse_dom(r, 'iframe', req='src')
            r = client.request(r[0][0]['src'])
            r = dom_parser.parse_dom(r,
                                     'a',
                                     attrs={'class': 'play_container'},
                                     req='href')
            r = client.request(r[0][0]['href'])
            # the player page exposes its id/links as two JS string vars
            url = self.get_link % (
                re.search('(?<=var id = \")(.*?)(?=\")', r).group(),
                re.search('(?<=var links = \")(.*?)(?=\")', r).group())
            r = client.request(url)
            r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'articleList'})
            r = dom_parser.parse_dom(r, 'a')

            for i in r:
                if 'http' in i[0]['href']:
                    link = i[0]['href']
                elif 'http' in i[0]['onclick']:
                    link = re.search('http(.*?)(?=\")',
                                     i[0]['onclick']).group()
                else:
                    # NOTE(review): this aborts the whole scrape at the
                    # first anchor without a usable link — 'continue' may
                    # have been intended; confirm before changing.
                    return sources

                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
# Example #16
    def sources(self, url, hostDict, hostprDict):
        """Return German sources for a movie page or a specific episode.

        For episodes the '#series' <select> is scanned for the option
        whose label mentions the episode number; otherwise every link
        inside the '.inner' block is taken.

        Fix: the empty-url guard previously returned ``None`` while
        every other exit returns a list; callers iterate the result.
        """
        sources = []

        try:
            if not url:
                return sources  # FIX: was bare 'return' (None)

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url', ''))
            episode = data.get('episode')

            r = client.request(url)
            r = r.replace('\n', ' ')
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'fullstory'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'row'})

            if episode:
                # pick the <option> whose label contains this episode number
                r = dom_parser.parse_dom(r, 'select', attrs={'id': 'series'})
                r = dom_parser.parse_dom(r, 'option', req='value')
                r = [(i.attrs['value'], i.content) for i in r]
                r = [(i[0],
                      re.findall('\s+(\d+)\s+episode', i[1], re.IGNORECASE))
                     for i in r]
                r = [i[0].strip() for i in r if i[1] and episode in i[1]]
            else:
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'inner'})
                r = dom_parser.parse_dom(r, 'a', req='href')
                r = [i.attrs['href'].strip() for i in r]

            for link in r:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
# Example #17
    def sources(self, url, hostDict, hostprDict):
        """Build stream sources from the site's JSON link map.

        Streams with more than one part are stitched together with a
        Kodi ``stack://`` URL; hosters missing parts are skipped.

        Fix: the missing-JSON guard previously returned ``None``;
        callers expect a list like every other exit of this method.
        """
        sources = []

        try:
            if url == None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            j = self.__get_json(data['url'])

            if not j: return sources  # FIX: was bare 'return' (None)

            # stream id: explicit 'sid' param, else first key of the map
            # (j.keys()[0] — this module targets Python 2)
            sid = data['sid'] if 'sid' in data else j.keys()[0]
            # expected number of parts per stream (defaults to 1)
            pcnt = int(j[sid]['1']) if '1' in j[sid] else 1

            for jHoster in j[sid]['links']:
                jLinks = [
                    i[3] for i in j[sid]['links'][jHoster] if i[5] == 'stream'
                ]
                # skip hosters that do not provide every part
                if len(jLinks) < pcnt: continue

                h_url = jLinks[0]
                valid, hoster = source_utils.is_host_valid(h_url, hostDict)
                if not valid: continue

                h_url = h_url if pcnt == 1 else 'stack://' + ' , '.join(jLinks)

                try:
                    sources.append({
                        'source': hoster,
                        'quality': 'SD',
                        'language': 'de',
                        'info': '' if pcnt == 1 else 'multi-part',
                        'url': h_url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
# Example #18
    def sources(self, url, hostDict, hostprDict):
        """List hoster videos, marking language and HD variants.

        Rows with data-lang-key '1' or '3' are considered; '3' entries
        are flagged 'subbed'.  The hoster caption may carry a second
        line after a <br>; 'hosterhdvideo' in that line marks HD.
        """
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'hosterSiteVideo'})
            # keep only language keys 1 and 3; 3 means subtitled
            r = dom_parser.parse_dom(
                r, 'li', attrs={'data-lang-key': re.compile('[1|3]')})
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i, 'h4'),
                  'subbed' if i.attrs['data-lang-key'] == '3' else '')
                 for i in r]
            r = [(i[0][0].attrs['href'], i[1][0].content.lower(), i[2])
                 for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            # split the caption at a <br> into (hoster, extra marker text)
            r = [(i[0], i[1],
                  re.findall('(.+?)\s*<br\s*/?>(.+?)$', i[1], re.DOTALL), i[2])
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '', i[3]) for i in r]
            r = [(i[0], i[1], 'HD' if 'hosterhdvideo' in i[2] else 'SD', i[3])
                 for i in r]

            for link, host, quality, info in r:
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'de',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
# Example #19
    def sources(self, url, hostDict, hostprDict):
        """Scrape the movie link table and return known-hoster sources."""
        sources = []
        try:
            if not url:
                return sources

            html = client.request(urlparse.urljoin(self.base_link, url))
            # the page escapes quotes inside the table markup
            html = html.replace('\\"', '"')

            rows = dom_parser.parse_dom(html,
                                        'tr',
                                        attrs={'id': 'tablemoviesindex2'})

            for row in rows:
                try:
                    # hoster name is carried in the icon's alt text
                    host = dom_parser.parse_dom(row, 'img',
                                                req='alt')[0].attrs['alt']
                    host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                    host = host.encode('utf-8')

                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        continue

                    link = dom_parser.parse_dom(row, 'a',
                                                req='href')[0].attrs['href']
                    link = client.replaceHTMLCodes(link)
                    link = urlparse.urljoin(self.base_link, link)
                    link = link.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'de',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #20
0
    def sources(self, url, hostDict, hostprDict):
        # Build playable source dicts for the given plugin url.
        #
        # url        -- urlencoded query string carrying 'url' and 'episode'
        # hostDict   -- known hoster domains used to validate embed links
        # hostprDict -- premium hosters (unused here)
        #
        # Returns [] on any failure; the bare except deliberately swallows
        # errors so one broken page never crashes a scraper run.
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            # __get_episode yields one (id, episode, type) tuple per variant
            for item_id, episode, content_type in self.__get_episode(data.get('url'), data.get('episode')):
                stream_link = urlparse.urljoin(self.base_link, '/watch/%s/%s/%s' % (item_id, episode, content_type))

                # content types ending in 'sub' are subtitled releases
                info = 'subbed' if content_type.endswith('sub') else ''

                r = client.request(stream_link)

                # the stream list lives in an inline <script> as a JSON array;
                # each entry has a url template ('replace') and the value
                # ('code') substituted for its '#' placeholder
                r = dom_parser.parse_dom(r, 'script')
                r = ' '.join([i.content for i in r if i.content])
                r = json.loads(re.findall('var\s*streams\s*=\s*(\[.*?\])\s*;', r)[0])
                r = [(i.get('replace'), i.get('code')) for i in r]
                r = [(i[0].replace('#', i[1])) for i in r if i[0] and i[1]]

                for stream_link in r:
                    if stream_link.startswith('/'): stream_link = 'http:%s' % stream_link

                    if self.domains[0] in stream_link:
                        # self-hosted stream: force the flash player via cookie,
                        # then scrape file/width pairs straight out of the page
                        stream_link = client.request(stream_link, cookie=urllib.urlencode({'proxerstream_player': 'flash'}))

                        i = [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*width\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', stream_link, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]

                        for url, quality in i:
                            sources.append({'source': 'cdn', 'quality': quality, 'language': 'de', 'url': url, 'info': info, 'direct': True, 'debridonly': False})
                    else:
                        # external hoster embed; SD assumed
                        valid, host = source_utils.is_host_valid(stream_link, hostDict)
                        if not valid: continue

                        sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': stream_link, 'info': info, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #21
0
    def sources(self, url, hostDict, hostprDict):
        # Resolve every mirror from the page's hoster list into source dicts.
        # Returns [] on any failure; the bare except keeps scraping resilient.
        sources = []

        try:
            if url == None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            season = data.get('season')
            episode = data.get('episode')

            r = client.request(url)

            if season and episode:
                # series page: the season selector's rel attribute is a query
                # string (leading char stripped) holding Addr/SeriesID, used
                # to fetch the episode-specific hoster list
                r = dom_parser.parse_dom(r, 'select', attrs={'id': 'SeasonSelection'}, req='rel')[0]
                r = client.replaceHTMLCodes(r.attrs['rel'])[1:]
                r = urlparse.parse_qs(r)
                r = dict([(i, r[i][0]) if r[i] else (i, '') for i in r])
                r = urlparse.urljoin(self.base_link, self.get_links_epi % (r['Addr'], r['SeriesID'], season, episode))
                r = client.request(r)

            # each li#Hoster_N carries a rel query string plus the hoster name
            # and a "current/total" mirror counter in its markup
            r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'HosterList'})[0]
            r = dom_parser.parse_dom(r, 'li', attrs={'id': re.compile('Hoster_\d+')}, req='rel')
            r = [(client.replaceHTMLCodes(i.attrs['rel']), i.content) for i in r if i[0] and i[1]]
            r = [(i[0], re.findall('class="Named"[^>]*>([^<]+).*?(\d+)/(\d+)', i[1])) for i in r]
            r = [(i[0], i[1][0][0].lower().rsplit('.', 1)[0], i[1][0][2]) for i in r if len(i[1]) > 0]

            for link, hoster, mirrors in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                # rebuild one url per mirror from the id/Hoster fields of the
                # rel query string
                u = urlparse.parse_qs('&id=%s' % link)
                u = dict([(x, u[x][0]) if u[x] else (x, '') for x in u])
                for x in range(0, int(mirrors)):
                    url = self.mirror_link % (u['id'], u['Hoster'], x + 1)
                    if season and episode: url += "&Season=%s&Episode=%s" % (season, episode)
                    try: sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
                    except: pass

            return sources
        except:
            return sources
Example #22
0
    def sources(self, url, hostDict, hostprDict):
        # Scrape the german stream table rows into source dicts.
        #
        # url        -- relative page path joined onto self.base_link
        # hostDict   -- known hoster domains used to validate hosters
        # hostprDict -- premium hosters (unused here)
        #
        # Returns [] on any failure; never raises.
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'tab-plot_german'})
            r = dom_parser.parse_dom(r, 'tbody')
            r = dom_parser.parse_dom(r, 'tr')

            for i in r:
                # Evaluate each regex exactly once. A row that does not match
                # is skipped: the previous version called .group() directly on
                # re.search(), so a single non-matching row raised
                # AttributeError into the outer except and threw away every
                # source collected so far.
                hoster_m = re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1])
                link_m = re.search('(?<=href=\")(.*?)(?=\")', i[1])
                rel_m = re.search(
                    '(?<=oddCell qualityCell">)(\n.*?)(?=<\/td>)', i[1])
                if not hoster_m or not link_m or not rel_m:
                    continue

                hoster = hoster_m.group().strip()
                if not hoster:
                    continue
                link = link_m.group()
                rel = rel_m.group().strip()

                quality, info = source_utils.get_release_quality(rel)
                if not quality:
                    quality = 'SD'

                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #23
0
    def sources(self, url, hostDict, hostprDict):
        # Resolve hoster entries from the site's JSON API into source dicts.
        # Hosters whose name ends in 'hd' are reported as HD, all others SD;
        # the returned url is a relative 'watch/<id>' path.
        # Returns [] on any failure; never raises.
        sources = []

        try:
            if not url:
                return sources

            j = self.__get_json(url)
            # The old code filtered with the loop-invariant test
            # "'links' in j" (always true once j['links'] had succeeded) and
            # then copied the list through a no-op comprehension. j.get keeps
            # the same outcome (empty result when the key is absent) without
            # the dead code.
            j = j.get('links', [])
            j = [(i['hoster'].lower(), i['id']) for i in j]
            # strip the 'hd' suffix from the hoster name but remember it as
            # the quality flag
            j = [(re.sub('hd$', '', i[0]), i[1],
                  'HD' if i[0].endswith('hd') else 'SD') for i in j]

            for hoster, url, quality in j:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': ('watch/%s' % url), 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #24
0
    def sources(self, url, hostDict, hostprDict):
        # Find hoster links for an anime episode by trying progressively less
        # specific title variants (alternative title, localized title,
        # original title) against the site search.
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            tvshowtitle = data.get('tvshowtitle')
            localtvshowtitle = data.get('localtvshowtitle')
            # NOTE(review): eval on the serialized aliases string — acceptable
            # only because the url is plugin-internal, but worth replacing
            # with json at the producer side; confirm against the callers.
            aliases = source_utils.aliases_to_array(eval(data['aliases']))
            # the site indexes episodes by absolute number, so translate
            # season/episode via TVMaze first
            episode = tvmaze.tvMaze().episodeAbsoluteNumber(
                data.get('tvdb'), int(data.get('season')),
                int(data.get('episode')))

            # fall through the title variants until one search yields links
            alt_title = anilist.getAlternativTitle(tvshowtitle)
            links = self.__search([alt_title] + aliases, episode)
            if not links and localtvshowtitle != alt_title:
                links = self.__search([localtvshowtitle] + aliases, episode)
            if not links and tvshowtitle != localtvshowtitle:
                links = self.__search([tvshowtitle] + aliases, episode)

            for link in links:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #25
0
    def sources(self, url, hostDict, hostprDict):
        # Query the site's JSON endpoint (POST) for german streams and map
        # every hoster's link list into SD source dicts.
        # Returns [] on any failure; never raises.
        sources = []

        try:
            if url == None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            # ask the API for raw, german-language results
            data.update({'raw': 'true', 'language': 'de'})
            data = urllib.urlencode(data)
            data = client.request(urlparse.urljoin(self.base_link,
                                                   self.request_link),
                                  post=data)
            data = json.loads(data)
            # response[1] maps ids to objects carrying 'name' and 'links'
            data = [i[1] for i in data[1].items()]
            data = [(i['name'].lower(), i['links']) for i in data]

            for host, links in data:
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid: continue

                for link in links:
                    try:
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'de',
                            'url': link['URL'],
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        # a link without a 'URL' key is silently skipped
                        pass

            return sources
        except:
            return sources
Example #26
0
    def sources(self, url, hostDict, hostprDict):
        """Parse the stream-links table and return SD source dicts."""
        sources = []

        try:
            if not url:
                return sources

            html = client.request(urlparse.urljoin(self.base_link, url))
            table = dom_parser.parse_dom(html,
                                         'table',
                                         attrs={'class': 'stream_links'})

            for row in dom_parser.parse_dom(table, 'tr'):
                # first cell holds the link, the wide-screen-only cell the
                # hoster name; rows missing either are skipped
                cells = dom_parser.parse_dom(row, 'td')
                named = dom_parser.parse_dom(
                    row, 'td', attrs={'class': 'hide-for-small-only'})
                if not cells or not named:
                    continue

                anchors = dom_parser.parse_dom(cells[0], 'a', req='href')
                if not anchors:
                    continue

                link = anchors[0].attrs['href']
                hoster = named[0].content.lower()

                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid:
                    continue

                sources.append({
                    'source': hoster,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #27
0
    def sources(self, url, hostDict, hostprDict):
        # Search a download-post site for the title, collect candidate post
        # links, then scrape every post's multilink block for hoster urls.
        # All results are flagged debridonly=True.
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url).replace('-', '+')

            r = client.request(url)
            if r == None and 'tvshowtitle' in data:
                # NOTE(review): this fallback requests the bare title, which
                # is not a full url — looks questionable; verify against the
                # site before relying on this branch
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                url = title

                r = client.request(url)

            # retry the request once if the first attempt yielded nothing
            for loopCount in range(0, 2):
                if loopCount == 1 or (r == None and 'tvshowtitle' in data):

                    r = client.request(url)

                posts = client.parseDOM(
                    r, "div", attrs={"class": "postpage_movie_download"})
                # premium hosters are acceptable: results are debrid-only
                hostDict = hostprDict + hostDict
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            try:
                                name = str(i)
                                items.append(name)
                                print items
                            except:
                                pass
                    except:
                        pass

                if len(items) > 0: break

            for item in items:
                try:
                    info = []

                    i = str(item)
                    r = client.request(i)
                    u = client.parseDOM(r,
                                        "div",
                                        attrs={"class": "multilink_lnks"})
                    for t in u:
                        r = client.parseDOM(t, 'a', ret='href')
                        for url in r:
                            # crude quality sniffing from the link text
                            if '1080p' in url:
                                quality = '1080p'
                            elif '1080' in url:
                                quality = '1080p'
                            elif '720p' in url:
                                quality = '720p'
                            elif '720' in url:
                                quality = '720p'
                            elif 'HD' in url:
                                quality = '720p'
                            else:
                                quality = 'SD'
                            # NOTE(review): info starts as a list and is
                            # rebound to a string here on the first pass —
                            # effectively always '' afterwards; left as-is
                            info = ' | '.join(info)
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })

                except:
                    pass
            # prefer non-CAM sources whenever any exist
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Example #28
0
    def sources(self, url, hostDict, hostprDict):
        # Collect german streams; for episodes the hoster list is fetched via
        # an XHR POST keyed on the imdb id and 's<season>e<episode>'.
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url', ''))
            imdb = data.get('imdb')
            season = data.get('season')
            episode = data.get('episode')

            if season and episode and imdb:
                r = urllib.urlencode({
                    'val': 's%se%s' % (season, episode),
                    'IMDB': imdb
                })
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.episode_link),
                                   XHR=True,
                                   post=r)
            else:
                r = client.request(url)

            # language selector: only the 'deutsch' option is considered
            l = dom_parser.parse_dom(r, 'select', attrs={'id': 'sel_sprache'})
            l = dom_parser.parse_dom(l, 'option', req='id')

            r = [(dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['id']}))
                 for i in l if i.attrs['id'] == 'deutsch']
            r = [(i[0], dom_parser.parse_dom(i[0], 'option', req='id'))
                 for i in r]
            r = [(id.attrs['id'],
                  dom_parser.parse_dom(content,
                                       'div',
                                       attrs={'id': id.attrs['id']}))
                 for content, ids in r for id in ids]
            # option ids embed the resolution, e.g. 'hd720'; extract it and
            # map it onto a quality label ('0' when no resolution is present)
            r = [(re.findall('hd(\d{3,4})',
                             i[0]), dom_parser.parse_dom(i[1], 'a',
                                                         req='href'))
                 for i in r if i[1]]
            r = [(i[0][0] if i[0] else '0', [x.attrs['href'] for x in i[1]])
                 for i in r if i[1]]
            r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]

            for quality, urls in r:
                for link in urls:
                    try:
                        data = urlparse.parse_qs(urlparse.urlparse(link).query,
                                                 keep_blank_values=True)

                        # redirector links hide the real target base64-encoded
                        # in the 'm' query parameter
                        if 'm' in data:
                            data = data.get('m')[0]
                            link = base64.b64decode(data)

                        link = link.strip()

                        valid, host = source_utils.is_host_valid(
                            link, hostDict)
                        if not valid: continue

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'de',
                            'url': link,
                            'direct': False,
                            'debridonly': False,
                            'checkquality': True
                        })
                    except:
                        pass

            return sources
        except:
            return sources
Example #29
0
    def sources(self, url, hostDict, hostprDict):
        # Crawl the page's xrel release lists for german stream releases and
        # map each release's download links onto source dicts, carrying the
        # quality parsed from the xrel search query of each release.
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'Module'})
            # pair the module markup with its xrel search link (if any)
            r = [(r,
                  dom_parser.parse_dom(
                      r,
                      'a',
                      attrs={
                          'href': re.compile('[^\'"]*xrel_search_query[^\'"]*')
                      },
                      req='href'))]
            r = [(i[0], i[1][0].attrs['href'] if i[1] else '') for i in r]

            # when the page links to several release lists, visit each one
            # and collect every german stream release's detail page instead
            rels = dom_parser.parse_dom(
                r[0][0],
                'a',
                attrs={'href': re.compile('[^\'"]*ReleaseList[^\'"]*')},
                req='href')
            if rels and len(rels) > 1:
                r = []
                for rel in rels:
                    relData = client.request(
                        urlparse.urljoin(self.base_link, rel.attrs['href']))
                    relData = dom_parser.parse_dom(
                        relData, 'table', attrs={'class': 'release-list'})
                    relData = dom_parser.parse_dom(relData,
                                                   'tr',
                                                   attrs={'class': 'row'})
                    # per row: release name cell, country flag, release types
                    relData = [
                        (dom_parser.parse_dom(
                            i,
                            'td',
                            attrs={
                                'class': re.compile('[^\'"]*list-name[^\'"]*')
                            }),
                         dom_parser.parse_dom(i,
                                              'img',
                                              attrs={'class': 'countryflag'},
                                              req='alt'),
                         dom_parser.parse_dom(i,
                                              'td',
                                              attrs={'class':
                                                     'release-types'}))
                        for i in relData
                    ]
                    relData = [(i[0][0].content, i[1][0].attrs['alt'].lower(),
                                i[2][0].content) for i in relData
                               if i[0] and i[1] and i[2]]
                    # keep only german releases that are flagged as streams
                    relData = [(i[0], i[2]) for i in relData
                               if i[1] == 'deutsch']
                    relData = [(i[0],
                                dom_parser.parse_dom(
                                    i[1],
                                    'img',
                                    attrs={'class': 'release-type-stream'}))
                               for i in relData]
                    relData = [i[0] for i in relData if i[1]]
                    #relData = dom_parser.parse_dom(relData, 'a', req='href')[:3]
                    relData = dom_parser.parse_dom(relData, 'a', req='href')

                    for i in relData:
                        i = client.request(
                            urlparse.urljoin(self.base_link, i.attrs['href']))
                        i = dom_parser.parse_dom(i,
                                                 'div',
                                                 attrs={'id': 'Module'})
                        i = [(i,
                              dom_parser.parse_dom(
                                  i,
                                  'a',
                                  attrs={
                                      'href':
                                      re.compile(
                                          '[^\'"]*xrel_search_query[^\'"]*')
                                  },
                                  req='href'))]
                        r += [(x[0], x[1][0].attrs['href'] if x[1] else '')
                              for x in i]

            # within each module, the downloads section holds the stream links
            r = [(dom_parser.parse_dom(i[0],
                                       'div',
                                       attrs={'id':
                                              'ModuleReleaseDownloads'}), i[1])
                 for i in r]
            r = [(dom_parser.parse_dom(
                i[0][0],
                'a',
                attrs={'class': re.compile('.*-stream.*')},
                req='href'), i[1]) for i in r if len(i[0]) > 0]

            for items, rel in r:
                # the release name travels in the xrel_search_query parameter
                # and determines quality/info for all links of this release
                rel = urlparse.urlparse(rel).query
                rel = urlparse.parse_qs(rel)['xrel_search_query'][0]

                quality, info = source_utils.get_release_quality(rel)

                # hoster name is recovered from the hoster icon's file name
                items = [(i.attrs['href'], i.content) for i in items]
                items = [(i[0], dom_parser.parse_dom(i[1], 'img', req='src'))
                         for i in items]
                items = [(i[0], i[1][0].attrs['src']) for i in items if i[1]]
                items = [(i[0], re.findall('.+/(.+\.\w+)\.\w+', i[1]))
                         for i in items]
                items = [(i[0], i[1][0]) for i in items if i[1]]

                info = ' | '.join(info)

                for link, hoster in items:
                    valid, hoster = source_utils.is_host_valid(
                        hoster, hostDict)
                    if not valid: continue

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': False,
                        'checkquality': True
                    })

            return sources
        except:
            return sources
    def sources(self, url, hostDict, hostprDict):
        # Read the player tab bar, keep only tabs whose flag icon marks them
        # as german ('de'), then resolve each tab's embed/iframe into a
        # source dict (following the play.seriesever indirection if present).
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            # each tab <li> pairs an anchor (href '#<div-id>') with a flag
            # image whose file name encodes the language
            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i,
                                          'a',
                                          attrs={'class': 'options'},
                                          req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            # per tab div: prefer an inline "link" value, fall back to the
            # metaframe iframe's src
            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
            r = [(re.findall('link"?\s*:\s*"(.+?)"',
                             ''.join([x.content for x in i])),
                  dom_parser.parse_dom(i,
                                       'iframe',
                                       attrs={'class': 'metaframe'},
                                       req='src')) for i in r]
            r = [
                i[0][0] if i[0] else i[1][0].attrs['src'] for i in r
                if i[0] or i[1]
            ]

            for i in r:
                try:
                    # strip BB-code style wrappers and entity-escapes
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)
                    # non-http values are obfuscated hashes needing decoding
                    if not i.startswith('http'): i = self.__decode_hash(i)
                    if 'play.seriesever' in i:
                        # indirection page: the real hoster sits in an iframe
                        i = client.request(i)
                        i = dom_parser.parse_dom(i, 'iframe', req='src')
                        if len(i) < 1: continue
                        i = i[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls, host, direct = source_utils.check_directstreams(
                        i, host)

                    for x in urls:
                        sources.append({
                            'source': host,
                            'quality': x['quality'],
                            'language': 'de',
                            'url': x['url'],
                            'direct': direct,
                            'debridonly': False
                        })
                except:
                    # one broken tab must not abort the remaining tabs
                    pass

            return sources
        except:
            return sources