Example #1
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            j = self.__get_json(url)
            j = j.get('links', [])
            j = [(i['hoster'].lower(), i['id']) for i in j]
            j = [(re.sub('hd$', '', i[0]), i[1],
                  'HD' if i[0].endswith('hd') else 'SD') for i in j]

            for hoster, url, quality in j:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'url': ('watch/%s' % url),
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
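All 30 examples on this page build the same kind of source dictionary ('source', 'quality', 'language', 'url', 'direct', 'debridonly', plus optional 'info' and 'checkquality'). As a point of reference, here is a minimal sketch of how a caller might filter and rank those dictionaries; the quality ordering and the preference for direct streams are illustrative assumptions, not code taken from any of the scrapers below.

# Hedged sketch: one possible consumer of the dicts returned by sources().
# The ranking is an assumption for illustration, not add-on code.
QUALITY_RANK = {'1080p': 3, 'HD': 2, 'SD': 1}

def rank_sources(sources):
    usable = [s for s in sources if s.get('url')]
    # Prefer higher quality first, then direct streams over hoster redirects.
    return sorted(usable,
                  key=lambda s: (QUALITY_RANK.get(s.get('quality'), 0),
                                 s.get('direct', False)),
                  reverse=True)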
Example #2
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = re.findall('''vicode\s*=\s*["'](.*?)["'];''',
                           r)[0].decode('string_escape')
            r = dom_parser.parse_dom(r, 'iframe', req='src')
            r = [i.attrs['src'] for i in r]

            for i in r:
                valid, host = source_utils.is_host_valid(i, hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'de',
                    'url': i,
                    'direct': False,
                    'debridonly': False,
                    'checkquality': True
                })

            return sources
        except:
            return sources
Example #3
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            data = urllib.urlencode({'ID': re.sub('[^0-9]', '', str(data['imdb'])), 'lang': 'de'})

            data = client.request(urlparse.urljoin(self.base_link, self.request_link), post=data, XHR=True)
            data = json.loads(data)
            data = [(i, data['links'][i]) for i in data.get('links', {})]
            data = [(i[0], i[1][0], (i[1][1:])) for i in data]

            for hoster, quli, links in data:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                for link in links:
                    try: sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': self.out_link % link, 'direct': False, 'debridonly': False})
                    except: pass

            return sources
        except:
            return sources
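Example #3 (and many of the examples that follow) receives its metadata packed into a URL-encoded query string and unpacks it with urlparse.parse_qs. A minimal sketch of that round trip, assuming the same Python 2 environment the scrapers use:

# Hedged sketch of the query-string round trip used by these scrapers (Python 2).
import urllib
import urlparse

packed = urllib.urlencode({'imdb': 'tt0133093', 'season': '1', 'episode': '2'})

data = urlparse.parse_qs(packed)
# parse_qs returns lists of values; flatten them, defaulting to ''.
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])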
Example #4
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            j = self.__get_json(data['url'])

            if not j: return sources

            sid = data['sid'] if 'sid' in data else j.keys()[0]
            pcnt = int(j[sid]['1']) if '1' in j[sid] else 1

            for jHoster in j[sid]['links']:
                jLinks = [i[3] for i in j[sid]['links'][jHoster] if i[5] == 'stream']
                if len(jLinks) < pcnt: continue

                h_url = jLinks[0]
                valid, hoster = source_utils.is_host_valid(h_url, hostDict)
                if not valid: continue

                h_url = h_url if pcnt == 1 else 'stack://' + ' , '.join(jLinks)

                try: sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'info' : '' if pcnt == 1 else 'multi-part', 'url': h_url, 'direct': False, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
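Example #4 joins multi-part streams into a single Kodi stack:// URL so the parts play back to back. A minimal sketch of that convention (the part URLs are placeholders):

# Hedged sketch: Kodi's stack:// protocol concatenates parts with ' , '.
parts = ['http://example-host/part1.mp4', 'http://example-host/part2.mp4']
h_url = parts[0] if len(parts) == 1 else 'stack://' + ' , '.join(parts)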
Example #5
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            scraper = cfscrape.create_scraper()
            r = scraper.get(url).content

            quality = 'SD'
            qual = re.compile('class="quality">(.+?)<').findall(r)
            for i in qual:
                if 'HD' in i:
                    quality = '1080p'
                else:
                    quality = 'SD'

            match = re.compile('<iframe src="(.+?)"').findall(r)
            for link in match:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: continue
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
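Example #5 relies on cfscrape, a requests wrapper that solves Cloudflare's JavaScript challenge before returning the page body. A standalone sketch of that fetch (the URL is a placeholder):

# Hedged sketch: fetching a Cloudflare-protected page with cfscrape.
import cfscrape

scraper = cfscrape.create_scraper()  # behaves like a requests.Session
html = scraper.get('http://example.com').content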
Example #6
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = url.replace('/en/', '/de/')

            video_id = re.search('(?<=\/)(\d*?)(?=-)', url).group()
            if not video_id:
                return sources

            # load player
            query = self.get_player % (video_id)
            query = urlparse.urljoin(self.base_link, query)
            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'le-server'})

            # for each hoster
            for i in r:
                hoster = dom_parser.parse_dom(i,
                                              'div',
                                              attrs={'class': 'les-title'})
                hoster = dom_parser.parse_dom(hoster, 'strong')
                hoster = hoster[0][1]

                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                links = dom_parser.parse_dom(i,
                                             'a',
                                             attrs={'class': 'ep-item'})

                # for each link
                for i in links:
                    if '1080p' in i[0]['title']:
                        quality = '1080p'
                    elif 'HD' in i[0]['title']:
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    url = i[0]['id']
                    if not url: continue

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': url,
                        'direct': False,
                        'debridonly': False,
                        'checkquality': True
                    })

            return sources
        except:
            return sources
Example #7
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            data.update({'raw': 'true', 'language': 'de'})
            data = urllib.urlencode(data)
            data = client.request(urlparse.urljoin(self.base_link, self.request_link), post=data)
            data = json.loads(data)
            data = [i[1] for i in data[1].items()]
            data = [(i['name'].lower(), i['links']) for i in data]

            for host, links in data:
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid: continue

                for link in links:
                    try:sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': link['URL'], 'direct': False, 'debridonly': False})
                    except: pass

            return sources
        except:
            return sources
Example #8
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data.get('url')
            episode = int(data.get('episode', 1))

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = {'': dom_parser.parse_dom(r, 'div', attrs={'id': 'gerdub'}), 'subbed': dom_parser.parse_dom(r, 'div', attrs={'id': 'gersub'})}

            for info, data in r.iteritems():
                data = dom_parser.parse_dom(data, 'tr')
                data = [dom_parser.parse_dom(i, 'a', req='href') for i in data if dom_parser.parse_dom(i, 'a', attrs={'id': str(episode)})]
                data = [(link.attrs['href'], dom_parser.parse_dom(link.content, 'img', req='src')) for i in data for link in i]
                data = [(i[0], i[1][0].attrs['src']) for i in data if i[1]]
                data = [(i[0], re.findall('/(\w+)\.\w+', i[1])) for i in data]
                data = [(i[0], i[1][0]) for i in data if i[1]]

                for link, hoster in data:
                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #9
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            season = data.get('season')
            episode = data.get('episode')

            if season and episode:
                r = urllib.urlencode({
                    'imdbid': data['imdb'],
                    'language': 'de',
                    'season': season,
                    'episode': episode
                })
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.hoster_link),
                                   XHR=True,
                                   post=r)
            else:
                r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'class':
                                                      'linkbox'})[0].content
            r = re.compile('(<a.+?/a>)', re.DOTALL).findall(r)
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(
                      i,
                      'img',
                      attrs={'class': re.compile('.*linkbutton')},
                      req='class')) for i in r]
            r = [(i[0][0].attrs['href'], i[1][0].attrs['class'].lower())
                 for i in r if i[0] and i[1]]
            r = [(i[0].strip(), 'HD' if i[1].startswith('hd') else 'SD')
                 for i in r]

            for url, quli in r:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': quli,
                    'language': 'de',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #10
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            quality = dom_parser.parse_dom(
                r, 'span',
                attrs={'id': 'release_text'})[0].content.split('&nbsp;')[0]
            quality, info = source_utils.get_release_quality(quality)

            r = dom_parser.parse_dom(r,
                                     'ul',
                                     attrs={'class': 'currentStreamLinks'})
            r = [(dom_parser.parse_dom(i, 'p', attrs={'class': 'hostName'}),
                  dom_parser.parse_dom(i,
                                       'a',
                                       attrs={'class': 'stream-src'},
                                       req='data-id')) for i in r]
            r = [(re.sub(' hd$', '', i[0][0].content.lower()),
                  [x.attrs['data-id'] for x in i[1]]) for i in r
                 if i[0] and i[1]]

            for hoster, id in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'info': ' | '.join(info + ['' if len(id) == 1 else 'multi-part']),
                    'url': id,
                    'direct': False,
                    'debridonly': False,
                    'checkquality': True
                })

            return sources
        except:
            return sources
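Examples #10 and #11 delegate quality detection to source_utils.get_release_quality, a helper from the add-on itself that is not shown on this page. Purely as a rough assumption of what such a mapping typically looks like (the real helper also returns extra info tags):

# Hedged sketch (assumption): map a release label to the quality strings used here.
def guess_quality(release):
    release = release.lower()
    if '2160' in release or '4k' in release:
        return '4K'
    if '1080' in release:
        return '1080p'
    if '720' in release or 'hd' in release:
        return 'HD'
    return 'SD'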
Example #11
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))

            links = dom_parser.parse_dom(r, 'table')
            links = [i.content for i in links if dom_parser.parse_dom(i, 'span', attrs={'class': re.compile('linkSearch(-a)?')})]
            links = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(links))
            links = [dom_parser.parse_dom(i, 'a', req='href') for i in links if re.findall('(.+?)\s*\(\d+\)\s*<', i)]
            links = [i[0].attrs['href'] for i in links if i]

            url = re.sub('/streams-\d+', '', url)

            for link in links:
                if '/englisch/' in link: continue
                control.sleep(3000)
                if link != url: r = client.request(urlparse.urljoin(self.base_link, link))

                quality = 'SD'
                info = []

                detail = dom_parser.parse_dom(r, 'th', attrs={'class': 'thlink'})
                detail = [dom_parser.parse_dom(i, 'a', req='href') for i in detail]
                detail = [(i[0].attrs['href'], i[0].content.replace('&#9654;', '').strip()) for i in detail if i]

                if detail:
                    quality, info = source_utils.get_release_quality(detail[0][1])
                    r = client.request(urlparse.urljoin(self.base_link, detail[0][0]))

                r = dom_parser.parse_dom(r, 'table')
                r = [dom_parser.parse_dom(i, 'a', req=['href', 'title']) for i in r if not dom_parser.parse_dom(i, 'table')]
                r = [(l.attrs['href'], l.attrs['title']) for i in r for l in i if l.attrs['title']]

                info = ' | '.join(info)

                for stream_link, hoster in r:
                    valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                    if not valid: continue

                    direct = False

                    if hoster.lower() == 'gvideo':
                        direct = True

                    sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': stream_link, 'info': info, 'direct': direct, 'debridonly': False, 'checkquality': True})

            return sources
        except:
            return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            url = urlparse.urljoin(self.base_link, url)
            for i in range(3):
                result = client.request(url, timeout=10)
                if result is not None: break

            dom = dom_parser.parse_dom(result,
                                       'div',
                                       attrs={
                                           'class': 'links',
                                           'id': 'noSubs'
                                       })
            result = dom[0].content

            links = re.compile(
                '<tr\s*>\s*<td><i\s+class="fa fa-youtube link-logo"></i>([^<]+).*?href="([^"]+)"\s+class="watch',
                re.DOTALL).findall(result)
            for link in links[:5]:
                try:
                    url2 = urlparse.urljoin(self.base_link, link[1])
                    for i in range(2):
                        result2 = client.request(url2, timeout=3)
                        if result2 is not None: break
                    r = re.compile('href="([^"]+)"\s+class="action-btn'
                                   ).findall(result2)[0]
                    valid, hoster = source_utils.is_host_valid(r, hostDict)
                    if not valid: continue
                    #log_utils.log('JairoxDebug1: %s - %s' % (url2,r), log_utils.LOGDEBUG)
                    urls, host, direct = source_utils.check_directstreams(
                        r, hoster)
                    for x in urls:
                        sources.append({
                            'source': host,
                            'quality': x['quality'],
                            'language': 'en',
                            'url': x['url'],
                            'direct': direct,
                            'debridonly': False
                        })

                except:
                    #traceback.print_exc()
                    pass

            #log_utils.log('JairoxDebug2: %s' % (str(sources)), log_utils.LOGDEBUG)
            return sources
        except:
            return sources
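Example #12 retries client.request a few times before parsing, since the site can time out. The same idea as a small generic helper, sketched under the assumption that the fetch callable returns None on failure (as client.request does in these scrapers):

# Hedged sketch: retry a fetch that signals failure by returning None.
def request_with_retries(fetch, url, attempts=3, timeout=10):
    result = None
    for _ in range(attempts):
        result = fetch(url, timeout=timeout)
        if result is not None:
            break
    return result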
Example #13
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'TpRwCont'})
            r = dom_parser.parse_dom(r, 'main')

            options1 = dom_parser.parse_dom(r, 'li', attrs={'class': 'STPb'})
            options2 = dom_parser.parse_dom(r,
                                            'div',
                                            attrs={'class': 'TPlayerTb'})

            for o1, o2 in itertools.izip(options1, options2):
                if 'trailer' in o1[1].lower():
                    continue
                elif '1080p' in o1[1].lower():
                    quality = '1080p'
                elif '720p' in o1[1].lower():
                    quality = 'HD'
                else:
                    quality = 'SD'

                s = '(?<=src=\")(.*?)(?=\")'
                if re.match(s, o2[1]) is not None:
                    url = re.search(s, o2[1]).group()
                else:
                    h = HTMLParser.HTMLParser()
                    h = h.unescape(o2[1])
                    url = re.search(s, h).group()

                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #14
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rel = dom_parser.parse_dom(r, 'div', attrs={'id': 'info'})
            rel = dom_parser.parse_dom(rel,
                                       'div',
                                       attrs={'itemprop': 'description'})
            rel = dom_parser.parse_dom(rel, 'p')
            rel = [re.sub('<.+?>|</.+?>', '', i.content) for i in rel]
            rel = [re.findall('release:\s*(.*)', i, re.I) for i in rel]
            rel = [source_utils.get_release_quality(i[0]) for i in rel if i]
            quality, info = (rel[0]) if rel else ('SD', [])

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'links'})
            r = dom_parser.parse_dom(r, 'table')
            r = dom_parser.parse_dom(r, 'tr', attrs={'id': re.compile('\d+')})
            r = [dom_parser.parse_dom(i, 'td') for i in r]
            r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1].content).strip())
                 for i in r if len(i) >= 2]
            r = [(dom_parser.parse_dom(i[0], 'a', req='href'), i[1])
                 for i in r]
            r = [(i[0][0].attrs['href'], i[1]) for i in r if i[0]]

            info = ' | '.join(info)

            for link, hoster in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False,
                    'checkquality': True
                })

            return sources
        except:
            return sources
Example #15
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'ko-bind'})
            r = dom_parser.parse_dom(r,
                                     'table',
                                     attrs={'class': 'links-table'})
            r = dom_parser.parse_dom(r, 'tbody')
            r = dom_parser.parse_dom(r, 'tr')

            for i in r:
                if re.search('(?<=<td>)(HD)(?=</td>)', i[1]):
                    quality = 'HD'
                else:
                    quality = 'SD'

                x = dom_parser.parse_dom(i,
                                         'td',
                                         attrs={'class': 'name'},
                                         req='data-bind')

                hoster = re.search("(?<=>).*$", x[0][1])
                hoster = hoster.group().lower()

                url = re.search("http(.*?)(?=')", x[0][0]['data-bind'])
                url = url.group()

                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'de',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #16
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            id = data.get('id')
            season = data.get('season')
            episode = data.get('episode')

            if season and episode:
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.get_episodes),
                                   post={
                                       'series_id': id,
                                       'mlang': 'de',
                                       'season': season,
                                       'episode': episode
                                   })
                r = json.loads(r).get('episode_links', [])
                r = [([i.get('id')], i.get('hostername')) for i in r]
            else:
                data.update({'lang': 'de'})
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.get_links),
                                   post=data)
                r = json.loads(r).get('links', [])
                r = [(i.get('ids'), i.get('hoster')) for i in r]

            for link_ids, hoster in r:
                valid, host = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                for link_id in link_ids:
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'de',
                        'url': self.out_link % (link_id, hoster),
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except:
            return sources
Example #17
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'downloads'})
            r = dom_parser.parse_dom(r, 'table')
            r = dom_parser.parse_dom(r, 'tbody')
            r = dom_parser.parse_dom(r, 'tr')

            for i in r:

                if re.search('German', i[1]):

                    hoster = re.search('(?<=domain=)(.*?)(?=\")', i[1])
                    hoster = hoster.group().lower()

                    valid, hoster = source_utils.is_host_valid(
                        hoster, hostDict)
                    if not valid: continue

                    link = re.search('(?<=links/)(.*?)(?=/)', i[1])
                    link = link.group()

                    if re.search('<td>HD</td>', i[1]):
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    url = self.__get_link(link)

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })

            return sources
        except:
            return sources
Example #18
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'player'})
            r = dom_parser.parse_dom(r, 'iframe', req='src')
            r = client.request(r[0][0]['src'])
            r = dom_parser.parse_dom(r,
                                     'a',
                                     attrs={'class': 'play_container'},
                                     req='href')
            r = client.request(r[0][0]['href'])
            url = self.get_link % (
                re.search('(?<=var id = \")(.*?)(?=\")', r).group(),
                re.search('(?<=var links = \")(.*?)(?=\")', r).group())
            r = client.request(url)
            r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'articleList'})
            r = dom_parser.parse_dom(r, 'a')

            for i in r:
                if 'http' in i[0]['href']:
                    link = i[0]['href']
                elif 'http' in i[0]['onclick']:
                    link = re.search('http(.*?)(?=\")',
                                     i[0]['onclick']).group()
                else:
                    continue

                valid, hoster = source_utils.is_host_valid(link, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #19
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url', ''))
            episode = data.get('episode')

            r = client.request(url)
            r = r.replace('\n', ' ')
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'fullstory'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'row'})

            if episode:
                r = dom_parser.parse_dom(r, 'select', attrs={'id': 'series'})
                r = dom_parser.parse_dom(r, 'option', req='value')
                r = [(i.attrs['value'], i.content) for i in r]
                r = [(i[0],
                      re.findall('\s+(\d+)\s+episode', i[1], re.IGNORECASE))
                     for i in r]
                r = [i[0].strip() for i in r if i[1] and episode in i[1]]
            else:
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'inner'})
                r = dom_parser.parse_dom(r, 'a', req='href')
                r = [i.attrs['href'].strip() for i in r]

            for link in r:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #20
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))

            r = dom_parser.parse_dom(r,
                                     'div',
                                     attrs={'class': 'hosterSiteVideo'})
            r = dom_parser.parse_dom(
                r, 'li', attrs={'data-lang-key': re.compile('[13]')})
            r = [(dom_parser.parse_dom(i, 'a', req='href'),
                  dom_parser.parse_dom(i, 'h4'),
                  'subbed' if i.attrs['data-lang-key'] == '3' else '')
                 for i in r]
            r = [(i[0][0].attrs['href'], i[1][0].content.lower(), i[2])
                 for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1],
                  re.findall('(.+?)\s*<br\s*/?>(.+?)$', i[1], re.DOTALL), i[2])
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '', i[3]) for i in r]
            r = [(i[0], i[1], 'HD' if 'hosterhdvideo' in i[2] else 'SD', i[3])
                 for i in r]

            for link, host, quality, info in r:
                valid, host = source_utils.is_host_valid(host, hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'de',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #21
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = r.replace('\\"', '"')

            links = dom_parser.parse_dom(r,
                                         'tr',
                                         attrs={'id': 'tablemoviesindex2'})

            for i in links:
                try:
                    host = dom_parser.parse_dom(i, 'img',
                                                req='alt')[0].attrs['alt']
                    host = host.split()[0].rsplit('.', 1)[0].strip().lower()
                    host = host.encode('utf-8')

                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid: continue

                    url = dom_parser.parse_dom(i, 'a',
                                               req='href')[0].attrs['href']
                    url = client.replaceHTMLCodes(url)
                    url = urlparse.urljoin(self.base_link, url)
                    url = url.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'de',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #22
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            season = data.get('season')
            episode = data.get('episode')

            r = client.request(url)

            if season and episode:
                r = dom_parser.parse_dom(r, 'select', attrs={'id': 'SeasonSelection'}, req='rel')[0]
                r = client.replaceHTMLCodes(r.attrs['rel'])[1:]
                r = urlparse.parse_qs(r)
                r = dict([(i, r[i][0]) if r[i] else (i, '') for i in r])
                r = urlparse.urljoin(self.base_link, self.get_links_epi % (r['Addr'], r['SeriesID'], season, episode))
                r = client.request(r)

            r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'HosterList'})[0]
            r = dom_parser.parse_dom(r, 'li', attrs={'id': re.compile('Hoster_\d+')}, req='rel')
            r = [(client.replaceHTMLCodes(i.attrs['rel']), i.content) for i in r if i[0] and i[1]]
            r = [(i[0], re.findall('class="Named"[^>]*>([^<]+).*?(\d+)/(\d+)', i[1])) for i in r]
            r = [(i[0], i[1][0][0].lower().rsplit('.', 1)[0], i[1][0][2]) for i in r if len(i[1]) > 0]

            for link, hoster, mirrors in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue
                u = urlparse.parse_qs('&id=%s' % link)
                u = dict([(x, u[x][0]) if u[x] else (x, '') for x in u])
                for x in range(0, int(mirrors)):
                    url = self.mirror_link % (u['id'], u['Hoster'], x + 1)
                    if season and episode: url += "&Season=%s&Episode=%s" % (season, episode)
                    try: sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
                    except: pass

            return sources
        except:
            return sources
Example #23
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'ul', attrs={'id': 'mainmenu'})
            r = dom_parser.parse_dom(r, 'li')

            for i in r:
                i = dom_parser.parse_dom(i, 'a')
                i = i[0][0]['href']
                i = client.request(i)
                i = dom_parser.parse_dom(i,
                                         'select',
                                         attrs={'id': 'selecthost'})
                i = dom_parser.parse_dom(i, 'option')

                for x in i:
                    hoster = re.search('^\S*', x[1]).group().lower()
                    url = x[0]['value']

                    valid, hoster = source_utils.is_host_valid(
                        hoster, hostDict)
                    if not valid: continue

                    sources.append({
                        'source': hoster,
                        'quality': 'SD',
                        'language': 'de',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })

            return sources
        except:
            return sources
Example #24
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'tab-plot_german'})
            r = dom_parser.parse_dom(r, 'tbody')
            r = dom_parser.parse_dom(r, 'tr')

            for i in r:
                if re.search('(?<=">)(\n.*?)(?=<\/a>)', i[1]).group().strip():
                    hoster = re.search('(?<=">)(\n.*?)(?=<\/a>)',
                                       i[1]).group().strip()
                    link = re.search('(?<=href=\")(.*?)(?=\")', i[1]).group()
                    rel = re.search(
                        '(?<=oddCell qualityCell">)(\n.*?)(?=<\/td>)',
                        i[1]).group().strip()
                    quality, info = source_utils.get_release_quality(rel)
                    if not quality:
                        quality = 'SD'

                    valid, hoster = source_utils.is_host_valid(
                        hoster, hostDict)
                    if not valid: continue

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })

            return sources
        except:
            return sources
Example #25
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            tvshowtitle = data.get('tvshowtitle')
            localtvshowtitle = data.get('localtvshowtitle')
            aliases = source_utils.aliases_to_array(eval(data['aliases']))
            episode = tvmaze.tvMaze().episodeAbsoluteNumber(
                data.get('tvdb'), int(data.get('season')),
                int(data.get('episode')))

            alt_title = anilist.getAlternativTitle(tvshowtitle)
            links = self.__search([alt_title] + aliases, episode)
            if not links and localtvshowtitle != alt_title:
                links = self.__search([localtvshowtitle] + aliases, episode)
            if not links and tvshowtitle != localtvshowtitle:
                links = self.__search([tvshowtitle] + aliases, episode)

            for link in links:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #26
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r,
                                     'table',
                                     attrs={'class': 'stream_links'})
            r = dom_parser.parse_dom(r, 'tr')
            r = [(dom_parser.parse_dom(i, 'td'),
                  dom_parser.parse_dom(i,
                                       'td',
                                       attrs={'class': 'hide-for-small-only'}))
                 for i in r]
            r = [(dom_parser.parse_dom(i[0][0], 'a',
                                       req='href'), i[1][0].content.lower())
                 for i in r if i[0] and i[1]]
            r = [(i[0][0].attrs['href'], i[1]) for i in r if i[0]]

            for link, hoster in r:
                valid, hoster = source_utils.is_host_valid(hoster, hostDict)
                if not valid: continue

                sources.append({
                    'source': hoster,
                    'quality': 'SD',
                    'language': 'de',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #27
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i,
                                          'a',
                                          attrs={'class': 'options'},
                                          req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
            r = [(re.findall('link"?\s*:\s*"(.+?)"',
                             ''.join([x.content for x in i])),
                  dom_parser.parse_dom(i,
                                       'iframe',
                                       attrs={'class': 'metaframe'},
                                       req='src')) for i in r]
            r = [
                i[0][0] if i[0] else i[1][0].attrs['src'] for i in r
                if i[0] or i[1]
            ]

            for i in r:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)
                    if not i.startswith('http'): i = self.__decode_hash(i)
                    if 'play.seriesever' in i:
                        i = client.request(i)
                        i = dom_parser.parse_dom(i, 'iframe', req='src')
                        if len(i) < 1: continue
                        i = i[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls, host, direct = source_utils.check_directstreams(
                        i, host)

                    for x in urls:
                        sources.append({
                            'source': host,
                            'quality': x['quality'],
                            'language': 'de',
                            'url': x['url'],
                            'direct': direct,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example #28
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, output='extended')

            headers = r[3]
            headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
            r = r[0]

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if '/play/' in i: i = urlparse.urljoin(self.base_link, i)

                    if self.domains[0] in i:
                        i = client.request(i, headers=headers, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try: i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x))).replace('\\', '')
                            except: pass

                        for x in re.findall('(eval\s*\(function.*?)</script>', i, re.DOTALL):
                            try: i += jsunpack.unpack(x).replace('\\', '')
                            except: pass

                        links = [(match[0], match[1]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i, re.DOTALL)]
                        links = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in links if '/no-video.mp4' not in x[0]]

                        doc_links = [directstream.google('https://drive.google.com/file/d/%s/view' % match) for match in re.findall('''file:\s*["'](?:[^"']+youtu.be/([^"']+))''', i, re.DOTALL)]
                        doc_links = [(u['url'], u['quality']) for x in doc_links if x for u in x]
                        links += doc_links

                        for url, quality in links:
                            if self.base_link in url:
                                url = url + '|Referer=' + self.base_link

                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'debridonly': False})
                    else:
                        try:
                            # as long as resolveurl get no Update for this URL (So just a Temp-Solution)
                            did = re.findall('youtube.googleapis.com.*?docid=(\w+)', i)
                            if did: i = 'https://drive.google.com/file/d/%s/view' % did[0]

                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            urls, host, direct = source_utils.check_directstreams(i, host)

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Example #29
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url', ''))
            imdb = data.get('imdb')
            season = data.get('season')
            episode = data.get('episode')

            if season and episode and imdb:
                r = urllib.urlencode({
                    'val': 's%se%s' % (season, episode),
                    'IMDB': imdb
                })
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.episode_link),
                                   XHR=True,
                                   post=r)
            else:
                r = client.request(url)

            l = dom_parser.parse_dom(r, 'select', attrs={'id': 'sel_sprache'})
            l = dom_parser.parse_dom(l, 'option', req='id')

            r = [(dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['id']}))
                 for i in l if i.attrs['id'] == 'deutsch']
            r = [(i[0], dom_parser.parse_dom(i[0], 'option', req='id'))
                 for i in r]
            r = [(id.attrs['id'],
                  dom_parser.parse_dom(content,
                                       'div',
                                       attrs={'id': id.attrs['id']}))
                 for content, ids in r for id in ids]
            r = [(re.findall('hd(\d{3,4})',
                             i[0]), dom_parser.parse_dom(i[1], 'a',
                                                         req='href'))
                 for i in r if i[1]]
            r = [(i[0][0] if i[0] else '0', [x.attrs['href'] for x in i[1]])
                 for i in r if i[1]]
            r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]

            for quality, urls in r:
                for link in urls:
                    try:
                        data = urlparse.parse_qs(urlparse.urlparse(link).query,
                                                 keep_blank_values=True)

                        if 'm' in data:
                            data = data.get('m')[0]
                            link = base64.b64decode(data)

                        link = link.strip()

                        valid, host = source_utils.is_host_valid(
                            link, hostDict)
                        if not valid: continue

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'de',
                            'url': link,
                            'direct': False,
                            'debridonly': False,
                            'checkquality': True
                        })
                    except:
                        pass

            return sources
        except:
            return sources
Example #30
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            for item_id, episode, content_type in self.__get_episode(
                    data.get('url'), data.get('episode')):
                stream_link = urlparse.urljoin(
                    self.base_link,
                    '/watch/%s/%s/%s' % (item_id, episode, content_type))

                info = 'subbed' if content_type.endswith('sub') else ''

                r = client.request(stream_link)

                r = dom_parser.parse_dom(r, 'script')
                r = ' '.join([i.content for i in r if i.content])
                r = json.loads(
                    re.findall('var\s*streams\s*=\s*(\[.*?\])\s*;', r)[0])
                r = [(i.get('replace'), i.get('code')) for i in r]
                r = [(i[0].replace('#', i[1])) for i in r if i[0] and i[1]]

                for stream_link in r:
                    if stream_link.startswith('/'):
                        stream_link = 'http:%s' % stream_link

                    if self.domains[0] in stream_link:
                        stream_link = client.request(stream_link,
                                                     cookie=urllib.urlencode({
                                                         'proxerstream_player':
                                                         'flash'
                                                     }))

                        i = [(match[0], match[1]) for match in re.findall(
                            '''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*width\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''',
                            stream_link, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'),
                              source_utils.label_to_quality(x[1])) for x in i]

                        for url, quality in i:
                            sources.append({
                                'source': 'cdn',
                                'quality': quality,
                                'language': 'de',
                                'url': url,
                                'info': info,
                                'direct': True,
                                'debridonly': False
                            })
                    else:
                        valid, host = source_utils.is_host_valid(
                            stream_link, hostDict)
                        if not valid: continue

                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'de',
                            'url': stream_link,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })

            return sources
        except:
            return sources
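Every example above filters hosters through source_utils.is_host_valid, which belongs to the add-on's own helper module and is not reproduced on this page. Purely as an illustration of what such a check typically does (matching a hoster name or URL against the enabled host list), a hedged sketch follows; the real implementation may differ.

# Hedged sketch (assumption): match a hoster name or URL against the allowed hosts.
import re

def is_host_valid(url_or_name, host_list):
    host = url_or_name.lower()
    match = re.search(r'(?:\w+\.)*(\w+\.\w+)/', host + '/')
    if match:
        host = match.group(1)
    for known in host_list:
        known = known.lower()
        if known in host or host in known:
            return True, known
    return False, host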