Code Example #1
def __get_moonwalk(url, ref, info=''):
    try:
        host = urlparse.urlparse(url)
        host = '%s://%s' % (host.scheme, host.netloc)

        r = client.request(url, referer=ref, output='extended')

        headers = r[3]
        headers.update({'Cookie': r[2].get('Set-Cookie')})
        r = r[0]

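        # pull the CSRF token plus the page's randomly named companion token; both go back as request headers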
        csrf = re.findall('name="csrf-token" content="(.*?)"', r)[0]
        story = re.findall(
            '''["']X-CSRF-Token["']\s*:\s*[^,]+,\s*["']([\w\-]+)["']\s*:\s*["'](\w+)["']''',
            r)[0]
        headers.update({'X-CSRF-Token': csrf, story[0]: story[1]})

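        # strip "a" + "b" string-concatenation obfuscation inside window[...] expressions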
        for i in re.findall('window\[(.*?)\]', r):
            r = r.replace(i, re.sub('''["']\s*\+\s*["']''', '', i))

        varname, post_url = re.findall(
            '''var\s*(\w+)\s*=\s*["'](.*?/all/?)["']\s*;''', r)[0]
        jsid = re.findall('''\.post\(\s*%s\s*,\s*([^(\);)]+)''' % varname,
                          r)[0]

        jsdata = re.findall('(?:var\s*)?%s\s*=\s*({.*?})' % re.escape(jsid), r,
                            re.DOTALL)[0]
        jsdata = re.sub(r'([\{\s,])(\w+)(:)', r'\1"\2"\3', jsdata)
        jsdata = re.sub(r'''(?<=:)\s*\'''', ' "', jsdata)
        jsdata = re.sub(r'''(?<=\w)\'''', '"', jsdata)
        jsdata = re.sub(''':\s*\w+\s*\?[^,}]+''', ': 0', jsdata)
        jsdata = re.sub(''':\s*[a-zA-Z]+[^,}]+''', ': 0', jsdata)
        jsdata = json.loads(jsdata)

        mw_key = re.findall('''var\s*mw_key\s*=\s*["'](\w+)["']''', r)[0]
        newatt = re.findall(
            '''%s\[["']([^=]+)["']\]\s*=\s*["']([^;]+)["']''' %
            re.escape(jsid), r)[0]
        newatt = [re.sub('''["']\s*\+\s*["']''', '', i) for i in newatt]

        jsdata.update({'mw_key': mw_key, newatt[0]: newatt[1]})

        r = client.request(urlparse.urljoin(host, post_url),
                           post=jsdata,
                           headers=headers,
                           XHR=True)
        r = json.loads(r).get('mans', {}).get('manifest_m3u8')

        r = client.request(r, headers=headers)

        r = [(i[0], i[1]) for i in re.findall(
            '#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+).*?(http.*?(?:\.abst|\.f4m|\.m3u8)).*?',
            r, re.DOTALL) if i]
        r = [(source_utils.label_to_quality(i[0]),
              i[1] + '|%s' % urllib.urlencode(headers)) for i in r]
        r = [{'quality': i[0], 'url': i[1], 'info': info} for i in r]

        return r
    except:
        return []
Code Example #2
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                r = self.scraper.get(url).content

                y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]
                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()
            else:
                r = self.scraper.get(url).content

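            # scrape file/label pairs out of the inline player setup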
            result = re.findall('''['"]file['"]:['"]([^'"]+)['"],['"]label['"]:['"]([^'"]+)''', r)

            for i in result:
                url = i[0].replace('\/', '/')
                sources.append({'source': 'gvideo', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code Example #3
File: tata.py  Project: CYBERxNUKE/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)
            url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])

            headers = {'Referer': ref, 'User-Agent': client.randomagent()}

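            # the AJAX endpoint answers with base64-encoded JSON carrying a 'playinfo' payload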
            result = client.request(url, headers=headers, post='')
            result = base64.decodestring(result)
            result = json.loads(result).get('playinfo', [])

            if isinstance(result, basestring):
                result = result.replace('embed.html', 'index.m3u8')

                base_url = re.sub('index\.m3u8\?token=[\w\-]+', '', result)

                r = client.request(result, headers=headers)
                r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
                r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
                r = [{'quality': i[0], 'url': base_url+i[1]} for i in r]
                for i in r: sources.append({'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False})
            elif result:
                result = [i.get('link_mp4') for i in result]
                result = [i for i in result if i]
                for i in result:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass

            return sources
        except:
            return sources
Code Example #4
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                r = client.request(url)

                y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]
                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()
            else:
                r = client.request(url)


            result = re.findall('''['"]file['"]:['"]([^'"]+)['"],['"]label['"]:['"]([^'"]+)''', r)

            for i in result:
                url = i[0].replace('\/', '/')
                sources.append({'source': 'gvideo', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code Example #5
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

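            # the cached config yields the streamer base URL; append the id to build the HLS master playlist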
            r = cache.get(client.request,
                          4,
                          urlparse.urljoin(self.base_link, self.conf_link),
                          XHR=True)
            r = json.loads(r).get('streamer')
            r = cache.get(client.request,
                          4,
                          r + '%s.mp4/master.m3u8' % url,
                          XHR=True)

            r = re.findall('RESOLUTION\s*=\s*\d+x(\d+).*?\n(http.*?)(?:\n|$)',
                           r, re.IGNORECASE)
            r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]

            for quality, link in r:
                sources.append({
                    'source': 'CDN',
                    'quality': quality,
                    'language': 'de',
                    'url': link,
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            source_faultlog.logFault(__name__, source_faultlog.tagScrape, url)
            return sources
Code Example #6
File: imdark.py  Project: vphuc81/MyRepository
    def sources(self, url, hostDict, locDict):
        sources = []

        try:
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            #query = urlparse.urljoin(self.base_link, self.ajax_link)            
            #post = urllib.urlencode({'action':'sufi_search', 'search_string': title})
            
            result = client.request(query)
            r = client.parseDOM(result, 'div', attrs={'id':'showList'})
            r = re.findall(r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])     
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0]
            url = r[0]                     
            result = client.request(url)
            r = re.findall(r'video\s+id="\w+.*?src="([^"]+)".*?data-res="([^"]+)',result,re.DOTALL)
            
            for i in r:                
                try:
                    q = source_utils.label_to_quality(i[1])
                    sources.append({'source': 'CDN', 'quality': q, 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except Exception as e:
            return sources
Code Example #7
File: tata.py  Project: hpduong/retropie_configs
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)
            url = urlparse.urljoin(self.base_link, self.ajax_link % re.findall('-(\w+)$', ref)[0])

            headers = {'Referer': ref, 'User-Agent': client.randomagent()}

            result = client.request(url, headers=headers, post='')
            result = base64.decodestring(result)
            result = json.loads(result).get('playinfo', [])

            if isinstance(result, basestring):
                result = result.replace('embed.html', 'index.m3u8')

                base_url = re.sub('index\.m3u8\?token=[\w\-]+[^/$]*', '', result)

                r = client.request(result, headers=headers)
                r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+)[^\n]+\n([^\n]+)', r, re.DOTALL) if i]
                r = [(source_utils.label_to_quality(i[0]), i[1] + source_utils.append_headers(headers)) for i in r]
                r = [{'quality': i[0], 'url': base_url+i[1]} for i in r]
                for i in r: sources.append({'source': 'CDN', 'quality': i['quality'], 'language': 'de', 'url': i['url'], 'direct': True, 'debridonly': False})
            elif result:
                result = [i.get('link_mp4') for i in result]
                result = [i for i in result if i]
                for i in result:
                    try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'de', 'url': i, 'direct': True, 'debridonly': False})
                    except: pass

            return sources
        except:
            return sources
Code Example #8
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data.get('url')
            episode = int(data.get('episode', 1))

            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'streams'})

            rels = dom_parser.parse_dom(r, 'ul', attrs={'class': 'nav'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = dom_parser.parse_dom(rels, 'a', attrs={'href': re.compile('#stream_\d*')}, req='href')
            rels = [(re.findall('stream_(\d+)', i.attrs['href']), re.findall('flag-(\w{2})', i.content)) for i in rels if i]
            rels = [(i[0][0], ['subbed'] if i[1][0] != 'de' else []) for i in rels if i[0] and 'de' in i[1]]

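            # each stream_<id> tab is one German release; walk its episode tables for resolution info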
            for id, info in rels:
                rel = dom_parser.parse_dom(r, 'div', attrs={'id': 'stream_%s' % id})
                rel = [(dom_parser.parse_dom(i, 'div', attrs={'id': 'streams_episodes_%s' % id}), dom_parser.parse_dom(i, 'tr')) for i in rel]
                rel = [(i[0][0].content, [x for x in i[1] if 'fa-desktop' in x.content]) for i in rel if i[0] and i[1]]
                rel = [(i[0], dom_parser.parse_dom(i[1][0].content, 'td')) for i in rel if i[1]]
                rel = [(i[0], re.findall('\d{3,4}x(\d{3,4})$', i[1][0].content)) for i in rel if i[1]]
                rel = [(i[0], source_utils.label_to_quality(i[1][0])) for i in rel if len(i[1]) > 0]

                for html, quality in rel:
                    try:
                        s = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_\d+' % id)})
                        s = [(dom_parser.parse_dom(i, 'div', attrs={'data-loop': re.compile('\d+')}, req='data-loop'), dom_parser.parse_dom(i, 'span')) for i in s]
                        s = [(i[0][0].attrs['data-loop'], [x.content for x in i[1] if '<strong' in x.content]) for i in s if i[0]]
                        s = [(i[0], re.findall('<.+?>(\d+)</.+?> (.+?)$', i[1][0])) for i in s if len(i[1]) > 0]
                        s = [(i[0], i[1][0]) for i in s if len(i[1]) > 0]
                        s = [(i[0], int(i[1][0]), re.findall('Episode (\d+):', i[1][1], re.IGNORECASE)) for i in s if len(i[1]) > 1]
                        s = [(i[0], i[1], int(i[2][0]) if len(i[2]) > 0 else -1) for i in s]
                        s = [(i[0], i[2] if i[2] >= 0 else i[1]) for i in s]
                        s = [i[0] for i in s if i[1] == episode][0]

                        enc = dom_parser.parse_dom(html, 'div', attrs={'id': re.compile('streams_episodes_%s_%s' % (id, s))}, req='data-enc')[0].attrs['data-enc']

                        hosters = dom_parser.parse_dom(html, 'a', attrs={'href': re.compile('#streams_episodes_%s_%s' % (id, s))})
                        hosters = [dom_parser.parse_dom(i, 'i', req='class') for i in hosters]
                        hosters = [re.findall('hoster-(\w+)', ' '.join([x.attrs['class'] for x in i])) for i in hosters if i][0]
                        hosters = [(source_utils.is_host_valid(re.sub('(co|to|net|pw|sx|tv|moe|ws|icon)$', '', i), hostDict), i) for i in hosters]
                        hosters = [(i[0][1], i[1]) for i in hosters if i[0] and i[0][0]]

                        inf = ' | '.join(info)

                        for source, hoster in hosters:
                            sources.append({'source': source, 'quality': quality, 'language': 'de', 'url': [enc, hoster], 'info': inf, 'direct': False, 'debridonly': False, 'checkquality': True})
                    except:
                        pass

            return sources
        except:
            return sources
Code Example #9
File: hdfilme.py  Project: N2Roar/roar-repository
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         r = re.findall('(\d+)-stream(?:\?episode=(\d+))?', url)
         r = [(i[0], i[1] if i[1] else '1') for i in r][0]
         r = client.request(urlparse.urljoin(self.base_link,
                                             self.get_link % r),
                            output='extended')
         headers = r[3]
         headers.update({
             'Cookie': r[2].get('Set-Cookie'),
             'Referer': self.base_link
         })
         r = r[0]
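         # restore base64 padding before decoding the embedded player config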
         r += '=' * (-len(r) % 4)
         r = base64.b64decode(r)
         i = [(match[1], match[0]) for match in re.findall(
             '''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''',
             r, re.DOTALL)]
         i += [(match[0], match[1]) for match in re.findall(
             '''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''',
             r, re.DOTALL)]
         r = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1]))
              for x in i]
         for u, q in r:
             try:
                 tag = directstream.googletag(u)
                 if tag:
                     sources.append({
                         'source': 'gvideo',
                         'quality': tag[0].get('quality', 'SD'),
                         'language': 'de',
                         'url': u,
                         'direct': True,
                         'debridonly': False
                     })
                 else:
                     sources.append({
                         'source': 'CDN',
                         'quality': q,
                         'language': 'de',
                         'url': u + '|%s' % urllib.urlencode(headers),
                         'direct': True,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources
Code Example #10
File: watchseries.py  Project: mpmendespt/repo.alado
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
         result = client.request(url, headers=self.headers, timeout=10)
         dom = dom_parser.parse_dom(result, 'a', req='data-video')
         urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('http') else 'https:' + i.attrs['data-video'] for i in dom]
         for url in urls:
             dom = []
             if 'ocloud.stream' in url:
                 result = client.request(url, headers=self.headers, timeout=10)
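                  # ocloud pages declare a <base href>; rewrite relative ./embed links against it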
                 base = re.findall('<base href="([^"]+)">', result)[0]
                 hostDict += [base]
                 dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                 dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                 dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
             if dom:
                 try:
                     for r in dom:
                         valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                         if not valid:
                             continue
                         quality = source_utils.label_to_quality(r[1])
                         urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                         for x in urls:
                             size = source_utils.get_size(x['url']) if direct else None
                             if size:
                                 sources.append(
                                     {'source': host, 'quality': quality, 'language': 'en', 'url': x['url'],
                                      'direct': direct, 'debridonly': False, 'info': size})
                             else:
                                 sources.append(
                                     {'source': host, 'quality': quality, 'language': 'en', 'url': x['url'],
                                      'direct': direct, 'debridonly': False})
                 except:
                     pass
             else:
                 if 'load.php' not in url:
                     valid, hoster = source_utils.is_host_valid(url, hostDict)
                     if valid:
                         try:
                             url.decode('utf-8')
                             if 'vidnode.net' in url:
                                 url = url.replace('vidnode.net', 'vidcloud9.com')
                                 hoster = 'vidcloud9'
                             sources.append(
                                 {'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False,
                                  'debridonly': False})
                         except:
                             pass
         return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('---WATCHSERIES Testing - Exception: \n' + str(failure))
         return sources
Code Example #11
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data.get('url')
            season = data.get('season')
            episode = data.get('episode')
            abs_episode = 0

            if season and episode:
                abs_episode = str(tvmaze.tvMaze().episodeAbsoluteNumber(data.get('tvdb'), int(season), int(episode)))

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = r.decode('cp1251').encode('utf-8')

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'players'}, req='data-player')
            r = [(i.attrs['data-player'], dom_parser.parse_dom(i, 'a', req='href')) for i in r]
            r = [(i[0], i[1][0].attrs['href']) for i in r if i[1]]

            for post_id, play_url in r:
                i = client.request(play_url, referer=url, output='extended')

                headers = i[3]
                headers.update({'Cookie': i[2].get('Set-Cookie')})

                i = client.request(urlparse.urljoin(self.base_link, self.player_link), post={'post_id': post_id}, headers=headers, referer=play_url, XHR=True)
                i = json.loads(i).get('message', {}).get('translations', {}).get('flash', {})

                for title, link in i.iteritems():
                    try:
                        link = self.decode_direct_media_url(link)

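                        # .txt links wrap a second encoded playlist; pick the file matching the (absolute) episode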
                        if link.endswith('.txt'):
                            link = self.decode_direct_media_url(client.request(link))
                            link = json.loads(link).get('playlist', [])
                            link = [i.get('playlist', []) for i in link]
                            link = [x.get('file') for i in link for x in i if (x.get('season') == season and x.get('serieId') == episode) or (x.get('season') == '0' and x.get('serieId') == abs_episode)][0]

                        urls = [(source_utils.label_to_quality(q), self.format_direct_link(link, q)) for q in self.get_qualitys(link)]
                        urls = [{'quality': x[0], 'url': x[1]} for x in urls if x[0] in ['SD', 'HD']]  # filter premium

                        for i in urls: sources.append({'source': 'CDN', 'quality': i['quality'], 'info': title, 'language': 'ru', 'url': i['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Code Example #12
File: playbox.py  Project: CYBERxNUKE/xbmc-addon
    def sources(self, url, hostDict, locDict):
        sources = []

        try:
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query, mobile=True, timeout=20, output='extended')
            r = json.loads(result[0])
            r = r['data']['films']

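            # accept releases published within one year either side of the requested year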
            years = [str(data['year']), str(int(data['year']) + 1), str(int(data['year']) - 1)]

            #print r
            if 'episode' in data:
                r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
                r = [(i,re.sub('[^0-9]', '', str(i['publishDate']))) for i in r ]
                r = [i[0] for i in r if any(x in i[1] for x in years)][0]
                result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']), mobile=True, headers=result[4], output='extended')
                r = json.loads(result[0])
                r = [i for i in r['data']['chapters'] if i['title'].replace('0','').lower() == 's%se%s' %(data['season'],data['episode'])][0]

            else:
                r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
                r = [i for i in r if any(x in i['publishDate'] for x in years)][0]
                #print r
                result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']), mobile=True, headers=result[4], output='extended')
                r = json.loads(result[0])
                r = r['data']['chapters'][0]

            result = client.request(urlparse.urljoin(self.base_link, self.stream_link % r['id']), mobile=True,
                                    headers=result[4], output='extended')
            r = json.loads(result[0])

            r = [(i['quality'], i['server'], self._decrypt(i['stream'])) for i in r['data']]
            for i in r:                
                try:
                    valid, hoster = source_utils.is_host_valid(i[2], hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(i[2], hoster)
                    for x in urls:
                        q = x['quality'] if host == 'gvideo' else source_utils.label_to_quality(i[0])
                        u = x['url'] if host == 'gvideo' else i[2]
                        sources.append({'source': host, 'quality': q, 'language': 'en', 'url': u, 'direct': direct, 'debridonly': False})       

                except:
                    pass

            return sources
        except Exception as e:
            return sources
Code Example #13
File: filmix.py  Project: mpie/repo
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = data.get('url')
            season = data.get('season')
            episode = data.get('episode')
            abs_episode = 0

            if season and episode:
                abs_episode = str(tvmaze.tvMaze().episodeAbsoluteNumber(data.get('tvdb'), int(season), int(episode)))

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = r.decode('cp1251').encode('utf-8')

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'players'}, req='data-player')
            r = [(i.attrs['data-player'], dom_parser.parse_dom(i, 'a', req='href')) for i in r]
            r = [(i[0], i[1][0].attrs['href']) for i in r if i[1]]

            for post_id, play_url in r:
                i = client.request(play_url, referer=url, output='extended')

                headers = i[3]
                headers.update({'Cookie': i[2].get('Set-Cookie')})

                i = client.request(urlparse.urljoin(self.base_link, self.player_link), post={'post_id': post_id}, headers=headers, referer=play_url, XHR=True)
                i = json.loads(i).get('message', {}).get('translations', {}).get('flash', {})

                for title, link in i.iteritems():
                    try:
                        link = self.decode_direct_media_url(link)

                        if link.endswith('.txt'):
                            link = self.decode_direct_media_url(client.request(link))
                            link = json.loads(link).get('playlist', [])
                            link = [i.get('playlist', []) for i in link]
                            link = [x.get('file') for i in link for x in i if (x.get('season') == season and x.get('serieId') == episode) or (x.get('season') == '0' and x.get('serieId') == abs_episode)][0]

                        urls = [(source_utils.label_to_quality(q), self.format_direct_link(link, q)) for q in self.get_qualitys(link)]
                        urls = [{'quality': x[0], 'url': x[1]} for x in urls if x[0] in ['SD', 'HD']]  # filter premium

                        for i in urls: sources.append({'source': 'CDN', 'quality': i['quality'], 'info': title, 'language': 'ru', 'url': i['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Code Example #14
    def sources(self, url, hostDict, locDict):
        sources = []

        try:
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)
            result = client.request(query, mobile=True, timeout=20, output='extended')
            r = json.loads(result[0])
            r = r['data']['films']

            years = [str(data['year']), str(int(data['year']) + 1), str(int(data['year']) - 1)]

            #print r
            if 'episode' in data:
                r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
                r = [(i,re.sub('[^0-9]', '', str(i['publishDate']))) for i in r ]
                r = [i[0] for i in r if any(x in i[1] for x in years)][0]
                result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']), mobile=True, headers=result[4], output='extended')
                r = json.loads(result[0])
                r = [i for i in r['data']['chapters'] if i['title'].replace('0','').lower() == 's%se%s' %(data['season'],data['episode'])][0]

            else:
                r = [i for i in r if cleantitle.get(title) == cleantitle.get(i['title'])]
                r = [i for i in r if any(x in i['publishDate'] for x in years)][0]
                #print r
                result = client.request(urlparse.urljoin(self.base_link, self.sources_link % r['id']), mobile=True, headers=result[4], output='extended')
                r = json.loads(result[0])
                r = r['data']['chapters'][0]

            result = client.request(urlparse.urljoin(self.base_link, self.stream_link % r['id']), mobile=True,
                                    headers=result[4], output='extended')
            r = json.loads(result[0])

            r = [(i['quality'], i['server'], self._decrypt(i['stream'])) for i in r['data']]
            for i in r:                
                try:
                    valid, hoster = source_utils.is_host_valid(i[2], hostDict)
                    if not valid: continue
                    urls, host, direct = source_utils.check_directstreams(i[2], hoster)
                    for x in urls:
                        q = x['quality'] if host == 'gvideo' else source_utils.label_to_quality(i[0])
                        u = x['url'] if host == 'gvideo' else i[2]
                        sources.append({'source': host, 'quality': q, 'language': 'en', 'url': u, 'direct': direct, 'debridonly': False})       

                except:
                    pass

            return sources
        except Exception as e:
            return sources
Code Example #15
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'mediaplayer'})
            r = [i.attrs['src'] for i in dom_parser.parse_dom(r, 'iframe', req='src')]

            for i in r:
                try:
                    if 'vidnow.' in i:
                        i = client.request(i, referer=url)

                        gdata = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', i, re.DOTALL)]
                        gdata += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', i, re.DOTALL)]
                        gdata = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in gdata]

                        for u, q in gdata:
                            try:
                                tag = directstream.googletag(u)

                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'de', 'url': u, 'direct': True,'debridonly': False})
                            except:
                                pass

                        i = dom_parser.parse_dom(i, 'div', attrs={'id': 'myElement'})
                        i = dom_parser.parse_dom(i, 'iframe', req='src')[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    # one if/elif chain so the final else cannot clobber direct-stream results
                    if 'google' in i:
                        host = 'gvideo'; direct = True; urls = directstream.google(i)
                        if not urls and directstream.googletag(i):
                            urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i:
                        host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i:
                        host = 'vk'; direct = True; urls = directstream.vk(i)
                    else:
                        direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code Example #16
File: dramabus.py  Project: enursha101/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'mediaplayer'})
            r = [i.attrs['src'] for i in dom_parser.parse_dom(r, 'iframe', req='src')]

            for i in r:
                try:
                    if 'vidnow.' in i:
                        i = client.request(i, referer=url)

                        gdata = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', i, re.DOTALL)]
                        gdata += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', i, re.DOTALL)]
                        gdata = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in gdata]

                        for u, q in gdata:
                            try:
                                tag = directstream.googletag(u)

                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'de', 'url': u, 'direct': True,'debridonly': False})
                            except:
                                pass

                        i = dom_parser.parse_dom(i, 'div', attrs={'id': 'myElement'})
                        i = dom_parser.parse_dom(i, 'iframe', req='src')[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    urls = []
                    # one if/elif chain so the final else cannot clobber direct-stream results
                    if 'google' in i:
                        host = 'gvideo'; direct = True; urls = directstream.google(i)
                        if not urls and directstream.googletag(i):
                            urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                    elif 'ok.ru' in i:
                        host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                    elif 'vk.com' in i:
                        host = 'vk'; direct = True; urls = directstream.vk(i)
                    else:
                        direct = False; urls = [{'quality': 'SD', 'url': i}]

                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'ko', 'url': x['url'], 'direct': direct, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code Example #17
def __get_moonwalk(url, ref, info=''):
    try:
        host = urlparse.urlparse(url)
        host = '%s://%s' % (host.scheme, host.netloc)

        r = client.request(url, referer=ref, output='extended')

        headers = r[3]
        headers.update({'Cookie': r[2].get('Set-Cookie')})
        r = r[0]

        csrf = re.findall('name="csrf-token" content="(.*?)"', r)[0]
        story = re.findall('''["']X-User-Story["']\s*:\s*["'](\w+)["']''',
                           r)[0]
        headers.update({'X-CSRF-Token': csrf, 'X-User-Story': story})

        varname = re.findall(
            '''var\s*(\w+)\s*=\s*'/sessions/new_session'\s*;''', r)[0]
        jsid = re.findall('''\.post\(\s*%s\s*,\s*(\w+)''' % varname, r)[0]

        jsdata = re.findall('var\s*%s\s*=\s*({.*?})' % jsid, r, re.DOTALL)[0]
        jsdata = re.sub(r'([\{\s,])(\w+)(:)', r'\1"\2"\3', jsdata)
        jsdata = re.sub(r'''(?<=:)\s*\'''', ' "', jsdata)
        jsdata = re.sub(r'''(?<=\w)\'''', '"', jsdata)
        jsdata = re.sub(''':\s*\w+\s*\?[^,}]+''', ': 0', jsdata)
        jsdata = re.sub(''':\s*[a-zA-Z]+[^,}]+''', ': 0', jsdata)
        jsdata = json.loads(jsdata)

        mw_key = re.findall('''var\s*mw_key\s*=\s*["'](\w+)["']''', r)[0]
        newatt = re.findall(
            '''f33f2ea23c8b8030b2454792b013a550\[["'](\w+)["']\]\s*=\s*["'](\w+)["']''',
            r)[0]

        jsdata.update({'mw_key': mw_key, newatt[0]: newatt[1]})

        r = client.request(urlparse.urljoin(host, '/sessions/new_session'),
                           post=jsdata,
                           headers=headers,
                           XHR=True)
        r = json.loads(r).get('mans', {}).get('manifest_m3u8')

        r = client.request(r, headers=headers)

        r = [(i[0], i[1]) for i in re.findall(
            '#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+).*?(http.*?(?:\.abst|\.f4m|\.m3u8)).*?',
            r, re.DOTALL) if i]
        r = [(source_utils.label_to_quality(i[0]),
              i[1] + '|%s' % urllib.urlencode(headers)) for i in r]
        r = [{'quality': i[0], 'url': i[1], 'info': info} for i in r]

        return r
    except:
        return []
Code Example #18
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url', ''))
            imdb = data.get('imdb')
            season = data.get('season')
            episode = data.get('episode')

            if season and episode and imdb:
                r = urllib.urlencode({'val': 's%se%s' % (season, episode), 'IMDB': imdb})
                r = cache.get(client.request, 4, urlparse.urljoin(self.base_link, self.episode_link), XHR=True, post=r)
            else:
                r = cache.get(client.request, 4, url)

            l = dom_parser.parse_dom(r, 'select', attrs={'id': 'sel_sprache'})
            l = dom_parser.parse_dom(l, 'option', req='id')

            r = [(dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['id']})) for i in l if i.attrs['id'] == 'deutsch']
            r = [(i[0], dom_parser.parse_dom(i[0], 'option', req='id')) for i in r]
            r = [(id.attrs['id'], dom_parser.parse_dom(content, 'div', attrs={'id': id.attrs['id']})) for content, ids in r for id in ids]
            r = [(re.findall('hd(\d{3,4})', i[0]), dom_parser.parse_dom(i[1], 'a', req='href')) for i in r if i[1]]
            r = [(i[0][0] if i[0] else '0', [x.attrs['href'] for x in i[1]]) for i in r if i[1]]
            r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]

            for quality, urls in r:
                for link in urls:
                    try:
                        data = urlparse.parse_qs(urlparse.urlparse(link).query, keep_blank_values=True)

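                        # hoster links may be wrapped as ?m=<base64>; unwrap to the real URL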
                        if 'm' in data:
                            data = data.get('m')[0]
                            link = base64.b64decode(data)

                        link = link.strip()

                        valid, host = source_utils.is_host_valid(link, hostDict)
                        if not valid: continue

                        sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': link, 'direct': False, 'debridonly': False, 'checkquality': True})
                    except:
                        pass

            return sources
        except:
            source_faultlog.logFault(__name__,source_faultlog.tagScrape, url)
            return sources
Code Example #19
File: mzmovies.py  Project: CYBERxNUKE/xbmc-addon
 def mz_server(self,url):
     try:
         scraper = cfscrape.create_scraper()
         urls = []
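         # collect file/label pairs from the player config; SD entries are skipped below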
         data = scraper.get(url).content
         data = re.findall('''file:\s*["']([^"']+)",label:\s*"(\d{3,}p)"''', data, re.DOTALL)
         for url, label in data:
             label = source_utils.label_to_quality(label)
             if label == 'SD': continue
             urls.append({'url': url, 'quality': label})
         return urls
     except:
         return urls
Code Example #20
File: mzmovies.py  Project: staycanuca/CerebroTVRepo
 def mz_server(self,url):
     try:
         scraper = cfscrape.create_scraper()
         urls = []
         data = scraper.get(url).content
         data = re.findall('''file:\s*["']([^"']+)",label:\s*"(\d{3,}p)"''', data, re.DOTALL)
         for url, label in data:
             label = source_utils.label_to_quality(label)
             if label == 'SD': continue
             urls.append({'url': url, 'quality': label})
         return urls
     except:
         return urls
Code Example #21
 def mz_server(self, url):
     try:
         urls = []
         data = client.request(url, referer=self.base_link)
         data = re.findall('''file:\s*["']([^"']+)",label:\s*"(\d{3,}p)"''',
                           data, re.DOTALL)
         for url, label in data:
             label = source_utils.label_to_quality(label)
             if label == 'SD': continue
             urls.append({'url': url, 'quality': label})
         return urls
     except:
         return urls
Code Example #22
File: streamit.py  Project: vphuc81/MyRepository
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url', ''))
            imdb = data.get('imdb')
            season = data.get('season')
            episode = data.get('episode')

            if season and episode and imdb:
                r = urllib.urlencode({'val': 's%se%s' % (season, episode), 'IMDB': imdb})
                r = client.request(urlparse.urljoin(self.base_link, self.episode_link), XHR=True, post=r)
            else:
                r = client.request(url)

            l = dom_parser.parse_dom(r, 'select', attrs={'id': 'sel_sprache'})
            l = dom_parser.parse_dom(l, 'option', req='id')

            r = [(dom_parser.parse_dom(r, 'div', attrs={'id': i.attrs['id']})) for i in l if i.attrs['id'] == 'deutsch']
            r = [(i[0], dom_parser.parse_dom(i[0], 'option', req='id')) for i in r]
            r = [(id.attrs['id'], dom_parser.parse_dom(content, 'div', attrs={'id': id.attrs['id']})) for content, ids in r for id in ids]
            r = [(re.findall('hd(\d{3,4})', i[0]), dom_parser.parse_dom(i[1], 'a', req='href')) for i in r if i[1]]
            r = [(i[0][0] if i[0] else '0', [x.attrs['href'] for x in i[1]]) for i in r if i[1]]
            r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]

            for quality, urls in r:
                for link in urls:
                    try:
                        data = urlparse.parse_qs(urlparse.urlparse(link).query, keep_blank_values=True)

                        if 'm' in data:
                            data = data.get('m')[0]
                            link = base64.b64decode(data)

                        link = link.strip()

                        valid, host = source_utils.is_host_valid(link, hostDict)
                        if not valid: continue

                        sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': link, 'direct': False, 'debridonly': False, 'checkquality': True})
                    except:
                        pass

            return sources
        except:
            return sources
Code Example #23
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result, 'div', attrs={'class': 'item'}, req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page})
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'): url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

                    if '.asp' not in url: continue

                    result = client.request(url)

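                    # skip pages whose player config declares no captions track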
                    captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                    if not captions: continue

                    matches = [(match[0], match[1]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''', result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]), x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result if not i[1].endswith('.vtt')]

                    for quality, url in result: sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code Example #24
File: hdstreams.py  Project: krazware/therealufo
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            episode = data.get('episode')

            r = client.request(url)

            aj = self.__get_ajax_object(r)

            b = dom_parser.parse_dom(r, 'img', attrs={'class': 'dgvaup'}, req='data-img')[0].attrs['data-img']

            if episode:
                r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream-ep', 'data-episode': episode}, req=['data-episode', 'data-server'])
            else:
                r = dom_parser.parse_dom(r, 'div', attrs={'id': 'lang-de'})
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie'})
                r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream'}, req=['data-episode', 'data-server'])

            r = [(i.attrs['data-episode'], i.attrs['data-server']) for i in r]

            for epi, server in r:
                try:
                    x = {'action': aj.get('load_episodes'), 'episode': epi, 'pid': aj.get('postid'), 'server': server, 'nonce': aj.get('nonce'), 'b': b}
                    x = client.request(aj.get('ajax_url'), post=x, XHR=True, referer=url)
                    x = json.loads(x)

                    q = source_utils.label_to_quality(x.get('q'))
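                    # 'u' decodes to JSON holding an EVP-encrypted link; the data-img value 'b' serves as passphrase, 's' as hex salt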
                    x = json.loads(base64.decodestring(x.get('u')))

                    u = source_utils.evp_decode(x.get('ct'), base64.decodestring(b), x.get('s').decode("hex"))
                    u = u.replace('\/', '/').strip('"')

                    valid, host = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue

                    sources.append({'source': host, 'quality': q, 'language': 'de', 'url': u, 'direct': False, 'debridonly': False, 'checkquality': True})
                except:
                    pass

            return sources
        except:
            return sources
Code Example #25
    def sources(self, url, hostDict, locDict):
        sources = []

        try:
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            query = self.search_link % (urllib.quote_plus(title))
            #query = urlparse.urljoin(self.base_link, query)
            query = urlparse.urljoin(self.base_link, self.ajax_link)
            post = urllib.urlencode({
                'action': 'sufi_search',
                'search_string': title
            })

            result = client.request(query, post=post)
            r = client.parseDOM(result, 'div', attrs={'id': 'showList'})
            r = re.findall(
                r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])
            r = [
                i for i in r if cleantitle.get(title) == cleantitle.get(i[1])
                and data['year'] in i[1]
            ][0]
            url = r[0]
            result = client.request(url)
            r = re.findall(
                r'video\s+id="\w+.*?src="([^"]+)".*?data-res="([^"]+)', result,
                re.DOTALL)

            for i in r:
                try:
                    q = source_utils.label_to_quality(i[1])
                    sources.append({
                        'source': 'CDN',
                        'quality': q,
                        'language': 'en',
                        'url': i[0],
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except Exception as e:
            return sources
Code Example #26
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = urlparse.urljoin(self.base_link, data.get('url'))
            episode = data.get('episode')

            r = client.request(url)

            aj = self.__get_ajax_object(r)

            b = dom_parser.parse_dom(r, 'img', attrs={'class': 'dgvaup'}, req='data-img')[0].attrs['data-img']

            if episode:
                r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream-ep', 'data-episode': episode}, req=['data-episode', 'data-server'])
            else:
                r = dom_parser.parse_dom(r, 'div', attrs={'id': 'lang-de'})
                r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie'})
                r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream'}, req=['data-episode', 'data-server'])

            r = [(i.attrs['data-episode'], i.attrs['data-server']) for i in r]

            for epi, server in r:
                try:
                    x = {'action': aj.get('load_episodes'), 'episode': epi, 'pid': aj.get('postid'), 'server': server, 'nonce': aj.get('nonce'), 'b': b}
                    x = client.request(aj.get('ajax_url'), post=x, XHR=True, referer=url)
                    x = json.loads(x)

                    q = source_utils.label_to_quality(x.get('q'))
                    x = json.loads(base64.decodestring(x.get('u')))

                    u = source_utils.evp_decode(x.get('ct'), base64.decodestring(b), x.get('s').decode("hex"))
                    u = u.replace('\/', '/').strip('"')

                    valid, host = source_utils.is_host_valid(u, hostDict)
                    if not valid: continue

                    sources.append({'source': host, 'quality': q, 'language': 'de', 'url': u, 'direct': False, 'debridonly': False, 'checkquality': True})
                except:
                    pass

            return sources
        except:
            return sources
Code Example #27
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url)
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result, 'div', attrs={'class': 'item'}, req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page})
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe', req='src')[0].attrs['src']
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'): url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid: sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

                    if '.asp' not in url: continue

                    result = client.request(url)

                    captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                    if not captions: continue

                    matches = [(match[0], match[1]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''', result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]), x[1].replace('\/', '/')) for x in matches]
                    result = [(i[0], i[1]) for i in result if not i[1].endswith('.vtt')]

                    for quality, url in result: sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #28
File: watchseries.py  Project: vphuc81/MyRepository
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources
        
            hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
            result = client.request(url, timeout=10)
            
            dom = dom_parser.parse_dom(result, 'a', req='data-video')
            urls = [i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video'] for i in dom]

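            # vidnode embeds expose <source> tags directly, while ocloud pages hide the
            # file id behind a second request; anything else falls through to the hoster check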
            for url in urls:
                dom = []
                if 'vidnode.net' in url:
                    result = client.request(url, timeout=10)
                    dom = dom_parser.parse_dom(result, 'source', req=['src','label'])
                    dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'], i.attrs['label']) for i in dom if i]
                elif 'ocloud.stream' in url:
                    result = client.request(url, timeout=10)
                    base = re.findall('<base href="([^"]+)">', result)[0]
                    hostDict += [base]
                    dom = dom_parser.parse_dom(result, 'a', req=['href','id'])
                    dom = [(i.attrs['href'].replace('./embed',base+'embed'), i.attrs['id']) for i in dom if i]
                    dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]                        
                if dom:                
                    try:
                        for r in dom:
                            valid, hoster = source_utils.is_host_valid(r[0], hostDict)

                            if not valid: continue
                            quality = source_utils.label_to_quality(r[1])
                            urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                            for x in urls:
                                # only probe the file size on direct streams; otherwise it stays unset
                                size = source_utils.get_size(x['url']) if direct else None
                                if size: sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False, 'info': size})
                                else: sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                    except: pass
                else:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    try:
                        url.decode('utf-8')
                        sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            return sources
Code example #30
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if not url:
                return sources
            url = urlparse.urljoin(self.base_link, url)
            content = cache.get(client.request, 4, url)

            links = dom_parser.parse_dom(content, 'tr', attrs={'class': 'partItem'})
            links = [(i.attrs['data-id'], i.attrs['data-controlid'], re.findall("(.*)\.png", i.content)[0].split("/")[-1]) for i in
                     links if 'data-id' in i[0]]

            temp = [i for i in links if i[2].lower() == 'vip']

            import json

            for id, controlId, host in temp:
                link = self.resolve((url, id, controlId, 'film' in url))
                params = {
                    'Referer': url,
                    'Host': 'www.alleserienplayer.com',
                    'Upgrade-Insecure-Requests': '1'
                }

                result = client.request(link, headers=params)
                result = re.findall('sources:\s(.*?])', result, flags=re.S)[0]
                result = json.loads(result)
                for i in result:
                    sources.append({'source': 'CDN', 'quality': source_utils.label_to_quality(i['label']), 'language': 'de', 'url': i['file'],
                                    'direct': True, 'debridonly': False, 'checkquality': False})

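            # hosters split into several parts are tagged '<name>-part-N'; strip the suffix
            # for host matching and surface the part number as extra info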
            for i in links:
                multiPart = re.findall('(.*?)-part-\d+', i[2])
                if len(multiPart) > 0:
                    links = [(i[0], i[1], i[2] + '-part-1' if i[2] == multiPart[0] else i[2]) for i in links]

            links = [(i[0], i[1], re.findall('(.*?)-part-\d+', i[2])[0] if len(re.findall('\d+', i[2])) > 0 else i[2], 'Multi-Part ' + re.findall('\d+', i[2])[0] if len(re.findall('\d+', i[2])) > 0 else None) for i in links]

            for id, controlId, host, multiPart in links:
                valid, hoster = source_utils.is_host_valid(host, hostDict)
                if not valid: continue

                sources.append({'source': hoster, 'quality': 'SD', 'language': 'de', 'url': (url, id, controlId, 'film' in url),
                                'info': multiPart if multiPart else '', 'direct': False, 'debridonly': False, 'checkquality': False})

            return sources
        except Exception as e:
            source_faultlog.logFault(__name__, source_faultlog.tagScrape)
            return sources
Code example #31
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            token = dom_parser.parse_dom(cache.get(client.request, 4, urlparse.urljoin(self.base_link,url)), 'input', attrs={'id': 'proxerToken'})[0].attrs['value']

            for item_id, episode, content_type in self.__get_episode(data.get('url'), token, data.get('episode')):
                stream_link = urlparse.urljoin(self.base_link, '/watch/%s/%s/%s' % (item_id, episode, content_type))

                info = 'subbed' if content_type.endswith('sub') else ''

                r = cache.get(client.request, 4, stream_link)

                r = dom_parser.parse_dom(r, 'script')
                r = ' '.join([i.content for i in r if i.content])
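                # the page defines a 'streams' JS array; each entry is a URL template
                # ('replace') with '#' standing in for the host code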
                r = json.loads(re.findall('var\s*streams\s*=\s*(\[.*?\])\s*;', r)[0])
                r = [(i.get('replace'), i.get('code')) for i in r]
                r = [(i[0].replace('#', i[1])) for i in r if i[0] and i[1]]

                for stream_link in r:
                    if stream_link.startswith('/'): stream_link = 'http:%s' % stream_link

                    if self.domains[0] in stream_link:
                        stream_link = cache.get(client.request, 4, stream_link, cookie=urllib.urlencode({'proxerstream_player': 'flash'}))

                        i = [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*width\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', stream_link, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]

                        for link, quality in i:
                            sources.append({'source': 'cdn', 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': True, 'debridonly': False})
                    else:
                        valid, host = source_utils.is_host_valid(stream_link, hostDict)
                        if not valid: continue

                        sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': stream_link, 'info': info, 'direct': False, 'debridonly': False})

            return sources
        except:
            source_faultlog.logFault(__name__,source_faultlog.tagScrape, url)
            return sources
Code example #33
File: dramacool.py  Project: amadu80/repository.xvbmc
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'watch_video'})
            r = [i.attrs['data-src'] for i in dom_parser.parse_dom(r, 'iframe', req='data-src')]

            for i in r:
                try:
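                    # k-vid embeds carry direct <source> tags plus a nested hoster iframe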
                    if 'k-vid' in i:
                        i = client.request(i, referer=url)
                        i = dom_parser.parse_dom(i, 'div', attrs={'class': 'videocontent'})

                        gvid = dom_parser.parse_dom(i, 'source', req='src')
                        gvid = [(g.attrs['src'], g.attrs['label'] if 'label' in g.attrs else 'SD') for g in gvid]
                        gvid = [(x[0], source_utils.label_to_quality(x[1])) for x in gvid if x[0] != 'auto']

                        for u, q in gvid:
                            try:
                                tag = directstream.googletag(u)
                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                            except:
                                pass

                        i = dom_parser.parse_dom(i, 'iframe', attrs={'id': 'embedvideo'}, req='src')[0].attrs['src']

                    valid, host = source_utils.is_host_valid(i, hostDict)
                    if not valid: continue

                    sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #34
File: imdark.py  Project: YourFriendCaspian/dotfiles
    def sources(self, url, hostDict, locDict):
        sources = []
        req = requests.Session()
        headers = {'User-Agent': client.randomagent(), 'Origin': 'http://imdark.com', 'Referer': 'http://imdark.com',
                   'X-Requested-With': 'XMLHttpRequest'}

        try:
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            query = urllib.quote_plus(title).lower()
            result = req.get(self.base_link, headers=headers).text
            darksearch = re.findall(r'darkestsearch" value="(.*?)"', result)[0]

            result = req.get(self.base_link + self.search_link % (query, darksearch), headers=headers).text

            r = client.parseDOM(result, 'div', attrs={'id':'showList'})
            r = re.findall(r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])     
            r = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and data['year'] in i[1]][0]
            url = r[0]
            print("INFO - " + url)
            result = req.get(url, headers=headers).text
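            # the player sources come from an ajax endpoint that expects the page's
            # nonce ('jhinga') and 'tipi' id posted back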
            nonce = re.findall(r"nonce = '(.*?)'", result)[0]
            tipi = re.findall(r'tipi = (.*?);', result)[0]
            postData = {'action':'getitsufiplaying', 'tipi':tipi, 'jhinga':nonce}
            result = req.post(self.base_link + self.ajax_link, data=postData, headers=headers).text
            r = re.findall(r'"src":"(.*?)","type":"(.*?)","data-res":"(\d*?)"', result)
            linkHeaders = 'Referer=http://imdark.com/&User-Agent=' + urllib.quote(client.randomagent()) + '&Cookie=' + urllib.quote('mykey123=mykeyvalue')
            for i in r:
                print(str(i))
                try:
                    q = source_utils.label_to_quality(i[2])
                    sources.append({'source': 'CDN', 'quality': q, 'info': i[1].replace('\\', ''), 'language': 'en',
                                    'url': i[0].replace('\\','') + '|' + linkHeaders,
                                    'direct': True, 'debridonly': False})
                except:
                    traceback.print_exc()
                    pass
            for i in sources:
                print("INFO SOURCES " + str(i))
            return sources
        except:
            traceback.print_exc()
            return sources
Code example #36
File: drama4u.py  Project: CYBERxNUKE/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'tab-pane'})
            r = dom_parser.parse_dom(r, 'iframe', req='src')
            r = [i.attrs['src'] for i in r]

            for i in r:
                try:
                    if 'drama4u' in i or 'k-vid' in i:
                        r = client.request(i, referer=url)
                        r = re.findall('''var\s*source\s*=\s*\[({.*?})\]\s*;''', r)[0]

                        i = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', r, re.DOTALL)]
                        i += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', r, re.DOTALL)]
                        r = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]

                        for u, q in list(set(r)):
                            try:
                                tag = directstream.googletag(u)
                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                            except:
                                pass
                    else:
                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid: continue

                        sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #37
File: proxer.py  Project: mpie/repo
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            for item_id, episode, content_type in self.__get_episode(data.get('url'), data.get('episode')):
                stream_link = urlparse.urljoin(self.base_link, '/watch/%s/%s/%s' % (item_id, episode, content_type))

                info = 'subbed' if content_type.endswith('sub') else ''

                r = client.request(stream_link)

                r = dom_parser.parse_dom(r, 'script')
                r = ' '.join([i.content for i in r if i.content])
                r = json.loads(re.findall('var\s*streams\s*=\s*(\[.*?\])\s*;', r)[0])
                r = [(i.get('replace'), i.get('code')) for i in r]
                r = [(i[0].replace('#', i[1])) for i in r if i[0] and i[1]]

                for stream_link in r:
                    if stream_link.startswith('/'): stream_link = 'http:%s' % stream_link

                    if self.domains[0] in stream_link:
                        stream_link = client.request(stream_link, cookie=urllib.urlencode({'proxerstream_player': 'flash'}))

                        i = [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*width\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', stream_link, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]

                        for url, quality in i:
                            sources.append({'source': 'cdn', 'quality': quality, 'language': 'de', 'url': url, 'info': info, 'direct': True, 'debridonly': False})
                    else:
                        valid, host = source_utils.is_host_valid(stream_link, hostDict)
                        if not valid: continue

                        sources.append({'source': host, 'quality': 'SD', 'language': 'de', 'url': stream_link, 'info': info, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Code example #38
def __get_moonwalk(url, ref, info=''):
    try:
        host = urlparse.urlparse(url)
        host = '%s://%s' % (host.scheme, host.netloc)

        r = client.request(url, referer=ref, output='extended')

        headers = r[3]
        headers.update({'Cookie': r[2].get('Set-Cookie')})
        r = r[0]

        csrf = re.findall('name="csrf-token" content="(.*?)"', r)[0]
        story = re.findall('''["']X-User-Story["']\s*:\s*["'](\w+)["']''', r)[0]
        headers.update({'X-CSRF-Token': csrf, 'X-User-Story': story})

        varname = re.findall('''var\s*(\w+)\s*=\s*'/sessions/new_session'\s*;''', r)[0]
        jsid = re.findall('''\.post\(\s*%s\s*,\s*(\w+)''' % varname, r)[0]

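        # turn the inline JS object literal into valid JSON: quote the keys, normalise
        # the quoting style, and zero out values that are expressions rather than literals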
        jsdata = re.findall('var\s*%s\s*=\s*({.*?})' % jsid, r, re.DOTALL)[0]
        jsdata = re.sub(r'([\{\s,])(\w+)(:)', r'\1"\2"\3', jsdata)
        jsdata = re.sub(r'''(?<=:)\s*\'''', ' "', jsdata)
        jsdata = re.sub(r'''(?<=\w)\'''', '"', jsdata)
        jsdata = re.sub(''':\s*\w+\s*\?[^,}]+''', ': 0', jsdata)
        jsdata = re.sub(''':\s*[a-zA-Z]+[^,}]+''', ': 0', jsdata)
        jsdata = json.loads(jsdata)

        mw_key = re.findall('''var\s*mw_key\s*=\s*["'](\w+)["']''', r)[0]
        newatt = re.findall('''f33f2ea23c8b8030b2454792b013a550\[["'](\w+)["']\]\s*=\s*["'](\w+)["']''', r)[0]

        jsdata.update({'mw_key': mw_key, newatt[0]: newatt[1]})

        r = client.request(urlparse.urljoin(host, '/sessions/new_session'), post=jsdata, headers=headers, XHR=True)
        r = json.loads(r).get('mans', {}).get('manifest_m3u8')

        r = client.request(r, headers=headers)

        r = [(i[0], i[1]) for i in re.findall('#EXT-X-STREAM-INF:.*?RESOLUTION=\d+x(\d+).*?(http.*?(?:\.abst|\.f4m|\.m3u8)).*?', r, re.DOTALL) if i]
        r = [(source_utils.label_to_quality(i[0]), i[1] + '|%s' % urllib.urlencode(headers)) for i in r]
        r = [{'quality': i[0], 'url': i[1], 'info': info} for i in r]

        return r
    except:
        return []
Code example #39
File: netzkino.py  Project: CYBERxNUKE/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, self.conf_link), XHR=True)
            r = json.loads(r).get('streamer')
            r = client.request(r + '%s.mp4/master.m3u8' % url, XHR=True)

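            # parse the HLS master playlist: the RESOLUTION height becomes the quality label
            # and the following http line is the variant stream URL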
            r = re.findall('RESOLUTION\s*=\s*\d+x(\d+).*?\n(http.*?)(?:\n|$)', r, re.IGNORECASE)
            r = [(source_utils.label_to_quality(i[0]), i[1]) for i in r]

            for quality, link in r:
                sources.append({'source': 'CDN', 'quality': quality, 'language': 'de', 'url': link, 'direct': True, 'debridonly': False})

            return sources
        except:
            return sources
Code example #40
File: hdfilme.py  Project: amadu80/repository.xvbmc
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = re.findall('(\d+)-stream(?:\?episode=(\d+))?', url)
            r = [(i[0], i[1] if i[1] else '1') for i in r][0]

            r = client.request(urlparse.urljoin(self.base_link, self.get_link % r), output='extended')

            headers = r[3]
            headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
            r = r[0]

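            # the response is unpadded base64; pad to a multiple of four before decoding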
            r += '=' * (-len(r) % 4)
            r = base64.b64decode(r)

            i = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', r, re.DOTALL)]
            i += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', r, re.DOTALL)]
            r = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]

            for u, q in r:
                try:
                    tag = directstream.googletag(u)

                    if tag:
                        sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'de', 'url': u, 'direct': True, 'debridonly': False})
                    else:
                        sources.append({'source': 'CDN', 'quality': q, 'language': 'de', 'url': u + '|%s' % urllib.urlencode(headers), 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Code example #41
File: kingmovies.py  Project: vphuc81/MyRepository
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(data['premiered'])[0][0]
                episode = '%01d' % int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)

            else:
                episode = None
                year = data['year']
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            referer = url
            r = client.request(url)
            if episode == None:
                y = re.findall('Released\s*:\s*.+?\s*(\d{4})', r)[0]
                if not year == y: raise Exception()

            r = client.parseDOM(r, 'div', attrs = {'class': 'sli-name'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))

            if not episode == None:
                r = [i[0] for i in r if i[1].lower().startswith('episode %02d:' % int(data['episode']))]
            else:
                r = [i[0] for i in r]

            for u in r:
                try:
                    p = client.request(u, referer=referer, timeout='10')
                    src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"', p)[0]
                    if src.startswith('//'):
                        src = 'http:'+src
                    episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
                    p = client.request(src, referer=u)
                    try:
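                        # JuicyCodes ships a base64 blob split by '"+"' concatenation;
                        # reassemble, decode and jsunpack it to reach the player config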
                        p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                        p = re.sub(r'\"\s*\+\s*\"','', p)
                        p = re.sub(r'[^A-Za-z0-9+\\/=]','', p)    
                        p = base64.b64decode(p)                
                        p = jsunpack.unpack(p)
                        p = unicode(p, 'utf-8')
                    except:
                        continue

                    fl = re.findall(r'file"\s*:\s*"([^"]+)',p)[0]                   
                    post = {'episodeID': episodeId, 'file': fl, 'subtitle': 'false', 'referer': urllib.quote_plus(u)}
                    p = client.request(self.source_link, post=post, referer=src, XHR=True)

                    js = json.loads(p)

                    try:
                        ss = js['sources']
                        ss = [(i['file'], i['label']) for i in ss if 'file' in i]

                        for i in ss:
                            try:                                                                
                                sources.append({'source': 'CDN', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': i[0], 'direct': True, 'debridonly': False})
                            except: pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
Code example #42
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

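                # fetch the front page first to pick up the session cookie the
                # XHR search endpoint expects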
                r = client.request(self.base_link, output='extended', timeout='10')
                cookie = r[4] ; headers = r[3] ; result = r[0]
                headers['Cookie'] = cookie

                query = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
                r = client.request(query, headers=headers, XHR=True)
                r = json.loads(r)['content']
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
                
                
                if 'tvshowtitle' in data:                   
                    cltitle = cleantitle.get(title+'season'+season)
                    cltitle2 = cleantitle.get(title+'season%02d'%int(season))
                    r = [i for i in r if cltitle == cleantitle.get(i[1]) or cltitle2 == cleantitle.get(i[1])]
                    vurl = '%s%s-episode-%s'%(self.base_link, str(r[0][0]).replace('/info',''), episode)
                    vurl2 = None
                else:
                    cltitle = cleantitle.getsearch(title)
                    cltitle2 = cleantitle.getsearch('%s (%s)'%(title,year))
                    r = [i for i in r if cltitle2 == cleantitle.getsearch(i[1]) or cltitle == cleantitle.getsearch(i[1])]
                    vurl = '%s%s-episode-0'%(self.base_link, str(r[0][0]).replace('/info',''))
                    vurl2 = '%s%s-episode-1'%(self.base_link, str(r[0][0]).replace('/info',''))                

                r = client.request(vurl, headers=headers)
                headers['Referer'] = vurl
                
                slinks = client.parseDOM(r, 'div', attrs = {'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
                if len(slinks) == 0 and not vurl2 == None:
                    r = client.request(vurl2, headers=headers)
                    headers['Referer'] = vurl2
                    slinks = client.parseDOM(r, 'div', attrs = {'class': 'anime_muti_link'})                
                    slinks = client.parseDOM(slinks, 'li', ret='data-video')

                for slink in slinks:
                    try:
                        if 'vidnode.net/streaming.php' in slink:
                            r = client.request('https:%s'%slink, headers=headers)
                            clinks = re.findall(r'sources:\[(.*?)\]',r)[0]
                            clinks = re.findall(r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)', clinks)
                            for clink in clinks:
                                q = source_utils.label_to_quality(clink[1])
                                sources.append({'source': 'cdn', 'quality': q, 'language': 'en', 'url': clink[0], 'direct': True, 'debridonly': False})
                        else:
                            valid, hoster = source_utils.is_host_valid(slink, hostDict)
                            if valid:
                                sources.append({'source': hoster, 'quality': 'SD', 'language': 'en', 'url': slink, 'direct': False, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Code example #43
File: plocker.py  Project: vphuc81/MyRepository
    def sources(self, url, hostDict, hostprDict):
        '''
        Loops over site sources and returns a list of dictionaries with
        corresponding file locker sources and information

        Keyword arguments:

        url -- string - url params

        Returns:

        sources -- list -- a list of source-information dictionaries

        '''

        sources = []

        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            data['sources'] = re.findall("[^', u\]\[]+", data['sources'])

            for i in data['sources']:
                token = str(self.__token(
                    {'id': i, 'update': 0, 'ts': data['ts']}))
                query = (self.info_path % (data['ts'], token, i))
                url = urlparse.urljoin(self.base_link, query)
                info_response = client.request(url, XHR=True)
                grabber_dict = json.loads(info_response)

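                # 'direct' grabber responses return quality-labelled files; otherwise
                # 'target' points at an external hoster embed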
                try:
                    if grabber_dict['type'] == 'direct':
                        token64 = grabber_dict['params']['token']
                        query = (self.grabber_path % (data['ts'], i, token64))
                        url = urlparse.urljoin(self.base_link, query)

                        response = client.request(url, XHR=True)

                        sources_list = json.loads(response)['data']

                        for j in sources_list:

                            quality = j['label'] if not j['label'] == '' else 'SD'
                            quality = source_utils.label_to_quality(quality)

                            if 'googleapis' in j['file']:
                                sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': j['file'], 'direct': True, 'debridonly': False})
                                continue

                            valid, hoster = source_utils.is_host_valid(j['file'], hostDict)
                            urls, host, direct = source_utils.check_directstreams(j['file'], hoster)
                            for x in urls:
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': x['url'],
                                    'direct': True,
                                    'debridonly': False
                                })

                    elif not grabber_dict['target'] == '':
                        url = 'https:' + grabber_dict['target'] if not grabber_dict['target'].startswith('http') else grabber_dict['target']
                        valid, hoster = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(url, hoster)
                        url = urls[0]['url']

                        if 'cloud.to' in host:
                            headers = {
                                'Referer': self.base_link
                            }
                            url = url + source_utils.append_headers(headers)

                        sources.append({
                            'source': hoster,
                            'quality': urls[0]['quality'],
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                except: pass

            return sources

        except Exception:
            return sources
Code example #44
File: gostream.py  Project: vphuc81/MyRepository
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user != '' and self.password != ''): #raise Exception()

               login = urlparse.urljoin(self.base_link, '/login.html')

               post = urllib.urlencode({'username': self.user, 'password': self.password, 'submit': 'Login'})

               cookie = client.request(login, post=post, output='cookie', close=False)

               r = client.request(login, post=post, cookie=cookie, output='extended')

               headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            else:
               headers = {}


            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                query = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
                query2 = urlparse.urljoin(self.base_link, self.search_link % re.sub('\s','+',title))
                r = client.request(query)
                r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
                if len(r)==0:
                    r = client.request(query2)
                    r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'), client.parseDOM(r, 'a', ret='data-url'))
                
                if 'tvshowtitle' in data:                   
                    cltitle = cleantitle.get(title+'season'+season)
                    cltitle2 = cleantitle.get(title+'season%02d'%int(season))
                else:
                    cltitle = cleantitle.get(title)
                    cltitle2 = cltitle  # movies carry no season suffix; reuse the same cleaned title

                r = [i for i in r if cltitle == cleantitle.get(i[1]) or cltitle2 == cleantitle.get(i[1])]
                id = [re.findall('/(\d+)$',i[2])[0] for i in r][0]

                ajx = urlparse.urljoin(self.base_link, '/ajax/movie_episodes/'+id)

                r = client.request(ajx)
                if 'episode' in data:
                    eids = re.findall(r'title=\\"Episode\s+%02d.*?data-id=\\"(\d+)'%int(episode),r)
                else:
                    eids = re.findall(r'title=.*?data-id=\\"(\d+)',r)

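                # every episode id is first exchanged for one-time tokens (_x, _y)
                # that unlock the movie_sources JSON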
                for eid in eids:
                    try:
                        ajx = 'ajax/movie_token?eid=%s&mid=%s&_=%d' % (eid, id, int(time.time() * 1000))
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        [x,y] = re.findall(r"_x='([^']+)',\s*_y='([^']+)'",r)[0]
                        ajx = 'ajax/movie_sources/%s?x=%s&y=%s'%(eid,x,y)
                        ajx = urlparse.urljoin(self.base_link, ajx)
                        r = client.request(ajx)
                        r = json.loads(r)
                        r = r['playlist'][0]['sources']
                        for i in r:
                            try: label = source_utils.label_to_quality(i['label'])
                            except: label = 'SD'
                            sources.append({'source': 'CDN', 'quality': label, 'language': 'en', 'url': i['file'], 'direct': True, 'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources
Code example #45
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(
                    data['premiered'])[0][0]
                episode = '%01d' % int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)

            else:
                episode = None
                year = data['year']
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            referer = url
            r = client.request(url)
            if episode == None:
                y = re.findall('Released\s*:\s*.+?\s*(\d{4})', r)[0]
                if not year == y: raise Exception()

            r = client.parseDOM(r, 'div', attrs={'class': 'sli-name'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))

            if not episode == None:
                r = [
                    i[0] for i in r
                    if i[1].lower().startswith('episode %02d:' %
                                               int(data['episode']))
                    or i[1].lower().startswith('episode %d:' %
                                               int(data['episode']))
                ]
            else:
                r = [i[0] for i in r]

            for u in r:
                try:
                    p = client.request(u, referer=referer, timeout='10')
                    quali = re.findall(r'Quality:\s*<.*?>([^<]+)', p)[0]
                    quali = quali if quali in [
                        'HD', 'SD'
                    ] else source_utils.label_to_quality(quali)
                    src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"',
                                     p)[0]
                    if src.startswith('//'):
                        src = 'http:' + src
                    episodeId = re.findall('.*streamdor.co/video/(\d+)',
                                           src)[0]
                    p = client.request(src, referer=u)
                    try:
                        p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p,
                                       re.IGNORECASE)[0]
                        p = re.sub(r'\"\s*\+\s*\"', '', p)
                        p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                        p = base64.b64decode(p)
                        p = jsunpack.unpack(p)
                        p = unicode(p, 'utf-8')
                    except:
                        continue

                    try:

                        fl = re.findall(r'file"\s*:\s*"([^"]+)', p)
                        if len(fl) > 0:
                            fl = fl[0]
                            post = {
                                'episodeID': episodeId,
                                'file': fl,
                                'subtitle': 'false',
                                'referer': urllib.quote_plus(u)
                            }
                            p = client.request(self.source_link,
                                               post=post,
                                               referer=src,
                                               XHR=True)
                            js = json.loads(p)
                            src = js['sources']
                            p = client.request('http:' + src, referer=src)
                            js = json.loads(p)[0]
                            ss = js['sources']
                            ss = [(i['file'], i['label']) for i in ss
                                  if 'file' in i]

                        else:
                            try:
                                post = {'id': episodeId}
                                p2 = client.request(
                                    'https://embed.streamdor.co/token.php?v=5',
                                    post=post,
                                    referer=src,
                                    XHR=True)
                                js = json.loads(p2)
                                tok = js['token']
                                p = re.findall(r'var\s+episode=({[^}]+});',
                                               p)[0]
                                js = json.loads(p)
                                ss = []
                                if 'eName' in js and js['eName'] != '':
                                    quali = source_utils.label_to_quality(
                                        js['eName'])
                                if 'fileEmbed' in js and js['fileEmbed'] != '':
                                    ss.append([js['fileEmbed'], quali])
                                if 'fileHLS' in js and js['fileHLS'] != '':
                                    ss.append([
                                        'https://hls.streamdor.co/%s%s' %
                                        (tok, js['fileHLS']), quali
                                    ])
                            except:
                                pass

                        for i in ss:
                            try:
                                valid, hoster = source_utils.is_host_valid(
                                    i[0], hostDict)
                                direct = False
                                if not valid:
                                    hoster = 'CDN'
                                    direct = True
                                sources.append({
                                    'source': hoster,
                                    'quality': quali,
                                    'language': 'en',
                                    'url': i[0],
                                    'direct': direct,
                                    'debridonly': False
                                })
                            except:
                                pass

                    except:
                        url = re.findall(r'embedURL"\s*:\s*"([^"]+)', p)[0]
                        valid, hoster = source_utils.is_host_valid(
                            url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(
                            url, hoster)
                        for x in urls:
                            sources.append({
                                'source': host,
                                'quality': 'SD',
                                'language': 'en',
                                'url': x['url'],
                                'direct': direct,
                                'debridonly': False
                            })

                except:
                    pass

            return sources
        except:
            return sources
Code example #46
File: foxx.py  Project: enursha101/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('\/flags\/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if self.domains[0] in i:
                        i = client.request(i, referer=url)

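                        # the player setup hides inside packed scripts: try both base64-wrapped
                        # and plain eval(function...) payloads before scraping file/label pairs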
                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try: i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x)))
                            except: pass

                        s = re.compile('(eval\(function.*?)</script>', re.DOTALL).findall(i)

                        for x in s:
                            try: i += jsunpack.unpack(x)
                            except: pass

                        i = [(match[0], match[1]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i, re.DOTALL)]
                        i = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i if '/no-video.mp4' not in x[0]]

                        for url, quality in i:
                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'debridonly': False})
                    else:
                        try:
                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            urls = []
                            if 'google' in i: host = 'gvideo'; direct = True; urls = directstream.google(i);
                            if 'google' in i and not urls and directstream.googletag(i):  host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(i)[0]['quality'], 'url': i}]
                            elif 'ok.ru' in i: host = 'vk'; direct = True; urls = directstream.odnoklassniki(i)
                            elif 'vk.com' in i: host = 'vk'; direct = True; urls = directstream.vk(i)
                            else: direct = False; urls = [{'quality': 'SD', 'url': i}]

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Code example #47
File: watchseries.py  Project: CYBERxNUKE/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            url = self.__get_episode_url(data)

            result = client.request(url)

            dom = re.findall('data-video="(.+?)"', result)
            urls = [i if i.startswith('https') else 'https:' + i for i in dom]

            for url in urls:
                if 'vidnode.net' in url:
                    link = url
                    files = []

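                    # vidnode chains pages via window.location; follow the chain and collect
                    # every file/label pair (the 'Auto' entry is excluded by the lookahead)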
                    while True:
                        try:
                            try: r = client.request(link)
                            except: break  # a failed request would otherwise retry the same link forever

                            files.extend(re.findall("(?!file: '.+?',label: 'Auto')file: '(.+?)',label: '(.+?)'", r))

                            link = re.findall('window\.location = \"(.+?)\";', r)[0]

                            if not 'vidnode' in link:
                                break

                        except Exception:
                            break

                    for i in files:
                        try:
                            video = i[0]
                            quality = i[1]
                            host = 'CDN'

                            if 'google' in video or 'blogspot' in video:
                                pass

                            sources.append({
                                'source': host,
                                'quality': source_utils.label_to_quality(quality),
                                'language': 'en',
                                'url': video,
                                'direct': True,
                                'debridonly': False
                            })

                        except:
                            pass

                else:
                    try:
                        host = urlparse.urlparse(url.strip().lower()).netloc

                        if not host in hostDict: raise Exception()

                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                    except:
                        pass

            return sources

        except:
            return sources
Code example #48
    def sources(self, url, hostDict, hostprDict):

        sources = []

        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            data['sources'] = re.findall("[^', u\]\[]+", data['sources'])
            try:
                q = re.findall("\.(.*)", data['id'])[0]
            except:
                q = data['id']
            query = (self.tooltip_path % q)
            url = urlparse.urljoin(self.base_link, query)
            q = client.request(url)
            quality = re.findall('ty">(.*?)<', q)[0]
            if '1080p' in quality:
                quality = '1080p'
            elif '720p' in quality:
                quality = 'HD'
            else:
                quality = 'SD'
            for i in data['sources']:
                token = str(self.__token(
                    {'id': i, 'server': 28, 'update': 0, 'ts': data['ts']}))
                query = (self.info_path % (data['ts'], token, i))
                url = urlparse.urljoin(self.base_link, query)
                info_response = client.request(url, XHR=True)
                grabber_dict = json.loads(info_response)

                try:
                    if grabber_dict['type'] == 'direct':
                        token64 = grabber_dict['params']['token']
                        query = (self.grabber_path % (data['ts'], i, token64))
                        url = urlparse.urljoin(self.base_link, query)

                        response = client.request(url, XHR=True)

                        sources_list = json.loads(response)['data']
                        
                        for j in sources_list:

                            quality = j['label'] if not j['label'] == '' else 'SD'
                            quality = source_utils.label_to_quality(quality)

                            if 'googleapis' in j['file']:
                                sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': j['file'], 'direct': True, 'debridonly': False})
                                continue

                            valid, hoster = source_utils.is_host_valid(j['file'], hostDict)
                            urls, host, direct = source_utils.check_directstreams(j['file'], hoster)
                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })

                    elif not grabber_dict['target'] == '':
                        url = 'https:' + grabber_dict['target'] if not grabber_dict['target'].startswith('http') else grabber_dict['target']
                        valid, hoster = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(url, hoster)
                        sources.append({
                            'source': hoster,
                            'quality': urls[0]['quality'],
                            'language': 'en',
                            'url': urls[0]['url'],
                            'direct': direct,
                            'debridonly': False
                        })
                except: pass
                    
            return sources

        except Exception:
            return sources
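source_utils.label_to_quality is used throughout these examples but never shown. Judging from the manual '1080p'/'720p' checks at the top of this example, it normalises free-form labels into the quality buckets the source dicts carry; a plausible sketch (an assumption, not the real source_utils code):

def label_to_quality(label):
    # Assumed mapping, mirroring the manual checks above.
    label = (label or '').lower()
    if '1080' in label:
        return '1080p'
    if '720' in label:
        return 'HD'
    return 'SD'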
Code example #49
File: plocker.py  Project: vbprofi/kodi-repo
    def sources(self, url, hostDict, hostprDict):
        '''
        Loops over site sources and returns a list of dictionaries with
        corresponding file locker sources and information

        Keyword arguments:

        url -- string - url params

        Returns:

        sources -- list - a list of source information dictionaries

        '''

        sources = []

        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            data['sources'] = re.findall("[^', u\]\[]+", data['sources'])

            for i in data['sources']:
                token = str(
                    self.__token({
                        'id': i,
                        'update': 0,
                        'ts': data['ts']
                    }))
                query = (self.info_path % (data['ts'], token, i))
                url = urlparse.urljoin(self.base_link, query)
                info_response = client.request(url, XHR=True)
                grabber_dict = json.loads(info_response)

                try:
                    if grabber_dict['type'] == 'direct':
                        token64 = grabber_dict['params']['token']
                        query = (self.grabber_path % (data['ts'], i, token64))
                        url = urlparse.urljoin(self.base_link, query)

                        response = client.request(url, XHR=True)

                        sources_list = json.loads(response)['data']

                        for j in sources_list:

                            quality = j[
                                'label'] if not j['label'] == '' else 'SD'
                            quality = source_utils.label_to_quality(quality)

                            if 'googleapis' in j['file']:
                                sources.append({
                                    'source': 'GVIDEO',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': j['file'],
                                    'direct': True,
                                    'debridonly': False
                                })
                                continue

                            valid, hoster = source_utils.is_host_valid(
                                j['file'], hostDict)
                            urls, host, direct = source_utils.check_directstreams(
                                j['file'], hoster)
                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })

                    elif not grabber_dict['target'] == '':
                        url = 'https:' + grabber_dict[
                            'target'] if not grabber_dict['target'].startswith(
                                'http') else grabber_dict['target']
                        valid, hoster = source_utils.is_host_valid(
                            url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(
                            url, hoster)
                        url = urls[0]['url']

                        if 'cloud.to' in host:
                            headers = {'Referer': self.base_link}
                            url = url + source_utils.append_headers(headers)

                        sources.append({
                            'source': hoster,
                            'quality': urls[0]['quality'],
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    pass

            return sources

        except Exception:
            return sources
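The cloud.to branch above appends headers to the URL via source_utils.append_headers. Kodi players accept per-request headers after a '|' separator (compare the explicit '|Referer=' concatenation in example #50), so append_headers is assumed to do roughly this:

import urllib

def append_headers(headers):
    # Assumed behaviour: urlencode the headers behind a '|' so the player
    # sends them with the stream request.
    return '|' + urllib.urlencode(headers)

url = 'https://cloud.to/stream.mp4' + append_headers({'Referer': 'https://example.invalid/'})
# -> https://cloud.to/stream.mp4|Referer=https%3A%2F%2Fexample.invalid%2F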
Code example #50
File: foxx.py  Project: YourFriendCaspian/dotfiles
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, output='extended')

            headers = r[3]
            headers.update({'Cookie': r[2].get('Set-Cookie'), 'Referer': self.base_link})
            r = r[0]

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i, 'a', attrs={'class': 'options'}, req='href'), dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:], re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src'])) for i in rels if i[0] and i[1]]
            rels = [i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de']

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''', ''.join([i[0].content for i in r]))
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'iframe', attrs={'class': 'metaframe'}, req='src')]
            links += [l.attrs['src'] for i in r for l in dom_parser.parse_dom(i, 'source', req='src')]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if '/play/' in i: i = urlparse.urljoin(self.base_link, i)

                    if self.domains[0] in i:
                        i = client.request(i, headers=headers, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try: i += jsunpack.unpack(base64.decodestring(re.sub('"\s*\+\s*"', '', x))).replace('\\', '')
                            except: pass

                        for x in re.findall('(eval\s*\(function.*?)</script>', i, re.DOTALL):
                            try: i += jsunpack.unpack(x).replace('\\', '')
                            except: pass

                        links = [(match[0], match[1]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', i, re.DOTALL)]
                        links = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in links if '/no-video.mp4' not in x[0]]

                        doc_links = [directstream.google('https://drive.google.com/file/d/%s/view' % match) for match in re.findall('''file:\s*["'](?:[^"']+youtu.be/([^"']+))''', i, re.DOTALL)]
                        doc_links = [(u['url'], u['quality']) for x in doc_links if x for u in x]
                        links += doc_links

                        for url, quality in links:
                            if self.base_link in url:
                                url = url + '|Referer=' + self.base_link

                            sources.append({'source': 'gvideo', 'quality': quality, 'language': 'de', 'url': url, 'direct': True, 'debridonly': False})
                    else:
                        try:
                            # as long as resolveurl get no Update for this URL (So just a Temp-Solution)
                            did = re.findall('youtube.googleapis.com.*?docid=(\w+)', i)
                            if did: i = 'https://drive.google.com/file/d/%s/view' % did[0]

                            valid, host = source_utils.is_host_valid(i, hostDict)
                            if not valid: continue

                            urls, host, direct = source_utils.check_directstreams(i, host)

                            for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'de', 'url': x['url'], 'direct': direct, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
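The first unpacking pass above collapses JavaScript string concatenations such as "aGVs" + "bG8=" into one literal before base64-decoding it; the cleanup step in isolation (the packed string is made up):

import base64
import re

packed = '"aGVs" + "bG8="'
joined = re.sub('"\s*\+\s*"', '', packed).strip('"')
print base64.decodestring(joined)  # prints: hello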
Code example #51
File: ninemovies.py  Project: vphuc81/MyRepository
    def sources(self, url, hostDict, hostprDict):
        '''
        Loops over site sources and returns a list of dictionaries with
        corresponding file locker sources and information

        Keyword arguments:

        url -- string - url params

        Returns:

        sources -- list - a list of source information dictionaries

        '''

        sources = []

        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            data['sources'] = ast.literal_eval(data['sources'])

            for i in data['sources']:
                try:
                    token = str(self.__token(
                        {'id': i[0], 'update': '0', 'ts': data['ts'], 'server': i[1]}))

                    query = (self.info_path % (data['ts'], token, i[0], i[1]))
                    url = urlparse.urljoin(self.base_link, query)

                    info_response = client.request(url, headers={'Referer': self.base_link}, XHR=True)

                    info_dict = json.loads(info_response)

                    if info_dict['type'] == 'direct':
                        token64 = info_dict['params']['token']
                        query = (self.grabber_path % (data['ts'], i[0], self.__decode_shift(token64, -18)))
                        url = urlparse.urljoin(self.base_link, query)
                        response = client.request(url, XHR=True)

                        grabber_dict = json.loads(response)

                        if grabber_dict['error'] is not None:
                            continue

                        sources_list = grabber_dict['data']

                        for j in sources_list:
                            try:
                                quality = source_utils.label_to_quality(j['label'])
                                link = j['file']

                                if 'lh3.googleusercontent' in link:
                                    link = directstream.googleproxy(link)

                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'direct': True,
                                    'debridonly': False
                                })

                            except Exception:
                                pass

                    elif info_dict['type'] == 'iframe':
                        # embed = self.__decode_shift(info_dict['target'], -18)
                        embed = info_dict['target']

                        valid, hoster = source_utils.is_host_valid(embed, hostDict)
                        if not valid: continue

                        headers = {
                            'Referer': self.base_link
                        }

                        embed = embed + source_utils.append_headers(headers)

                        sources.append({
                            'source': hoster,
                            'quality': '720p', # need a better way of identifying quality
                            'language': 'en',
                            'url': embed,
                            'direct': False,
                            'debridonly': False
                        })

                except Exception:
                    pass

            return sources

        except Exception:
            return sources
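Whatever site they scrape, all of these sources() methods return the same dict shape, so a caller can split the results on the 'direct' flag (illustrative usage; scraper stands for any of the classes in this collection):

results = scraper.sources(url, hostDict, hostprDict)

# direct entries are playable stream URLs; the rest are hoster embeds that
# still need a resolver
direct_streams = [s for s in results if s['direct']]
hoster_embeds = [s for s in results if not s['direct']]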
Code example #52
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, output='extended')

            headers = r[3]
            headers.update({
                'Cookie': r[2].get('Set-Cookie'),
                'Referer': self.base_link
            })
            r = r[0]

            rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
            rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
            rels = dom_parser.parse_dom(rels, 'li')
            rels = [(dom_parser.parse_dom(i,
                                          'a',
                                          attrs={'class': 'options'},
                                          req='href'),
                     dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
            rels = [(i[0][0].attrs['href'][1:],
                     re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                    for i in rels if i[0] and i[1]]
            rels = [
                i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
            ]

            r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]

            links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                               ''.join([i[0].content for i in r]))
            links += [
                l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                    i, 'iframe', attrs={'class': 'metaframe'}, req='src')
            ]
            links += [
                l.attrs['src'] for i in r
                for l in dom_parser.parse_dom(i, 'source', req='src')
            ]

            for i in links:
                try:
                    i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                    i = client.replaceHTMLCodes(i)

                    if '/play/' in i: i = urlparse.urljoin(self.base_link, i)

                    if self.domains[0] in i:
                        i = client.request(i, headers=headers, referer=url)

                        for x in re.findall('''\(["']?(.*)["']?\)''', i):
                            try:
                                i += jsunpack.unpack(
                                    base64.decodestring(
                                        re.sub('"\s*\+\s*"', '',
                                               x))).replace('\\', '')
                            except:
                                pass

                        for x in re.findall('(eval\s*\(function.*?)</script>',
                                            i, re.DOTALL):
                            try:
                                i += jsunpack.unpack(x).replace('\\', '')
                            except:
                                pass

                        links = [(match[0], match[1]) for match in re.findall(
                            '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                            i, re.DOTALL)]
                        links = [(x[0].replace('\/', '/'),
                                  source_utils.label_to_quality(x[1]))
                                 for x in links if '/no-video.mp4' not in x[0]]

                        doc_links = [
                            directstream.google(
                                'https://drive.google.com/file/d/%s/view' %
                                match)
                            for match in re.findall(
                                '''file:\s*["'](?:[^"']+youtu.be/([^"']+))''',
                                i, re.DOTALL)
                        ]
                        doc_links = [(u['url'], u['quality'])
                                     for x in doc_links if x for u in x]
                        links += doc_links

                        for url, quality in links:
                            if self.base_link in url:
                                url = url + '|Referer=' + self.base_link

                            sources.append({
                                'source': 'gvideo',
                                'quality': quality,
                                'language': 'de',
                                'url': url,
                                'direct': True,
                                'debridonly': False
                            })
                    else:
                        try:
                            # as long as urlresolver get no Update for this URL (So just a Temp-Solution)
                            did = re.findall(
                                'youtube.googleapis.com.*?docid=(\w+)', i)
                            if did:
                                i = 'https://drive.google.com/file/d/%s/view' % did[
                                    0]

                            valid, host = source_utils.is_host_valid(
                                i, hostDict)
                            if not valid: continue

                            urls, host, direct = source_utils.check_directstreams(
                                i, host)

                            for x in urls:
                                sources.append({
                                    'source': host,
                                    'quality': x['quality'],
                                    'language': 'de',
                                    'url': x['url'],
                                    'direct': direct,
                                    'debridonly': False
                                })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
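The temp-solution branch above rewrites a youtube.googleapis.com embed into a Drive view URL that the resolvers can handle; the rewrite in isolation (the docid is made up):

import re

link = 'https://youtube.googleapis.com/embed?docid=AbC123xYz'
did = re.findall('youtube.googleapis.com.*?docid=(\w+)', link)
if did:
    link = 'https://drive.google.com/file/d/%s/view' % did[0]
# link -> https://drive.google.com/file/d/AbC123xYz/view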
Code example #53
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            if 'episode' in data:
                url = self.__get_episode_url(data)
                get_body = 'type=episode&%s=%s&imd_id=%s&seasonsNo=%02d&episodesNo=%02d'
            else:
                url = self.__get_movie_url(data)

            response = client.request(url)
            url = re.findall('<iframe .+? src="(.+?)"', response)[0]

            response = client.request(url)

            token = re.findall('var tc = \'(.+?)\'', response)[0]
            seeds = re.findall(
                '_tsd_tsd\(s\) .+\.slice\((.+?),(.+?)\).+ return .+? \+ \"(.+?)\"\+\"(.+?)";',
                response)[0]
            pair = re.findall('\'type\': \'.+\',\s*\'(.+?)\': \'(.+?)\'',
                              response)[0]

            header_token = self.__xtoken(token, seeds)
            body = 'tokenCode=' + token

            headers = {
                'Content-Type':
                'application/x-www-form-urlencoded; charset=UTF-8',
                'x-token': header_token
            }

            url = urlparse.urljoin(self.source_link, self.decode_file)
            response = client.request(url,
                                      XHR=True,
                                      post=body,
                                      headers=headers)

            sources_dict = json.loads(response)

            for source in sources_dict:
                try:
                    if 'vidushare.com' in source:
                        sources.append({
                            'source': 'CDN',
                            'quality': 'HD',
                            'language': 'en',
                            'url': source,
                            'direct': True,
                            'debridonly': False
                        })
                except Exception:
                    pass

            # the grabber query needs episode parameters (get_body, season and
            # episode are only set in the episode branch above), so movies stop
            # here with the CDN sources collected so far
            if 'episode' not in data:
                return sources

            body = get_body % (pair[0], pair[1], data['imdb'],
                               int(data['season']), int(data['episode']))

            url = urlparse.urljoin(self.source_link, self.grabber_file)
            response = client.request(url,
                                      XHR=True,
                                      post=body,
                                      headers=headers)

            sources_dict = json.loads(response)

            for source in sources_dict:
                try:
                    quality = source_utils.label_to_quality(source['label'])
                    link = source['file']

                    if 'lh3.googleusercontent' in link:
                        link = directstream.googleredirect(link)

                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'direct': True,
                        'debridonly': False
                    })

                except Exception:
                    pass

            return sources

        except:
            return sources
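Nearly every example opens with the same two-line idiom that flattens Kodi's url-encoded parameter string into a plain dict; shown in isolation:

import urlparse

params = 'imdb=tt0111161&season=1&episode=2'
data = urlparse.parse_qs(params)
data = dict((i, data[i][0]) for i in data)
# data -> {'imdb': 'tt0111161', 'season': '1', 'episode': '2'}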
Code example #54
File: openmovies.py  Project: CYBERxNUKE/xbmc-addon
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None: raise Exception()

                r = client.request(url)

                y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]

                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()

            else:
                url = client.request(url, output='geturl')
                if url == None: raise Exception()

                r = client.request(url)


            try:
                result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                r = re.findall('"file"\s*:\s*"(.+?)"', result)

                for url in r:
                    try:
                        url = url.replace('\\', '')
                        url = directstream.googletag(url)[0]
                        sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            links = client.parseDOM(r, 'iframe', ret='src')

            for link in links:
                try:
                    if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link:
                        sources.append(
                            {'source': 'openload.co', 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False,
                             'debridonly': False})
                        raise Exception()
                    elif 'putstream' in link:
                        r = client.request(link)
                        r = re.findall(r'({"file.*?})', r)
                        for i in r:
                            try:
                                i = json.loads(i)
                                url = i['file']
                                q = source_utils.label_to_quality(i['label'])
                                if 'google' in url:
                                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                                    urls, host, direct = source_utils.check_directstreams(url, hoster)
                                    for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
                                else:
                                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                                    if not valid:
                                        if 'blogspot' in hoster or 'vidushare' in hoster:
                                            sources.append({'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                                        continue
                                    sources.append({'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                            except:
                                pass

                except:
                    pass

                try:
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')

                    if not '/play/' in url: raise Exception()

                    r = client.request(url, timeout='10')

                    s = re.compile('<script type="text/javascript">(.+?)</script>', re.DOTALL).findall(r)

                    for i in s:
                        try:
                            r += jsunpack.unpack(i)
                        except:
                            pass

                    try:
                        result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                        r = re.findall('"file"\s*:\s*"(.+?)"', result)

                        for url in r:
                            try:
                                url = url.replace('\\', '')
                                url = directstream.googletag(url)[0]
                                sources.append({'source': 'gvideo', 'quality': url['quality'], 'language': 'en', 'url': url['url'], 'direct': True, 'debridonly': False})
                            except:
                                pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
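The putstream branch above pulls brace-delimited JSON fragments straight out of the page markup and hands them to json.loads; the shape it expects looks like this (fragment contents are made up):

import json

frag = '{"file":"https://cdn.example.invalid/v.mp4","label":"720p"}'
i = json.loads(frag)
# i['file'] is the stream URL, i['label'] feeds label_to_quality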
Code example #55
    def sources(self, url, hostDict, hostprDict):
        '''
        Loops over site sources and returns a list of dictionaries with
        corresponding file locker sources and information

        Keyword arguments:

        url -- string - url params

        Returns:

        sources -- list - a list of source information dictionaries

        '''

        sources = []

        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            data['sources'] = ast.literal_eval(data['sources'])

            for i in data['sources']:
                try:
                    token = str(
                        self.__token({
                            'id': i[0],
                            'update': '0',
                            'ts': data['ts'],
                            'server': i[1]
                        }))

                    query = (self.info_path % (data['ts'], token, i[0], i[1]))
                    url = urlparse.urljoin(self.base_link, query)

                    info_response = client.request(
                        url, headers={'Referer': self.base_link}, XHR=True)

                    info_dict = json.loads(info_response)

                    if info_dict['type'] == 'direct':
                        token64 = info_dict['params']['token']
                        query = (self.grabber_path %
                                 (data['ts'], i[0],
                                  self.__decode_shift(token64, -18)))
                        url = urlparse.urljoin(self.base_link, query)
                        response = client.request(url, XHR=True)

                        grabber_dict = json.loads(response)

                        if grabber_dict['error'] is not None:
                            continue

                        sources_list = grabber_dict['data']

                        for j in sources_list:
                            try:
                                quality = source_utils.label_to_quality(
                                    j['label'])
                                link = j['file']

                                if 'lh3.googleusercontent' in link:
                                    link = directstream.googleproxy(link)

                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'direct': True,
                                    'debridonly': False
                                })

                            except Exception:
                                pass

                    elif info_dict['type'] == 'iframe':
                        # embed = self.__decode_shift(info_dict['target'], -18)
                        embed = info_dict['target']

                        valid, hoster = source_utils.is_host_valid(
                            embed, hostDict)
                        if not valid: continue

                        headers = {'Referer': self.base_link}

                        embed = embed + source_utils.append_headers(headers)

                        sources.append({
                            'source': hoster,
                            'quality':
                            '720p',  # need a better way of identifying quality
                            'language': 'en',
                            'url': embed,
                            'direct': False,
                            'debridonly': False
                        })

                except Exception:
                    pass

            return sources

        except Exception:
            return sources
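__decode_shift(token64, -18) is project-internal and never shown in these examples. One plausible reading, offered purely as an assumption, is a Caesar-style shift over the token's characters before it is placed in the grabber query:

def decode_shift(s, n):
    # Assumed implementation: shift each character's code point by n,
    # wrapping within a single byte.
    return ''.join(chr((ord(c) + n) % 256) for c in s)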
Code example #56
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            if 'tvshowtitle' in data:
                url = '%s/episodes/%s-%01dx%01d/' % (
                    self.base_link, cleantitle.geturl(data['tvshowtitle']),
                    int(data['season']), int(data['episode']))
                year = re.findall('(\d{4})', data['premiered'])[0]
                url = client.request(url, output='geturl')
                if url == None: raise Exception()
                ref = url

                r = client.request(url)

                y = client.parseDOM(r, 'span', attrs={'class': 'date'})[0]

                y = re.findall('(\d{4})', y)[0]
                if not y == year: raise Exception()

            else:
                url = client.request(url, output='geturl')
                if url == None: raise Exception()
                ref = url
                r = client.request(url)

            try:
                result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                r = re.findall('"file"\s*:\s*"(.+?)"', result)

                for url in r:
                    try:
                        url = url.replace('\\', '')
                        url = directstream.googletag(url)[0]
                        sources.append({
                            'source': 'gvideo',
                            'quality': url['quality'],
                            'language': 'en',
                            'url': url['url'],
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

            links = client.parseDOM(r, 'iframe', ret='src')
            m = re.search(r'class="qualityx">([^<]+)', r)
            q = m.group(1) if m is not None else 'SD'
            q = source_utils.get_release_quality(q)[0]

            for link in links:
                try:
                    if 'openload.io' in link or 'openload.co' in link or 'oload.tv' in link:
                        sources.append({
                            'source': 'openload.co',
                            'quality': 'SD',
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
                        raise Exception()
                    if re.search(r'^((?!youtube).)*embed.*$', link) is None:
                        values = re.findall(
                            r'nonces":{"ajax_get_video_info":"(\w+)".*?data-servers="(\d+)"\s+data-ids="([^"]+)',
                            r, re.DOTALL)
                        post = urllib.urlencode({
                            'action': 'ajax_get_video_info',
                            'ids': values[0][2],
                            'server': values[0][1],
                            'nonce': values[0][0]
                        })
                        r = client.request(
                            urlparse.urljoin(self.base_link, self.post_link),
                            post=post,
                            headers={
                                'Referer': ref,
                                'X-Requested-With': 'XMLHttpRequest',
                                'Accept-Encoding': 'gzip, deflate'
                            })
                    else:
                        r = client.request(link)

                    links = re.findall(
                        r'((?:{"file.*?})|(?:\/embed\/[^\']+))\'\s+id="(\d+)',
                        r)
                    strm_urls = re.findall(r'(https?.*-)\d+\.mp\w+', r)

                    for i in links:
                        try:
                            try:
                                i = json.loads(i[0])
                                url = i['file']
                                q = source_utils.label_to_quality(i['label'])
                            except:

                                url = '%s%s.mp4' % (strm_urls[0], i[1])
                                q = source_utils.label_to_quality(i[1])

                            if 'google' in url:
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                urls, host, direct = source_utils.check_directstreams(
                                    url, hoster)
                                for x in urls:
                                    sources.append({
                                        'source': host,
                                        'quality': x['quality'],
                                        'language': 'en',
                                        'url': x['url'],
                                        'direct': direct,
                                        'debridonly': False
                                    })

                            else:
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid:
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                    continue
                                else:
                                    sources.append({
                                        'source': hoster,
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': False,
                                        'debridonly': False
                                    })

                        except:
                            pass

                except:
                    pass

                try:
                    url = link.replace('\/', '/')
                    url = client.replaceHTMLCodes(url)
                    url = 'http:' + url if url.startswith('//') else url
                    url = url.encode('utf-8')

                    if not '/play/' in url: raise Exception()

                    r = client.request(url, timeout='10')

                    s = re.compile(
                        '<script type="text/javascript">(.+?)</script>',
                        re.DOTALL).findall(r)

                    for i in s:
                        try:
                            r += jsunpack.unpack(i)
                        except:
                            pass

                    try:
                        result = re.findall('sources\s*:\s*\[(.+?)\]', r)[0]
                        r = re.findall('"file"\s*:\s*"(.+?)"', result)

                        for url in r:
                            try:
                                url = url.replace('\\', '')
                                url = directstream.googletag(url)[0]
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': url['quality'],
                                    'language': 'en',
                                    'url': url['url'],
                                    'direct': True,
                                    'debridonly': False
                                })
                            except:
                                pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
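The strm_urls fallback above rebuilds quality variants of one stream URL by swapping the numeric suffix for the id scraped next to each fragment; the trick in isolation (URL is made up):

import re

page = 'file: "https://cdn.example.invalid/stream-360.mp4"'
strm_urls = re.findall(r'(https?.*-)\d+\.mp\w+', page)
url = '%s%s.mp4' % (strm_urls[0], '720')
# url -> https://cdn.example.invalid/stream-720.mp4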