Example #1
0
 def mz_server(self, url):
     """Scrape direct stream links from an mz server page.

     Fetches *url* and extracts every ``file: "...", label: "NNNp"`` pair,
     mapping each label to a quality tag and skipping SD entries.

     Parameters:
         url: page URL to fetch and scan.

     Returns:
         list of ``{'url': ..., 'quality': ...}`` dicts on success, or the
         original *url* unchanged if anything fails (best-effort fallback).
     """
     try:
         urls = []
         data = client.request(url)
         data = re.findall('''file:\s*["']([^"']+)",label:\s*"(\d{3,}p)"''',
                           data, re.DOTALL)
         # BUG FIX: the original loop bound its results to `url`/`label`,
         # shadowing the parameter — so the fallback below could return a
         # scraped link instead of the caller's URL. Use distinct names.
         for link, label in data:
             quality = source_utils.label_to_quality(label)
             if quality == 'SD':
                 continue
             urls.append({'url': link, 'quality': quality})
         return urls
     except Exception:
         # Hand the raw input URL back to the caller on any failure.
         return url
Example #2
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        """Resolve stream sources for a Sezonlukdizi page.

        Fetches the page, POSTs each item's data-id to the video endpoint,
        records the hosted iframe link, and — for ``.asp`` embeds carrying a
        captions track — scrapes label/file pairs into gvideo sources.

        Returns a list of source dicts; on failure, whatever was gathered
        so far (usually the empty list).
        """
        sources = []

        try:
            if not url:
                return sources

            page_url = urlparse.urljoin(self.base_link, url)

            html = client.request(page_url)
            # Replace non-ASCII bytes so the regexes below behave.
            html = re.sub(r'[^\x00-\x7F]+', ' ', html)

            items = dom_parser.parse_dom(html, 'div', attrs={'class': 'item'}, req='data-id')

            for page_id in (i.attrs['data-id'] for i in items):
                try:
                    post_url = urlparse.urljoin(self.base_link, self.video_link)

                    html = client.request(post_url, post={'id': page_id})
                    if not html:
                        continue

                    frame = dom_parser.parse_dom(html, 'iframe', req='src')[0].attrs['src']
                    if frame.startswith('//'):
                        frame = 'http:' + frame
                    if frame.startswith('/'):
                        frame = urlparse.urljoin(self.base_link, frame)

                    valid, host = source_utils.is_host_valid(frame, hostDict)
                    if valid:
                        sources.append({'source': host, 'quality': 'HD', 'url': frame, 'provider': 'Sezonlukdizi'})

                    if '.asp' not in frame:
                        continue

                    html = client.request(frame)

                    # Only pages declaring a captions track carry streams here.
                    if not re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', html):
                        continue

                    # Match label-then-file and file-then-label orderings.
                    pairs = [(m[0], m[1]) for m in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''', html, re.DOTALL | re.I)]
                    pairs += [(m[1], m[0]) for m in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''', html, re.DOTALL | re.I)]

                    streams = [(source_utils.label_to_quality(lbl), link.replace('\/', '/'))
                               for lbl, link in pairs]

                    for quality, link in streams:
                        if link.endswith('.vtt'):
                            continue  # subtitle track, not a video stream
                        sources.append({'source': 'gvideo', 'quality': quality, 'url': link, 'provider': 'Sezonlukdizi'})
                except:
                    pass

            return sources
        except Exception as e:
            control.log('ERROR sezonlukidz %s' % e)
            return sources
Example #3
0
    def sources(self, url, hostDict, locDict):
        """Search the site for the title and scrape direct CDN streams.

        Parameters:
            url: query string of title metadata (``urlparse.parse_qs`` form).
            hostDict: accepted hoster domains (unused here; kept for the
                provider interface).
            locDict: locale data (unused; interface requirement).

        Returns:
            list of source dicts ({'source','quality','language','url',
            'direct','debridonly'}); empty list when nothing matches or
            any step of the scrape fails.
        """
        sources = []

        try:
            if url is None:  # idiom fix: identity check, not ==
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            query = self.search_link % (urllib.quote_plus(title))
            query = urlparse.urljoin(self.base_link, query)

            result = client.request(query)
            r = client.parseDOM(result, 'div', attrs={'id': 'showList'})
            r = re.findall(
                r'<a\s+style="color:white;"\s+href="([^"]+)">([^<]+)', r[0])
            # First hit whose cleaned title matches and whose link text
            # contains the requested year; an empty match list raises
            # IndexError, handled by the outer except.
            r = [
                i for i in r if cleantitle.get(title) == cleantitle.get(i[1])
                and data['year'] in i[1]
            ][0]
            url = r[0]
            result = client.request(url)
            r = re.findall(
                r'video\s+id="\w+.*?src="([^"]+)".*?data-res="([^"]+)', result,
                re.DOTALL)

            for link, label in r:
                try:
                    q = source_utils.label_to_quality(label)
                    sources.append({
                        'source': 'CDN',
                        'quality': q,
                        'language': 'en',
                        'url': link,
                        'direct': True,
                        'debridonly': False
                    })
                except Exception:
                    # Skip entries whose label can't be mapped to a quality.
                    pass

            return sources
        except Exception:
            # Any scrape failure yields whatever was collected (usually []).
            return sources
Example #4
0
    def sources(self, url, hostDict, hostprDict):
        """Collect playable stream sources from an episode/movie page.

        Extracts ``data-video`` links, resolves vidnode/ocloud embeds to
        direct files with labels, and records everything else as a plain
        hosted SD source.

        Returns:
            list of source dicts; empty on failure.
        """
        try:
            sources = []
            if url is None:
                return sources

            # Domains that are always treated as valid hosts here.
            hostDict += [
                'akamaized.net', 'google.com', 'picasa.com', 'blogspot.com'
            ]
            result = client.request(url, timeout=10)

            dom = dom_parser.parse_dom(result, 'a', req='data-video')
            urls = [
                i.attrs['data-video']
                if i.attrs['data-video'].startswith('https') else 'https:' +
                i.attrs['data-video'] for i in dom
            ]

            for url in urls:
                dom = []
                if 'vidnode.net' in url:
                    result = client.request(url, timeout=10)
                    dom = dom_parser.parse_dom(result,
                                               'source',
                                               req=['src', 'label'])
                    dom = [
                        (i.attrs['src'] if i.attrs['src'].startswith('https')
                         else 'https:' + i.attrs['src'], i.attrs['label'])
                        for i in dom if i
                    ]
                elif 'ocloud.stream' in url:
                    result = client.request(url, timeout=10)
                    base = re.findall('<base href="([^"]+)">', result)[0]
                    hostDict += [base]
                    dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                    dom = [(i.attrs['href'].replace('./embed', base + 'embed'),
                            i.attrs['id']) for i in dom if i]
                    dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)",
                                       client.request(i[0]))[0], i[1])
                           for i in dom if i]
                if dom:
                    try:
                        for r in dom:
                            valid, hoster = source_utils.is_host_valid(
                                r[0], hostDict)

                            if not valid: continue
                            quality = source_utils.label_to_quality(r[1])
                            # Renamed from `urls`: the original rebinding
                            # shadowed the outer list being iterated.
                            stream_urls, host, direct = source_utils.check_directstreams(
                                r[0], hoster)
                            for x in stream_urls:
                                # BUG FIX: `size` was only assigned when
                                # `direct` was true, so the first non-direct
                                # stream raised NameError (swallowed by the
                                # except below) and dropped all remaining
                                # sources; it also went stale across
                                # iterations. Compute it fresh each pass.
                                size = source_utils.get_size(x['url']) if direct else None
                                if size:
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'language': 'en',
                                        'url': x['url'],
                                        'direct': direct,
                                        'debridonly': False,
                                        'info': size
                                    })
                                else:
                                    sources.append({
                                        'source': host,
                                        'quality': quality,
                                        'language': 'en',
                                        'url': x['url'],
                                        'direct': direct,
                                        'debridonly': False
                                    })
                    except Exception:
                        pass
                else:
                    valid, hoster = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    try:
                        # Python 2 check that the URL is valid UTF-8 bytes;
                        # undecodable links are skipped.
                        url.decode('utf-8')
                        sources.append({
                            'source': hoster,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                    except Exception:
                        pass
            return sources
        except Exception:
            return sources
Example #5
0
    def sources(self, url, hostDict, hostprDict):
        """Resolve streamdor-backed sources for a movie or TV episode.

        Workflow: locate the show/movie page, find the matching episode
        link(s), extract the obfuscated streamdor embed (JuicyCodes base64 +
        packed JS), then pull file/label pairs either via the source API or
        the embed's token endpoint.

        Returns:
            list of source dicts; empty (or partial) list on any failure.
        """
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # SECURITY NOTE(review): eval() on scraped/query metadata —
            # 'aliases' comes from the url payload; consider ast.literal_eval.
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                # Air year taken from the YYYY-MM-DD 'premiered' field.
                year = re.compile('(\d{4})-(\d{2})-(\d{2})').findall(data['premiered'])[0][0]
                episode = '%01d' % int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)

            else:
                episode = None
                year = data['year']
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            referer = url
            r = client.request(url)
            if episode == None:
                # Movies only: verify the page's release year matches.
                y = re.findall('Released\s*:\s*.+?\s*(\d{4})', r)[0]
                if not year == y: raise Exception()

            r = client.parseDOM(r, 'div', attrs={'class': 'sli-name'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))

            if not episode == None:
                # Match "Episode 05:" and "Episode 5:" style headings.
                r = [i[0] for i in r if
                     i[1].lower().startswith('episode %02d:' % int(data['episode'])) or i[1].lower().startswith(
                         'episode %d:' % int(data['episode']))]
            else:
                r = [i[0] for i in r]

            for u in r:
                try:
                    # NOTE(review): timeout passed as string '10' — client
                    # presumably coerces it; confirm.
                    p = client.request(u, referer=referer, timeout='10')
                    quali = re.findall(r'Quality:\s*<.*?>([^<]+)', p)[0]
                    quali = quali if quali in ['HD', 'SD'] else source_utils.label_to_quality(quali)
                    src = re.findall('src\s*=\s*"(.*streamdor.co/video/\d+)"', p)[0]
                    if src.startswith('//'):
                        src = 'http:' + src
                    episodeId = re.findall('.*streamdor.co/video/(\d+)', src)[0]
                    p = client.request(src, referer=u)
                    try:
                        # Decode the obfuscated player payload:
                        # JuicyCodes.Run("..."+"...") -> strip quoting/junk
                        # -> base64 -> packed JS -> unpacked source text.
                        p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p, re.IGNORECASE)[0]
                        p = re.sub(r'\"\s*\+\s*\"', '', p)
                        p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
                        p = base64.b64decode(p)
                        p = jsunpack.unpack(p)
                        p = unicode(p, 'utf-8')  # Python 2 text coercion
                    except:
                        continue

                    try:

                        fl = re.findall(r'file"\s*:\s*"([^"]+)', p)
                        if len(fl) > 0:
                            # Direct file reference: ask the source API for
                            # the real stream list.
                            fl = fl[0]
                            post = {'episodeID': episodeId, 'file': fl, 'subtitle': 'false',
                                    'referer': urllib.quote_plus(u)}
                            p = client.request(self.source_link, post=post, referer=src, XHR=True)
                            js = json.loads(p)
                            src = js['sources']
                            p = client.request('http:' + src, referer=src)
                            js = json.loads(p)[0]
                            ss = js['sources']
                            ss = [(i['file'], i['label']) for i in ss if 'file' in i]

                        else:
                            # No file reference: fall back to the token
                            # endpoint + inline `var episode={...}` JSON.
                            try:
                                post = {'id': episodeId}
                                p2 = client.request('https://embed.streamdor.co/token.php?v=5', post=post, referer=src,
                                                    XHR=True)
                                js = json.loads(p2)
                                tok = js['token']
                                p = re.findall(r'var\s+episode=({[^}]+});', p)[0]
                                js = json.loads(p)
                                ss = []
                                if 'eName' in js and js['eName'] != '':
                                    quali = source_utils.label_to_quality(js['eName'])
                                if 'fileEmbed' in js and js['fileEmbed'] != '':
                                    ss.append([js['fileEmbed'], quali])
                                if 'fileHLS' in js and js['fileHLS'] != '':
                                    ss.append(['https://hls.streamdor.co/%s%s' % (tok, js['fileHLS']), quali])
                            except:
                                pass

                        for i in ss:
                            try:
                                # Unknown hosts are treated as direct CDN links.
                                valid, hoster = source_utils.is_host_valid(i[0], hostDict)
                                direct = False
                                if not valid:
                                    hoster = 'CDN'
                                    direct = True
                                sources.append({'source': hoster, 'quality': quali, 'language': 'en', 'url': i[0],
                                                'direct': direct, 'debridonly': False})
                            except:
                                pass

                    except:
                        # Last resort: a plain embedURL hosted elsewhere.
                        url = re.findall(r'embedURL"\s*:\s*"([^"]+)', p)[0]
                        valid, hoster = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue
                        urls, host, direct = source_utils.check_directstreams(url, hoster)
                        for x in urls:
                            sources.append(
                                {'source': host, 'quality': 'SD', 'language': 'en', 'url': x['url'], 'direct': direct,
                                 'debridonly': False})

                except:
                    pass

            return sources
        except:
            return sources
Example #6
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape hosted and gvideo stream sources from an item page.

        Fetches the page (capturing its cookie for later POSTs), resolves
        each item's iframe, records hosted links, and for ``.asp`` embeds
        with a captions track scrapes label/file pairs as direct gvideo
        streams.

        Returns:
            list of source dicts; empty (or partial) list on failure.
        """
        sources = []

        try:
            if not url:
                return sources

            url = urlparse.urljoin(self.base_link, url)
            # Session cookie captured up front; reused for the POSTs below.
            c = client.request(url, output='cookie')
            result = client.request(url)

            # Replace non-ASCII bytes so the regexes below behave.
            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = dom_parser.parse_dom(result,
                                         'div',
                                         attrs={'class': 'item'},
                                         req='data-id')
            pages = [i.attrs['data-id'] for i in pages]

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)

                    result = client.request(url, post={'id': page}, cookie=c)
                    if not result: continue

                    url = dom_parser.parse_dom(result, 'iframe',
                                               req='src')[0].attrs['src']
                    # Normalize protocol-relative and site-relative links.
                    if url.startswith('//'): url = 'http:' + url
                    if url.startswith('/'):
                        url = urlparse.urljoin(self.base_link, url)

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if valid:
                        sources.append({
                            'source': host,
                            'quality': 'HD',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })

                    # Only .asp embeds carry the nested iframe / captions
                    # streams handled below.
                    if '.asp' not in url: continue

                    result = client.request(url, cookie=c)

                    try:
                        # Nested iframe, possibly wrapped by href.li redirect.
                        url = dom_parser.parse_dom(result, 'iframe',
                                                   req='src')[0].attrs['src']
                        url = url.replace('https://href.li/?', '')
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if valid:
                            if host == 'gvideo':
                                # Expand gvideo pages into direct per-quality
                                # streams.
                                ginfo = directstream.google(url)
                                for g in ginfo:
                                    sources.append({
                                        'source': host,
                                        'quality': g['quality'],
                                        'language': 'en',
                                        'url': g['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                            else:
                                sources.append({
                                    'source': host,
                                    'quality': 'HD',
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except:
                        pass

                    # Streams below only exist when a captions track is
                    # declared in the player config.
                    captions = re.search(
                        '''["']?kind["']?\s*:\s*(?:\'|\")captions(?:\'|\")''',
                        result)
                    if not captions: continue

                    # Match label-then-file and file-then-label orderings.
                    matches = [(match[0], match[1]) for match in re.findall(
                        '''["']?label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)''',
                        result, re.DOTALL | re.I)]
                    matches += [(match[1], match[0]) for match in re.findall(
                        '''["']?\s*file\s*["']?\s*[:=,]?\s*["'](?P<url>[^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?(?P<label>[^"',]+)''',
                        result, re.DOTALL | re.I)]

                    result = [(source_utils.label_to_quality(x[0]),
                               x[1].replace('\/', '/')) for x in matches]
                    # Drop subtitle tracks (.vtt) — not video streams.
                    result = [(i[0], i[1]) for i in result
                              if not i[1].endswith('.vtt')]

                    for quality, url in result:
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example #7
0
    def sources(self, url, hostDict, hostprDict):
        """Search the site and scrape per-episode stream links.

        When *url* is a metadata query string (not already an http URL),
        searches by cleaned title, builds the episode page URL, and pulls
        ``data-video`` links — resolving vidnode streaming pages to direct
        CDN files and recording other hosts as plain SD sources.

        Returns:
            list of source dicts; empty list on failure or when *url*
            already starts with 'http' (nothing is scraped in that case).
        """
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
                # NOTE(review): season/episode stay unbound when these keys
                # are absent; a resulting NameError is swallowed by the
                # outer except.
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                # Extended request yields (body, ..., headers, cookie);
                # cookie is replayed on all subsequent requests.
                r = client.request(self.base_link, output='extended', timeout='10')
                cookie = r[4];
                headers = r[3];
                result = r[0]
                headers['Cookie'] = cookie

                query = urlparse.urljoin(self.base_link,
                                         self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
                r = client.request(query, headers=headers, XHR=True)
                r = json.loads(r)['content']
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))

                if 'tvshowtitle' in data:
                    # Match "title season N" with and without zero-padding.
                    cltitle = cleantitle.get(title + 'season' + season)
                    cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
                    r = [i for i in r if cltitle == cleantitle.get(i[1]) or cltitle2 == cleantitle.get(i[1])]
                    vurl = '%s%s-episode-%s' % (self.base_link, str(r[0][0]).replace('/info', ''), episode)
                    vurl2 = None
                else:
                    cltitle = cleantitle.getsearch(title)
                    cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                    r = [i for i in r if
                         cltitle2 == cleantitle.getsearch(i[1]) or cltitle == cleantitle.getsearch(i[1])]
                    # Movies may live at episode-0 or episode-1; try both.
                    vurl = '%s%s-episode-0' % (self.base_link, str(r[0][0]).replace('/info', ''))
                    vurl2 = '%s%s-episode-1' % (self.base_link, str(r[0][0]).replace('/info', ''))

                r = client.request(vurl, headers=headers)
                headers['Referer'] = vurl

                slinks = client.parseDOM(r, 'div', attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
                if len(slinks) == 0 and not vurl2 == None:
                    # Fall back to the alternate episode URL for movies.
                    r = client.request(vurl2, headers=headers)
                    headers['Referer'] = vurl2
                    slinks = client.parseDOM(r, 'div', attrs={'class': 'anime_muti_link'})
                    slinks = client.parseDOM(slinks, 'li', ret='data-video')

                for slink in slinks:
                    try:
                        if 'vidnode.net/streaming.php' in slink:
                            # Resolve vidnode player to direct file/label pairs.
                            r = client.request('https:%s' % slink, headers=headers)
                            clinks = re.findall(r'sources:\[(.*?)\]', r)[0]
                            clinks = re.findall(r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)', clinks)
                            for clink in clinks:
                                q = source_utils.label_to_quality(clink[1])
                                sources.append(
                                    {'source': 'cdn', 'quality': q, 'language': 'en', 'url': clink[0], 'direct': True,
                                     'debridonly': False})
                        else:
                            valid, hoster = source_utils.is_host_valid(slink, hostDict)
                            if valid:
                                sources.append(
                                    {'source': hoster, 'quality': 'SD', 'language': 'en', 'url': slink, 'direct': False,
                                     'debridonly': False})
                    except:
                        pass

            return sources
        except:
            return sources