Example #1
0
 def sources(self, url, hostDict, hostprDict):
     """Collect hoster sources from 'Watch This Link' anchors on a page.

     :param url: page URL to scrape
     :param hostDict: list of valid host names used for validation
     :param hostprDict: premium hosts (unused here)
     :return: list of source dicts; empty list on any failure (never None)
     """
     sources = []
     try:
         html = client.request(url)
         pages = re.compile(
             'href="(.+?)" rel="noindex\,nofollow">Watch This Link</a>'
         ).findall(html)
         for url in pages:
             html = client.request(url)
             match = re.compile(
                 '<a href="(.+?)://(.+?)/(.+?)"><button class="wpb\_button  wpb\_btn\-primary wpb\_regularsize"> Click Here To Play</button> </a>'
             ).findall(html)
             for http, host, url in match:
                 url = '%s://%s/%s' % (http, host, url)
                 # NOTE(review): info and quality both come from check_url();
                 # quality likely needs a dedicated quality helper — confirm.
                 info = source_utils.check_url(url)
                 quality = source_utils.check_url(url)
                 valid, host = source_utils.is_host_valid(
                     host, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
     except Exception:
         # Bug fix: previously returned None on error; callers expect a list.
         return sources
     return sources
Example #2
0
 def sources(self, url, hostDict, hostprDict):
     """Scrape a YTS-style browse grid for magnet links (debrid-only).

     :param url: plugin query string carrying title/year metadata
     :param hostDict: unused here (magnet links only)
     :param hostprDict: unused here
     :return: list of source dicts; empty list on failure
     """
     try:
         sources = []
         if url is None: return sources
         # Magnet results are only useful when a debrid account is active.
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         query = '%s %s' % (data['title'], data['year'])
         url = self.search_link % urllib.quote(query)
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         try:
             # The third 'row' div holds the browse-results grid.
             results = client.parseDOM(html, 'div', attrs={'class': 'row'})[2]
         except Exception:
             return sources
         items = re.findall('class="browse-movie-bottom">(.+?)</div>\s</div>', results, re.DOTALL)
         # NOTE(review): re.findall never returns None; this guard is dead — confirm.
         if items is None:
             return sources
         for entry in items:
             try:
                 try:
                     link, name = \
                         re.findall('<a href="(.+?)" class="browse-movie-title">(.+?)</a>', entry, re.DOTALL)[0]
                     name = client.replaceHTMLCodes(name)
                     if not cleantitle.get(name) == cleantitle.get(data['title']):
                         continue
                 except Exception:
                     continue
                 # The release year is rendered as the entry's last four characters.
                 y = entry[-4:]
                 if not y == data['year']:
                     continue
                 response = client.request(link)
                 try:
                     entries = client.parseDOM(response, 'div', attrs={'class': 'modal-torrent'})
                     for torrent in entries:
                         link, name = re.findall(
                             'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                             torrent, re.DOTALL)[0]
                         link = 'magnet:%s' % link
                         # Drop the tracker list; debrid only needs the info-hash.
                         link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                         quality, info = source_utils.get_release_quality(name, name)
                         try:
                             size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                             # Normalize MB/MiB figures to GB.
                             div = 1 if size.endswith(('GB', 'GiB')) else 1024
                             size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                             size = '%.2f GB' % size
                             info.append(size)
                         except Exception:
                             pass
                         info = ' | '.join(info)
                         sources.append(
                             {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info,
                              'direct': False, 'debridonly': True})
                 except Exception:
                     continue
             except Exception:
                 continue
         return sources
     except Exception:
         return sources
Example #3
0
 def links(self, url):
     """Follow an entry page to its 'money' link page and collect link ids.

     Handles password-protected posts by submitting the stored password and
     replaying the request with the returned session cookie.
     :param url: entry page URL
     :return: list of link-id strings, or None on any failure
     """
     urls = []
     try:
         if url is None: return
         r = client.request(url)
         r = client.parseDOM(r, 'div', attrs={'class': 'entry'})
         r = client.parseDOM(r, 'a', ret='href')
         # Only the anchor containing 'money' leads to the link list page.
         r1 = [(i) for i in r if 'money' in i][0]
         r = client.request(r1)
         r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})[0]
         if 'enter the password' in r:
             plink = client.parseDOM(r, 'form', ret='action')[0]
             # NOTE(review): password value looks like a redacted placeholder — confirm.
             post = {'post_password': '******', 'Submit': 'Submit'}
             send_post = client.request(plink, post=post, output='cookie')
             # Replay the original page with the auth cookie attached.
             link = client.request(r1, cookie=send_post)
         else:
             link = client.request(r1)
         link = re.findall('<strong>Single(.+?)</tr', link, re.DOTALL)[0]
         link = client.parseDOM(link, 'a', ret='href')
         # Keep only the value after the final '=' (the link id).
         link = [(i.split('=')[-1]) for i in link]
         for i in link:
             urls.append(i)
         return urls
     except:
         pass
Example #4
0
def yandex(url):
    """Resolve a yadi.sk public page to a direct file URL (None on failure)."""
    try:
        session_cookie = client.request(url, output='cookie')

        page = client.request(url, cookie=session_cookie)
        # Strip non-ASCII bytes so the regexes below behave predictably.
        page = re.sub(r'[^\x00-\x7F]+', ' ', page)

        sk = re.findall('"sk"\s*:\s*"([^"]+)', page)[0]
        resource_id = re.findall('"id"\s*:\s*"([^"]+)', page)[0]

        # Random per-request client identifier, as the web player does.
        client_id = binascii.b2a_hex(os.urandom(16))

        payload = urllib.urlencode({
            'idClient': client_id,
            'version': '3.9.2',
            'sk': sk,
            '_model.0': 'do-get-resource-url',
            'id.0': resource_id
        })

        response = client.request('https://yadi.sk/models/?_m=do-get-resource-url',
                                  post=payload,
                                  cookie=session_cookie)
        response = json.loads(response)

        return response['models'][0]['data']['file']
    except:
        return
Example #5
0
 def sources(self, url, hostDict, hostprDict):
     """Scrape torrent magnets for a movie or episode (debrid-only).

     :param url: plugin query string with title/year (and season/episode)
     :param hostDict: unused (magnet links only)
     :param hostprDict: unused
     :return: list of source dicts; empty list on failure (never None)
     """
     sources = []
     try:
         if url is None: return sources
         # Magnets are only useful when a debrid account is active.
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
             data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         r = client.request(url)
         try:
             posts = client.parseDOM(r, 'div', attrs={'class': 'box-info'})
             for post in posts:
                 data = client.parseDOM(post, 'a', ret='href')
                 u = [i for i in data if '/torrent/' in i]
                 for u in u:
                     # Site slugs are dash separated; build the expected slug.
                     match = '%s %s' % (title, hdlr)
                     match = match.replace('+', '-').replace(' ', '-').replace(':-', '-').replace('---', '-')
                     if not match in u: continue
                     u = self.base_link + u
                     r = client.request(u)
                     r = client.parseDOM(r, 'div', attrs={'class': 'torrent-category-detail clearfix'})
                     for t in r:
                         link = re.findall('href="magnet:(.+?)" onclick=".+?"', t)[0]
                         link = 'magnet:%s' % link
                         # Drop the tracker list; debrid only needs the hash.
                         link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                         seeds = int(re.compile('<span class="seeds">(.+?)</span>').findall(t)[0])
                         if self.min_seeders > seeds:
                             continue
                         quality, info = source_utils.get_release_quality(link, link)
                         try:
                             size = re.findall('<strong>Total size</strong> <span>(.+?)</span>', t)
                             for size in size:
                                 size = '%s' % size
                                 info.append(size)
                         except BaseException:
                             pass
                         info = ' | '.join(info)
                         sources.append(
                             {'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info,
                              'direct': False, 'debridonly': True})
         except Exception:
             # Bug fix: previously returned None here; callers expect a list.
             return sources
         return sources
     except Exception:
         return sources
Example #6
0
    def sources(self, url, hostDict, hostprDict):
        """Find hoster links for a TV episode via the site's search API.

        :param url: plugin query string with tvshowtitle/season/episode/imdb
        :param hostDict: valid host names for link validation
        :param hostprDict: premium hosts (unused)
        :return: list of source dicts; empty list on failure
        """
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle']
            hdlr = 's%02de%02d' % (int(data['season']), int(data['episode']))

            query = urllib.quote_plus(cleantitle.getsearch(title))
            surl = urlparse.urljoin(self.base_link, self.search_link % query)
            r = client.request(surl, XHR=True)
            r = json.loads(r)
            r = r['series']
            for i in r:
                tit = i['value']
                # Raising aborts the whole loop on the first non-matching
                # title; caught by the outer handler below.
                if not cleantitle.get(title) == cleantitle.get(tit):
                    raise Exception()
                slink = i['seo']
                slink = urlparse.urljoin(self.base_link, slink)

                r = client.request(slink)
                if not data['imdb'] in r: raise Exception()
                # NOTE(review): 'data' is re-bound here, shadowing the query
                # dict — a later iteration would lose data['imdb']; confirm.
                data = client.parseDOM(r, 'div', {'class': 'el-item\s*'})
                epis = [
                    client.parseDOM(i, 'a', ret='href')[0] for i in data if i
                ]
                # First episode link whose URL contains e.g. 's01e02'.
                epis = [i for i in epis if hdlr in i.lower()][0]
                r = client.request(epis)
                links = client.parseDOM(r, 'a', ret='data-actuallink')
                for url in links:
                    try:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: raise Exception()
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                    # NOTE(review): returning stops at the first invalid host
                    # instead of skipping it — confirm this is intended.
                    except BaseException:
                        return sources

            return sources
        except BaseException:
            return sources
Example #7
0
 def resolve(self, url):
     """Decode a Base64-wrapped iframe embed when url points at our own site;
     pass any other URL through unchanged."""
     if self.base_link not in url:
         return url
     page = client.request(url)
     encoded = re.findall('document.write\(Base64.decode\("(.+?)"\)', page)[0]
     decoded = base64.b64decode(encoded)
     return client.parseDOM(decoded, 'iframe', ret='src')[0]
Example #8
0
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Resolve a show query-string into the page URL for one episode.

     Best-effort refines the show URL through a season-level search, then
     scans the details pane for the anchor whose text equals the episode
     number.
     :return: episode page URL, or None on failure
     """
     try:
         if url == None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['premiered'], url['season'], url[
             'episode'] = premiered, season, episode
         try:
             # Search for '<title>-season-N'; on failure fall through with
             # 'url' still holding the query dict (outer except handles it).
             clean_title = cleantitle.geturl(
                 url['tvshowtitle']) + '-season-%d' % int(season)
             search_url = urlparse.urljoin(
                 self.base_link,
                 self.search_link % clean_title.replace('-', '+'))
             r = client.request(search_url)
             r = client.parseDOM(r, 'div', {'class': 'item'})
             r = [(client.parseDOM(i, 'a', ret='href'),
                   re.findall('<b><i>(.+?)</i>', i)) for i in r]
             r = [(i[0][0], i[1][0]) for i in r
                  if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
             url = r[0][0]
         except:
             pass
         data = self.scraper.get(url).content
         data = client.parseDOM(data, 'div', attrs={'id': 'details'})
         data = zip(client.parseDOM(data, 'a'),
                    client.parseDOM(data, 'a', ret='href'))
         # Pick the anchor whose text is the zero-stripped episode number.
         url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
         return url[0][1]
     except:
         return
Example #9
0
    def sources(self, url, hostDict, hostprDict):
        """Scrape CDN stream URLs from an episode-list page.

        Bug fixes: '.conent' attribute typo raised AttributeError on every
        link; 'quality' was unbound when no quality badge existed; the
        return statement sat inside the outer loop, truncating results
        after the first episode block.
        :return: list of source dicts, or None on failure (original contract)
        """
        try:
            sources = []
            url = url + 'watch/'
            r = client.request(url)
            # Default quality; upgraded when an HD badge is present.
            quality = 'SD'
            qual = re.compile('class="quality">(.+?)<').findall(r)
            for i in qual:
                if 'HD' in i:
                    quality = '720p'
                else:
                    quality = 'SD'
            r = client.parseDOM(r, "div", attrs={"id": "list-eps"})
            for i in r:
                t = re.compile('<a href="(.+?)"').findall(i)
                for url in t:
                    t = self.scraper.get(url).content
                    t = client.parseDOM(t, "div", attrs={"id": "content-embed"})
                    for u in t:
                        i = re.findall('src="(.+?)"', u)[0].replace('load_player.html?e=', 'episode/embed/')
                        # Bug fix: was '.conent' (typo), an AttributeError.
                        i = self.scraper.get(i).content.replace("\\", "")
                        u = re.findall('"(https.+?)"', i)
                        for url in u:
                            sources.append(
                                {'source': 'CDN', 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
                                 'debridonly': False})
            # Bug fix: was inside the loop, dropping later episode blocks.
            return sources
        except Exception:
            return
Example #10
0
def get_size(url):
    """Fetch the remote file size for url and return it in readable form;
    False when the size is unavailable or the request fails."""
    try:
        raw = client.request(url, output='file_size')
        if raw == '0':
            raw = False
        return convert_size(raw)
    except:
        return False
Example #11
0
 def sources(self, url, hostDict, hostprDict):
     """Collect embed sources from 'data-video' anchors on a page.

     Bug fix: 'size' was only assigned when the stream was direct, so a
     non-direct first stream raised NameError and later iterations could
     reuse a stale value; it is now computed fresh per stream.
     :param url: page URL to scrape
     :param hostDict: valid host names (extended with known CDNs)
     :param hostprDict: premium hosts (unused)
     :return: list of source dicts; empty list on failure
     """
     try:
         sources = []
         if url == None: return sources
         hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
         result = self.scraper.get(url, timeout=10).content
         dom = dom_parser.parse_dom(result, 'a', req='data-video')
         urls = [
             i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video']
             for i in dom]
         for url in urls:
             dom = []
             if 'vidnode.net' in url:
                 result = self.scraper.get(url, timeout=10).content
                 dom = dom_parser.parse_dom(result, 'source', req=['src', 'label'])
                 dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'],
                         i.attrs['label']) for i in dom if i]
             elif 'ocloud.stream' in url:
                 result = self.scraper.get(url, timeout=10).content
                 base = re.findall('<base href="([^"]+)">', result)[0]
                 hostDict += [base]
                 dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                 dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                 dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
             if dom:
                 try:
                     for r in dom:
                         valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                         if not valid: continue
                         quality = source_utils.label_to_quality(r[1])
                         urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                         for x in urls:
                             # Bug fix: compute size per stream; None when
                             # the stream is not direct.
                             size = source_utils.get_size(x['url']) if direct else None
                             if size:
                                 sources.append(
                                     {'source': host, 'quality': quality, 'language': 'en', 'url': x['url'],
                                      'direct': direct, 'debridonly': False, 'info': size})
                             else:
                                 sources.append(
                                     {'source': host, 'quality': quality, 'language': 'en', 'url': x['url'],
                                      'direct': direct, 'debridonly': False})
                 except Exception:
                     pass
             else:
                 valid, hoster = source_utils.is_host_valid(url, hostDict)
                 if not valid: continue
                 try:
                     # Py2: confirm the URL is utf-8 decodable before use.
                     url.decode('utf-8')
                     sources.append(
                         {'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False,
                          'debridonly': False})
                 except Exception:
                     pass
         return sources
     except Exception:
         return sources
Example #12
0
    def episodeAbsoluteNumber(self, thetvdb, season, episode):
        """Map a season/episode pair to TVDB's absolute episode number,
        falling back to the given episode on any failure."""
        try:
            api_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64')
            lookup = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % (
                api_key, thetvdb, int(season), int(episode))
            response = client.request(lookup)
            return int(client.parseDOM(response, 'absolute_number')[0])
        except:
            pass

        return episode
Example #13
0
 def resolve(self, url):
     """Extract the first playable href from an episode page; None on failure."""
     try:
         page = client.request(url)
         anchor = dom_parser2.parse_dom(
             page, 'a', req=['href', 'data-episodeid', 'data-linkid'])[0]
         link = client.replaceHTMLCodes(anchor.attrs['href'])
         return link.encode('utf-8')
     except:
         return
Example #14
0
    def ororo_moviecache(self, user):
        """Return [(movie_id, normalized_imdb_id), ...] from the Ororo movie
        API; imdb ids are reduced to digits and re-prefixed with 'tt'."""
        try:
            url = urlparse.urljoin(self.base_link, self.moviesearch_link)

            movies = json.loads(client.request(url, headers=self.headers))['movies']
            return [(str(m['id']), 'tt' + re.sub('[^0-9]', '', str(m['imdb_id'])))
                    for m in movies]
        except:
            return
Example #15
0
    def getTVShowTranslation(self, thetvdb, lang):
        """Fetch the localized series name for a TVDB id; None on failure."""
        try:
            api_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64')
            xml_url = 'http://thetvdb.com/api/%s/series/%s/%s.xml' % (
                api_key, thetvdb, lang)
            xml = client.request(xml_url)
            name = client.parseDOM(xml, 'SeriesName')[0]
            name = client.replaceHTMLCodes(name)
            return name.encode('utf-8')
        except:
            pass
Example #16
0
    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links from the site's search results.

        Bug fix: the original dedupe check was `link in sources`, comparing
        a string against a list of dicts (always False), so duplicate links
        were never skipped; a seen-set now does the dedupe.
        :param url: plugin query string with title/year
        :param hostDict: valid host names
        :param hostprDict: premium hosts (unused)
        :return: list of source dicts; empty list on failure
        """
        sources = []
        try:
            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            urls = self.search(data['title'], data['year'])

            seen = set()
            for url in urls:
                try:
                    link = client.replaceHTMLCodes(url[1])
                    link = link.encode('utf-8')
                    if link in seen: continue
                    seen.add(link)
                    if 'snahp' in link:
                        # 'Hidden Link' posts wrap the real URL one level deep.
                        data = client.request(link)
                        data = client.parseDOM(data, 'center')
                        data = [i for i in data if 'Hidden Link' in i][0]
                        link = client.parseDOM(data, 'a', ret='href')[0]
                    if 'google' in link:
                        quality, info2 = source_utils.get_release_quality(
                            url[0], link)
                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })

                    else:
                        host = re.findall(
                            '([\w]+[.][\w]+)$',
                            urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if host in hostDict:
                            host = host.encode('utf-8')
                            quality, info2 = source_utils.get_release_quality(
                                url[0], link)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': False,
                                'debridonly': False
                            })
                except BaseException:
                    pass
            return sources
        except BaseException:
            return sources
Example #17
0
def geturl(url):
    """Resolve url to its final location, retrying through up to three
    random proxies when the redirect leaves the original host.

    :return: resolved URL (via parse() of a proxy response when proxied),
        or None when resolution fails
    """
    try:
        r = client.request(url, output='geturl')
        if r is None: return r

        host1 = re.findall('([\w]+)[.][\w]+$',
                           urlparse.urlparse(url.strip().lower()).netloc)[0]
        host2 = re.findall('([\w]+)[.][\w]+$',
                           urlparse.urlparse(r.strip().lower()).netloc)[0]
        # Same registered host: the redirect stayed on-site; trust it.
        if host1 == host2: return r

        # NOTE(review): the proxy list is shuffled twice; one of these
        # sorts is redundant.
        proxies = sorted(get(), key=lambda x: random.random())
        proxies = sorted(proxies, key=lambda x: random.random())
        proxies = proxies[:3]

        for p in proxies:
            p += urllib.quote_plus(url)
            r = client.request(p, output='geturl')
            if r is not None: return parse(r)
    except:
        pass
Example #18
0
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Return the URL of the requested episode page, or None when absent."""
     try:
         if not url:
             return
         search_url = self.base_link + self.search_link % (url, season)
         season = '%02d' % int(season)
         episode = '%02d' % int(episode)
         page = client.request(search_url)
         pattern = '<a href="(.+?)-S' + season + 'E' + episode + '-(.+?)">'
         hits = re.compile(pattern).findall(page)
         if hits:
             prefix, suffix = hits[0]
             return '%s-S%sE%s-%s' % (prefix, season, episode, suffix)
     except:
         return
Example #19
0
 def sources(self, url, hostDict, hostprDict):
     """Scrape magnet links from a search-results table (debrid-only).

     :param url: plugin query string with title/year (and season/episode)
     :param hostDict: unused (magnet links only)
     :param hostprDict: unused
     :return: list of source dicts; empty list on failure (never None)
     """
     sources = []
     try:
         if url is None: return sources
         # Magnets are only useful when a debrid account is active.
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         r = client.request(url)
         try:
             posts = client.parseDOM(r, 'tbody', attrs={'id': 'results'})
             for post in posts:
                 link = re.findall('a href="(magnet:.+?)" title="(.+?)"',
                                   post, re.DOTALL)
                 for url, data in link:
                     # Only keep releases naming the right episode/year.
                     if not hdlr in data: continue
                     # Drop the tracker list; debrid only needs the hash.
                     url = url.split('&tr')[0]
                     quality, info = source_utils.get_release_quality(data)
                     # Skip obviously non-English releases.
                     if any(x in url for x in [
                             'FRENCH', 'Ita', 'italian', 'TRUEFRENCH',
                             '-lat-', 'Dublado'
                     ]):
                         continue
                     info = ' | '.join(info)
                     sources.append({
                         'source': 'Torrent',
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'info': info,
                         'direct': False,
                         'debridonly': True
                     })
         except Exception:
             # Bug fix: previously returned None here; callers expect a list.
             return sources
         return sources
     except Exception:
         return sources
Example #20
0
def vk(url):
    """Extract playable stream variants from a VK video URL.

    :return: list of {'quality', 'url'} dicts (best tiers first), or None
    """
    try:
        query = urlparse.parse_qs(urlparse.urlparse(url).query)

        try:
            oid, video_id = query['oid'][0], query['id'][0]
        except:
            # Fall back to the /video<oid>_<id> path form.
            oid, video_id = re.findall('\/video(.*)_(.*)', url)[0]

        sources_url = 'http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s' % (
            oid, video_id)
        html = client.request(sources_url)
        # Strip non-ASCII bytes so the regexes below behave predictably.
        html = re.sub(r'[^\x00-\x7F]+', ' ', html)

        # NOTE(review): trailing 'n' was probably meant to be '\n' — confirm.
        sources = re.findall('(\d+)x\d+.+?(http.+?\.m3u8.+?)n', html)

        if not sources:
            sources = re.findall('"url(\d+)"\s*:\s*"(.+?)"', html)

        sources = [(i[0], i[1].replace('\\', '')) for i in sources]
        sources = dict(sources)

        url = []
        try:
            url += [{'quality': 'HD', 'url': sources['720']}]
        except:
            pass
        try:
            url += [{'quality': 'SD', 'url': sources['540']}]
        except:
            pass
        try:
            url += [{'quality': 'SD', 'url': sources['480']}]
        except:
            pass
        # Return as soon as any tier is collected, preferring higher ones.
        if not url == []: return url
        try:
            url += [{'quality': 'SD', 'url': sources['360']}]
        except:
            pass
        if not url == []: return url
        try:
            url += [{'quality': 'SD', 'url': sources['240']}]
        except:
            pass
        if not url == []: return url
    except:
        return
Example #21
0
def cldmailru(url):
    """Build a direct download URL for a cloud.mail.ru public link;
    None on failure."""
    try:
        public_part = url.split('public')[-1]

        page = client.request(url)
        # Strip non-ASCII bytes so the regexes below behave predictably.
        page = re.sub(r'[^\x00-\x7F]+', ' ', page)

        token = re.findall('"tokens"\s*:\s*{\s*"download"\s*:\s*"([^"]+)', page)[0]
        base = re.findall('"weblink_get"\s*:\s*\[.+?"url"\s*:\s*"([^"]+)', page)[0]

        return '%s%s?key=%s' % (base, public_part, token)
    except:
        return
Example #22
0
    def __get_base_url(self, fallback):
        """Probe each configured mirror and return the first live one;
        fall back to `fallback` when none responds as expected."""
        try:
            for domain in self.domains:
                try:
                    candidate = 'https://%s' % domain
                    html = client.request(candidate, limit=1, timeout='10')
                    title = re.findall('<input type="submit" title="(.+?)"',
                                       html, re.DOTALL)[0]
                    # A live mirror renders its branded search button.
                    if title and 'Pirate Search' in title:
                        return candidate
                except Exception:
                    pass
        except Exception:
            pass

        return fallback
Example #23
0
def googlepass(url):
    """Resolve a Google video link, preserving any piped-on request headers
    ('url|header=value&...'); None on failure."""
    try:
        try:
            headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except:
            headers = None
        base = url.split('|')[0].replace('\\', '')
        resolved = client.request(base, headers=headers, output='geturl')
        # Honor the server's scheme preference.
        if 'requiressl=yes' in resolved:
            resolved = resolved.replace('http://', 'https://')
        else:
            resolved = resolved.replace('https://', 'http://')
        if headers:
            resolved += '|%s' % urllib.urlencode(headers)
        return resolved
    except:
        return
Example #24
0
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     """Find the show's page URL by matching simplified titles against the
     site's search API; None when nothing matches or on failure."""
     try:
         simple_title = cleantitle.get_simple(tvshowtitle)
         slug = cleantitle.geturl(tvshowtitle).replace('-', '+')
         search_url = urlparse.urljoin(self.base_link,
                                       self.search_link % slug)
         payload = json.loads(client.request(search_url))['series']
         matches = [urlparse.urljoin(self.base_link, entry['seo_name'])
                    for entry in payload
                    if simple_title == cleantitle.get_simple(entry['original_name'])]
         return matches[0] if matches else None
     except:
         return
Example #25
0
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     """Pick the episode page link whose season/episode labels match.

     :return: href of the matching episode, or None when absent/failed
     """
     try:
         if url == None: return
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'el-item'})
         # For each item keep (season label, episode label, anchor).
         r = [(dom_parser2.parse_dom(i, 'div', {'class': 'season'}), \
               dom_parser2.parse_dom(i, 'div', {'class': 'episode'}), \
               dom_parser2.parse_dom(i, 'a', req='href')) \
              for i in r if i]
         # Keep hrefs where both labels read 'Season N' / 'Episode N'.
         r = [(i[2][0].attrs['href']) for i in r if i[0][0].content == 'Season %01d' % int(season) \
              and i[1][0].content == 'Episode %01d' % int(episode)]
         if r:
             return r[0]
         else:
             return
     except:
         return
Example #26
0
 def searchMovie(self, title, year):
     """Search the site's RSS feed for `title` and return the first link
     whose cleaned title matches exactly; None on failure."""
     try:
         query = self.search_link % urllib.quote_plus(
             cleantitle.getsearch(title))
         url = urlparse.urljoin(self.base_link, query)
         feed = client.request(url)
         items = client.parseDOM(feed, 'item')
         pairs = [(client.parseDOM(i, 'title')[0],
                   client.parseDOM(i, 'link')[0])
                  for i in items if i]
         wanted = cleantitle.get(title)
         matches = [link for name, link in pairs
                    if cleantitle.get(name) == wanted]
         return matches[0]
     except Exception:
         return
Example #27
0
 def _get_items(self, url):
     """Parse a torrent listing page into (name, magnet, size) tuples.

     Bug fixes: the parenthesis-stripping regex was '(|)', which matches
     only the empty string and therefore removed nothing (now '[()]');
     GiB sizes were wrongly divided by 1024 even though the size regex
     matches GiB.
     :return: self.items (also extended in place)
     """
     try:
         headers = {'User-Agent': client.agent()}
         r = client.request(url, headers=headers)
         posts = client.parseDOM(r,
                                 'tr',
                                 attrs={'id': 'torrent_latest_torrents'})
         for post in posts:
             data = client.parseDOM(post,
                                    'a',
                                    attrs={'title': 'Torrent magnet link'},
                                    ret='href')[0]
             link = urllib.unquote(data).decode('utf8').replace(
                 'https://mylink.me.uk/?url=', '')
             name = urllib.unquote_plus(
                 re.search('dn=([^&]+)', link).groups()[0])
             t = name.split(self.hdlr)[0]
             # Bug fix: strip literal parentheses; '(|)' matched nothing.
             if not cleantitle.get(re.sub('[()]', '', t)) == cleantitle.get(
                     self.title):
                 continue
             try:
                 y = re.findall(
                     '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                     name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall(
                     '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                     re.I)[-1].upper()
             if not y == self.hdlr: continue
             try:
                 size = re.findall(
                     '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)[0]
                 # Bug fix: GiB is already gigabyte-scale — no /1024.
                 div = 1 if size.endswith(('GB', 'GiB')) else 1024
                 size = float(
                     re.sub('[^0-9|/.|/,]', '', size.replace(',',
                                                             '.'))) / div
                 size = '%.2f GB' % size
             except BaseException:
                 size = '0'
             self.items.append((name, link, size))
         return self.items
     except BaseException:
         return self.items
Example #28
0
def cdnImport(uri, name):
    """Download a Python module from `uri` and import it as `name`.

    The source is written under dataPath, loaded via imp.load_source, and
    the on-disk copy is removed again (the module stays loaded in memory).
    SECURITY NOTE: this executes remote code fetched over the network.
    :return: the imported module object
    """
    import imp
    from resistancescrapers.modules import client

    path = os.path.join(dataPath, 'py' + name)
    path = path.decode('utf-8')

    # Start from a clean slate, then ensure the directories exist.
    deleteDir(os.path.join(path, ''), force=True)
    makeFile(dataPath)
    makeFile(path)

    r = client.request(uri)
    p = os.path.join(path, name + '.py')
    f = openFile(p, 'w')
    f.write(r)
    f.close()
    m = imp.load_source(name, p)

    # Clean up the temporary module directory.
    deleteDir(os.path.join(path, ''), force=True)
    return m
Example #29
0
def odnoklassniki(url):
    """List stream qualities for an ok.ru video.

    Queries the videoPlayerMetadata endpoint and maps ok.ru's internal
    quality names onto plugin quality labels; all HD tiers are returned
    plus at most one SD fallback.
    :return: list of {'quality', 'url'} dicts, or None on failure
    """
    try:
        media_id = re.compile('//.+?/.+?/([\w]+)').findall(url)[0]

        result = client.request('http://ok.ru/dk',
                                post={
                                    'cmd': 'videoPlayerMetadata',
                                    'mid': media_id
                                })
        # Strip non-ASCII bytes before JSON parsing.
        result = re.sub(r'[^\x00-\x7F]+', ' ', result)
        result = json.loads(result).get('videos', [])

        hd = []
        for name, quali in {
                'ultra': '4K',
                'quad': '1440p',
                'full': '1080p',
                'hd': 'HD'
        }.items():
            hd += [{
                'quality': quali,
                'url': i.get('url')
            } for i in result if i.get('name').lower() == name]

        sd = []
        for name, quali in {
                'sd': 'SD',
                'low': 'SD',
                'lowest': 'SD',
                'mobile': 'SD'
        }.items():
            sd += [{
                'quality': quali,
                'url': i.get('url')
            } for i in result if i.get('name').lower() == name]

        # All HD variants plus at most one SD fallback.
        url = hd + sd[:1]
        if not url == []: return url
    except:
        return
Example #30
0
    def search(self, title, year):
        """Search every configured mirror for a title/year match.

        :return: [(post_title, post_url)] from the last matching mirror,
            an empty list when nothing matched, or None on unexpected failure
        """
        try:
            content = []
            for link in self.base_link:
                try:
                    query = urlparse.urljoin(
                        link, self.search_link % (urllib.quote(title), year))
                    r = client.request(query)
                    r = json.loads(r)
                    r = r['data']['children'][0]['data']

                    # NOTE(review): get_simple() vs get() normalizers are
                    # mixed here — confirm their outputs are comparable.
                    if not cleantitle.get_simple(r['title'].split(year)
                                                 [0]) == cleantitle.get(title):
                        raise Exception()
                    if not year in r['title']: raise Exception()
                    content = [(r['title'], r['url'])]

                except BaseException:
                    pass
            return content
        except BaseException:
            return