Example #1
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         for u in url:
             hostDict += ['clicknupload.org']
             if '-1080p' in u or 'bluray-2' in u:
                 quality = '1080p'
             elif '-720p' in u or 'bluray' in u:
                 quality = '720p'
             else:
                 quality = 'SD'
             
             r = client.request(u)
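             # Parse the "download-links" list and keep only anchors that point at known hosters.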
             r = dom_parser2.parse_dom(r, 'ul', {'class': 'download-links'})
             r = dom_parser2.parse_dom(r, 'a', req=['href'])
             r = [i.attrs['href'] for i in r if i]
             for i in r:
                 try:
                     valid, host = source_utils.is_host_valid(i, hostDict)
                     if not valid: continue
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': i,
                         'direct': False,
                         'debridonly': False
                     })
                 except: pass
         return sources
     except Exception:
         return sources
Example #2
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
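         # url arrives as a URL-encoded query string; decode it into a plain dict of scalar values.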
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['premiered'], url['season'], url['episode'] = premiered, season, episode
         try:
             clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
             search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
             r = self.scraper.get(search_url).content
             r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
             r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                   dom_parser2.parse_dom(i, 'div', attrs={'class':'status'})[0]) for i in r if i]
             r = [(i[0][0].attrs['href'], re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
                   re.findall('(\d+)', i[1].content)[0]) for i in r if i]
             r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                  if (cleantitle.get(i[1].split(':')[0]) == cleantitle.get(url['tvshowtitle']) and i[2] == str(int(season)))]
             url = r[0][0]
         except:
             pass
         data = self.scraper.get(url).content
         data = client.parseDOM(data, 'div', attrs={'id': 'details'})
         data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
         url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
         return url[0][1]
     except:
         return
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources

            r = client.request(url)
            quality = re.findall(">(\w+)<\/p", r)
            if quality[0] == "HD":
                quality = "720p"
            else:
                quality = "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {
                    'url': i.attrs['href'],
                    'data-film': i.attrs['data-film'],
                    'data-server': i.attrs['data-server'],
                    'data-name': i.attrs['data-name']}
                url = urllib.urlencode(url)
                sources.append({'source': i.content, 'quality': quality, 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('FreePutlockers - Exception: \n' + str(failure))
            return sources
Example #4
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            sep = '%dx%02d' % (int(season), int(episode))
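            # sep is a "1x01"-style season/episode tag matched against the episode link text below.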
            r = client.request(url, headers=self.headers)
            r = dom_parser2.parse_dom(r, 'span', attrs={'class': 'list'})
            r1 = dom_parser2.parse_dom(r, 'br')
            r1 = [dom_parser2.parse_dom(i, 'a', req='href') for i in r1]
            try:
                if int(season) == 1 and int(episode) == 1:
                    url = dom_parser2.parse_dom(r, 'a', req='href')[1].attrs['href']
                else:
                    for i in r1:
                        if sep in i[0].content:
                            url = urlparse.urljoin(self.base_url, i[0].attrs['href'])
            except:
                pass
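            # Strip the trailing slash, keep the id after "?v=", and build the final list URL from it.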
            url = url[:-1]
            url = url.split('?v=')[1]
            url = self.list_url % url
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('IceFilms - Exception: \n' + str(failure))
            return
Example #5
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if not url: return
         sep = '%dx%02d' % (int(season), int(episode))
         r = self.scraper.get(url).content
         if 'To proceed, you must allow popups' in r:
             for i in range(0, 5):
                 r = self.scraper.get(url).content
                 if 'To proceed, you must allow popups' not in r: break    
         r = dom_parser2.parse_dom(r, 'span', attrs={'class': 'list'})
         r1 = dom_parser2.parse_dom(r, 'br')
         r1 = [dom_parser2.parse_dom(i, 'a', req='href') for i in r1]
         try:
             if int(season) == 1 and int(episode) == 1:
                 url = dom_parser2.parse_dom(r, 'a', req='href')[1].attrs['href']
             else:
                 for i in r1:
                     if sep in i[0].content:
                         url = urlparse.urljoin(self.base_link, i[0].attrs['href'])
         except:
             pass
         url = url[:-1]
         url = url.split('?v=')[1]
         url = self.list_url % url
         return url
     except:
         return
Example #6
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []          
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'id': re.compile('option-\d+')})
         r = [dom_parser2.parse_dom(i, 'iframe', req=['src']) for i in r if i]
         r = [(i[0].attrs['src']) for i in r if i]
         if r:
             for url in r:
                 try:
                     host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                     if host in hostDict:
                         host = client.replaceHTMLCodes(host)
                         host = host.encode('utf-8')
                         sources.append({
                             'source': host,
                             'quality': 'SD',
                             'language': 'en',
                             'url': url.replace('\/','/'),
                             'direct': False,
                             'debridonly': False
                         })
                 except: pass
         return sources
     except Exception:
         return sources
Example #7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []          
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         r = [(i[0].attrs['href'], re.search('/(\w+).html', i[0].attrs['href'])) for i in r if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         for i in r:
             try:
                 host = i[1]
                 if str(host) in str(hostDict):
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': i[0].replace('\/','/'),
                         'direct': False,
                         'debridonly': False
                     })
             except: pass
         return sources
     except Exception:
         return sources
Example #8
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(title)
            search_url = self.search_link % (clean_title.replace('-','+'), year)
            headers = {'Host': 'icefilms1.unblocked.sh',
                       'Cache-Control': 'max-age=0',
                       'Connection': 'keep-alive',
                       'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                       'Upgrade-Insecure-Requests': '1',
                       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                       'Accept-Encoding': 'gzip, deflate, br',
                       'Accept-Language': 'en-US,en;q=0.8'}

            r = client.request(search_url, headers=headers)
            r = dom_parser2.parse_dom(r, 'td')
            r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r if "<div class='number'" in i.content]
            r = [(urlparse.urljoin(self.base_url, i[0].attrs['href'])) for i in r if title.lower() in i[0].content.lower() and year in i[0].content]
            url = r[0]
            url = url[:-1]
            url = url.split('?v=')[1]
            url = self.list_url % url
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('IceFilms - Exception: \n' + str(failure))
            return
Example #9
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []          
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'tab_box'})[0]
         r = dom_parser2.parse_dom(r.content, 'iframe', req='src')[0]
         url = r.attrs['src']
         if r:
             try:
                 host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                 if host in hostDict:
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url.replace('\/','/'),
                         'direct': False,
                         'debridonly': False
                     })
             except: pass
         return sources
     except Exception:
         return sources
Example #10
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            r = client.request(url)
            r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
            r = [(dom_parser2.parse_dom(i, 'a', req='href'), \
                  dom_parser2.parse_dom(i, 'div', {'class': 'notes'})) \
                  for i in r if i]
            r = [(i[0][0].attrs['href'], i[0][0].content, i[1][0].content if i[1] else 'None') for i in r]
            for i in r:
                try:
                    url = i[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    valid, host = source_utils.is_host_valid(i[1], hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    
                    info = []
                    quality, info = source_utils.get_release_quality(i[2], i[2])

                    info = ' | '.join(info)
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #11
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title'].replace(':','').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)
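            # The "do=search&subaction=search" fields look like a DataLife Engine search form; story carries the query.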

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i, 'div', attrs={'class':'news-title'})) for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[1]
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()

                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
                    s = s[0] if s else '0'
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
                    data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                                      data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]

                    for name, url, size in u:
                        try:
                            if '4K' in name:
                                quality = '4K'
                            elif '2160p' in name:
                                quality = '4K'
                            elif '1440p' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in name for i in ['dvdscr', 'r5', 'r6']):
                                quality = 'SCR'
Example #12
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title)
         search_url = urlparse.urljoin(self.base_link, self.search_link)
         post = ('do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s+%s' % (clean_title.replace('-','+'), year))
         r = client.request(search_url, post=post)
         r = dom_parser2.parse_dom(r, 'article', {'class': ['shortstory','cf']})[0]
         r = dom_parser2.parse_dom(r.content, 'a', req='href')[0]
         url = r.attrs['href']
         return url
     except Exception:
         return
Example #13
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(tvshowtitle)
         search_url = self.search_link % (clean_title.replace('-','+'), year)
         r = client.request(search_url, headers=self.headers)
         r = dom_parser2.parse_dom(r, 'td')
         r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r if "<div class='number'" in i.content]
         r = [(urlparse.urljoin(self.base_url, i[0].attrs['href'])) for i in r if tvshowtitle.lower() in i[0].content.lower() and year in i[0].content]
         url = r[0]
         return url
     except:
         return
Example #14
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title)
         search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
         r = self.scraper.get(search_url).content
         r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
         r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
               re.findall('status-year">(\d{4})</div', i.content, re.DOTALL)[0]) for i in r if i]
         r = [(i[0][0].attrs['href'], re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0], i[1]) for i in r if i]
         r = [(i[0], i[1], i[2]) for i in r if (cleantitle.get(i[1]) == cleantitle.get(title) and i[2] == year)]
         url = r[0][0]
         return url
     except Exception:
         return
Example #15
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title)
         search_url = urlparse.urljoin(self.base_link, self.search_link_movie % clean_title.replace('-','+'))
         r = cache.get(client.request, 6, search_url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'movie'})
         r = [(dom_parser2.parse_dom(i.content, 'a', req='href'), \
               dom_parser2.parse_dom(i.content, 'div', {'class': 'year'})) \
               for i in r]
         r = [(urlparse.urljoin(self.base_link, i[0][0].attrs['href']), i[1][0].content) for i in r if i[1][0].content == year]
         url = r[0][0]
         return url
     except Exception:
         return
Example #16
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []          
         r = cache.get(client.request, 6, url)
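         # First pass: the page loads some embeds via $.get(...) in JavaScript;
         # rebuild those AJAX URLs and pull the iframe source out of each response.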
         try:
             v = re.findall('\$\.get\(\'(.+?)(?:\'\,\s*\{\"embed\":\")([\d]+)', r)
             for i in v:
                 url = urlparse.urljoin(self.base_link, i[0] + '?embed=%s' % i[1])
                 ri = cache.get(client.request, 6, url)
                 url = dom_parser2.parse_dom(ri, 'iframe', req='src')[0]
                 url = url.attrs['src']
                 try:
                     host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                     if host in hostDict:
                         host = client.replaceHTMLCodes(host)
                         host = host.encode('utf-8')
                         sources.append({
                             'source': host,
                             'quality': 'SD',
                             'language': 'en',
                             'url': url.replace('\/','/'),
                             'direct': False,
                             'debridonly': False
                         })
                 except: pass
         except: pass
         r = dom_parser2.parse_dom(r, 'div', {'class': ['btn','btn-primary']})
         r = [dom_parser2.parse_dom(i.content, 'a', req='href') for i in r]
         r = [(i[0].attrs['href'], re.search('<\/i>\s*(\w+)', i[0].content)) for i in r]
         r = [(i[0], i[1].groups()[0]) for i in r if i[1]]
         if r:
             for i in r:
                 try:
                     host = i[1]
                     url = i[0]
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url.replace('\/','/'),
                         'direct': False,
                         'debridonly': False
                     })
                 except: pass
         return sources
     except Exception:
         return sources
Example #17
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'el-item'})
         r = [(dom_parser2.parse_dom(i, 'div', {'class': 'season'}), \
               dom_parser2.parse_dom(i, 'div', {'class': 'episode'}), \
               dom_parser2.parse_dom(i, 'a', req='href')) \
               for i in r if i]
         r = [(i[2][0].attrs['href']) for i in r if i[0][0].content == 'Season %01d' % int(season) \
               and i[1][0].content == 'Episode %01d' % int(episode)]
         if r: return r[0]
         else: return
     except:
         return
Example #18
    def sources(self, url, hostDict, hostprDict):
    
        self._sources = []
        try:                
            if not url: return self._sources
            
            self.hostDict = hostDict
            self.hostprDict = hostprDict
   
            referer = url
            
            html = self.scraper.get(url).content
            if 'To proceed, you must allow popups' in html:
                for i in range(0, 5):
                    html = self.scraper.get(url).content
                    if 'To proceed, you must allow popups' not in html: break
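            # The player page hides anti-bot tokens in inline JavaScript: a "secret"
            # value (possibly split across two string literals), a "t" token, and
            # "s"/"m" counters that the POST below has to echo back.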
            match = re.search('lastChild\.value="([^"]+)"(?:\s*\+\s*"([^"]+))?', html)
            
            secret = ''.join(match.groups(''))
            match = re.search('"&t=([^"]+)', html)
            t = match.group(1)
            match = re.search('(?:\s+|,)s\s*=(\d+)', html)
            s_start = int(match.group(1))
            
            match = re.search('(?:\s+|,)m\s*=(\d+)', html)
            m_start = int(match.group(1))
            
            threads = []
            
            for fragment in dom_parser2.parse_dom(html, 'div', {'class': 'ripdiv'}):
                match = re.match('<b>(.*?)</b>', fragment.content)
                if match:
                    q_str = match.group(1).replace(' ', '').upper()
                    if '1080' in q_str: quality = '1080p'
                    elif '720' in q_str: quality = '720p'
                    elif '4k' in q_str.lower(): quality = '4K'
                    else: quality = 'SD'
                else:
                    quality = 'SD'
                                
                pattern = '''onclick='go\((\d+)\)'>([^<]+)(<span.*?)</a>'''
                for match in re.finditer(pattern, fragment.content):
                    link_id, label, host_fragment = match.groups()
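                    # s and m appear to be page counters; the random bump mimics
                    # the values the player script itself would have sent.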
                    s = s_start + random.randint(3, 1000)
                    m = m_start + random.randint(21, 1000)                            
                    post = self.post % (link_id, s, m, secret, t)
                    url = urlparse.urljoin(self.base_link, 'membersonly/components/com_iceplayer/video.phpAjaxResp.php?s=%s&t=%s' % (link_id, t))
                                        
                    threads.append(workers.Thread(self._get_sources, url, post, host_fragment, quality, referer))
            
            [i.start() for i in threads]
            [i.join() for i in threads]

            alive = [x for x in threads if x.is_alive()]
            while alive:
                alive = [x for x in threads if x.is_alive()]
                time.sleep(0.1)
            return self._sources
        except:
            return self._sources
Example #19
    def _get_items(self, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urlparse.urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                if cleantitle.get(re.sub('(\(|\))', '', t)) != cleantitle.get(self.title): continue

                try:
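                    # Release names carry an SxxExx tag for shows (or a year for movies); it must match self.hdlr.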
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr: continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    div = 1 if size.endswith('GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size
                except BaseException:
                    size = '0'

                self.items.append((name, link, size))
            return self.items
        except BaseException:
            return self.items
Example #20
 def _search(self, title):
     try:
         url = self.search_link % title
         headers = {
             'Host': 'vidics.unblocked.mx',
             'Origin': 'https://vidics.unblocked.mx',
             'X-Requested-With': 'XMLHttpRequest',
             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
             'Referer': 'https://vidics.unblocked.mx/Film/%s' % title.replace('%20', '_')
         }
         r = client.request(url, post='ajax=1', headers=headers)
         count = 0
         while len(r) == 0 and count <= 10:
             r = client.request(url, post='ajax=1', headers=headers)
             count += 1
         r = dom_parser2.parse_dom(r, 'tr')
         r = [
             urlparse.urljoin(
                 self.base_link,
                 re.findall("href='([^']+)", i.attrs['onclick'])[0])
             for i in r
         ]
         return r[0]
     except Exception:
         return
Example #21
 def _get_sources(self, item):
     try:
         links = dom_parser2.parse_dom(item, 'a', req='href')
         links = [i.attrs['href'] for i in links]
         info = []
         try:
             size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
             div = 1 if size.endswith('GB') else 1024
             size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
             size = '%.2f GB' % size
             info.append(size)
         except Exception:
             pass
         info = ' | '.join(info)
         for url in links:
             if 'youtube' in url: continue
             # Archives are not streamable; skip this link rather than aborting the loop.
             if any(x in url for x in ['.rar.', '.zip.', '.iso.']) or url.endswith(('.rar', '.zip', '.iso')): continue
             valid, host = source_utils.is_host_valid(url, self.hostDict)
             if not valid: continue
             host = client.replaceHTMLCodes(host)
             host = host.encode('utf-8')
             quality, info2 = source_utils.get_release_quality(url, url)
             if url in str(self._sources): continue
             self._sources.append(
                 {'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                  'debridonly': True})
     except Exception:
         pass
Example #22
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         clean_title = cleantitle.geturl(url['tvshowtitle'])+'-s%02d' % int(season)
         url = urlparse.urljoin(self.base_link, (self.search_link %(clean_title,url['year'])))
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             if i.content == 'Episode %s'%episode:
                 url = i.attrs['href']
         return url
     except:
         return
Example #23
 def _get_items(self, url):
     try:
         headers = {'User-Agent': client.agent()}
         r = client.request(url, headers=headers)
         posts = client.parseDOM(r, 'tbody')[0]
         posts = client.parseDOM(posts, 'tr')
         for post in posts:
             data = dom.parse_dom(post, 'a', req='href')[1]
             link = urlparse.urljoin(self.base_link, data.attrs['href'])
             name = data.content
             t = name.split(self.hdlr)[0]
             if cleantitle.get(re.sub('(\(|\))', '', t)) != cleantitle.get(self.title):
                 continue
             try:
                 y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
             if not y == self.hdlr:
                 continue
             try:
                 size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                 div = 1 if size.endswith('GB') else 1024
                 size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                 size = '%.2f GB' % size
             except BaseException:
                 size = '0'
             self.items.append((name, link, size))
         return self.items
     except BaseException:
         return self.items
Example #24
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            year = data['year']
            h = {'User-Agent': client.randomagent()}
            title = cleantitle.geturl(data['title']).replace('-', '_')
            url = urlparse.urljoin(self.base_link, self.search_link %(title, year))
            r = client.request(url, headers=h)
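            # Grab the vidlink id embedded in the page; it feeds the streamdrive info endpoint below.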
            vidlink = re.findall('d\/(.+)"',r)
            r = dom_parser2.parse_dom(r, 'div', {'class': 'title'})
            if '1080p' in r[0].content:
                quality = '1080p'
            elif '720p' in r[0].content:
                quality = '720p'
            else:
                quality = 'SD'
            u = 'https://vidlink.org/streamdrive/info/%s' % vidlink[0]
            r = client.request(u, headers=h)
            r = json.loads(r)
            for i in r:
                try: sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': i['url'], 'direct': True, 'debridonly': False})
                except: pass
            return sources
        except:
            return sources
Example #25
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None: return self._sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = cleantitle.geturl(query)
            url = urlparse.urljoin(self.base_link, query)

            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = dom_parser2.parse_dom(r, 'li', {'class': re.compile('.+?'), 'id': re.compile('comment-.+?')})
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts: threads.append(workers.Thread(self._get_sources, i.content))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except Exception:
            return self._sources
Example #26
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'el-item'})
         r = [(dom_parser2.parse_dom(i, 'div', {'class': 'season'}),
               dom_parser2.parse_dom(i, 'div', {'class': 'episode'}),
               dom_parser2.parse_dom(i, 'a', req='href')) for i in r if i]
         r = [(i[2][0].attrs['href']) for i in r
              if i[0][0].content == 'Season %01d' %
              int(season) and i[1][0].content == 'Episode %01d' %
              int(episode)]
         if r: return r[0]
         else: return
     except BaseException:
         return
Example #27
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(tvshowtitle)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            r = client.request(search_url)
            r = client.parseDOM(r, 'div', {'class': 'result-item'})
            r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
                  client.parseDOM(i, 'img', ret='alt')[0],
                  dom_parser2.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]
            r = [(i[0].attrs['href'], i[1], i[2][0].content) for i in r if
                 (cleantitle.get(i[1]) == cleantitle.get(tvshowtitle) and i[2][0].content == year)]
            url = source_utils.strip_domain(r[0][0])

            return url
        except:
            return
Example #28
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.tvsearch_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query.lower())
            result = client.request(query, referer=self.base_link)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'index_item.+?'})

            result = [dom.parse_dom(i, 'a', req=['href', 'title'])[0] for i in result if i]
            result = [i.attrs['href'] for i in result
                      if cleantitle.get(tvshowtitle) == cleantitle.get(
                          re.sub('(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)', '', i.attrs['title'], flags=re.I))][0]

            url = client.replaceHTMLCodes(result)
            url = url.encode('utf-8')
            return url
        except Exception:
            return
Example #29
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content
            name = client.replaceHTMLCodes(name)
            l = dom_parser2.parse_dom(r, 'div', {'class': 'ppu2h'})
            s = ''
            for i in l:
                s += i.content
            urls = re.findall(r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''', s, flags=re.MULTILINE|re.DOTALL)
            urls = [i for i in urls if not any(x in i for x in ['.rar', '.zip', '.iso', '.idx', '.sub'])]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', name)[0]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except BaseException:
                    pass
                info = ' | '.join(info)
                self.sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
        except:
            pass
Example #30
    def _get_sources(self, name, url, hostDict, hostprDict):
        try:
            hostDict = hostDict + hostprDict
            name = client.replaceHTMLCodes(name)
            r = self.scraper.get(url).content
            links = dom_parser2.parse_dom(r, 'a', req=['href','rel','data-wpel-link','target'])
            links = [i.attrs['href'] for i in links]
            if self.show:
                links = [i for i in links if self.hdlr.lower() in i.lower()]
                
            for url in links:
                try:
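                    # Tokenise the release name after its year/SxxExx tag and derive quality/info flags from the tokens.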
                    if self.hdlr in name:
                        fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                        fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                        fmt = [i.lower() for i in fmt]

                        if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                        if any(i in ['extras'] for i in fmt): raise Exception()

                        if '1080p' in fmt: quality = '1080p'
                        elif '720p' in fmt: quality = '720p'
                        else: quality = 'SD'
                        if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                        elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                        info = []

                        if '3d' in fmt: info.append('3D')

                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                            div = 1 if size.endswith(('GB', 'GiB')) else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass

                        if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                        info = ' | '.join(info)

                        if not any(x in url for x in ['.rar', '.zip', '.iso']):
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')

                            host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            if host in hostDict:
                                self._sources.append({'source': host, 'quality': quality, 'language': 'en',
                                                      'url': url, 'info': info, 'direct': False, 'debridonly': True})
                except:
                    pass
            check = [i for i in self._sources if not i['quality'] == 'CAM']
            if check: self._sources = check
        except:
            pass
Example #31
 def search(self, title, year):
     try:
         url = urlparse.urljoin(
             self.base_link, self.search_link % (urllib.quote_plus(title)))
         r = self.scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
         r = dom_parser2.parse_dom(r.content, 'li')
         r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'}))
              for i in r]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(urlparse.urljoin(self.base_link, i[0])) for i in r
              if cleantitle.get(title) in cleantitle.get(i[1])
              and year in i[1]]
         if r: return r[0]
         else: return
     except:
         return
Example #32
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []
            if url is None:
                return sources

            timer = control.Time(start=True)

            r = self.scraper.get(url).content
            quality = re.findall(">(\w+)<\/p", r)
            if quality[0] == "HD":
                quality = "720p"
            else:
                quality = "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            hostDict = hostprDict + hostDict
            for i in r[0]:
                # Stop searching 8 seconds before the provider timeout; otherwise the search
                # might keep running, fail to complete in time, and return no links at all.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('FreeFMovies - Timeout Reached')
                    break

                url = {
                    'url': i.attrs['href'],
                    'data-film': i.attrs['data-film'],
                    'data-server': i.attrs['data-server'],
                    'data-name': i.attrs['data-name']
                }
                url = urllib.urlencode(url)
                valid, host = source_utils.is_host_valid(i.content, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('FreeFMovies - Exception: \n' + str(failure))
            return sources
Example #33
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(title).replace('-','+')
            url = urlparse.urljoin(self.base_link, (self.movies_search_path % clean_title))
            r = client.request(url)

            r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content)) for i in r if i]
            r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1]) for i in r if i[1] == year]
            if r:
                url = r[0][0]
                return url
            else: return
        except Exception:
            return
Example #34
    def sources(self, url, hostDict, hostprDict):
            
        self.sources = []

        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                         
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']
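            # hdlr is the release year for movies; hdlr2 carries the SxxExx tag when scraping an episode.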

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urlparse.urljoin(self.base_link, i.attrs['href'])) for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]
            
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            [i.start() for i in threads]
            
            alive = [x for x in threads if x.is_alive()]
            while alive:
                alive = [x for x in threads if x.is_alive()]
                time.sleep(0.1)
            return self.sources
        except:
            return self.sources
Example #35
 def resolve(self, url):
     if 'hideurl' in url:
         data = client.request(url)
         data = client.parseDOM(data, 'div', attrs={'class': 'row'})
         url = [dom_parser2.parse_dom(i, 'a', req='href')[0] for i in data]
         url = [i.attrs['href'] for i in url if 'direct me' in i.content][0]
         return url
     else:
         return url
Example #36
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(tvshowtitle)
         search_url = self.search_link % (clean_title.replace('-',
                                                              '+'), year)
         r = client.request(search_url, headers=self.headers)
         r = dom_parser2.parse_dom(r, 'td')
         r = [
             dom_parser2.parse_dom(i, 'a', req='href') for i in r
             if "<div class='number'" in i.content
         ]
         r = [(urlparse.urljoin(self.base_url, i[0].attrs['href']))
              for i in r if tvshowtitle.lower() in i[0].content.lower()
              and year in i[0].content]
         url = r[0]
         return url
     except:
         return
Example #37
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title)
         search_url = urlparse.urljoin(
             self.base_link,
             self.search_link_movie % clean_title.replace('-', '+'))
         r = cache.get(client.request, 6, search_url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'movie'})
         r = [(dom_parser2.parse_dom(i.content, 'a', req='href'), \
               dom_parser2.parse_dom(i.content, 'div', {'class': 'year'})) \
               for i in r]
         r = [(urlparse.urljoin(self.base_link,
                                i[0][0].attrs['href']), i[1][0].content)
              for i in r if i[1][0].content == year]
         url = r[0][0]
         return url
     except Exception:
         return
Example #38
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(title)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            r = self.scraper.get(search_url).content
            r = client.parseDOM(r, 'div', {'class': 'result-item'})
            r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
                  re.sub('<.*?>', '' , re.findall('alt=\"(.*?)\"', i)[0]),
                  dom_parser2.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]

            r = [(i[0].attrs['href'], i[1], i[2][0].content) for i in r if
                 (cleantitle.get(i[1]) == cleantitle.get(title) and i[2][0].content == year)]
            url = r[0][0]
    
            return url
        except:
            log_utils.log('>>>> %s TRACE <<<<\n%s' % (__file__.upper().split('\\')[-1].split('.')[0], traceback.format_exc()), log_utils.LOGDEBUG)
            return
Example #39
    def sources(self, url, hostDict, hostprDict):
            
        self.sources = []

        try:
            if url is None:
                return self.sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
                         
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            r = cfscrape.create_scraper().get(url).content
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urlparse.urljoin(self.base_link, i.attrs['href'])) for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]
            
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            [i.start() for i in threads]
            
            alive = [x for x in threads if x.is_alive() is True]
            while alive:
                alive = [x for x in threads if x.is_alive() is True]
                time.sleep(0.1)
            return self.sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Rapidmoviez Testing - Exception: \n' + str(failure))
            return self.sources
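The sources method above starts one worker thread per release link, then polls is_alive() in a 0.1-second sleep loop until every thread finishes. The standard library expresses the same wait more directly with join(), optionally bounded by a timeout; a minimal Python 3 sketch (worker and the sample inputs are illustrative stand-ins for _get_sources and the scraped rows):

import threading

results, lock = [], threading.Lock()

def worker(name, url):
    # Stand-in for _get_sources: record whatever was scraped for this link.
    with lock:
        results.append((name, url))

threads = [threading.Thread(target=worker, args=item)
           for item in [('S01E01', 'http://a.example'), ('S01E02', 'http://b.example')]]
for t in threads:
    t.start()
for t in threads:
    t.join(timeout=10)  # bounded wait instead of a busy is_alive() loop
print(results)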
Example No. 46
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            name = client.replaceHTMLCodes(name)
            try:
                _name = name.lower().replace('rr',
                                             '').replace('nf', '').replace(
                                                 'ul', '').replace('cu', '')
            except:
                _name = name
            l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
            s = ''
            for i in l:
                s += i.content
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,  # search the concatenated link blocks, not just the last loop item
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls if not i.endswith(('.rar', '.zip', '.iso',
                                                   '.idx', '.sub', '.srt'))
            ]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': _name
                })
        except:
            pass
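The heart of _get_sources is a single URL regex over the concatenated <pre class="links"> blocks, followed by an extension blacklist. The same two steps applied to plain text (the sample input is illustrative):

import re

URL_RE = r'((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])'

text = '''Mirror 1: https://host.example/f/release.mkv
Subs: https://host.example/f/release.srt
Archive: https://other.example/dl/release.rar'''

urls = re.findall(URL_RE, text, flags=re.MULTILINE | re.DOTALL)
urls = [u for u in urls
        if not u.endswith(('.rar', '.zip', '.iso', '.idx', '.sub', '.srt'))]
print(urls)  # only the .mkv mirror survives the blacklist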
Example No. 47
 def search(self, title, year):
     try:
         url = urljoin(self.base_link,
                       self.search_link % (quote_plus(title)))
         headers = {'User-Agent': client.agent()}
         r = self.scraper.get(url, headers=headers).content
         r = ensure_text(r, errors='replace')
         r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
         r = dom_parser2.parse_dom(r.content, 'li')
         r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'}))
              for i in r]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(urljoin(self.base_link, i[0])) for i in r
              if cleantitle.get(title) in cleantitle.get(i[1])
              and year in i[1]]
         if r: return r[0]
         else: return
     except:
         return
Example No. 48
 def search(self, title, year):
     try:
         url = urlparse.urljoin(
             self.base_link, self.search_link % (urllib.quote_plus(title)))
         headers = {'User-Agent': client.agent()}
         scraper = cfscrape.create_scraper()
         r = scraper.get(url, headers=headers).content
         r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
         r = dom_parser2.parse_dom(r.content, 'li')
         r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'}))
              for i in r]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(urlparse.urljoin(self.base_link, i[0])) for i in r
              if cleantitle.get(title) in cleantitle.get(i[1])
              and year in i[1]]
         if r: return r[0]
         else: return
     except BaseException:
         return
Example No. 49
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(tvshowtitle)
            search_url = self.search_link % (clean_title.replace('-','+'), year)
            self.scraper = cfscrape.create_scraper()
            r = self.scraper.get(search_url).content

            if 'To proceed, you must allow popups' in r:
                for i in range(0, 5):
                    r = self.scraper.get(search_url).content
                    if 'To proceed, you must allow popups' not in r: break
            r = dom_parser2.parse_dom(r, 'div', attrs={'class': 'title'})

            r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r]
            r = [(urlparse.urljoin(self.base_link, i[0].attrs['href'])) for i in r if tvshowtitle.lower() in i[0].content.lower() and year in i[0].content]
            url = r[0]
            return url
        except:
            return
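The popup check above simply re-requests the page up to five times until the warning text disappears. The same bounded-retry idiom as a small helper (fetch and the marker string are illustrative stand-ins):

def fetch_until_clear(fetch, marker, attempts=5):
    # Re-request until the anti-popup marker disappears or attempts run out.
    body = fetch()
    for _ in range(attempts):
        if marker not in body:
            break
        body = fetch()
    return body

# e.g. fetch_until_clear(lambda: scraper.get(search_url).content,
#                        'To proceed, you must allow popups')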
Example No. 50
 def resolve(self, url):
     try:
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'a', req=['href', 'data-episodeid', 'data-linkid'])[0]
         url = r.attrs['href']
         url = client.replaceHTMLCodes(url)
         url = url.encode('utf-8')
         return url
     except:
         return
Example No. 51
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None:
             return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         clean_title = cleantitle.geturl(url['tvshowtitle'])+'-s%02d' % int(season)
         url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             if i.content == 'Episode %s' % episode:
                 url = i.attrs['href']
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('FreePutlockers - Exception: \n' + str(failure))
         return
Example No. 52
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(tvshowtitle)
            search_url = self.search_link % (clean_title.replace('-','+'), year)
            self.scraper = cfscrape.create_scraper()
            r = self.scraper.get(search_url).content

            if 'To proceed, you must allow popups' in r:
                for i in range(0, 5):
                    r = self.scraper.get(search_url).content
                    if 'To proceed, you must allow popups' not in r: break
            r = dom_parser2.parse_dom(r, 'div', attrs={'class': 'title'})

            r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r]
            r = [(urlparse.urljoin(self.base_link, i[0].attrs['href'])) for i in r if tvshowtitle.lower() in i[0].content.lower() and year in i[0].content]
            url = r[0]
            return url
        except:
            return
Example No. 53
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None:
             return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         clean_title = cleantitle.geturl(url['tvshowtitle'])+'-s%02d' % int(season)
         url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
         r = self.scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             if i.content == 'Episode %s' % episode:
                 url = i.attrs['href']
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('GoStream - Exception: \n' + str(failure))
         return
Example No. 54
 def resolve(self, url):
     try:
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'a', req=['href','data-episodeid','data-linkid'])[0]
         url = r.attrs['href']
         url = client.replaceHTMLCodes(url)
         url = url.encode('utf-8')
         return url
     except:
         return
Example No. 55
    def _get_sources(self, urls, quality, info, hostDict, sc_timeout):
        try:
            for url in urls:
                # Stop searching 8 seconds before the provider timeout; otherwise the
                # scraper may keep going, miss the deadline, and return no links at all.
                if self.timer.elapsed() > sc_timeout:
                    log_utils.log('2DDL - Timeout Reached')
                    return self._sources

                r = client.request(url)
                if 'linkprotector' in url:
                    p_link = dom_parser2.parse_dom(r,
                                                   'link',
                                                   {'rel': 'canonical'},
                                                   req='href')[0]
                    p_link = p_link.attrs['href']
                    input_name = client.parseDOM(r, 'input', ret='name')[0]
                    input_value = client.parseDOM(r, 'input', ret='value')[0]
                    post = {input_name: input_value}
                    p_data = client.request(p_link, post=post)
                    links = client.parseDOM(p_data,
                                            'a',
                                            ret='href',
                                            attrs={'target': '_blank'})
                    for i in links:
                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid:
                            continue
                        self._sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'info': info,
                            'direct': False,
                            'debridonly': debrid.status()
                        })
                elif 'torrent' in url:
                    if debrid.status(True) is False:
                        continue

                    data = client.parseDOM(r, 'a', ret='href')
                    url = [i for i in data if 'magnet:' in i][0]
                    url = url.split('&tr')[0]
                    self._sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
        except Exception:
            pass
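The 'linkprotector' branch above follows a common interstitial pattern: read the canonical URL, POST back the page's single hidden form field, then harvest the target="_blank" anchors from the unlocked page. A stdlib-only Python 3 sketch of the same round trip (the regexes and URL handling here are illustrative stand-ins for client.request and parseDOM; real pages vary in markup):

import re
import urllib.parse
import urllib.request

def unlock(protected_url):
    html = urllib.request.urlopen(protected_url).read().decode('utf-8', 'replace')
    # The canonical <link> points at the form's submit target.
    action = re.search(r'rel="canonical" href="([^"]+)"', html).group(1)
    # A single hidden input carries the token the protector expects back.
    name = re.search(r'<input[^>]*name="([^"]+)"', html).group(1)
    value = re.search(r'<input[^>]*value="([^"]+)"', html).group(1)
    post = urllib.parse.urlencode({name: value}).encode()
    unlocked = urllib.request.urlopen(action, data=post).read().decode('utf-8', 'replace')
    return re.findall(r'target="_blank"[^>]*href="([^"]+)"', unlocked)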
Example No. 56
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            r = client.request(url)
            r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
            r = [(dom_parser2.parse_dom(i, 'a', req='href'),
                  dom_parser2.parse_dom(i, 'div', {'class': 'notes'}))
                 for i in r if i]
            r = [(i[0][0].attrs['href'], i[0][0].content,
                  i[1][0].content if i[1] else 'None') for i in r]
            for i in r:
                try:
                    url = i[0]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    valid, host = source_utils.is_host_valid(i[1], hostDict)
                    if not valid: continue
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    info = []
                    quality, info = source_utils.get_release_quality(
                        i[2], i[2])

                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
                except BaseException:
                    pass

            return sources
        except BaseException:
            return sources
Example No. 57
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if not url: return sources

            r = client.request(url)
            r = dom_parser2.parse_dom(r, 'div', {'class': 'movie_link'})
            r = [dom_parser2.parse_dom(i.content, 'a', req='href') for i in r]
            r = [(urlparse.urljoin(self.base_link,
                                   i[0].attrs['href']), i[0].content)
                 for i in r]

            for i in r:
                try:
                    host = i[1].split('.')[0]
                    if host.lower() in str(hostDict):
                        sources.append({
                            'source': host,
                            'info': '',
                            'quality': 'SD',
                            'language': 'en',
                            'url': i[0],
                            'direct': False,
                            'debridonly': False
                        })
                    elif host.lower() in str(hostprDict):
                        sources.append({
                            'source': host,
                            'info': '',
                            'quality': 'SD',
                            'language': 'en',
                            'url': i[0],
                            'direct': False,
                            'debridonly': True
                        })
                except:
                    pass

            return sources
        except:
            return sources
Example No. 58
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            
            r = self.scraper.get(url).content
            quality = re.findall(r'>(\w+)</p', r)
            if quality[0] == "HD":
                quality = "720p"
            else:
                quality = "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {
                    'url': i.attrs['href'],
                    'data-film': i.attrs['data-film'],
                    'data-server': i.attrs['data-server'],
                    'data-name': i.attrs['data-name']}
                url = urllib.urlencode(url)
                sources.append({'source': i.content, 'quality': quality, 'language': 'en',
                                'url': url, 'direct': False, 'debridonly': False})
            return sources
        except:
            return sources
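The sources method above packs each server anchor's data-* attributes into a single urlencoded string so a later resolve step can recover them with parse_qs. The round trip in Python 3 (attribute values are illustrative):

from urllib.parse import urlencode, parse_qs

packed = urlencode({
    'url': '/watch/123',
    'data-film': '123',
    'data-server': '2',
    'data-name': 'Server 2',
})
# Later, in resolve():
unpacked = {k: v[0] for k, v in parse_qs(packed).items()}
print(unpacked['data-server'])  # -> 2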
Example No. 59
    def sources(self, url, hostDict, hostprDict):
        try:    
            sources = []
            
            if url is None: return sources

            if debrid.status() is False: raise Exception()
 
            data = urlparse.parse_qs(url)

            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if\
                'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            scraper = cfscrape.create_scraper()
            r = scraper.get(url).content

            items = dom_parser2.parse_dom(r, 'h2')
            items = [dom_parser2.parse_dom(i.content, 'a', req=['href','rel','title','data-wpel-link']) for i in items]
            items = [(i[0].content, i[0].attrs['href']) for i in items]

            hostDict = hostprDict + hostDict

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    scraper = cfscrape.create_scraper()
                    r = scraper.get(item[1]).content     
                    links = dom_parser2.parse_dom(r, 'a', req=['href','rel','data-wpel-link','target'])
                    links = [i.attrs['href'] for i in links]
                    for url in links:
                        try:
                            if hdlr in name:
                                fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
                                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                                fmt = [i.lower() for i in fmt]

                                if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
                                if any(i in ['extras'] for i in fmt): raise Exception()

                                if '1080p' in fmt: quality = '1080p'
                                elif '720p' in fmt: quality = '720p'
                                else: quality = 'SD'
                                if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
                                elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'

                                info = []

                                if '3d' in fmt: info.append('3D')

                                try:
                                    size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', name)[-1]
                                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                                    size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                                    size = '%.2f GB' % size
                                    info.append(size)
                                except:
                                    pass

                                if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')

                                info = ' | '.join(info)

                                if not any(x in url for x in ['.rar', '.zip', '.iso']):
                                    url = client.replaceHTMLCodes(url)
                                    url = url.encode('utf-8')

                                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                    if host in hostDict: 
                                        host = client.replaceHTMLCodes(host)
                                        host = host.encode('utf-8')

                                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
                        except:
                            pass
                except:
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
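Example No. 59 derives quality and size by tokenising the release name and mapping tokens to labels. That core reduces to a small tokenise-and-map helper; a self-contained Python 3 sketch (the release name and token lists are illustrative, trimmed from the fuller sets above):

import re

def parse_release(name):
    fmt = [t.lower() for t in re.split(r'[.()\[\]\s-]+', name) if t]
    if '1080p' in fmt: quality = '1080p'
    elif '720p' in fmt: quality = '720p'
    else: quality = 'SD'
    if any(t in ('dvdscr', 'r5', 'r6') for t in fmt): quality = 'SCR'
    elif any(t in ('camrip', 'hdcam', 'ts', 'telesync') for t in fmt): quality = 'CAM'
    m = re.search(r'(\d+(?:[.,]\d+)?)\s*(GB|GiB|MB|MiB)', name, re.I)
    size_gb = 0.0
    if m:
        div = 1 if m.group(2).upper().startswith('G') else 1024
        size_gb = float(m.group(1).replace(',', '.')) / div
    return quality, '%.2f GB' % size_gb

print(parse_release('Movie.2019.1080p.BluRay.x264 4.37 GB'))
# -> ('1080p', '4.37 GB')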
Example No. 60
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)

            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'Season %d' % int(data['season']) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            self.scraper = cfscrape.create_scraper()
            r = self.scraper.get(url).content
            posts = client.parseDOM(r, 'li')

            for post in posts:
                try:
                    # parse the anchor under its own name; 'data' still holds the
                    # query dict whose season/episode fields are needed below
                    a = dom_parser2.parse_dom(post, 'a', req='href')[0]
                    t = re.findall('title=.+?>\s*(.+?)$', a.content, re.DOTALL)[0]
                    t2 = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', t)
                    y = re.findall('[\.|\(|\[|\s](S\d*E\d*|Season\s*\d*|\d{4})[\.|\)|\]|\s]', t)[-1]

                    if not (cleantitle.get_simple(t2.replace('720p / 1080p', '')) == cleantitle.get(
                        title) and y == hdlr): raise Exception()

                    link = client.parseDOM(post, 'a', ret='href')[0]
                    if 'Episodes' not in post: u = self.movie_links(link)
                    else:
                        sep = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
                        u = self.show_links(link, sep)

                    for item in u:
                        quality, info = source_utils.get_release_quality(item[0][0], None)
                        try:
                            size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', item[0][1])[-1]
                            div = 1 if size.endswith(' GB') else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass

                        info = ' | '.join(info)

                        url = item[0][0]
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        sources.append({'source': 'popcorn', 'quality': quality, 'language': 'en', 'url': url,
                                        'info': info, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources