Example #1
    def search_ep(self, title1, title2):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title1)))
            titles.append(cleantitle.normalize(cleantitle.getsearch(title2)))

            for title in titles:
                url = urlparse.urljoin(self.base_link, self.search_link_ep)
                url = url % urllib.quote(str(title).replace(" ", "_"))

                result = client.request(url)
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'video-clip-wrapper'})
                linki = []
                for item in result:
                    try:
                        link = str(client.parseDOM(item, 'a', ret='href')[0])
                        nazwa = str(
                            client.parseDOM(
                                item, 'a', attrs={'class':
                                                  'link-title-visit'})[0])
                        name = cleantitle.normalize(
                            cleantitle.getsearch(nazwa))
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        if self.contains_all_words(name, words):
                            linki.append(link)
                    except:
                        continue
                if linki:
                    return linki
        except:
            return
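Several of these examples call `self.contains_all_words`, which is never defined in the snippets shown here; a minimal sketch of what such a helper presumably does (a hypothetical reconstruction, not the scrapers' actual code):

    def contains_all_words(self, name, words):
        # Hypothetical helper: True only when every search word occurs
        # in the candidate title (case-insensitive substring check).
        name = name.lower()
        return all(str(word).lower() in name for word in words)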
Example #2
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                url = self.search_link + str(title)
                result = self.session.get(url).content
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'col-sm-4'})

                for item in result:
                    try:
                        link = str(client.parseDOM(item, 'a', ret='href')[0])
                        if link.startswith('//'):
                            link = "https:" + link
                        nazwa = str(client.parseDOM(item, 'a', ret='title')[0])
                        name = cleantitle.normalize(
                            cleantitle.getsearch(nazwa))
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        if self.contains_all_words(
                                name, words) and str(year) in link:
                            return link
                    except:
                        continue
        except:
            return
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query(title)))

            if 'tvshowtitle' in data:
                html = self.scraper.get(url).content

                match = re.compile('class="post-item.+?href="(.+?)" title="(.+?)"', re.DOTALL).findall(html)
                for url, item_name in match:
                    if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                        season_url = '%02d' % int(data['season'])
                        episode_url = '%02d' % int(data['episode'])
                        sea_epi = 'S%sE%s' % (season_url, episode_url)

                        result = self.scraper.get(url).content
                        regex = re.compile('href="(.+?)"', re.DOTALL).findall(result)
                        for ep_url in regex:
                            if sea_epi in ep_url:
                                quality, info = source_utils.get_release_quality(ep_url)
                                sources.append({'source': 'CDN', 'quality': quality, 'language': 'en',
                                                'url': ep_url, 'direct': False, 'debridonly': False})
            else:
                html = self.scraper.get(url).content
                match = re.compile('<div class="thumbnail".+?href="(.+?)" title="(.+?)"', re.DOTALL).findall(html)

                for url, item_name in match:
                    if cleantitle.getsearch(title).lower() in cleantitle.getsearch(item_name).lower():
                        quality, info = source_utils.get_release_quality(url)
                        result = self.scraper.get(url).content
                        regex = re.compile('href="/download.php.+?link=(.+?)"', re.DOTALL).findall(result)

                        for link in regex:
                            if 'server=' not in link:
                                try:
                                    link = base64.b64decode(link)
                                except Exception:
                                    pass
                                try:
                                    host = link.split('//')[1].replace('www.', '')
                                    host = host.split('/')[0].lower()
                                except Exception:
                                    continue
                                if not self.filter_host(host):
                                    continue
                                sources.append({'source': host, 'quality': quality, 'language': 'en',
                                                'url': link, 'direct': False, 'debridonly': False})

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('ExtraMovie - Exception: \n' + str(failure))
            return sources
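`self.filter_host` (used above to skip unsupported hosters) is also not shown; a plausible minimal version, assuming the class keeps a list of supported hoster domains (the `self.hosts` attribute is hypothetical):

    def filter_host(self, host):
        # Hypothetical: accept a link only when its hoster domain is in a
        # known-good list; self.hosts is an assumed attribute.
        if not host:
            return False
        return host in self.hosts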
Example #4
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))
            titles.append(title)
            titles.append(localtitle)
            for title in titles:
                try:
                    url = self.search_link + str(title)
                    result = self.session.get(url).content
                    result = result.decode('utf-8')
                    h = HTMLParser()
                    result = h.unescape(result)
                    result = client.parseDOM(result,
                                             'div',
                                             attrs={'class': 'card-body p-2'})

                    for item in result:
                        try:
                            nazwa = re.findall("""Film online: (.*?)\"""",
                                               item)[0]
                            try:
                                nazwa = re.findall(""">(.*?)<""", nazwa)[0]
                            except:
                                pass
                            name = cleantitle.normalize(
                                cleantitle.getsearch(nazwa))
                            rok = re.findall(
                                """Rok wydania filmu online\".*>(.*?)<""",
                                item)[0]
                            item = str(item).replace(
                                "<span style='color:red'>",
                                "").replace("</span>", "")
                            link = re.findall("""href=\"(.*?)\"""", item)[0]
                            if link.startswith('//'):
                                link = "https:" + link
                            name = name.replace("  ", " ")
                            title = title.replace("  ", " ")
                            words = name.split(" ")
                            if self.contains_all_words(
                                    title, words) and str(year) in rok:
                                return link
                        except:
                            continue
                except:
                    continue
        except:
            return
Example #5
    def search_ep(self, titles, season, episode, year):
        try:
            searchtitles = titles
            for searchtitle in searchtitles:

                response = requests.get(self.base_link +
                                        self.search_serial % searchtitle)
                result = response.content
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result,
                                         'ul',
                                         attrs={'class': 'resultsList hits'})
                items = client.parseDOM(result, 'li')
                items = [x for x in items if not str(x).startswith("<a href")]
                orgtitles = []
                for content in items:
                    try:
                        orgtitle = str(
                            client.parseDOM(
                                content,
                                'div',
                                attrs={'class':
                                       'filmPreview__originalTitle'})[0])
                    except:
                        orgtitle = "0"
                    orgtitles.append(orgtitle)
                ids = client.parseDOM(items, 'data', ret='data-id')
                titles = client.parseDOM(result, 'data', ret='data-title')
                years = client.parseDOM(result,
                                        'span',
                                        attrs={'class': 'filmPreview__year'})

                for item in zip(titles, ids, years, orgtitles):
                    f_title = str(item[0])
                    f_id = str(item[1])
                    f_year = str(item[2])
                    f_orgtitle = str(item[3])
                    teststring = cleantitle.normalize(
                        cleantitle.getsearch(searchtitle))
                    words = cleantitle.normalize(
                        cleantitle.getsearch(f_title)).split(" ")
                    if self.contains_all_words(teststring,
                                               words) and year == f_year:
                        return (f_title, f_id, f_year, f_orgtitle, "SERIAL",
                                season, episode)
        except:
            return
Example #6
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0] + ' ' + year)))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i][0]

            r = client.request(query)

            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'v_pict'})

            for i in r:
                title = re.findall('alt="(.+?)"', i[1], re.DOTALL)[0]
                y = re.findall('(\d{4})', title, re.DOTALL)[0]
                title = re.sub('<\w+>|</\w+>', '', title)
                title = cleantitle.get(title)
                title = re.findall('(\w+)', cleantitle.get(title))[0]

                if title in t and year == y:
                    url = re.findall('href="(.+?)"', i[1], re.DOTALL)[0]
                    return source_utils.strip_domain(url)
            return
        except:
            return
Example #7
 def __search(self, titles, year):
     try:
         query = self.search_link % (cleantitle.getsearch(titles[0].replace(
             ' ', '%20')))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(
             r,
             'li',
             attrs={'class': 'item everyone-item over_online haveTooltip'})
         for i in r:
             title = client.parseDOM(i, 'a', ret='title')[0]
             url = client.parseDOM(i, 'a', ret='href')[0]
             data = client.request(url)
             y = re.findall('<p><span>Año:</span>(\d{4})', data)[0]
             original_t = re.findall('movie-text">.+?h2.+?">\((.+?)\)</h2>',
                                     data, re.DOTALL)[0]
             original_t, title = cleantitle.get(original_t), cleantitle.get(
                 title)
             if (t in title or t in original_t) and y == year:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
         return
     except:
         return
Example #8
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                urllib.quote_plus(cleantitle.getsearch(title)))
            r = self.scraper.get(url).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except Exception:
                url = None

            if url is None:
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]
            return url
        except Exception:
            return
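`self.matchAlias` is another undefined helper here; a plausible sketch, assuming `aliases` is a list of dicts carrying a 'title' key (the shape used by similar scrapers, but unverified in these snippets):

    def matchAlias(self, title, aliases):
        # Hypothetical helper: the found title matches when it equals any
        # known alias after cleantitle normalization.
        try:
            return any(cleantitle.get(title) == cleantitle.get(alias['title'])
                       for alias in aliases)
        except Exception:
            return False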
Example #9
 def __search(self, titles, year, content):
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.getsearch(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(r,
                             'div',
                             attrs={'class': 'tab-content clearfix'})
         if content == 'movies':
             r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
         else:
             r = client.parseDOM(r, 'div', attrs={'id': 'series'})
         data = dom_parser.parse_dom(r, 'figcaption')
         for i in data:
             title = i[0]['title']
             title = cleantitle.get(title)
             if title in t:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
             else:
                 url = dom_parser.parse_dom(i, 'a', req='href')
                 data = client.request(url[0][0]['href'])
                 data = re.findall(
                     '<h3>Pelicula.+?">(.+?)\((\d{4})\).+?</a>', data,
                     re.DOTALL)[0]
                 if titles[0] in data[0] and year == data[1]:
                     return source_utils.strip_domain(url[0][0]['href'])
         return
     except:
         return
Example #10
    def __search(self, titles, year):
        try:

            query = self.search_link % (urllib.quote_plus(
                cleantitle.getsearch(titles[0] + ' ' + year)))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'card'})

            r = client.parseDOM(r, 'h3')

            for i in r:
                data = re.findall(
                    '<span.*?>(.+?)</span>.+?date">\s*\((\d{4}).*?</span>', i,
                    re.DOTALL)
                for title, y in data:
                    title = cleantitle.get(title)
                    if title in t and year == y:
                        url = client.parseDOM(i, 'a', ret='href')[0]
                        return source_utils.strip_domain(url)

            return
        except:
            return
Example #11
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': cleantitle.getsearch(tvshowtitle), 'year': year}
         url = urllib.urlencode(url)
         return url
     except:
         return
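The querystring built here is typically decoded again inside `sources`, as Example #3 does; for instance:

    data = urlparse.parse_qs(url)
    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
    title = data['tvshowtitle']  # the cleantitle.getsearch() value built above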
Example #12
	def _search(self, title, year, aliases, headers):
		try:
			q = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
			r = client.request(q)
			r = client.parseDOM(r, 'div', attrs={'class': 'ml-img'})
			r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'img', ret='alt'))
			url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1]) and year in i[1]][0][0]
			return url
		except:
			pass
Example #13
    def search(self, title, localtitle, year, search_type):
        try:
            titles = []
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(title + " 3d")))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle + " 3d")))
            cookies = client.request(self.base_link, output='cookie')
            cache.cache_insert('alltube_cookie', cookies)
            for title in titles:
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.search_link),
                                   post={'search': cleantitle.query(title)},
                                   headers={'Cookie': cookies})
                r = self.get_rows(r, search_type)

                for row in r:
                    url = client.parseDOM(row, 'a', ret='href')[0]
                    names_found = client.parseDOM(row, 'h3')[0]
                    if names_found.startswith(
                            'Zwiastun') and not title.startswith('Zwiastun'):
                        continue
                    names_found = names_found.encode('utf-8').split('/')
                    names_found = [
                        cleantitle.normalize(cleantitle.getsearch(i))
                        for i in names_found
                    ]
                    for name in names_found:
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        found_year = self.try_read_year(url)
                        if self.contains_all_words(
                                name, words) and (not found_year
                                                  or found_year == year):
                            return url
        except:
            return
Example #14
	def movie(self, imdb, title, localtitle, aliases, year):
		try:
			search_id = cleantitle.getsearch(title)
			search_url = self.base_link + self.search_link % (search_id.replace(':', ' ').replace(' ', '%20'))
			search_results = client.request(search_url)
			match = re.compile('<a href=".(.+?)">', re.DOTALL).findall(search_results)
			for link in match:
				if cleantitle.geturl(title).lower() in link:
					url = self.base_link + link
					return url
			return
		except:
			return
Example #15
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         searchName = cleantitle.getsearch(title)
         searchURL = self.base_link + self.search_link % (
             searchName.replace(':', ' ').replace(' ', '+'))
         searchPage = self.scraper.get(searchURL).content
         results = re.compile(
             '<a href="(.+?)">(.+?)</a>.+?<span class="year">(.+?)</span>',
             re.DOTALL).findall(searchPage)
         for url, zName, zYear in results:
             if cleantitle.geturl(title).lower() in cleantitle.geturl(
                     zName).lower():
                 if year in str(zYear):
                     url = url + "?watching"
                     return url
     except:
         return
Example #16
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(':', ' ').replace(' ', '+'))
         search_results = client.request(url)
         match = re.compile('<a href="/watch/(.+?)" title="(.+?)">',
                            re.DOTALL).findall(search_results)
         for row_url, row_title in match:
             row_url = 'https://fmoviesto.to/watch/%s' % row_url
             if cleantitle.get(title) in cleantitle.get(row_title):
                 return row_url
         return
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('FmoviesIO - Exception: \n' + str(failure))
         return
Example #17
 def searchMovie(self, title, year):
     try:
         query = self.search_link % urllib.quote_plus(
             cleantitle.getsearch(title + ' ' + year))
         url = urlparse.urljoin(self.base_link, query)
         headers = {'User-Agent': client.agent(), 'Referer': self.base_link}
         scraper = cfscrape.create_scraper()
         r = scraper.get(url, headers=headers).content
         r = client.parseDOM(r, 'item')
         r = [(client.parseDOM(i, 'title')[0], i) for i in r if i]
         r = [
             i[1] for i in r if
             cleantitle.get(title) in cleantitle.get(i[0]) and year in i[0]
         ]
         return r[0]
     except BaseException:
         return
Example #18
 def searchMovie(self, title, year):
     try:
         query = self.search_link % urllib.quote_plus(
             cleantitle.getsearch(title))
         url = urlparse.urljoin(self.base_link, query)
         r = client.request(url)
         r = client.parseDOM(r, 'item')
         r = [(client.parseDOM(i, 'title')[0], client.parseDOM(i,
                                                               'link')[0])
              for i in r if i]
         r = [
             i[1] for i in r
             if cleantitle.get(title) == cleantitle.get(i[0])
         ]
         return r[0]
     except Exception:
         return
Example #19
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(' ', '+').replace('-', '+').replace(
             '++', '+'))
         headers = {'User-Agent': User_Agent}
         search_results = requests.get(url, headers=headers,
                                       timeout=10).content
         match = re.compile(
             '<li>.+?<a href="(.+?)".+?title="(.+?)".+?<a href=.+?>(.+?)<',
             re.DOTALL).findall(search_results)
         for row_url, row_title, release in match:
             if cleantitle.get(title) in cleantitle.get(row_title):
                 if year in str(release):
                     return row_url
         return
     except:
         return
Example #20
 def __search(self, titles, year):
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.getsearch(titles[0] + ' ' + year)))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(r, 'div', attrs={'class': 'bd'})
         for i in r:
             r = dom_parser.parse_dom(i, 'h3')
             r = dom_parser.parse_dom(r, 'a')
             title = r[0][1]
             y = re.findall('(\d{4})', title, re.DOTALL)[0]
             title = cleantitle.get(title.split('(')[0])
             if title in t and year == y:
                 return source_utils.strip_domain(r[0][0]['href'])
         return
     except:
         return
Example #21
    def search_ep(self, titles, season, episode, year):
        try:
            query = 'S{:02d}E{:02d}'.format(int(season), int(episode))
            for title in titles:
                url = self.search_link + str(title)
                result = self.session.get(url).content
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'card-body p-2'})

                for item in result:
                    nazwa = re.findall("""Film online: (.*?)\"""", item)[0]
                    name = cleantitle.normalize(cleantitle.getsearch(nazwa))
                    rok = re.findall("""Rok wydania filmu online\".*>(.*?)<""",
                                     item)[0]
                    item = str(item).replace("<span style='color:red'>",
                                             "").replace("</span>", "")
                    link = re.findall("""href=\"(.*?)\"""", item)[0]
                    if link.startswith('//'):
                        link = "https:" + link
                    name = name.replace("  ", " ")
                    title = title.replace("  ", " ")
                    words = title.split(" ")
                    if self.contains_all_words(name,
                                               words) and str(year) in rok:
                        content = requests.get(link.replace(
                            'filmy', 'seriale')).content
                        content = client.parseDOM(
                            content,
                            'div',
                            attrs={'class': 'tabela_wiersz mb-1'})
                        for odcinek in content:
                            if query.lower() in odcinek.lower():
                                link = str(
                                    client.parseDOM(odcinek, 'a',
                                                    ret='href')[0])
                                return self.base_link + link

        except:
            return
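The `query` built at the top of this example zero-pads the season and episode numbers so the tag matches the site's episode tables:

    query = 'S{:02d}E{:02d}'.format(1, 5)  # -> 'S01E05'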
Example #22
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(':', ' ').replace(' ', '+'))
         h = {'User-Agent': client.randomagent()}
         r = self.scraper.get(url, headers=h).content
         z = re.compile('<item>(.+?)</item>',
                        flags=re.DOTALL | re.UNICODE | re.MULTILINE
                        | re.IGNORECASE).findall(r)
         for t in z:
             b = re.compile('<a rel="nofollow" href="(.+?)">(.+?)</a>',
                            flags=re.DOTALL | re.UNICODE | re.MULTILINE
                            | re.IGNORECASE).findall(t)
             for foundURL, foundTITLE in b:
                 if cleantitle.get(title) in cleantitle.get(foundTITLE):
                     return foundURL
         return
     except:
         return
Example #23
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle']
            hdlr = 's%02de%02d' % (int(data['season']), int(data['episode']))

            query = urllib.quote_plus(cleantitle.getsearch(title))
            surl = urlparse.urljoin(self.base_link, self.search_link % query)
            r = client.request(surl, XHR=True)
            r = json.loads(r)
            r = r['series']
            for i in r:
                tit = i['value']
                if cleantitle.get(title) != cleantitle.get(tit):
                    raise Exception()
                slink = i['seo']
                slink = urlparse.urljoin(self.base_link, slink)

                r = client.request(slink)
                if data['imdb'] not in r:
                    raise Exception()
                items = client.parseDOM(r, 'div', {'class': 'el-item\s*'})
                epis = [client.parseDOM(i, 'a', ret='href')[0] for i in items if i]
                epis = [i for i in epis if hdlr in i.lower()][0]
                r = client.request(epis)
                links = client.parseDOM(r, 'a', ret='data-actuallink')
                for url in links:
                    try:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: raise Exception()
                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False,
                                        'debridonly': False})
                    except BaseException:
                        continue

            return sources
        except BaseException:
            return sources
Example #24
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         search_id = cleantitle.getsearch(title)
         url = urlparse.urljoin(self.base_link, self.search_link)
         url = url % (search_id.replace(' ', '+').replace('-', '+').replace(
             '++', '+'), year)
         headers = {'User-Agent': self.User_Agent}
         search_results = requests.get(url, headers=headers,
                                       timeout=10).content
         items = re.compile('<item>(.+?)</item>',
                            re.DOTALL).findall(search_results)
         for item in items:
             match = re.compile('<title>(.+?)</title>.+?<link>(.+?)</link>',
                                re.DOTALL).findall(item)
             for row_title, row_url in match:
                 if cleantitle.get(title) in cleantitle.get(row_title):
                     if year in str(row_title):
                         return row_url
         return
     except:
         return
Example #25
 def searchShow(self, title, season):
     try:
         sea = '%s season %d' % (title, int(season))
         query = self.search_link % urllib.quote_plus(
             cleantitle.getsearch(sea))
         url = urlparse.urljoin(self.base_link, query)
         headers = {'User-Agent': client.agent(), 'Referer': self.base_link}
         scraper = cfscrape.create_scraper()
         r = scraper.get(url, headers=headers).content
         r = client.parseDOM(r, 'item')
         r = [(client.parseDOM(i, 'title')[0], i) for i in r if i]
         r = [
             i[1] for i in r
             if sea.lower() in i[0].replace('  ', ' ').lower()
         ]
         links = re.findall('''<h4>(EP\d+)</h4>.+?src="(.+?)"''', r[0],
                            re.I | re.DOTALL)
         links = [(i[0], i[1].lstrip()) for i in links if i]
         return links
     except BaseException:
         return
Example #26
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(
             self.base_link, self.search_link %
             urllib.quote_plus(cleantitle.getsearch(search)))
         log_utils.log('Search URL: %s' % str(url), log_utils.LOGNOTICE)
         r = self.scraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1]))
              for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [
             i[0] for i in r
             if self.matchAlias(i[2][0], aliases) and i[2][1] == season
         ][0]
         return url
     except Exception:
         return
Example #27
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = self.scraper.get(url, headers=headers).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.findall('(\d+)', i[0])[0]) for i in r]
            url = None
            results = []
            for i in r:
                try:
                    info = client.request(urlparse.urljoin(self.base_link, self.info_link % i[2]), headers=headers,
                                          timeout='15')
                    y = re.findall('<div\s+class="jt-info">(\d{4})', info)[0]
                    results.append((i[0], i[1]))
                    if self.matchAlias(i[1], aliases) and (year == y):
                        url = i[0]
                        break
                except:
                    continue

            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            return
Example #28
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                query = urlparse.urljoin(
                    self.base_link, self.search_link %
                    urllib.quote_plus(cleantitle.getsearch(title)))
                r = self.scraper.get(query).content
                r = json.loads(r)['content']
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a'))

                if 'tvshowtitle' in data:
                    cltitle = cleantitle.get(title + 'season' + season)
                    cltitle2 = cleantitle.get(title +
                                              'season%02d' % int(season))
                    r = [
                        i for i in r if cltitle == cleantitle.get(i[1])
                        or cltitle2 == cleantitle.get(i[1])
                    ]
                    vurl = '%s%s-episode-%s' % (self.base_link, str(
                        r[0][0]).replace('/info', ''), episode)
                    vurl2 = None
                else:
                    cltitle = cleantitle.getsearch(title)
                    cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                    r = [
                        i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                        or cltitle == cleantitle.getsearch(i[1])
                    ]
                    vurl = '%s%s-episode-0' % (self.base_link, str(
                        r[0][0]).replace('/info', ''))
                    vurl2 = '%s%s-episode-1' % (self.base_link, str(
                        r[0][0]).replace('/info', ''))

                r = self.scraper.get(vurl).content

                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
                if len(slinks) == 0 and vurl2 is not None:
                    r = self.scraper.get(vurl2).content
                    slinks = client.parseDOM(
                        r, 'div', attrs={'class': 'anime_muti_link'})
                    slinks = client.parseDOM(slinks, 'li', ret='data-video')

                for slink in slinks:
                    try:
                        if 'vidnode.net/streaming.php' in slink:
                            r = self.scraper.get('https:%s' % slink).content
                            clinks = re.findall(r'sources:\[(.*?)\]', r)[0]
                            clinks = re.findall(
                                r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)',
                                clinks)
                            for clink in clinks:
                                q = source_utils.label_to_quality(clink[1])
                                sources.append({
                                    'source': 'cdn',
                                    'quality': q,
                                    'language': 'en',
                                    'url': clink[0],
                                    'direct': True,
                                    'debridonly': False
                                })
                        else:
                            valid, hoster = source_utils.is_host_valid(
                                slink, hostDict)
                            if valid:
                                sources.append({
                                    'source': hoster,
                                    'quality': 'SD',
                                    'language': 'en',
                                    'url': slink,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except:
                        pass

            return sources
        except:
            return sources
Example #29
 def clean_search(self, search_str):
     result = cleantitle.getsearch(search_str)
     result = re.sub(' +', ' ', result)
     return result.strip()
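A quick usage sketch: the helper collapses the duplicate spaces `cleantitle.getsearch` can leave behind (exact output depends on getsearch, which is not shown here):

    query = self.clean_search('Some   Movie    Title')  # e.g. -> 'Some Movie Title'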
Example #30
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            is_anime = url[3]
            titles = []
            titles.append(url[0])
            titles.append(url[1])
            try:
                year = url[2]
            except:
                year = ''
            for url_single in titles:
                url_single = cleantitle.normalize(
                    cleantitle.getsearch(url_single))
                words = url_single.split(' ')
                search_url = urlparse.urljoin(
                    self.base_link,
                    self.search_link) % (url_single + " " + year)

                cookies = client.request(self.base_link, output='cookie')
                verifyGet = client.request(self.verify, cookie=cookies)
                cookies = cookies + ";tmvh=" + self.crazy_cookie_hash(
                    verifyGet)
                cache.cache_insert('szukajka_cookie', cookies)

                result = client.request(search_url, cookie=cookies)
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'element'})

                for el in result:

                    found_title = str(
                        client.parseDOM(
                            el, 'div',
                            attrs={'class': 'title'})[0]).lower().replace(
                                "_", " ").replace(".", " ").replace("-", " ")
                    if is_anime:
                        numbers = [
                            int(s) for s in found_title.split() if s.isdigit()
                        ]
                        if int(words[-1]) not in numbers:
                            continue
                    if "zwiastun" in found_title or "trailer" in found_title:
                        continue
                    if len(words) >= 4 or is_anime:
                        if not self.contains_all_words(found_title, words):
                            continue
                    else:
                        if not self.contains_all_words(
                                found_title, words) or year not in found_title:
                            continue
                    q = 'SD'
                    if self.contains_word(found_title,
                                          '1080p') or self.contains_word(
                                              found_title, 'FHD'):
                        q = '1080p'
                    elif self.contains_word(found_title, '720p'):
                        q = 'HD'

                    link = client.parseDOM(el,
                                           'a',
                                           attrs={'class': 'link'},
                                           ret='href')[0]
                    transl_type = client.parseDOM(el,
                                                  'span',
                                                  attrs={'class':
                                                         'version'})[0]
                    transl_type = transl_type.split(' ')
                    transl_type = transl_type[-1]

                    host = client.parseDOM(el, 'span', attrs={'class':
                                                              'host'})[0]
                    host = host.split(' ')
                    host = host[-1]
                    lang, info = self.get_lang_by_type(transl_type)
                    sources.append({
                        'source': host,
                        'quality': q,
                        'language': lang,
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except Exception as e:
            print(str(e))
            return sources