Example #1
    def sources(self, url, hostDict, hostprDict):

        sources = []
        try:
            if url is None: return sources
            r = client.request(urlparse.urljoin(self.base_link, url),
                               redirect=False)
            info = self.get_lang_by_type(client.parseDOM(r, 'title')[0])
            r = client.parseDOM(r, 'div', attrs={'class':
                                                 'tab-pane active'})[0]
            r = client.parseDOM(r, 'script')[0]
            script = r.split('"')[1]
            decoded = self.shwp(script)

            link = client.parseDOM(decoded, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            q = source_utils.check_sd_url(link)
            sources.append({
                'source': host,
                'quality': q,
                'language': 'pl',
                'url': link,
                'info': info,
                'direct': False,
                'debridonly': False
            })

            return sources
        except:
            return sources
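Every example below leans on client.parseDOM to pull tags out of raw HTML. Its body is not shown here, but the call sites pin down the contract: it takes markup (a string or a list of strings), a tag name, an optional attrs dict whose values are treated as regular expressions, and an optional ret naming an attribute to return instead of the inner HTML; it always returns a list. A minimal regex-based sketch of that contract (the real helper is more robust, e.g. it handles nested tags of the same name):

import re

def parseDOM(html, tag, attrs=None, ret=None):
    # Minimal sketch of the client.parseDOM contract inferred from the call
    # sites in these examples; not the real implementation.
    if isinstance(html, list):
        html = ''.join(html)
    attrs = attrs or {}
    # Require every requested attribute via lookaheads; attribute values are
    # regular expressions, matching usage like attrs={'id': 'mov\w+|tv\w+'}.
    cond = ''.join(r'(?=[^>]*\b%s=["\'](?:%s)["\'])' % (k, v)
                   for k, v in attrs.items())
    pattern = r'<%s\b%s[^>]*>' % (tag, cond)
    results = []
    for m in re.finditer(pattern, html, re.I | re.S):
        if ret:
            # Return the named attribute of the opening tag.
            attr = re.search(r'\b%s=["\']([^"\']*)["\']' % ret,
                             m.group(0), re.I)
            if attr:
                results.append(attr.group(1))
        else:
            # Return the inner HTML up to the next closing tag.
            close = re.search(r'</%s>' % tag, html[m.end():], re.I)
            results.append(html[m.end():m.end() + close.start()]
                           if close else html[m.end():])
    return results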
Example #2
	def search(self, title, localtitle, year, search_type):
		try:
			titles = []
			titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
			titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle)))
			cookies = client.request(self.base_link, output='cookie')
			cache.cache_insert('alltube_cookie', cookies)
			for title in titles:
				r = client.request(urlparse.urljoin(self.base_link, self.search_link),
				                   post={'search': cleantitle.query(title)}, headers={'Cookie': cookies})
				r = self.get_rows(r, search_type)

				for row in r:
					url = client.parseDOM(row, 'a', ret='href')[0]
					names_found = client.parseDOM(row, 'h3')[0]
					if names_found.startswith('Zwiastun') and not title.startswith('Zwiastun'):
						continue  # skip trailer rows ('Zwiastun' is Polish for 'trailer')
					names_found = names_found.encode('utf-8').split('/')
					names_found = [cleantitle.normalize(cleantitle.getsearch(i)) for i in names_found]
					for name in names_found:
						name = name.replace("  ", " ")
						title = title.replace("  ", " ")
						words = title.split(" ")
						found_year = self.try_read_year(url)
						if self.contains_all_words(name, words) and (not found_year or found_year == year):
							return url
						else:
							continue
					continue
		except:
			return
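Example #2 relies on two small helpers of the scraper class whose bodies are not shown. Judging only from the call sites, plausible reconstructions look like this (hypothetical sketches, not the project's actual code):

import re

def contains_all_words(self, name, words):
    # Hypothetical: a candidate matches when every search word occurs in it,
    # case-insensitively, mirroring the loop above.
    name = name.lower()
    return all(w.lower() in name for w in words if w)

def try_read_year(self, url):
    # Hypothetical: extract a 4-digit year from the result URL, or None when
    # the URL carries no year (the caller treats None as 'no constraint').
    match = re.search(r'\b(19\d{2}|20\d{2})\b', url)
    return match.group(1) if match else None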
Example #3
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         hostDict = hostprDict + hostDict
         headers = {'Referer': url}
         r = self.scraper.get(url, headers=headers).content
         u = client.parseDOM(r,
                             "span",
                             attrs={"class": "movie_version_link"})
         for t in u:
             match = client.parseDOM(t, 'a', ret='data-href')
             for url in match:
                 if url in str(sources):
                     continue
                 quality, info = source_utils.get_release_quality(url, url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
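source_utils.is_host_valid appears in almost every sources() method. Based solely on how it is called, it takes a candidate link and the list of known file hosts and returns a (valid, host) pair. A hypothetical sketch of that behaviour (the real helper also knows about resolver aliases and edge cases):

import urlparse  # Python 2, as in these scrapers

def is_host_valid(url, hostDict):
    # Hypothetical sketch: report whether the link points at a known host
    # and, if so, which one.
    try:
        netloc = urlparse.urlparse(url).netloc.lower().replace('www.', '')
        for known in hostDict:
            if known.lower() in netloc:
                return True, known.rsplit('.', 1)[0]
        return False, netloc
    except Exception:
        return False, ''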
Example #4
    def sources(self, url, hostDict, hostprDict):

        sources = []
        try:

            if url is None: return sources
            result = client.request(urlparse.urljoin(self.base_link, url), redirect=False)

            section = client.parseDOM(result, 'section', attrs={'id': 'video_player'})[0]
            link = client.parseDOM(section, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid: return sources
            spans = client.parseDOM(section, 'span')
            info = None
            for span in spans:
                if span == 'Z lektorem':  # Polish: 'with a voice-over narrator'
                    info = 'Lektor'

            q = source_utils.check_sd_url(link)
            sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': info, 'direct': False,
                            'debridonly': False})

            return sources
        except:
            return sources
Example #5
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None:
                return

            r = self.scraper.get(url, headers={'referer': self.base_link}).content

            r = client.parseDOM(r, 'li', attrs={'itemprop': 'episode'})

            t = cleantitle.get(title)

            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs={'itemprop': 'name'}),
                  re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split(' ')[-1], i[2])
                 for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if not url:
                url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url:
                url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url:
                raise Exception()

            return url[0][0]
        except:
            return
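The interesting part of Example #5 is the three-pass narrowing at the end: exact name plus air date first, then name alone, then air date alone, giving up when the result is still ambiguous. Factored out over the (url, name, date) tuples built above, the cascade reads as follows (a sketch, assuming the same cleantitle helper):

def pick_episode(candidates, t, premiered):
    # candidates: (url, episode_name, air_date) tuples as assembled above.
    matches = [i for i in candidates
               if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
    if not matches:
        matches = [i for i in candidates if t == cleantitle.get(i[1])]
    if len(matches) != 1:
        matches = [i for i in candidates if premiered == i[2]]
    if len(matches) != 1:
        raise Exception('no unambiguous episode match')
    return matches[0][0]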
Example #6
 def __search(self, titles, year):
     try:
         query = self.search_link % (cleantitle.getsearch(titles[0].replace(
             ' ', '%20')))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(
             r,
             'li',
             attrs={'class': 'item everyone-item over_online haveTooltip'})
         for i in r:
             title = client.parseDOM(i, 'a', ret='title')[0]
             url = client.parseDOM(i, 'a', ret='href')[0]
             data = client.request(url)
             y = re.findall('<p><span>Año:</span>(\d{4})', data)[0]
             original_t = re.findall('movie-text">.+?h2.+?">\((.+?)\)</h2>',
                                     data, re.DOTALL)[0]
             original_t, title = cleantitle.get(original_t), cleantitle.get(
                 title)
             if (t in title or t in original_t) and y == year:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
         return
     except:
         return
Example #7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         hostDict = hostprDict + hostDict
         r = self.scraper.get(url).content
         u = client.parseDOM(r, "ul", attrs={"id": "serverul"})
         for t in u:
             links = client.parseDOM(t, 'a', ret='href')
             for url in links:
                 if 'getlink' in url:
                     continue
                 quality = source_utils.check_url(url)
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'url': url,
                         'direct': False,
                         'debridonly': False
                     })
         # return once every server list has been processed, not inside the
         # loop after the first one
         return sources
     except:
         return sources
Example #8
    def get_links_from_box(self, result, hostDict):
        sources = []

        src_url = client.parseDOM(result, 'tr', attrs={'id': 'mov\w+|tv\w+'})
        for item in src_url:

            url = client.parseDOM(item, 'a', ret='href')[0]

            url = client.request(url.replace('https://www.', 'http://'))

            url = client.parseDOM(url, 'a', ret='href')[0]

            data = re.findall('<td>(.+?)</td>', item, re.DOTALL)

            # lang_type = data[2].split()[1]

            if 'HD' in data[1]:
                q = 'HD'
            else:
                q = 'SD'

            # host = re.findall('">(.+?)\.',data[0], re.DOTALL )[0]
            valid, host = source_utils.is_host_valid(url, hostDict)

            lang, info = 'es', 'LAT'

            sources.append(
                {'source': host, 'quality': q, 'language': lang, 'url': url, 'info': info, 'direct': False,
                 'debridonly': False})

        return sources
Example #9
    def __search(self, titles, year):
        try:

            query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = [cleantitle.get(i) for i in set(titles) if i][0]

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'details'})

            for i in r:
                title = client.parseDOM(i, 'div', attrs={'class': 'title'})[0]
                y = client.parseDOM(i, 'span', attrs={'class': 'year'})[0]
                title = re.findall('">(.+?)</a', title, re.DOTALL)[0]
                title = cleantitle.get_simple(title)

                if t in title and y == year:
                    x = dom_parser.parse_dom(i, 'a', req='href')
                    return source_utils.strip_domain(x[0][0]['href'])

            return
        except:
            return
Example #10
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			if not url:
				return
			hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
			hostDict.append(('okru', 'ok.ru'))
			locDict = [i[0] for i in hostDict]
			url = urlparse.urljoin(self.base_link, url)
			r = client.request(url)
			r = client.parseDOM(r, 'ul', attrs={'class': '[^\'"]*lecteurs nop[^\'"]*'})
			r = client.parseDOM(r, 'li')
			r = [(client.parseDOM(i, 'a', ret='data-streamer'), client.parseDOM(i, 'a', ret='data-id')) for i in r]
			r = [(i[0][0], i[1][0], re.search('([a-zA-Z]+)(?:_([a-zA-Z]+))?', i[0][0]),) for i in r if i[0] and i[1]]
			r = [(i[0], i[1], i[2].group(1), i[2].group(2)) for i in r if i[2]]
			for streamer, id, host, info in r:
				if host not in locDict:
					continue
				host = [x[1] for x in hostDict if x[0] == host][0]
				link = urlparse.urljoin(self.base_link, '/%s/%s/%s' % (
				('streamerSerie' if '/series/' in url else 'streamer'), id, streamer))
				sources.append(
					{'source': host, 'quality': 'SD', 'url': link, 'language': 'FR', 'info': info if info else '',
					 'direct': False, 'debridonly': False})
			return sources
		except:
			return sources
Example #11
 def __search(self, titles, year, content):
     try:
         query = self.search_link % (urllib.quote_plus(
             cleantitle.getsearch(titles[0])))
         query = urlparse.urljoin(self.base_link, query)
         t = [cleantitle.get(i) for i in set(titles) if i][0]
         r = client.request(query)
         r = client.parseDOM(r,
                             'div',
                             attrs={'class': 'tab-content clearfix'})
         if content == 'movies':
             r = client.parseDOM(r, 'div', attrs={'id': 'movies'})
         else:
             r = client.parseDOM(r, 'div', attrs={'id': 'series'})
         data = dom_parser.parse_dom(r, 'figcaption')
         for i in data:
             title = i[0]['title']
             title = cleantitle.get(title)
             if title in t:
                 x = dom_parser.parse_dom(i, 'a', req='href')
                 return source_utils.strip_domain(x[0][0]['href'])
             else:
                 url = dom_parser.parse_dom(i, 'a', req='href')
                 data = client.request(url[0][0]['href'])
                 data = re.findall(
                     '<h3>Pelicula.+?">(.+?)\((\d{4})\).+?</a>', data,
                     re.DOTALL)[0]
                 if titles[0] in data[0] and year == data[1]:
                     return source_utils.strip_domain(url[0][0]['href'])
         return
     except:
         return
Example #12
    def search(self, title, localtitle, year):
        try:
            titles = []
            title2 = title.split('.')[0]
            localtitle2 = localtitle.split('.')[0]
            titles.append(cleantitle.normalize(cleantitle.getsearch(title2)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle2)))
            titles.append(title2)
            titles.append(localtitle2)

            for title in titles:
                title = title.replace(" ", "+")
                result = client.request(self.search_link % title)

                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'col-xs-4'})
                for item in result:
                    try:
                        rok = client.parseDOM(item,
                                              'div',
                                              attrs={'class': 'col-sm-8'})
                        rok_nazwa = client.parseDOM(rok, 'p')[0].lower()
                        link = client.parseDOM(item, 'a', ret='href')[0]
                        link = self.base_link + link
                        words = title.lower().split(" ")
                        if self.contains_all_words(
                                rok_nazwa, words) and year in rok_nazwa:
                            return link
                    except:
                        continue
            return
        except:
            return
Example #13
 def resolve(self, url):
     result = client.request(url)
     result = client.parseDOM(result,
                              'div',
                              attrs={'class': 'boton reloading'})
     link = client.parseDOM(result, 'a', ret='href')[0]
     return link
Example #14
    def search(self, localtitle, year, search_type):
        try:

            url = urlparse.urljoin(self.base_link, self.search_link)
            r = client.request(url,
                               redirect=False,
                               post={
                                   'q': cleantitle.query(localtitle),
                                   'sb': ''
                               })
            r = client.parseDOM(r, 'div', attrs={'class': 'small-item'})

            local_simple = cleantitle.get(localtitle)
            for row in r:
                name_found = client.parseDOM(row, 'a')[1]
                year_found = name_found[name_found.find("(") +
                                        1:name_found.find(")")]
                url = client.parseDOM(row, 'a', ret='href')[1]
                if search_type not in url:
                    continue

                if cleantitle.get(
                        name_found) == local_simple and year_found == year:
                    return url
        except:
            return
Example #15
    def resolve(self, url):
        try:
            # cookies = client.request(url, output='cookie')
            # verifyGet = client.request(self.verify, cookie = cookies)
            # cookies = cookies + ";tmvh=" + self.crazy_cookie_hash(verifyGet)
            cookies = cache.cache_get('szukajka_cookie')
            replace = re.findall("""tmvh=(.*)""", str(cookies['value']))[0]
            cookies = str(cookies['value'])

            verifyGet = client.request(self.verify, cookie=cookies)
            tmvh = self.crazy_cookie_hash(verifyGet)
            cookies = cookies.replace(replace, tmvh)

            page = client.request(url, cookie=cookies)
            submit_link = client.parseDOM(page,
                                          'a',
                                          attrs={'class': 'submit'},
                                          ret='href')[0]

            replace = re.findall("""tmvh=(.*)""", cookies)[0]
            verifyGet = client.request(self.verify, cookie=cookies)
            tmvh = self.crazy_cookie_hash(verifyGet)
            cookies = cookies.replace(replace, tmvh)

            player_page = client.request(submit_link, cookie=cookies)
            iframes = client.parseDOM(player_page, 'iframe', ret='src')
            video_url = iframes[0].replace(
                "javascript:window.location.replace('", "").replace("')", "")
            return video_url
        except Exception:
            return
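The resolver above runs the same three-step refresh twice: re-fetch the verification page, recompute the tmvh anti-bot token, and splice it back into the cookie string. That block factors naturally into a helper (a sketch using the same class attributes and modules as the original):

def refresh_tmvh(self, cookies):
    # Sketch: recompute the 'tmvh' token and splice it into the cookie
    # string, replacing the stale value.
    stale = re.findall(r'tmvh=(.*)', cookies)[0]
    verify_page = client.request(self.verify, cookie=cookies)
    fresh = self.crazy_cookie_hash(verify_page)
    return cookies.replace(stale, fresh)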
Example #16
    def get_from_main_player(self, result, sources, hostDict):
        result_sources = []

        data = client.parseDOM(result, 'div', attrs={'id': 'playex'})

        links = client.parseDOM(data, 'iframe', ret='src')
        r = client.parseDOM(result, 'a', attrs={'class': 'options'})

        for i in range(len(r)):

            item = r[i].split()
            host = item[-4]
            q = item[-3]

            if 'Latino' in item[-1]:
                lang, info = 'es', 'LAT'
            else:
                lang, info = 'es', None

            url = links[i]
            if 'megapelistv' in url:
                url = client.request(url.replace('https://www.', 'http://'))
                url = client.parseDOM(url, 'a', ret='href')[0]
            if self.url_not_on_list(url, sources):
                valid, host = source_utils.is_host_valid(url, hostDict)
                result_sources.append(
                    {'source': host, 'quality': q, 'language': lang, 'url': url, 'info': info, 'direct': False,
                     'debridonly': False})

        return result_sources
Example #17
    def extract_sources(self, transl_type, links):
        sources = []
        data_refs = client.parseDOM(links, 'li', ret='data-ref')
        result = client.parseDOM(links, 'li')

        lang, info = self.get_lang_by_type(transl_type)

        for i in range(0, len(result)):

            el = result[i]
            host = client.parseDOM(el, 'span', attrs={'class': 'host'})[0]
            quality = client.parseDOM(el, 'span', attrs={'class':
                                                         'quality'})[0]
            q = 'SD'
            if quality.endswith('720p'):
                q = 'HD'
            elif quality.endswith('1080p'):
                q = '1080p'

            sources.append({
                'source': host,
                'quality': q,
                'language': lang,
                'url': data_refs[i],
                'info': info,
                'direct': False,
                'debridonly': False
            })

        return sources
Example #18
    def get_from_main_player(self, result, sources):

        q = 'SD'
        if not sources and client.parseDOM(result, 'span', attrs={'class': 'calidad2'}):
            q = 'HD'
        player2 = client.parseDOM(result, 'div', attrs={'id': 'player2'})
        links = client.parseDOM(player2, 'iframe', ret='src')

        player_nav = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'player_nav'})
        transl_type = client.parseDOM(player_nav, 'a')
        result_sources = []
        for i in range(0, len(links)):
            url = links[i]
            if self.url_not_on_list(url, sources):
                lang, info = self.get_lang_by_type(transl_type[i])
                host = url.split("//")[-1].split("/")[0]
                result_sources.append({
                    'source': host,
                    'quality': q,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })

        return result_sources
Example #19
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = self.scraper.get(search_url).content
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            data = self.scraper.get(url).content
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Example #20
    def do_search(self, title, local_title, year, video_type):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % urllib.quote_plus(cleantitle.query(title))
            result = client.request(url)
            result = client.parseDOM(result, 'div', attrs={'class': 'item'})
            for row in result:
                row_type = client.parseDOM(row,
                                           'div',
                                           attrs={'class': 'typepost'})[0]
                if row_type != video_type:
                    continue
                names = client.parseDOM(row, 'span', attrs={'class': 'tt'})[0]
                names = names.split('/')
                year_found = client.parseDOM(row,
                                             'span',
                                             attrs={'class': 'year'})

                titles = [cleantitle.get(i) for i in [title, local_title]]

                if self.name_matches(names, titles,
                                     year) and (len(year_found) == 0
                                                or year_found[0] == year):
                    url = client.parseDOM(row, 'a', ret='href')[0]
                    return urlparse.urljoin(self.base_link, url)
        except:
            return
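name_matches in Example #20 is another unseen helper; the call passes the slash-separated names from the row, the cleaned wanted titles, and the year. A hypothetical reconstruction consistent with that call (not the project's actual code):

def name_matches(self, names, titles, year):
    # Hypothetical: a row matches when any of its alternative names cleans
    # to one of the wanted titles, with or without the year appended.
    for name in names:
        clean = cleantitle.get(name)
        if clean in titles or clean in [t + year for t in titles]:
            return True
    return False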
Example #21
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			if not url:
				return sources
			query = urlparse.urljoin(self.base_link, url)
			r = client.request(query)
			links = client.parseDOM(r, 'tbody')
			links = client.parseDOM(links, 'a', ret='href')
			for i in range(len(links)):
				url = links[i]
				if 'target' in url: continue
				data = client.request(url)
				url = client.parseDOM(data, 'iframe', ret='src')[0]
				if url.startswith('/go'): url = re.findall('go\?(.+?)-', url)[0]
				if 'crypt' in url: continue
				if 'redvid' in url:
					data = client.request(url)
					url = client.parseDOM(data, 'iframe', ret='src')[0]
				if any(x in url for x in ['.online', 'xrysoi.se', 'filmer', '.bp', '.blogger', 'youtu']):
					continue
				quality = 'SD'
				lang, info = 'gr', 'SUB'
				valid, host = source_utils.is_host_valid(url, hostDict)
				if 'hdvid' in host: valid = True
				if not valid: continue
				sources.append({'source': host, 'quality': quality, 'language': lang, 'url': url, 'info': info,
				                'direct': False, 'debridonly': False})
			return sources
		except:
			return sources
Example #22
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            result = client.request(url)
            # can't use dom_parser here because the HTML is malformed: the div is never closed
            result = re.findall('<ul class="episodios">(.*?)</ul>', result,
                                re.MULTILINE | re.DOTALL)
            for item in result:
                season_episodes = re.findall('<li>(.*?)</li>', item,
                                             re.MULTILINE | re.DOTALL)
                for row in season_episodes:
                    s = client.parseDOM(row,
                                        'div',
                                        attrs={'class':
                                               'numerando'})[0].split('x')
                    season_found = s[0].strip()
                    episode_found = s[1].strip()
                    if season_found != season:
                        break
                    if episode_found == episode:
                        return client.parseDOM(row, 'a', ret='href')[0]

        except:
            return
Example #23
    def searchMovie(self, title, year, aliases):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link,
                                   self.search_link % cleantitle.geturl(title))
            r = self.scraper.get(url).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None

            if url is None:
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]

            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            return
Example #24
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if not url:
             return sources
         r = client.request(url)
         links = client.parseDOM(r, 'div', attrs={'class': 'mtos'})
         for i in range(1, len(links)):
             idioma = client.parseDOM(links[i], 'img', ret='src')[0]
             if 'in.' in idioma: continue
             quality = client.parseDOM(links[i],
                                       'div',
                                       attrs={'class': 'dcalidad'})[0]
             servidor = re.findall("src='.+?'\s*/>(.+?)</div>", links[i])[0]
             lang, info = self.get_lang_by_type(idioma)
             quality = self.quality_fixer(quality)
             link = dom_parser.parse_dom(links[i], 'a',
                                         req='href')[0][0]['href']
             url = link
             if 'streamcloud' in url: quality = 'SD'
             valid, host = source_utils.is_host_valid(servidor, hostDict)
             sources.append({
                 'source': host,
                 'quality': quality,
                 'language': lang,
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': False
             })
         return sources
     except:
         return sources
Example #25
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []

			if url is None:
				return sources

			url = urlparse.urljoin(self.base_link, url)
			cookies = cache.cache_get('alltube_cookie')['value']
			result = client.request(url, headers={'Cookie': cookies})

			links = client.parseDOM(result, 'tr')
			links = [(client.parseDOM(i, 'a', attrs={'class': 'watch'}, ret='href')[0],
			          client.parseDOM(i, 'img', ret='alt')[0],
			          client.parseDOM(i, 'td', attrs={'class': 'text-center'})[0]) for i in links]

			for i in links:
				try:
					url1 = '%s?%s' % (url, i[0])
					url1 = url1.encode('utf-8')
					language, info = self.get_language_by_type(i[2])

					sources.append({'source': i[1].encode('utf-8'), 'quality': 'SD', 'language': language, 'url': url1,
					                'info': info, 'direct': False, 'debridonly': False})
				except:
					pass

			return sources
		except:
			return sources
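Examples #2 and #25 pair up through the cache module: search() stores the session cookie under 'alltube_cookie' and sources() reads it back with cache_get(...)['value']. From those two call sites, cache_insert(key, value) persists a string and cache_get(key) returns a row-like dict with a 'value' field. A minimal in-memory stand-in (a sketch; the real module persists across addon runs):

_cache = {}

def cache_insert(key, value):
    # Sketch: store a value under a key (the real helper writes to disk).
    _cache[key] = {'key': key, 'value': value}

def cache_get(key):
    # Sketch: return the stored row, or None when the key is missing,
    # matching the cache_get('alltube_cookie')['value'] access in Example #25.
    return _cache.get(key)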
Example #26
	def search(self, titles, season, episode):
		try:
			for title in titles:
				headers = {
					'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
					'Referer': self.base_link
				}
				data = {'text': title}
				result = self.session.post(self.search_link, data=data, headers=headers).content
				if result is None:
					continue
				# zero-pad into the SxxExx label used in the episode list
				query = 'S%sE%s' % (str(season).zfill(2), str(episode).zfill(2))
				result = client.parseDOM(result, 'div', attrs={'class': 'episodes-list'})
				results = client.parseDOM(result, 'li')
				for result in results:
					test = client.parseDOM(result, 'span')[1]
					if query == str(test):
						link = client.parseDOM(result, 'a', ret='href')[0]
						return link
		except:
			return
Example #27
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = self.scraper.get(url, headers=headers).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.findall('(\d+)', i[0])[0]) for i in r]
            results = []
            url = None
            for i in r:
                try:
                    info = client.request(urlparse.urljoin(self.base_link, self.info_link % i[2]), headers=headers,
                                          timeout='15')
                    y = re.findall('<div\s+class="jt-info">(\d{4})', info)[0]
                    if self.matchAlias(i[1], aliases) and (year == y):
                        url = i[0]
                        break
                    # keep every candidate so the alias-only fallback below
                    # has something to work with
                    results.append([i[0], i[1], y])
                except:
                    pass

            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            return
Example #28
 def links(self, url):
     urls = []
     try:
         if url is None: return
         r = client.request(url)
         r = client.parseDOM(r, 'div', attrs={'class': 'entry'})
         r = client.parseDOM(r, 'a', ret='href')
         r1 = [i for i in r if 'money' in i][0]
         r = client.request(r1)
         r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})[0]
         if 'enter the password' in r:
             plink = client.parseDOM(r, 'form', ret='action')[0]
             post = {'post_password': '******', 'Submit': 'Submit'}
             send_post = client.request(plink, post=post, output='cookie')
             link = client.request(r1, cookie=send_post)
         else:
             link = client.request(r1)
         link = re.findall('<strong>Single(.+?)</tr', link, re.DOTALL)[0]
         link = client.parseDOM(link, 'a', ret='href')
         urls = [i.split('=')[-1] for i in link]
         return urls
     except:
         pass
Example #29
    def __search(self, title, season):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.query(title)))
            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(title)

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'moviefilm'})
            r = client.parseDOM(r, 'div', attrs={'class': 'movief'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a'))
                 for i in r]
            r = [(i[0][0], i[1][0].lower()) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], i[1], re.findall('(.+?)\s+(?:saison)\s+(\d+)', i[1]))
                 for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1],
                  i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], re.findall('\((.+?)\)$', i[1]), i[2]) for i in r]
            r = [(i[0], i[2][0] if len(i[2]) > 0 else i[1], i[3]) for i in r]
            r = [
                i[0] for i in r
                if t == cleantitle.get(i[1]) and int(i[2]) == int(season)
            ][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #30
    def __search(self, titles, year):
        try:

            query = self.search_link % (urllib.quote_plus(
                cleantitle.getsearch(titles[0] + ' ' + year)))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])

            r = client.request(query)

            r = client.parseDOM(r, 'div', attrs={'class': 'card'})

            r = client.parseDOM(r, 'h3')

            for i in r:
                data = re.findall(
                    '<span.*?>(.+?)</span>.+?date">\s*\((\d{4}).*?</span>', i,
                    re.DOTALL)
                for found_title, found_year in data:
                    found_title = cleantitle.get(found_title)
                    # compare against the year argument instead of shadowing
                    # it with the loop variable (the old year == y was always true)
                    if found_title in t and found_year == year:
                        url = client.parseDOM(i, 'a', ret='href')[0]
                        return source_utils.strip_domain(url)

            return
        except:
            return