Example 1
    def search(self, title, localtitle, year=''):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                url = self.search_link + str(title)
                result = self.session.get(url).content
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result, 'div', attrs={'class': 'item-detail-bigblock title title-bigblock'})
                for item in result:
                    if 'trailer' in item.lower():
                        continue
                    link = self.base_link + str(client.parseDOM(item, 'a', ret='href')[0])
                    nazwa = str(client.parseDOM(item, 'a')[0])
                    name = cleantitle.normalize(cleantitle.getsearch(nazwa))
                    name = name.replace("  ", " ")
                    title = title.replace("  ", " ")
                    words = title.split(" ")
                    if self.contains_all_words(name, words) and str(year) in name:
                        return link
        except Exception as e:
            print(str(e))
            return
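Nearly every snippet in this listing filters candidates with a contains_all_words helper that is not included in these excerpts. Judging from the call sites, it presumably does a case-insensitive check that every word of the query title occurs in the scraped name. A minimal sketch under that assumption (the name and semantics are inferred, not taken from the original modules):

    def contains_all_words(self, text, words):
        # Hypothetical helper, inferred from the call sites in these
        # examples: True only if every query word occurs in the
        # candidate title (case-insensitive).
        text = text.lower()
        return all(str(word).lower() in text for word in words)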
Example 2
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                url = urlparse.urljoin(self.base_link, self.search_link)
                url = url % urllib.quote(str(title).replace(" ", "+"))
                result = client.request(url)
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'class': 'col-sm-4'})

                for item in result:
                    link = str(client.parseDOM(item, 'a', ret='href')[0])
                    nazwa = str(client.parseDOM(item, 'a', ret='title')[0])
                    name = cleantitle.normalize(cleantitle.getsearch(nazwa))
                    name = name.replace("  ", " ")
                    title = title.replace("  ", " ")
                    words = title.split(" ")
                    if self.contains_all_words(name,
                                               words) and str(year) in link:
                        return link
        except Exception as e:
            log_exception()
            return
Example 3
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                url = urlparse.urljoin(self.base_link, self.search_link)
                url = url % urllib.quote(title)
                cookies = client.request(self.base_link, output='cookie')
                cache.cache_insert('naszekino_cookie', cookies)
                result = client.request(url, cookie=cookies)
                result = result.decode('utf-8')

                result = client.parseDOM(result, 'div', attrs={'class': 'col-sm-4'})
                for item in result:
                    link = str(client.parseDOM(item, 'a', ret='href')[0])
                    nazwa = str(client.parseDOM(item, 'a', ret='title')[0])
                    # rok = str(client.parseDOM(result, 'div', attrs={'class': 'year'})[0])
                    names_found = nazwa.split('/')
                    names_found = [cleantitle.normalize(cleantitle.getsearch(i)) for i in names_found]
                    for name in names_found:
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        if self.contains_all_words(name, words) and str(year) in link:
                            return link
        except Exception as e:
            print(e)
            return
Example 4
    def search(self, title, localtitle, year, is_movie_search):
        try:

            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                url = self.search_link + str(title)
                result = client.request(url)
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result, 'div', attrs={'class': 'row'})

                for item in result:
                    try:
                        link = str(client.parseDOM(item, 'a', ret='href')[0])
                        if link.startswith('//'):
                            link = "https:" + link
                        nazwa = str(client.parseDOM(item, 'img', ret='alt')[0])
                        name = cleantitle.normalize(cleantitle.getsearch(nazwa))
                        rok = link
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        if self.contains_all_words(name, words) and str(year) in rok:
                            return link
                    except:
                        continue
        except Exception as e:
            log_exception()
            return
Example 5
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle)))

            for title in titles:
                url = urlparse.urljoin(self.base_link, self.search_link)
                url = url % urllib.quote(str(title).replace(" ", "_"))

                result = client.request(url)
                result = client.parseDOM(result, 'div', attrs={'class': 'video-clip-wrapper'})
                linki = []
                for item in result:
                    try:
                        link = str(client.parseDOM(item, 'a', ret='href')[0])
                        nazwa = str(client.parseDOM(item, 'a', attrs={'class': 'link-title-visit'})[0])
                        name = cleantitle.normalize(cleantitle.getsearch(nazwa))
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        if self.contains_all_words(name, words) and str(year) in name:
                            linki.append(link)
                    except:
                        continue
                if linki:
                    return linki
        except Exception as e:
            print(e)
            return
Example 6
 def search_ep(self, titles, season, episode, tvdb):
     try:
         odcinek = source_utils.absoluteNumber(tvdb, episode, season)
         for title in titles:
             title = cleantitle.normalize(
                 cleantitle.getsearch(title)).replace(" ", "+").replace(
                     "shippuden", "shippuuden")
             r = self.session.get(self.search_link % title).content
             result = client.parseDOM(r,
                                      'div',
                                      attrs={
                                          'class': 'description pull-right'
                                      })  # links and descriptions
             linki = client.parseDOM(result, 'a', ret='href')
             nazwy = client.parseDOM(result, 'a')
             for row in zip(linki, nazwy):
                 try:
                     tytul = re.findall("""<mark>(.*)</mark>""", row[1])[0]
                 except:
                     continue
                 tytul = cleantitle.normalize(
                     cleantitle.getsearch(tytul)).replace("  ", " ")
                 words = tytul.split(" ")
                 if self.contains_all_words(title, words):
                     link = self.base_link + row[0].replace(
                         'odcinki', 'odcinek') + '/' + odcinek
                     return link
     except:
         return
Example 7
    def search(self, title, localtitle, year):
        try:
            titles = []
            title2 = title.split('.')[0]
            localtitle2 = localtitle.split('.')[0]
            titles.append(cleantitle.normalize(cleantitle.getsearch(title2)))
            titles.append(cleantitle.normalize(cleantitle.getsearch(localtitle2)))
            titles.append(title2)
            titles.append(localtitle2)
            for title in titles:
                try:
                    token = client.request("https://filmowakraina.tv/movies")
                    token = re.findall("""token:.*'(.*?)'""", token)[0]
                    url = self.search_link % (token, urllib.quote_plus(cleantitle.query(title)), 'movie')
                    content = client.request(url)
                    content = json.loads(content)
                    for item in content[u'items']:
                        if year in item[u'release_date']:
                            return item[u'link']
                except:
                    pass

        except Exception as e:
            print(str(e))
            return
Example 8
    def getSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date):
        sourceDict = []
        for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]

        content = 'movie' if tvshowtitle == None else 'episode'


        if content == 'movie':
            sourceDict = [i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i))) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]
        else:
            sourceDict = [i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i) + '_tv')) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]


        global global_sources
        global_sources = []

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.sourcescacheFile

        sourceDict = [i[0] for i in sourceDict if i[1] == 'true']

        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict: threads.append(workers.Thread(self.getMovieSource, title, year, imdb, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            season, episode = alterepisode.alterepisode().get(imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
            for source in sourceDict: threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, date, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))


        try: timeout = int(control.setting('sources_timeout_15'))
        except: timeout = 10


        [i.start() for i in threads]
        #[i.join() for i in threads] ; self.sources = global_sources ; return self.sources


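        # Poll in 0.5 s steps until every scraper thread has finished or the timeout elapses.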
        for i in range(0, timeout * 2):
            is_alive = [x.is_alive() for x in threads]
            if all(x == False for x in is_alive): break
            time.sleep(0.5)

        for i in range(0, 5 * 2):
            is_alive = len([i for i in threads if i.is_alive() == True])
            if is_alive < 10: break
            time.sleep(0.5)


        self.sources = global_sources

        return self.sources
Example 9
    def getSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date):
        sourceDict = []
        for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]

        content = 'movie' if tvshowtitle == None else 'episode'


        if content == 'movie':
            sourceDict = [i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i))) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]
        else:
            sourceDict = [i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i) + '_tv')) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]


        global global_sources
        global_sources = []

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.sourcescacheFile

        sourceDict = [i[0] for i in sourceDict if i[1] == 'true']

        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict: threads.append(workers.Thread(self.getMovieSource, title, year, imdb, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            season, episode = alterepisode.alterepisode().get(imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
            for source in sourceDict: threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, date, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))


        try: timeout = int(control.setting('sources_timeout_40'))
        except: timeout = 40


        [i.start() for i in threads]
        #[i.join() for i in threads] ; self.sources = global_sources ; return self.sources


        for i in range(0, timeout * 2):
            is_alive = [x.is_alive() for x in threads]
            if all(x == False for x in is_alive): break
            time.sleep(0.5)

        for i in range(0, 5 * 2):
            is_alive = len([i for i in threads if i.is_alive() == True])
            if is_alive < 10: break
            time.sleep(0.5)


        self.sources = global_sources

        return self.sources
Example 10
	def getSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date, proxy_options, provider_options, key, session):
		#try:
		sourceDict = []
		self.getSourcesAlive = True
		
		if provider_options !=None:
			myProviders = []
			for prov in provider_options: myProviders += [i for i in self.providersCaller if i['url'].lower() == prov['url'].lower() and str(prov['enabled'])=='True']
		else:
			myProviders = self.providersCaller
		
		content = 'movie' if tvshowtitle == None else 'show'
		
		self.threads[key] = []
		if content == 'movie':
			log(err='Searching Movie: %s' % title)
			title = cleantitle.normalize(title)
			for source in myProviders:
				try:
					source_name = 'Unknown source (import error)'
					source_name = source['name']
					log(err='Searching Movie: %s (%s) in Provider %s' % (title,year,source_name))
					thread_i = workers.Thread(self.getMovieSource, title, year, imdb, proxy_options, key, re.sub('_mv_tv$|_mv$|_tv$', '', source['name']), source['call'])
					self.threads[key].append(thread_i)
					thread_i.start()
				except Exception as e:
					log(type='ERROR', err='getSources %s - %s' % (source_name,e))
		else:
			log(err='Searching Show: %s' % tvshowtitle)
			try:
				tvshowtitle = cleantitle.normalize(tvshowtitle)
			except:
				pass
			try:
				season, episode = alterepisode.alterepisode().get(imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
			except:
				pass
			for source in myProviders:
				try:
					source_name = 'Unknown source (import error)'
					source_name = source['name']
					log(err='Searching Show: %s S%sE%s in Provider %s' % (tvshowtitle,season,episode,source_name))
					thread_i = workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, date, proxy_options, key, re.sub('_mv_tv$|_mv$|_tv$', '', source_name), source['call'])
					self.threads[key].append(thread_i)
					thread_i.start()
				except Exception as e:
					log(type='ERROR', err='getSources %s - %s' % (source_name,e))

		#sourceLabel = [re.sub('_mv_tv$|_mv$|_tv$', '', i) for i in sourceDict]
		#sourceLabel = [re.sub('v\d+$', '', i).upper() for i in sourceLabel]

		#time.sleep(0.5)
		self.getSourcesAlive = False
		return self.sources
Example 11
    def checkSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date):
        sourceDict = []
        for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]

        content = 'movie' if tvshowtitle == None else 'episode'


        if content == 'movie':
            sourceDict = [i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i))) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]
        else:
            sourceDict = [i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i) + '_tv')) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.sourcescacheFile

        sourceDict = [i[0] for i in sourceDict if i[1] == 'true']

        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict: threads.append(workers.Thread(self.getMovieSource, title, year, imdb, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            season, episode = alterepisode.alterepisode().get(imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
            for source in sourceDict:
                #control.log("SOURCE S2 %s" % source)
                threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, date, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))


        try: timeout = int(control.setting('sources_timeout_40'))
        except: timeout = 40

        [i.start() for i in threads]


        for i in range(0, timeout * 2):
            try:
                if xbmc.abortRequested == True: return sys.exit()
                if len(self.sources) >= 10: break

                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                time.sleep(0.5)
            except:
                pass

        if len(self.sources) >= 5: return True
        else: return False
Example 12
    def checkSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date):
        sourceDict = []
        for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]

        content = 'movie' if tvshowtitle == None else 'episode'


        if content == 'movie':
            sourceDict = [i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i))) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]
        else:
            sourceDict = [i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i) + '_tv')) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.sourcescacheFile

        sourceDict = [i[0] for i in sourceDict if i[1] == 'true']

        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict: threads.append(workers.Thread(self.getMovieSource, title, year, imdb, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            season, episode = alterepisode.alterepisode().get(imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
            for source in sourceDict:
                #control.log("SOURCE S2 %s" % source)
                threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, date, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))


        try: timeout = int(control.setting('sources_timeout_40'))
        except: timeout = 40

        [i.start() for i in threads]


        for i in range(0, timeout * 2):
            try:
                if xbmc.abortRequested == True: return sys.exit()
                if len(self.sources) >= 10: break

                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                time.sleep(0.5)
            except:
                pass

        if len(self.sources) >= 10: return True
        else: return False
Example 13
    def search(self, title, localtitle, year):
        try:
            titles = []
            title2 = title.split('.')[0]
            localtitle2 = localtitle.split('.')[0]
            titles.append(cleantitle.normalize(cleantitle.getsearch(title2)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle2)))
            titles.append(title2)
            titles.append(localtitle2)

            cookies = client.request('http://segos.es', output='cookie')
            cache.cache_insert('segos_cookie', cookies)
            for title in titles:
                try:
                    query = self.search_link % urllib.quote_plus(
                        title.replace(" ", "+"))
                    url = urlparse.urljoin(self.base_link, query)

                    result = client.request(url, headers={'Cookie': cookies})

                    results = client.parseDOM(
                        result,
                        'div',
                        attrs={'class': 'col-lg-12 col-md-12 col-xs-12'})
                except:
                    continue
                for result in results:
                    try:
                        segosurl = client.parseDOM(result, 'a', ret='href')[0]
                        result = client.parseDOM(result, 'a')
                        segostitles = cleantitle.normalize(
                            cleantitle.getsearch(result[1])).split('/')
                        segostitles.append(result[1])
                        rok = str(result[1][-5:-1])
                    except:
                        continue
                    for segostitle in segostitles:
                        try:
                            segostitle = segostitle.replace("  ", " ")
                            simply_name = title.replace("  ", " ")
                            words = simply_name.split(" ")
                            if self.contains_all_words(segostitle,
                                                       words) and year == rok:
                                link = urlparse.urljoin(
                                    self.base_link, segosurl)
                                return link
                            continue
                        except:
                            continue
        except Exception as e:
            print(str(e))
            return
Example 14
	def getSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date, proxy_options, provider_options, key):
		#try:
		sourceDict = []
		self.getSourcesAlive = True
		
		if provider_options !=None:
			myProviders = []
			for prov in provider_options: myProviders += [i for i in self.providersCaller if i['url'].lower() == prov['url'].lower() and str(prov['enabled'])=='True']
		else:
			myProviders = self.providersCaller
		
		content = 'movie' if tvshowtitle == None else 'episode'
		
		self.threads[key] = []
		if content == 'movie':
			print('Searching Movie')
			title = cleantitle.normalize(title)
			for source in myProviders:
				try:
					thread_i = workers.Thread(self.getMovieSource, title, year, imdb, proxy_options, key, re.sub('_mv_tv$|_mv$|_tv$', '', source['name']), source['call'])
					self.threads[key].append(thread_i)
					thread_i.start()
				except Exception as e:
					print ('Source getSources %s ERROR %s' % (source,e))
					control.log('Source getSources %s ERROR %s' % (source,e))
					pass
		else:
			print('Searching Episode')
			try:
				tvshowtitle = cleantitle.normalize(tvshowtitle)
			except:
				pass
			try:
				season, episode = alterepisode.alterepisode().get(imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
			except:
				pass
			for source in myProviders:
				try:
					thread_i = workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, date, proxy_options, key, re.sub('_mv_tv$|_mv$|_tv$', '', source['name']), source['call'])
					self.threads[key].append(thread_i)
					thread_i.start()
				except Exception as e:
					print ('Source getSources %s ERROR %s' % (source, e))
					control.log('Source getSources %s ERROR %s' % (source, e))
					pass


		#sourceLabel = [re.sub('_mv_tv$|_mv$|_tv$', '', i) for i in sourceDict]
		#sourceLabel = [re.sub('v\d+$', '', i).upper() for i in sourceLabel]

		#time.sleep(0.5)
		self.getSourcesAlive = False
		return self.sources
Example 15
    def search(self, title, localtitle, year=''):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))
            self.login()
            for title in titles:

                data = {
                    'type': '1',
                    'search': title + ' ' + year + ' (avi|mkv|mp4)'
                }

                self.session.post('https://tb7.pl/mojekonto/szukaj',
                                  data=data).content

                headers = {
                    'Connection': 'keep-alive',
                    'Cache-Control': 'max-age=0',
                    'Origin': 'https://tb7.pl',
                    'Upgrade-Insecure-Requests': '1',
                    'DNT': '1',
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'User-Agent':
                    'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.66 Mobile Safari/537.36',
                    'Accept':
                    'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
                    'Referer': 'https://tb7.pl/mojekonto/szukaj/1',
                    'Accept-Language': 'pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7',
                }

                data = {'sort': 'size'}

                self.session.post('https://tb7.pl/mojekonto/szukaj/1',
                                  headers=headers,
                                  data=data)
                r = self.session.post('https://tb7.pl/mojekonto/szukaj/1',
                                      headers=headers,
                                      data=data).content

                rows = client.parseDOM(r, 'tr')

                if rows:
                    cookies = self.session.cookies
                    cookies = "; ".join(
                        [str(x) + "=" + str(y) for x, y in cookies.items()])
                    cache.cache_insert('tb7_cookie', cookies)
                    return rows
        except Exception as e:
            log_exception()
            return
Example 16
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))
            titles.append(title)
            titles.append(localtitle)
            for title in titles:
                try:
                    url = self.search_link + str(title)
                    result = self.session.get(url).content
                    result = result.decode('utf-8')
                    h = HTMLParser()
                    result = h.unescape(result)
                    result = client.parseDOM(result,
                                             'div',
                                             attrs={'class': 'card-body p-2'})

                    for item in result:
                        try:
                            nazwa = re.findall("""Film online: (.*?)\"""",
                                               item)[0]
                            try:
                                nazwa = re.findall(""">(.*?)<""", nazwa)[0]
                            except:
                                pass
                            name = cleantitle.normalize(
                                cleantitle.getsearch(nazwa))
                            rok = re.findall(
                                """Rok wydania filmu online\".*>(.*?)<""",
                                item)[0]
                            item = str(item).replace(
                                "<span style='color:red'>",
                                "").replace("</span>", "")
                            link = re.findall("""href=\"(.*?)\"""", item)[0]
                            if link.startswith('//'):
                                link = "https:" + link
                            name = name.replace("  ", " ")
                            title = title.replace("  ", " ")
                            words = name.split(" ")
                            if self.contains_all_words(
                                    title, words) and str(year) in rok:
                                return link
                        except:
                            continue
                except:
                    continue
        except Exception as e:
            log_exception()
            return
Example 17
    def search_ep(self, titles, season, episode, year):
        try:
            searchtitles = titles
            for searchtitle in searchtitles:

                response = requests.get(self.base_link +
                                        self.search_serial % searchtitle)
                result = response.content
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result,
                                         'ul',
                                         attrs={'class': 'resultsList hits'})
                items = client.parseDOM(result, 'li')
                items = [x for x in items if not str(x).startswith("<a href")]
                orgtitles = []
                for content in items:
                    try:
                        orgtitle = str(
                            client.parseDOM(
                                content,
                                'div',
                                attrs={'class':
                                       'filmPreview__originalTitle'})[0])
                    except:
                        orgtitle = "0"
                        pass
                    orgtitles.append(orgtitle)
                ids = client.parseDOM(items, 'data', ret='data-id')
                titles = client.parseDOM(result, 'data', ret='data-title')
                years = client.parseDOM(result,
                                        'span',
                                        attrs={'class': 'filmPreview__year'})

                for item in zip(titles, ids, years, orgtitles):
                    f_title = str(item[0])
                    f_id = str(item[1])
                    f_year = str(item[2])
                    f_orgtitle = str(item[3])
                    teststring = cleantitle.normalize(
                        cleantitle.getsearch(searchtitle))
                    words = cleantitle.normalize(
                        cleantitle.getsearch(f_title)).split(" ")
                    if self.contains_all_words(teststring,
                                               words) and year == f_year:
                        return (f_title, f_id, f_year, f_orgtitle, "SERIAL",
                                season, episode)
        except Exception as e:
            print(str(e))
            return
Example 18
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            titles = []
            title2 = title.split('.')[0]
            localtitle2 = localtitle.split('.')[0]
            titles.append(cleantitle.normalize(cleantitle.getsearch(title2)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle2)))
            titles.append(title2)
            titles.append(localtitle2)

            for title in titles:
                headers = {
                    'Connection': 'keep-alive',
                    'Cache-Control': 'max-age=0',
                    'Origin': 'https://www.boxfilm.pl',
                    'Upgrade-Insecure-Requests': '1',
                    'DNT': '1',
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'User-Agent':
                    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3555.0 Safari/537.36',
                    'Accept':
                    'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                    'Referer': 'https://www.boxfilm.pl/szukaj',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Accept-Language': 'pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7',
                }
                data = {'szukaj': title}
                cookies = {
                    'lektor': 'Wszystkie',
                    'cookies-accepted': '1',
                }
                r = requests.post('https://www.boxfilm.pl/szukaj',
                                  headers=headers,
                                  cookies=cookies,
                                  data=data).content
                r = client.parseDOM(r, 'div', attrs={'class': 'video_info'})

                local_simple = cleantitle.get(localtitle)
                for row in r:
                    name_found = client.parseDOM(row, 'h1')[0]
                    year_found = name_found[name_found.find("(") +
                                            1:name_found.find(")")]
                    if cleantitle.get(
                            name_found) == local_simple and year_found == year:
                        url = client.parseDOM(row, 'a', ret='href')[0]
                        return url
        except:
            return
Example 19
 def search_ep(self, titles, season, episode, year):
     try:
         # data = {'login': self.user_name, 'password': self.user_pass}
         # result = self.session.post('https://zalukaj.com/account.php', headers={'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}, data=data)
         headers = {
             'Cookie':
             '__cfduid=d61b42b729455a590ff291892cb688ea11546349293; PHPSESSID=7u6cbc5pagnhqfm84jgjhg9hc2; __PHPSESSIDS=de81fa674b436a948cb337b7f4d2fa3898bd308c'
         }
         query = 'S{:02d}E{:02d}'.format(int(season), int(episode))
         for title in titles:
             title = cleantitle.normalize(cleantitle.getsearch(title))
             result = self.session.get(self.base_link,
                                       headers=headers).content
             result = client.parseDOM(result,
                                      'td',
                                      attrs={'class': 'wef32f'})
             for row in result:
                 try:
                     tytul = client.parseDOM(row, 'a', ret='title')[0]
                 except:
                     continue
                 tytul = cleantitle.normalize(
                     cleantitle.getsearch(tytul)).replace("  ", " ")
                 words = title.split(" ")
                 if self.contains_all_words(tytul, words):
                     link = self.base_link + client.parseDOM(
                         row, 'a', ret='href')[0]
                     content = self.session.get(link,
                                                headers=headers).content
                     content = client.parseDOM(content,
                                               'div',
                                               attrs={'id': 'sezony'})
                     for item in content:
                         if 'Sezon: %s' % str(season) in item:
                             link = self.base_link + client.parseDOM(
                                 item, 'a', ret='href')[0]
                             content = self.session.get(
                                 link, headers=headers).content
                             content = client.parseDOM(
                                 content, 'div', attrs={'class': 'sezony'})
                             for item in content:
                                 if query in item:
                                     link = client.parseDOM(item,
                                                            'a',
                                                            ret='href')[0]
                                     return link
     except:
         return
Example 20
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                (cleantitle.geturl(title.replace('\'', '-'))))
            r = client.request(url, timeout='10', headers=headers)
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            try:
                match = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and year == i[2]
                ][0]
            except:
                match = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]

            url = re.findall('(?://.+?|)(/.+)', match)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            failure = traceback.format_exc()
            print('XMovies - Exception: \n' + str(failure))
            return
Example 21
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        try:
            url = self.tvsearch_link % cleantitle.query10(tvshowtitle)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')
            r = client.parseDOM(r, 'title')

            if not r:
                url = 'http://www.imdb.com/title/%s' % imdb
                url = client.request(url, headers={'Accept-Language':'es-ES'})
                url = client.parseDOM(url, 'title')[0]
                url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
                url = cleantitle.normalize(url.encode("utf-8"))
                url = self.tvsearch_link % cleantitle.geturl(url)

                r = urlparse.urljoin(self.base_link, url)
                r = client.request(r, limit='1')
                r = client.parseDOM(r, 'title')

            if not year in r[0]: raise Exception()

            return url
        except:
            return
Example 22
    def strmFile(self, i):
        try:
            title, year, imdb, tvdb, season, episode, tvshowtitle, premiered = i[
                'title'], i['year'], i['imdb'], i['tvdb'], i['season'], i[
                    'episode'], i['tvshowtitle'], i['premiered']

            episodetitle = urllib.quote_plus(title)
            systitle, syspremiered = urllib.quote_plus(
                tvshowtitle), urllib.quote_plus(premiered)

            transtitle = cleantitle.normalize(
                tvshowtitle.translate(None, '\/:*?"<>|'))

            content = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&date=%s' % (
                sys.argv[0], episodetitle, year, imdb, tvdb, season, episode,
                systitle, syspremiered)

            folder = lib_tools.make_path(self.library_folder, transtitle, year)
            if not os.path.isfile(os.path.join(folder, 'tvshow.nfo')):
                lib_tools.create_folder(folder)
                lib_tools.write_file(os.path.join(folder, 'tvshow.nfo'),
                                     lib_tools.nfo_url('tv', i))

            folder = lib_tools.make_path(self.library_folder, transtitle, year,
                                         season)
            lib_tools.create_folder(folder)
            lib_tools.write_file(
                os.path.join(
                    folder,
                    lib_tools.legal_filename(
                        '%s S%02dE%02d' %
                        (transtitle, int(season), int(episode))) + '.strm'),
                content)
        except:
            pass
Example 23
    def strmFile(self, i):
        try:
            name, title, year, imdb, tmdb = i['name'], i['title'], i[
                'year'], i['imdb'], i['tmdb']

            sysname, systitle = urllib.quote_plus(name), urllib.quote_plus(
                title)

            transtitle = cleantitle.normalize(
                title.translate(None, '\/:*?"<>|'))

            content = '%s?action=play&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s' % (
                sys.argv[0], sysname, systitle, year, imdb, tmdb)

            folder = lib_tools.make_path(self.library_folder, transtitle, year)

            lib_tools.create_folder(folder)
            lib_tools.write_file(
                os.path.join(folder,
                             lib_tools.legal_filename(transtitle) + '.strm'),
                content)
            lib_tools.write_file(os.path.join(folder, 'movie.nfo'),
                                 lib_tools.nfo_url('movie', i))
        except:
            pass
Example 24
    def get_show(self, imdb, tvdb, tvshowtitle, year):
        try:
            url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')
            r = client.parseDOM(r, 'title')

            if not r:
                url = 'http://www.imdb.com/title/%s' % imdb
                url = client.request(url, headers={'Accept-Language': 'es-ES'})
                url = client.parseDOM(url, 'title')[0]
                url = re.sub('\((?:.+?|)\d{4}.+', '', url).strip()
                url = cleantitle.normalize(url.encode("utf-8"))
                url = self.tvsearch_link % cleantitle.geturl(url)

                r = urlparse.urljoin(self.base_link, url)
                r = client.request(r, limit='1')
                r = client.parseDOM(r, 'title')

            if not year in r[0]: raise Exception()

            return url
        except:
            return
Example 25
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [
                    i[0] for i in r
                    if self.matchAlias(i[1], aliases) and (year == i[2])
                ][0]
            except:
                url = None
                pass

            if (url == None):
                url = [
                    i[0] for i in results if self.matchAlias(i[1], aliases)
                ][0]
            return url
        except:
            return
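The matchAlias helper used in this and the other searchMovie/searchShow examples is likewise not shown. A plausible sketch, assuming aliases is a list of dicts carrying a 'title' key (that field name is an assumption):

    def matchAlias(self, title, aliases):
        # Hypothetical: True if the cleaned scraped title equals the
        # cleaned form of any known alias for the movie/show.
        try:
            return any(cleantitle.get(title) == cleantitle.get(alias['title'])
                       for alias in aliases)
        except Exception:
            return False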
Example 26
    def searchShow(self, title, season, year):
        try:
            title = cleantitle.normalize(title)
            t = cleantitle.get(title)

            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s Season %01d' % (title.replace('\'', '-'), int(season)))))
            r = client.request(url, timeout='10')
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            if r:
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
                r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
            else:
                url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s %01d' % (title.replace('\'', '-'), int(year)))))
                r = client.request(url, timeout='10')
                r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
                r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return
Example 27
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = self.s.get(url, headers=headers).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.findall('(\d+)', i[0])[0]) for i in r]
            results = []
            for i in r:
                try:
                    info = client.request(urlparse.urljoin(self.base_link, self.info_link % i[2]), headers=headers,
                                          timeout='15')
                    y = re.findall('<div\s+class="jt-info">(\d{4})', info)[0]
                    if self.matchAlias(i[1], aliases) and (year == y):
                        url = i[0]
                        break
                    # results.append([i[0], i[1], re.findall('<div\s+class="jt-info">(\d{4})', info)[0]])
                except:
                    url = None
                    pass

            # try:
            #    r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            #    url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            # except:
            #    url = None
            #    pass

            if (url == None):
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            return
Example 28
 def getImdbTitle(self, imdb):
     try:
         t = 'http://www.omdbapi.com/?i=%s' % imdb
         t = client.request(t)
         t = json.loads(t)
         t = cleantitle.normalize(t['Title'])
         return t
     except:
         return
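All of these snippets lean on cleantitle.normalize to make scraped titles comparable. A minimal sketch of what it presumably does, assuming the common NFKD accent-stripping variant found in these scraper packages (Python 2, to match the examples; not necessarily the exact module used here):

    import unicodedata

    def normalize(title):
        # Decompose accented characters (NFKD) and drop the combining
        # marks, so e.g. 'Amélie' compares equal to 'Amelie'.
        try:
            title = unicode(title, 'utf-8')
        except TypeError:
            pass  # already unicode
        decomposed = unicodedata.normalize('NFKD', title)
        return ''.join(c for c in decomposed
                       if unicodedata.category(c) != 'Mn').encode('utf-8')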
Example 30
    def search(self, title, localtitle, year, search_type):
        try:
            titles = []
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(title + " 3d")))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle + " 3d")))
            cookies = client.request(self.base_link, output='cookie')
            cache.cache_insert('alltube_cookie', cookies)
            for title in titles:
                r = client.request(urlparse.urljoin(self.base_link,
                                                    self.search_link),
                                   post={'search': cleantitle.query(title)},
                                   headers={'Cookie': cookies})
                r = self.get_rows(r, search_type)

                for row in r:
                    url = client.parseDOM(row, 'a', ret='href')[0]
                    names_found = client.parseDOM(row, 'h3')[0]
                    if names_found.startswith(
                            'Zwiastun') and not title.startswith('Zwiastun'):
                        continue
                    names_found = names_found.encode('utf-8').split('/')
                    names_found = [
                        cleantitle.normalize(cleantitle.getsearch(i))
                        for i in names_found
                    ]
                    for name in names_found:
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        found_year = self.try_read_year(url)
                        if self.contains_all_words(
                                name, words) and (not found_year
                                                  or found_year == year):
                            return url
                        else:
                            continue
                    continue
        except Exception as e:
            print(e)
            return
Example 31
    def search(self, title, localtitle, year, is_movie_search):
        try:
            titles = []
            titles.append(cleantitle.normalize(cleantitle.getsearch(title)))
            titles.append(
                cleantitle.normalize(cleantitle.getsearch(localtitle)))

            # data = {'login': self.user_name, 'password': self.user_pass}
            # result = self.session.post('https://zalukaj.com/account.php', headers={'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}, data=data)
            headers = {
                'Cookie':
                '__cfduid=d61b42b729455a590ff291892cb688ea11546349293; PHPSESSID=7u6cbc5pagnhqfm84jgjhg9hc2; __PHPSESSIDS=de81fa674b436a948cb337b7f4d2fa3898bd308c'
            }
            for title in titles:
                url = self.search_link % str(title).replace(" ", "+")
                result = self.session.get(url, headers=headers).content
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result, 'div', attrs={'class': 'row'})

                for item in result:
                    try:
                        link = str(client.parseDOM(item, 'a', ret='href')[0])
                        if link.startswith('//'):
                            link = "https:" + link
                        elif link.startswith('/'):
                            link = self.base_link + link
                        nazwa = str(client.parseDOM(item, 'a', ret='title')[0])
                        name = cleantitle.normalize(
                            cleantitle.getsearch(nazwa))
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        if self.contains_all_words(
                                name, words) and str(year) in link:
                            return link
                    except:
                        continue
        except Exception as e:
            log_exception()
            return
Example 32
 def searchMovie(self, title, year, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         u = urlparse.urljoin(self.base_link, self.search_link)
         p = urllib.urlencode({'keyword': title})
         r = client.request(u, post=p, XHR=True)
         r = json.loads(r)['content']
         r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
         url = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]
         return url
     except:
         return
Example 33
    def search_ep(self, titles, season, episode, year):
        query = 'S{:02d}E{:02d}'.format(int(season), int(episode))
        try:
            titles_tv = []
            titles_tv.append(cleantitle.normalize(cleantitle.getsearch(titles[0])))
            titles_tv.append(cleantitle.normalize(cleantitle.getsearch(titles[1])))

            for title in titles_tv:
                url = self.search_link + str(title)
                result = client.request(url)
                result = result.decode('utf-8')
                h = HTMLParser()
                result = h.unescape(result)
                result = client.parseDOM(result, 'div', attrs={'class': 'row'})
                result = client.parseDOM(result[5], 'div', attrs={'class': 'col-sm-4'})
                for item in result:
                    try:
                        link = str(client.parseDOM(item, 'a', ret='href')[0])
                        if link.startswith('//'):
                            link = "https:" + link
                        nazwa = str(client.parseDOM(item, 'img', ret='alt')[0])
                        name = cleantitle.normalize(cleantitle.getsearch(nazwa))
                        name = name.replace("  ", " ")
                        title = title.replace("  ", " ")
                        words = title.split(" ")
                        if self.contains_all_words(name, words):
                            result = client.request(link)
                            result = client.parseDOM(result, 'ul', attrs={'id': "episode-list"})
                            result = client.parseDOM(result, 'li')
                            for episode in result:
                                nazwa = client.parseDOM(episode, 'a')[0]
                                link = str(client.parseDOM(episode, 'a', ret='href')[0])
                                if query.lower() in nazwa.lower():
                                    return link

                    except:
                        continue
        except Exception as e:
            log_exception()
            return
Example 34
    def search_ep(self, titles, season, episode):
        try:
            for title in titles:
                simply_name = cleantitle.normalize(cleantitle.getsearch(title))
                query = self.search_link % str(title).replace(" ", "+")
                url = urlparse.urljoin(self.base_link, query)

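                # fetch session cookies once and cache them for the resolver to reuse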
                cookies = client.request(self.base_link, output='cookie')
                cache.cache_insert('segos_cookie', cookies)
                result = client.request(url, headers={'Cookie': cookies})

                results = client.parseDOM(
                    result,
                    'div',
                    attrs={'class': 'col-lg-12 col-md-12 col-xs-12'})
                for result in results:
                    try:
                        segosurl = client.parseDOM(result, 'a', ret='href')[0]
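                        # append the season/episode query params the site expects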
                        segosurl = segosurl + "&s=%s&o=%s" % (season, episode)
                        result = client.parseDOM(result, 'a')
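                        # a result row can list several alternate titles separated by '/'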
                        segostitles = cleantitle.normalize(
                            cleantitle.getsearch(result[1])).split('/')
                    except:
                        continue
                    for segostitle in segostitles:
                        try:
                            segostitle = segostitle.replace("  ", " ")
                            simply_name = simply_name.replace("  ", " ")
                            words = simply_name.split(" ")
                            if self.contains_all_words(segostitle, words):
                                if 'seriale' in segosurl:
                                    link = urlparse.urljoin(
                                        self.base_link, segosurl)
                                    return link
                        except:
                            continue
        except:
            return
Esempio n. 35
0
    def getOriginalTitle(self, imdb):
        try:
            # base64-encoded TMDb 'find by external id' endpoint
            # (decodes to https://api.themoviedb.org/3/find/%s?...&external_source=imdb_id)
            tmdb_link = base64.b64decode(
                'aHR0cHM6Ly9hcGkudGhlbW92aWVkYi5vcmcvMy9maW5kLyVzP2FwaV9rZXk9MTBiYWIxZWZmNzZkM2NlM2EyMzQ5ZWIxMDQ4OTRhNmEmbGFuZ3VhZ2U9ZW4tVVMmZXh0ZXJuYWxfc291cmNlPWltZGJfaWQ=')
            t = client.request(tmdb_link % imdb, timeout='10')
            # a TV hit overrides a movie hit; if both fail, the outer except returns None
            try: title = json.loads(t)['movie_results'][0]['original_title']
            except: pass
            try: title = json.loads(t)['tv_results'][0]['original_name']
            except: pass
            title = cleantitle.normalize(title)
            return title
        except:
            return
Esempio n. 36
0
    def searchShow(self, title, season, aliases, headers):  # 'aliases' added: the body references it
        try:
            title = cleantitle.normalize(title)
            u = urlparse.urljoin(self.base_link, self.search_link)
            p = urllib.urlencode({'keyword': ('%s - Season %s' % (title, season))})
            r = client.request(u, post=p, XHR=True)
            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
            # '\d+' so double-digit seasons also match; compare season numbers as strings
            r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == str(season)][0]
            return url
        except:
            return
Esempio n. 37
0
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            # '\d+' so double-digit seasons also match; compare season numbers as strings
            r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == str(season)][0]
            return url
        except:
            return
Esempio n. 38
0
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
            r = self.s.get(url, headers=headers).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            # '\d+' so double-digit seasons also match; compare season numbers as strings
            r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == str(season)][0]
            return url
        except:
            return
Esempio n. 39
0
    def searchMovie(self, title, year):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % (cleantitle.geturl(title.replace('\'', '-'))))
            r = client.request(url, timeout='10')
            t = cleantitle.get(title)
            r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
            # split a result title like "Title (2019)" into name and year
            r = [(i[0], re.findall('(.+?) \((\d{4})', i[1])) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
            # strip the scheme/host so only the path is kept
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return
Esempio n. 40
0
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            # pull the 4-digit year out of each result title, e.g. "Title (2019)"
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except:
                url = None

            # fall back to an alias-only match when no result carries the right year
            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            return
Esempio n. 41
0
    def getSources(self, name, title, year, imdb, tmdb, tvdb, tvrage, season, episode, tvshowtitle, alter, date):
        sourceDict = []
        # discover every scraper module that ships alongside this package
        # (loop variable renamed so it does not shadow the 'name' argument)
        for package, module_name, is_pkg in pkgutil.walk_packages(__path__):
            sourceDict.append((module_name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if not i[1]]

        content = 'movie' if tvshowtitle is None else 'episode'

        if content == 'movie':
            # movie scrapers end in '_mv' or '_mv_tv'; pair each with its enable setting
            sourceDict = [i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i))) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]
        else:
            # TV scrapers end in '_tv' or '_mv_tv'; their setting ids carry a '_tv' suffix
            sourceDict = [i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))]
            try: sourceDict = [(i, control.setting(re.sub('_mv_tv$|_mv$|_tv$', '', i) + '_tv')) for i in sourceDict]
            except: sourceDict = [(i, 'true') for i in sourceDict]

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.sourcescacheFile

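        # keep only the scrapers the user left enabled in settings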
        sourceDict = [i[0] for i in sourceDict if i[1] == 'true']

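        # spin up one worker thread per enabled scraper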
        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict: threads.append(workers.Thread(self.getMovieSource, title, year, imdb, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            season, episode = alterepisode.alterepisode().get(imdb, tmdb, tvdb, tvrage, season, episode, alter, title, date)
            for source in sourceDict: threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, date, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))

        try: timeout = int(control.setting('sources_timeout_40'))
        except: timeout = 40

        for i in threads: i.start()

        control.idle()

        sourceLabel = [re.sub('_mv_tv$|_mv$|_tv$', '', i) for i in sourceDict]
        sourceLabel = [re.sub('v\d+$', '', i).upper() for i in sourceLabel]

        self.progressDialog = control.progressDialog
        self.progressDialog.create(control.addonInfo('name'), '')
        self.progressDialog.update(0)

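        # localized labels for the progress dialog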
        string1 = control.lang(30512).encode('utf-8')
        string2 = control.lang(30513).encode('utf-8')
        string3 = control.lang(30514).encode('utf-8')

        # poll every half second until all threads finish, the timeout expires, or the user cancels
        for i in range(0, timeout * 2):
            try:
                if xbmc.abortRequested: return sys.exit()

                try: info = [sourceLabel[int(re.sub('[^0-9]', '', str(x.getName()))) - 1] for x in threads if x.is_alive()]
                except: info = []

                # with more than five scrapers still running, show just the count
                if len(info) > 5: info = len(info)

                self.progressDialog.update(int((100 / float(len(threads))) * len([x for x in threads if not x.is_alive()])), str('%s: %s %s' % (string1, int(i * 0.5), string2)), str('%s: %s' % (string3, str(info).translate(None, "[]'"))))

                if self.progressDialog.iscanceled(): break

                if not any(x.is_alive() for x in threads): break
                time.sleep(0.5)
            except:
                pass

        self.progressDialog.close()

        return self.sources