Example #1
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            url = self.moviesearch_link % cleantitle.geturl(title)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1', timeout='10')
            r = client.parseDOM(r, 'title')

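            # fall back to the localized IMDb title when the direct search returns nothing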
            if not r:
                url = 'http://www.imdb.com/title/%s' % imdb
                url = client.request(url, headers={'Accept-Language':'es-ES'}, timeout='10')
                url = client.parseDOM(url, 'title')[0]
                url = re.sub(r'(?:\(|\s)\d{4}.+', '', url).strip()
                url = cleantitle.normalize(url.encode("utf-8"))
                url = self.moviesearch_link % cleantitle.geturl(url)

                r = urlparse.urljoin(self.base_link, url)
                r = client.request(r, limit='1', timeout='10')
                r = client.parseDOM(r, 'title')

            if year not in r[0]: raise Exception()

            return url
        except:
            pass
Example #2
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = self.s.get(url, headers=headers).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.findall(r'(\d+)', i[0])[0]) for i in r]
            results = []
            url = None
            for i in r:
                try:
                    info = client.request(urlparse.urljoin(self.base_link, self.info_link % i[2]), headers=headers, timeout='15')
                    y = re.findall(r'<div\s+class="jt-info">(\d{4})', info)[0]
                    # an exact title-and-year match wins immediately
                    if self.matchAlias(i[1], aliases) and (year == y):
                        url = i[0]
                        break
                    results.append((i[0], i[1], y))
                except:
                    pass

            # no exact year match: fall back to the first alias-only match
            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            return
Example #3
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))

            r = client.request(url)
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))

            results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]

            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except:
                url = None

            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]

            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)

            return url
        except:
            return
Example #4
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            url = self.tvsearch_link % cleantitle.geturl(tvshowtitle)

            r = urlparse.urljoin(self.base_link, url)
            r = client.request(r, limit='1')
            r = client.parseDOM(r, 'title')

            if not r:
                url = 'http://www.imdb.com/title/%s' % imdb
                url = client.request(url, headers={'Accept-Language':'es-ES'})
                url = client.parseDOM(url, 'title')[0]
                url = re.sub(r'\((?:.+?|)\d{4}.+', '', url).strip()
                url = cleantitle.normalize(url.encode("utf-8"))
                url = self.tvsearch_link % cleantitle.geturl(url)

                r = urlparse.urljoin(self.base_link, url)
                r = client.request(r, limit='1')
                r = client.parseDOM(r, 'title')

            if year not in r[0]: raise Exception()

            return url
        except:
            return
Example #5
	def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
		try:
			query = self.search_link % urllib.quote_plus(cleantitle.query(tvshowtitle))

			# retry up to 4 times to work around the site's intermittent 404s;
			# both good and bad responses come back quickly, so the short timeout is safe
			for i in range(4):
				result = client.request(query, timeout=3)
				if result is not None: break
			

			t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
			t = [cleantitle.get(i) for i in set(t) if i]
			result = re.compile(r'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)', re.DOTALL).findall(result)
			url = None
			for i in result:
				if cleantitle.get(cleantitle.normalize(i[1])) in t and year in i[1]: url = i[0]
			if url is None: return

			url = url.encode('utf-8')
			
			
			# returned 'url' format like: /serie/x_files 
			return url
		except:
			return
Example #6
 def searchShow(self, title, season, year, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         t = cleantitle.get(title)
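         # search "<title> Season N" first; fall back to a "<title> <year>" search below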
         url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s Season %01d' % (title.replace('\'', '-'), int(season)))))
         r = client.request(url, timeout='10')
         r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
         if r:
             r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
             r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
             r = [(i[0], re.findall(r'(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
             r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
             r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
         else:
             url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s %01d' % (title.replace('\'', '-'), int(year)))))
             r = client.request(url, timeout='10')
             r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
             r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
             r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
             r = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1])) for i in r]
             r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
             r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
         url = re.findall(r'(?://.+?|)(/.+)', r)[0]
         url = client.replaceHTMLCodes(url)
         return url.encode('utf-8')
     except:
         return
Example #7
 def getImdbTitle(self, imdb):
     try:
         t = 'http://www.omdbapi.com/?i=%s' % imdb
         t = client.request(t)
         t = json.loads(t)
         t = cleantitle.normalize(t['Title'])
         return t
     except:
         return
Example #8
 def searchMovie(self, title):
     try:
         title = cleantitle.normalize(title)
         url = urlparse.urljoin(self.base_link, self.search_link_2 % urllib.quote_plus(cleantitle.getsearch(title)))
         r = client.request(url, timeout='10')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         url = [i[0] for i in r if cleantitle.get(title) == cleantitle.get(i[1])][0]
         return url
     except:
         return
Example #9
 def searchMovie(self, title, headers):
     try:
         title = cleantitle.normalize(title)
         url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
         r = client.request(url, headers=headers, timeout='10')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'h2'))
         r = [i[0] for i in r if cleantitle.get(title) == cleantitle.get(i[1])][0]
         url = urlparse.urljoin(self.base_link, '%s/watching.html' % r)
         return url
     except:
         return
Example #10
 def searchMovie(self, title, year, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         u = urlparse.urljoin(self.base_link, self.search_link)
         p = urllib.urlencode({'keyword': title})
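         # the search endpoint is an XHR POST that returns HTML wrapped in JSON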
         r = client.request(u, post=p, XHR=True)
         r = json.loads(r)['content']
         r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
         url = [i[0] for i in r if self.matchAlias(i[1], aliases)][0]
         return url
     except:
         return
Example #11
 def getOriginalTitle(self, imdb):
     try:
         tmdb_link = base64.b64decode(
             'aHR0cHM6Ly9hcGkudGhlbW92aWVkYi5vcmcvMy9maW5kLyVzP2FwaV9rZXk9MTBiYWIxZWZmNzZkM2NlM2EyMzQ5ZWIxMDQ4OTRhNmEmbGFuZ3VhZ2U9ZW4tVVMmZXh0ZXJuYWxfc291cmNlPWltZGJfaWQ=')
         t = client.request(tmdb_link % imdb, timeout='10')
         title = None
         # the imdb id may resolve to a movie or a tv show; try both result buckets
         try: title = json.loads(t)['movie_results'][0]['original_title']
         except: pass
         try: title = json.loads(t)['tv_results'][0]['original_name']
         except: pass
         if title is None: return
         title = cleantitle.normalize(title)
         return title
     except:
         return
Example #12
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
         r = client.request(url, headers=headers, timeout='15')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         return url
     except:
         return
Example #13
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %s' % (title, season)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))

            r = client.request(url)

            url = re.findall(r'<a href="(.+?/movie/%s-season-%s-.+?\.html)"' % (cleantitle.geturl(title), season), r)[0]

            return url

        except:
            return
Example #14
 def searchMovie(self, title, year, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         title = cleantitle.getsearch(title)
         query = self.search_link % ('%s+%s' % (urllib.quote_plus(title), year))
         query = urlparse.urljoin(self.base_link, query)
         r = client.request(query, timeout='15', headers=headers, mobile=True)
         match = re.compile('alias=(.+?)\'">(.+?)</a>').findall(r)
         match = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1])) for i in match]
         match = [(i[0], i[1][0][0], i[1][0][1]) for i in match if len(i[1]) > 0]
         r = [(i[0],i[1]) for i in match if self.matchAlias(i[1], aliases) and year == i[2]][0]
         return r
     except:
         return
Example #15
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            cltitle = cleantitle.get(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(url, timeout='15')
            r = [i[1] for i in re.findall(r'<li\s+class=["\']movie-item["\'].*?data-title=["\']([^"\']+)["\']><a\s+href=["\']([^"\']+)["\']',r, re.IGNORECASE) 
                 if cleantitle.get(re.sub(r"\s*\d{4}","",i[0])) == cltitle]

            if not r: return
            url = r[0]
            return url
        except:
            return
Example #16
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         u = urlparse.urljoin(self.base_link, self.search_link)
         p = urllib.urlencode({'keyword': ('%s - Season %s' % (title, season))})
         r = client.request(u, post=p, XHR=True)
         r = json.loads(r)['content']
         r = zip(client.parseDOM(r, 'a', ret='href', attrs={'class': 'ss-title'}), client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         return url
     except:
         return
Example #17
    def searchShow(self, title, season, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)

            url = self.search_link % urllib.quote_plus('%s Season %01d' % (title, int(season)))
            r = client.request(url)

            r = client.parseDOM(r, 'div', attrs={'class': 'item_movie'})[0]

            if r:
                url = client.parseDOM(r, 'a', ret='href')[0]
                url = re.sub(r'//xmovies8\.es', '', url)

            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('XMovies - Exception: \n' + str(failure))
            return
Example #18
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %s' % (title, season)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))

            r = client.request(url)

            url = re.findall(r'<a href="(.+?/movie/%s-season-%s-.+?\.html)"' % (cleantitle.geturl(title), season), r)[0]

            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return
Example #19
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
         log_utils.log('Search URL: %s' % str(url), log_utils.LOGNOTICE)
         r = self.scraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         return url
     except Exception:
         return
Example #20
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.search_link % urllib.quote_plus(cleantitle.query(tvshowtitle))
            result = client.request(query)
            t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
            t = [cleantitle.get(i) for i in set(t) if i]
            result = re.compile(r'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)', re.DOTALL).findall(result)
            url = None
            for i in result:
                if cleantitle.get(cleantitle.normalize(i[1])) in t and year in i[1]: url = i[0]
            if url is None: return

            url = url.encode('utf-8')
            return url
        except:
            return
Example #21
 def searchMovie(self, title, year, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         url = urlparse.urljoin(self.base_link, self.search_link % (cleantitle.geturl(title.replace('\'', '-'))))
         r = client.request(url, timeout='10', headers=headers)
         r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
         r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
         r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
         r = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1])) for i in r]
         r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
         r = [i[0] for i in r if self.matchAlias(i[1], aliases) and year == i[2]][0]
         url = re.findall(r'(?://.+?|)(/.+)', r)[0]
         url = client.replaceHTMLCodes(url)
         return url.encode('utf-8')
     except:
         return
Example #22
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
         r = self.s.get(url, headers=headers).content
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('SolarMoviez - Exception: \n' + str(failure))
         return
Example #23
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            cltitle = cleantitle.get(title+'season'+season)
            cltitle2 = cleantitle.get(title+'season%02d'%int(season))
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
            r = client.request(url, timeout='15')
            r = [i[1] for i in re.findall(r'<li\s+class="movie-item".*?data-title=\"([^\"]+)\"><a\s+href="([^"]+)',r, re.IGNORECASE) 
                 if cleantitle.get(re.sub(r"\s*\d{4}","",i[0])) in [cltitle, cltitle2]]
  
            if not r: return
            url = r[0]

            return url
        except:
            return
Example #24
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(search))
         r = client.request(url, headers=headers, timeout='15')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('Series9 - Exception: \n' + str(failure))
         return  
Example #25
    def strmFile(self, i):
        try:
            name, title, year, imdb, tmdb = i['name'], i['title'], i['year'], i['imdb'], i['tmdb']

            sysname, systitle = urllib.quote_plus(name), urllib.quote_plus(title)

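            # strip characters that are illegal in file names before building library paths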
            transtitle = cleantitle.normalize(title.translate(None, '\/:*?"<>|'))

            content = '%s?action=play&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s' % (sys.argv[0], sysname, systitle, year, imdb, tmdb)

            folder = lib_tools.make_path(self.library_folder, transtitle, year)

            lib_tools.create_folder(folder)
            lib_tools.write_file(os.path.join(folder, lib_tools.legal_filename(transtitle) + '.strm'), content)
            lib_tools.write_file(os.path.join(folder, 'movie.nfo'), lib_tools.nfo_url('movie', i))
        except:
            pass
Example #26
    def movie(self, imdb, title, localtitle, aliases, year):
        try:            
            query = self.search_link % urllib.quote_plus(cleantitle.query(title))           
            for i in range(3):
                result = client.request(query, timeout=10)
                if result is not None: break

            t = [title] + [localtitle] + source_utils.aliases_to_array(aliases)
            t = [cleantitle.get(i) for i in set(t) if i]
            items = dom_parser.parse_dom(result, 'div', attrs={'class':'result'})
            url = None
            for i in items:
                result = re.findall(r'href="([^"]+)">(.*)<', i.content)
                if re.sub('<[^<]+?>', '', cleantitle.get(cleantitle.normalize(result[0][1]))) in t and year in result[0][1]: url = result[0][0]
                if url is not None: break

            if url is None: return
            url = url.encode('utf-8')
            return url
        except:
            return
Example #27
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = self.scraper.get(url).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except Exception:
                url = None

            if (url is None):
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except Exception:
            return
Example #28
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(
             self.base_link, self.search_link %
             urllib.quote_plus(cleantitle.getsearch(search)))
         r = client.request(url, headers=headers, timeout='15')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1]))
              for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [
             i[0] for i in r
             if self.matchAlias(i[2][0], aliases) and i[2][1] == season
         ][0]
         return url
     except:
         return
Example #29
    def strmFile(self, i):
        try:
            title, year, imdb, tvdb, season, episode, tvshowtitle, premiered = i['title'], i['year'], i['imdb'], i['tvdb'], i['season'], i['episode'], i['tvshowtitle'], i['premiered']

            episodetitle = urllib.quote_plus(title)
            systitle, syspremiered = urllib.quote_plus(tvshowtitle), urllib.quote_plus(premiered)

            transtitle = cleantitle.normalize(tvshowtitle.translate(None, '\/:*?"<>|'))

            content = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&date=%s' % (sys.argv[0], episodetitle, year, imdb, tvdb, season, episode, systitle, syspremiered)

            folder = lib_tools.make_path(self.library_folder, transtitle, year)
            if not os.path.isfile(os.path.join(folder, 'tvshow.nfo')):
                lib_tools.create_folder(folder)
                lib_tools.write_file(os.path.join(folder, 'tvshow.nfo'), lib_tools.nfo_url('tv', i))

            folder = lib_tools.make_path(self.library_folder, transtitle, year, season)
            lib_tools.create_folder(folder)
            lib_tools.write_file(os.path.join(folder, lib_tools.legal_filename('%s S%02dE%02d' % (transtitle, int(season), int(episode))) + '.strm'), content)
        except:
            pass
Example #30
 def searchMovie(self, title, year, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         title = cleantitle.getsearch(title)
         query = self.search_link % ('%s+%s' %
                                     (urllib.quote_plus(title), year))
         query = urlparse.urljoin(self.base_link, query)
         r = client.request(query,
                            timeout='15',
                            headers=headers,
                            mobile=True)
         match = re.compile('alias=(.+?)\'">(.+?)</a>').findall(r)
         match = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1]))
                  for i in match]
         match = [(i[0], i[1][0][0], i[1][0][1]) for i in match
                  if len(i[1]) > 0]
         r = [(i[0], i[1]) for i in match
              if self.matchAlias(i[1], aliases) and year == i[2]][0]
         return r
     except:
         return
Example #31
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except:
                url = None

            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            return
Example #32
    def strmFile(self, i):
        try:
            title, year, imdb, tvdb, season, episode, tvshowtitle, premiered = i['title'], i['year'], i['imdb'], i['tvdb'], i['season'], i['episode'], i['tvshowtitle'], i['premiered']

            episodetitle = urllib.quote_plus(title)
            systitle, syspremiered = urllib.quote_plus(tvshowtitle), urllib.quote_plus(premiered)

            transtitle = cleantitle.normalize(tvshowtitle.translate(None, '\/:*?"<>|'))

            content = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&date=%s' % (sys.argv[0], episodetitle, year, imdb, tvdb, season, episode, systitle, syspremiered)

            folder = lib_tools.make_path(self.library_folder, transtitle, year)
            if not os.path.isfile(os.path.join(folder, 'tvshow.nfo')):
                lib_tools.create_folder(folder)
                lib_tools.write_file(os.path.join(folder, 'tvshow.nfo'), lib_tools.nfo_url('tv', i))

            folder = lib_tools.make_path(self.library_folder, transtitle, year, season)
            lib_tools.create_folder(folder)
            lib_tools.write_file(os.path.join(folder, lib_tools.legal_filename('%s S%02dE%02d' % (transtitle, int(season), int(episode))) + '.strm'), content)
        except:
            pass
Example #33
    def searchMovie(self, title, year, aliases):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
            r = cfscrape.get(url, headers=self.headers).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except:
                url = None

            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]

            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except:
            return
Example #34
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except:
                url = None

            if url is None:
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return
Example #35
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(
             self.base_link, self.search_link %
             urllib.quote_plus(cleantitle.getsearch(search)))
         log_utils.log('Search URL: %s' % str(url), log_utils.LOGNOTICE)
         r = self.scraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1]))
              for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [
             i[0] for i in r
             if self.matchAlias(i[2][0], aliases) and i[2][1] == season
         ][0]
         return url
     except Exception:
         return
Example #36
	def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
		try:
			query = self.search_link % urllib.quote_plus(cleantitle.query(tvshowtitle))

			
			# retry up to 4 times to work around intermittent 404s
			for i in range(4):
				result = client.request(query, timeout=3)
				if result is not None: break
			

			t = [tvshowtitle] + source_utils.aliases_to_array(aliases)
			t = [cleantitle.get(i) for i in set(t) if i]
			result = re.compile(r'itemprop="url"\s+href="([^"]+).*?itemprop="name"\s+class="serie-title">([^<]+)', re.DOTALL).findall(result)
			url = None
			for i in result:
				if cleantitle.get(cleantitle.normalize(i[1])) in t and year in i[1]: url = i[0]
			if url is None: return

			url = url.encode('utf-8')
			return url
		except:
			return
Example #37
 def searchShow(self, title, season, year, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         t = cleantitle.get(title)
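         # try "<title> S01" first, then "<title> Season 1", then "<title> <year>"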
         url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s S%02d' % (title.replace('\'', '-'), int(season)))))
         sr = client.request(url, headers=headers, timeout='10')
         if sr:
             r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
             r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
             r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
             r = [(i[0], re.findall(r'(.+?)\s+-\s+S(\d+)', i[1])) for i in r]
             r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
             r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
         else:
             url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s Season %01d' % (title.replace('\'', '-'), int(season)))))
             sr = client.request(url, headers=headers, timeout='10')
             if sr:
                 r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                 r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                 r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                 r = [(i[0], re.findall(r'(.+?)\s+-\s+Season\s+(\d+)', i[1])) for i in r]
                 r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
                 r = [i[0] for i in r if t == cleantitle.get(i[1]) and int(season) == int(i[2])][0]
             else:
                 url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.query('%s %01d' % (title.replace('\'', '-'), int(year)))))
                 sr = client.request(url, headers=headers, timeout='10')
                 if sr:
                     r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                     r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
                     r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and len(i[1]) > 0]
                     r = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1])) for i in r]
                     r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
                     r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
         url = re.findall(r'(?://.+?|)(/.+)', r)[0]
         url = client.replaceHTMLCodes(url)
         return url.encode('utf-8')
     except:
         return
Example #38
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            cltitle = cleantitle.get(title + 'season' + season)
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                urllib.quote_plus(cleantitle.getsearch(search)))
            r = client.request(url, timeout='15')
            r = [
                i[0] for i in re.findall(
                    r"<li\s+class='movie-item'\s+data-url='([^']+)'\s+data-title=\"([^\"]+)\">",
                    r, re.IGNORECASE) if cleantitle.get(i[1]) == cltitle
            ]
            match = None
            for u in r:
                try:
                    result = client.request(urlparse.urljoin(self.base_link, u), timeout=10)
                    result = json.loads(result)['html']
                    match = re.findall(
                        r'Season\s+%s.*?class="jtip-bottom"><a\s+href="([^"]+)"'
                        % season, result, re.DOTALL)[0]
                    if match is not None: break
                except:
                    pass
            if match is None: return
            url = match

            url = '%s/watching.html' % url
            return url
        except:
            return
Example #39
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                urllib.quote_plus(cleantitle.getsearch(search)))

            s = cfscrape.create_scraper()
            r = s.get(url).content
            r = client.parseDOM(r, 'li', attrs={'class': 'movie-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1]))
                 for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            url = [
                i[0] for i in r
                if self.matchAlias(i[2][0], aliases) and i[2][1] == season
            ][0]
            return url
        except BaseException:
            return
Example #40
 def searchMovie(self, title, year):
     try:
         title = cleantitle.normalize(title)
         url = urlparse.urljoin(
             self.base_link, self.moviesearch_link %
             (cleantitle.geturl(title.replace('\'', '-'))))
         r = client.request(url)
         t = cleantitle.get(title)
         r = client.parseDOM(r, 'h2', attrs={'class': 'tit'})
         r = [(client.parseDOM(i, 'a', ret='href'),
               client.parseDOM(i, 'a', ret='title')) for i in r]
         r = [(i[0][0], i[1][0]) for i in r
              if len(i[0]) > 0 and len(i[1]) > 0]
         r = [(i[0], re.findall(r'(.+?) \((\d{4})', i[1])) for i in r]
         r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
         r = [
             i[0] for i in r if t in cleantitle.get(i[1]) and year == i[2]
         ][0]
         url = re.findall(r'(?://.+?|)(/.+)', r)[0]
         url = client.replaceHTMLCodes(url)
         return url.encode('utf-8')
     except:
         return
Example #41
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(
             self.base_link, self.search_link %
             urllib.quote_plus(cleantitle.getsearch(search)))
         r = self.s.get(url, headers=headers).content
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1]))
              for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [
             i[0] for i in r
             if self.matchAlias(i[2][0], aliases) and i[2][1] == season
         ][0]
         return url
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('SolarMoviez - Exception: \n' + str(failure))
         return
Example #42
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            cltitle = cleantitle.get(title + 'season' + season)
            cltitle2 = cleantitle.get(title + 'season%02d' % int(season))
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                urllib.quote_plus(cleantitle.getsearch(search)))
            r = client.request(url, timeout='15')
            r = [
                i[1] for i in re.findall(
                    r'<li\s+class=["\']movie-item["\'].*?data-title=["\']([^"\']+)["\']><a\s+href=["\']([^"\']+)["\']',
                    r, re.IGNORECASE) if cleantitle.get(
                        re.sub(r"\s*\d{4}", "", i[0])) in [cltitle, cltitle2]
            ]

            if not r: return
            url = r[0]

            return url
        except:
            return
Example #43
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(
             self.base_link, self.search_link % cleantitle.geturl(search))
         r = client.request(url, headers=headers, timeout='15')
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'),
                 client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1]))
              for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [
             i[0] for i in r
             if self.matchAlias(i[2][0], aliases) and i[2][1] == season
         ][0]
         url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         failure = traceback.format_exc()
         log_utils.log('Series9 - Exception: \n' + str(failure))
         return
Example #44
 def searchShow(self, title, season, aliases, headers):
     try:
         title = cleantitle.normalize(title)
         u = urlparse.urljoin(self.base_link, self.search_link)
         p = urllib.urlencode(
             {'keyword': ('%s - Season %s' % (title, season))})
         r = client.request(u, post=p, XHR=True)
         r = json.loads(r)['content']
         r = zip(
             client.parseDOM(r,
                             'a',
                             ret='href',
                             attrs={'class': 'ss-title'}),
             client.parseDOM(r, 'a', attrs={'class': 'ss-title'}))
         r = [(i[0], i[1], re.findall(r'(.*?)\s+-\s+Season\s+(\d)', i[1]))
              for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [
             i[0] for i in r
             if self.matchAlias(i[2][0], aliases) and i[2][1] == season
         ][0]
         return url
     except:
         return
Example #45
    def searchMovie(self, title, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall(r'\((\d{4})', i[1])) for i in r]
            try:
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except Exception:
                url = None

            if (url is None):
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]

            url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
            return url
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('SeriesOnline - Exception: \n' + str(failure))
            return
Example #46
 def strmFile(self, i):
     try:
         name, title, year, imdb = i['name'], i['title'], i['year'], i[
             'imdb']
         sysname, systitle = urllib.quote_plus(name), urllib.quote_plus(
             title)
         transtitle = cleantitle.normalize(
             title.translate(None, '\/:*?"<>|'))
         content = '%s?action=play&name=%s&title=%s&year=%s&imdb=%s' % (
             sys.argv[0], sysname, systitle, year, imdb)
         folder = lib_tools.make_path(self.library_folder, transtitle, year)
         lib_tools.create_folder(folder)
         lib_tools.write_file(
             os.path.join(
                 folder,
                 lib_tools.legal_filename(transtitle) + '.' + year +
                 '.strm'), content)
         lib_tools.write_file(
             os.path.join(
                 folder,
                 lib_tools.legal_filename(transtitle) + '.' + year +
                 '.nfo'), lib_tools.nfo_url('movie', i))
     except:
         pass
Example #47
    def getSources(self,
                   title,
                   year,
                   imdb,
                   tvdb,
                   season,
                   episode,
                   tvshowtitle,
                   premiered,
                   quality='HD',
                   timeout=20):
        u = None

        # discover the bundled provider modules; skip packages (directories)
        sourceDict = []
        for package, name, is_pkg in pkgutil.walk_packages(__path__):
            sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if not i[1]]

        content = 'movie' if tvshowtitle is None else 'episode'

        if content == 'movie':
            sourceDict = [
                i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))
            ]
        else:
            sourceDict = [
                i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))
            ]

        if quality == 'SD':
            quality = [
                'movie4k_mv', 'movie25_mv', 'onseries_tv', 'primewire_mv_tv',
                'watchfree_mv_tv', 'watchseries_tv', 'wmo_mv'
            ]
            sourceDict = [i for i in sourceDict if i in quality]

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.providercacheFile

        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict:
                threads.append(
                    workers.Thread(
                        self.getMovieSource, title, year, imdb,
                        re.sub('_mv_tv$|_mv$|_tv$', '', source),
                        __import__(source, globals(), locals(), [],
                                   -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            for source in sourceDict:
                threads.append(
                    workers.Thread(
                        self.getEpisodeSource, title, year, imdb, tvdb, season,
                        episode, tvshowtitle, premiered,
                        re.sub('_mv_tv$|_mv$|_tv$', '', source),
                        __import__(source, globals(), locals(), [],
                                   -1).source()))

        for i in threads: i.start()

        progressDialog = control.progressDialog
        progressDialog.create(control.addonInfo('name'),
                              control.lang(30726).encode('utf-8'))
        progressDialog.update(0)

        progressDialog.update(0,
                              control.lang(30726).encode('utf-8'),
                              control.lang(30731).encode('utf-8'))

        # poll the scraper threads until they all finish, the user cancels, or the timeout expires
        for i in range(0, timeout * 2):
            try:
                if progressDialog.iscanceled(): break
                if xbmc.abortRequested: return sys.exit()
                is_alive = [x.is_alive() for x in threads]
                if not any(is_alive): break
                time.sleep(0.5)
            except:
                pass

        # keep waiting up to 20 more seconds, but stop as soon as any source arrives
        for i in range(0, 20 * 2):
            try:
                if progressDialog.iscanceled(): break
                if xbmc.abortRequested: return sys.exit()
                is_alive = [x.is_alive() for x in threads]
                if not any(is_alive): break
                if self.sources: break
                time.sleep(0.5)
            except:
                pass

        progressDialog.update(50,
                              control.lang(30726).encode('utf-8'),
                              control.lang(30731).encode('utf-8'))

        items = self.sourcesFilter()

        # drop capped or blocked hosters unless a debrid account covers them
        filtered = [
            i for i in items
            if i['source'].lower() in self.hostcapDict and i['debrid'] == ''
        ]
        items = [i for i in items if i not in filtered]

        filtered = [
            i for i in items
            if i['source'].lower() in self.hostblockDict and i['debrid'] == ''
        ]
        items = [i for i in items if i not in filtered]

        items = [
            i for i in items if ('autoplay' in i and i['autoplay'] == True)
            or 'autoplay' not in i
        ]

        # resolve candidates in ranked order until one playable url is found
        for i in range(len(items)):
            try:
                if progressDialog.iscanceled(): break
                if xbmc.abortRequested: return sys.exit()
                url = self.sourcesResolve(items[i])
                if u is None: u = url
                if url is not None: break
            except:
                pass

        try:
            progressDialog.close()
        except:
            pass

        return u
Example #48
 def getTitle(self, title):
     title = cleantitle.normalize(title)
     return title
Example #49
    def getSources(title,
                   year,
                   imdb,
                   tvdb,
                   season,
                   episode,
                   tvshowtitle,
                   premiered,
                   timeout=30,
                   progress=True,
                   preset="search",
                   dialog=None,
                   exclude=None,
                   scraper_title=False):

        year = str(year)

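        # route to the movie or episode scrapers based on whether a show title was given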
        content = 'movie' if tvshowtitle is None else 'episode'

        if content == 'movie':
            title = cleantitle.normalize(title)
            links_scraper = nanscrapers.scrape_movie(title,
                                                     year,
                                                     imdb,
                                                     timeout=timeout,
                                                     exclude=exclude)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            links_scraper = nanscrapers.scrape_episode(tvshowtitle,
                                                       year,
                                                       premiered,
                                                       season,
                                                       episode,
                                                       imdb,
                                                       tvdb,
                                                       timeout=timeout,
                                                       exclude=exclude)
        else:
            return

        # Kodi settings are returned as strings, so compare against 'true' explicitly
        allow_debrid = control.setting('allow_debrid') == 'true'

        if control.setting('use_link_dialog') == 'true':
            if content == 'movie':
                link = nanscrapers.scrape_movie_with_dialog(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    sort_function=sources.sort_function)
            elif content == "episode":
                link = nanscrapers.scrape_episode_with_dialog(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    sort_function=sources.sort_function)
            else:
                return

            url = link['url']
            import urlresolver9
            hmf = urlresolver9.HostedMediaFile(url=url,
                                               include_disabled=False,
                                               include_universal=allow_debrid)
            if hmf.valid_url():
                resolved_url = hmf.resolve()
            else:
                resolved_url = None
            if resolved_url and sources().check_playable(
                    resolved_url) is not None:
                url = resolved_url
            return url

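        # first pass favours direct links; non-direct and SD links are deferred to later passes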
        sd_links = []
        non_direct = []
        sd_non_direct = []
        links_scraper = links_scraper()
        for scraper_links in links_scraper:
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return

                    if (not control.setting('allow_openload') == 'true'
                            and 'openload' in scraper_link['url']
                        ) or (
                            not control.setting('allow_the_video_me') == 'true'
                            and 'thevideo.me' in scraper_link['url']):
                        continue
                    if preset.lower() == 'searchsd':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality > 576:
                                continue
                        except:
                            if scraper_link['quality'] not in [
                                    "SD", "CAM", "SCR"
                            ]:
                                continue
                    elif preset.lower() == 'search':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality <= 576:
                                sd_links.append(scraper_link)
                                continue
                        except:
                            if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                                sd_links.append(scraper_link)
                                continue

                    if "m4u" in scraper_link['url']:
                        if sources().check_playable(
                                scraper_link['url']) is not None:
                            return scraper_link['url']

                    else:
                        try:
                            if scraper_link['direct']:
                                import urlresolver9
                                hmf = urlresolver9.HostedMediaFile(
                                    url=scraper_link['url'],
                                    include_disabled=False,
                                    include_universal=allow_debrid)
                                if hmf.valid_url():
                                    resolved_url = hmf.resolve()
                                else:
                                    resolved_url = None
                                if resolved_url and sources().check_playable(
                                        resolved_url) is not None:
                                    url = resolved_url
                                    return url
                                else:
                                    if sources().check_playable(
                                            scraper_link['url']):
                                        return scraper_link['url']
                            else:
                                non_direct.append(scraper_link)
                        except:
                            if scraper_link['direct']:
                                url = scraper_link['url']
                                if sources().check_playable(url) is not None:
                                    return url
                            else:
                                non_direct.append(scraper_link)

        for scraper_link in non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            if (not control.setting('allow_openload') == 'true'
                    and 'openload' in scraper_link['url']) or (
                        not control.setting('allow_the_video_me') == 'true'
                        and 'thevideo.me' in scraper_link['url']):
                continue
            if preset.lower() == 'searchsd':
                try:
                    quality = int(scraper_link['quality'])
                    if quality > 576:
                        continue
                except:
                    if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                        continue
            elif preset.lower() == 'search':
                try:
                    quality = int(scraper_link['quality'])
                    if quality <= 576:
                        sd_non_direct.append(scraper_link)
                        continue
                except:
                    if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                        sd_non_direct.append(scraper_link)
                        continue

            try:
                hmf = urlresolver9.HostedMediaFile(
                    url=scraper_link['url'],
                    include_disabled=False,
                    include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://")
                                 or sources().check_playable(resolved_url)
                                 is not None):
                url = resolved_url
                return url

        for scraper_link in sd_links:

            if dialog is not None and dialog.iscanceled():
                return

            if "m4u" in scraper_link['url']:
                return scraper_link['url']

            else:
                try:
                    if scraper_link['direct']:
                        import urlresolver9
                        hmf = urlresolver9.HostedMediaFile(
                            url=scraper_link['url'],
                            include_disabled=False,
                            include_universal=allow_debrid)
                        if hmf.valid_url():
                            resolved_url = hmf.resolve()
                        else:
                            resolved_url = None
                        if resolved_url and sources().check_playable(
                                resolved_url) is not None:
                            url = resolved_url
                            return url
                        else:
                            if sources().check_playable(scraper_link['url']):
                                return scraper_link['url']
                    else:
                        # the non_direct pass has already run; queue these for
                        # the SD hosted pass instead
                        sd_non_direct.append(scraper_link)
                except:
                    if scraper_link['direct']:
                        url = scraper_link['url']
                        if sources().check_playable(url) is not None:
                            return url
                    else:
                        sd_non_direct.append(scraper_link)

        try:
            import urlresolver9
        except:
            control.dialog.ok(
                "Dependency missing",
                "please install script.mrknow.urlresolver to resolve non-direct links"
            )
            return

        for scraper_link in sd_non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            try:
                hmf = urlresolver9.HostedMediaFile(
                    url=scraper_link['url'],
                    include_disabled=False,
                    include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://")
                                 or sources().check_playable(resolved_url)
                                 is not None):
                url = resolved_url
                return url
Ejemplo n.º 50
0
    def sources(self, url, hostDict, hostprDict):
        try:
            print '-------------------------------    -------------------------------'
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            print data

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = data['year']
            season = data['season'] if 'season' in data else False
            episode = data['episode'] if 'episode' in data else False
            localtitle = data['localtitle'] if 'localtitle' in data else False

            if season and episode:
                aTitle = tvmaze.tvMaze().getTVShowTranslation(
                    data['tvdb'], 'fr')
            else:
                aTitle = trakt.getMovieTranslation(data['imdb'], 'fr')

                if aTitle == None:
                    aTitle = title

            if season and episode:
                query = 'http://www.skstream.org/recherche/?s=' + urllib.quote_plus(
                    aTitle)
                query = urlparse.urljoin(self.base_link, query)
            else:
                query = self.key_link + 's=' + urllib.quote_plus(aTitle)
                query = urlparse.urljoin(self.base_link, query)

            print query

            r = client.request(query)

            try:
                r0 = re.compile('<center><h3>(.+?)</h3></center>').findall(
                    r)[0]

                if 'Aucun film à afficher.' in r0:
                    aTitle = re.sub(':', '-', aTitle)
                    query = self.key_link + 'r_film=' + urllib.quote_plus(
                        aTitle)
                    query = urlparse.urljoin(self.base_link, query)

                    r = client.request(query)
            except:
                pass

            print query

            t = cleantitle.get(cleantitle.normalize(aTitle))

            r = client.parseDOM(r, 'div', attrs={'class': 'panel-body'})
            r = client.parseDOM(
                r,
                'div',
                attrs={
                    'class':
                    'col-xs-3 col-sm-3 col-md-3 col-lg-3  movie_single'
                })
            r = [(client.parseDOM(client.parseDOM(
                i, 'div', attrs={'class': 'text-center'}),
                                  'a',
                                  ret='href'),
                  client.parseDOM(
                      i,
                      'img',
                      attrs={'class': 'img-responsive img-thumbnail'},
                      ret='title')) for i in r]
            r = [(i[0][0], i[1][0].lower()) for i in r
                 if len(i[0]) > 0 and len(i[1]) > 0]

            try:
                r = [i[0] for i in r if t + year == cleantitle.get(i[1])][0]
            except:
                for i in r:
                    if t == cleantitle.normalize(cleantitle.get(i[1])):
                        r = i[0]
                        break

            if season and episode:
                url = '%s%s' % (self.base_link, r)
            else:
                url = '%s/%s' % (self.base_link, r)

            url = client.replaceHTMLCodes(url)
            #url = url.encode('utf-8')

            url = urlparse.urljoin('http://www.skstream.org', url)

            print url

            r = client.request(url)

            if season and episode:
                r = r.replace(' = ', '=')

                desLiens = client.parseDOM(r,
                                           'div',
                                           attrs={'class': 'btn-group'})
                desLiens = client.parseDOM(desLiens, 'a', ret='href')

                urlTV = None
                tempSaisonEpisode = '/saison-%s/episode-%s/' % (season,
                                                                episode)
                for unLienTV in desLiens:
                    if '/series/' in unLienTV and tempSaisonEpisode in unLienTV:
                        urlTV = 'http://www.skstream.org/%s' % unLienTV
                        break

                if not urlTV: raise Exception()

                print urlTV

                s = client.request(urlTV)
                urlLoop = urlTV

                s = client.parseDOM(
                    s,
                    'table',
                    attrs={'class': 'players table table-striped table-hover'})
            else:
                urlLoop = url

                s = client.parseDOM(
                    r,
                    'table',
                    attrs={'class': 'players table table-striped table-hover'})

            leVideo = client.parseDOM(s,
                                      'input',
                                      attrs={'name': 'levideo'},
                                      ret='value')
            leHost = re.compile('&nbsp;(.+?)</a></form>').findall(s[0])
            #uneLangue = client.parseDOM(s, 'span', attrs={'class': 'badge'})
            #uneQualite = re.compile('</span></td>\n\s+<td>(.+?)</td>', re.MULTILINE | re.DOTALL).findall(s)

            leHost = [x.lower() for x in leHost]
            #uneLangue = [x.lower() for x in uneLangue]
            #uneQualite = [x.lower() for x in uneQualite]

            counter = 0
            for unVideo in leVideo:

                if season and episode:
                    url = urlLoop + '***' + unVideo + '***' + 'TV'
                else:
                    url = urlLoop + '***' + unVideo + '***' + 'Movie'

                url = url.encode('utf-8')

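                # hostDict may hold several aliases per hoster; take the
                # second match for openload and the first for everything else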
                try:
                    if leHost[counter] == 'openload':
                        host = filter(lambda s: leHost[counter] in str(s),
                                      hostDict)[1]
                    else:
                        host = filter(lambda s: leHost[counter] in str(s),
                                      hostDict)[0]
                except:
                    counter = counter + 1
                    continue

                host = host.encode('utf-8')

                #langue = uneLangue[counter]

                #quality2 = uneQualite[counter]
                #quality2 = re.sub('-', '', quality2)
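                # quality detection is disabled (see the commented-out lines
                # above), so quality2 stays empty and quality defaults to 'SD'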
                quality2 = ''

                if '1080p' in quality2:
                    quality = '1080p'
                elif '720p' in quality2 or 'bdrip' in quality2 or 'hdrip' in quality2:
                    quality = 'HD'
                else:
                    quality = 'SD'

                if 'dvdscr' in quality2 or 'r5' in quality2 or 'r6' in quality2:
                    quality2 = 'SCR'
                elif 'camrip' in quality2 or 'tsrip' in quality2 or 'hdcam' in quality2 or 'hdts' in quality2 or 'dvdcam' in quality2 or 'dvdts' in quality2 or 'cam' in quality2 or 'telesync' in quality2 or 'ts' in quality2:
                    quality2 = 'CAM'

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'FR',
                    'url': url,
                    'info': '',
                    'direct': False,
                    'debridonly': False
                })

                counter = counter + 1

            print sources

            return sources
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]

            print(exc_type, fname, exc_tb.tb_lineno)

            return sources
Ejemplo n.º 51
0
    def searchTitle(self, title):
        try:
            result = []

            q = urlparse.urljoin(self.base_link, self.search_link)

            data = {
                'keyword': title
            }
            post = urllib.urlencode(data)

            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
            }

            r = client.request(q, post=post, headers=headers)

            # Sometimes the search returns nothing, or the site redirects
            # straight to the show page instead of a results list.
            if (not 'Find anime' in r) or ('Not found' in r):
                try:
                    # this tries to catch the anime tv show directly if available...
                    if ('Not found' in r):
                        # makes a query url for the show
                        url_title = cleantitle.normalize(title)
                        url_title = re.sub(r'[^\w\d]', '-', url_title)
                        url_title = re.sub(r'\-+', '-', url_title)

                        q = urlparse.urljoin(self.base_link, self.anime_link % url_title)
                        
                        # requests the url
                        r = client.request(q)

                    x = client.parseDOM(r, 'div', attrs={'class': 'barContent'})[0]

                    t = client.parseDOM(x, 'a', attrs={'Class': 'bigChar'})[0]

                    if (cleantitle.get(title) == cleantitle.get(t)):
                        s = client.parseDOM(r, 'table', attrs={'class': 'listing'})[0]
                        s = self.getTableRows(s)

                        status = 0
                        for e in s:
                            match = re.search('episode\s(\d+)', e, flags=re.I)
                            if (match):
                                try:
                                    status = int( match.group(1) )
                                    break
                                except:
                                    pass

                        item = {
                            'status': status,
                            'title': title,
                            'url': str( client.parseDOM(x, 'a', ret='href', attrs={'Class': 'bigChar'})[0] )
                        }

                        if item not in result:
                            result.append(item)

                    return result
                except:
                    return

            r = client.parseDOM(r, 'table', attrs={'class': 'listing'})[0]
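            # strip inline title='...' attributes so their contents do not
            # pollute the cell parsing below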
            r = re.compile("(\stitle=\'[^\']+\')", flags=re.I).sub('', r)

            # Goes through search results
            for x in self.getTableRows(r):
                try:
                    tdList = client.parseDOM(x, 'td')
                    
                    url = client.parseDOM(tdList[0], 'a', ret='href')[0]
                    ititle = client.parseDOM(tdList[0], 'a')[0].replace(' (TV)', '')
                    status = tdList[1]

                    try:
                        ititle = ititle.encode('utf-8')
                    except:
                        pass

                    try: status = int( re.sub('[^\d]', '', client.parseDOM(status, 'a')[0] ))
                    except: pass

                    if (not isinstance(status, int)):
                        status = str( cleantitle.get( status ) )

                    item = {
                        'status': status,
                        'title': ititle,
                        'url': str( url )
                    }

                    if item['status'] == 'notyetaired':
                        continue

                    if item not in result:
                        result.append(item)

                except:
                    continue

            return result
        except:
            pass

        return []
Ejemplo n.º 52
0
    def searchShow(self, title, season, year, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            t = cleantitle.get(title)

            # first pass: compact '<title> S<season>' query (the parser below
            # expects result titles like 'Title - S<n>')
            url = urlparse.urljoin(
                self.base_link, self.search_link % urllib.quote_plus(
                    '%s S%01d' % (title, int(season))))
            sr = client.request(url, headers=headers, timeout='10')

            # direct hit: some result pages link the show in an 'item_movie'
            # block; use it straight away when present
            r = client.parseDOM(sr, 'div', attrs={'class': 'item_movie'})
            if r:
                url = client.parseDOM(r[0], 'a', ret='href')[0]
                url = re.sub(r'\/\/xmovies8\.es', '', url)
                url = client.replaceHTMLCodes(url)
                return url.encode('utf-8')
            if sr:
                r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      client.parseDOM(i, 'a', ret='title')) for i in r]
                r = [(i[0][0], i[1][0]) for i in r
                     if len(i[0]) > 0 and len(i[1]) > 0]
                r = [(i[0], re.findall('(.+?)\s+-\s+S(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                     if len(i[1]) > 0]
                r = [
                    i[0] for i in r
                    if t == cleantitle.get(i[1]) and int(season) == int(i[2])
                ][0]
            else:
                url = urlparse.urljoin(
                    self.base_link, self.search_link % urllib.quote_plus(
                        cleantitle.query(
                            '%s Season %01d' %
                            (title.replace('\'', '-'), int(season)))))
                sr = client.request(url, headers=headers, timeout='10')
                if sr:
                    r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                    r = [(client.parseDOM(i, 'a', ret='href'),
                          client.parseDOM(i, 'a', ret='title')) for i in r]
                    r = [(i[0][0], i[1][0]) for i in r
                         if len(i[0]) > 0 and len(i[1]) > 0]
                    r = [(i[0], re.findall('(.+?)\s+-\s+Season\s+(\d+)', i[1]))
                         for i in r]
                    r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                         if len(i[1]) > 0]
                    r = [
                        i[0] for i in r if t == cleantitle.get(i[1])
                        and int(season) == int(i[2])
                    ][0]
                else:
                    url = urlparse.urljoin(
                        self.base_link, self.search_link % urllib.quote_plus(
                            cleantitle.query(
                                '%s %01d' %
                                (title.replace('\'', '-'), int(year)))))
                    sr = client.request(url, headers=headers, timeout='10')
                    if sr:
                        r = client.parseDOM(sr, 'h2', attrs={'class': 'tit'})
                        r = [(client.parseDOM(i, 'a', ret='href'),
                              client.parseDOM(i, 'a', ret='title')) for i in r]
                        r = [(i[0][0], i[1][0]) for i in r
                             if len(i[0]) > 0 and len(i[1]) > 0]
                        r = [(i[0], re.findall('(.+?) \((\d{4})', i[1]))
                             for i in r]
                        r = [(i[0], i[1][0][0], i[1][0][1]) for i in r
                             if len(i[1]) > 0]
                        r = [
                            i[0] for i in r
                            if t == cleantitle.get(i[1]) and year == i[2]
                        ][0]
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)

            return url.encode('utf-8')
        except:
            failure = traceback.format_exc()
            log_utils.log('XMovies - Exception: \n' + str(failure))
            return
Ejemplo n.º 53
0
def download(name, image, url):

    if url == None: return

    from resources.lib.modules import control, cleantitle

    try:
        headers = dict(urllib_parse.parse_qsl(url.rsplit('|', 1)[1]))
    except:
        headers = {}

    url = url.split('|')[0]

    content = re.compile('(.+?)\sS(\d*)E\d*$').findall(name)
    try:
        transname = name.translate(None, '\/:*?"<>|').strip('.')
    except:
        transname = name.translate(str.maketrans('', '',
                                                 '\/:*?"<>|')).strip('.')
    transname = cleantitle.normalize(transname)
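    # create any missing parent directories of the download path before
    # creating the destination folder itself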
    levels = ['../../../..', '../../..', '../..', '..']

    if len(content) == 0:
        dest = control.setting('movie.download.path')
        dest = control.transPath(dest)
        for level in levels:
            try:
                control.makeFile(os.path.abspath(os.path.join(dest, level)))
            except:
                pass
        control.makeFile(dest)
        dest = os.path.join(dest, transname)
        control.makeFile(dest)
    else:
        dest = control.setting('tv.download.path')
        dest = control.transPath(dest)
        for level in levels:
            try:
                control.makeFile(os.path.abspath(os.path.join(dest, level)))
            except:
                pass
        control.makeFile(dest)
        try:
            transtvshowtitle = content[0][0].translate(None,
                                                       '\/:*?"<>|').strip('.')
        except:
            transtvshowtitle = content[0][0].translate(
                str.maketrans('', '', '\/:*?"<>|')).strip('.')
        dest = os.path.join(dest, transtvshowtitle)
        control.makeFile(dest)
        dest = os.path.join(dest, 'Season %01d' % int(content[0][1]))
        control.makeFile(dest)

    ext = os.path.splitext(urllib_parse.urlparse(url).path)[1][1:]
    if ext not in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4'
    dest = os.path.join(dest, transname + '.' + ext)

    sysheaders = urllib_parse.quote_plus(json.dumps(headers))

    sysurl = urllib_parse.quote_plus(url)

    systitle = urllib_parse.quote_plus(name)

    sysimage = urllib_parse.quote_plus(image)

    sysdest = urllib_parse.quote_plus(dest)

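    # hand off to this same script via RunScript so the actual download runs
    # in a separate invocation with url/dest/title/image/headers as arguments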
    script = inspect.getfile(inspect.currentframe())
    cmd = 'RunScript(%s, %s, %s, %s, %s, %s)' % (
        script, sysurl, sysdest, systitle, sysimage, sysheaders)

    xbmc.executebuiltin(cmd)
Ejemplo n.º 54
0
    def getSources(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, presetDict=[], timeout=30, progress=True):
        sourceDict = []
        for package, name, is_pkg in pkgutil.walk_packages(__path__): sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]

        if not presetDict == []: sourceDict = [i for i in presetDict if i in sourceDict]

        content = 'movie' if tvshowtitle == None else 'episode'

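        # provider module filenames end in _mv, _tv or _mv_tv, marking which
        # content type each scraper supports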
        if content == 'movie':
            sourceDict = [i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))]
        else:
            sourceDict = [i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))]

        try: sourceDict = [(i, control.setting('provider.' + re.sub('_mv_tv$|_mv$|_tv$', '', i))) for i in sourceDict]
        except: sourceDict = [(i, 'true') for i in sourceDict]

        sourceDict = [i[0] for i in sourceDict if not i[1] == 'false']

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.providercacheFile


        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict: threads.append(workers.Thread(self.getMovieSource, title, year, imdb, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            for source in sourceDict: threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, re.sub('_mv_tv$|_mv$|_tv$', '', source), __import__(source, globals(), locals(), [], -1).source()))


        try: timeout = int(control.setting('scrapers.timeout.1'))
        except: pass

        [i.start() for i in threads]

        control.idle()

        sourceLabel = [re.sub('_mv_tv$|_mv$|_tv$', '', i) for i in sourceDict]
        sourceLabel = [re.sub('v\d+$', '', i).upper() for i in sourceLabel]

        if progress == True:
            self.progressDialog = control.progressDialog
            self.progressDialog.create(control.addonInfo('name'), '')
            self.progressDialog.update(0)

        string1 = control.lang(30512).encode('utf-8')
        string2 = control.lang(30513).encode('utf-8')
        string3 = control.lang(30514).encode('utf-8')

        for i in range(0, timeout * 2):
            try:
                if xbmc.abortRequested == True: return sys.exit()

                try: info = [sourceLabel[int(re.sub('[^0-9]', '', str(x.getName()))) - 1] for x in threads if x.is_alive() == True]
                except: info = []

                if len(info) > 5: info = len(info)

                if progress == True:
                    self.progressDialog.update(int((100 / float(len(threads))) * len([x for x in threads if x.is_alive() == False])), str('%s: %s %s' % (string1, int(i * 0.5), string2)), str('%s: %s' % (string3, str(info).translate(None, "[]'"))))
                    if self.progressDialog.iscanceled(): break

                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                time.sleep(0.5)
            except:
                pass

        try: self.progressDialog.close()
        except: pass

        self.sourcesFilter()

        return self.sources
Ejemplo n.º 55
0
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            is_anime = url[3]
            titles = []
            titles.append(url[0])
            titles.append(url[1])
            try:
                year = url[2]
            except:
                year = ''
            for url_single in titles:
                url_single = cleantitle.normalize(cleantitle.getsearch(url_single))
                words = url_single.split(' ')
                search_url = urlparse.urljoin(self.base_link, self.search_link) % (url_single + " " + year)

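                # the site expects a computed 'tmvh' cookie: fetch the verify
                # page, derive the hash, and cache the cookie string for reuse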
                cookies = client.request(self.base_link, output='cookie')
                verifyGet = client.request(self.verify, cookie=cookies)
                cookies = cookies + ";tmvh=" + self.crazy_cookie_hash(verifyGet)
                cache.cache_insert('szukajka_cookie', cookies)

                result = client.request(search_url, cookie=cookies)
                result = client.parseDOM(result, 'div', attrs={'class': 'element'})

                for el in result:

                    found_title = client.parseDOM(el, 'div', attrs={'class': 'title'})[0]
                    found_title = str(found_title).lower().replace('_', ' ').replace('.', ' ').replace('-', ' ')
                    if is_anime:
                        numbers = [int(s) for s in found_title.split() if s.isdigit()]
                        if not int(words[-1]) in numbers:
                            continue
                    if ("zwiastun" or "trailer") in str(found_title).lower():
                        continue
                    if len(words) >= 4 or is_anime:
                        if not self.contains_all_words(found_title, words):
                            continue
                    else:
                        if not self.contains_all_words(found_title, words) or year not in found_title:
                            continue
                    q = 'SD'
                    if self.contains_word(found_title, '1080p') or self.contains_word(found_title, 'FHD'):
                        q = '1080p'
                    elif self.contains_word(found_title, '720p'):
                        q = 'HD'

                    link = client.parseDOM(el, 'a', attrs={'class': 'link'}, ret='href')[0]
                    transl_type = client.parseDOM(el, 'span', attrs={'class': 'version'})[0]
                    transl_type = transl_type.split(' ')
                    transl_type = transl_type[-1]

                    host = client.parseDOM(el, 'span', attrs={'class': 'host'})[0]
                    host = host.split(' ')
                    host = host[-1]
                    lang, info = self.get_lang_by_type(transl_type)
                    sources.append(
                        {'source': host, 'quality': q, 'language': lang, 'url': link, 'info': info, 'direct': False,
                         'debridonly': False})
            return sources
        except Exception as e:
            print(str(e))
            return sources
Ejemplo n.º 56
0
    def getMusicSources(title, artist, timeout=30, progress=True, preset="search", dialog=None, exclude=None,
                        queueing=False):
        title = cleantitle.normalize(title)
        links_scraper = nanscrapers.scrape_song(title, artist, timeout=timeout, exclude=exclude)

        sd_links = []
        non_direct = []
        sd_non_direct = []
        allow_debrid = control.setting('allow_debrid') == 'true'

        for scraper_links in links_scraper():
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return

                    if (not control.setting('allow_openload') == 'true' and 'openload' in scraper_link['url']) or (
                                not control.setting('allow_the_video_me') == 'true' and 'thevideo.me' in scraper_link[
                                'url']):
                        continue
                    if preset.lower() == 'searchsd':
                        if scraper_link['quality'] not in ["SD"]:
                            continue
                    elif preset.lower() == 'search':
                        if scraper_link['quality'] in ["SD"]:
                            sd_links.append(scraper_link)
                            continue

                    if scraper_link['direct']:
                        url = scraper_link['url']
                        if sources().check_playable(url) is not None:
                            return url
                        # unplayable direct links get a second chance through
                        # the resolver pass below
                        non_direct.append(scraper_link)
                    else:
                        non_direct.append(scraper_link)

        # fallback passes run once every scraper has reported
        for scraper_link in sd_links:
            if dialog is not None and dialog.iscanceled():
                return
            if scraper_link['direct']:
                url = scraper_link['url']
                if sources().check_playable(url) is not None:
                    return url
                non_direct.append(scraper_link)
            else:
                non_direct.append(scraper_link)

        try:
            import urlresolver9
        except:
            control.dialog.ok("Dependency missing",
                              "please install script.mrknow.urlresolver to resolve non-direct links")
            return

        for scraper_link in non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            if preset.lower() == 'searchsd':
                if scraper_link['quality'] not in ["SD"]:
                    continue
            elif preset.lower() == 'search':
                if scraper_link['quality'] in ["SD"]:
                    sd_non_direct.append(scraper_link)
                    continue
            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://") or
                                 sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url

        for scraper_link in sd_non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://") or
                                 sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url
Ejemplo n.º 57
0
    def getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, timeout=30,
                   progress=True, preset="search", dialog=None, exclude=None, scraper_title=False, queueing=False):

        year = str(year)
        content = 'movie' if tvshowtitle == None else 'episode'
        allow_debrid = control.setting('allow_debrid') == "true"

        if content == 'movie':
            title = cleantitle.normalize(title)
            links_scraper = nanscrapers.scrape_movie(title, year, imdb, timeout=timeout, exclude=exclude,
                                                     enable_debrid=allow_debrid)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            links_scraper = nanscrapers.scrape_episode(tvshowtitle, year, premiered, season, episode, imdb, tvdb,
                                                       timeout=timeout, exclude=exclude, enable_debrid=allow_debrid)
        else:
            return

        if not queueing and control.setting('use_link_dialog') == 'true':
            check_url = control.setting('check_url') == 'true'
            if content == 'movie':
                link, items = nanscrapers.scrape_movie_with_dialog(title, year, imdb, timeout=timeout, exclude=exclude,
                                                                   sort_function=sources.sort_function,
                                                                   check_url=check_url, extended=True,
                                                                   enable_debrid=allow_debrid)
            elif content == "episode":
                link, items = nanscrapers.scrape_episode_with_dialog(tvshowtitle, year, premiered, season, episode,
                                                                     imdb, tvdb,
                                                                     timeout=timeout, exclude=exclude,
                                                                     sort_function=sources.sort_function,
                                                                     check_url=check_url, extended=True,
                                                                     enable_debrid=allow_debrid)
            else:
                return

            if link is None:
                return False

            if type(link) == dict and "path" in link:
                link = link["path"]
            url = link['url']
            if allow_debrid:
                new_url = debrid.resolve(url)
                if new_url:
                    url = new_url
            import urlresolver9
            try:
                hmf = urlresolver9.HostedMediaFile(url=url, include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url() == True:
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = url
            except:
                resolved_url = None

            if resolved_url and sources().check_playable(resolved_url) is not None:
                url = resolved_url
            else:
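                # 'link_fallthtough' (sic, the setting id): walk the remaining
                # dialog entries in order until one resolves and plays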
                if control.setting('link_fallthtough') == 'true':
                    try:
                        links = []
                        for item in items:
                            if type(item) == dict and "path" in item:
                                links.extend(item["path"][1])
                            else:
                                links.extend(item[1])
                        index = links.index(link)
                        links = links[index + 1:]
                        for link in links:
                            try:
                                hmf = urlresolver9.HostedMediaFile(url=link["url"], include_disabled=False,
                                                                   include_universal=allow_debrid)
                                if hmf.valid_url() == True:
                                    resolved_url = hmf.resolve()
                                else:
                                    resolved_url = None
                            except:
                                resolved_url = None

                            if resolved_url and sources().check_playable(resolved_url) is not None:
                                url = resolved_url
                                return url
                        url = None
                    except:
                        pass
                else:
                    url = None
            return url

        sd_links = []
        non_direct = []
        sd_non_direct = []
        links_scraper = links_scraper()
        for scraper_links in links_scraper:
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return

                    if allow_debrid:
                        new_url = debrid.resolve(scraper_link['url'])
                        if new_url:
                            scraper_link['url'] = new_url

                    if (not control.setting('allow_openload') == 'true' and 'openload' in scraper_link['url']) or (
                                not control.setting('allow_the_video_me') == 'true' and 'thevideo.me' in scraper_link[
                                'url']):
                        continue
                    if preset.lower() == 'searchsd':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality > 576:
                                continue
                        except:
                            if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                                continue
                    elif preset.lower() == 'search':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality <= 576:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue
                        except:
                            if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue

                    if "m4u" in scraper_link['url']:
                        if sources().check_playable(scraper_link['url']) is not None:
                            return scraper_link['url']

                    else:
                        try:
                            if scraper_link['direct']:
                                import urlresolver9
                                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                                   include_universal=allow_debrid)
                                if hmf.valid_url():
                                    resolved_url = hmf.resolve()
                                else:
                                    resolved_url = None
                                if resolved_url and sources().check_playable(resolved_url) is not None:
                                    url = resolved_url
                                    return url
                                else:
                                    if sources().check_playable(scraper_link['url']):
                                        return scraper_link['url']
                            else:
                                non_direct.append(scraper_link)
                        except:
                            if scraper_link['direct']:
                                url = scraper_link['url']
                                if sources().check_playable(url) is not None:
                                    return url
                            else:
                                non_direct.append(scraper_link)

        for scraper_link in non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            if (not control.setting('allow_openload') == 'true' and 'openload' in scraper_link['url']) or (
                        not control.setting('allow_the_video_me') == 'true' and 'thevideo.me' in scraper_link[
                        'url']):
                continue
            if preset.lower() == 'searchsd':
                try:
                    quality = int(scraper_link['quality'])
                    if quality > 576:
                        continue
                except:
                    if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                        continue
            elif preset.lower() == 'search':
                try:
                    quality = int(scraper_link['quality'])
                    if quality <= 576:
                        sd_non_direct.append(scraper_link)
                        continue
                except:
                    if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                        sd_non_direct.append(scraper_link)
                        continue

            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
            except:
                continue
            if resolved_url and (
                        resolved_url.startswith("plugin://") or sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url

        for scraper_link in sd_links:

            if dialog is not None and dialog.iscanceled():
                return

            if "m4u" in scraper_link['url']:
                return scraper_link['url']

            else:
                try:
                    if scraper_link['direct']:
                        import urlresolver9
                        hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                           include_universal=allow_debrid)
                        if hmf.valid_url():
                            resolved_url = hmf.resolve()
                        else:
                            resolved_url = None
                        if resolved_url and sources().check_playable(resolved_url) is not None:
                            url = resolved_url
                            return url
                        else:
                            if sources().check_playable(scraper_link['url']):
                                return scraper_link['url']
                    else:
                        # the non_direct pass has already run; queue these for
                        # the SD hosted pass instead
                        sd_non_direct.append(scraper_link)
                except:
                    if scraper_link['direct']:
                        url = scraper_link['url']
                        if sources().check_playable(url) is not None:
                            return url
                    else:
                        sd_non_direct.append(scraper_link)

        try:
            import urlresolver9
        except:
            control.dialog.ok("Dependency missing",
                              "please install script.mrknow.urlresolver to resolve non-direct links")
            return

        for scraper_link in sd_non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
            except:
                continue
            if resolved_url and (
                        resolved_url.startswith("plugin://") or sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url
Ejemplo n.º 58
0
    def getSources(self,
                   title,
                   year,
                   imdb,
                   tvdb,
                   season,
                   episode,
                   tvshowtitle,
                   premiered,
                   presetDict=[],
                   timeout=30,
                   progress=True):
        sourceDict = []
        for package, name, is_pkg in pkgutil.walk_packages(__path__):
            sourceDict.append((name, is_pkg))
        sourceDict = [i[0] for i in sourceDict if i[1] == False]

        if not presetDict == []:
            sourceDict = [i for i in presetDict if i in sourceDict]

        content = 'movie' if tvshowtitle == None else 'episode'

        if content == 'movie':
            sourceDict = [
                i for i in sourceDict if i.endswith(('_mv', '_mv_tv'))
            ]
        else:
            sourceDict = [
                i for i in sourceDict if i.endswith(('_tv', '_mv_tv'))
            ]

        try:
            sourceDict = [(i,
                           control.setting('provider.' +
                                           re.sub('_mv_tv$|_mv$|_tv$', '', i)))
                          for i in sourceDict]
        except:
            sourceDict = [(i, 'true') for i in sourceDict]

        sourceDict = [i[0] for i in sourceDict if not i[1] == 'false']

        threads = []

        control.makeFile(control.dataPath)
        self.sourceFile = control.providercacheFile

        if content == 'movie':
            title = cleantitle.normalize(title)
            for source in sourceDict:
                threads.append(
                    workers.Thread(
                        self.getMovieSource, title, year, imdb,
                        re.sub('_mv_tv$|_mv$|_tv$', '', source),
                        __import__(source, globals(), locals(), [],
                                   -1).source()))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            for source in sourceDict:
                threads.append(
                    workers.Thread(
                        self.getEpisodeSource, title, year, imdb, tvdb, season,
                        episode, tvshowtitle, premiered,
                        re.sub('_mv_tv$|_mv$|_tv$', '', source),
                        __import__(source, globals(), locals(), [],
                                   -1).source()))

        try:
            timeout = int(control.setting('scrapers.timeout.1'))
        except:
            pass

        [i.start() for i in threads]

        control.idle()

        sourceLabel = [re.sub('_mv_tv$|_mv$|_tv$', '', i) for i in sourceDict]
        sourceLabel = [re.sub('v\d+$', '', i).upper() for i in sourceLabel]

        if progress == True:
            self.progressDialog = control.progressDialog
            self.progressDialog.create(control.addonInfo('name'), '')
            self.progressDialog.update(0)

        string1 = control.lang(30512).encode('utf-8')
        string2 = control.lang(30513).encode('utf-8')
        string3 = control.lang(30514).encode('utf-8')

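        # poll twice a second for up to `timeout` seconds, updating the dialog
        # with the names of providers that are still running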
        for i in range(0, timeout * 2):
            try:
                if xbmc.abortRequested == True: return sys.exit()

                try:
                    info = [
                        sourceLabel[int(re.sub('[^0-9]', '', str(x.getName())))
                                    - 1] for x in threads
                        if x.is_alive() == True
                    ]
                except:
                    info = []

                if len(info) > 5: info = len(info)

                if progress == True:
                    self.progressDialog.update(
                        int((100 / float(len(threads))) *
                            len([x
                                 for x in threads if x.is_alive() == False])),
                        str('%s: %s %s' % (string1, int(i * 0.5), string2)),
                        str('%s: %s' %
                            (string3, str(info).translate(None, "[]'"))))
                    if self.progressDialog.iscanceled(): break

                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                time.sleep(0.5)
            except:
                pass

        try:
            self.progressDialog.close()
        except:
            pass

        self.sourcesFilter()

        return self.sources
Ejemplo n.º 59
0
    def getSources(title,
                   year,
                   imdb,
                   tvdb,
                   season,
                   episode,
                   tvshowtitle,
                   premiered,
                   timeout=20,
                   progress=True,
                   preset="search",
                   dialog=None,
                   exclude=None,
                   scraper_title=False,
                   queueing=False):
        progressDialog = control.progressDialog
        progressDialog.create(control.addonInfo('name'), '')
        progressDialog.update(
            0, '[COLOR yellow]Scraping the net for Streams[/COLOR]',
            'Please Be Patient...')
        year = str(year)
        content = 'movie' if tvshowtitle == None else 'episode'
        allow_debrid = control.setting('allow_debrid') == "true"

        if content == 'movie':
            title = cleantitle.normalize(title)
            links_scraper = nanscrapers.scrape_movie(
                title,
                year,
                imdb,
                timeout=timeout,
                exclude=exclude,
                enable_debrid=allow_debrid)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            links_scraper = nanscrapers.scrape_episode(
                tvshowtitle,
                year,
                premiered,
                season,
                episode,
                imdb,
                tvdb,
                timeout=timeout,
                exclude=exclude,
                enable_debrid=allow_debrid)
        else:
            return

        sd_links = []
        non_direct = []
        sd_non_direct = []
        links_scraper = links_scraper()
        for scraper_links in links_scraper:
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return

                    if allow_debrid:
                        new_url = debrid.resolve(scraper_link['url'])
                        if new_url:
                            scraper_link['url'] = new_url

                    if (not control.setting('allow_openload') == 'true'
                            and 'openload' in scraper_link['url']
                        ) or (
                            not control.setting('allow_the_video_me') == 'true'
                            and 'thevideo.me' in scraper_link['url']):
                        continue
                    if preset.lower() == 'searchsd':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality > 576:
                                continue
                        except:
                            if scraper_link['quality'] not in [
                                    "SD", "CAM", "SCR"
                            ]:
                                continue
                    elif preset.lower() == 'search':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality <= 576:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue
                        except:
                            if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue

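                    # "m4u" links are treated as directly playable and are
                    # returned without a resolver round-trip once
                    # check_playable accepts them.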
                    if "m4u" in scraper_link['url']:
                        if sources().check_playable(
                                scraper_link['url']) is not None:
                            return scraper_link['url']

                    else:
                        try:
                            if scraper_link['direct']:
                                import urlresolver9
                                hmf = urlresolver9.HostedMediaFile(
                                    url=scraper_link['url'],
                                    include_disabled=False,
                                    include_universal=allow_debrid)
                                # Resolve only when the resolver recognises
                                # the host; otherwise resolved_url stays None
                                # rather than raising NameError below.
                                resolved_url = hmf.resolve() if hmf.valid_url() else None
                                if resolved_url and sources().check_playable(
                                        resolved_url) is not None:
                                    return resolved_url
                                elif sources().check_playable(
                                        scraper_link['url']):
                                    return scraper_link['url']
                            else:
                                non_direct.append(scraper_link)
                        except:
                            if scraper_link['direct']:
                                url = scraper_link['url']
                                if sources().check_playable(url) is not None:
                                    return url
                            else:
                                non_direct.append(scraper_link)

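        # Second pass: non-direct HD links, resolved via urlresolver9.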
        for scraper_link in non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            # Skip hosts the user has disabled in the addon settings.
            if ((control.setting('allow_openload') != 'true'
                 and 'openload' in scraper_link['url'])
                    or (control.setting('allow_the_video_me') != 'true'
                        and 'thevideo.me' in scraper_link['url'])):
                continue
            if preset.lower() == 'searchsd':
                try:
                    quality = int(scraper_link['quality'])
                    if quality > 576:
                        continue
                except:
                    if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                        continue
            elif preset.lower() == 'search':
                try:
                    quality = int(scraper_link['quality'])
                    if quality <= 576:
                        sd_non_direct.append(scraper_link)
                        continue
                except:
                    if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                        sd_non_direct.append(scraper_link)
                        continue

            try:
                hmf = urlresolver9.HostedMediaFile(
                    url=scraper_link['url'],
                    include_disabled=False,
                    include_universal=allow_debrid)
                # resolved_url must be bound here: the check below sits
                # outside this try block, so an unbound name would crash.
                resolved_url = hmf.resolve() if hmf.valid_url() else None
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://")
                                 or sources().check_playable(resolved_url)
                                 is not None):
                return resolved_url

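        # Third pass: direct SD links deferred from the first pass.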
        for scraper_link in sd_links:

            if dialog is not None and dialog.iscanceled():
                return

            if "m4u" in scraper_link['url']:
                return scraper_link['url']

            else:
                try:
                    if scraper_link['direct']:
                        import urlresolver9
                        hmf = urlresolver9.HostedMediaFile(
                            url=scraper_link['url'],
                            include_disabled=False,
                            include_universal=allow_debrid)
                        # Same guard as above: resolved_url stays None when
                        # the resolver does not recognise the host.
                        resolved_url = hmf.resolve() if hmf.valid_url() else None
                        if resolved_url and sources().check_playable(
                                resolved_url) is not None:
                            return resolved_url
                        elif sources().check_playable(scraper_link['url']):
                            return scraper_link['url']
                    else:
                        # The non_direct list has already been processed by
                        # this point, so defer to the SD non-direct pass.
                        sd_non_direct.append(scraper_link)
                except:
                    if scraper_link['direct']:
                        url = scraper_link['url']
                        if sources().check_playable(url) is not None:
                            return url
                    else:
                        sd_non_direct.append(scraper_link)

        # urlresolver9 may not have been imported yet (the imports above
        # only run when a direct link is seen), so verify the dependency
        # before the final resolving pass.
        try:
            import urlresolver9
        except:
            control.dialog.ok(
                "Dependency missing",
                "Please install script.mrknow.urlresolver to resolve non-direct links."
            )
            return

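        # Final pass: SD links that still need resolving.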
        for scraper_link in sd_non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            try:
                hmf = urlresolver9.HostedMediaFile(
                    url=scraper_link['url'],
                    include_disabled=False,
                    include_universal=allow_debrid)
                # Keep resolved_url bound even when the host is not
                # recognised, so the check below sees None, not NameError.
                resolved_url = hmf.resolve() if hmf.valid_url() else None
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://")
                                 or sources().check_playable(resolved_url)
                                 is not None):
                return resolved_url
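
        # NOTE (assumption): the early returns above leave progressDialog
        # open; presumably Kodi's player window replaces it once playback
        # starts, or the caller closes it.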
        try:
            progressDialog.close()
        except:
            pass