Example #1
    def root(self):
        self.addDirectoryItem(30001, 'movieNavigator', 'movies.png', 'DefaultMovies.png')
        self.addDirectoryItem(30002, 'tvNavigator', 'tvshows.png', 'DefaultTVShows.png')
        self.addDirectoryItem(30003, 'channels', 'channels.png', 'DefaultMovies.png')

        if not control.setting('lists.widget') == '0':
            self.addDirectoryItem(30004, 'myNavigator', 'userlists.png', 'DefaultVideoPlaylists.png')

        if not control.setting('movie.widget') == '0':
            self.addDirectoryItem(30005, 'movieWidget', 'latest-movies.png', 'DefaultRecentlyAddedMovies.png', queue=True)

        if (traktIndicators == True and not control.setting('tv.widget.alt') == '0') or (traktIndicators == False and not control.setting('tv.widget') == '0'):
            self.addDirectoryItem(30006, 'tvWidget', 'latest-episodes.png', 'DefaultRecentlyAddedEpisodes.png', queue=True)

        if not control.setting('calendar.widget') == '0':
            self.addDirectoryItem(30007, 'calendars', 'calendar.png', 'DefaultRecentlyAddedEpisodes.png')

        self.addDirectoryItem(30008, 'toolNavigator', 'tools.png', 'DefaultAddonProgram.png')

        downloads = control.setting('downloads') == 'true' and (len(control.listDir(control.setting('movie.download.path'))[0]) > 0 or len(control.listDir(control.setting('tv.download.path'))[0]) > 0)
        if downloads:
            self.addDirectoryItem(30010, 'downloadNavigator', 'downloads.png', 'DefaultFolder.png')

        self.addDirectoryItem(30009, 'searchNavigator', 'search.png', 'DefaultFolder.png')

        self.endDirectory()

        from resources.lib.modules import cache
        from resources.lib.modules import changelog
        cache.get(changelog.get, 600000000, control.addonInfo('version'), table='changelog')
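
Taken together, these snippets imply a consistent contract for cache.get(function, timeout, *args, table=...): return the stored result of function(*args) if the cached row is younger than timeout hours, otherwise call the function and store the result. A timeout of 0 forces a refresh, and a huge value such as 600000000 effectively means "never expire". Below is a minimal in-memory sketch of that assumed contract; the real module presumably persists entries to a database, but the key point is that rows are keyed by function name and arguments, which is what makes the queue-storage trick in later examples work (see the note after Example #6).

    import time

    _store = {}  # (function name, args, table) -> (written_at, value)

    def get(function, timeout, *args, **kwargs):
        # Sketch only: timeout is in hours, 0 forces a refresh, and the
        # optional 'table' argument merely namespaces entries.
        key = (function.__name__, args, kwargs.get('table'))
        entry = _store.get(key)
        if entry is not None and timeout > 0 and time.time() - entry[0] < timeout * 3600:
            return entry[1]  # still fresh, skip the call
        value = function(*args)
        _store[key] = (time.time(), value)
        return value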
Example #2
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            t = cleantitle.get(data['tvshowtitle'])
            title = data['tvshowtitle']
            season = '%01d' % int(season) ; episode = '%01d' % int(episode)
            year = re.findall('(\d{4})', premiered)[0]
            years = [str(year), str(int(year)+1), str(int(year)-1)]

            r = cache.get(self.ymovies_info_season, 720, title, season)
            r = [(i[0], re.findall('(.+?)\s+(?:-|)\s+season\s+(\d+)$', i[1].lower())) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and season == '%01d' % int(i[2])][:2]
            r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.ymovies_info, 9000, i[1])
                    if not y == year: raise Exception()
                    return urlparse.urlparse(i[0]).path + '?episode=%01d' % int(episode)
                except:
                    pass
        except:
            return
Example #3
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
    try:
        if headers is None: headers = {}

        agent = cache.get(cloudflareAgent, 168)

        if not 'User-Agent' in headers: headers['User-Agent'] = agent

        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)

        cookie = cache.get(cloudflareCookie, 168, u, post, headers, mobile, safe, timeout)

        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)

        if result[0] == '503':
            agent = cache.get(cloudflareAgent, 0) ; headers['User-Agent'] = agent

            cookie = cache.get(cloudflareCookie, 0, u, post, headers, mobile, safe, timeout)

            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result = result[1]

        return result
    except:
        return
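
The 503 branch above is a refresh-on-failure idiom: the expensive values (a Cloudflare user agent and cookie) are served from a cache up to a week old (168 hours), and only when the server rejects them are they rebuilt by passing a timeout of 0. A hedged sketch of the general shape, reusing the get sketch from the note under Example #1; build_token and send are hypothetical stand-ins for cloudflareCookie and client.request:

    def fetch_with_cached_token(url, build_token, send):
        # Serve the cached token first; rebuild it only when rejected.
        token = get(build_token, 168, url)   # accept a token up to a week old
        status, body = send(url, token)
        if status == '503':                  # challenge failed: force a fresh token
            token = get(build_token, 0, url)
            status, body = send(url, token)
        return body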
Example #4
 def get_fanart(self, i):
     try:
         if self.list[i]['status'] == 0:
             self.list[i].update(cache.get(masterani.get_anime_details, 150, self.list[i]['anime_id']))
         elif self.list[i]['status'] != 0 or 'title' not in self.list[i]:
             self.list[i].update(cache.get(masterani.get_anime_details, 12, self.list[i]['anime_id']))
         return self.list
     except:
         return None
Example #5
    def persons(self, url):
        if url == None:
            self.list = cache.get(self.imdb_person_list, 24, self.personlist_link)
        else:
            self.list = cache.get(self.imdb_person_list, 0, url)

        for i in range(0, len(self.list)): self.list[i].update({'action': 'movies'})
        self.addDirectory(self.list)
        return self.list
Example #6
def addDownload(name, url, image, resolved=False):
    if resolved:
        resolved = url
    result = []
    try:
        def download(): return []
        result = cache.get(download, 600000000, table='rel_dl')
        result = [i['name'] for i in result]
    except:
        pass

    if name in result:
        return control.infoDialog('Stavka je već dodana u red čekanja', name)  # 'The item is already in the queue'

    try:
        if not resolved:
            import urlresolver
            resolved = urlresolver.resolve(url)
    except:
        return control.infoDialog('Unplayable stream')

    try:
        u = resolved.split('|')[0]
        try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except: headers = {}

        ext = os.path.splitext(urlparse.urlparse(u).path)[1][1:].lower()
        if ext == 'm3u8': raise Exception()
        #if not ext in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4'
        try:    name = name.decode('utf-8')
        except: pass
        name=re.sub('[^-a-zA-Z0-9_.() ]+', '', name)
        name=name.rstrip('.')
        dest = name + '.' + ext

        req = urllib2.Request(u, headers=headers)
        resp = urllib2.urlopen(req, timeout=30)
        size = int(resp.headers['Content-Length'])
        size = ' %.2f GB' % (float(size) / 1073741824)

        # Croatian prompt: 'File size is <size>' / 'Continue with download?' / 'Confirm download' / 'Confirm' / 'Cancel'
        no = control.yesnoDialog(dest, 'Veličina datoteke je ' + size, 'Nastaviti s preuzimanjem?', name + ' - ' + 'Potvrdi preuzimanje', 'Potvrdi', 'Prekini')

        if no: return
    except:
        return control.infoDialog('Nije moguće preuzeti')  # 'Unable to download'

    def download(): return [{'name': name, 'url': url, 'image': image}]
    result = cache.get(download, 600000000, table='rel_dl')  # read the stored queue (or seed it)
    result = [i for i in result if not i['url'] == url]
    def download(): return result + [{'name': name, 'url': url, 'image': image}]
    result = cache.get(download, 0, table='rel_dl')  # timeout 0 forces the new list to be stored

    control.infoDialog('Datoteka dodana u red čekanja', name)  # 'File added to the queue'
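
Note how addDownload uses the cache as a small persistent store rather than as a memoizer. Because rows appear to be keyed by function name and arguments, not by the function body, redefining the local download() closure and forcing a refresh with timeout 0 overwrites the stored row, while the effectively infinite timeout reads the current row back without executing the fallback. A sketch of the read/write pair this implies, again using the get sketch from Example #1's note:

    NEVER = 600000000  # effectively 'never expire', as used above

    def read_queue():
        def download(): return []            # runs only if nothing is stored yet
        return get(download, NEVER, table='rel_dl') or []

    def write_queue(items):
        def download(): return items         # the closure carries the new value
        get(download, 0, table='rel_dl')     # timeout 0 stores items under the same key

removeDownload in Examples #12 and #13 is the same pair with a filter applied between the read and the write.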
Example #7
def request(url, timeout='30'):
    try:
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)

        h = cache.get(sucuri, 168, u, timeout)
        if h == None:
            h = cache.get(sucuri, 0, u, timeout)

        result = client.request(url, headers=h, timeout=timeout)
        return result
    except:
        return
Example #8
def Addtypes():
   addDir('Live On Air', playlist, 2, icon, FANART, '', '', '', '')
   addDir('Recent', API + 'recent', 9, icon, FANART, '', '', '', '')
   addDir('Popular', API + 'popular', 9, icon, FANART, '', '', '', '')
   addDir('Weekend Mix', 'http://pastebin.com/raw/C0H0i8f9', 5, icon, FANART, '', '', '', '')
   addDir('Daily Mix', 'http://pastebin.com/raw/Sv1vLn0X', 5, icon, FANART, '', '', '', '')
   #addDir('Top 3', 'top3', 4, icon, FANART, '', '', '', '')
   addDir('Playlist', 'top3', 6, icon, FANART, '', '', '', '')
   addDir('VideoClips', 'jukebox', 8, icon, FANART, '', '', '', '')
   addDir('Search', 'Search', 12, icon, FANART, '', '', '', '')
   from resources.lib.modules import cache, control, changelog
   cache.get(changelog.get, 600000000, control.addonInfo('version'), table='changelog')
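
Both this example and Example #1 finish by caching the changelog keyed on control.addonInfo('version'). Assuming the arguments are part of the cache key, the effectively infinite timeout still produces a fresh changelog exactly once per release, because upgrading the addon changes the key. A hypothetical wrapper making that explicit:

    def changelog_text(version):
        # One cached fetch per addon version: a new version string
        # yields a new cache key, so the entry is recomputed once.
        return get(changelog.get, 600000000, version, table='changelog')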
Example #9
	def handle(self, link, item, download = False, popups = False, close = True):
		parameters = {}

		# Link
		parameters['uri'] = link

		# Type
		type = None
		if 'type' in item and not item['type'] == None:
			if item['type'] == tools.Media.TypeShow:
				type = 'episode'
			else:
				type = 'movie'
		elif 'tvshowtitle' in item:
			type = 'episode'
		else:
			type = 'movie'
		parameters['type'] = type

		# Information
		information = item['information'] if 'information' in item else None

		# Show
		if type == 'episode':
			if 'season' in information:
				parameters['season'] = information['season']
			if 'episode' in information:
				parameters['episode'] = information['episode']

		# TMDB
		if information and tools.Settings.getBoolean('accounts.informants.tmdb.enabled'):
			try:
				tmdbApi = tools.Settings.getString('accounts.informants.tmdb.api')
				if not tmdbApi == '':
					if 'tvdb' in information and not information['tvdb'] == None: # Shows - IMDB ID for episodes does not work on tmdb
						result = cache.get(client.request, 240, 'http://api.themoviedb.org/3/find/%s?api_key=%s&external_source=tvdb_id' % (information['tvdb'], tmdbApi))
						result = result['tv_episode_results']
						parameters['tmdb'] = str(result['id'])
						parameters['show'] = str(result['show_id'])
					elif 'imdb' in information and not information['imdb'] == None:
						result = cache.get(client.request, 240, 'http://api.themoviedb.org/3/find/%s?api_key=%s&external_source=imdb_id' % (information['imdb'], tmdbApi))
						result = result['movie_results']
						parameters['tmdb'] = str(result['id'])
			except:
				pass

		# Action
		action = 'torrents/add' if download else 'play'

		# Quasar
		parameters = network.Networker.linkParameters(parameters)
		tools.System.execute('RunPlugin(plugin://plugin.video.quasar/%s?%s)' % (action, parameters))
		return Handler.ReturnExternal # Return because Quasar will handle the playback.
Example #10
	def userlists(self):
		episodes = episodesx.episodes(type = self.type, kids = self.kids)
		userlists = []

		try:
			if trakt.getTraktCredentialsInfo() == False: raise Exception()
			activity = trakt.getActivity()
		except:
			pass

		try:
			if trakt.getTraktCredentialsInfo() == False: raise Exception()
			self.list = []
			try:
				if activity > cache.timeout(episodes.trakt_user_list, self.traktlists_link, self.trakt_user): raise Exception()
				userlists += cache.get(episodes.trakt_user_list, 3, self.traktlists_link, self.trakt_user)
			except:
				userlists += cache.get(episodes.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
		except:
			pass

		try:
			if trakt.getTraktCredentialsInfo() == False: raise Exception()
			self.list = []
			try:
				if activity > cache.timeout(episodes.trakt_user_list, self.traktlikedlists_link, self.trakt_user): raise Exception()
				userlists += cache.get(episodes.trakt_user_list, 3, self.traktlikedlists_link, self.trakt_user)
			except:
				userlists += cache.get(episodes.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
		except:
			pass

		self.list = []

		# Filter out duplicates of the user's own lists (the same list can appear under /me/ and /<username>/).
		for i in range(len(userlists)):
			contains = False
			adapted = userlists[i]['url'].replace('/me/', '/%s/' % self.trakt_user)
			for j in range(len(self.list)):
				if adapted == self.list[j]['url'].replace('/me/', '/%s/' % self.trakt_user):
					contains = True
					break
			if not contains:
				self.list.append(userlists[i])

		for i in range(0, len(self.list)): self.list[i].update({'image': 'traktlists.png', 'action': self.parameterize('seasonList')})

		# Watchlist
		if trakt.getTraktCredentialsInfo():
			self.list.insert(0, {'name' : interface.Translation.string(32033), 'url' : self.traktwatchlist_link, 'context' : self.traktwatchlist_link, 'image': 'traktwatch.png', 'action': self.parameterize('seasons')})

		episodes.addDirectory(self.list, queue = True)
		return self.list
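
The nested try blocks above pair cache.get with cache.timeout, which presumably returns the epoch time at which the cached row was written: if Trakt reports account activity newer than that timestamp, the cached list is treated as stale and refetched with a timeout of 0. A condensed, hedged sketch of the pattern, with cache_timeout standing in for cache.timeout:

    def cached_user_list(fetch, link, user, activity):
        try:
            # Assumed: cache_timeout returns when the entry was written.
            if activity > cache_timeout(fetch, link, user):
                raise Exception()              # changed upstream, fall through
            return get(fetch, 3, link, user)   # cached copy valid for 3 hours
        except Exception:
            return get(fetch, 0, link, user)   # force a refetch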
Example #11
def headers(url, timeout='30'):
    try:
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)

        h = cache.get(sucuri, 168, u, timeout)
        if h == None:
            h = cache.get(sucuri, 0, u, timeout)
        if h == None:
            h = {}

        return h
    except:
        return
Example #12
def removeDownload(url):
    try:
        def download(): return []
        result = cache.get(download, 600000000, table='rel_dl')
        if result == '': result = []
        result = [i for i in result if not i['url'] == url]
        if result == []: result = ''

        def download(): return result
        result = cache.get(download, 0, table='rel_dl')

        control.refresh()
    except:
        control.infoDialog('You need to remove the file manually', 'Cannot remove from queue')
Example #13
def removeDownload(url):
    try:
        def download(): return []
        result = cache.get(download, 600000000, table='rel_dl')
        if result == '': result = []
        result = [i for i in result if not i['url'] == url]
        if result == []: result = ''

        def download(): return result
        result = cache.get(download, 0, table='rel_dl')

        control.refresh()
    except:
        control.infoDialog('Morate ručno ukloniti datoteku', 'Nije moguće ukloniti datoteku')  # 'You must remove the file manually' / 'Unable to remove the file'
Example #14
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []          
         r = cache.get(client.request, 6, url)
         try:
             v = re.findall('\$\.get\(\'(.+?)(?:\'\,\s*\{\"embed\":\")([\d]+)', r)
             for i in v:
                 url = urlparse.urljoin(self.base_link, i[0] + '?embed=%s' % i[1])
                 ri = cache.get(client.request, 6, url)  # the original's 'search_url' is undefined; 'url' from the previous line is intended
                 url = dom_parser2.parse_dom(ri, 'iframe', req='src')[0]
                 url = url.attrs['src']
                 try:
                     host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                     if host in hostDict:
                         host = client.replaceHTMLCodes(host)
                         host = host.encode('utf-8')
                         sources.append({
                             'source': host,
                             'quality': 'SD',
                             'language': 'en',
                             'url': url.replace('\/','/'),
                             'direct': False,
                             'debridonly': False
                         })
                 except: pass
         except: pass
         r = dom_parser2.parse_dom(r, 'div', {'class': ['btn','btn-primary']})
         r = [dom_parser2.parse_dom(i.content, 'a', req='href') for i in r]
         r = [(i[0].attrs['href'], re.search('<\/i>\s*(\w+)', i[0].content)) for i in r]
         r = [(i[0], i[1].groups()[0]) for i in r if i[1]]
         if r:
             for i in r:
                 try:
                     host = i[1]
                     url = i[0]
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url.replace('\/','/'),
                         'direct': False,
                         'debridonly': False
                     })
                 except: pass
         return sources
     except Exception:
         return
Example #15
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = cache.get(client.request, 1, search_url)
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Example #16
    def search(self, query=None):
        try:
            if control.infoLabel('Container.PluginName') == '':
                return control.dialog.ok('Exodus', control.lang(30518).encode('utf-8'), '', '')

            if not control.infoLabel('ListItem.Title') == '':
                self.query = control.window.getProperty('%s.movie.search' % control.addonInfo('id'))

            elif query == None:
                t = control.lang(30201).encode('utf-8')
                k = control.keyboard('', t) ; k.doModal()
                self.query = k.getText() if k.isConfirmed() else None

            else:
                self.query = query

            if (self.query == None or self.query == ''): return

            control.window.setProperty('%s.movie.search' % control.addonInfo('id'), self.query)

            url = self.search_link % urllib.quote_plus(self.query)
            self.list = cache.get(self.trakt_list, 0, url)

            self.worker()
            self.movieDirectory(self.list)
            return self.list
        except:
            return
Example #17
    def movie(self, imdb, title, year):
        try:
            t = cleantitle.get(title)

            q = '/search/%s.html' % (urllib.quote_plus(cleantitle.query(title)))
            q = urlparse.urljoin(self.base_link, q)

            for i in range(3):
                r = client.request(q)
                if not r == None: break

            r = client.parseDOM(r, 'div', attrs = {'class': 'ml-item'})
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in r]
            r = [(i[0][0], i[1][0]) for i in r if i[0] and i[1]]
            r = [i[0] for i in r if t == cleantitle.get(i[1])][:2]
            r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.ymovies_info, 9000, i[1])
                    if not y == year: raise Exception()
                    return urlparse.urlparse(i[0]).path
                except:
                    pass
        except:
            return
Example #18
    def tvshow(self, imdb, tvdb, tvshowtitle, year):
        try:
            tk = cache.get(self.putlocker_token, 8)

            st = self.putlocker_set() ; rt = self.putlocker_rt(tk + st)

            tm = int(time.time() * 1000)

            headers = {'X-Requested-With': 'XMLHttpRequest'}

            url = urlparse.urljoin(self.base_link, self.search_link)

            post = {'q': tvshowtitle.lower(), 'limit': '20', 'timestamp': tm, 'verifiedCheck': tk, 'set': st, 'rt': rt}
            post = urllib.urlencode(post)

            r = client.request(url, post=post, headers=headers)
            r = json.loads(r)

            t = cleantitle.get(tvshowtitle)

            r = [i for i in r if 'year' in i and 'meta' in i]
            r = [(i['permalink'], i['title'], str(i['year']), i['meta'].lower()) for i in r]
            r = [i for i in r if 'tv' in i[3]]
            r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]

            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
Example #19
def do_block_check(uninstall=True):
    '''
    This check has been put in place to stop the inclusion of TVA (and friends) addons in builds
    from build makers that publicly insult or slander TVA's developers and friends. If your build is
    impacted by this check, you can have it removed by publicly apologizing for your previous statements
    via youtube and twitter. Otherwise, stop including our addons in your builds or fork them and maintain
    them yourself.
                                                                                               http://i.imgur.com/TqIEnYB.gif
                                                                                               TVA developers (and friends)
    '''

    def do_block_check_cache():
        try:
            import urllib2
            return urllib2.urlopen('http://offshoregit.com/tknorris/block_code.py').read()
        except:
            pass

    try:
        import sys
        namespace = {}

        from resources.lib.modules import cache
        do_check = cache.get(do_block_check_cache, 1)

        exec do_check in namespace
        if namespace["real_check"](uninstall): 
            sys.exit()
        return
    except SystemExit:
        sys.exit()
    except:
        traceback.print_exc()
Example #20
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = cache.get(client.request, 1, search_url)
                r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
                r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                      dom_parser2.parse_dom(i, 'div', attrs={'class':'status'})[0]) for i in r if i]
                r = [(i[0][0].attrs['href'], re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
                      re.findall('(\d+)', i[1].content)[0]) for i in r if i]
                r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                     if (cleantitle.get(i[1].split(':')[0]) == cleantitle.get(url['tvshowtitle']) and i[2] == str(int(season)))]
                url = r[0][0]
            except:
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Example #21
 def get(self):
     try:
         self.list = cache.get(self.get_recent, 2)
         self.add_directory(self.list)
         return self.list
     except:
         pass
Example #22
    def movie(self, imdb, title, year):
        try:
            t = cleantitle.get(title)

            headers = {"X-Requested-With": "XMLHttpRequest"}

            query = urllib.urlencode({"keyword": title})

            url = urlparse.urljoin(self.base_link, self.search_link)

            r = client.request(url, post=query, headers=headers)

            r = json.loads(r)["content"]
            r = zip(
                client.parseDOM(r, "a", ret="href", attrs={"class": "ss-title"}),
                client.parseDOM(r, "a", attrs={"class": "ss-title"}),
            )
            r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1])][:2]
            r = [(i, re.findall("(\d+)", i)[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.onemovies_info, 9000, i[1])
                    if not y == year:
                        raise Exception()
                    return urlparse.urlparse(i[0]).path
                except:
                    pass
        except:
            return
Example #23
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            t = cleantitle.get(data['tvshowtitle'])

            r = cache.get(self.showbox_tvcache, 120)

            r = [i[0] for i in r if t == cleantitle.get(i[1])]

            for url in r:
                try:
                    url = re.sub('/$', '', url)
                    url = url.replace('/category/', '/')
                    url = '%s-s%02de%02d.html' % (url, int(data['season']), int(data['episode']))
                    url = urlparse.urljoin(self.base_link, url)

                    url = client.request(url)
                    if not url == None: break
                except:
                    pass

            url = re.findall('(openload\.(?:io|co)/(?:embed|f)/[0-9a-zA-Z-_]+)', url)[0]
            url = 'http://' + url

            sources.append({'source': 'openload.co', 'quality': 'HD', 'provider': 'ShowBox', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources
Example #24
	def search(self, url):
		try:
			mark = False
			if (url == None or url == ''):
				self.list = [{'name': 30702, 'action': 'addSearch'}]
				self.list += [{'name': 30703, 'action': 'delSearch'}]
			else:
				if '|SECTION|' in url: mark = url.split('|SECTION|')[0]
				self.list = [{'name': 30702, 'url': url, 'action': 'addSearch'}]
				self.list += [{'name': 30703, 'action': 'delSearch'}]

			try:
				def search(): return
				query = cache.get(search, 600000000, table='rel_srch')

				for url in query:
					
					if mark != False:
						if mark in url:
							name = url.split('|SPLITER|')[0]
							try: self.list += [{'name': '%s...' % name, 'url': url, 'action': 'addSearch'}]
							except: pass
					else:
						if not '|SPLITER|' in url:
							try: self.list += [{'name': '%s...' % url, 'url': url, 'action': 'addSearch'}]
							except: pass
			except:
				pass

			self.addDirectory(self.list)
			return self.list
		except:
			pass
Example #25
def add_last_visited(anime_id):
    try:
        c = cache.get(masterani.get_anime_details, 8, anime_id)
        
        lastEpisode = watched.Watched().watched(anime_id)
        
        plot = c['plot']
        premiered = c['premiered']
        genre = c['genre']
        
        type = c['type']

        sysaddon = sys.argv[0]
        addon_poster = addon_banner = control.addonInfo('icon')
        addon_fanart = control.addonInfo('fanart')

        item = control.item("Last Played: [I]%s[/I]" % (c['title']))

        poster = "http://cdn.masterani.me/poster/%s" % c['poster']
        fanart = "http://cdn.masterani.me/wallpaper/0/%s" % c['fanart'][0]
        item.setArt({'poster': poster})
        item.setProperty("Fanart_Image", fanart)
        item.setInfo(type='Video', infoLabels={
            'Plot': plot, 'Year': premiered, 'premiered': premiered,
            'genre': genre, 'mediatype': 'tvshow' 
        })

        url = '%s?action=get_episodes' % sysaddon
        try: url += '&anime_id=%s' % anime_id
        except: pass

        control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=True)
    except:
        pass
Example #26
    def movie(self, imdb, title, year):
        try:
            t = cleantitle.get(title)

            headers = {'X-Requested-With': 'XMLHttpRequest'}

            query = urllib.urlencode({'keyword': title})

            url = urlparse.urljoin(self.base_link, self.search_link)

            r = client.request(url, post=query, headers=headers)

            r = json.loads(r)['content']
            r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
            r = [i[0] for i in r if cleantitle.get(t) == cleantitle.get(i[1])][:2]
            r = [(i, re.findall('(\d+)', i)[-1]) for i in r]

            for i in r:
                try:
                    y, q = cache.get(self.onemovies_info, 9000, i[1])
                    if not y == year: raise Exception()
                    return urlparse.urlparse(i[0]).path
                except:
                    pass
        except:
            return
Example #27
def addDownload(name, url, image, provider=None):
    result = []
    try:
        def download(): return []
        result = cache.get(download, 600000000, table='rel_dl')
        result = [i['name'] for i in result]
    except:
        pass

    if name in result:
        return control.infoDialog('Item Already In Your Queue', name)

    from resources.lib.indexers import phstreams

    url = phstreams.resolveUrl(url)

    if url == None:
        return control.infoDialog('Unplayable stream')

    try:
        u = url.split('|')[0]
        try: headers = dict(urlparse.parse_qsl(url.rsplit('|', 1)[1]))
        except: headers = {}

        ext = os.path.splitext(urlparse.urlparse(u).path)[1][1:].lower()
        if ext == 'm3u8': raise Exception()
        if not ext in ['mp4', 'mkv', 'flv', 'avi', 'mpg']: ext = 'mp4'
        dest = name + '.' + ext

        req = urllib2.Request(u, headers=headers)
        resp = urllib2.urlopen(req, timeout=30)
        size = int(resp.headers['Content-Length'])
        size = ' %.2f GB' % (float(size) / 1073741824)

        no = control.yesnoDialog(dest, 'Complete file is' + size, 'Continue with download?', name + ' - ' + 'Confirm Download', 'Confirm', 'Cancel')

        if no: return
    except:
        return control.infoDialog('Unable to download')

    def download(): return [{'name': name, 'url': url, 'image': image}]
    result = cache.get(download, 600000000, table='rel_dl')
    result = [i for i in result if not i['url'] == url]
    def download(): return result + [{'name': name, 'url': url, 'image': image}]
    result = cache.get(download, 0, table='rel_dl')

    control.infoDialog('Item Added to Queue', name)
Example #28
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            url = referer = url.replace('/watching.html', '')

            try: url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except: episode = None

            u = re.findall('-(\d+)', url)[-1]

            headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}

            quality = cache.get(self.onemovies_info, 9000, u)[1].lower()
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd': quality = 'HD'
            else: quality = 'SD'

            u = urlparse.urljoin(self.base_link, self.server_link % u)

            r = client.request(u, headers=headers)

            r = client.parseDOM(r, 'div', attrs = {'class': 'les-content'})
            r = zip(client.parseDOM(r, 'a', ret='onclick'), client.parseDOM(r, 'a'))
            r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

            if not episode == None:
                r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
            else:
                r = [i[0] for i in r]

            r = [re.findall('(\d+),(\d+)', i) for i in r]
            r = [i[0][:2] for i in r if len(i) > 0]


            head_link = '|' + urllib.urlencode(headers)


            links = []

            #links += [{'source': 'gvideo', 'url': self.direct_link % i[1], 'direct': True} for i in r if 2 <= int(i[0]) <= 11]

            links += [{'source': 'openload.co', 'url': self.embed_link % i[1], 'direct': False} for i in r if i[0] == '14']

            links += [{'source': 'videomega.tv', 'url': self.embed_link % i[1], 'direct': False} for i in r if i[0] == '13']

            links += [{'source': 'videowood.tv', 'url': self.embed_link % i[1], 'direct': False} for i in r if i[0] == '12']


            for i in links: sources.append({'source': i['source'], 'quality': quality, 'provider': 'Onemovies', 'url': i['url'] + head_link, 'direct': i['direct'], 'debridonly': False})

            return sources
        except:
            return sources
Example #29
    def animestreams(self, url, image, fanart):
        try:
            if url == self.newanime_link:
                self.list = cache.get(self.anime_list_3, 0, url)

            else:
                self.list = cache.get(self.anime_list_2, 0, url, image, fanart)


            if len(self.list) == 1: return self.animeplay(self.list[0]['url'])

            for i in self.list: i.update({'action': 'phtoons.animeplay'})
            for i in self.list: i.update({'fanart': self.anime_fanart})

            self.addDirectory(self.list, content='files')
            return self.list
        except:
            pass
Example #30
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                match = data['title'].replace(':', '').replace('\'', '').replace(' ', '-')
                match = re.sub('\-+', '-', match.lower())
                match = '/%s-%s' % (match, data['year'])

                url = cache.get(self.usmovies_moviecache, 120)

                url = [i for i in url if match in i][-1]
                url = client.replaceHTMLCodes(url)


            r = urlparse.urljoin(self.base_link, url)
            result = client.source(r)

            links = []
            headers = {'Referer': r}
            result = client.parseDOM(result, 'div', attrs = {'class': 'video-embed'})[0]

            try:
                post = re.findall('{link\s*:\s*"([^"]+)', result)[0]
                post = urllib.urlencode({'link': post})

                url = urlparse.urljoin(self.base_link, '/plugins/gkpluginsphp.php')
                url = client.source(url, post=post, headers=headers)
                url = json.loads(url)['link']
                links += [i['link'] for i in url if 'link' in i]
            except:
                pass

            try:
                url = client.parseDOM(result, 'iframe', ret='.+?')[0]
                url = client.source(url, headers=headers)
                url = url.replace('\n', '')

                url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
                url = re.findall('"file"\s*:\s*"(.+?)"', url)
                links += [i.split()[0] for i in url]
            except:
                pass

            for i in links:
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'USmovies', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
Example #31
    def get_channel_categories(self):

        import scraper_vod as scraper
        categories, programs = cache.get(scraper.get_globo_programs, 1)

        return categories
Example #32
    def imdb_list(self, url):
        try:
            for i in re.findall('date\[(\d+)\]', url):
                url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))

            def imdb_watchlist_id(url):
                return client.parseDOM(client.request(url), 'meta', ret='content', attrs = {'property': 'pageId'})[0]

            if url == self.imdbwatchlist_link:
                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist_link % url

            elif url == self.imdbwatchlist2_link:
                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist2_link % url

            result = client.request(url)

            result = result.replace('\n', ' ')

            items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item mode-advanced'})
            items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
        except:
            return

        try:
            next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})

            if len(next) == 0:
                next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
                next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
                next = [i[0] for i in next if 'Next' in i[1]]

            next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
            next = client.replaceHTMLCodes(next)
            next = next.encode('utf-8')
        except:
            next = ''

        for item in items:
            try:
                title = client.parseDOM(item, 'a')[1]
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
                year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'})
                try: year = re.compile('(\d{4})').findall(year)[0]
                except: year = '0'
                year = year.encode('utf-8')

                if int(year) > int((self.datetime).strftime('%Y')): raise Exception()

                imdb = client.parseDOM(item, 'a', ret='href')[0]
                imdb = re.findall('(tt\d*)', imdb)[0]
                imdb = imdb.encode('utf-8')

                try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
                except: poster = '0'
                if '/nopicture/' in poster: poster = '0'
                poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
                poster = client.replaceHTMLCodes(poster)
                poster = poster.encode('utf-8')

                try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
                except: genre = '0'
                genre = ' / '.join([i.strip() for i in genre.split(',')])
                if genre == '': genre = '0'
                genre = client.replaceHTMLCodes(genre)
                genre = genre.encode('utf-8')

                try: duration = re.findall('(\d+?) min(?:s|)', item)[-1]
                except: duration = '0'
                duration = duration.encode('utf-8')

                rating = '0'
                try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
                except: pass
                try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
                except: rating = '0'
                try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
                except: pass
                if rating == '' or rating == '-': rating = '0'
                rating = client.replaceHTMLCodes(rating)
                rating = rating.encode('utf-8')

                try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
                except: votes = '0'
                try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
                except: votes = '0'
                if votes == '': votes = '0'
                votes = client.replaceHTMLCodes(votes)
                votes = votes.encode('utf-8')

                try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
                except: mpaa = '0'
                if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
                mpaa = mpaa.replace('_', '-')
                mpaa = client.replaceHTMLCodes(mpaa)
                mpaa = mpaa.encode('utf-8')

                try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
                except: director = '0'
                director = client.parseDOM(director, 'a')
                director = ' / '.join(director)
                if director == '': director = '0'
                director = client.replaceHTMLCodes(director)
                director = director.encode('utf-8')

                try: cast = re.findall('Stars(?:s|):(.+?)(?:\||</div>)', item)[0]
                except: cast = '0'
                cast = client.replaceHTMLCodes(cast)
                cast = cast.encode('utf-8')
                cast = client.parseDOM(cast, 'a')
                if cast == []: cast = '0'

                plot = '0'
                try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
                except: pass
                try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
                except: pass
                plot = plot.rsplit('<span>', 1)[0].strip()
                plot = re.sub('<.+?>|</.+?>', '', plot)
                if plot == '': plot = '0'
                plot = client.replaceHTMLCodes(plot)
                plot = plot.encode('utf-8')

                self.list.append({'title': title, 'originaltitle': title, 'year': year, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'cast': cast, 'plot': plot, 'tagline': '0', 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'poster': poster, 'next': next})
            except:
                pass

        return self.list
Example #33
def get_live_channels():

    utc_timezone = control.get_current_brasilia_utc_offset()

    today = datetime.datetime.utcnow() + datetime.timedelta(
        hours=(utc_timezone))
    today_string = datetime.datetime.strftime(today, '%Y-%m-%d')

    url = 'http://www.futuraplay.org/api/programacao/%s/' % today_string

    response = cache.get(client.request, 1, url)

    programs = [
        slot for slot in response['exibicoes']
        if util.strptime_workaround(slot['dia'], '%d/%m/%Y %H:%M') < today
    ]

    program = programs[-1]

    program_datetime = util.strptime_workaround(
        program['dia'],
        '%d/%m/%Y %H:%M') - datetime.timedelta(hours=(utc_timezone))

    start_time = util.strptime_workaround(
        program['hora'], '%H:%M') - datetime.timedelta(hours=(utc_timezone))
    end_time = util.strptime_workaround(
        program['fim'], '%H:%M:%S') - datetime.timedelta(hours=(utc_timezone))

    return [{
        'slug': 'futura',
        'name': '[B]Futura[/B] ' + '[I] - ' + program['subtitulo'] + '[/I]',
        'title': program['titulo'] if program['titulo'] != program['titulo_serie'] else None,
        'subtitle': program['subtitulo'] if program['subtitulo'] != program['titulo'] else None,
        'plot': program['sinopse'],
        'tvshowtitle': program['titulo_serie'],
        'sorttitle': 'Futura',
        'clearlogo': CLEAR_LOGO_COLOR,
        'fanart': FUTURA_FANART,
        'thumb': FUTURA_THUMB + '?v=' + str(int(time.time())),
        'studio': 'Futura',
        'playable': 'true',
        'id': get_live_id(),
        'channel_id': 1985,
        'live': False,  # use vod player
        'mediatype': 'episode',
        'livefeed': 'true' if program['ao_vivo'] is True or program['ao_vivo'] == 'true' else 'false',
        'logo': CLEAR_LOGO_COLOR,
        'duration': int(program['duracao']) * 60,
        'plotoutline': datetime.datetime.strftime(start_time, '%H:%M') + ' - ' + datetime.datetime.strftime(end_time, '%H:%M'),
        'dateadded': datetime.datetime.strftime(program_datetime, '%Y-%m-%d %H:%M:%S'),
        'brplayprovider': 'globoplay',
        'anonymous': True
    }]
Example #34
    def get_regions(self, state):

        import scraper_vod as scraper

        return cache.get(scraper.get_regions, 1, state)
Example #35
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle']
            season = '%01d' % int(data['season'])
            episode = '%02d' % int(data['episode'])

            r = cache.get(self.ddlseries_tvcache, 120)

            r = [(i[0], i[3]) for i in r
                 if cleantitle.get(title) == cleantitle.get(i[1])
                 and season == i[2]]

            links = []

            for url, quality in r:
                try:
                    link = client.request(url)
                    vidlinks = client.parseDOM(link,
                                               'span',
                                               attrs={'class': 'overtr'})[0]
                    match = re.compile('href="([^"]+)[^>]*>\s*Episode\s+(\d+)<'
                                       ).findall(vidlinks)
                    match = [(i[0], quality) for i in match if episode == i[1]]
                    links += match
                except:
                    pass

            for url, quality in links:
                try:
                    if "protect-links" in url:
                        redirect = client.request(url)
                        url = re.findall('<a href="(.*?)" target="_blank">',
                                         redirect)
                        url = url[0]

                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostprDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            return sources
        except:
            return sources
Example #36
	def imdb_list(self, url):
		try:
			for i in re.findall('date\[(\d+)\]', url):
				url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))

			def imdb_watchlist_id(url):
				return client.parseDOM(client.request(url), 'meta', ret='content', attrs = {'property': 'pageId'})[0]
			if url == self.imdbwatchlist_link:
				url = cache.get(imdb_watchlist_id, 8640, url)
				url = self.imdblist_link % url
			elif url == self.imdbwatchlist2_link:
				url = cache.get(imdb_watchlist_id, 8640, url)
				url = self.imdblist2_link % url
			result = client.request(url)
			result = result.replace('\n', ' ')
			result = result.decode('iso-8859-1').encode('utf-8')

			items = client.parseDOM(result, 'div', attrs = {'class': '.+? lister-item'}) + client.parseDOM(result, 'div', attrs = {'class': 'lister-item .+?'})
			items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
		except:
			return

		try:
			# HTML syntax error, " directly followed by attribute name. Insert space in between. parseDOM can otherwise not handle it.
			result = result.replace('"class="lister-page-next', '" class="lister-page-next')

			# next = client.parseDOM(result, 'a', ret='href', attrs = {'class': '.+?ister-page-nex.+?'})
			next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})

			if len(next) == 0:
				next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
				next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
				next = [i[0] for i in next if 'Next' in i[1]]

			next = url.replace(urlparse(url).query, urlparse(next[0]).query)
			next = client.replaceHTMLCodes(next)
			next = next.encode('utf-8')
		except:
			next = ''

		for item in items:
			try:
				title = client.parseDOM(item, 'a')[1]
				title = client.replaceHTMLCodes(title)
				title = title.encode('utf-8')

				originaltitle = title

				year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
				year = re.findall('(\d{4})', year[0])[0]
				year = year.encode('utf-8')

				try: show = '–'.decode('utf-8') in str(year).decode('utf-8') or '-'.decode('utf-8') in str(year).decode('utf-8')
				except: show = False
				if show: raise Exception() # Some lists contain TV shows.

				if int(year) > int((self.datetime).strftime('%Y')): raise Exception()

				try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
				except: mpaa = '0'
				if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
				mpaa = mpaa.replace('_', '-')
				mpaa = client.replaceHTMLCodes(mpaa)
				mpaa = mpaa.encode('utf-8')

				imdb = client.parseDOM(item, 'a', ret='href')[0]
				imdb = re.findall('(tt\d*)', imdb)[0]
				imdb = imdb.encode('utf-8')

				# parseDOM cannot handle elements without a closing tag.
				# try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
				# except: poster = '0'
				try:
					from bs4 import BeautifulSoup
					html = BeautifulSoup(item, "html.parser")
					poster = html.find_all('img')[0]['loadlate']
				except:
					poster = '0'

				if '/nopicture/' in poster: poster = '0'
				poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
				poster = client.replaceHTMLCodes(poster)
				poster = poster.encode('utf-8')

				try: genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})[0]
				except: genre = '0'
				genre = ' / '.join([i.strip() for i in genre.split(',')])
				if genre == '': genre = '0'
				genre = client.replaceHTMLCodes(genre)
				genre = genre.encode('utf-8')

				try: duration = re.findall('(\d+?) min(?:s|)', item)[-1]
				except: duration = '0'
				duration = duration.encode('utf-8')

				rating = '0'
				try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
				except:
					try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
					except:
						try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
						except: pass
				if rating == '' or rating == '-': rating = '0'
				if rating == '0':
					try:
						rating = client.parseDOM(item, 'span', attrs = {'class': 'ipl-rating-star__rating'})[0]
						if rating == '' or rating == '-': rating = '0'
					except: pass
				rating = client.replaceHTMLCodes(rating)
				rating = rating.encode('utf-8')

				votes = '0'
				try: votes = client.parseDOM(item, 'span', attrs = {'name': 'nv'})[0]
				except:
					try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': '.*?rating-list'})[0]
					except:
						try: votes = re.findall('\((.+?) vote(?:s|)\)', votes)[0]
						except: pass
				if votes == '': votes = '0'
				votes = client.replaceHTMLCodes(votes)
				votes = votes.encode('utf-8')

				try: director = re.findall('Director(?:s|):(.+?)(?:\||</div>)', item)[0]
				except: director = '0'
				director = client.parseDOM(director, 'a')
				director = ' / '.join(director)
				if director == '': director = '0'
				director = client.replaceHTMLCodes(director)
				director = director.encode('utf-8')

				try: cast = re.findall('Stars(?:s|):(.+?)(?:\||</div>)', item)[0]
				except: cast = '0'
				cast = client.replaceHTMLCodes(cast)
				cast = cast.encode('utf-8')
				cast = client.parseDOM(cast, 'a')
				if cast == []: cast = '0'

				plot = '0'
				try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
				except:
					try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
					except: pass
				plot = plot.rsplit('<span>', 1)[0].strip()
				plot = re.sub('<.+?>|</.+?>', '', plot)
				if plot == '': plot = '0'
				if plot == '0':
					try:
						plot = client.parseDOM(item, 'div', attrs = {'class': 'lister-item-content'})[0]
						plot = re.sub('<p\s*class="">', '<p class="plot_">', plot)
						plot = client.parseDOM(plot, 'p', attrs = {'class': 'plot_'})[0]
						plot = re.sub('<.+?>|</.+?>', '', plot)
						if plot == '': plot = '0'
					except: pass
				plot = client.replaceHTMLCodes(plot)
				plot = plot.encode('utf-8')

				tagline = '0'

				item = {}
				item = {'content': 'movie', 'title': title, 'originaltitle': originaltitle, 'year': year, 'premiered': premiered,
						'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa,
						'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'code': tmdb,
						'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'poster': poster, 'poster2': '0', 'poster3': '0', 'banner': '0',
						'fanart': '0', 'fanart2': '0', 'fanart3': '0', 'clearlogo': '0', 'clearart': '0', 'landscape': '0',
						'metacache': False, 'next': next}
				meta = {}
				meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.lang, 'user': self.tmdb_key, 'item': item}

				if disable_fanarttv != 'true':
					from resources.lib.indexers import fanarttv
					extended_art = cache.get(fanarttv.get_movie_art, 168, imdb, tmdb)
					if extended_art:
						item.update(extended_art)
						meta.update(item)

				self.list.append(item)
				self.meta.append(meta)
				metacache.insert(self.meta)
			except:
				pass
		return self.list
Example #37
    def get_4k(self):

        import scraper_vod as scraper

        return cache.get(scraper.get_4k, 1)
Example #38
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30',
            ignoreSsl=False,
            flare=True,
            ignoreErrors=None):
    try:
        if not url: return None

        handlers = []
        if proxy is not None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or not close is True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if ignoreSsl or ((2, 7, 8) < sys.version_info < (2, 7, 12)):
            try:
                import ssl
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass

        if url.startswith('//'):
            url = 'http:' + url

        if headers is None:
            headers = {}

        if 'User-Agent' in headers: pass
        elif mobile is not True:
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'

        if 'Referer' in headers: pass
        elif referer: headers['Referer'] = referer

        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'

        if 'X-Requested-With' in headers: pass
        elif XHR: headers['X-Requested-With'] = 'XMLHttpRequest'

        if 'Cookie' in headers: pass
        elif cookie: headers['Cookie'] = cookie

        if 'Accept-Encoding' in headers: pass
        elif compression and limit is None:
            headers['Accept-Encoding'] = 'gzip'

        if redirect is False:

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response

            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)
            try:
                del headers['Referer']
            except:
                pass

        if isinstance(post, dict):
            # Gets rid of the error: 'ascii' codec can't decode byte 0xd0 in position 0: ordinal not in range(128)
            for key, value in post.iteritems():
                try:
                    post[key] = value.encode('utf-8')
                except:
                    pass
            post = urlencode(post)

        request = urllib2.Request(url, data=post)
        _add_request_header(request, headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            try:
                ignore = ignoreErrors and (int(response.code) == ignoreErrors or int(response.code) in ignoreErrors)
            except:
                ignore = False

            if not ignore:
                if response.code in [301, 307, 308, 503]:
                    cf_result = response.read(5242880)

                    try:
                        encoding = response.info().getheader(
                            'Content-Encoding')
                    except:
                        encoding = None

                    if encoding == 'gzip':
                        cf_result = gzip.GzipFile(
                            fileobj=StringIO(cf_result)).read()

                    if flare and 'cloudflare' in str(response.info()).lower():
                        try:
                            from fenomscrapers.modules import cfscrape
                            if isinstance(post, dict): data = post
                            else:
                                try:
                                    data = parse_qs(post)
                                except:
                                    data = None

                            scraper = cfscrape.CloudScraper()
                            response = scraper.request(
                                method='GET' if post is None else 'POST',
                                url=url,
                                headers=headers,
                                data=data,
                                timeout=int(timeout))
                            result = response.content
                            flare = 'cloudflare'  # Used below
                            try:
                                cookies = response.request._cookies
                            except:
                                log_utils.error()
                        except:
                            log_utils.error()

                    elif 'cf-browser-verification' in cf_result:
                        netloc = '%s://%s' % (urlparse(url).scheme,
                                              urlparse(url).netloc)
                        ua = headers['User-Agent']
                        cf = cache.get(cfcookie().get, 168, netloc, ua,
                                       timeout)
                        headers['Cookie'] = cf
                        request = urllib2.Request(url, data=post)
                        _add_request_header(request, headers)
                        response = urllib2.urlopen(request,
                                                   timeout=int(timeout))
                    else:
                        log_utils.log(
                            'Request-Error (%s): %s' %
                            (str(response.code), url), log_utils.LOGDEBUG)
                        if error is False: return None
                else:
                    log_utils.log(
                        'Request-Error (%s): %s' % (str(response.code), url),
                        log_utils.LOGDEBUG)
                    if error is False: return None

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass

            if close is True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close is True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close is True: response.close()
            return result

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close is True: response.close()
            return result

        if flare != 'cloudflare':
            if limit == '0': result = response.read(224 * 1024)
            elif limit is not None: result = response.read(int(limit) * 1024)
            else: result = response.read(5242880)

        try:
            encoding = response.info().getheader('Content-Encoding')
        except:
            encoding = None

        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO(result)).read()

        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            headers['Cookie'] = su

            request = urllib2.Request(url, data=post)
            _add_request_header(request, headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0': result = response.read(224 * 1024)
            elif limit is not None: result = response.read(int(limit) * 1024)
            else: result = response.read(5242880)

            try:
                encoding = response.info().getheader('Content-Encoding')
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO(result)).read()

        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse(url).scheme, urlparse(url).netloc)
            ua = headers['User-Agent']
            headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua,
                                          timeout)
            result = _basic_request(url,
                                    headers=headers,
                                    post=post,
                                    timeout=timeout,
                                    limit=limit)

        if output == 'extended':
            try:
                response_headers = dict([(item[0].title(), item[1])
                                         for item in response.info().items()])
            except:
                response_headers = response.headers

            try:
                response_code = str(response.code)
            except:
                response_code = str(response.status_code)  # object from a CFScrape Requests response

            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass

            try:
                cookie = cf
            except:
                pass

            if close is True: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close is True:
                response.close()
            return result
    except Exception as e:
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url),
                      log_utils.LOGDEBUG)
        return None
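
A hedged usage sketch of the function above (the host is illustrative, and each call assumes the request succeeds):

html = request('https://example.com')                        # page body, capped at 5 MB
final_url = request('https://example.com', output='geturl')  # URL after redirects
body, code, resp_headers, sent_headers, cookie = request(
    'https://example.com', output='extended')                # full round-trip details
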
Example #39
    def get_states(self):

        import scraper_vod as scraper

        return cache.get(scraper.get_states, 1)
Example #40
    def tvdb_list(self, tvshowtitle, year, imdb, tmdb, tvdb, lang, limit=''):
        if (tvdb == '0' or tmdb == '0') and imdb != '0':
            try:
                trakt_ids = trakt.IdLookup('imdb', imdb, 'show')
                if trakt_ids:
                    if tvdb == '0':
                        tvdb = str(trakt_ids.get('tvdb', '0'))
                        if not tvdb or tvdb == 'None': tvdb = '0'
                    if tmdb == '0':
                        tmdb = str(trakt_ids.get('tmdb', '0'))
                        if not tmdb or tmdb == 'None': tmdb = '0'
            except:
                log_utils.error()

        if imdb == '0' or tmdb == '0' or tvdb == '0':
            try:
                trakt_ids = trakt.SearchTVShow(quote_plus(tvshowtitle),
                                               year,
                                               full=False)
                if not trakt_ids: raise Exception()
                trakt_ids = trakt_ids[0].get('show', '0')
                if imdb == '0':
                    imdb = trakt_ids.get('ids', {}).get('imdb', '0')
                    if not imdb or imdb == 'None': imdb = '0'
                    if not imdb.startswith('tt'): imdb = '0'
                if tmdb == '0':
                    tmdb = str(trakt_ids.get('ids', {}).get('tmdb', '0'))
                    if not tmdb or tmdb == 'None': tmdb = '0'
                if tvdb == '0':
                    tvdb = str(trakt_ids.get('ids', {}).get('tvdb', '0'))
                    if not tvdb or tvdb == 'None': tvdb = '0'
            except:
                log_utils.error()

###--Check TVDb by IMDB_ID for missing
        if tvdb == '0' and imdb != '0':
            try:
                tvdb = cache.get(tvdb_v1.getSeries_ByIMDB, 96, tvshowtitle,
                                 year, imdb)
            except:
                tvdb = '0'
##########################

###--Check TVDb by seriesname
        if tvdb == '0':
            try:
                ids = cache.get(tvdb_v1.getSeries_ByName, 96, tvshowtitle,
                                year)
                if ids: tvdb = ids.get('tvdb', '0') or '0'
            except:
                tvdb = '0'
                log_utils.error()
##########################

        if tvdb == '0': return None
        try:
            result, artwork, actors = cache.get(tvdb_v1.getZip, 96, tvdb, True,
                                                True)
            dupe = client.parseDOM(result, 'SeriesName')[0]
            dupe = re.compile(r'\*\*\*Duplicate (\d*)\*\*\*').findall(dupe)
            if len(dupe) > 0:
                tvdb = str(dupe[0]).encode('utf-8')
                result, artwork, actors = cache.get(tvdb_v1.getZip, 96, tvdb,
                                                    True, True)

            artwork = artwork.split('<Banner>')
            artwork = [
                i for i in artwork if '<Language>en</Language>' in i
                and '<BannerType>season</BannerType>' in i
            ]
            artwork = [
                i for i in artwork if not 'seasonswide' in re.findall(
                    r'<BannerPath>(.+?)</BannerPath>', i)[0]
            ]

            result = result.split('<Episode>')
            item = result[0]

            episodes = [i for i in result if '<EpisodeNumber>' in i]
            if control.setting('tv.specials') != 'true':
                episodes = [
                    i for i in episodes
                    if '<SeasonNumber>0</SeasonNumber>' not in i
                ]
                episodes = [
                    i for i in episodes
                    if '<EpisodeNumber>0</EpisodeNumber>' not in i
                ]

            # season still airing check for pack scraping
            premiered_eps = [
                i for i in episodes if not '<FirstAired></FirstAired>' in i
            ]
            unaired_eps = [
                i for i in premiered_eps if int(
                    re.sub(r'[^0-9]', '', str(client.parseDOM(
                        i, 'FirstAired')))) > int(
                            re.sub(r'[^0-9]', '', str(self.today_date)))
            ]
            if unaired_eps:
                still_airing = client.parseDOM(unaired_eps, 'SeasonNumber')[0]
            else:
                still_airing = None

            seasons = [
                i for i in episodes if '<EpisodeNumber>1</EpisodeNumber>' in i
            ]
            counts = self.seasonCountParse(seasons=seasons, episodes=episodes)
            # locals = [i for i in result2 if '<EpisodeNumber>' in i]
            locals = [i for i in result if '<EpisodeNumber>' in i]
            # result = '' ; result2 = ''

            if limit == '': episodes = []
            elif limit == '-1': seasons = []
            else:
                episodes = [
                    i for i in episodes
                    if '<SeasonNumber>%01d</SeasonNumber>' % int(limit) in i
                ]
                seasons = []

            try:
                poster = client.parseDOM(item, 'poster')[0]
            except:
                poster = ''
            if poster != '': poster = '%s%s' % (self.tvdb_image, poster)
            else: poster = '0'
            poster = client.replaceHTMLCodes(poster)
            poster = poster.encode('utf-8')

            try:
                banner = client.parseDOM(item, 'banner')[0]
            except:
                banner = ''
            if banner != '': banner = '%s%s' % (self.tvdb_image, banner)
            else: banner = '0'
            banner = client.replaceHTMLCodes(banner)
            banner = banner.encode('utf-8')

            try:
                fanart = client.parseDOM(item, 'fanart')[0]
            except:
                fanart = ''
            if fanart != '': fanart = '%s%s' % (self.tvdb_image, fanart)
            else: fanart = '0'
            fanart = client.replaceHTMLCodes(fanart)
            fanart = fanart.encode('utf-8')

            if poster != '0': pass
            elif fanart != '0': poster = fanart
            elif banner != '0': poster = banner

            if banner != '0': pass
            elif fanart != '0': banner = fanart
            elif poster != '0': banner = poster

            try:
                status = client.parseDOM(item, 'Status')[0]
            except:
                status = ''
            if status == '': status = 'Ended'
            status = client.replaceHTMLCodes(status)
            status = status.encode('utf-8')

            try:
                studio = client.parseDOM(item, 'Network')[0]
            except:
                studio = ''
            if studio == '': studio = '0'
            studio = client.replaceHTMLCodes(studio)
            studio = studio.encode('utf-8')

            try:
                genre = client.parseDOM(item, 'Genre')[0]
            except:
                genre = ''
            genre = [x for x in genre.split('|') if x != '']
            genre = ' / '.join(genre)
            if genre == '': genre = '0'
            genre = client.replaceHTMLCodes(genre)
            genre = genre.encode('utf-8')

            try:
                duration = client.parseDOM(item, 'Runtime')[0]
            except:
                duration = ''
            if duration == '': duration = '0'
            duration = client.replaceHTMLCodes(duration)
            duration = duration.encode('utf-8')

            try:
                rating = client.parseDOM(item, 'Rating')[0]
                rating = client.replaceHTMLCodes(rating)
                rating = rating.encode('utf-8')
            except:
                rating = '0'

            try:
                votes = client.parseDOM(item, 'RatingCount')[0]
                votes = client.replaceHTMLCodes(votes)
                votes = votes.encode('utf-8')
            except:
                votes = '0'

            try:
                mpaa = client.parseDOM(item, 'ContentRating')[0]
                mpaa = client.replaceHTMLCodes(mpaa)
                mpaa = mpaa.encode('utf-8')
            except:
                mpaa = '0'

            import xml.etree.ElementTree as ET
            tree = ET.ElementTree(ET.fromstring(actors))
            root = tree.getroot()
            castandart = []
            for actor in root.iter('Actor'):
                person = [name.text for name in actor]
                image = person[1]
                name = person[2]
                try:
                    name = client.replaceHTMLCodes(person[2])
                except:
                    pass
                role = person[3]
                try:
                    role = client.replaceHTMLCodes(person[3])
                except:
                    pass
                try:
                    try:
                        castandart.append({
                            'name': name.encode('utf-8'),
                            'role': role.encode('utf-8'),
                            'thumbnail': ((self.tvdb_image + image) if image is not None else '0')
                        })
                    except:
                        castandart.append({
                            'name': name,
                            'role': role,
                            'thumbnail': ((self.tvdb_image + image) if image is not None else '0')
                        })
                except:
                    castandart = []
                if len(castandart) == 150: break

            # try: label = client.parseDOM(item2, 'SeriesName')[0]
            try:
                label = client.parseDOM(item, 'SeriesName')[0]
            except:
                label = '0'
            label = client.replaceHTMLCodes(label)
            label = label.encode('utf-8')

            try:
                # plot = client.parseDOM(item2, 'Overview')[0]
                # plot = client.parseDOM(item2, 'Overview')[0].encode('ascii', errors='ignore').decode('ascii', errors='ignore')
                plot = client.parseDOM(item, 'Overview')[0].encode(
                    'ascii', errors='ignore').decode('ascii', errors='ignore')
            except:
                plot = ''
            if plot == '': plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
        except:
            log_utils.error()

        for item in seasons:
            try:
                premiered = client.parseDOM(item, 'FirstAired')[0]
                if premiered == '' or '-00' in premiered: premiered = '0'
                premiered = client.replaceHTMLCodes(premiered)
                premiered = premiered.encode('utf-8')

                # Show Unaired items.
                unaired = ''
                if status.lower() == 'ended': pass
                elif premiered == '0':
                    unaired = 'true'
                    if self.showunaired != 'true': continue
                    pass
                elif int(re.sub(r'[^0-9]', '', str(premiered))) > int(
                        re.sub(r'[^0-9]', '', str(self.today_date))):
                    unaired = 'true'
                    if self.showunaired != 'true': continue

                season = client.parseDOM(item, 'SeasonNumber')[0]
                season = '%01d' % int(season)
                season = season.encode('utf-8')

                thumb = [
                    i for i in artwork
                    if client.parseDOM(i, 'Season')[0] == season
                ]
                try:
                    thumb = client.parseDOM(thumb[0], 'BannerPath')[0]
                except:
                    thumb = ''
                if thumb != '': thumb = '%s%s' % (self.tvdb_image, thumb)
                else: thumb = '0'
                thumb = client.replaceHTMLCodes(thumb)
                thumb = thumb.encode('utf-8')
                if thumb == '0': thumb = poster

                try:
                    seasoncount = counts[season]
                except:
                    seasoncount = None

                try:
                    total_seasons = len([i for i in counts if i != '0'])
                except:
                    total_seasons = None

                self.list.append({
                    'season': season,
                    'tvshowtitle': tvshowtitle,
                    'label': label,
                    'year': year,
                    'premiered': premiered,
                    'status': status,
                    'studio': studio,
                    'genre': genre,
                    'duration': duration,
                    'rating': rating,
                    'votes': votes,
                    'mpaa': mpaa,
                    'castandart': castandart,
                    'plot': plot,
                    'imdb': imdb,
                    'tmdb': tmdb,
                    'tvdb': tvdb,
                    'tvshowid': imdb,
                    'poster': poster,
                    'banner': banner,
                    'fanart': fanart,
                    'thumb': thumb,
                    'unaired': unaired,
                    'seasoncount': seasoncount,
                    'total_seasons': total_seasons
                })
                self.list = sorted(self.list, key=lambda k: int(k['season']))  # fix for TVDb new sort by ID
            except:
                log_utils.error()

        for item in episodes:
            try:
                title = client.parseDOM(item, 'EpisodeName')[0]
                title = client.replaceHTMLCodes(title)
                try:
                    title = title.encode('utf-8')
                except:
                    pass

                premiered = client.parseDOM(item, 'FirstAired')[0]
                if premiered == '' or '-00' in premiered: premiered = '0'
                premiered = client.replaceHTMLCodes(premiered)
                premiered = premiered.encode('utf-8')

                # Show Unaired items.
                unaired = ''
                if status.lower() == 'ended': pass
                elif premiered == '0':
                    unaired = 'true'
                    if self.showunaired != 'true': continue
                    pass
                elif int(re.sub(r'[^0-9]', '', str(premiered))) > int(
                        re.sub(r'[^0-9]', '', str(self.today_date))):
                    unaired = 'true'
                    if self.showunaired != 'true': continue

                season = client.parseDOM(item, 'SeasonNumber')[0]
                season = '%01d' % int(season)
                season = season.encode('utf-8')

                episode = client.parseDOM(item, 'EpisodeNumber')[0]
                episode = re.sub(r'[^0-9]', '', '%01d' % int(episode))
                episode = episode.encode('utf-8')

                if still_airing:
                    if int(still_airing) == int(season): is_airing = True
                    else: is_airing = False
                else: is_airing = False

                # ### episode IDS
                episodeIDS = {}
                if control.setting('enable.upnext') == 'true':
                    episodeIDS = trakt.getEpisodeSummary(
                        imdb, season, episode, full=False) or {}
                    if episodeIDS != {}:
                        episodeIDS = episodeIDS.get('ids', {})
##------------------

                try:
                    thumb = client.parseDOM(item, 'filename')[0]
                except:
                    thumb = ''
                if thumb != '': thumb = '%s%s' % (self.tvdb_image, thumb)
                else: thumb = '0'
                thumb = client.replaceHTMLCodes(thumb)
                thumb = thumb.encode('utf-8')

                if thumb != '0': pass
                elif fanart != '0':
                    thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
                elif poster != '0':
                    thumb = poster

                season_poster = [
                    i for i in artwork
                    if client.parseDOM(i, 'Season')[0] == season
                ]
                try:
                    season_poster = client.parseDOM(season_poster[0],
                                                    'BannerPath')[0]
                except:
                    season_poster = ''
                if season_poster != '':
                    season_poster = '%s%s' % (self.tvdb_image, season_poster)
                else:
                    season_poster = '0'
                season_poster = client.replaceHTMLCodes(season_poster)
                season_poster = season_poster.encode('utf-8')
                if season_poster == '0': season_poster = poster

                try:
                    rating = client.parseDOM(item, 'Rating')[0]
                except:
                    rating = ''
                if rating == '': rating = '0'
                rating = client.replaceHTMLCodes(rating)
                rating = rating.encode('utf-8')

                try:
                    director = client.parseDOM(item, 'Director')[0]
                except:
                    director = ''
                director = [x for x in director.split('|') if x != '']
                director = ' / '.join(director)
                if director == '': director = '0'
                director = client.replaceHTMLCodes(director)
                director = director.encode('utf-8')

                try:
                    writer = client.parseDOM(item, 'Writer')[0]
                except:
                    writer = ''
                writer = [x for x in writer.split('|') if x != '']
                writer = ' / '.join(writer)
                if writer == '': writer = '0'
                writer = client.replaceHTMLCodes(writer)
                writer = writer.encode('utf-8')

                # try:
                # local = client.parseDOM(item, 'id')[0]
                # local = [x for x in locals if '<id>%s</id>' % str(local) in x][0]
                # except: local = item

                # label = client.parseDOM(local, 'EpisodeName')[0]
                label = client.parseDOM(item, 'EpisodeName')[0]
                if label == '': label = '0'
                label = client.replaceHTMLCodes(label)
                label = label.encode('utf-8')

                # try: episodeplot = client.parseDOM(local, 'Overview')[0]
                try:
                    episodeplot = client.parseDOM(item, 'Overview')[0].encode(
                        'ascii', errors='ignore').decode('ascii',
                                                         errors='ignore')
                except:
                    episodeplot = ''
                if episodeplot == '': episodeplot = '0'
                if episodeplot == '0': episodeplot = plot
                episodeplot = client.replaceHTMLCodes(episodeplot)
                try:
                    episodeplot = episodeplot.encode('utf-8')
                except:
                    pass

                try:
                    seasoncount = counts[season]
                except:
                    seasoncount = None

                try:
                    total_seasons = len([i for i in counts if i != '0'])
                except:
                    total_seasons = None

                self.list.append({
                    'title': title,
                    'label': label,
                    'season': season,
                    'episode': episode,
                    'tvshowtitle': tvshowtitle,
                    'year': year,
                    'premiered': premiered,
                    'status': status,
                    'studio': studio,
                    'genre': genre,
                    'duration': duration,
                    'rating': rating,
                    'votes': votes,
                    'mpaa': mpaa,
                    'director': director,
                    'writer': writer,
                    'castandart': castandart,
                    'plot': episodeplot,
                    'imdb': imdb,
                    'tmdb': tmdb,
                    'tvdb': tvdb,
                    'poster': poster,
                    'banner': banner,
                    'fanart': fanart,
                    'thumb': thumb,
                    'season_poster': season_poster,
                    'unaired': unaired,
                    'seasoncount': seasoncount,
                    'counts': counts,
                    'total_seasons': total_seasons,
                    'is_airing': is_airing,
                    'episodeIDS': episodeIDS
                })
                self.list = sorted(self.list, key=lambda k: (int(k['season']), int(k['episode'])))  # fix for TVDb new sort by ID
                # meta = {}
                # meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'lang': self.lang, 'user': self.tvdb_key, 'item': item}

                # self.list.append(item)
                # metacache.insert(self.meta)

            except:
                log_utils.error()
        return self.list
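
One idiom worth isolating from the example above: season and episode numbers are normalized with '%01d' % int(x), which strips leading zeros so string comparisons line up across sources:

season = '%01d' % int('05')    # -> '5'
episode = '%01d' % int('12')   # -> '12'
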
Example #41
    def base_link(self):
        if not self._base_link:
            self._base_link = cache.get(self.__get_base_url, 120, 'https://%s' % self.domains[0])
        return self._base_link
Example #42
def cachesyncTVShows(timeout=0):
    indicators = cache.get(syncTVShows, timeout, control.setting('trakt.user').strip())
    return indicators
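
cachesyncTVShows is this page's core pattern at its simplest: re-run the expensive call only when the cached copy is older than `timeout` hours. A minimal in-memory stand-in for the SQLite-backed cache.get these examples assume (all names here are hypothetical):

import time

_store = {}  # (function name, args) -> (written-at timestamp, result)

def cache_get(function, timeout, *args):
    key = (function.__name__, args)
    hit = _store.get(key)
    if hit and (time.time() - hit[0]) < timeout * 3600:
        return hit[1]  # fresh enough: serve the cached copy
    result = function(*args)
    if result is not None:  # assumption: failed (None) results are not cached
        _store[key] = (time.time(), result)
    return result

A timeout of 0 can never be "fresh", so passing 0 forces a refresh -- the trick the Trakt examples below use to invalidate on account activity.
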
Example #43
def get_globo_americas():

    is_globosat_available = control.is_globosat_available()

    headers = {
        "Accept-Encoding": "gzip",
        "User-Agent": "Globo Play/0 (iPhone)",
        "x-tenant-id": "globo-play-us",
        'x-platform-id': 'web',
        'x-device-id': 'desktop',
        'x-client-version': '0.4.3'
    }

    now = datetime.datetime.utcnow() + datetime.timedelta(
        hours=control.get_current_brasilia_utc_offset())
    date = now.strftime('%Y-%m-%d')
    variables = urllib.quote_plus('{{"date":"{}"}}'.format(date))
    query = 'query%20getEpgBroadcastList%28%24date%3A%20Date%21%29%20%7B%0A%20%20broadcasts%20%7B%0A%20%20%20%20...broadcastFragment%0A%20%20%7D%0A%7D%0Afragment%20broadcastFragment%20on%20Broadcast%20%7B%0A%20%20mediaId%0A%20%20media%20%7B%0A%20%20%20%20serviceId%0A%20%20%20%20headline%0A%20%20%20%20thumb%28size%3A%20720%29%0A%20%20%20%20availableFor%0A%20%20%20%20title%20%7B%0A%20%20%20%20%20%20slug%0A%20%20%20%20%20%20headline%0A%20%20%20%20%20%20titleId%0A%20%20%20%20%7D%0A%20%20%7D%0A%20%20imageOnAir%28scale%3A%20X1080%29%0A%20%20transmissionId%0A%20%20geofencing%0A%20%20geoblocked%0A%20%20channel%20%7B%0A%20%20%20%20id%0A%20%20%20%20color%0A%20%20%20%20name%0A%20%20%20%20logo%28format%3A%20PNG%29%0A%20%20%7D%0A%20%20epgByDate%28date%3A%20%24date%29%20%7B%0A%20%20%20%20entries%20%7B%0A%20%20%20%20%20%20name%0A%20%20%20%20%20%20metadata%0A%20%20%20%20%20%20description%0A%20%20%20%20%20%20startTime%0A%20%20%20%20%20%20endTime%0A%20%20%20%20%20%20durationInMinutes%0A%20%20%20%20%20%20liveBroadcast%0A%20%20%20%20%20%20tags%0A%20%20%20%20%20%20contentRating%0A%20%20%20%20%20%20contentRatingCriteria%0A%20%20%20%20%20%20titleId%0A%20%20%20%20%20%20alternativeTime%0A%20%20%20%20%20%20title%7B%0A%20%20%20%20%20%20%20%20titleId%0A%20%20%20%20%20%20%20%20originProgramId%0A%20%20%20%20%20%20%20%20releaseYear%0A%20%20%20%20%20%20%20%20countries%0A%20%20%20%20%20%20%20%20directorsNames%0A%20%20%20%20%20%20%20%20castNames%0A%20%20%20%20%20%20%20%20genresNames%0A%20%20%20%20%20%20%20%20authorsNames%0A%20%20%20%20%20%20%20%20screenwritersNames%0A%20%20%20%20%20%20%20%20artDirectorsNames%0A%20%20%20%20%20%20%20%20cover%20%7B%0A%20%20%20%20%20%20%20%20%20%20landscape%28scale%3A%20X1080%29%0A%20%20%20%20%20%20%20%20%20%20portrait%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20poster%7B%0A%20%20%20%20%20%20%20%20%20%20web%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%20%20logo%20%7B%0A%20%20%20%20%20%20%20%20%20%20web%0A%20%20%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%20%20%7D%0A%7D'
    url = 'https://jarvis.globo.com/graphql?query={query}&variables={variables}'.format(
        query=query, variables=variables)
    control.log('GLOBOPLAY US - GET %s' % url)
    response = cache.get(requests.get,
                         24,
                         url,
                         headers=headers,
                         table='globoplay').json()
    control.log(response)
    broadcasts = response['data']['broadcasts']

    utc_now = int(control.to_timestamp(datetime.datetime.utcnow()))

    # thumb_usa = 'https://live-thumbs.video.globo.com/glbeua/snapshot/' + str(int(time.time()))

    result = []
    for broadcast in broadcasts:
        media_id = str(broadcast.get('mediaId', 0))

        if is_globosat_available and media_id != str(
                GLOBO_US_LIVE_SUBSCRIBER_MEDIA_ID):
            continue

        epg = next((epg for epg in broadcast['epgByDate']['entries']
                    if int(epg['startTime']) <= utc_now < int(epg['endTime'])),
                   {})

        control.log('EPG: %s' % epg)

        channel = broadcast.get('channel', {}) or {}

        logo = channel.get('logo')
        channel_name = channel.get('name', '').replace(
            'TV Globo',
            'Globo') + ' USA'  # broadcast.get('media', {}).get('headline', '')
        fanart = broadcast.get('imageOnAir')
        channel_id = channel.get('id', 0)
        service_id = broadcast.get('media', {}).get('serviceId', 0)
        # channel_slug = '%s-americas' % channel.get('name', '').lower().replace(' ', '')

        duration = epg.get('durationInMinutes', 0) * 60

        title_obj = epg.get('title', {}) or {}

        title = epg.get('name', '')
        description = title_obj.get('description') or epg.get(
            'description', '')
        fanart = title_obj.get('cover', {}).get('landscape', fanart) or fanart
        poster = title_obj.get('poster', {}).get('web')

        label = '[B]' + channel_name + '[/B]' + ('[I] - ' + title +
                                                 '[/I]' if title else '')

        program_datetime = datetime.datetime.utcfromtimestamp(
            epg.get('startTime', 0)) + util.get_utc_delta()
        next_start = datetime.datetime.utcfromtimestamp(epg.get(
            'endTime', 0)) + util.get_utc_delta()

        plotoutline = datetime.datetime.strftime(
            program_datetime, '%H:%M') + ' - ' + datetime.datetime.strftime(
                next_start, '%H:%M')

        description = '%s | %s' % (plotoutline, description)

        tags = [plotoutline]

        if epg.get('liveBroadcast', False):
            tags.append(control.lang(32004))

        tags.extend(epg.get('tags', []) or [])

        result.append({
            'handler': PLAYER_HANDLER,
            'method': 'play_stream',
            'IsPlayable': True,
            'id': media_id,
            'channel_id': channel_id,
            'service_id': service_id,
            'live': epg.get('liveBroadcast', False) or False,
            'livefeed': True,
            'label': label,
            'title': label,
            # 'title': title,
            'tvshowtitle': title,
            'plot': description,
            # 'plotoutline': plotoutline,
            # "tagline": plotoutline,
            'tag': tags,
            'duration': duration,
            'dateadded': datetime.datetime.strftime(program_datetime, '%Y-%m-%d %H:%M:%S'),
            'sorttitle': title,
            'studio': 'Globoplay Americas',
            'year': title_obj.get('releaseYear'),
            'country': title_obj.get('countries', []),
            'genre': title_obj.get('genresNames', []),
            'cast': title_obj.get('castNames', []),
            'director': title_obj.get('directorsNames', []),
            'writer': title_obj.get('screenwritersNames', []),
            'credits': title_obj.get('artDirectorsNames', []),
            'mpaa': epg.get('contentRating'),
            'art': {
                'icon': logo,
                'clearlogo': logo,
                'thumb': fanart,
                'fanart': fanart,
                'tvshow.poster': poster
            }
        })

    return result
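
The next(...) expression in the loop above is the schedule lookup: it yields the first EPG entry whose time window contains the current timestamp, or {} when nothing matches. Stand-alone, with hypothetical entries:

utc_now = 1700000100
entries = [
    {'name': 'News',   'startTime': 1699998000, 'endTime': 1700000000},
    {'name': 'Novela', 'startTime': 1700000000, 'endTime': 1700003600},
]
epg = next((e for e in entries
            if int(e['startTime']) <= utc_now < int(e['endTime'])), {})
# -> the 'Novela' entry; an empty dict if nothing is currently on air
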
Example #44
def request(url,
            close=True,
            redirect=True,
            error=False,
            verify=True,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30'):
    try:
        if not url:
            return
        handlers = []
        if proxy is not None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % proxy}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or close is not True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)
        try:
            import platform
            is_XBOX = platform.uname()[1] == 'XboxOne'
        except Exception:
            is_XBOX = False

        if verify is False and sys.version_info >= (2, 7, 12):
            try:
                import ssl
                ssl_context = ssl._create_unverified_context()
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                pass

        if verify is True and ((2, 7, 8) < sys.version_info <
                               (2, 7, 12) or is_XBOX):
            try:
                import ssl
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                pass
        if url.startswith('//'): url = 'http:' + url
        _headers = {}
        try:
            _headers.update(headers)
        except:
            pass
        if 'User-Agent' in _headers:
            pass
        elif mobile is True:
            _headers['User-Agent'] = cache.get(randommobileagent, 1)
        else:
            _headers['User-Agent'] = cache.get(randomagent, 1)
        if 'Referer' in _headers:
            pass
        elif referer is not None:
            _headers['Referer'] = referer
        if 'Accept-Language' not in _headers:
            _headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in _headers:
            pass
        elif XHR is True:
            _headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in _headers:
            pass
        elif cookie is not None:
            _headers['Cookie'] = cookie
        if 'Accept-Encoding' in _headers:
            pass
        elif compression and limit is None:
            _headers['Accept-Encoding'] = 'gzip'
        if redirect is False:

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, req, fp, code, msg, headers):
                    infourl = urllib.addinfourl(fp, headers,
                                                req.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)
            try:
                del _headers['Referer']
            except:
                pass
        if isinstance(post, dict):
            post = utils.byteify(post)
            post = urllib.urlencode(post)
        url = utils.byteify(url)
        request = urllib2.Request(url, data=post)
        _add_request_header(request, _headers)
        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:
            if response.code == 503:
                cf_result = response.read()
                try:
                    encoding = response.info().getheader('Content-Encoding')
                except:
                    encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(
                        fileobj=StringIO.StringIO(cf_result)).read()
                if 'cf-browser-verification' in cf_result:
                    while 'cf-browser-verification' in cf_result:
                        netloc = '%s://%s/' % (urlparse.urlparse(url).scheme,
                                               urlparse.urlparse(url).netloc)
                        ua = _headers['User-Agent']
                        cf = cache.get(cfcookie().get, 1, netloc, ua, timeout)
                        _headers['Cookie'] = cf
                        request = urllib2.Request(url, data=post)
                        _add_request_header(request, _headers)
                        try:
                            response = urllib2.urlopen(request,
                                                       timeout=int(timeout))
                            cf_result = 'Success'
                        except urllib2.HTTPError as response:
                            cache.remove(cfcookie().get, netloc, ua, timeout)
                            cf_result = response.read()
                else:
                    log_utils.log(
                        'Request-Error (%s): %s' % (str(response.code), url),
                        log_utils.LOGDEBUG)
                    if error is False:
                        return
            else:
                log_utils.log(
                    'Request-Error (%s): %s' % (str(response.code), url),
                    log_utils.LOGDEBUG)
                if error is False:
                    return
        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close is True:
                response.close()
            return result
        elif output == 'geturl':
            result = response.geturl()
            if close is True:
                response.close()
            return result
        elif output == 'headers':
            result = response.headers
            if close is True:
                response.close()
            return result
        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close is True:
                response.close()
            return result
        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = '0'
            response.close()
            return content
        if limit == '0':
            result = response.read(224 * 1024)
        elif limit is not None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)
        try:
            encoding = response.info().getheader('Content-Encoding')
        except:
            encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()
        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)
            _headers['Cookie'] = su
            request = urllib2.Request(url, data=post)
            _add_request_header(request, _headers)
            response = urllib2.urlopen(request, timeout=int(timeout))
            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)
            try:
                encoding = response.info().getheader('Content-Encoding')
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(
                    fileobj=StringIO.StringIO(result)).read()
        if 'Blazingfast.io' in result and 'xhr.open' in result:
            netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                  urlparse.urlparse(url).netloc)
            ua = _headers['User-Agent']
            _headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua,
                                           timeout)
            result = _basic_request(url,
                                    headers=_headers,
                                    post=post,
                                    timeout=timeout,
                                    limit=limit)
        if output == 'extended':
            try:
                response_headers = dict([(item[0].title(), item[1])
                                         for item in response.info().items()])
            except:
                response_headers = response.headers
            response_code = str(response.code)
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close is True:
                response.close()
            return result, response_code, response_headers, _headers, cookie
        else:
            if close is True:
                response.close()
            return result
    except Exception as e:
        log_utils.log('Request-Error: (%s) => %s' % (str(e), url),
                      log_utils.LOGDEBUG)
        return
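
Because NoRedirectHandler hands back the 3xx response instead of following it, redirect=False lets a caller read a redirect target directly. A hedged sketch (the host is illustrative):

resp_headers = request('https://example.com/short-link', redirect=False, output='headers')
location = resp_headers.get('Location') if resp_headers else None
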
Example #45
    def imdb_list(self, url):
        try:
            dupes = []

            for i in re.findall('date\[(\d+)\]', url):
                url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))

            def imdb_watchlist_id(url):
                return client.parseDOM(client.request(url).decode('iso-8859-1').encode('utf-8'), 'meta', ret='content', attrs = {'property': 'pageId'})[0]

            if url == self.imdbwatchlist_link:
                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist_link % url

            elif url == self.imdbwatchlist2_link:
                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist2_link % url

            result = client.request(url)

            result = result.replace('\n','')
            result = result.decode('iso-8859-1').encode('utf-8')

            items = client.parseDOM(result, 'div', attrs = {'class': 'lister-item mode-advanced'})
            items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
        except:
            return

        try:
            next = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'lister-page-next.+?'})

            if len(next) == 0:
                next = client.parseDOM(result, 'div', attrs = {'class': 'pagination'})[0]
                next = zip(client.parseDOM(next, 'a', ret='href'), client.parseDOM(next, 'a'))
                next = [i[0] for i in next if 'Next' in i[1]]

            next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next[0]).query)
            next = client.replaceHTMLCodes(next)
            next = next.encode('utf-8')
        except:
            next = ''

        for item in items:
            try:
                title = client.parseDOM(item, 'a')[1]
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                year = client.parseDOM(item, 'span', attrs = {'class': 'lister-item-year.+?'})
                year += client.parseDOM(item, 'span', attrs = {'class': 'year_type'})
                year = re.findall('(\d{4})', year[0])[0]
                year = year.encode('utf-8')

                if int(year) > int((self.datetime).strftime('%Y')): raise Exception()

                imdb = client.parseDOM(item, 'a', ret='href')[0]
                imdb = re.findall('(tt\d*)', imdb)[0]
                imdb = imdb.encode('utf-8')

                if imdb in dupes: raise Exception()
                dupes.append(imdb)

                try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
                except: poster = '0'
                if '/nopicture/' in poster: poster = '0'
                poster = re.sub('(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
                poster = client.replaceHTMLCodes(poster)
                poster = poster.encode('utf-8')

                rating = '0'
                try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
                except: pass
                try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
                except: rating = '0'
                try: rating = client.parseDOM(item, 'div', ret='data-value', attrs = {'class': '.*?imdb-rating'})[0]
                except: pass
                if rating == '' or rating == '-': rating = '0'
                rating = client.replaceHTMLCodes(rating)
                rating = rating.encode('utf-8')

                plot = '0'
                try: plot = client.parseDOM(item, 'p', attrs = {'class': 'text-muted'})[0]
                except: pass
                try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
                except: pass
                plot = plot.rsplit('<span>', 1)[0].strip()
                plot = re.sub('<.+?>|</.+?>', '', plot)
                if plot == '': plot = '0'
                plot = client.replaceHTMLCodes(plot)
                plot = plot.encode('utf-8')

                self.list.append({'title': title, 'originaltitle': title, 'year': year, 'rating': rating, 'plot': plot, 'imdb': imdb, 'tvdb': '0', 'poster': poster, 'next': next})
            except:
                pass

        return self.list
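
The re.sub above rewrites IMDb's image-sizing suffix so a larger poster is requested. With a hypothetical media URL:

import re
poster = 'https://m.media-amazon.com/images/M/MV5Babc._V1_UX67_CR0,0,67,98_AL_.jpg'
poster = re.sub(r'(?:_SX|_SY|_UX|_UY|_CR|_AL)(?:\d+|_).+?\.', '_SX500.', poster)
# -> 'https://m.media-amazon.com/images/M/MV5Babc._V1_SX500.jpg'
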
Example #46
    def userlists(self):
        episodes = episodesx.episodes(type=self.type, kids=self.kids)
        userlists = []

        try:
            if trakt.getTraktCredentialsInfo() == False: raise Exception()
            activity = trakt.getActivity()
        except:
            pass

        try:
            if trakt.getTraktCredentialsInfo() == False: raise Exception()
            self.list = []
            try:
                if activity > cache.timeout(episodes.trakt_user_list,
                                            self.traktlists_link,
                                            self.trakt_user):
                    raise Exception()
                userlists += cache.get(episodes.trakt_user_list, 3,
                                       self.traktlists_link, self.trakt_user)
            except:
                userlists += cache.get(episodes.trakt_user_list, 0,
                                       self.traktlists_link, self.trakt_user)
        except:
            pass

        try:
            if trakt.getTraktCredentialsInfo() == False: raise Exception()
            self.list = []
            try:
                if activity > cache.timeout(episodes.trakt_user_list,
                                            self.traktlikedlists_link,
                                            self.trakt_user):
                    raise Exception()
                userlists += cache.get(episodes.trakt_user_list, 3,
                                       self.traktlikedlists_link,
                                       self.trakt_user)
            except:
                userlists += cache.get(episodes.trakt_user_list, 0,
                                       self.traktlikedlists_link,
                                       self.trakt_user)
        except:
            pass

        self.list = []

        # Filter out duplicates: a list the user owns can appear under both /me/ and /<username>/.
        for i in range(len(userlists)):
            contains = False
            adapted = userlists[i]['url'].replace('/me/',
                                                  '/%s/' % self.trakt_user)
            for j in range(len(self.list)):
                if adapted == self.list[j]['url'].replace(
                        '/me/', '/%s/' % self.trakt_user):
                    contains = True
                    break
            if not contains:
                self.list.append(userlists[i])

        for i in range(0, len(self.list)):
            self.list[i].update({
                'image': 'traktlists.png',
                'action': self.parameterize('seasonList')
            })

        # Watchlist
        if trakt.getTraktCredentialsInfo():
            self.list.insert(
                0, {
                    'name': interface.Translation.string(32033),
                    'url': self.traktwatchlist_link,
                    'context': self.traktwatchlist_link,
                    'image': 'traktwatch.png',
                    'action': self.parameterize('seasons')
                })

        episodes.addDirectory(self.list, queue=True)
        return self.list
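
The try/except dance above implements conditional invalidation: cache.timeout is assumed to return when the cached entry was written, and if the Trakt account shows newer activity, the raise falls through to a zero-hour cache.get, which always re-fetches. The pattern in isolation (signatures as used above):

def fetch_user_lists(fetch, url, user, activity):
    try:
        if activity > cache.timeout(fetch, url, user):
            raise Exception()  # cache is stale relative to Trakt activity
        return cache.get(fetch, 3, url, user)  # serve if less than 3 hours old
    except:
        return cache.get(fetch, 0, url, user)  # 0 hours: always re-fetch
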
Example #47
    def get_programs_by_region(self, region):

        import scraper_vod as scraper

        return cache.get(scraper.get_programs_by_region, 1, region)
Example #48
    def run(self, anime_id, ep_id, url, synop, start, gen, epnum, epcount):
        control.sleep(200)

        self.anime_id = int(anime_id)
        self.episode_id = int(ep_id)

        item = control.item(path=url)

        try:
            c = cache.get(masterani.get_anime_details, 3, self.anime_id)

            ctype = c['type']
            ctype = 'movie' if int(ctype) == 2 else 'episode'

            tvshowtitle = c['title']
            poster = c['poster']
            coverlink = "http://cdn.masterani.me/poster/" + poster
            print(coverlink)

            item.setArt({
                'icon': coverlink,
                'thumb': coverlink,
                'poster': coverlink,
                'tvshow.poster': coverlink,
                'season.poster': coverlink
            })

            e = c['episodes'][self.episode_id]
            title = e['info']['title']
            season2options = [
                ': Season 2', ' Season 2', ': 2nd Season', ': Second Season',
                ' 2nd Season', ' Second Season', ': Part 2', ' Part 2',
                ': Part II', ' Part II'
            ]
            season3options = [
                ': Season 3', ' Season 3', ': 3rd Season', ': Third Season',
                ' 3rd Season', ' Third Season', ': Part 3', ' Part 3',
                ': Part III', ' Part III'
            ]
            season4options = [
                ': Season 4', ' Season 4', ': 4th Season', ': Fourth Season',
                ' 4th Season', ' Fourth Season', ': Part 4', ' Part 4',
                ': Part IV', ' Part IV'
            ]
            season5options = [
                ': Season 5', ' Season 5', ': 5th Season', ': Fifth Season',
                ' 5th Season', ' Fifth Season', ': Part 5', ' Part 5',
                ': Part V', ' Part V'
            ]
            season = 1
            for option in season2options:
                if option in tvshowtitle:
                    tvshowtitle = tvshowtitle.replace(option, "")
                    season = 2
            for option in season3options:
                if option in tvshowtitle:
                    tvshowtitle = tvshowtitle.replace(option, "")
                    season = 3
            for option in season4options:
                if option in tvshowtitle:
                    tvshowtitle = tvshowtitle.replace(option, "")
                    season = 4
            for option in season5options:
                if option in tvshowtitle:
                    tvshowtitle = tvshowtitle.replace(option, "")
                    season = 5
            episode = e['info']['episode']
            if ctype == 'movie': title = c['title']
            if title is None: title = "Episode %s" % episode

            item.setInfo(type="video",
                         infoLabels={
                             'tvshowtitle': title,
                             'title': tvshowtitle,
                             'episode': int(episode),
                             'season': int(season),
                             'mediatype': ctype
                         })

            #year = e['info']['aired'].split("-", 1)[0]
            #plot = e['info']['description']

            if 'movie' in ctype:
                year = start
                plot = synop
                genre = gen
                item.setInfo(type="video",
                             infoLabels={
                                 'year': year,
                                 'plot': plot,
                                 'genre': genre
                             })
            else:
                year = e['info']['aired'].split("-", 1)[0]
                plot = e['info']['description']
                item.setInfo(type="video",
                             infoLabels={
                                 'year': year,
                                 'plot': plot,
                                 'genre': gen
                             })

        except:
            pass

        item.setProperty('Video', 'true')
        item.setProperty('IsPlayable', 'true')

        #kitsuyesno = control.setting("kitsu.yesno")
        #try:
        #if kitsuyesno = "true":
        #kitsu.kitsu(tvshowtitle, epnum, epcount)
        #except:
        #pass

        self.play(url, item)

        self.playback_checker()

        pass
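The four option lists above enumerate every "Season N" / "Part N" suffix by hand. The same stripping compresses into one regular expression; this is a hypothetical helper, not part of the addon, and the pattern can false-positive on titles that genuinely end in a word followed by "Season":

import re

_ROMAN = {'II': 2, 'III': 3, 'IV': 4, 'V': 5}
_ORDINAL = {'2nd': 2, 'second': 2, '3rd': 3, 'third': 3,
            '4th': 4, 'fourth': 4, '5th': 5, 'fifth': 5}

def split_season(title):
    # Strip a trailing "Season N" / "Nth Season" / "Part N" suffix and
    # return (clean_title, season), defaulting to season 1.
    m = re.search(r':?\s+(?:Season\s+(\d)|(\w+)\s+Season|Part\s+(\d|[IVX]+))$',
                  title, re.IGNORECASE)
    if not m:
        return title, 1
    if m.group(1):                      # "Season 2"
        season = int(m.group(1))
    elif m.group(2):                    # "2nd Season", "Second Season"
        season = _ORDINAL.get(m.group(2).lower(), 1)
    elif m.group(3).isdigit():          # "Part 3"
        season = int(m.group(3))
    else:                               # "Part II"
        season = _ROMAN.get(m.group(3).upper(), 1)
    return title[:m.start()], season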
Example #49
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            # 'data' must exist even for direct URLs, since it is consulted
            # again further down when selecting episode links
            data = {}

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                if 'tvshowtitle' in data:
                    url = '%s/film/%s-season-%01d/watching.html' % (
                        self.base_link, cleantitle.geturl(
                            data['tvshowtitle']), int(data['season']))
                    url = client.request(url, timeout='10', output='geturl')

                    if url == None:
                        url = self.searchShow(data['tvshowtitle'],
                                              data['season'])

                    if url == None:
                        t = cache.get(self.getImdbTitle, 900, data['imdb'])
                        if data['tvshowtitle'] != t:
                            url = self.searchShow(t, data['season'])

                else:
                    url = '%s/film/%s/watching.html' % (
                        self.base_link, cleantitle.geturl(data['title']))
                    url = client.request(url, timeout='10', output='geturl')

                    if url == None:
                        url = self.searchMovie(data['title'])

                    if url == None:
                        t = cache.get(self.getImdbTitle, 900, data['imdb'])
                        if data['title'] != t:
                            url = self.searchMovie(t)

                if url == None: raise Exception()

            else:
                url = urlparse.urljoin(self.base_link, url)

            r = client.request(url, timeout='10')
            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            if 'tvshowtitle' in data:
                ep = data['episode']
                links = client.parseDOM(r,
                                        'a',
                                        attrs={'episode-data': ep},
                                        ret='player-data')
            else:
                links = client.parseDOM(r, 'a', ret='player-data')

            for link in links:
                if '123movieshd' in link or 'seriesonline' in link:
                    r = client.request(link, timeout='10')
                    r = re.findall('(https:.*?redirector.*?)[\'\"]', r)

                    for i in r:
                        try:
                            sources.append({
                                'source':
                                'gvideo',
                                'quality':
                                directstream.googletag(i)[0]['quality'],
                                'language':
                                'en',
                                'url':
                                i,
                                'direct':
                                True,
                                'debridonly':
                                False
                            })
                        except:
                            pass
                else:
                    try:
                        host = re.findall(
                            '([\w]+[.][\w]+)$',
                            urlparse.urlparse(link.strip().lower()).netloc)[0]
                        if not host in hostDict: raise Exception()
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')

                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': link,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass

            return sources
        except:
            return sources
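The `host` extraction in the fallback branch keeps only the last two dot-separated labels of the netloc, which is how this scraper names its source (note that it yields 'co.uk' for a 'something.co.uk' domain). As a standalone sketch, with the import written to run on either Python version:

import re
try:
    from urlparse import urlparse    # Python 2, as used above
except ImportError:
    from urllib.parse import urlparse

def host_of(link):
    # Trailing 'name.tld' of the link's hostname, mirroring
    # re.findall('([\w]+[.][\w]+)$', netloc)[0] in the snippet above.
    netloc = urlparse(link.strip().lower()).netloc
    m = re.search(r'([\w]+[.][\w]+)$', netloc)
    return m.group(1) if m else None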
Example #50
0
# -*- coding: utf-8 -*-

from resources.lib.modules import client
from resources.lib.modules import control
from resources.lib.modules import cache

import re
import os
import xbmcvfs

_settingsFile = os.path.join(control.addonPath, 'resources', 'settings.xml')
if xbmcvfs.exists(_settingsFile):
    with open(_settingsFile, 'r+') as file:
        data = file.read()
        line = re.findall(r'(<setting id="domain".+?/>)', data)[0]
        paste = cache.get(client.request, 12,
                          'https://pastebin.com/raw/upztzeGt')
        new_data = data.replace(line, paste)
        file.seek(0)
        file.truncate()
        file.write(new_data)
        control.refresh()
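This bootstrap swaps the `<setting id="domain" .../>` element for whatever the pastebin currently serves. The same rewrite with `re.sub` and a callable replacement avoids both the `IndexError` when the element is missing and backslash-escape surprises in the fetched text (a sketch, not the addon's code):

import re

def replace_domain_setting(xml_text, new_element):
    # Substitute the first <setting id="domain" .../> element; a callable
    # replacement keeps any '\' in new_element from being interpreted.
    pattern = r'<setting id="domain".+?/>'
    if not re.search(pattern, xml_text):
        return xml_text
    return re.sub(pattern, lambda m: new_element, xml_text, count=1)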
Example #51
0
def play(anime_id, episode_id):

    episode_link = episode_id
    episode_number = episode_link.split("/", 6)[6]

    l1 = "Fetching video."
    progressDialog.create(heading="Masterani Redux", line1="Fetching video.")
    progressDialog.update(0, line1=l1, line3="Loading hosts.")

    hosts, ep_id = getlinks(episode_link)[:2]
    #linkforcover = getcover(episode_link)

    if hosts is None:
        xbmcgui.Dialog().ok("Masterani Redux", "Something went wrong.",
                            "Please try again later.")
        return

    # Remove Tiwi.kiwi (broken) and any hosts disabled in settings.
    # Filtering into a new list avoids the classic bug of calling
    # list.remove() while iterating, which skips elements.

    disabled = ['Tiwi.kiwi']

    host_settings = {
        'host.mp4upload': 'MP4Upload',
        'host.youtube': 'YouTube',
        'host.stream.moe': 'Stream.moe',
        'host.drive.g': 'Drive.g',
        'host.vidstreaming': 'Vidstreaming',
        'host.rapidvideo': 'Rapidvideo',
        'host.aika': 'Aika',
        'host.streamango': 'Streamango',
        'host.openload': 'Openload'
    }

    for setting, name in host_settings.items():
        if control.setting(setting) == "false":
            disabled.append(name)

    hosts = [e for e in hosts if not any(d in e['name'] for d in disabled)]

    progressDialog.update(25, line1=l1, line3="Loading episodes urls.")
    progressDialog.update(50, line1=l1, line3="Picking nose.")

    hostlist = []

    videos = sorted(hosts,
                    key=lambda k: (-int(k['type']), int(k['quality'])),
                    reverse=True)
    print videos

    autoplay = control.setting("autoplay.enabled")
    maxq = control.setting("autoplay.maxquality")
    subdub = control.setting("autoplay.subdub")

    videoCounter = 0
    autoplayHost = 0
    hostCounter = 0

    while videoCounter < len(videos):
        try:
            hostname = videos[videoCounter]['name']
            subs = 'Sub' if videos[videoCounter]['type'] == 1 else 'Dub'
            quality = videos[videoCounter]['quality']
            if 'true' in autoplay:
                if subdub == subs and int(quality) <= int(maxq):
                    hostlist.append("%s | %s | %s" % (quality, subs, hostname))
                    autoplayHost = hostCounter
                    break
                hostCounter += 1
            else:
                hostlist.append("%s | %s | %s" % (quality, subs, hostname))
            videoCounter += 1
        except:
            videos.remove(videos[videoCounter])

    if len(hostlist) == 0:
        progressDialog.close()
        xbmcgui.Dialog().ok("Masterani Redux", "No supported hosts found.")
        return

    if 'false' in autoplay:
        hostDialog = control.dialog.select("Select host.", hostlist)
    else:
        if len(hostlist) == 0:
            progressDialog.close()
            xbmcgui.Dialog().ok("Masterani Redux",
                                "No hosts found for autoplay.",
                                "Change addon settings and try again.")
            hostDialog = -1
        else:
            hostDialog = autoplayHost

    if hostDialog == -1:
        progressDialog.close()
        control.execute('dialog.close(okdialog)')
        return

    hostname = videos[hostDialog]['name']
    hostlink = videos[hostDialog]['url']
    hostquality = videos[hostDialog]['quality']
    embed_id = videos[hostDialog]['embed_id']

    c = cache.get(masterani.get_anime_details, 3, anime_id)
    syn = c['plot'].encode('utf-8')
    print syn
    sty = c['premiered'].split("-", 1)[0]
    print sty
    gen = str(c['genre'])
    print gen
    epcount = c['episode_count']

    progressDialog.update(75, line1=l1, line3="Loading video.")

    #Resolve Links
    mp4 = getdirect(hostname, hostlink, hostquality, embed_id)
    progressDialog.close()
    MAPlayer().run(anime_id, ep_id, mp4, syn, sty, gen, episode_number,
                   epcount)
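The while-loop above does double duty: it builds the selection list for manual mode and finds the first acceptable host for autoplay. The autoplay half reduces to a single scan (an illustrative helper; `type == 1` meaning subbed is taken from the snippet itself):

def pick_autoplay(videos, subdub, maxq):
    # Index of the first host whose sub/dub flag and quality satisfy the
    # autoplay settings, or None when nothing qualifies.
    for idx, video in enumerate(videos):
        subs = 'Sub' if video['type'] == 1 else 'Dub'
        if subs == subdub and int(video['quality']) <= int(maxq):
            return idx
    return None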
Example #52
0
    def imdb_list(self, url):
        try:
            dupes = []

            for i in re.findall('date\[(\d+)\]', url):
                url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))

            if url == self.imdbwatchlist_link:
                def imdb_watchlist_id(url):
                    return re.compile('/export[?]list_id=(ls\d*)').findall(client.request(url))[0]
                url = cache.get(imdb_watchlist_id, 8640, url)
                url = self.imdblist_link % url

            result = str(client.request(url))

            result = result.replace('\n','')
            result = result.decode('iso-8859-1').encode('utf-8')

            items = client.parseDOM(result, 'tr', attrs = {'class': '.+?'})
            items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
        except:
            return

        try:
            next = client.parseDOM(result, 'span', attrs = {'class': 'pagination'})
            next += client.parseDOM(result, 'div', attrs = {'class': 'pagination'})
            name = client.parseDOM(next[-1], 'a')[-1]
            if 'laquo' in name: raise Exception()
            next = client.parseDOM(next, 'a', ret='href')[-1]
            next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next).query)
            next = client.replaceHTMLCodes(next)
            next = next.encode('utf-8')
        except:
            next = ''

        for item in items:
            try:
                try: title = client.parseDOM(item, 'a')[1]
                except: pass
                try: title = client.parseDOM(item, 'a', attrs = {'onclick': '.+?'})[-1]
                except: pass
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')

                year = client.parseDOM(item, 'span', attrs = {'class': 'year_type'})[0]
                year = re.compile('(\d{4})').findall(year)[-1]
                year = year.encode('utf-8')

                if int(year) > int((self.datetime).strftime('%Y')): raise Exception()

                imdb = client.parseDOM(item, 'a', ret='href')[0]
                try: imdb = client.parseDOM(item, 'a', ret='href')[1]
                except: pass
                imdb = 'tt' + re.sub('[^0-9]', '', imdb.rsplit('tt', 1)[-1])
                imdb = imdb.encode('utf-8')

                if imdb in dupes: raise Exception()
                dupes.append(imdb)

                poster = '0'
                try: poster = client.parseDOM(item, 'img', ret='src')[0]
                except: pass
                try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
                except: pass
                if not ('_SX' in poster or '_SY' in poster): poster = '0'
                poster = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
                poster = client.replaceHTMLCodes(poster)
                poster = poster.encode('utf-8')

                try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
                except: rating = '0'
                try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
                except: rating = '0'
                if rating == '' or rating == '-': rating = '0'
                rating = client.replaceHTMLCodes(rating)
                rating = rating.encode('utf-8')

                plot = '0'
                try: plot = client.parseDOM(item, 'span', attrs = {'class': 'outline'})[0]
                except: pass
                try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
                except: pass
                plot = plot.rsplit('<span>', 1)[0].strip()
                if plot == '': plot = '0'
                plot = client.replaceHTMLCodes(plot)
                plot = plot.encode('utf-8')

                self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': '0', 'duration': '0', 'rating': rating, 'votes': '0', 'mpaa': '0', 'cast': '0', 'plot': plot, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'poster': poster, 'banner': '0', 'fanart': '0', 'next': next})
            except:
                pass

        return self.list
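The `date[N]` substitution at the top of `imdb_list` turns placeholder tokens in a stored URL into concrete dates N days in the past. Isolated, with the same regex and date format (`now` stands in for the class's `self.datetime`):

import datetime
import re

def expand_date_tokens(url, now=None):
    # Replace every date[N] token with the date N days before `now`,
    # formatted %Y-%m-%d as the IMDb list URLs expect.
    now = now or datetime.datetime.utcnow()
    for n in re.findall(r'date\[(\d+)\]', url):
        day = (now - datetime.timedelta(days=int(n))).strftime('%Y-%m-%d')
        url = url.replace('date[%s]' % n, day)
    return url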
Example #53
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            imdb = data['imdb'] ; year = data['year']

            headers = {}

            if 'tvshowtitle' in data:
                url = '%s/tv-show/%s/season/%01d/episode/%01d' % (self.base_link, cleantitle.geturl(title), int(data['season']), int(data['episode']))
                result = client.request(url, headers=headers, timeout='10')

                if result == None:
                    t = cache.get(self.getOriginalTitle, 900, imdb)
                    if title != t:
                        url = '%s/tv-show/%s/season/%01d/episode/%01d' % (self.base_link, cleantitle.geturl(t), int(data['season']), int(data['episode']))
                        result = client.request(url, headers=headers, timeout='10')
            else:
                url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(title))
                result = client.request(url, headers=headers, timeout='10')

                if result == None:
                    url += '-%s' % year
                    result = client.request(url, headers=headers, timeout='10')

                if result == None:
                    t = cache.get(self.getOriginalTitle, 900, imdb)
                    if title != t:
                        url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(t))
                        result = client.request(url, headers=headers, timeout='10')


            result = client.parseDOM(result, 'title')[0]

            if '%TITLE%' in result: raise Exception()

            r = client.request(url, headers=headers, output='extended', timeout='10')

            if not imdb in r[0]: raise Exception()


            cookie = r[4] ; headers = r[3] ; result = r[0]

            try: auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except: auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
            headers['Cookie'] = cookie
            headers['Referer'] = url


            u = '/ajax/jne.php'
            u = urlparse.urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
            post = urllib.urlencode(post)

            c = client.request(u, post=post, headers=headers, XHR=True, output='cookie', error=True)

            headers['Cookie'] = cookie + '; ' + c

            r = client.request(u, post=post, headers=headers, XHR=True)
            r = str(json.loads(r))
            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

            for i in r:
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
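The interesting part of this scraper is the authenticated AJAX call: `elid` is a base64-encoded unix timestamp, while `token` and `idEl` are scraped from the page. The payload assembly, rewritten with Python 3 names (`base64.encodestring` and `urllib.quote` above are the Python 2 equivalents):

import base64
import time
from urllib.parse import quote

def build_embed_post(action, idEl, token):
    # 'action' is getEpisodeEmb or getMovieEmb depending on the URL;
    # elid encodes the current unix time, as in the snippet above.
    elid = quote(base64.b64encode(str(int(time.time())).encode()).decode())
    return {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}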
Example #54
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            url = referer = url.replace('/watching.html', '')

            try:
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None

            vid_id = re.findall('-(\d+)', url)[-1]

            quality = cache.get(self.onemovies_info, 9000, vid_id)[1].lower()
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd': quality = 'HD'
            else: quality = 'SD'

            try:
                headers = {
                    'X-Requested-With': 'XMLHttpRequest',
                    'Referer': url
                }

                u = urlparse.urljoin(self.base_link, self.server_link % vid_id)

                r = self.request(u, headers=headers, post=None)

                r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
                r = zip(client.parseDOM(r, 'a', ret='onclick'),
                        client.parseDOM(r, 'a'))
                r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

                if not episode == None:
                    r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
                else:
                    r = [i[0] for i in r]

                r = [re.findall('(\d+),(\d+)', i) for i in r]
                r = [i[0][:2] for i in r if len(i) > 0]

                links = []

                links += [{
                    'source': 'gvideo',
                    'url': self.direct_link + i[1],
                    'direct': True
                } for i in r if 2 <= int(i[0]) <= 11]

                links += [{
                    'source': 'openload.co',
                    'url': self.embed_link + i[1],
                    'direct': False
                } for i in r if i[0] == '14']

                links += [{
                    'source': 'videowood.tv',
                    'url': self.embed_link + i[1],
                    'direct': False
                } for i in r if i[0] == '12']

                head = '|' + urllib.urlencode(headers)

                for i in links:
                    sources.append({
                        'source':
                        i['source'],
                        'quality':
                        quality,
                        'provider':
                        'Onemovies',
                        'url':
                        urlparse.urljoin(self.base_link, i['url']) + head,
                        'direct':
                        i['direct'],
                        'debridonly':
                        False
                    })
            except:
                pass

            return sources
        except:
            return sources
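The three list comprehensions above encode Onemovies' numeric server ids: 2 through 11 are direct Google-video servers, 14 is openload.co, and 12 is videowood.tv. The same table as one function (a sketch of the mapping, nothing more):

def classify_server(server_id):
    # Map a numeric server id to (source name, is_direct), or None for
    # ids the snippet above ignores.
    sid = int(server_id)
    if 2 <= sid <= 11:
        return ('gvideo', True)
    if sid == 14:
        return ('openload.co', False)
    if sid == 12:
        return ('videowood.tv', False)
    return None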
Example #55
0
	def items_list(self, i):
		try:
			item = cache.get(trakt.SearchAll, 3, urllib.quote_plus(i[1]), i[2], True)[0]

			content = item.get('movie')
			if not content: content = item.get('show')
			item = content

			title = item.get('title')
			title = client.replaceHTMLCodes(title)

			originaltitle = title

			year = item.get('year', 0)
			year = re.sub('[^0-9]', '', str(year))

			imdb = item.get('ids', {}).get('imdb', '0')
			imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))

			tmdb = str(item.get('ids', {}).get('tmdb', 0))

			premiered = item.get('released', '0')
			try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
			except: premiered = '0'

			genre = item.get('genres', [])
			genre = [x.title() for x in genre]
			genre = ' / '.join(genre).strip()
			if not genre: genre = '0'

			duration = str(item.get('runtime', 0))

			rating = item.get('rating', '0')
			if not rating or rating == '0.0': rating = '0'

			votes = item.get('votes', '0')
			try: votes = str(format(int(votes), ',d'))
			except: pass

			mpaa = item.get('certification', '0')
			if not mpaa: mpaa = '0'

			tagline = item.get('tagline', '0')

			plot = item.get('overview', '0')

			people = trakt.getPeople(imdb, 'movies')
			director = writer = ''
			cast = []

			if people:
				if 'crew' in people and 'directing' in people['crew']:
					director = ', '.join([director['person']['name'] for director in people['crew']['directing'] if director['job'].lower() == 'director'])
				if 'crew' in people and 'writing' in people['crew']:
					writer = ', '.join([writer['person']['name'] for writer in people['crew']['writing'] if writer['job'].lower() in ['writer', 'screenplay', 'author']])
				for person in people.get('cast', []):
					cast.append({'name': person['person']['name'], 'role': person['character']})
				cast = [(person['name'], person['role']) for person in cast]

			try:
				if self.lang == 'en' or self.lang not in item.get('available_translations', [self.lang]): raise Exception()

				trans_item = trakt.getMovieTranslation(imdb, self.lang, full = True)

				title = trans_item.get('title') or title
				tagline = trans_item.get('tagline') or tagline
				plot = trans_item.get('overview') or plot
			except:
				pass

			self.list.append({'title': title, 'originaltitle': originaltitle, 'year': year, 'premiered': premiered, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'imdb': imdb, 'tmdb': tmdb, 'poster': '0', 'channel': i[0]})
		except:
			pass
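The people-handling in the middle of `items_list` builds the cast list twice, once as dicts and then re-packed as tuples. Collapsed into one pass with the same job filters (an illustrative helper, not the addon's API):

def credits_from_people(people):
    # (director, writer, cast) from a trakt people payload; cast entries
    # are (name, character) pairs, as the snippet ultimately stores them.
    crew = people.get('crew', {}) if people else {}
    director = ', '.join(p['person']['name'] for p in crew.get('directing', [])
                         if p['job'].lower() == 'director')
    writer = ', '.join(p['person']['name'] for p in crew.get('writing', [])
                       if p['job'].lower() in ('writer', 'screenplay', 'author'))
    cast = [(p['person']['name'], p['character'])
            for p in (people.get('cast', []) if people else [])]
    return director, writer, cast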
Example #56
0
	def it_list(self, url, result=None):

		try:
			if result is None: result = cache.get(client.request, 0, url)

			if result.strip().startswith('#EXTM3U') and '#EXTINF' in result:
				result = re.compile('#EXTINF:.+?\,(.+?)\n(.+?)\n', re.MULTILINE|re.DOTALL).findall(result)
				result = ['<item><title>%s</title><link>%s</link></item>' % (i[0], i[1]) for i in result]
				result = ''.join(result)

			try: r = base64.b64decode(result)
			except: r = ''
			if '</link>' in r: result = r

			result = str(result)

			info = result.split('<item>')[0].split('<dir>')[0]

			try: vip = re.findall('<poster>(.+?)</poster>', info)[0]
			except: vip = '0'

			try: image = re.findall('<thumbnail>(.+?)</thumbnail>', info)[0]
			except: image = '0'

			try: fanart = re.findall('<fanart>(.+?)</fanart>', info)[0]
			except: fanart = '0'

			api_data = client.request(base64.b64decode('aHR0cDovL3RleHR1cGxvYWRlci5jb20vZHI3NmcvcmF3'), timeout='5')
			tmdb_api = re.compile('<api>(.+?)</api>').findall(api_data)[0]

			items = re.compile('((?:<item>.+?</item>|<dir>.+?</dir>|<plugin>.+?</plugin>|<info>.+?</info>|<name>[^<]+</name><link>[^<]+</link><thumbnail>[^<]+</thumbnail><mode>[^<]+</mode>|<name>[^<]+</name><link>[^<]+</link><thumbnail>[^<]+</thumbnail><date>[^<]+</date>))', re.MULTILINE|re.DOTALL).findall(result)
		except:
			return

		for item in items:

			try:
				regdata = re.compile('(<regex>.+?</regex>)', re.MULTILINE|re.DOTALL).findall(item)
				regdata = ''.join(regdata)
				reglist = re.compile('(<listrepeat>.+?</listrepeat>)', re.MULTILINE|re.DOTALL).findall(regdata)
				regdata = quote_plus(regdata)

				reghash = hashlib.md5()
				for i in regdata: reghash.update(str(i))
				reghash = str(reghash.hexdigest())

				item = item.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace('<tmdb_data>true</tmdb_data>','<tmdb_data>all</tmdb_data>')

				try: meta = re.findall('<meta>(.+?)</meta>', item)[0]
				except: meta = '0'

				try: imdb = re.findall('<imdb>(.+?)</imdb>', meta)[0]
				except: imdb = '0'

				try: tmdb_get = re.findall('<tmdb_data>(.+?)</tmdb_data>', meta)[0]
				except: tmdb_get = '0'

				try: tvshowtitle = re.findall('<tvshowtitle>(.+?)</tvshowtitle>', item)[0]
				except: tvshowtitle = '0'

				item = re.sub('<regex>.+?</regex>','', item)
				item = re.sub('<sublink></sublink>|<sublink\s+name=(?:\'|\").*?(?:\'|\")></sublink>','', item)
				item = re.sub('<link></link>','', item)

				try: meta = re.findall('<meta>(.+?)</meta>', item)[0]
				except: meta = '0'

				if tmdb_get.lower() in ('all', 'data', 'images'):
					try:
						url_api = 'https://api.themoviedb.org/3/movie/' + imdb + '?api_key=' + tmdb_api
						item_json = client.request(url_api, timeout='5')

						item_json = json.loads(item_json)
					except: pass

					if tmdb_get.lower() in ('all', 'data'):
						try:
							if item_json['original_title']:
								title = item_json['original_title']
								name = title
							else:
								name = re.sub('<meta>.+?</meta>','', item)
								try: name = re.findall('<title>(.+?)</title>', name)[0]
								except: name = re.findall('<name>(.+?)</name>', name)[0]
								try: title = name
								except: title = '0'
								if title == '0' and not tvshowtitle == '0': title = tvshowtitle

						except:
							name = re.sub('<meta>.+?</meta>','', item)
							try: name = re.findall('<title>(.+?)</title>', name)[0]
							except: name = re.findall('<name>(.+?)</name>', name)[0]
							try: title = name
							except: title = '0'

							if title == '0' and not tvshowtitle == '0': title = tvshowtitle

						if '<title></title>' in item:
							item = item.replace('<title></title>','<title>'+title+'</title>')

						try:
							if item_json['release_date']:
								year = item_json['release_date']; year = year.split('-')[0]; name = title + ' (' + year + ')'
							else: 
								try: year = re.findall('<year>(.+?)</year>', meta)[0]
								except: year = '0'
						except:
							try: year = re.findall('<year>(.+?)</year>', meta)[0]
							except: year = '0'

						if '<year></year>' in item:
							item = item.replace('<year></year>','<year>'+year+'</year>')
					else:
						name = re.sub('<meta>.+?</meta>','', item)
						try: name = re.findall('<title>(.+?)</title>', name)[0]
						except: name = re.findall('<name>(.+?)</name>', name)[0]
						try: title = name
						except: title = '0'
						if '<title></title>' in item:
							item = item.replace('<title></title>','<title>'+title+'</title>')
						try: year = re.findall('<year>(.+?)</year>', meta)[0]
						except: year = '0'
						if '<year></year>' in item:
							item = item.replace('<year></year>','<year>'+year+'</year>')

					if tmdb_get.lower() in ('all', 'images'):

						try:
							if item_json['backdrop_path']:
								fanart2 = 'https://image.tmdb.org/t/p/original/' + item_json['backdrop_path']
							else: 
								try: fanart2 = re.findall('<fanart>(.+?)</fanart>', item)[0]
								except: fanart2 = fanart
						except:
							try: fanart2 = re.findall('<fanart>(.+?)</fanart>', item)[0]
							except: fanart2 = fanart

						try:
							if item_json['poster_path']:
								image2 = 'https://image.tmdb.org/t/p/original/' + item_json['poster_path']
							else: 
								try: image2 = re.findall('<thumbnail>(.+?)</thumbnail>', item)[0]
								except: image2 = image
						except:
							try: image2 = re.findall('<thumbnail>(.+?)</thumbnail>', item)[0]
							except: image2 = image
					else:
						try: fanart2 = re.findall('<fanart>(.+?)</fanart>', item)[0]
						except: fanart2 = fanart
						try: image2 = re.findall('<thumbnail>(.+?)</thumbnail>', item)[0]
						except: image2 = image
				else:

					name = re.sub('<meta>.+?</meta>','', item)
					try: name = re.findall('<title>(.+?)</title>', name)[0]
					except: name = re.findall('<name>(.+?)</name>', name)[0]

					try: title = re.findall('<title>(.+?)</title>', meta)[0]
					except: title = '0'

					if title == '0' and not tvshowtitle == '0': title = tvshowtitle

					try: year = re.findall('<year>(.+?)</year>', meta)[0]
					except: year = '0'

					try: image2 = re.findall('<thumbnail>(.+?)</thumbnail>', item)[0]
					except: image2 = image

					try: fanart2 = re.findall('<fanart>(.+?)</fanart>', item)[0]
					except: fanart2 = fanart

				try: date = re.findall('<date>(.+?)</date>', item)[0]
				except: date = ''
				if re.search(r'\d+', date):
					name += ' [COLOR red] Updated %s[/COLOR]' % date

				try: meta = re.findall('<meta>(.+?)</meta>', item)[0]
				except: meta = '0'
				try: url = re.findall('<link>(.+?)</link>', item)[0]
				except: url = '0'
				url = url.replace('>search<', '><preset>search</preset>%s<' % meta)
				url = '<preset>search</preset>%s' % meta if url == 'search' else url
				url = url.replace('>searchsd<', '><preset>searchsd</preset>%s<' % meta)
				url = '<preset>searchsd</preset>%s' % meta if url == 'searchsd' else url
				url = re.sub('<sublink></sublink>|<sublink\s+name=(?:\'|\").*?(?:\'|\")></sublink>','', url)

				if item.startswith('<item>'): action = 'play'
				elif item.startswith('<plugin>'): action = 'plugin'
				elif item.startswith('<info>') or url == '0': action = '0'
				else: action = 'directory'
				if action == 'play' and reglist: action = 'xdirectory'

				if not regdata == '':
					self.hash.append({'regex': reghash, 'response': regdata})
					url += '|regex=%s' % reghash

				if action in ['directory', 'xdirectory', 'plugin']:
					folder = True
				else:
					folder = False

				try: content = re.findall('<content>(.+?)</content>', meta)[0]
				except: content = '0'
				if content == '0': 
					try: content = re.findall('<content>(.+?)</content>', item)[0]
					except: content = '0'
				if not content == '0': content += 's'

				if 'tvshow' in content and not url.strip().endswith('.xml'):
					url = '<preset>tvindexer</preset><url>%s</url><thumbnail>%s</thumbnail><fanart>%s</fanart>%s' % (url, image2, fanart2, meta)
					action = 'tvtuner'

				if 'tvtuner' in content and not url.strip().endswith('.xml'):
					url = '<preset>tvtuner</preset><url>%s</url><thumbnail>%s</thumbnail><fanart>%s</fanart>%s' % (url, image2, fanart2, meta)
					action = 'tvtuner'

				try: tvdb = re.findall('<tvdb>(.+?)</tvdb>', meta)[0]
				except: tvdb = '0'

				try: premiered = re.findall('<premiered>(.+?)</premiered>', meta)[0]
				except: premiered = '0'

				try: season = re.findall('<season>(.+?)</season>', meta)[0]
				except: season = '0'

				try: episode = re.findall('<episode>(.+?)</episode>', meta)[0]
				except: episode = '0'

				self.list.append({'name': name, 'vip': vip, 'url': url, 'action': action, 'folder': folder, 'poster': image2, 'banner': '0', 'fanart': fanart2, 'content': content, 'imdb': imdb, 'tvdb': tvdb, 'tmdb': '0', 'title': title, 'originaltitle': title, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'season': season, 'episode': episode})

			except:
				pass

		regex.insert(self.hash)

		return self.list
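`it_list` repeats the pattern `re.findall('<tag>(.+?)</tag>', text)[0]` wrapped in try/except dozens of times. One helper captures it (hypothetical; the parser above inlines this lookup for every field):

import re

def tag(text, name, default='0'):
    # First <name>...</name> value in text, or `default` when absent -
    # the lookup this parser repeats for every field.
    m = re.search(r'<%s>(.+?)</%s>' % (name, name), text, re.DOTALL)
    return m.group(1) if m else default

# e.g. tag(item, 'thumbnail', image) replaces a three-line try/except.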
Example #57
0
def request(
    url,
    close=True,
    redirect=True,
    error=False,
    proxy=None,
    post=None,
    headers=None,
    mobile=False,
    XHR=False,
    limit=None,
    referer=None,
    cookie=None,
    compression=True,
    output="",
    timeout="30",
):
    try:
        if not url:
            return

        handlers = []

        if not proxy == None:
            handlers += [
                urllib2.ProxyHandler({"http": "%s" % (proxy)}),
                urllib2.HTTPHandler,
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if output == "cookie" or output == "extended" or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies),
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

        if (2, 7, 8) < sys.version_info < (2, 7, 12):
            try:
                import ssl

                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass

        if url.startswith("//"):
            url = "http:" + url

        _headers = {}
        try:
            _headers.update(headers)
        except:
            pass
        if "User-Agent" in _headers:
            pass
        elif not mobile == True:
            # headers['User-Agent'] = agent()
            _headers["User-Agent"] = cache.get(randomagent, 1)
        else:
            _headers["User-Agent"] = "Apple-iPhone/701.341"
        if "Referer" in _headers:
            pass
        elif referer is not None:
            _headers["Referer"] = referer
        if not "Accept-Language" in _headers:
            _headers["Accept-Language"] = "en-US"
        if "X-Requested-With" in _headers:
            pass
        elif XHR == True:
            _headers["X-Requested-With"] = "XMLHttpRequest"
        if "Cookie" in _headers:
            pass
        elif not cookie == None:
            _headers["Cookie"] = cookie
        if "Accept-Encoding" in _headers:
            pass
        elif compression and limit is None:
            _headers["Accept-Encoding"] = "gzip"

        if redirect == False:

            # old implementation
            # class NoRedirection(urllib2.HTTPErrorProcessor):
            #    def http_response(self, request, response): return response

            # opener = urllib2.build_opener(NoRedirection)
            # opener = urllib2.install_opener(opener)

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, req, fp, code, msg, headers):
                    infourl = urllib.addinfourl(fp, headers, req.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)

            try:
                del _headers["Referer"]
            except:
                pass

        if isinstance(post, dict):
            post = utils.byteify(post)
            post = urllib.urlencode(post)

        url = utils.byteify(url)

        request = urllib2.Request(url, data=post)
        _add_request_header(request, _headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            if response.code == 503:
                cf_result = response.read(5242880)
                try:
                    encoding = response.info().getheader("Content-Encoding")
                except:
                    encoding = None
                if encoding == "gzip":
                    cf_result = gzip.GzipFile(
                        fileobj=StringIO.StringIO(cf_result)
                    ).read()

                if "cf-browser-verification" in cf_result:

                    netloc = "%s://%s" % (
                        urlparse.urlparse(url).scheme,
                        urlparse.urlparse(url).netloc,
                    )

                    if not netloc.endswith("/"):
                        netloc += "/"

                    ua = _headers["User-Agent"]

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    _headers["Cookie"] = cf

                    request = urllib2.Request(url, data=post)
                    _add_request_header(request, _headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))
                else:
                    log_utils.log(
                        "Request-Error (%s): %s" % (str(response.code), url),
                        log_utils.LOGDEBUG,
                    )
                    if error == False:
                        return
            else:
                log_utils.log(
                    "Request-Error (%s): %s" % (str(response.code), url),
                    log_utils.LOGDEBUG,
                )
                if error == False:
                    return

        if output == "cookie":
            try:
                result = "; ".join(["%s=%s" % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close == True:
                response.close()
            return result

        elif output == "geturl":
            result = response.geturl()
            if close == True:
                response.close()
            return result

        elif output == "headers":
            result = response.headers
            if close == True:
                response.close()
            return result

        elif output == "chunk":
            try:
                content = int(response.headers["Content-Length"])
            except:
                content = 2049 * 1024
            if content < (2048 * 1024):
                return
            result = response.read(16 * 1024)
            if close == True:
                response.close()
            return result

        elif output == "file_size":
            try:
                content = int(response.headers["Content-Length"])
            except:
                content = "0"
            response.close()
            return content

        if limit == "0":
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try:
            encoding = response.info().getheader("Content-Encoding")
        except:
            encoding = None
        if encoding == "gzip":
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if "sucuri_cloudproxy_js" in result:
            su = sucuri().get(result)

            _headers["Cookie"] = su

            request = urllib2.Request(url, data=post)
            _add_request_header(request, _headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == "0":
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try:
                encoding = response.info().getheader("Content-Encoding")
            except:
                encoding = None
            if encoding == "gzip":
                result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if "Blazingfast.io" in result and "xhr.open" in result:
            netloc = "%s://%s" % (
                urlparse.urlparse(url).scheme,
                urlparse.urlparse(url).netloc,
            )
            ua = _headers["User-Agent"]
            _headers["Cookie"] = cache.get(bfcookie().get, 168, netloc, ua, timeout)

            result = _basic_request(
                url, headers=_headers, post=post, timeout=timeout, limit=limit
            )

        if output == "extended":
            try:
                response_headers = dict(
                    [(item[0].title(), item[1]) for item in response.info().items()]
                )
            except:
                response_headers = response.headers
            response_code = str(response.code)
            try:
                cookie = "; ".join(["%s=%s" % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close == True:
                response.close()
            return (result, response_code, response_headers, _headers, cookie)
        else:
            if close == True:
                response.close()
            return result
    except Exception as e:
        log_utils.log("Request-Error: (%s) => %s" % (str(e), url), log_utils.LOGDEBUG)
        return
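Stripped of the urllib2 plumbing, the 503 branch above is a simple retry protocol: detect Cloudflare's browser-verification page, obtain a clearance cookie (cached for 168 hours via `cache.get`), and replay the original request once. The shape of it, with `fetch` and `get_clearance` as stand-ins for `urlopen` and `cfcookie().get`:

def request_with_cf_retry(fetch, get_clearance, url, headers):
    # fetch(url, headers) -> (status, body);
    # get_clearance(url, user_agent) -> clearance cookie string.
    status, body = fetch(url, headers)
    if status == 503 and 'cf-browser-verification' in body:
        headers = dict(headers)
        headers['Cookie'] = get_clearance(url, headers.get('User-Agent'))
        status, body = fetch(url, headers)
    return status, body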
Example #58
0
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            compression=True,
            output='',
            timeout='30',
            verifySsl=True,
            flare=True,
            ignoreErrors=None,
            as_bytes=False):
    try:
        if not url: return None
        if url.startswith('//'): url = 'http:' + url
        try:
            url = py_tools.ensure_text(url, errors='ignore')
        except:
            pass

        if isinstance(post, dict):
            post = bytes(urlencode(post), encoding='utf-8')
        elif isinstance(post, str) and py_tools.isPY3:
            post = bytes(post, encoding='utf-8')

        handlers = []
        if proxy is not None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if output == 'cookie' or output == 'extended' or close is not True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)

        if not verifySsl and sys.version_info >= (2, 7, 12):
            try:
                import ssl
                ssl_context = ssl._create_unverified_context()
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                log_utils.error()

        if verifySsl and ((2, 7, 8) < sys.version_info < (2, 7, 12)):
            # try:
            # import ssl
            # ssl_context = ssl.create_default_context()
            # ssl_context.check_hostname = False
            # ssl_context.verify_mode = ssl.CERT_NONE
            # handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            # opener = urllib2.build_opener(*handlers)
            # urllib2.install_opener(opener)
            # except:
            # log_utils.error()
            try:
                import ssl
                try:
                    import _ssl
                    CERT_NONE = _ssl.CERT_NONE
                except Exception:
                    CERT_NONE = ssl.CERT_NONE
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                urllib2.install_opener(opener)
            except:
                log_utils.error()

        if not isinstance(headers, dict): headers = {}

        if 'User-Agent' in headers: pass
        elif mobile is not True:
            headers['User-Agent'] = cache.get(randomagent, 12)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'

        if 'Referer' in headers: pass
        elif referer is not None: headers['Referer'] = referer

        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'

        if 'X-Requested-With' in headers: pass
        elif XHR: headers['X-Requested-With'] = 'XMLHttpRequest'

        if 'Cookie' in headers: pass
        elif cookie: headers['Cookie'] = cookie

        if 'Accept-Encoding' in headers: pass
        elif compression and limit is None: headers['Accept-Encoding'] = 'gzip'

        # if redirect is False:
        # class NoRedirection(urllib2.HTTPErrorProcessor):
        # def http_response(self, request, response):
        # return response
        # opener = urllib2.build_opener(NoRedirection)
        # urllib2.install_opener(opener)
        # try: del headers['Referer']
        # except: pass

        if redirect is False:

            class NoRedirectHandler(urllib2.HTTPRedirectHandler):
                def http_error_302(self, reqst, fp, code, msg, head):
                    infourl = addinfourl(fp, head, reqst.get_full_url())
                    infourl.status = code
                    infourl.code = code
                    return infourl

                http_error_300 = http_error_302
                http_error_301 = http_error_302
                http_error_303 = http_error_302
                http_error_307 = http_error_302

            opener = urllib2.build_opener(NoRedirectHandler())
            urllib2.install_opener(opener)
            try:
                del headers['Referer']
            except:
                pass

        req = urllib2.Request(url, data=post)
        _add_request_header(req, headers)
        try:
            response = urllib2.urlopen(req, timeout=int(timeout))
        except HTTPError as error_response:  # if HTTPError, using "as response" will be reset after entire Exception code runs and throws error around line 247 as "local variable 'response' referenced before assignment", re-assign it
            response = error_response
            try:
                ignore = ignoreErrors and (int(response.code) == ignoreErrors
                                           or int(
                                               response.code) in ignoreErrors)
            except:
                ignore = False

            if not ignore:
                if response.code in [
                        301, 307, 308, 503, 403
                ]:  # 403:Forbidden added 3/3/21 for cloudflare, fails on bad User-Agent
                    cf_result = response.read(5242880)
                    log_utils.log('cf_result = %s' % str(cf_result),
                                  level=log_utils.LOGDEBUG)
                    try:
                        encoding = response.headers["Content-Encoding"]
                    except:
                        encoding = None
                    if encoding == 'gzip':
                        cf_result = gzip.GzipFile(
                            fileobj=StringIO(cf_result)).read()

                    if flare and 'cloudflare' in str(response.info()).lower():
                        log_utils.log(
                            'client module calling cfscrape: url=%s' % url,
                            level=log_utils.LOGDEBUG)
                        try:
                            from fenomscrapers.modules import cfscrape
                            if isinstance(post, dict): data = post
                            else:
                                try:
                                    data = parse_qs(post)
                                except:
                                    data = None
                            scraper = cfscrape.CloudScraper()
                            if response.code == 403:  # possible bad User-Agent in headers, let cfscrape assign
                                response = scraper.request(
                                    method='GET' if post is None else 'POST',
                                    url=url,
                                    data=data,
                                    timeout=int(timeout))
                            else:
                                response = scraper.request(
                                    method='GET' if post is None else 'POST',
                                    url=url,
                                    headers=headers,
                                    data=data,
                                    timeout=int(timeout))
                            result = response.content
                            flare = 'cloudflare'  # Used below
                            try:
                                cookies = response.request._cookies
                            except:
                                log_utils.error()
                            if response.status_code == 403:  # if cfscrape server still responds with 403
                                log_utils.log(
                                    'cfscrape-Error url=(%s): %s' %
                                    (url, 'HTTP Error 403: Forbidden'),
                                    __name__,
                                    level=log_utils.LOGDEBUG)
                                return None
                        except:
                            log_utils.error()

                    elif 'cf-browser-verification' in cf_result:
                        netloc = '%s://%s' % (urlparse(url).scheme,
                                              urlparse(url).netloc)
                        ua = headers['User-Agent']
                        cf = cache.get(cfcookie().get, 168, netloc, ua,
                                       timeout)
                        headers['Cookie'] = cf
                        req = urllib2.Request(url, data=post)
                        _add_request_header(req, headers)
                        response = urllib2.urlopen(req, timeout=int(timeout))
                    else:
                        if error is False:
                            log_utils.error('Request-Error url=(%s)' % url)
                            return None
                else:
                    if error is False:
                        log_utils.error('Request-Error url=(%s)' % url)
                        return None
                    elif error is True and response.code in [
                            401, 404, 405
                    ]:  # no point in continuing after this exception runs with these response.code's
                        try:
                            response_headers = dict(
                                [(item[0].title(), item[1])
                                 for item in list(response.info().items())]
                            )  # behaves differently 18 to 19. 18 I had 3 "Set-Cookie:" it combined all 3 values into 1 key. In 19 only the last keys value was present.
                        except:
                            log_utils.error()
                            response_headers = response.headers
                        return (str(response), str(response.code),
                                response_headers)

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close is True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close is True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close is True: response.close()
            return result

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            try:
                result = response.read(16 * 1024)
            except:
                result = response  # testing
            if close is True: response.close()
            return result

        elif output == 'file_size':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = '0'
            if close is True: response.close()
            return content

        if flare != 'cloudflare':
            if limit == '0': result = response.read(224 * 1024)
            elif limit is not None: result = response.read(int(limit) * 1024)
            else: result = response.read(5242880)

        try:
            encoding = response.headers["Content-Encoding"]
        except:
            encoding = None

        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO(result)).read()
        if not as_bytes:
            result = py_tools.ensure_text(result, errors='ignore')

        if not as_bytes and 'sucuri_cloudproxy_js' in result:  # who da f**k?
            su = sucuri().get(result)
            headers['Cookie'] = su
            req = urllib2.Request(url, data=post)
            _add_request_header(req, headers)
            response = urllib2.urlopen(req, timeout=int(timeout))
            if limit == '0': result = response.read(224 * 1024)
            elif limit is not None: result = response.read(int(limit) * 1024)
            else: result = response.read(5242880)
            try:
                encoding = response.headers["Content-Encoding"]
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO(result)).read()

        if not as_bytes and 'Blazingfast.io' in result and 'xhr.open' in result:  # BlazingFast JS challenge page
            netloc = '%s://%s' % (urlparse(url).scheme, urlparse(url).netloc)
            ua = headers['User-Agent']
            headers['Cookie'] = cache.get(bfcookie().get, 168, netloc, ua,
                                          timeout)
            result = _basic_request(url,
                                    headers=headers,
                                    post=post,
                                    timeout=timeout,
                                    limit=limit)

        if output == 'extended':
            try:
                # Kodi 18 (py2) merged repeated "Set-Cookie" headers under one key;
                # Kodi 19 (py3) keeps only the last value, so normalize names with .title()
                response_headers = dict(
                    [(item[0].title(), item[1])
                     for item in list(response.info().items())])
            except:
                log_utils.error()
                response_headers = response.headers
            try:
                response_code = str(response.code)
            except:
                # a CFScrape/Requests response exposes status_code instead of code
                response_code = str(response.status_code)
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close is True: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close is True: response.close()
            return result

    except:
        log_utils.error('Request-Error url=(%s)' % url)
        return None
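
Several branches in the example above repeat the same two steps: read the body with a size cap, gunzip it when the server sent Content-Encoding: gzip, and normalize header names with .title() so lookups behave the same across Kodi 18 (py2) and 19 (py3). A minimal standalone sketch of that pattern, assuming plain Python 3 urllib.request rather than the add-on's compatibility shims (fetch and its header values are illustrative, not part of the original helper):

import gzip
import io
from urllib.request import Request, urlopen

def fetch(url, timeout=30):
    # hypothetical stripped-down fetch; header values are illustrative
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0', 'Accept-Encoding': 'gzip'})
    response = urlopen(req, timeout=timeout)
    result = response.read(5242880)  # same 5 MB read cap as the helper above
    # gunzip only when the server declares a gzip-compressed body
    if response.headers.get('Content-Encoding') == 'gzip':
        result = gzip.GzipFile(fileobj=io.BytesIO(result)).read()
    # title-case the header names; dict() keeps the last duplicate, matching
    # the py3 behavior noted in the comments above
    response_headers = dict((k.title(), v) for k, v in response.getheaders())
    return result.decode('utf-8', errors='ignore'), str(response.status), response_headers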
Example #59
0
def request(url,
            close=True,
            redirect=True,
            error=False,
            proxy=None,
            post=None,
            headers=None,
            mobile=False,
            XHR=False,
            limit=None,
            referer=None,
            cookie=None,
            output='',
            timeout=None):
    try:
        handlers = []
        if timeout == '' or timeout == None: timeout = '30'

        if not proxy == None:
            handlers += [
                urllib2.ProxyHandler({'http': '%s' % (proxy)}),
                urllib2.HTTPHandler
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

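        # a cookie jar is only needed when cookies must be returned ('cookie'/'extended')
        # or the response is kept open for the caller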
        if output == 'cookie' or output == 'extended' or not close == True:
            cookies = cookielib.LWPCookieJar()
            handlers += [
                urllib2.HTTPHandler(),
                urllib2.HTTPSHandler(),
                urllib2.HTTPCookieProcessor(cookies)
            ]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)

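        # Python 2.7.9+ verifies TLS certificates by default; build an unverified
        # context so scraper targets with bad certs keep working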
        try:
            if sys.version_info < (2, 7, 9): raise Exception()
            import ssl
            ssl_context = ssl.create_default_context()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            handlers += [urllib2.HTTPSHandler(context=ssl_context)]
            opener = urllib2.build_opener(*handlers)
            opener = urllib2.install_opener(opener)
        except:
            pass

        try:
            headers = dict(headers)  # copy so the caller's dict is never mutated
        except:
            headers = {}
        if 'User-Agent' in headers:
            pass
        elif not mobile == True:
            headers['User-Agent'] = cache.get(randomagent, 1)
        else:
            headers['User-Agent'] = 'Apple-iPhone/701.341'
        if 'Referer' in headers:
            pass
        elif referer == None:
            headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme,
                                               urlparse.urlparse(url).netloc)
        else:
            headers['Referer'] = referer
        if not 'Accept-Language' in headers:
            headers['Accept-Language'] = 'en-US'
        if 'X-Requested-With' in headers:
            pass
        elif XHR == True:
            headers['X-Requested-With'] = 'XMLHttpRequest'
        if 'Cookie' in headers:
            pass
        elif not cookie == None:
            headers['Cookie'] = cookie

        if redirect == False:

            class NoRedirection(urllib2.HTTPErrorProcessor):
                def http_response(self, request, response):
                    return response

                # urllib2 aliases https_response to http_response at class creation,
                # so the override must be re-aliased or https redirects still fire
                https_response = http_response

            # note: build_opener here replaces any proxy/cookie handlers installed above
            opener = urllib2.build_opener(NoRedirection)
            opener = urllib2.install_opener(opener)

            try:
                del headers['Referer']
            except:
                pass

        request = urllib2.Request(url, data=post, headers=headers)

        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

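            # Cloudflare's browser-verification challenge answers with HTTP 503,
            # so a 503 body is sniffed before treating it as a real error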
            if response.code == 503:
                cf_result = response.read(5242880)
                try:
                    encoding = response.info().getheader('Content-Encoding')
                except:
                    encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(
                        fileobj=StringIO.StringIO(cf_result)).read()

                if 'cf-browser-verification' in cf_result:

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme,
                                          urlparse.urlparse(url).netloc)

                    ua = headers['User-Agent']

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post, headers=headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))

                elif error == False:
                    return

            elif error == False:
                return

        if output == 'cookie':
            try:
                result = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                result = cf
            except:
                pass
            if close == True: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close == True: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close == True: response.close()
            return result

        elif output == 'chunk':
            try:
                content = int(response.headers['Content-Length'])
            except:
                content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close == True: response.close()
            return result

        if limit == '0':
            result = response.read(224 * 1024)
        elif not limit == None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try:
            encoding = response.info().getheader('Content-Encoding')
        except:
            encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()

        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            headers['Cookie'] = su

            request = urllib2.Request(url, data=post, headers=headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif not limit == None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try:
                encoding = response.info().getheader('Content-Encoding')
            except:
                encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(
                    fileobj=StringIO.StringIO(result)).read()

        if output == 'extended':
            response_headers = response.headers
            response_code = str(response.code)
            try:
                cookie = '; '.join(
                    ['%s=%s' % (i.name, i.value) for i in cookies])
            except:
                pass
            try:
                cookie = cf
            except:
                pass
            if close == True: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close == True: response.close()
            return result
    except:
        return
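
The redirect=False branch above leans on a urllib2 detail: HTTPErrorProcessor is what promotes non-2xx responses into exceptions (which the redirect handler then intercepts), so returning the response untouched suppresses redirects entirely. A minimal Python 3 sketch of the same pattern, assuming urllib.request in place of urllib2 (the test URL is illustrative):

import urllib.request

class NoRedirection(urllib.request.HTTPErrorProcessor):
    # hand every response back untouched so 30x replies are never followed
    def http_response(self, request, response):
        return response

    https_response = http_response

opener = urllib.request.build_opener(NoRedirection)
response = opener.open('http://httpbin.org/redirect/1', timeout=30)
print(response.status)                    # 302 rather than the followed 200
print(response.headers.get('Location'))   # where the redirect would have gone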
Example #60
0
    def addSearch(self, url):
        try:
            skip = 0
            if '|SPLITER|' in url:
                keep = url
                url, matcher = url.split('|SPLITER|')
                skip = 1
                section = 1
            elif '|SECTION|' in url:
                matcher = url.replace('|SECTION|', '')
                section = 1
            else:
                section = 0
        except:
            section = 0

        link = 'https://t2k-repository.ml/IT/IT/tRmKGjTbr/xmls/clowns.xml'

        if skip == 0:
            if section == 1:
                keyboard = control.keyboard('', control.lang(30702))
                keyboard.doModal()
                if not keyboard.isConfirmed(): return
                url = keyboard.getText()
                keep = url + '|SPLITER|' + matcher
            else:
                if url is None or url == '':
                    keyboard = control.keyboard('', control.lang(30702))
                    keyboard.doModal()
                    if not keyboard.isConfirmed(): return
                    url = keyboard.getText()

        if url is None or url == '': return

        entry = keep if section == 1 else url  # 'entry' avoids shadowing the builtin input()

        # seed the search-history table, then rewrite it with an order-preserving dedupe
        def search(): return [entry]
        query = cache.get(search, 600000000, table='rel_srch')

        def search(): return [x for y, x in enumerate(query + [entry]) if x not in (query + [entry])[:y]]
        cache.get(search, 0, table='rel_srch')

        links = client.request(link)
        links = re.findall('<link>(.+?)</link>', links)
        if section == 0:
            links = [i for i in links if str(i).startswith('http')]
        else:
            links = [i for i in links if str(i).startswith('http') and matcher.lower() in str(i).lower()]

        self.list = []
        threads = [workers.Thread(self.it_list, link) for link in links]
        [i.start() for i in threads]
        [i.join() for i in threads]

        self.list = [i for i in self.list if url.lower() in i['name'].lower()]

        for i in self.list:
            try:
                name = ''
                if not i['vip'] in ['No-Name TV']: name += '[B]%s[/B] | ' % i['vip'].upper()
                name += i['name']
                i.update({'name': name})
            except:
                pass

        for i in self.list: i.update({'content': 'videos'})
        self.addDirectory(self.list)
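
The pair of cache.get calls in addSearch is a small search-history idiom: the first call, with an effectively infinite TTL, returns whatever is already stored in the rel_srch table (seeding it with the new entry on first run); the second, with a zero TTL, forces a refresh that appends the entry through an order-preserving dedupe. The dedupe itself is worth isolating; a minimal self-contained sketch (dedupe_keep_order is an illustrative name, not part of the add-on):

def dedupe_keep_order(items):
    # keep the first occurrence of each item, preserving insertion order;
    # same comprehension the snippet above builds inline
    return [x for y, x in enumerate(items) if x not in items[:y]]

history = ['alien', 'dune', 'alien', 'heat']
print(dedupe_keep_order(history + ['dune']))  # ['alien', 'dune', 'heat']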