Example #1
	def handle(self, link, item, download = False, popups = False, close = True, select = False, cloud = False):
		try:
			if item and 'direct' in item and item['direct'] == True:
				return link
			else:
				try: import urlresolver # Do not import at the start of the script, otherwise UrlResolver will be loaded every time handler.py is imported, drastically slowing down menus.
				except: pass

				# First check if a debrid resolver is available.
				resolvers = [i() for i in urlresolver.relevant_resolvers(order_matters = True) if i.isUniversal()]
				if len(resolvers) == 0: resolvers = [i() for i in urlresolver.relevant_resolvers(order_matters = True, include_universal = False) if 'rapidgator.net' in i.domains]
				for i in resolvers:
					try:
						i.login()
						host, media_id = i.get_host_and_id(link)
						linkNew = i.get_media_url(host, media_id)
						if linkNew: return debrid.Debrid.addResult(link = linkNew)
					except: pass

				# If not supported by debrid, try normal resolvers.
				media = urlresolver.HostedMediaFile(url = link, include_disabled = True, include_universal = False)
				if media.valid_url() == True:
					return debrid.Debrid.addResult(link = media.resolve(allow_popups = popups))
				else:
					return debrid.Debrid.addResult(link = None)
		except:
			return debrid.Debrid.addResult(link = None)
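
Note: the debrid branch above is the usual three-step urlresolver flow: login(), get_host_and_id(link), then get_media_url(host, media_id). A minimal standalone sketch of just that flow, assuming urlresolver is installed and the link is any hoster URL (no Kodi plumbing):

    import urlresolver

    def resolve_with_debrid(link):
        # Universal resolvers are the debrid services (Real-Debrid, etc.).
        for klass in urlresolver.relevant_resolvers(order_matters=True):
            if not klass.isUniversal():
                continue
            resolver = klass()
            try:
                resolver.login()
                host, media_id = resolver.get_host_and_id(link)
                resolved = resolver.get_media_url(host, media_id)
                if resolved:
                    return resolved
            except Exception:
                pass  # fall through to the next universal resolver
        return None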
Example #2
    def getConstants(self):
        self.itemProperty = 'plugin.video.flixnet.container.items'

        self.metaProperty = 'plugin.video.flixnet.container.meta'

        from resources.lib.sources import sources as sources
        from resources.lib.sources_de import sources as sources_de
        from resources.lib.sources_fr import sources as sources_fr
        from resources.lib.sources_pt import sources as sources_pt
        from resources.lib.sources_pl import sources as sources_pl
        from resources.lib.sources_ko import sources as sources_ko

        self.sourceDict = sources() + sources_de() + sources_fr() + sources_pt() + sources_pl() + sources_ko()

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
            self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
            self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
        except:
            self.hostDict = []

        self.hostprDict = ['1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to', 'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com', 'nitroflare.com', 'turbobit.net', 'uploadrocket.net']

        self.hostcapDict = ['hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co', 'oload.tv', 'thevideo.me', 'vidup.me', 'streamin.to', 'torba.se']

        self.hosthqDict = ['openload.io', 'openload.co', 'oload.tv', 'thevideo.me', 'rapidvideo.com', 'raptu.com', 'filez.tv']

        self.hostblockDict = []

        self.debridDict = debrid.debridDict()
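
The three hostDict comprehensions above are an idiom that recurs throughout these examples: drop resolvers with a '*' wildcard domain, flatten the per-resolver domain lists, then deduplicate while preserving the resolver priority order that order_matters=True established. A small self-contained demonstration with dummy data (the functools import is only needed on Python 3; reduce is a builtin on Python 2):

    from functools import reduce

    domains = [['ok.ru', 'OK.ru'], ['vk.com'], ['ok.ru']]
    flat = [d.lower() for d in reduce(lambda x, y: x + y, domains)]
    # Keep only the first occurrence of each domain, preserving order
    # (unlike list(set(...)), which loses it).
    deduped = [x for y, x in enumerate(flat) if x not in flat[:y]]
    print(deduped)  # ['ok.ru', 'vk.com']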
Example #3
    def __get_resolvers(self, include_disabled, include_universal,
                        include_popups):
        if include_universal is None:
            include_universal = common.get_setting('allow_universal') == "true"

        if include_popups is None:
            include_popups = common.get_setting('allow_popups') == "true"

        klasses = urlresolver.relevant_resolvers(
            self._domain,
            include_universal=include_universal,
            include_popups=include_popups,
            include_external=True,
            include_disabled=include_disabled,
            order_matters=True)
        resolvers = []
        for klass in klasses:
            if klass in resolver_cache:
                common.logger.log_debug('adding resolver from cache: %s' %
                                        klass)
                resolvers.append(resolver_cache[klass])
            else:
                common.logger.log_debug('adding resolver to cache: %s' % klass)
                resolver_cache[klass] = klass()
                resolvers.append(resolver_cache[klass])
        return resolvers
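
resolver_cache here is a module-level dict keyed by resolver class, so each resolver is instantiated at most once per process; later calls reuse the cached instance. The same memoization in isolation (a sketch, with resolver_cache assumed module-level as in the source):

    resolver_cache = {}

    def get_instance(klass):
        # Construct on first use, then reuse the cached instance.
        if klass not in resolver_cache:
            resolver_cache[klass] = klass()
        return resolver_cache[klass]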
Example #4
 def get_hosts(self):
     import urlresolver
     domains = []
     for r in urlresolver.relevant_resolvers(include_universal=False):
         domains += r.domains
     # Deduplicate once after the loop; order is not significant here.
     return list(set(domains))
Example #5
    def getConstants(self):
        self.itemProperty = 'plugin.video.temptv.container.items'

        self.metaProperty = 'plugin.video.temptv.container.meta'

        from resources.lib.sources import sources

        self.sourceDict = sources()

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [i.domains for i in self.hostDict if '*' not in i.domains]
            self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
            self.hostDict = [x for y, x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
        except:
            self.hostDict = []

        self.hostprDict = [
            '1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to', 'uploaded.net', 'uploaded.to',
            'uploadgig.com', 'ul.to', 'filefactory.com', 'nitroflare.com', 'turbobit.net', 'uploadrocket.net',
            'multiup.org']

        self.hostcapDict = [
            'hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co', 'oload.tv', 'thevideo.me',
            'vidup.me', 'streamin.to', 'torba.se', 'flashx.tv', 'vshare.eu', 'vshare.io']

        self.hosthqDict = [
            'gvideo', 'google.com', 'openload.io', 'openload.co', 'oload.tv', 'thevideo.me',
            'rapidvideo.com', 'raptu.com', 'filez.tv', 'uptobox.com', 'uptostream.com',
            'xvidstage.com', 'streamango.com', 'vev.io']

        self.hostblockDict = []
Example #6
    def getConstants(self):
        from resources.lib.sources import sources as sources
        self.sourceDict = sources()

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [
                i.domains for i in self.hostDict if not '*' in i.domains
            ]
            self.hostDict = [
                i.lower() for i in reduce(lambda x, y: x + y, self.hostDict)
            ]
            self.hostDict = [
                x for y, x in enumerate(self.hostDict)
                if x not in self.hostDict[:y]
            ]
        except:
            self.hostDict = []

        self.hostprDict = [
            '1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to',
            'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com',
            'nitroflare.com', 'turbobit.net', 'uploadrocket.net'
        ]

        self.hostcapDict = [
            'hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co',
            'thevideo.me', 'vidup.me', 'streamin.to', 'torba.se'
        ]

        self.hostblockDict = []

        self.debridDict = debrid.debridDict()
Example #7
    def getConstants(self):
        self.itemProperty = 'plugin.video.exodus.container.items'

        self.metaProperty = 'plugin.video.exodus.container.meta'

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [
                i.domains for i in self.hostDict if not '*' in i.domains
            ]
            self.hostDict = [
                i.lower() for i in reduce(lambda x, y: x + y, self.hostDict)
            ]
            self.hostDict = [
                x for y, x in enumerate(self.hostDict)
                if x not in self.hostDict[:y]
            ]
        except:
            self.hostDict = []

        self.hostprDict = [
            '1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to',
            'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com',
            'nitroflare.com', 'turbobit.net', 'uploadrocket.net'
        ]

        self.hostcapDict = [
            'hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co',
            'thevideo.me', 'torba.se'
        ]

        self.hostblockDict = []

        self.debridDict = debrid.debridDict()
Example #8
    def getConstants(self):
        try:
            try:
                self.hostDict = urlresolver.relevant_resolvers(
                    order_matters=True)
            except:
                self.hostDict = urlresolver.plugnplay.man.implementors(
                    urlresolver.UrlResolver)
            self.hostDict = [
                i.domains for i in self.hostDict if not '*' in i.domains
            ]
            self.hostDict = [
                i.lower() for i in reduce(lambda x, y: x + y, self.hostDict)
            ]
            self.hostDict = [
                x for y, x in enumerate(self.hostDict)
                if x not in self.hostDict[:y]
            ]
        except:
            self.hostDict = []

        self.hostprDict = [
            'oboom.com', 'rapidgator.net', 'rg.to', 'uploaded.net',
            'uploaded.to', 'ul.to', 'filefactory.com', 'nitroflare.com',
            'turbobit.net'
        ]

        self.hostcapDict = ['hugefiles.net', 'kingfiles.net']

        self.debridDict = debrid.debridDict()
Example #9
 def getResolverList(self):
     try:
         import urlresolver
         resolverList = []
         try: resolverList = urlresolver.relevant_resolvers(order_matters=True)
         except: resolverList = urlresolver.plugnplay.man.implementors(urlresolver.UrlResolver)
         resolverList = [i for i in resolverList if not '*' in i.domains]
     except:
         resolverList = []
     return resolverList
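
The inner try/except covers urlresolver API drift: old releases discovered resolvers through the plugnplay plugin manager rather than relevant_resolvers(). The same compatibility shim written narrowly (catching AttributeError instead of everything is an assumption about how the old builds fail):

    def all_resolvers():
        import urlresolver
        try:
            # Modern API (relevant_resolvers-based releases).
            return urlresolver.relevant_resolvers(order_matters=True)
        except AttributeError:
            # Legacy plugnplay-based discovery on old builds.
            return urlresolver.plugnplay.man.implementors(urlresolver.UrlResolver)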
Example #10
def status():
    try:
        debrid_resolver = [
            resolver()
            for resolver in urlresolver.relevant_resolvers(order_matters=True)
            if resolver.isUniversal()
        ]
        return True if debrid_resolver else False
    except:
        return False
Example #11
	def services(self):
		if self.mServices == None:
			try:
				result = urlresolver.relevant_resolvers(order_matters = True)
				result = [i.domains for i in result if not '*' in i.domains]
				result = [i.lower() for i in reduce(lambda x, y: x+y, result)]
				result = [x for y,x in enumerate(result) if x not in result[:y]]
				self.mServices = result
			except:
				return []
		return self.mServices
Example #12
	def services(self):
		if self.mServices == None:
			try: import urlresolver # Do not import at the start of the script, otherwise UrlResolver will be loaded every time handler.py is imported, drastically slowing down menus.
			except: pass
			try:
				result = urlresolver.relevant_resolvers(order_matters = True)
				result = [i.domains for i in result if not '*' in i.domains]
				result = [i.lower() for i in reduce(lambda x, y: x+y, result)]
				result = [x for y,x in enumerate(result) if x not in result[:y]]
				self.mServices = result
			except:
				return []
		return self.mServices
Example #13
    def getConstants(self):
        self.itemProperty = 'plugin.video.sedundnes.container.items'

        self.metaProperty = 'plugin.video.sedundnes.container.meta'

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [
                i.domains for i in self.hostDict if not '*' in i.domains
            ]
            self.hostDict = [
                i.lower() for i in reduce(lambda x, y: x + y, self.hostDict)
            ]
            self.hostDict = [
                x for y, x in enumerate(self.hostDict)
                if x not in self.hostDict[:y]
            ]
        except:
            self.hostDict = []

        self.hostBlackList = [
            'youtube.com', 'uploading.site', 'uploadkadeh.ir',
            'uploadkadeh.com', 'adf.ly', 'indishare.me', 'rlsbb.com',
            'nfo.rlsbb.com', 'bankupload.com', 'katfile.com', 'userboard.org',
            'multiup.org', 'hitfile.net', 'letitbit.net', 'pastebin.com',
            'myvideolinks.userboard.org', 'arabloads.net', 'multiup',
            'uppit.com', '4upld.com', 'bdupload.org', 'bdupload.info',
            'ziifile.com', 'bytewhale.com', 'go4up.com', 'file.rocks',
            'mylinkgen.com'
        ]

        self.hostmyDict = [
            'uploadrocket.net', 'userscloud', 'alfafile', '.avi', '.mkv',
            '.mov', '.mp4', '.xvid', '.divx', 'oboom', 'rapidgator', 'rg.to',
            'uploaded', 'ul.to', 'filefactory', 'nitroflare', 'turbobit',
            '1fichier', 'uptobox', '1fich', 'uploadrocket', 'uploading',
            'hugefiles', 'clicknupload'
        ]
        self.hostprDict = self.hostDict + self.hostmyDict
        self.hostcapDict = [
            'hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co',
            'oload.tv', 'thevideo.me', 'vidup.me', 'streamin.to', 'torba.se'
        ]
        self.blacklist_zips = [
            '.zip', '.rar', '.jpeg', '.img', '.jpg', '.RAR', '.ZIP', '.png',
            '.sub', '.srt'
        ]

        self.hostblockDict = []

        self.debridDict = debrid.debridDict()
Example #14
 def __get_resolvers(self, include_disabled):
     include_universal = common.get_setting('allow_universal') == "true"
     klasses = urlresolver.relevant_resolvers(self._domain, include_universal=include_universal,
                                              include_external=True, include_disabled=include_disabled, order_matters=True)
     resolvers = []
     for klass in klasses:
         if klass in resolver_cache:
             common.log_utils.log_debug('adding resolver from cache: %s' % (klass))
             resolvers.append(resolver_cache[klass])
         else:
             common.log_utils.log_debug('adding resolver to cache: %s' % (klass))
             resolver_cache[klass] = klass()
             resolvers.append(resolver_cache[klass])
     return resolvers
Example #15
    def getConstants(self):
        self.itemProperty = 'plugin.video.flixnet.container.items'

        self.metaProperty = 'plugin.video.flixnet.container.meta'

        from resources.lib.sources import sources as sources
        from resources.lib.sources_de import sources as sources_de
        from resources.lib.sources_fr import sources as sources_fr
        from resources.lib.sources_pt import sources as sources_pt
        from resources.lib.sources_pl import sources as sources_pl
        from resources.lib.sources_ko import sources as sources_ko

        self.sourceDict = sources() + sources_de() + sources_fr() + sources_pt() + sources_pl() + sources_ko()

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [
                i.domains for i in self.hostDict if not '*' in i.domains
            ]
            self.hostDict = [
                i.lower() for i in reduce(lambda x, y: x + y, self.hostDict)
            ]
            self.hostDict = [
                x for y, x in enumerate(self.hostDict)
                if x not in self.hostDict[:y]
            ]
        except:
            self.hostDict = []

        self.hostprDict = [
            '1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to',
            'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com',
            'nitroflare.com', 'turbobit.net', 'uploadrocket.net'
        ]

        self.hostcapDict = [
            'hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co',
            'oload.tv', 'thevideo.me', 'vidup.me', 'streamin.to', 'torba.se'
        ]

        self.hosthqDict = [
            'openload.io', 'openload.co', 'oload.tv', 'thevideo.me',
            'rapidvideo.com', 'raptu.com', 'filez.tv'
        ]

        self.hostblockDict = []

        self.debridDict = debrid.debridDict()
Example #16
 def services(self):
     if self.mServices == None:
         try:
             result = urlresolver.relevant_resolvers(order_matters=True)
             result = [i.domains for i in result if not '*' in i.domains]
             result = [
                 i.lower() for i in reduce(lambda x, y: x + y, result)
             ]
             result = [
                 x for y, x in enumerate(result) if x not in result[:y]
             ]
             self.mServices = result
         except:
             return []
     return self.mServices
Example #17
    def getConstants(self):
        try:
            try: self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            except: self.hostDict = urlresolver.plugnplay.man.implementors(urlresolver.UrlResolver)
            self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
            self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
            self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
        except:
            self.hostDict = []

        self.hostprDict = ['oboom.com', 'rapidgator.net', 'rg.to', 'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com', 'nitroflare.com', 'turbobit.net', 'uploadrocket.net']

        self.hostcapDict = ['hugefiles.net', 'kingfiles.net']

        self.debridDict = debrid.debridDict()
Example #18
    def getConstants(self):
        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
            self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
            self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
        except:
            self.hostDict = []

        self.hostprDict = ['1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to', 'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com', 'nitroflare.com', 'turbobit.net', 'uploadrocket.net']

        self.hostcapDict = ['hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co', 'thevideo.me', 'torba.se']

        self.hostblockDict = []

        self.debridDict = debrid.debridDict()
Example #19
def resolver(url, debrid):
    try:
        debrid_resolver = [
            resolver()
            for resolver in urlresolver.relevant_resolvers(order_matters=True)
            if resolver.isUniversal()
        ]
        debrid_resolver = [
            resolver for resolver in debrid_resolver if resolver.name == debrid
        ][0]

        debrid_resolver.login()
        _host, _media_id = debrid_resolver.get_host_and_id(url)
        stream_url = debrid_resolver.get_media_url(_host, _media_id)

        return stream_url
    except:
        return None
Example #20
 def getResolverList(self):
     try:
         import urlresolver
         resolverList = []
         try:
             # Settings are returned as strings; bool() would be truthy even
             # for 'false', so compare against 'true' explicitly.
             allowDebrid = control.setting('allow_debrid') == 'true'
         except:
             allowDebrid = False
         try:
             resolverList = urlresolver.relevant_resolvers(
                 order_matters=True, include_universal=allowDebrid)
         except:
             resolverList = urlresolver.plugnplay.man.implementors(
                 urlresolver.UrlResolver)
         resolverList = [i for i in resolverList if not '*' in i.domains]
     except:
         resolverList = []
     return resolverList
Example #21
    def getConstants(self):
        self.itemProperty = 'plugin.video.exodus.container.items'

        self.metaProperty = 'plugin.video.exodus.container.meta'

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
            self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
            self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
        except:
            self.hostDict = []

        self.hostprDict = ['oboom.com', 'rapidgator.net', 'rg.to', 'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com', 'nitroflare.com', 'turbobit.net', 'uploadrocket.net']

        self.hostcapDict = ['hugefiles.net', 'kingfiles.net']

        self.debridDict = debrid.debridDict()
Example #22
    def getConstants(self):
        self.itemProperty = 'plugin.video.zen.container.items'

        self.metaProperty = 'plugin.video.zen.container.meta'

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
            self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
            self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
        except:
            self.hostDict = []

        self.hostBlackList = [
            'youtube.com', 'uploading.site', 'uploadkadeh.ir', 'uploadkadeh.com', 'adf.ly',
            'indishare.me', 'rlsbb.com', 'nfo.rlsbb.com', 'bankupload.com', 'katfile.com',
            'userboard.org', 'multiup.org', 'hitfile.net', 'letitbit.net', 'pastebin.com',
            'myvideolinks.userboard.org', 'arabloads.net', 'multiup', 'uppit.com', '4upld.com',
            'bdupload.org', 'bdupload.info', 'ziifile.com', 'bytewhale.com', 'go4up.com',
            'file.rocks', 'mylinkgen.com']

        self.hostmyDict = [
            'uploadrocket.net', 'userscloud', 'alfafile', '.avi', '.mkv', '.mov', '.mp4',
            '.xvid', '.divx', 'oboom', 'rapidgator', 'rg.to', 'uploaded', 'ul.to',
            'filefactory', 'nitroflare', 'turbobit', 'uploadrocket', 'uploading',
            'hugefiles', 'clicknupload']
        self.hostprDict = self.hostDict + self.hostmyDict
        self.hostcapDict = ['hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co', 'thevideo.me', 'torba.se']

        self.hostblockDict = []

        self.debridDict = debrid.debridDict()
Example #23
    def sourcesDictionary(self):
        hosts = resolvers.info()
        hosts = [i for i in hosts if 'host' in i]

        self.rdDict = realdebrid.getHosts()
        self.pzDict = premiumize.getHosts()

        self.hostlocDict = [i['netloc'] for i in hosts if i['quality'] == 'High' and i['captcha'] == False]
        try: self.hostlocDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostlocDict)]
        except: pass
        self.hostlocDict = [x for y,x in enumerate(self.hostlocDict) if x not in self.hostlocDict[:y]]

        self.hostdirhdDict = [i['netloc'] for i in resolvers.info() if 'quality' in i and i['quality'] == 'High' and 'captcha' in i and i['captcha'] == False and 'a/c' in i and i['a/c'] == False]
        try: self.hostdirhdDict = [i.lower().rsplit('.', 1)[0] for i in reduce(lambda x, y: x+y, self.hostdirhdDict)]
        except: pass
        self.hostdirhdDict = [x for y,x in enumerate(self.hostdirhdDict) if x not in self.hostdirhdDict[:y]]

        self.hostprDict = [i['host'] for i in hosts if i['a/c'] == True]
        try: self.hostprDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostprDict)]
        except: pass
        self.hostprDict = [x for y,x in enumerate(self.hostprDict) if x not in self.hostprDict[:y]]

        self.hostcapDict = [i['host'] for i in hosts if i['captcha'] == True]
        try: self.hostcapDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostcapDict)]
        except: pass
        self.hostcapDict = [i for i in self.hostcapDict if not i in self.rdDict + self.pzDict]

        self.hosthdDict = [i['host'] for i in hosts if i['quality'] == 'High' and i['a/c'] == False and i['captcha'] == False]
        self.hosthdDict += [i['host'] for i in hosts if i['quality'] == 'High' and i['a/c'] == False and i['captcha'] == True]
        try: self.hosthdDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hosthdDict)]
        except: pass

        self.hosthqDict = [i['host'] for i in hosts if i['quality'] == 'High' and i['a/c'] == False and i['captcha'] == False]
        try: self.hosthqDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hosthqDict)]
        except: pass

        self.hostmqDict = [i['host'] for i in hosts if i['quality'] == 'Medium' and i['a/c'] == False and i['captcha'] == False]
        try: self.hostmqDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostmqDict)]
        except: pass

        self.hostlqDict = [i['host'] for i in hosts if i['quality'] == 'Low' and i['a/c'] == False and i['captcha'] == False]
        try: self.hostlqDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostlqDict)]
        except: pass

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
            self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
            self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
        except:
            self.hostDict = []

        #for i in self.hostDict:
        #    control.log('##### SOURCES DICTY: %s' % i )

        self.hostsdfullDict = self.hostprDict + self.hosthqDict + self.hostmqDict + self.hostlqDict + self.hostDict
        #for i in self.hostsdfullDict:
        #    control.log('##### SOURCES DICTY2: %s' % i )
        #self.hostsdfullDict = self.hostDict

        self.hosthdfullDict = self.hostprDict + self.hosthdDict
Example #24
    def sourcesDictionary(self):
        hosts = resolvers.info()
        hosts = [i for i in hosts if 'host' in i]

        self.rdDict = realdebrid.getHosts()
        self.pzDict = premiumize.getHosts()

        self.hostlocDict = [i['netloc'] for i in hosts if i['quality'] == 'High' and i['captcha'] == False]
        try: self.hostlocDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostlocDict)]
        except: pass
        self.hostlocDict = [x for y,x in enumerate(self.hostlocDict) if x not in self.hostlocDict[:y]]

        self.hostdirhdDict = [i['netloc'] for i in resolvers.info() if 'quality' in i and i['quality'] == 'High' and 'captcha' in i and i['captcha'] == False and 'a/c' in i and i['a/c'] == False]
        try: self.hostdirhdDict = [i.lower().rsplit('.', 1)[0] for i in reduce(lambda x, y: x+y, self.hostdirhdDict)]
        except: pass
        self.hostdirhdDict = [x for y,x in enumerate(self.hostdirhdDict) if x not in self.hostdirhdDict[:y]]

        self.hostprDict = [i['host'] for i in hosts if i['a/c'] == True]
        try: self.hostprDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostprDict)]
        except: pass
        self.hostprDict = [x for y,x in enumerate(self.hostprDict) if x not in self.hostprDict[:y]]

        self.hostcapDict = [i['host'] for i in hosts if i['captcha'] == True]
        try: self.hostcapDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostcapDict)]
        except: pass
        self.hostcapDict = [i for i in self.hostcapDict if not i in self.rdDict + self.pzDict]

        self.hosthdDict = [i['host'] for i in hosts if i['quality'] == 'High' and i['a/c'] == False and i['captcha'] == False]
        self.hosthdDict += [i['host'] for i in hosts if i['quality'] == 'High' and i['a/c'] == False and i['captcha'] == True]
        try: self.hosthdDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hosthdDict)]
        except: pass

        self.hosthqDict = [i['host'] for i in hosts if i['quality'] == 'High' and i['a/c'] == False and i['captcha'] == False]
        try: self.hosthqDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hosthqDict)]
        except: pass

        self.hostmqDict = [i['host'] for i in hosts if i['quality'] == 'Medium' and i['a/c'] == False and i['captcha'] == False]
        try: self.hostmqDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostmqDict)]
        except: pass

        self.hostlqDict = [i['host'] for i in hosts if i['quality'] == 'Low' and i['a/c'] == False and i['captcha'] == False]
        try: self.hostlqDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostlqDict)]
        except: pass

        try:
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
            self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
            self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]

        except:
            self.hostDict = []

        #for i in self.hostDict:
        #    control.log('##### SOURCES DICTY: %s' % i )

        self.hostsdfullDict = self.hostprDict + self.hosthqDict + self.hostmqDict + self.hostlqDict + self.hostDict
        #for i in self.hostsdfullDict:
        #    control.log('##### SOURCES DICTY2: %s' % i )
        #self.hostsdfullDict = self.hostDict

        self.hosthdfullDict = self.hostprDict + self.hosthdDict
Example #25
# -*- coding: utf-8 -*-

from resources.lib.modules import log_utils

try:
    import urlresolver

    debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True) if resolver.isUniversal()]
except:
    debrid_resolvers = []


def status():
    return debrid_resolvers != []


def resolver(url, debrid):
    try:
        debrid_resolver = [resolver for resolver in debrid_resolvers if resolver.name == debrid][0]

        debrid_resolver.login()
        _host, _media_id = debrid_resolver.get_host_and_id(url)
        stream_url = debrid_resolver.get_media_url(_host, _media_id)

        return stream_url
    except Exception as e:
        log_utils.log('%s Resolve Failure: %s' % (debrid, e), log_utils.LOGWARNING)
        return None
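
Because discovery runs once at import time here, status() and resolver() stay cheap per call. A hedged usage sketch; 'Real-Debrid' matches the resolver.name spelling seen in example #28, and the player call is hypothetical:

    import xbmc  # hypothetical caller context inside a Kodi addon

    def play_through_debrid(url):
        if status():  # at least one universal (debrid) resolver is configured
            stream = resolver(url, 'Real-Debrid')
            if stream:
                xbmc.Player().play(stream)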
Example #26
    def sourcesFilter(self):
        provider = control.setting('hosts.sort.provider')

        quality = control.setting('hosts.quality')
        if quality == '': quality = '0'

        captcha = control.setting('hosts.captcha')

        random.shuffle(self.sources)

        if provider == 'true':
            self.sources = sorted(self.sources, key=lambda k: k['provider'])

        for i in self.sources:
            if 'checkquality' in i and i['checkquality'] == True:
                if not i['source'].lower() in self.hosthqDict and i['quality'] not in ['SD', 'SCR', 'CAM']:
                    i.update({'quality': 'SD'})

        local = [
            i for i in self.sources if 'local' in i and i['local'] == True
        ]
        self.sources = [i for i in self.sources if not i in local]

        filter = []
        filter += [i for i in self.sources if i['direct'] == True]
        filter += [i for i in self.sources if i['direct'] == False]
        self.sources = filter

        filter = []
        for d in [
                resolver() for resolver in urlresolver.relevant_resolvers(
                    order_matters=True) if resolver.isUniversal()
        ]:
            filter += [
                dict(i.items() + [('debrid', d.name)]) for i in self.sources
                if d.valid_url('', i['source'])
            ]
        filter += [
            i for i in self.sources
            if not i['source'].lower() in self.hostprDict
            and i['debridonly'] == False
        ]
        self.sources = filter

        filter = []
        filter += local

        if quality in ['0']:
            filter += [
                i for i in self.sources
                if i['quality'] == '4K' and 'debrid' in i
            ]
        if quality in ['0', '1']:
            filter += [
                i for i in self.sources
                if i['quality'] == '1440p' and 'debrid' in i
            ]
        if quality in ['0', '1', '2']:
            filter += [
                i for i in self.sources
                if i['quality'] == '1080p' and 'debrid' in i
            ]
        if quality in ['0', '1', '2', '3']:
            filter += [
                i for i in self.sources
                if i['quality'] == 'HD' and 'debrid' in i
            ]

        if quality in ['0']:
            filter += [
                i for i in self.sources if i['quality'] == '4K'
                and not 'debrid' in i and 'memberonly' in i
            ]
        if quality in ['0', '1']:
            filter += [
                i for i in self.sources if i['quality'] == '1440p'
                and not 'debrid' in i and 'memberonly' in i
            ]
        if quality in ['0', '1', '2']:
            filter += [
                i for i in self.sources if i['quality'] == '1080p'
                and not 'debrid' in i and 'memberonly' in i
            ]
        if quality in ['0', '1', '2', '3']:
            filter += [
                i for i in self.sources if i['quality'] == 'HD'
                and not 'debrid' in i and 'memberonly' in i
            ]

        if quality in ['0']:
            filter += [
                i for i in self.sources if i['quality'] == '4K'
                and not 'debrid' in i and not 'memberonly' in i
            ]
        if quality in ['0', '1']:
            filter += [
                i for i in self.sources if i['quality'] == '1440p'
                and not 'debrid' in i and not 'memberonly' in i
            ]
        if quality in ['0', '1', '2']:
            filter += [
                i for i in self.sources if i['quality'] == '1080p'
                and not 'debrid' in i and not 'memberonly' in i
            ]
        if quality in ['0', '1', '2', '3']:
            filter += [
                i for i in self.sources if i['quality'] == 'HD'
                and not 'debrid' in i and not 'memberonly' in i
            ]

        #filter += [i for i in self.sources if i['quality'] == 'SD']
        #if len(filter) < 10: filter += [i for i in self.sources if i['quality'] == 'SCR']
        #if len(filter) < 10: filter += [i for i in self.sources if i['quality'] == 'CAM']
        filter += [
            i for i in self.sources if i['quality'] in ['SD', 'SCR', 'CAM']
        ]
        self.sources = filter

        if not captcha == 'true':
            filter = [
                i for i in self.sources if
                i['source'].lower() in self.hostcapDict and not 'debrid' in i
            ]
            self.sources = [i for i in self.sources if not i in filter]

        filter = [
            i for i in self.sources
            if i['source'].lower() in self.hostblockDict and not 'debrid' in i
        ]
        self.sources = [i for i in self.sources if not i in filter]

        multi = [i['language'] for i in self.sources]
        multi = [x for y, x in enumerate(multi) if x not in multi[:y]]
        multi = True if len(multi) > 1 else False

        if multi == True:
            self.sources = [
                i for i in self.sources if not i['language'] == 'en'
            ] + [i for i in self.sources if i['language'] == 'en']

        self.sources = self.sources[:2000]

        for i in range(len(self.sources)):
            u = self.sources[i]['url']

            p = self.sources[i]['provider']

            q = self.sources[i]['quality']

            s = self.sources[i]['source']
            s = s.rsplit('.', 1)[0]

            l = self.sources[i]['language']

            try:
                f = (' | '.join([
                    '[I]%s [/I]' % info.strip()
                    for info in self.sources[i]['info'].split('|')
                ]))
            except:
                f = ''

            try:
                d = self.sources[i]['debrid']
            except:
                d = self.sources[i]['debrid'] = ''

            if not d == '':
                label = '%02d | [B]%s[/B] | ' % (int(i + 1), d)
                #if not d == '': label = '%02d | [B]%s[/B] | [B]%s[/B] | ' % (int(i+1), p, d)
            else:
                label = '%02d | [B]%s[/B] | ' % (int(i + 1), p)

            if multi == True and not l == 'en': label += '[B]%s[/B] | ' % l

            if q in ['4K', '1440p', '1080p', 'HD']:
                label += '%s | %s | [B][I]%s [/I][/B]' % (s, f, q)
            elif q == 'SD':
                label += '%s | %s' % (s, f)
            else:
                label += '%s | %s | [I]%s [/I]' % (s, f, q)
            label = label.replace('| 0 |', '|').replace(' | [I]0 [/I]', '')
            label = label.replace('[I]HEVC [/I]', 'HEVC')
            label = re.sub(r'\[I\]\s+\[/I\]', ' ', label)
            label = re.sub(r'\|\s+\|', '|', label)
            label = re.sub(r'\|(?:\s+|)$', '', label)

            self.sources[i]['label'] = label.upper()

        return self.sources
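
The repeated quality gates above encode a maximum-quality setting: '0' admits everything from 4K down, '1' from 1440p, '2' from 1080p, '3' from HD, and SD/SCR/CAM always pass. The same gate condensed into a lookup (a sketch inferred from the conditionals above, not the addon's actual helper):

    TIERS = ['4K', '1440p', '1080p', 'HD']  # SD/SCR/CAM always pass

    def quality_allowed(quality_setting, source_quality):
        if source_quality not in TIERS:
            return True
        floor = {'0': 0, '1': 1, '2': 2, '3': 3}.get(quality_setting, 0)
        return TIERS.index(source_quality) >= floor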
Example #27
    def _cached_http_get(self, url, base_url, timeout, params=None, data=None, multipart_data=None, headers=None, cookies=None, allow_redirect=True,
                         method=None, require_debrid=False, read_error=False, cache_limit=8):
        if require_debrid:
            if Scraper.debrid_resolvers is None:
                Scraper.debrid_resolvers = [resolver for resolver in urlresolver.relevant_resolvers() if resolver.isUniversal()]
            if not Scraper.debrid_resolvers:
                logger.log('%s requires debrid: %s' % (self.__module__, Scraper.debrid_resolvers), log_utils.LOGDEBUG)
                return ''
                
        if cookies is None: cookies = {}
        if timeout == 0: timeout = None
        if headers is None: headers = {}
        if url.startswith('//'): url = 'http:' + url
        referer = headers['Referer'] if 'Referer' in headers else base_url
        if params:
            if url == base_url and not url.endswith('/'):
                url += '/'
            
            parts = urlparse.urlparse(url)
            if parts.query:
                params.update(scraper_utils.parse_query(url))
                url = urlparse.urlunparse((parts.scheme, parts.netloc, parts.path, parts.params, '', parts.fragment))
                
            url += '?' + urllib.urlencode(params)
        logger.log('Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' % (url, cookies, data, headers), log_utils.LOGDEBUG)
        if data is not None and not isinstance(data, basestring):
            data = urllib.urlencode(data, True)

        if multipart_data is not None:
            headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
            data = multipart_data

        _created, _res_header, html = self.db_connection().get_cached_url(url, data, cache_limit)
        if html:
            logger.log('Returning cached result for: %s' % (url), log_utils.LOGDEBUG)
            return html

        try:
            self.cj = self._set_cookies(base_url, cookies)
            if isinstance(url, unicode): url = url.encode('utf-8')
            request = urllib2.Request(url, data=data)
            headers = headers.copy()
            request.add_header('User-Agent', scraper_utils.get_ua())
            request.add_header('Accept', '*/*')
            request.add_header('Accept-Encoding', 'gzip')
            request.add_unredirected_header('Host', request.get_host())
            if referer: request.add_unredirected_header('Referer', referer)
            if 'Referer' in headers: del headers['Referer']
            if 'Host' in headers: del headers['Host']
            for key, value in headers.iteritems(): request.add_header(key, value)
            self.cj.add_cookie_header(request)
            if not allow_redirect:
                opener = urllib2.build_opener(NoRedirection)
                urllib2.install_opener(opener)
            else:
                opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
                urllib2.install_opener(opener)
                opener2 = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
                urllib2.install_opener(opener2)

            if method is not None: request.get_method = lambda: method.upper()
            response = urllib2.urlopen(request, timeout=timeout)
            self.cj.extract_cookies(response, request)
            if kodi.get_setting('cookie_debug') == 'true':
                logger.log('Response Cookies: %s - %s' % (url, scraper_utils.cookies_as_str(self.cj)), log_utils.LOGDEBUG)
            self.cj._cookies = scraper_utils.fix_bad_cookies(self.cj._cookies)
            self.cj.save(ignore_discard=True)
            if not allow_redirect and (response.getcode() in [301, 302, 303, 307] or response.info().getheader('Refresh')):
                if response.info().getheader('Refresh') is not None:
                    refresh = response.info().getheader('Refresh')
                    return refresh.split(';')[-1].split('url=')[-1]
                else:
                    redir_url = response.info().getheader('Location')
                    if redir_url.startswith('='):
                        redir_url = redir_url[1:]
                    return redir_url
            
            content_length = response.info().getheader('Content-Length', 0)
            if int(content_length) > MAX_RESPONSE:
                logger.log('Response exceeded allowed size. %s => %s / %s' % (url, content_length, MAX_RESPONSE), log_utils.LOGWARNING)
            
            if method == 'HEAD':
                return ''
            else:
                if response.info().get('Content-Encoding') == 'gzip':
                    html = ungz(response.read(MAX_RESPONSE))
                else:
                    html = response.read(MAX_RESPONSE)
        except urllib2.HTTPError as e:
            if e.info().get('Content-Encoding') == 'gzip':
                html = ungz(e.read(MAX_RESPONSE))
            else:
                html = e.read(MAX_RESPONSE)
                
            if CF_CAPCHA_ENABLED and e.code == 403 and 'cf-captcha-bookmark' in html:
                html = cf_captcha.solve(url, self.cj, scraper_utils.get_ua(), self.get_name())
                if not html:
                    return ''
            elif e.code == 503 and 'cf-browser-verification' in html:
                html = cloudflare.solve(url, self.cj, scraper_utils.get_ua(), extra_headers=headers)
                if not html:
                    return ''
            else:
                logger.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
                if not read_error:
                    return ''
        except Exception as e:
            logger.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
            return ''

        self.db_connection().cache_url(url, html, data)
        return html
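
ungz() is not defined in this snippet; given the gzip Content-Encoding checks around it, it is presumably a small decompression helper along these lines (an assumed implementation, written Python 2 style to match the urllib2 code above):

    import gzip
    from StringIO import StringIO  # Python 2, as in the snippet above

    def ungz(compressed):
        # Wrap the raw gzip bytes in a file object and inflate them.
        return gzip.GzipFile(fileobj=StringIO(compressed)).read()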
Example #28
def apply_urlresolver(hosters):
    filter_debrid = kodi.get_setting('filter_debrid') == 'true'
    show_debrid = kodi.get_setting('show_debrid') == 'true'
    if not filter_debrid and not show_debrid:
        print "RETURNING NON FILTERED"
        return hosters
## New Resolver
    try:
        import urlresolver.plugnplay
        resolvers = urlresolver.plugnplay.man.implementors(urlresolver.UrlResolver)
        debrid_resolvers = [resolver for resolver in resolvers if resolver.isUniversal() and resolver.get_setting('enabled') == 'true']
    except:
        import urlresolver
        debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True) if resolver.isUniversal()]
##   End New Resolver
    filtered_hosters = []
    debrid_hosts = {}
    unk_hosts = {}
    known_hosts = {}


    for hoster in hosters:
        #print "HOSTERS ARE: "+str(hoster)
        if 'direct' in hoster and hoster['direct'] == False and hoster['host']:
            host = hoster['host']
            host = (host.lower())
            #
            if filter_debrid:
                if host in unk_hosts:
                    # log_utils.log('Unknown Hit: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
                    unk_hosts[host] += 1
                    continue
                elif host in known_hosts:
                    # log_utils.log('Known Hit: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
                    known_hosts[host] += 1
                    filtered_hosters.append(hoster)
                else:
                    hmf = urlresolver.HostedMediaFile(host=host, media_id='dummy')  # use dummy media_id to force host validation
                    if hmf:
                        # log_utils.log('Known Miss: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
                        known_hosts[host] = known_hosts.get(host, 0) + 1
                        filtered_hosters.append(hoster)
                    else:
                        # log_utils.log('Unknown Miss: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
                        unk_hosts[host] = unk_hosts.get(host, 0) + 1
                        continue
            else:
                filtered_hosters.append(hoster)

            if host in debrid_hosts:
                log_utils.log('Debrid cache found for %s: %s' % (host, debrid_hosts[host]), log_utils.LOGDEBUG)
                hoster['debrid'] = debrid_hosts[host]
                #print debrid_hosts[host]
            else:
                temp_resolvers = []
                for resolver in debrid_resolvers:
                    if resolver.valid_url('', host):
                        #print resolver.name
                        rname = resolver.name.replace('Real-Debrid', 'RD').replace('Premiumize.me', 'PRE')
                        temp_resolvers.append(rname.upper())
                        #temp_resolvers.append(resolver.name.upper())
                        if kodi.get_setting('debug') == "true":
                            print '%s supported by: %s' % (host, temp_resolvers)
                        debrid_hosts[host] = temp_resolvers
                    else:
                        hoster['debrid'] = ''
                if temp_resolvers:
                    hoster['debrid'] = temp_resolvers
                    #print temp_resolvers
        else:
            filtered_hosters.append(hoster)

    #log_utils.log('Discarded Hosts: %s' % (sorted(unk_hosts.items(), key=lambda x: x[1], reverse=True)), xbmc.LOGDEBUG)
    if kodi.get_setting('debug') == "true":
        print "FILTERED HOSTERS ARE =" +str(filtered_hosters)
    return filtered_hosters


#
# <setting id="sort_scrapers" type="bool" label="Sort Providers" default="false" visable="false"/>
# <setting id="scraper_order" type="enum" label="Provider Sort Type" values="Default|Premium|Host" default="0" visible="eq(-1,true)"/>
Example #29
def feedme(feed="", type=""):
    h = HTMLParser.HTMLParser()
    colour = [
        "black", "white", "gray", "blue", "teal", "fuchsia", "indigo",
        "turquoise", "cyan", "greenyellow", "lime", "green", "olive", "gold",
        "yello", "lavender", "pink", "magenta", "purple", "maroon",
        "chocolate", "orange", "red", "brown"
    ]
    parameters = util.parseParameters()

    #util.logError(str(parameters))

    try:
        mode = int(parameters["mode"])
    except:
        mode = None

    try:
        offsite = ast.literal_eval(parameters['extras'])
        #util.logError(str(offsite))
        if "site_xml" in offsite:
            feed = offsite['site_xml']
            type = "url"
    except:
        #not set, dont worry about it
        pass

    if mode == None or mode == 0:
        # if we get here list the sites found in the json file
        menu = []
        bits = util.getFile(feed, type)
        counter = 0

        if len(bits['sites']) == 1 and 'folder' not in bits['sites']:
            mode = 1
            parameters['extras'] = str({"site": 0})
        else:
            try:
                folder = ast.literal_eval(parameters['extras'])
                folder = folder['folder']
                for site in bits['sites']:
                    try:
                        if site['folder'].lower() == folder.lower():
                            extras = {}
                            try:
                                extras['site_xml'] = offsite['site_xml']
                            except:
                                pass
                            extras['site'] = counter
                            menu.append({
                                "title": site['name'],
                                "url": site['name'],
                                "mode": "1",
                                "poster": site['poster'],
                                "icon": site['poster'],
                                "fanart": site['fanart'],
                                "type": ADDON_TYPE,
                                "plot": "",
                                "isFolder": True,
                                "extras": extras
                            })

                    except:
                        # site not in a folder
                        pass
                    counter = counter + 1
            except:
                if "folders" in bits:
                    for site in bits['folders']:
                        extras = {}
                        try:
                            extras['site_xml'] = offsite['site_xml']
                        except:
                            pass
                        extras['site'] = counter
                        folder_extras = {}
                        folder_extras['folder'] = site['name']
                        if "url" in site:
                            folder_extras['site_xml'] = site['url']
                            del (folder_extras['folder'])
                        menu.append({
                            "title": site['name'],
                            "url": site['name'],
                            "mode": "0",
                            "poster": site['poster'],
                            "icon": site['poster'],
                            "fanart": site['fanart'],
                            "type": ADDON_TYPE,
                            "plot": "",
                            "isFolder": True,
                            "extras": folder_extras
                        })
                for site in bits['sites']:
                    if "folder" not in site:
                        extras = {}
                        try:
                            extras['site_xml'] = offsite['site_xml']
                        except:
                            pass
                        extras['site'] = counter
                        menu.append({
                            "title": site['name'],
                            "url": site['name'],
                            "mode": "1",
                            "poster": site['poster'],
                            "icon": site['poster'],
                            "fanart": site['fanart'],
                            "type": ADDON_TYPE,
                            "plot": "",
                            "isFolder": True,
                            "extras": extras
                        })
                    counter = counter + 1
            util.addMenuItems(menu)
    if mode == 1:
        # first level within a site, show Latest, Search and any Tags within the specified site
        menu = []
        extras = ast.literal_eval(parameters['extras'])

        try:
            extras['site_xml'] = offsite['site_xml']
        except:
            pass

        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        if "search_url" not in site and "tags" not in site and len(
                site['items']) == 1:
            mode = 2
            for item in site['items']:
                parameters['url'] = site['items'][item][0]['site_url']
                break

        else:
            for item in site['items'].iterkeys():
                if item.lower() != "search":
                    try:
                        poster = parameters['poster']
                    except:
                        try:
                            poster = site['items'][item][0]['folder_poster']
                            if "http" not in poster and "https" not in poster:
                                poster = os.path.join(HOME, '', poster)
                        except:
                            poster = ""
                    try:
                        fanart = parameters['fanart']
                    except:
                        try:
                            fanart = site['items'][item][0]['folder_fanart']
                            if "http" not in fanart and "https" not in fanart:
                                fanart = os.path.join(HOME, '', fanart)
                        except:
                            fanart = ""
                    extras['level'] = item

                    menu.append({
                        "title": item,
                        "url": urllib.quote_plus(site['items'][item][0]['site_url']),
                        "mode": "2",
                        "poster": poster,
                        "icon": poster,
                        "fanart": fanart,
                        "type": ADDON_TYPE,
                        "plot": "",
                        "isFolder": True,
                        "extras": str(extras)
                    })

            try:
                counter = 0
                for tag in site['tags']:
                    try:
                        poster = parameters['poster']
                    except:
                        poster = ""

                    try:
                        fanart = parameters['fanart']
                    except:
                        fanart = ""
                    extras['tag'] = counter
                    menu.append({
                        "title": tag['name'],
                        "url": tag['url'],
                        "mode": "4",
                        "poster": poster,
                        "icon": poster,
                        "fanart": fanart,
                        "type": ADDON_TYPE,
                        "plot": "",
                        "isFolder": True,
                        "extras": str(extras)
                    })
                    counter = counter + 1
            except:
                pass
            if "search_url" in site:
                try:
                    poster = parameters['poster']
                except:
                    poster = ""

                try:
                    fanart = parameters['fanart']
                except:
                    fanart = ""
                menu.append({
                    "title": "Search",
                    "url": "",
                    "mode": "3",
                    "poster": poster,
                    "icon": poster,
                    "fanart": fanart,
                    "type": ADDON_TYPE,
                    "plot": "",
                    "isFolder": True,
                    "extras": str(extras)
                })
            util.addMenuItems(menu)
    if mode == 2:
        # load the first level of relevant video information
        menu = []
        extras = ast.literal_eval(parameters['extras'])

        try:
            extras['site_xml'] = offsite['site_xml']
        except:
            pass

        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        if 'pos' in extras:
            pos = extras['pos']
        else:
            pos = 0

        if 'level' in extras:
            level = extras['level']
        else:
            for item in site['items']:
                level = item
                break

        if len(site['items'][level]) > pos + 1:
            # another level is needed
            extras['pos'] = pos + 1
            newMode = "2"
            isFolder = True
        else:
            # on a level where next move is to check for sources
            try:
                if site['items'][level][pos]['play_media'] == "multiple":
                    newMode = "113"
                    isFolder = True
                else:
                    newMode = "111"  # find source
                    isFolder = False
            except:
                # default to play first found
                newMode = "111"  # find source
                isFolder = False

        #util.alert(newMode)
        page = util.get(h.unescape(parameters['url']))
        next = page
        """if parameters['name']=="Next Page >":
            util.logError(str(next))"""

        try:
            if site['items'][level][pos]['global'] != "":
                regex = util.prepare(site['items'][level][pos]['global'])
                matches = re.findall(regex, page)
                if matches:
                    page = matches[0]
        except:
            pass

        regex = util.prepare(site['items'][level][pos]['pattern'])
        matches = re.findall(regex, page)
        if matches:
            counter = 0
            for match in matches:
                try:
                    title = h.unescape(
                        util.replaceParts(
                            site['items'][level][pos]['name'],
                            matches[counter]).replace('\n', '').replace(
                                '\t', '').replace("\\", "").lstrip())
                except:
                    title = ""
                url = urllib.quote_plus(
                    util.replaceParts(site['items'][level][pos]['url'],
                                      matches[counter]))
                #    util.alert(">>"+url)
                #except:
                #    url=""
                try:
                    poster = util.replaceParts(
                        site['items'][level][pos]['poster'],
                        matches[counter]).encode('utf-8')
                except:
                    poster = ""
                try:
                    fanart = util.replaceParts(
                        site['items'][level][pos]['fanart'],
                        matches[counter]).encode('utf-8')
                except:
                    fanart = ""
                try:
                    plot = util.replaceParts(site['items'][level][pos]['plot'],
                                             matches[counter]).encode('utf-8')
                except:
                    plot = ""

                if isFolder:
                    menu.append({
                        "title": title,
                        "url": url,
                        "mode": newMode,
                        "poster": poster,
                        "icon": poster,
                        "fanart": fanart,
                        "type": ADDON_TYPE,
                        "plot": plot,
                        "isFolder": isFolder,
                        "extras": str(extras)
                    })
                else:
                    menu.append({
                        "title": title,
                        "url": url,
                        "mode": newMode,
                        "poster": poster,
                        "icon": poster,
                        "fanart": fanart,
                        "type": ADDON_TYPE,
                        "plot": plot,
                        "isFolder": isFolder,
                        "isPlayable": "True",
                        "extras": str(extras)
                    })
                counter = counter + 1
        try:
            regex = util.prepare(site['items'][level][pos]['next_pattern'])
            matches = re.findall(regex, next)
            if matches:
                parts = []
                if len(matches) > 1:
                    for match in matches:
                        parts.append(match)
                else:
                    match = matches

                nextlink = util.replaceParts(
                    site['items'][level][pos]['next_url'], match)
                extras['pos'] = pos

                menu.append({
                    "title": "Next Page >",
                    "url": urllib.quote_plus(nextlink),
                    "mode": "2",
                    "poster": "",
                    "icon": "",
                    "fanart": "",
                    "type": ADDON_TYPE,
                    "plot": plot,
                    "isFolder": True,
                    "extras": str(extras)
                })
        except Exception as e:
            util.logError(str(e))
            pass
        util.addMenuItems(menu)
    elif mode == 3:
        # display the Search dialog and build search results
        menu = []
        extras = ast.literal_eval(parameters['extras'])

        try:
            extras['site_xml'] = offsite['site_xml']
        except:
            pass

        term = util.searchDialog()

        if term:
            bits = util.getFile(feed, type)
            site = bits['sites'][extras['site']]
            pos = 0

            for item in site['items']:
                level = item
                extras['level'] = level
                break

            if len(site['items'][extras['level']]) > pos + 1:
                # another level is needed
                extras['pos'] = 1
                newMode = "2"
                isFolder = True
                isPlayable = True
            else:
                # on a level where next move is to check for sources
                if site['items'][
                        extras['level']][pos]['play_media'] == "multiple":
                    newMode = "113"
                    isFolder = True
                    isPlayable = False
                else:
                    newMode = "111"  # find source
                    isFolder = False
                    isPlayable = True
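            # search_url may embed a python expression inside {{ }}; execPy
            # evaluates it after the {%} placeholder is replaced with the term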
            if "{{" in site['search_url'] and "}}" in site['search_url']:
                url = util.execPy(site['search_url'].replace("{%}", term))
            else:
                url = site['search_url'].replace("{%}", term)
            util.logError(url)
            page = util.get(url)
            next = page

            try:
                if site['item']['global'] != "":
                    regex = util.prepare(site['item']['global'])
                    matches = re.findall(regex, page)
                    if matches:
                        page = matches[0]
            except:
                pass

            regex = util.prepare(site['items'][level][pos]['pattern'])
            matches = re.findall(regex, page)

            if matches:
                counter = 0
                for match in matches:
                    try:
                        title = h.unescape(
                            util.replaceParts(
                                site['items'][level][pos]['name'],
                                matches[counter]).replace('\n', '').replace(
                                    '\t', '').lstrip().encode('utf-8'))
                    except:
                        title = ""
                    try:
                        url = util.replaceParts(
                            site['items'][level][pos]['url'],
                            matches[counter]).encode('utf-8')
                    except:
                        url = ""
                    try:
                        poster = util.replaceParts(
                            site['items'][level][pos]['poster'],
                            matches[counter]).encode('utf-8')
                    except:
                        poster = ""
                    try:
                        fanart = util.replaceParts(
                            site['items'][level][pos]['fanart'],
                            matches[counter]).encode('utf-8')
                    except:
                        fanart = ""
                    try:
                        plot = util.replaceParts(
                            site['items'][level][pos]['plot'],
                            matches[counter]).encode('utf-8')
                    except:
                        plot = ""

                    if isFolder:
                        menu.append({
                            "title": title,
                            "url": url,
                            "mode": newMode,
                            "poster": poster,
                            "icon": poster,
                            "fanart": fanart,
                            "type": ADDON_TYPE,
                            "plot": plot,
                            "isFolder": isFolder,
                            "extras": str(extras)
                        })
                    else:
                        menu.append({
                            "title": title,
                            "url": url,
                            "mode": newMode,
                            "poster": poster,
                            "icon": poster,
                            "fanart": fanart,
                            "type": ADDON_TYPE,
                            "plot": plot,
                            "isFolder": isFolder,
                            "isPlayable": "True",
                            "extras": str(extras)
                        })
                    counter = counter + 1
            try:
                regex = util.prepare(site['items'][level][pos]['next_pattern'])
                matches = re.findall(regex, next)
                if matches:
                    parts = []
                    if len(matches) > 1:
                        for match in matches:
                            parts.append(match)
                    else:
                        match = matches

                    nextlink = util.replaceParts(
                        site['items'][level][pos]['next_url'], match)
                    menu.append({
                        "title": "Next Page >",
                        "url": nextlink,
                        "mode": "2",
                        "poster": "",
                        "icon": "",
                        "fanart": "",
                        "type": ADDON_TYPE,
                        "plot": plot,
                        "isFolder": True,
                        "extras": str(extras)
                    })
            except:
                pass
            util.addMenuItems(menu)
        else:
            return False
    elif mode == 4:
        # show relevant Tag video results
        menu = []

        extras = ast.literal_eval(parameters['extras'])

        try:
            extras['site_xml'] = offsite['site_xml']
        except:
            pass

        bits = util.getFile(feed, type)

        site = bits['sites'][extras['site']]['tags'][extras['tag']]

        page = util.get(parameters['url'])
        next = page

        try:
            if site['item']['global'] != "":
                regex = util.prepare(site['item']['global'])
                matches = re.findall(regex, page)
                if matches:
                    page = matches[0]
        except:
            pass

        regex = util.prepare(site['item']['pattern'])
        matches = re.findall(regex, page)
        if matches:
            counter = 0
            for match in matches:
                try:
                    title = h.unescape(
                        util.replaceParts(site['item']['name'],
                                          matches[counter]).encode('utf-8'))
                except:
                    title = ""
                try:
                    url = util.replaceParts(site['item']['url'],
                                            matches[counter]).encode('utf-8')
                except:
                    url = ""
                try:
                    poster = util.replaceParts(
                        site['item']['poster'],
                        matches[counter]).encode('utf-8')
                except:
                    poster = ""
                try:
                    fanart = util.replaceParts(
                        site['item']['fanart'],
                        matches[counter]).encode('utf-8')
                except:
                    fanart = ""
                try:
                    plot = util.replaceParts(site['item']['plot'],
                                             matches[counter]).encode('utf-8')
                except:
                    plot = ""

                menu.append({
                    "title": title,
                    "url": url,
                    "mode": "2",
                    "poster": poster,
                    "icon": poster,
                    "fanart": fanart,
                    "type": ADDON_TYPE,
                    "plot": plot,
                    "isFolder": True,
                    "extras": extras
                })
                counter = counter + 1
        util.addMenuItems(menu)
    elif mode == 5:
        pass
    elif mode == 111:
        # find playable sources in url
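        # Resolution strategy: try urlresolver on the page url itself, then
        # treat the url as a direct file if it carries a known video extension,
        # and finally scrape the fetched page for embed/watch/hoster links.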

        extras = ast.literal_eval(parameters['extras'])
        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        try:
            pos = extras['pos']
        except:
            pos = 0

        try:
            selected_video = int(
                site['items'][extras['level']][pos]['play_media']) - 1
        except:
            selected_video = 0

        page = util.get(parameters['url'])

        link = False
        try:
            link = urlresolver.resolve(parameters['url'])
        except Exception as e:
            if str(e).lower() == "sign in to confirm your age":
                util.notify("YouTube Error: Login to confirm age.")
                return False
            else:
                util.notify(str(e))
                return False

        if link:
            # play if url resolver reports true
            util.playMedia(parameters['name'],
                           parameters['poster'],
                           link,
                           force=True)
        elif any(ext in parameters['url'] for ext in filetypes):
            # play if url has a video extension
            util.playMedia(parameters['name'],
                           parameters['poster'],
                           parameters['url'],
                           force=True)
        else:
            #search for video urls
            if "urlresolver" in site and site['urlresolver'].lower(
            ) == "false":
                regex = "\"([^\s]*?\.(:?" + "|".join(filetypes) + "))\""
                matches = re.findall(regex, page)
            else:
                regex = "(\/\/.*?\/embed.*?)[\?\"]"
                matches = re.findall(regex, page)
                regex = "\"((?:http:|https:)?\/\/.*?\/watch.*?)[\"]"
                matches = matches + re.findall(regex, page)
                matches2 = urlresolver.scrape_supported(page)

                matches = matches + matches2
            util.logError("matches: " + str(matches))
            if len(matches) <= selected_video:
                util.notify("No video found")
                return False
            if isinstance(matches[selected_video], tuple):
                url = matches[selected_video][0]
            else:
                url = matches[selected_video]
            if url.startswith("//"):
                url = "http:" + url

            link = urlresolver.resolve(url)

            if not link:
                link = url

            util.playMedia(parameters['name'], parameters['poster'], link)

    elif mode == 112:
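        # resolve the url and hand it to the downloader instead of the player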
        extras = ast.literal_eval(parameters['extras'])
        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        page = util.get(parameters['url'])
        """if "urlresolver" in site and site['urlresolver'].lower()=="false":
            regex="\"(.*?\.mp4)\""
            matches = re.findall(regex, page)
            if matches:
                link=matches[0]
        else:"""
        regex = "\"(//\S*?(:?" + ("|".join(filetypes)) + ")\S*?)\""
        matches = re.findall(regex, page)
        if matches:
            url = matches[selected_video][0]
            if "http" not in url:
                link = "http:" + url
        else:
            link = urlresolver.resolve(parameters['url'])
            if not link:
                try:
                    regex = "(\/\/.*?\/embed.*?)[\?\"]"
                    matches = re.findall(regex, page)
                    regex = "\"((?:http:|https:)?\/\/.*?\/watch.*?)[\"]"
                    matches = matches + re.findall(regex, page)
                    regex = 'https?://(.*?(?:\.googlevideo|(?:plus|drive|get|docs)\.google|google(?:usercontent|drive|apis))\.com)/(.*?(?:videoplayback\?|[\?&]authkey|host/)*.+)'
                    matches = matches + re.findall(regex, page)
                    if matches:
                        matches = [
                            x for x in matches
                            if any(sup in x for sup in supports)
                        ]
                        if matches:
                            link = urlresolver.resolve("http:" + matches[0])
                except Exception as e:
                    util.notify(str(e))
        if link:
            import downloader
            downloader.download(
                link,
                os.path.join(xbmcaddon.Addon().getSetting('folder'),
                             parameters['name'] + ".mp4"))
        else:
            util.notify("No video found")
    elif mode == 113:
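        # 'play_media' was "multiple": list every supported link found on the
        # page so the user can pick one; each entry plays through mode 114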
        menu = []
        extras = ast.literal_eval(parameters['extras'])
        bits = util.getFile(feed, type)
        site = bits['sites'][extras['site']]

        page = util.get(parameters['url'])

        matches = urlresolver.scrape_supported(page)
        #regex="(//\S*?(:?"+("|".join(filetypes))+")\S*?)"
        #matches2 = re.findall(regex, page)
        """regex="(\/\/.*?\/embed.*?)[\?\"]"
        matches2 = re.findall(regex, page)
        regex="\"(https?://("+"|".join(supports)+")\..*?)\""
        matches3 = re.findall(regex, page)
        regex = 'https?://(.*?(?:\.googlevideo|(?:plus|drive|get|docs)\.google|google(?:usercontent|drive|apis))\.com)/(.*?(?:videoplayback\?|[\?&]authkey|host/)*.+)'
        matches4 = re.findall(regex, page)
        
        matches2=[ x for x in matches2 if any(sup in x for sup in supports) ]
        matches3=[ x for x in matches3 if any(sup in x for sup in supports) ]
        
        matches=matches+matches2+matches3+matches4"""

        unique = []
        for match in matches:
            if isinstance(match, tuple):
                unique.append(match[0])
            else:
                unique.append(match)

        matches = list(set(unique))

        if matches:
            for match in matches:
                if "http" not in match:
                    rl = "http:" + match
                else:
                    rl = match

                menu.append({
                    "title": rl,
                    "url": rl,
                    "mode": "114",
                    "poster": parameters['poster'],
                    "icon": parameters['icon'],
                    "fanart": parameters['fanart'],
                    "type": "",
                    "plot": "",
                    "isFolder": False,
                    "isPlayable": False,
                    "extras": str(extras)
                })
            util.addMenuItems(menu)
    elif mode == 114:
        # resolve and play the single source chosen from the mode 113 list
        try:
            link = urlresolver.resolve(str(parameters['url']))
        except Exception as e:
            util.notify(str(e))
            return False
        if link:
            try:
                util.playMedia(parameters['name'], parameters['poster'], link)
            except:
                util.playMedia(parameters['name'], parameters['poster'],
                               parameters['url'])
예제 #30
0
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import log_utils

try:
    import urlresolver

    debrid_resolvers = [
        resolver()
        for resolver in urlresolver.relevant_resolvers(order_matters=True)
        if resolver.isUniversal()
    ]

    if len(debrid_resolvers) == 0:
        # Support Rapidgator accounts! Unfortunately, `sources.py` assumes that rapidgator.net is only ever
        # accessed via a debrid service, so we add rapidgator as a debrid resolver and everything just works.
        # As a bonus(?), rapidgator links will be highlighted just like actual debrid links
        debrid_resolvers = [
            resolver() for resolver in urlresolver.relevant_resolvers(
                order_matters=True, include_universal=False)
            if 'rapidgator.net' in resolver.domains
        ]

except:
    debrid_resolvers = []
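
A minimal sketch of how a debrid_resolvers list built this way is typically consumed downstream. The helper name resolve_with_debrid is made up for illustration, but the login / get_host_and_id / get_media_url sequence follows the same urlresolver pattern the surrounding examples use:

def resolve_with_debrid(url):
    # Walk the prioritised resolver list; thanks to the rapidgator fallback
    # above this also works when no real debrid service is configured.
    for resolver in debrid_resolvers:
        try:
            resolver.login()
            host, media_id = resolver.get_host_and_id(url)
            stream_url = resolver.get_media_url(host, media_id)
            if stream_url:
                return stream_url
        except Exception:
            continue
    return None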
예제 #31
0
    def _cached_http_get(self,
                         url,
                         base_url,
                         timeout,
                         cookies=None,
                         data=None,
                         multipart_data=None,
                         headers=None,
                         allow_redirect=True,
                         method=None,
                         require_debrid=False,
                         cache_limit=8):
        if require_debrid:
            if Scraper.debrid_resolvers is None:
                Scraper.debrid_resolvers = [
                    resolver for resolver in urlresolver.relevant_resolvers()
                    if resolver.isUniversal()
                ]
            if not Scraper.debrid_resolvers:
                log_utils.log(
                    '%s requires debrid: %s' %
                    (self.__class__.__name__, Scraper.debrid_resolvers),
                    log_utils.LOGDEBUG)
                return ''

        if cookies is None: cookies = {}
        if timeout == 0: timeout = None
        if headers is None: headers = {}
        if url.startswith('//'): url = 'http:' + url
        referer = headers['Referer'] if 'Referer' in headers else url
        log_utils.log(
            'Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' %
            (url, cookies, data, headers), log_utils.LOGDEBUG)
        if data is not None:
            if not isinstance(data, basestring):
                data = urllib.urlencode(data, True)

        if multipart_data is not None:
            headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
            data = multipart_data

        self.create_db_connection()
        _created, _res_header, html = self.db_connection.get_cached_url(
            url, data, cache_limit)
        if html:
            log_utils.log('Returning cached result for: %s' % (url),
                          log_utils.LOGDEBUG)
            return html

        try:
            self.cj = self._set_cookies(base_url, cookies)
            request = urllib2.Request(url, data=data)
            request.add_header('User-Agent', scraper_utils.get_ua())
            request.add_header('Accept', '*/*')
            request.add_unredirected_header('Host', request.get_host())
            request.add_unredirected_header('Referer', referer)
            for key in headers:
                request.add_header(key, headers[key])
            self.cj.add_cookie_header(request)
            if not allow_redirect:
                opener = urllib2.build_opener(NoRedirection)
                urllib2.install_opener(opener)
            else:
                opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
                urllib2.install_opener(opener)
                opener2 = urllib2.build_opener(
                    urllib2.HTTPCookieProcessor(self.cj))
                urllib2.install_opener(opener2)
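                # install_opener replaces the global opener each time, so the
                # cookie-aware opener2 installed last is the one urlopen uses
                # (build_opener adds the default redirect handler by itself)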

            if method is not None: request.get_method = lambda: method.upper()
            response = urllib2.urlopen(request, timeout=timeout)
            self.cj.extract_cookies(response, request)
            if kodi.get_setting('cookie_debug') == 'true':
                log_utils.log(
                    'Response Cookies: %s - %s' %
                    (url, scraper_utils.cookies_as_str(self.cj)),
                    log_utils.LOGDEBUG)
            self.cj._cookies = scraper_utils.fix_bad_cookies(self.cj._cookies)
            self.cj.save(ignore_discard=True)
            if not allow_redirect and (
                    response.getcode() in [301, 302, 303, 307]
                    or response.info().getheader('Refresh')):
                if response.info().getheader('Refresh') is not None:
                    refresh = response.info().getheader('Refresh')
                    return refresh.split(';')[-1].split('url=')[-1]
                else:
                    return response.info().getheader('Location')

            content_length = response.info().getheader('Content-Length', 0)
            if int(content_length) > MAX_RESPONSE:
                log_utils.log(
                    'Response exceeded allowed size. %s => %s / %s' %
                    (url, content_length, MAX_RESPONSE), log_utils.LOGWARNING)

            if method == 'HEAD':
                return ''
            else:
                if response.info().get('Content-Encoding') == 'gzip':
                    buf = StringIO(response.read(MAX_RESPONSE))
                    f = gzip.GzipFile(fileobj=buf)
                    html = f.read()
                else:
                    html = response.read(MAX_RESPONSE)
        except urllib2.HTTPError as e:
            if e.code == 503 and 'cf-browser-verification' in e.read():
                html = cloudflare.solve(url, self.cj, scraper_utils.get_ua())
                if not html:
                    return ''
            else:
                log_utils.log(
                    'Error (%s) during scraper http get: %s' % (str(e), url),
                    log_utils.LOGWARNING)
                return ''
        except Exception as e:
            log_utils.log(
                'Error (%s) during scraper http get: %s' % (str(e), url),
                log_utils.LOGWARNING)
            return ''

        self.db_connection.cache_url(url, html, data)
        return html
예제 #32
0
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

from resources.lib.modules import log_utils

try:
    import urlresolver

    debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True) if resolver.isUniversal()]

    if len(debrid_resolvers) == 0:
        # Support Rapidgator accounts! Unfortunately, `sources.py` assumes that rapidgator.net is only ever
        # accessed via a debrid service, so we add rapidgator as a debrid resolver and everything just works.
        # As a bonus(?), rapidgator links will be highlighted just like actual debrid links
        debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True,include_universal=False) if 'rapidgator.net' in resolver.domains]

except:
    debrid_resolvers = []


def status():
    return debrid_resolvers != []

예제 #33
0
    def _cached_http_get(self,
                         url,
                         base_url,
                         timeout,
                         params=None,
                         data=None,
                         multipart_data=None,
                         headers=None,
                         cookies=None,
                         allow_redirect=True,
                         method=None,
                         require_debrid=False,
                         read_error=False,
                         cache_limit=8):
        if require_debrid:
            if Scraper.debrid_resolvers is None:
                Scraper.debrid_resolvers = [
                    resolver for resolver in urlresolver.relevant_resolvers()
                    if resolver.isUniversal()
                ]
            if not Scraper.debrid_resolvers:
                logger.log(
                    '%s requires debrid: %s' %
                    (self.__module__, Scraper.debrid_resolvers),
                    log_utils.LOGDEBUG)
                return ''

        if cookies is None: cookies = {}
        if timeout == 0: timeout = None
        if headers is None: headers = {}
        if url.startswith('//'): url = 'http:' + url
        referer = headers['Referer'] if 'Referer' in headers else base_url
        if params:
            if url == base_url and not url.endswith('/'):
                url += '/'

            parts = urlparse.urlparse(url)
            if parts.query:
                params.update(scraper_utils.parse_query(url))
                url = urlparse.urlunparse(
                    (parts.scheme, parts.netloc, parts.path, parts.params, '',
                     parts.fragment))

            url += '?' + urllib.urlencode(params)
        logger.log(
            'Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' %
            (url, cookies, data, headers), log_utils.LOGDEBUG)
        if data is not None:
            if not isinstance(data, basestring):
                data = urllib.urlencode(data, True)

        if multipart_data is not None:
            headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
            data = multipart_data

        _created, _res_header, html = self.db_connection().get_cached_url(
            url, data, cache_limit)
        if html:
            logger.log('Returning cached result for: %s' % (url),
                       log_utils.LOGDEBUG)
            return html

        try:
            self.cj = self._set_cookies(base_url, cookies)
            if isinstance(url, unicode): url = url.encode('utf-8')
            request = urllib2.Request(url, data=data)
            headers = headers.copy()
            request.add_header('User-Agent', scraper_utils.get_ua())
            request.add_header('Accept', '*/*')
            request.add_header('Accept-Encoding', 'gzip')
            request.add_unredirected_header('Host', request.get_host())
            if referer: request.add_unredirected_header('Referer', referer)
            if 'Referer' in headers: del headers['Referer']
            if 'Host' in headers: del headers['Host']
            for key, value in headers.iteritems():
                request.add_header(key, value)
            self.cj.add_cookie_header(request)
            if not allow_redirect:
                opener = urllib2.build_opener(NoRedirection)
                urllib2.install_opener(opener)
            else:
                opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
                urllib2.install_opener(opener)
                opener2 = urllib2.build_opener(
                    urllib2.HTTPCookieProcessor(self.cj))
                urllib2.install_opener(opener2)

            if method is not None: request.get_method = lambda: method.upper()
            response = urllib2.urlopen(request, timeout=timeout)
            self.cj.extract_cookies(response, request)
            if kodi.get_setting('cookie_debug') == 'true':
                logger.log(
                    'Response Cookies: %s - %s' %
                    (url, scraper_utils.cookies_as_str(self.cj)),
                    log_utils.LOGDEBUG)
            self.cj._cookies = scraper_utils.fix_bad_cookies(self.cj._cookies)
            self.cj.save(ignore_discard=True)
            if not allow_redirect and (
                    response.getcode() in [301, 302, 303, 307]
                    or response.info().getheader('Refresh')):
                if response.info().getheader('Refresh') is not None:
                    refresh = response.info().getheader('Refresh')
                    return refresh.split(';')[-1].split('url=')[-1]
                else:
                    redir_url = response.info().getheader('Location')
                    if redir_url.startswith('='):
                        redir_url = redir_url[1:]
                    return redir_url

            content_length = response.info().getheader('Content-Length', 0)
            if int(content_length) > MAX_RESPONSE:
                logger.log(
                    'Response exceeded allowed size. %s => %s / %s' %
                    (url, content_length, MAX_RESPONSE), log_utils.LOGWARNING)

            if method == 'HEAD':
                return ''
            else:
                if response.info().get('Content-Encoding') == 'gzip':
                    html = ungz(response.read(MAX_RESPONSE))
                else:
                    html = response.read(MAX_RESPONSE)
        except urllib2.HTTPError as e:
            if e.info().get('Content-Encoding') == 'gzip':
                html = ungz(e.read(MAX_RESPONSE))
            else:
                html = e.read(MAX_RESPONSE)

            if CF_CAPCHA_ENABLED and e.code == 403 and 'cf-captcha-bookmark' in html:
                html = cf_captcha.solve(url, self.cj, scraper_utils.get_ua(),
                                        self.get_name())
                if not html:
                    return ''
            elif e.code == 503 and 'cf-browser-verification' in html:
                html = cloudflare.solve(url,
                                        self.cj,
                                        scraper_utils.get_ua(),
                                        extra_headers=headers)
                if not html:
                    return ''
            else:
                logger.log(
                    'Error (%s) during scraper http get: %s' % (str(e), url),
                    log_utils.LOGWARNING)
                if not read_error:
                    return ''
        except Exception as e:
            logger.log(
                'Error (%s) during scraper http get: %s' % (str(e), url),
                log_utils.LOGWARNING)
            return ''

        self.db_connection().cache_url(url, html, data)
        return html
예제 #34
0
    def _cached_http_get(self, url, base_url, timeout, cookies=None, data=None, multipart_data=None, headers=None, allow_redirect=True, method=None,
                         require_debrid=False, cache_limit=8):
        if require_debrid:
            if Scraper.debrid_resolvers is None:
                Scraper.debrid_resolvers = [resolver for resolver in urlresolver.relevant_resolvers() if resolver.isUniversal()]
            if not Scraper.debrid_resolvers:
                log_utils.log('%s requires debrid: %s' % (self.__class__.__name__, Scraper.debrid_resolvers), log_utils.LOGDEBUG)
                return ''
                
        if cookies is None: cookies = {}
        if timeout == 0: timeout = None
        if headers is None: headers = {}
        if url.startswith('//'): url = 'http:' + url
        referer = headers['Referer'] if 'Referer' in headers else url
        log_utils.log('Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' % (url, cookies, data, headers), log_utils.LOGDEBUG)
        if data is not None:
            if not isinstance(data, basestring):
                data = urllib.urlencode(data, True)

        if multipart_data is not None:
            headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
            data = multipart_data

        self.create_db_connection()
        _created, _res_header, html = self.db_connection.get_cached_url(url, data, cache_limit)
        if html:
            log_utils.log('Returning cached result for: %s' % (url), log_utils.LOGDEBUG)
            return html

        try:
            self.cj = self._set_cookies(base_url, cookies)
            request = urllib2.Request(url, data=data)
            request.add_header('User-Agent', scraper_utils.get_ua())
            request.add_header('Accept', '*/*')
            request.add_unredirected_header('Host', request.get_host())
            request.add_unredirected_header('Referer', referer)
            for key in headers: request.add_header(key, headers[key])
            self.cj.add_cookie_header(request)
            if not allow_redirect:
                opener = urllib2.build_opener(NoRedirection)
                urllib2.install_opener(opener)
            else:
                opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
                urllib2.install_opener(opener)
                opener2 = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
                urllib2.install_opener(opener2)

            if method is not None: request.get_method = lambda: method.upper()
            response = urllib2.urlopen(request, timeout=timeout)
            self.cj.extract_cookies(response, request)
            if kodi.get_setting('cookie_debug') == 'true':
                log_utils.log('Response Cookies: %s - %s' % (url, scraper_utils.cookies_as_str(self.cj)), log_utils.LOGDEBUG)
            self.cj._cookies = scraper_utils.fix_bad_cookies(self.cj._cookies)
            self.cj.save(ignore_discard=True)
            if not allow_redirect and (response.getcode() in [301, 302, 303, 307] or response.info().getheader('Refresh')):
                if response.info().getheader('Refresh') is not None:
                    refresh = response.info().getheader('Refresh')
                    return refresh.split(';')[-1].split('url=')[-1]
                else:
                    return response.info().getheader('Location')
            
            content_length = response.info().getheader('Content-Length', 0)
            if int(content_length) > MAX_RESPONSE:
                log_utils.log('Response exceeded allowed size. %s => %s / %s' % (url, content_length, MAX_RESPONSE), log_utils.LOGWARNING)
            
            if method == 'HEAD':
                return ''
            else:
                if response.info().get('Content-Encoding') == 'gzip':
                    buf = StringIO(response.read(MAX_RESPONSE))
                    f = gzip.GzipFile(fileobj=buf)
                    html = f.read()
                else:
                    html = response.read(MAX_RESPONSE)
        except urllib2.HTTPError as e:
            if e.code == 503 and 'cf-browser-verification' in e.read():
                html = cloudflare.solve(url, self.cj, scraper_utils.get_ua())
                if not html:
                    return ''
            else:
                log_utils.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
                return ''
        except Exception as e:
            log_utils.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
            return ''

        self.db_connection.cache_url(url, html, data)
        return html
예제 #35
0
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

from resources.lib.modules import log_utils

try:
    import urlresolver

    debrid_resolvers = [
        resolver()
        for resolver in urlresolver.relevant_resolvers(order_matters=True)
        if resolver.isUniversal()
    ]
except:
    debrid_resolvers = []


def status():
    return debrid_resolvers != []


def resolver(url, debrid):
    try:
        debrid_resolver = [
            resolver for resolver in debrid_resolvers
            if resolver.name == debrid
예제 #36
0
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

from resources.lib.modules import control, log_utils

try:
    import urlresolver

    debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True) if resolver.isUniversal()]

    if len(debrid_resolvers) == 0:
        # Support Rapidgator accounts! Unfortunately, `sources.py` assumes that rapidgator.net is only ever
        # accessed via a debrid service, so we add rapidgator as a debrid resolver and everything just works.
        # As a bonus(?), rapidgator links will be highlighted just like actual debrid links
        debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True,include_universal=False) if 'rapidgator.net' in resolver.domains]

except:
    debrid_resolvers = []


def status(torrent=False):
    debrid_check = debrid_resolvers != []
    if debrid_check is True:
        if torrent:
예제 #37
0
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

from resources.lib.modules import log_utils

try:
    import urlresolver

    debrid_resolvers = [resolver() for resolver in urlresolver.relevant_resolvers(order_matters=True) if resolver.isUniversal()]
except:
    debrid_resolvers = []


def status():
    return debrid_resolvers != []


def resolver(url, debrid):
    try:
        debrid_resolver = [resolver for resolver in debrid_resolvers if resolver.name == debrid][0]

        debrid_resolver.login()
        _host, _media_id = debrid_resolver.get_host_and_id(url)
        stream_url = debrid_resolver.get_media_url(_host, _media_id)
예제 #38
0
def apply_urlresolver(hosters):
    filter_debrid = kodi.get_setting('filter_debrid') == 'true'
    show_debrid = kodi.get_setting('show_debrid') == 'true'
    if not filter_debrid and not show_debrid:
        print "RETURNING NON FILTERED"
        return hosters
## New Resolver
    try:
        import urlresolver.plugnplay
        resolvers = urlresolver.plugnplay.man.implementors(
            urlresolver.UrlResolver)
        debrid_resolvers = [
            resolver for resolver in resolvers if resolver.isUniversal()
            and resolver.get_setting('enabled') == 'true'
        ]
    except:
        import urlresolver
        debrid_resolvers = [
            resolver()
            for resolver in urlresolver.relevant_resolvers(order_matters=True)
            if resolver.isUniversal()
        ]
##   End New Resolver
    filtered_hosters = []
    debrid_hosts = {}
    unk_hosts = {}
    known_hosts = {}

    for hoster in hosters:
        #print "HOSTERS ARE: "+str(hoster)
        if 'direct' in hoster and hoster['direct'] == False and hoster['host']:
            host = hoster['host'].lower()
            if kodi.get_setting('filter_debrid') == 'true':
                if host in unk_hosts:
                    # log_utils.log('Unknown Hit: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
                    unk_hosts[host] += 1
                    continue
                elif host in known_hosts:
                    # log_utils.log('Known Hit: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
                    known_hosts[host] += 1
                    filtered_hosters.append(hoster)
                else:
                    hmf = urlresolver.HostedMediaFile(
                        host=host, media_id='dummy'
                    )  # use dummy media_id to force host validation
                    if hmf:
                        # log_utils.log('Known Miss: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
                        known_hosts[host] = known_hosts.get(host, 0) + 1
                        filtered_hosters.append(hoster)
                    else:
                        # log_utils.log('Unknown Miss: %s from %s' % (host, hoster['class'].get_name()), log_utils.LOGDEBUG)
                        unk_hosts[host] = unk_hosts.get(host, 0) + 1
                        continue
            else:
                filtered_hosters.append(hoster)

            if host in debrid_hosts:
                log_utils.log(
                    'Debrid cache found for %s: %s' %
                    (host, debrid_hosts[host]), log_utils.LOGDEBUG)
                hoster['debrid'] = debrid_hosts[host]
                #print debrid_hosts[host]
            else:
                temp_resolvers = []
                for resolver in debrid_resolvers:
                    if resolver.valid_url('', host):
                        # shorten the resolver names for display
                        rname = resolver.name.replace('Real-Debrid', 'RD').replace('Premiumize.me', 'PRE')
                        temp_resolvers.append(rname.upper())
                        if kodi.get_setting('debug') == "true":
                            print '%s supported by: %s' % (host, temp_resolvers)
                        debrid_hosts[host] = temp_resolvers
                    else:
                        hoster['debrid'] = ''
                if temp_resolvers:
                    hoster['debrid'] = temp_resolvers
        else:
            filtered_hosters.append(hoster)

    #log_utils.log('Discarded Hosts: %s' % (sorted(unk_hosts.items(), key=lambda x: x[1], reverse=True)), xbmc.LOGDEBUG)
    if kodi.get_setting('debug') == "true":
        kodi.log("FILTERED HOSTERS ARE =" + str(filtered_hosters))
    return filtered_hosters
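
The dummy media_id probe above is the key mechanism: a HostedMediaFile built from just a host evaluates falsy when no installed resolver claims that host. A stripped-down sketch (the function name host_is_supported is hypothetical, for illustration only):

import urlresolver

def host_is_supported(host):
    # HostedMediaFile(host=..., media_id='dummy') is falsy when no installed
    # resolver recognises the host, so truthiness alone answers the question
    # "can urlresolver handle this hoster?" without resolving anything.
    return bool(urlresolver.HostedMediaFile(host=host, media_id='dummy'))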