Exemplo n.º 1
0
	def handle(self, link, item, download = False, popups = False, close = True, select = False, cloud = False):
		"""
		Resolve a scraped link into a playable stream URL.

		Tries, in order: (1) direct links (returned untouched), (2) universal
		debrid resolvers, falling back to Rapidgator-capable resolvers when no
		debrid account is configured, (3) normal hoster resolvers. Returns a
		debrid.Debrid.addResult(...) wrapper, wrapping None on total failure.
		The download/close/select/cloud flags are accepted for interface
		compatibility but are unused here.
		"""
		try:
			# Direct links need no resolving at all.
			# (.get() == True replaces the original double lookup `'direct' in item and item['direct'] == True`.)
			if item and item.get('direct') == True: return link

			# Import locally: importing ResolveUrl when handler.py loads would
			# drastically slow down menu construction.
			try: import resolveurl
			except: pass

			# First check if a debrid (universal) resolver is available.
			resolvers = [i() for i in resolveurl.relevant_resolvers(order_matters = True) if i.isUniversal()]
			if not resolvers:
				# No debrid account: treat Rapidgator accounts like a debrid service.
				resolvers = [i() for i in resolveurl.relevant_resolvers(order_matters = True, include_universal = False) if 'rapidgator.net' in i.domains]
			for resolver in resolvers:
				try:
					resolver.login()
					host, mediaId = resolver.get_host_and_id(link) # 'mediaId' avoids shadowing the id() builtin.
					linkNew = resolver.get_media_url(host, mediaId)
					if linkNew: return debrid.Debrid.addResult(link = linkNew)
				except: pass

			# If not supported by debrid, try the normal hoster resolvers.
			media = resolveurl.HostedMediaFile(url = link, include_disabled = True, include_universal = False)
			if media.valid_url(): return debrid.Debrid.addResult(link = media.resolve(allow_popups = popups))
			return debrid.Debrid.addResult(link = None)
		except:
			# Any unexpected failure (including resolveurl missing) yields an empty result.
			return debrid.Debrid.addResult(link = None)
Exemplo n.º 2
0
    def getConstants(self):
        """
        Initialise the scraper source modules and the hoster domain lists
        used to classify scraped links.
        """
        from resources.lib.sources import sources as sources
        self.sourceDict = sources()

        try:
            # Flatten every resolver's domain list (skipping universal '*'
            # resolvers) into one lower-cased list, de-duplicated while
            # preserving resolver priority order. The seen-set makes this
            # O(n) instead of the O(n^2) slice-scan it replaces, and avoids
            # the Python-2-only bare `reduce`.
            resolvers = urlresolver.relevant_resolvers(order_matters=True)
            domain_groups = [i.domains for i in resolvers if '*' not in i.domains]
            flattened = [d.lower() for group in domain_groups for d in group]
            seen = set()
            self.hostDict = [d for d in flattened if not (d in seen or seen.add(d))]
        except Exception:
            # Resolver library unavailable: fall back to no known hosts.
            self.hostDict = []

        # Presumably premium-account hosts — TODO confirm downstream usage.
        self.hostprDict = [
            '1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to',
            'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com',
            'nitroflare.com', 'turbobit.net', 'uploadrocket.net'
        ]

        # Presumably captcha-protected hosts — TODO confirm downstream usage.
        self.hostcapDict = [
            'hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co',
            'thevideo.me', 'vidup.me', 'streamin.to', 'torba.se'
        ]

        self.hostblockDict = []
def router(url):
    """
    Route *url* to a stream: honour a user-forced host if configured,
    otherwise resolve through ResolveUrl; return the url unchanged when it
    cannot be handled.
    """
    try:
        add_plugin_dirs(
            [control.join(control.addonPath, 'resources', 'lib', 'resolvers')])

        forced_host = control.setting('force_host')

        # Collect the domains of every relevant resolver except the first
        # one (matching the original's [1:] slice of the domain lists).
        all_domains = []
        for resolver_class in relevant_resolvers()[1:]:
            all_domains.extend(resolver_class.domains)

        if forced_host in all_domains:
            # The user pinned a specific host: resolve against it directly.
            return HostedMediaFile(media_id=url, host=forced_host).resolve()

        if HostedMediaFile(url).valid_url():
            return resolve(url)

        # Not resolvable: hand the url back untouched.
        return url

    except ResolverError:
        return url
    def __get_resolvers(self, include_disabled, include_universal,
                        include_popups):
        """
        Return resolver instances relevant to self._domain, reusing
        previously constructed instances from resolver_cache.
        """
        # None means: fall back to the user's addon setting.
        if include_universal is None:
            include_universal = common.get_setting('allow_universal') == "true"

        if include_popups is None:
            include_popups = common.get_setting('allow_popups') == "true"

        klasses = resolveurl.relevant_resolvers(
            self._domain,
            include_universal=include_universal,
            include_popups=include_popups,
            include_external=True,
            include_disabled=include_disabled,
            order_matters=True)

        resolvers = []
        for klass in klasses:
            # Instantiate each resolver class at most once, process-wide.
            if klass not in resolver_cache:
                common.logger.log_debug('adding resolver to cache: %s' % klass)
                resolver_cache[klass] = klass()
            else:
                common.logger.log_debug('adding resolver from cache: %s' %
                                        klass)
            resolvers.append(resolver_cache[klass])
        return resolvers
Exemplo n.º 5
0
def get_epsiode_link(sess, data):
    """
    Post the player token for an episode to animezone.pl and return a
    (host, link) tuple for the embedded video.

    NOTE(review): the name keeps the original 'epsiode' typo because external
    callers reference it; it also reads the module-global `url` rather than
    taking it as a parameter — TODO confirm the caller sets it.

    Raises:
        InvalidLink: when no link is found, the host is unsupported, or the
            host validation itself raises.
    """
    headers = {
        'Accept':
        '*/*',
        'Accept-Language':
        'pl,en-US;q=0.7,en;q=0.3',
        'Cache-Control':
        'max-age=0',
        'Connection':
        'keep-alive',
        'Host':
        'www.animezone.pl',
        'Referer':
        str(url).replace("http://", "http://www."),
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
    }

    # Beacon request: presumably hitting the statistics image marks this
    # session as a real browser on the server side — the response is unused.
    verify = sess.get('http://animezone.pl/images/statistics.gif',
                      headers=headers)
    # Flatten every resolver's domain list (skipping '*' universal resolvers)
    # into one lower-cased, order-preserving, de-duplicated list.
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if not '*' in i.domains]
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]

    headers = {
        'Host': 'www.animezone.pl',
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Accept': '*/*',
        'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
        'Referer': str(url).replace("http://", "http://www."),
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    # The AJAX endpoint expects the player token under the 'data' form field.
    data = {'data': data}
    response = sess.post(str(url).replace("http://", "https://www."),
                         headers=headers,
                         data=data).content
    # The returned markup either links the video (<a href>) or embeds it
    # (<iframe src>); try the anchor first.
    try:
        link = client.parseDOM(response, 'a', ret='href')[0]
    except:
        link = client.parseDOM(response, 'iframe', ret='src')[0]

    if not link:
        raise InvalidLink('No link')
    if str(link).startswith('//'):
        # Protocol-relative URL: force plain http.
        link = str(link).replace("//", "http://")
    try:

        valid, host = source_utils.is_host_valid(str(link), hostDict)
    except Exception as e:
        log_exception()
        raise InvalidLink('Exception {!r}'.format(e))
    if not valid:
        raise InvalidLink('Invalid host')
    return host, link
Exemplo n.º 6
0
def ListujLinki():
    """
    List video links for a wbijam.pl episode: scrape the episode table,
    fetch each player page and add a directory entry per supported host.
    """
    url = urllib.unquote_plus(params['url'])
    result = requests.get(url).content
    result = client.parseDOM(result, 'table', attrs={'class': 'lista'})
    result = client.parseDOM(result, 'tr', attrs={'class': 'lista_hover'})
    odtwarzacz = "%sodtwarzacz-%s.html"

    # Build the supported-hoster domain list once (lower-cased, skipping '*'
    # universal resolvers, de-duplicated in O(n) preserving priority order).
    resolvers = resolveurl.relevant_resolvers(order_matters=True)
    domain_groups = [i.domains for i in resolvers if '*' not in i.domains]
    flattened = [d.lower() for group in domain_groups for d in group]
    seen = set()
    hostDict = [d for d in flattened if not (d in seen or seen.add(d))]

    url = url.split("pl/")[0] + "pl/"
    for item in result:
        # 'episodeId' avoids shadowing the id() builtin.
        episodeId = client.parseDOM(item, 'span', ret='rel')[0]
        content = odtwarzacz % (url, episodeId)
        xbmc.log('Wbijam.pl | Listuje z url: %s' % content, xbmc.LOGNOTICE)
        temp = requests.get(content).content
        try:
            link = client.parseDOM(temp, 'iframe', ret='src')
        except:
            continue
        for item2 in link:
            try:
                if str(item2).startswith("//"):
                    # Protocol-relative URL: force plain http.
                    item2 = str(item2).replace("//", "http://")
                valid, host = source_utils.is_host_valid(str(item2), hostDict)
                if not valid:
                    continue
                xbmc.log('Wbijam.pl | Video Link: %s' % str(item2), xbmc.LOGNOTICE)
                addDir("[B]" + host + "[/B]", str(item2), 6, '', '', '', False)
            except:
                continue
Exemplo n.º 7
0
 def getConstants(self):
     """
     Initialise scraper sources, the provider database path, the hoster
     classification lists and all per-provider source counters.
     """
     from tikiscrapers import sources
     self.providerDatabase = os.path.join(
         xbmc.translatePath(__external__.getAddonInfo('profile')),
         "providers.db")
     self.sourceDict = sources()
     self.moduleProvider = __external__.getSetting('module.provider')
     try:
         # Flatten every resolver's domains (skipping '*' universal
         # resolvers) into one lower-cased list; the seen-set de-duplicates
         # in O(n) while preserving resolver priority order, replacing the
         # original O(n^2) slice scan and the Python-2-only bare `reduce`.
         resolvers = resolveurl.relevant_resolvers(order_matters=True)
         domain_groups = [i.domains for i in resolvers if '*' not in i.domains]
         flattened = [d.lower() for group in domain_groups for d in group]
         seen = set()
         self.hostDict = [d for d in flattened if not (d in seen or seen.add(d))]
     except Exception:
         self.hostDict = []
     # Presumably premium-account hosts — TODO confirm downstream usage.
     self.hostprDict = [
         '1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to',
         'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com',
         'nitroflare.com', 'turbobit.net', 'uploadrocket.net'
     ]
     # Presumably captcha-protected hosts — TODO confirm downstream usage.
     self.hostcapDict = [
         'kingfiles.net', 'openload.io', 'openload.co', 'oload.tv',
         'thevideo.me', 'vidup.me', 'streamin.to', 'torba.se', 'openload'
     ]
     # Presumably hosts capable of high-quality streams.
     self.hosthqDict = [
         'gvideo', 'google.com', 'openload.io', 'openload.co', 'oload.tv',
         'thevideo.me', 'rapidvideo.com', 'raptu.com', 'filez.tv',
         'uptobox.com', 'uptostream.com', 'xvidstage.com', 'streamango.com'
     ]
     self.hostblockDict = []
     self.furk_enabled = __fen__.getSetting('provider.furk')
     self.easynews_enabled = __fen__.getSetting('provider.easynews')
     self.local_enabled = __fen__.getSetting('provider.local')
     self.downloads_enabled = __fen__.getSetting('provider.downloads')
     self.progressHeading = int(__fen__.getSetting('progress.heading'))
     # True when at least one account-based internal provider is switched on
     # (the original's redundant `True if ... else False` collapsed).
     self.internal_activated = self.furk_enabled == 'true' or self.easynews_enabled == 'true'
     # Per-provider source counters: one total plus one per quality bucket.
     self.furk_sources = self.furk_sources_4K = self.furk_sources_1080p = self.furk_sources_720p = self.furk_sources_SD = 0
     self.easynews_sources = self.easynews_sources_4K = self.easynews_sources_1080p = self.easynews_sources_720p = self.easynews_sources_SD = 0
     self.local_sources = self.local_sources_4K = self.local_sources_1080p = self.local_sources_720p = self.local_sources_SD = 0
     self.downloads_sources = self.downloads_sources_4K = self.downloads_sources_1080p = self.downloads_sources_720p = self.downloads_sources_SD = 0
     self.internalSourcesTotal = self.internalSources4K = self.internalSources1080p = self.internalSources720p = self.internalSourcesSD = 0
Exemplo n.º 8
0
	def services(self):
		"""
		Return the cached list of supported hoster domains, building it on
		first use from ResolveUrl's relevant resolvers. Returns [] when the
		list cannot be built.
		"""
		if self.mServices is None: # `is None` replaces the original `== None`.
			try: import resolveurl # Deferred import: loading ResolveUrl when handler.py is imported drastically slows down menus.
			except: pass
			try:
				# Flatten resolver domains (skipping '*' universal resolvers),
				# lower-cased and de-duplicated in O(n) preserving order —
				# also drops the Python-2-only bare `reduce`.
				resolvers = resolveurl.relevant_resolvers(order_matters = True)
				domainGroups = [i.domains for i in resolvers if '*' not in i.domains]
				flattened = [i.lower() for group in domainGroups for i in group]
				seen = set()
				self.mServices = [i for i in flattened if not (i in seen or seen.add(i))]
			except:
				return []
		return self.mServices
Exemplo n.º 9
0
def ListujLinki():
    """
    List video links for a kreskoweczki.pl page: post each player source id
    to the fullscreen endpoint and add a link entry per supported host.
    """
    try:
        url = urllib.unquote_plus(params['url'])
    except:
        pass
    # 'videoId' avoids shadowing the id() builtin.
    videoId = url.split("/")[5]
    s = requests.session()
    # NOTE: the original built a Referer/User-Agent header dict here but
    # never passed it to any request; the unused locals were removed.
    result = client.request(url)
    h = HTMLParser()
    result = h.unescape(result)
    source_id = client.parseDOM(result,
                                'form',
                                attrs={'action': '/fullscreen/' + videoId})
    source_id = client.parseDOM(source_id, 'input', ret='value')

    # Supported hoster domains: lower-cased, '*' universal resolvers skipped,
    # de-duplicated in O(n) while preserving resolver priority order.
    resolvers = resolveurl.relevant_resolvers(order_matters=True)
    domain_groups = [i.domains for i in resolvers if '*' not in i.domains]
    flattened = [d.lower() for group in domain_groups for d in group]
    seen = set()
    hostDict = [d for d in flattened if not (d in seen or seen.add(d))]

    for item in source_id:
        data = {'source_id': str(item)}
        content = s.post("https://www.kreskoweczki.pl/fullscreen/" + videoId,
                         data=data).content
        # The player holder either links the video (<a href>) or embeds it
        # (<iframe src>); try the anchor first.
        try:
            temp = client.parseDOM(content,
                                   'div',
                                   attrs={'class': 'playerholder'})
            video_link = client.parseDOM(temp, 'a', ret='href')[0]
        except:
            try:
                video_link = client.parseDOM(content, 'iframe', ret='src')[0]
            except:
                continue
        if str(video_link).startswith("//"):
            # Protocol-relative URL: force plain http.
            video_link = str(video_link).replace("//", "http://")
        valid, host = source_utils.is_host_valid(video_link, hostDict)
        if not valid:
            continue
        nazwa = "[COLOR green]" + host + " [/COLOR]"
        addLink(nazwa, str(video_link), 6, "", "", default_background, "",
                "")
Exemplo n.º 10
0
    def __get_resolvers(self, include_disabled, include_universal):
        """
        Return resolver instances relevant to self._domain, reusing
        previously constructed instances from resolver_cache.
        """
        # None means: fall back to the user's addon setting.
        if include_universal is None:
            include_universal = common.get_setting('allow_universal') == "true"

        klasses = resolveurl.relevant_resolvers(self._domain, include_universal=include_universal,
                                                 include_external=True, include_disabled=include_disabled, order_matters=True)
        resolvers = []
        for klass in klasses:
            # Instantiate each resolver class at most once, process-wide.
            if klass not in resolver_cache:
                common.logger.log_debug('adding resolver to cache: %s' % (klass))
                resolver_cache[klass] = klass()
            else:
                common.logger.log_debug('adding resolver from cache: %s' % (klass))
            resolvers.append(resolver_cache[klass])
        return resolvers
Exemplo n.º 11
0
    def getConstants(self):
        """
        Initialise container window properties, scraper sources and the
        hoster classification lists.
        """
        self.itemProperty = 'plugin.video.michaelmyers.container.items'

        self.metaProperty = 'plugin.video.michaelmyers.container.meta'

        from resources.lib.sources import sources

        self.sourceDict = sources()

        try:
            # Flatten resolver domains (skipping '*' universal resolvers)
            # into one lower-cased list, de-duplicated in O(n) while
            # preserving resolver priority order (replaces the O(n^2) slice
            # scan and the Python-2-only bare `reduce`).
            resolvers = resolveurl.relevant_resolvers(order_matters=True)
            domain_groups = [i.domains for i in resolvers if '*' not in i.domains]
            flattened = [d.lower() for group in domain_groups for d in group]
            seen = set()
            self.hostDict = [d for d in flattened if not (d in seen or seen.add(d))]
        except Exception:
            self.hostDict = []

        # Presumably premium-account hosts — TODO confirm downstream usage.
        self.hostprDict = [
            '1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to',
            'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com',
            'nitroflare.com', 'turbobit.net', 'uploadrocket.net'
        ]

        # Presumably captcha-protected hosts — TODO confirm downstream usage.
        self.hostcapDict = [
            'hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co',
            'oload.tv', 'thevideo.me', 'vidup.me', 'streamin.to', 'torba.se'
        ]

        # Presumably high-quality-capable hosts. 'uptobox.com' was listed
        # twice in the original; the duplicate entry is dropped.
        self.hosthqDict = [
            'gvideo', 'google.com', 'openload.io', 'openload.co', 'oload.tv',
            'thevideo.me', 'rapidvideo.com', 'raptu.com', 'filez.tv',
            'uptobox.com', 'uptostream.com', 'xvidstage.com', 'streamango.com'
        ]

        self.hostblockDict = []
Exemplo n.º 12
0
def ListujLinki():
    """
    List video links from a bordered episode table: fetch each linked page,
    extract the embedded iframe and add an entry per supported host.
    """
    import resolveurl
    url = urllib.unquote_plus(params['url'])
    result = client.request(url)
    h = HTMLParser()
    result = h.unescape(result)
    result = client.parseDOM(result,
                             'table',
                             attrs={'class': 'table table-bordered'})
    linki = client.parseDOM(result, 'a', ret='href')

    # Hoisted out of the loop: the resolver domain list is loop-invariant
    # (the original rebuilt it for every single link). Lower-cased, '*'
    # universal resolvers skipped, de-duplicated preserving priority order.
    resolvers = resolveurl.relevant_resolvers(order_matters=True)
    domain_groups = [i.domains for i in resolvers if '*' not in i.domains]
    flattened = [d.lower() for group in domain_groups for d in group]
    seen = set()
    hostDict = [d for d in flattened if not (d in seen or seen.add(d))]

    for item in linki:
        temp = client.request(str(item))
        link = client.parseDOM(temp, 'iframe', ret='src')[0]
        valid, host = source_utils.is_host_valid(str(link), hostDict)
        if not valid:
            continue
        addon.addLink("[B]" + host + "[/B]", link, mode=6)
Exemplo n.º 13
0
def uResolve(url):
    """
    Resolve *url* to a playable stream URL.

    - plugin:// URLs are returned untouched (another addon handles them).
    - YouTube URLs are rewritten to plugin.video.youtube play URLs, since
      resolveurl does not reliably resolve Google-hosted content.
    - Everything else goes through resolveurl; returns None when the host
      is unsupported or resolving fails.
    """
    ourl = url
    if url.startswith('plugin'):
        # Points at another plugin, nothing to resolve here.
        pass
    elif url.startswith('http') and 'youtube' in url:
        if 'list=' not in url.lower() and 'playlist' not in url.lower():
            # Single video: extract the video id and confirm the watch page exists.
            v = url.split("?v=")[-1].split("/")[-1].split("?")[0].split("&")[0]
            check = client.request("http://www.youtube.com/watch?v=%s" % v)
            if check:
                url = "plugin://plugin.video.youtube/play/?video_id=%s" % (v)
        elif 'list=' in url.lower():
            playlist_id = url.split('list=')[1]
            url = 'plugin://plugin.video.youtube/play/?playlist_id=%s&play=1' % playlist_id
    else:
        resolvers = resolveurl.relevant_resolvers(order_matters=True)
        # Build the supported-domain list once: lower-cased, '*' universal
        # resolvers skipped, de-duplicated in O(n) preserving priority order
        # (drops the redundant reassignment and the Py2-only bare `reduce`).
        domain_groups = [i.domains for i in resolvers if '*' not in i.domains]
        flattened = [d.lower() for group in domain_groups for d in group]
        seen = set()
        hostDict = [d for d in flattened if not (d in seen or seen.add(d))]
        valid, host = is_host_valid(url, hostDict)
        if not valid:
            log_utils.log('Source Utils uResolve: Invalid Host: %s' % (str(url)))
            return None
        try:
            # Pick the resolver whose name appears in the url and resolve with it.
            resolver = [resolver for resolver in resolvers if resolver.name in url][0]
            host, media_id = resolver().get_host_and_id(url)
            url = resolver().get_media_url(host, media_id)
        except Exception:
            if control.setting('menu_links') == 'true':
                log_utils.log('Source Utils uResolve: Unable to resolve: %s' % (ourl))
            return None
    return url
Exemplo n.º 14
0
# -*- coding: utf-8 -*-
"""
	Venom Add-on
"""

from resources.lib.modules import control
from resources.lib.modules import log_utils

try:
    import resolveurl
    # Universal resolvers correspond to configured debrid services
    # (Real-Debrid, Premiumize, ...), instantiated in priority order.
    debrid_resolvers = [
        resolver()
        for resolver in resolveurl.relevant_resolvers(order_matters=True)
        if resolver.isUniversal()
    ]
    if len(debrid_resolvers) == 0:
        # No debrid account configured: treat a Rapidgator account as a
        # debrid resolver so premium Rapidgator links still work downstream.
        debrid_resolvers = [
            resolver() for resolver in resolveurl.relevant_resolvers(
                order_matters=True, include_universal=False)
            if 'rapidgator.net' in resolver.domains
        ]
except:
    # resolveurl missing or broken: debrid support is simply disabled.
    debrid_resolvers = []


def status(torrent=False):
    debrid_check = debrid_resolvers != []

    if debrid_check is True:
        if torrent:
            enabled = control.setting('torrent.enabled')
Exemplo n.º 15
0
# GNU General Public License per maggiori dettagli.
#
# Dovresti aver ricevuto una copia della GNU General Public License
# insieme a Stefano Thegroove 360. In caso contrario, vedi <http://www.gnu.org/licenses/>.
# ------------------------------------------------- -----------
# Client for Stefano Thegroove 360
#------------------------------------------------------------



from resources.lib.modules import log_utils

try:
    import resolveurl

    # Universal resolvers correspond to configured debrid services
    # (Real-Debrid, Premiumize, ...), instantiated in priority order.
    debrid_resolvers = [resolver() for resolver in resolveurl.relevant_resolvers(order_matters=True) if resolver.isUniversal()]

    if len(debrid_resolvers) == 0:
        # Support Rapidgator accounts! Unfortunately, `sources.py` assumes that rapidgator.net is only ever
        # accessed via a debrid service, so we add rapidgator as a debrid resolver and everything just works.
        # As a bonus(?), rapidgator links will be highlighted just like actual debrid links
        debrid_resolvers = [resolver() for resolver in resolveurl.relevant_resolvers(order_matters=True,include_universal=False) if 'rapidgator.net' in resolver.domains]

except:
    # resolveurl missing or broken: debrid support is simply disabled.
    debrid_resolvers = []


def status():
    """Return True when at least one debrid resolver is configured."""
    return debrid_resolvers != []

Exemplo n.º 16
0
def filter_host(host):
    """
    Return True when *host* (case-insensitive) is a supported video hoster:
    either recognised by the installed resolver library (resolveurl, falling
    back to urlresolver) or present in the static extra_hosts list.

    Fix: the original's fallback `import urlresolver` was unguarded, so the
    function crashed with ImportError when neither library was installed;
    now it degrades gracefully to the static list.
    """
    try:
        import resolveurl
    except ImportError:
        try:
            import urlresolver as resolveurl
        except ImportError:
            resolveurl = None

    hostDict = []
    if resolveurl is not None:
        try:
            # Flatten resolver domains (skipping '*' universal resolvers)
            # into one lower-cased list, de-duplicated in O(n) preserving
            # priority order (replaces the O(n^2) scan and bare `reduce`).
            resolvers = resolveurl.relevant_resolvers(order_matters=True)
            domain_groups = [i.domains for i in resolvers if '*' not in i.domains]
            flattened = [d.lower() for group in domain_groups for d in group]
            seen = set()
            hostDict = [d for d in flattened if not (d in seen or seen.add(d))]
        except Exception:
            hostDict = []

    # Static whitelist of additional hosters not covered by the resolver lib.
    extra_hosts = [
        'example.com', 'allvid.ch', 'anime-portal.org', 'anyfiles.pl',
        'www.apnasave.club', 'castamp.com', 'clicknupload.com',
        'clicknupload.me', 'clicknupload.link', 'cloud.mail.ru', 'cloudy.ec',
        'cloudy.eu', 'cloudy.sx', 'cloudy.ch', 'cloudy.com', 'daclips.in',
        'daclips.com', 'dailymotion.com', 'ecostream.tv', 'exashare.com',
        'uame8aij4f.com', 'yahmaib3ai.com', 'facebook.com', 'filepup.net',
        'fileweed.net', 'flashx.tv', 'googlevideo.com',
        'googleusercontent.com', 'get.google.com', 'plus.google.com',
        'googledrive.com', 'drive.google.com', 'docs.google.com',
        'gorillavid.in', 'gorillavid.com', 'grifthost.com', 'hugefiles.net',
        'indavideo.hu', 'kingfiles.net', 'mail.ru', 'my.mail.ru',
        'm.my.mail.ru', 'videoapi.my.mail.ru', 'api.video.mail.ru',
        'mersalaayitten.com', 'mersalaayitten.co', 'mersalaayitten.us',
        'movdivx.com', 'divxme.com', 'movpod.net', 'movpod.in', 'movshare.net',
        'wholecloud.net', 'vidgg.to', 'mp4stream.com', 'myvi.ru',
        'nosvideo.com', 'noslocker.com', 'novamov.com', 'auroravid.to',
        'ok.ru', 'odnoklassniki.ru', 'openload.io', 'openload.co', 'oload.tv',
        'playwire.com', 'promptfile.com', 'rapidvideo.com', 'raptu.com',
        'rutube.ru', 'videos.sapo.pt', 'speedvideo.net', 'streamcloud.eu',
        'streamin.to', 'stream.moe', 'streamplay.to', 'teramixer.com',
        'thevid.net', 'thevideo.me', 'toltsd-fel.tk', 'toltsd-fel.xyz',
        'trollvid.net', 'trollvid.io', 'mp4edge.com', 'tudou.com', 'tune.pk',
        'upload.af', 'uploadx.org', 'uploadz.co', 'uptobox.com',
        'uptostream.com', 'veoh.com', 'videa.hu', 'videoget.me', 'videohut.to',
        'videoraj.ec', 'videoraj.eu', 'videoraj.sx', 'videoraj.ch',
        'videoraj.com', 'videoraj.to', 'videoraj.co', 'bitvid.sx',
        'videoweed.es', 'videoweed.com', 'videowood.tv', 'byzoo.org',
        'playpanda.net', 'videozoo.me', 'videowing.me', 'easyvideo.me',
        'play44.net', 'playbb.me', 'video44.net', 'vidlox.tv', 'vidmad.net',
        'tamildrive.com', 'vid.me', 'vidup.me', 'vimeo.com', 'vivo.sx',
        'vk.com', 'vshare.eu', 'watchers.to', 'watchonline.to',
        'everplay.watchpass.net', 'weshare.me', 'xvidstage.com',
        'yourupload.com', 'yucache.net', 'youtube.com', 'youtu.be',
        'youtube-nocookie.com', 'youwatch.org', 'chouhaa.info', 'aliez.me',
        'ani-stream.com', 'bestream.tv', 'blazefile.co', 'divxstage.eu',
        'divxstage.net', 'divxstage.to', 'cloudtime.to', 'downace.com',
        'entervideo.net', 'estream.to', 'fastplay.sx', 'fastplay.cc',
        'goodvideohost.com', 'jetload.tv', 'letwatch.us', 'letwatch.to',
        'vidshare.us', 'megamp4.net', 'mp4engine.com', 'mp4upload.com',
        'myvidstream.net', 'nowvideo.eu', 'nowvideo.ch', 'nowvideo.sx',
        'nowvideo.co', 'nowvideo.li', 'nowvideo.fo', 'nowvideo.at',
        'nowvideo.ec', 'playedto.me', 'www.playhd.video', 'www.playhd.fo',
        'putload.tv', 'shitmovie.com', 'rapidvideo.ws', 'speedplay.xyz',
        'speedplay.us', 'speedplay1.site', 'speedplay.pw', 'speedplay1.pw',
        'speedplay3.pw', 'speedplayy.site', 'speedvid.net', 'spruto.tv',
        'stagevu.com', 'streame.net', 'thevideos.tv', 'tusfiles.net',
        'userscloud.com', 'usersfiles.com', 'vidabc.com', 'vidcrazy.net',
        'uploadcrazy.net', 'thevideobee.to', 'videocloud.co', 'vidfile.net',
        'vidhos.com', 'vidto.me', 'vidtodo.com', 'vidup.org', 'vidzi.tv',
        'vodlock.co', 'vshare.io', 'watchvideo.us', 'watchvideo2.us',
        'watchvideo3.us', 'watchvideo4.us', 'watchvideo5.us', 'watchvideo6.us',
        'watchvideo7.us', 'watchvideo8.us', 'watchvideo9.us',
        'watchvideo10.us', 'watchvideo11.us', 'watchvideo12.us', 'zstream.to'
    ]

    return host.lower() in set(hostDict + extra_hosts)
Exemplo n.º 17
0
# -*- coding: utf-8 -*-

from resources.lib.modules import log_utils

try:
    import resolveurl

    # Universal resolvers correspond to configured debrid services
    # (Real-Debrid, Premiumize, ...), instantiated in priority order.
    debrid_resolvers = [
        resolver()
        for resolver in resolveurl.relevant_resolvers(order_matters=True)
        if resolver.isUniversal()
    ]
except:
    # resolveurl missing or broken: debrid support is simply disabled.
    debrid_resolvers = []


def status():
    """Return True when at least one debrid resolver is configured."""
    return debrid_resolvers != []


def resolver(url, debrid):
    try:
        debrid_resolver = [
            resolver for resolver in debrid_resolvers
            if resolver.name == debrid
        ][0]

        debrid_resolver.login()
        _host, _media_id = debrid_resolver.get_host_and_id(url)
        stream_url = debrid_resolver.get_media_url(_host, _media_id)
Exemplo n.º 18
0
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

# Addon Name: LambdaScrapers Module
# Addon id: script.module.lambdascrapers

from lambdascrapers.modules import log_utils

try:
    import resolveurl

    # Universal resolvers correspond to configured debrid services
    # (Real-Debrid, Premiumize, ...), instantiated in priority order.
    debrid_resolvers = [resolver() for resolver in resolveurl.relevant_resolvers(order_matters=True) if resolver.isUniversal()]

    if len(debrid_resolvers) == 0:
        # Support Rapidgator accounts! Unfortunately, `sources.py` assumes that rapidgator.net is only ever
        # accessed via a debrid service, so we add rapidgator as a debrid resolver and everything just works.
        # As a bonus(?), rapidgator links will be highlighted just like actual debrid links
        debrid_resolvers = [resolver() for resolver in resolveurl.relevant_resolvers(order_matters=True,include_universal=False) if 'rapidgator.net' in resolver.domains]

except:
    # resolveurl missing or broken: debrid support is simply disabled.
    debrid_resolvers = []


def status():
    """Return True when at least one debrid resolver is configured."""
    return debrid_resolvers != []

Exemplo n.º 19
0
    def _cached_http_get(self,
                         url,
                         base_url,
                         timeout,
                         params=None,
                         data=None,
                         multipart_data=None,
                         headers=None,
                         cookies=None,
                         allow_redirect=True,
                         method=None,
                         require_debrid=False,
                         read_error=False,
                         cache_limit=8):
        """
        Fetch *url* with cookie handling, gzip decoding, Cloudflare/captcha
        handling and a local URL cache; return the response body as a string.

        Python 2 code (urllib2, unicode, iteritems). Returns '' on most
        errors unless read_error is True, in which case the error body is
        returned. NOTE(review): cache_limit is presumably a cache age limit —
        confirm its unit against db_connection().get_cached_url.
        """
        # Debrid-only scrapers bail out early when the user has no universal
        # (debrid) resolver configured; the list is computed once per class.
        if require_debrid:
            if Scraper.debrid_resolvers is None:
                Scraper.debrid_resolvers = [
                    resolver for resolver in resolveurl.relevant_resolvers()
                    if resolver.isUniversal()
                ]
            if not Scraper.debrid_resolvers:
                logger.log(
                    '%s requires debrid: %s' %
                    (self.__module__, Scraper.debrid_resolvers),
                    log_utils.LOGDEBUG)
                return ''

        if cookies is None: cookies = {}
        if timeout == 0: timeout = None
        if headers is None: headers = {}
        # Protocol-relative URLs are forced to plain http.
        if url.startswith('//'): url = 'http:' + url
        referer = headers['Referer'] if 'Referer' in headers else base_url
        if params:
            if url == base_url and not url.endswith('/'):
                url += '/'

            # Merge any query string already present in the URL into params,
            # then re-append everything as a single query string.
            parts = urlparse.urlparse(url)
            if parts.query:
                params.update(scraper_utils.parse_query(url))
                url = urlparse.urlunparse(
                    (parts.scheme, parts.netloc, parts.path, parts.params, '',
                     parts.fragment))

            url += '?' + urllib.urlencode(params)
        logger.log(
            'Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' %
            (url, cookies, data, headers), log_utils.LOGDEBUG)
        if data is not None:
            # Strings are sent as-is; anything else is form-encoded.
            if isinstance(data, basestring):
                data = data
            else:
                data = urllib.urlencode(data, True)

        if multipart_data is not None:
            headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
            data = multipart_data

        # Serve from the local URL cache when a fresh-enough copy exists.
        _created, _res_header, html = self.db_connection().get_cached_url(
            url, data, cache_limit)
        if html:
            logger.log('Returning cached result for: %s' % (url),
                       log_utils.LOGDEBUG)
            return html

        try:
            self.cj = self._set_cookies(base_url, cookies)
            if isinstance(url, unicode): url = url.encode('utf-8')
            request = urllib2.Request(url, data=data)
            headers = headers.copy()
            request.add_header('User-Agent', scraper_utils.get_ua())
            request.add_header('Accept', '*/*')
            request.add_header('Accept-Encoding', 'gzip')
            request.add_unredirected_header('Host', request.get_host())
            if referer: request.add_unredirected_header('Referer', referer)
            # Referer/Host were set as unredirected headers above; drop them
            # from the regular dict so they are not sent twice.
            if 'Referer' in headers: del headers['Referer']
            if 'Host' in headers: del headers['Host']
            for key, value in headers.iteritems():
                request.add_header(key, value)
            self.cj.add_cookie_header(request)
            # Install a global opener: NoRedirection suppresses redirects;
            # otherwise enable redirect plus cookie processing.
            if not allow_redirect:
                opener = urllib2.build_opener(NoRedirection)
                urllib2.install_opener(opener)
            else:
                opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
                urllib2.install_opener(opener)
                opener2 = urllib2.build_opener(
                    urllib2.HTTPCookieProcessor(self.cj))
                urllib2.install_opener(opener2)

            if method is not None: request.get_method = lambda: method.upper()
            response = urllib2.urlopen(request, timeout=timeout)
            self.cj.extract_cookies(response, request)
            if kodi.get_setting('cookie_debug') == 'true':
                logger.log(
                    'Response Cookies: %s - %s' %
                    (url, scraper_utils.cookies_as_str(self.cj)),
                    log_utils.LOGDEBUG)
            self.cj._cookies = scraper_utils.fix_bad_cookies(self.cj._cookies)
            self.cj.save(ignore_discard=True)
            # When redirects are suppressed, return the redirect target
            # (from a Refresh header or Location) instead of the body.
            if not allow_redirect and (
                    response.getcode() in [301, 302, 303, 307]
                    or response.info().getheader('Refresh')):
                if response.info().getheader('Refresh') is not None:
                    refresh = response.info().getheader('Refresh')
                    return refresh.split(';')[-1].split('url=')[-1]
                else:
                    redir_url = response.info().getheader('Location')
                    if redir_url.startswith('='):
                        redir_url = redir_url[1:]
                    return redir_url

            content_length = response.info().getheader('Content-Length', 0)
            if int(content_length) > MAX_RESPONSE:
                # Oversized responses are only logged; the reads below still
                # truncate to MAX_RESPONSE bytes.
                logger.log(
                    'Response exceeded allowed size. %s => %s / %s' %
                    (url, content_length, MAX_RESPONSE), log_utils.LOGWARNING)

            if method == 'HEAD':
                return ''
            else:
                if response.info().get('Content-Encoding') == 'gzip':
                    html = ungz(response.read(MAX_RESPONSE))
                else:
                    html = response.read(MAX_RESPONSE)
        except urllib2.HTTPError as e:
            # HTTP errors can still carry a useful body (e.g. Cloudflare
            # challenge pages), so read it before deciding what to do.
            if e.info().get('Content-Encoding') == 'gzip':
                html = ungz(e.read(MAX_RESPONSE))
            else:
                html = e.read(MAX_RESPONSE)

            if CF_CAPCHA_ENABLED and e.code == 403 and 'cf-captcha-bookmark' in html:
                # Cloudflare captcha challenge: attempt to solve it.
                html = cf_captcha.solve(url, self.cj, scraper_utils.get_ua(),
                                        self.get_name())
                if not html:
                    return ''
            elif e.code == 503 and 'cf-browser-verification' in html:
                # Cloudflare browser-verification (JS) challenge.
                html = cloudflare.solve(url,
                                        self.cj,
                                        scraper_utils.get_ua(),
                                        extra_headers=headers)
                if not html:
                    return ''
            else:
                logger.log(
                    'Error (%s) during scraper http get: %s' % (str(e), url),
                    log_utils.LOGWARNING)
                if not read_error:
                    return ''
        except Exception as e:
            logger.log(
                'Error (%s) during scraper http get: %s' % (str(e), url),
                log_utils.LOGWARNING)
            return ''

        # Successful (or deliberately kept) responses are written back to
        # the URL cache before returning.
        self.db_connection().cache_url(url, html, data)
        return html
Exemplo n.º 20
0
def Wyciaganie_Linkow():
    """List playable episode links scraped from kreskowkazone.pl.

    Restores the session cookie jar saved in Kodi's temp dir, fetches the
    episode page given by ``params['url']`` and, for every table row with
    class ``wiersz``, posts the row's id to the site's AJAX endpoint to
    obtain the hoster link. Each link matching a known resolver domain is
    added to the Kodi directory via ``addLink``. Works purely by side
    effect; returns nothing.
    """
    import json
    basePath = "special://temp/cookie.txt"
    path = xbmc.translatePath(basePath)
    # Restore the cookies persisted by an earlier login step.
    with open(path, 'r') as f:
        cookie = requests.utils.cookiejar_from_dict(json.load(f))
        s.cookies = cookie
    url = urllib.unquote_plus(params['url'])
    html = s.get(url, cookies=s.cookies).content

    # Known hoster domains. This is loop-invariant, so build the
    # de-duplicated list once instead of recomputing it per table row.
    hostDict = resolveurl.relevant_resolvers(order_matters=True)
    hostDict = [i.domains for i in hostDict if '*' not in i.domains]
    hostDict = [i.lower() for i in reduce(lambda x, y: x + y, hostDict)]
    hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]

    results = client.parseDOM(html, 'tr', attrs={'class': 'wiersz'})
    for result in results:
        nazwa = client.parseDOM(result, 'a', ret='title')[0]
        # Pull the opaque episode id out of the row's 'rel' attribute by
        # raw string slicing (the markup is not well-formed enough for
        # parseDOM here).
        index = str(result).find('\" rel')
        rel_id = str(result)[index + 10:]
        rel_id = rel_id[:rel_id.find("\"")]
        data = {"o": str(rel_id)}
        # Hit the stats beacon first — the site appears to require it
        # before the AJAX endpoint answers.
        headers = {
            'Host': 'www.kreskowkazone.pl',
            'DNT': '1',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'pl-PL,pl;q=0.9,en-US;q=0.8,en;q=0.7',
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
            'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
            'Referer': url,
            'Connection': 'keep-alive',
        }

        s.get('http://www.kreskowkazone.pl/images/statystyki.gif',
              headers=headers,
              cookies=s.cookies)

        headers = {
            'Accept': 'text/html, */*; q=0.01',
            'Referer': url,
            'Origin': 'https://www.kreskowkazone.pl',
            'X-Requested-With': 'XMLHttpRequest',
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.39 Safari/537.36',
            'DNT': '1',
            'Content-Type': 'application/x-www-form-urlencoded',
        }
        response = s.post("https://www.kreskowkazone.pl/odcinki_ajax",
                          data=data,
                          headers=headers)
        link = client.parseDOM(response.text, 'a', ret='href')
        try:
            # parseDOM returns a list; the old `link == ''` test could
            # never match an empty result, so guard the list itself.
            if not link:
                continue
            href = str(link[0])
            # Scheme-relative URL: prepend the scheme. (The previous
            # blanket replace("//", "http://") also corrupted any later
            # double slash inside the path.)
            if href.startswith('//'):
                href = 'http:' + href
            valid, host = source_utils.is_host_valid(href, hostDict)
            if not valid:
                continue
            nazwa = "[COLOR green]" + host + ": [/COLOR]" + nazwa
            addLink("[B]" + str(nazwa) + "[/B]", href, 6, "", "", "",
                    "", "")
        except Exception:
            # Best-effort per row: a malformed row must not abort the
            # whole listing.
            continue