Example #1
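    # GET wrapper with retry: re-attempts on socket timeouts and on the HTTP status
    # codes listed in TEMP_ERRORS (module constants, together with MAX_RETRIES);
    # the bare raise in the while/else re-raises the last caught exception once the
    # retries run out, relying on Python 2 keeping the exception state in the frame.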
    def __http_get_with_retry_1(self, url, headers):
        utils.log('Fetching URL: %s' % url, xbmc.LOGDEBUG)
        net = Net()
        cookiejar = _1CH.get_profile()
        cookiejar = os.path.join(cookiejar, 'cookies')
        net.set_cookies(cookiejar)
        retries = 0
        html = None
        while retries <= MAX_RETRIES:
            try:
                html = net.http_GET(url, headers=headers).content
                # if no exception, jump out of the loop
                break
            except socket.timeout:
                retries += 1
                utils.log(
                    'Retry #%s for URL %s because of timeout' % (retries, url),
                    xbmc.LOGWARNING)
                continue
            except urllib2.HTTPError as e:
                # if it's a temporary code, retry
                if e.code in TEMP_ERRORS:
                    retries += 1
                    utils.log(
                        'Retry #%s for URL %s because of HTTP Error %s' %
                        (retries, url, e.code), xbmc.LOGWARNING)
                    continue
                # if it's not pass it back up the stack
                else:
                    raise
        else:
            raise

        return html
Example #2
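# Simple GET helper: fetches the page with addon.common.net and falls back to a
# requests-based download (requestsurl) when http_GET raises; note that the headers
# dict and MAX_TRIES are prepared here but never actually passed on to http_GET.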
def getnet(url, bypass_cloudflare=False):
    try:
        logdata('url', url)
        from addon.common.net import Net
        net = Net()
        USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:30.0) Gecko/20100101 Firefox/30.0'
        MAX_TRIES = 3
        headers = {'User-Agent': USER_AGENT, 'Referer': url}
        try:
            data = net.http_GET(url).content
        except:
            logdata('getnet', "download error")
            data = requestsurl(url)
            if data is None:
                logdata('requests', "error")
        try:
            data = data.encode('utf-8', "ignore")
        except:
            pass
        if not data:
            return None

        return data
    except:
        trace_error()
        return None

Example #3
 def __http_get_with_retry_1(self, url, headers):
     utils.log('Fetching URL: %s' % url, xbmc.LOGDEBUG)
     net = Net()
     cookiejar = _1CH.get_profile()
     cookiejar = os.path.join(cookiejar, 'cookies')
     net.set_cookies(cookiejar)
     retries=0
     html=None
     while retries<=MAX_RETRIES:
         try:
             html = net.http_GET(url, headers=headers).content
             # if no exception, jump out of the loop
             break
         except socket.timeout:
             retries += 1
             utils.log('Retry #%s for URL %s because of timeout' % (retries, url), xbmc.LOGWARNING)
             continue
         except urllib2.HTTPError as e:
             # if it's a temporary code, retry
             if e.code in TEMP_ERRORS:
                 retries += 1
                 utils.log('Retry #%s for URL %s because of HTTP Error %s' % (retries, url, e.code), xbmc.LOGWARNING)
                 continue
             # if it's not pass it back up the stack
             else:
                 raise
     else:
         raise
     
     return html
Example #4
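# cloudyvideos resolver: scrapes the hidden form fields from the embed page,
# re-submits them (with method_free) after a short delay, pulls the file URL out
# of the response and queues it on the video playlist for playback.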
def resolve_cloudyvideos(name, url, iconimage):
    # print "cloudyvideos"
    url = re.sub('embed-|-.*?(?:\.html)', '', url)
    net = Net()
    web_url = url
    headers = {'Referer': web_url}
    html = net.http_GET(web_url, headers=headers).content
    data = {}
    time.sleep(3)
    for match in re.finditer(
            r'type="hidden".*?name="([^"]+)".*?value="([^"]+)', html):
        data[match.group(1)] = match.group(2)
        data.update({'method_free': 'Continue'})
    htmla = net.http_POST(web_url, data).content
    r = re.search('file:\s*\'(.*?)\',+', htmla)
    pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    pl.clear()
    try:
        listitem = xbmcgui.ListItem(name, thumbnailImage=iconimage)
        url = r.group(
            1) + '|Referer=http://cloudyvideos&User-Agent=%s' % (USER_AGENT)
        pl.add(url, listitem)
        xbmc.Player().play(pl)
    except Exception, e:
        dialog = xbmcgui.DialogProgress()
        dialog1 = xbmcgui.Dialog()
        dialog1.ok(
            'error',
            '[UPPERCASE][B]                Sorry but the video is deleted!!![/B][/UPPERCASE]'
        )
        print '**** cloudyvideo Error occured: %s' % e
        raise
Example #5
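 # Cloudflare-aware fetch: falls through to Net._fetch directly when cloudflare
 # support is disabled; otherwise a 503 response is treated as a challenge,
 # resolved via _resolve_cloudflare, and the original request is retried.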
 def _fetch(self, url, form_data={}, headers={}, compression=True):
     '''
         A wrapper around the super's _fetch with cloudflare support
     '''
     helper.log_debug("Fetch attempt url: %s, form_data: %s, headers: %s" % (url, form_data, headers))
     if not self._cloudflare:
         return Net._fetch(self, url, form_data, headers, compression)
     else:
         try:
             r = Net._fetch(self, url, form_data, headers, compression)
             helper.log_debug('Did not encounter a cloudflare challenge')
             return r
         except urllib2.HTTPError as e:
             if e.code == 503:
                 helper.log_debug('Encountered a cloudflare challenge')
                 challenge = e.read()
                 if challenge == 'The service is unavailable.':
                     helper.log_debug('Challenge says the service is unavailable')
                     raise
                 try:
                     helper.log_debug("Received a challenge, so we'll need to get around cloudflare")
                     self._resolve_cloudflare(url, challenge, form_data, headers, compression)
                     helper.log_debug("Successfully resolved cloudflare challenge, fetching real response")
                     return Net._fetch(self, url, form_data, headers, compression)
                 except urllib2.HTTPError as e:
                     helper.log_debug("Failed to set up cloudflare with exception %s" % str(e))
                     raise
             else:
                 helper.log_debug('Initial attempt failed with code %d' % e.code)
                 raise
Example #6
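# Logs into the site with the credentials stored in the add-on settings, POSTs the
# login form with a spoofed User-Agent and, if the logout link shows up in the
# response, persists the session cookies before returning the page HTML.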
def login_and_retry(redirect):
    _1CH.log('Logging in for url %s' % redirect)
    # needed here because login_and_retry was moved to utils. Probably better to pass in but this works and is quick but dirty.
    USER_AGENT = ("User-Agent:Mozilla/5.0 (Windows NT 6.2; WOW64)"
                  "AppleWebKit/537.17 (KHTML, like Gecko)"
                  "Chrome/24.0.1312.56")
    BASE_URL = _1CH.get_setting('domain')
    if (_1CH.get_setting("enableDomain")=='true') and (len(_1CH.get_setting("customDomain")) > 10):
        BASE_URL = _1CH.get_setting("customDomain")

    user = _1CH.get_setting('username')
    passwd = _1CH.get_setting('passwd')
    url = BASE_URL + '/login.php'
    net = Net()
    cookiejar = _1CH.get_profile()
    cookiejar = os.path.join(cookiejar, 'cookies')
    host = re.sub('http://', '', BASE_URL)
    headers = {'Referer': redirect, 'Origin': BASE_URL, 'Host': host, 'User-Agent': USER_AGENT}
    form_data = {'username': user, 'password': passwd, 'remember': 'on', 'login_submit': 'Login'}
    html = net.http_POST(url, headers=headers, form_data=form_data).content
    if '<a href="/logout.php">[ Logout ]</a>' in html:
        net.save_cookies(cookiejar)
        return html
    else:
        _1CH.log('Failed to login')
        print html
Example #7
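# Builds a hoster selection dialog for the entry matching the given IMDb id: both
# streamlink fields are parsed for their host names, the user picks one, and the
# corresponding stream URL is handed to HOSTER() for playback.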
def PLAYLIST(name, url, iconimage, imdb):
        net = Net()
        link = net.http_GET(url).content
        link=link.replace('\r\n', '').replace('"},]', '"}]')
        magic = api.loads(link, encoding='latin1')
        liste=[]
        stream1=[]
        stream2=[]
        dialog = xbmcgui.Dialog()
        for i in magic:
            if imdb in i['imdblink'].encode('utf-8'):
                try:
                    stream1=re.search("S*R*C*s*r*c*='(.*?)'.*?", i['streamlink1']).group(1)
                    vid1=re.search("S*R*C*s*r*c*='https*://(.*?)\.*c*o*m*t*o*/.*?'.*?",i['streamlink1'])
                    liste.append(vid1.group(1))
                except:
                    pass
                try:
                    stream2=re.search("S*R*C*s*r*c*='(.*?)'.*?", i['streamlink2']).group(1)
                    vid2=re.search("S*R*C*s*r*c*='https*://(.*?)\.*c*o*m*t*o*/.*?'.*?",i['streamlink2'])
                    liste.append(vid2.group(1))
                except:
                    pass
        hoster = dialog.select('HOSTER',liste)
        if hoster == 0:
            HOSTER(name,stream1,iconimage)
        elif hoster == 1:
            HOSTER(name,stream2,iconimage)
        else:
            pass
Example #8
def resolve_cloudyvideos(name,url,iconimage):
        # print "cloudyvideos"
        url=re.sub('embed-|-.*?(?:\.html)','',url)
        net = Net()
        web_url = url
        headers = {'Referer': web_url}
        html = net.http_GET(web_url, headers=headers).content
        data={}
        time.sleep(3)
        for match in re.finditer(r'type="hidden".*?name="([^"]+)".*?value="([^"]+)', html):
            data[match.group(1)] = match.group(2)
            data.update ({'method_free': 'Continue'})
        htmla = net.http_POST(web_url, data).content
        r = re.search('file:\s*\'(.*?)\',+', htmla)
        pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
        pl.clear()
        try:
            listitem = xbmcgui.ListItem(name,thumbnailImage=iconimage)
            url = r.group(1)+'|Referer=http://cloudyvideos&User-Agent=%s' % (USER_AGENT)
            pl.add(url, listitem)
            xbmc.Player().play(pl)
        except Exception, e:
            dialog = xbmcgui.DialogProgress()
            dialog1 = xbmcgui.Dialog()
            dialog1.ok('error','[UPPERCASE][B]                Sorry but the video is deleted!!![/B][/UPPERCASE]')
            print '**** cloudyvideo Error occured: %s' % e
            raise
Example #9
def read_url(url):
    net = Net()

    html=net.http_GET(url).content
    
    h = HTMLParser.HTMLParser()
    html = h.unescape(html)
    return html
Example #10
def read_url(url):
    net = Net()

    html=net.http_GET(url).content
    
    h = HTMLParser.HTMLParser()
    html = h.unescape(html)
    return html.encode('utf-8')
Example #11
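# Variant of read_url that tries to return the unescaped page UTF-8 encoded and
# falls back to returning the string unchanged if the encode fails.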
def read_url(url):
    net = Net()

    html = net.http_GET(url).content
    import HTMLParser
    h = HTMLParser.HTMLParser()
    html = h.unescape(html)
    try:
        return html.encode('utf-8')
    except:
        return html
Example #12
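# POST helper: submits form_data with a spoofed User-Agent and Referer and returns
# the response body; on any error it logs a traceback via trace_error() and
# implicitly returns None (MAX_TRIES is defined but unused here).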
def postnet(url, data, referer):
    try:
        from addon.common.net import Net
        net = Net()
        USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:30.0) Gecko/20100101 Firefox/30.0'
        MAX_TRIES = 3
        headers = {'User-Agent': USER_AGENT, 'Referer': referer}
        html = net.http_POST(url, form_data=data, headers=headers).content
        return html
    except:
        trace_error()
Example #13
 def _update_opener_with_cloudflare(self):
     '''
         Uses the cloudflare jar temporarily for opening future links. 
         Revert back to the main jar by invoking _update_opener().
     '''
     tmp_jar = self._cj
     self._cloudflare_jar = cookielib.LWPCookieJar()
     self._cj = self._cloudflare_jar
     Net._update_opener(self)
     self._cj = tmp_jar
     return
Example #14
def read_url(url):
    net = Net()

    html = net.http_GET(url).content
    import HTMLParser

    h = HTMLParser.HTMLParser()
    html = h.unescape(html)
    try:
        return html.encode("utf-8")
    except:
        return html
Example #15
 def __login(self,redirect):
     url = self.base_url + '/login.php'
     net = Net()
     cookiejar = _1CH.get_profile()
     cookiejar = os.path.join(cookiejar, 'cookies')
     host = re.sub('http://', '', self.base_url)
     headers = {'Referer': redirect, 'Origin': self.base_url, 'Host': host, 'User-Agent': USER_AGENT}
     form_data = {'username': self.username, 'password': self.password, 'remember': 'on', 'login_submit': 'Login'}
     html = net.http_POST(url, headers=headers, form_data=form_data).content
     if '<a href="/logout.php">[ Logout ]</a>' in html:
         net.save_cookies(cookiejar)
         return True
     else:
         return False
Example #16
	def __init__(self, load = None, disable = None, cache_results=False):
		self.threadpool_size = 5
		self.cache_results = cache_results
		self._load_list = load
		self._disable_list = disable
		self.enabled_scrapers = 0
		self.active_scrapers = []
		self.supported_scrapers = []
		self._active_scrapers = []
		self._load_scrapers()
		self._enable_scrapers()
		self.search_results = []
		expired = True
		cache_file = vfs.join(DATA_PATH, 'debrid_hosts.cache')
		if vfs.exists(cache_file):
			timestamp = int(time.time())
			m_time = vfs.get_stat(cache_file).st_mtime()
			if (timestamp - m_time) < 86400: expired = False
		
		if expired:
			hosts = {}
			response = Net().http_GET('http://real-debrid.com/api/hosters.php').content
			hosts['rd'] = [x.strip('"') for x in response.split(',')]
			
			response = Net().http_GET('http://alldebrid.com/api.php?action=get_host').content
			hosts['ad'] = [x.strip('"') for x in response.split(',\n')]
			
			response = Net().http_GET('http://premium.rpnet.biz/hoster2.json').content
			hosts['rp'] = json.loads(response)['supported']
			ADDON.save_data(cache_file, hosts)
Example #17
 def __login(self,redirect):
     url = self.base_url + '/login.php'
     net = Net()
     cookiejar = _1CH.get_profile()
     cookiejar = os.path.join(cookiejar, 'cookies')
     host = re.sub('http://', '', self.base_url)
     headers = {'Referer': redirect, 'Origin': self.base_url, 'Host': host, 'User-Agent': USER_AGENT}
     form_data = {'username': self.username, 'password': self.password, 'remember': 'on', 'login_submit': 'Login'}
     html = net.http_POST(url, headers=headers, form_data=form_data).content
     if '<a href="/logout.php"' in html:
         net.save_cookies(cookiejar)
         return True
     else:
         return False
Example #18
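# Thin GET wrapper around addon.common.net; the headers dict (User-Agent and
# Referer) is prepared but, as written, never passed on to http_GET.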
def readnet2(url):
    from addon.common.net import Net
    net = Net()
    USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:30.0) Gecko/20100101 Firefox/30.0'
    MAX_TRIES = 3
    headers = {
        'User-Agent': USER_AGENT,
        'Referer': url
    }

    html = net.http_GET(url).content
    return html
Example #19
    def _resolve_cloudflare(self, url, challenge, form_data={}, headers={}, compression=True):
        """
            Asks _get_cloudflare_answer for a URL containing the answer to the
            challenge, and then attempts the resolution.
        """
        helper.start("_resolve_cloudflare")
        parsed_url = urlparse(url)
        cloudflare_url = urlunparse((parsed_url.scheme, parsed_url.netloc, '', '', '', ''))
        query = self._get_cloudflare_answer(cloudflare_url, challenge, form_data, headers, compression)

        # Use the cloudflare jar instead for this attempt; revert back to 
        # main jar after attempt with call to update_opener()
        self._update_opener_with_cloudflare()

        try:
            helper.log_debug("Attempting to resolve the challenge")
            response = Net._fetch(self, query, form_data, headers, compression)
            helper.log_debug("Resolved the challenge, updating cookies")
            for c in self._cloudflare_jar:
                self._cj.set_cookie(c)
            self._update_opener()
        except urllib2.HTTPError as e:
            helper.log_debug("Failed to resolve the cloudflare challenge with exception %s" % str(e))
            self._update_opener()
        helper.end('_resolve_cloudflare')
Example #20
	def __init__(self, load = None, disable = None, cache_results=False):
		self.threadpool_size = 5
		self.cache_results = cache_results
		self._load_list = load
		self._disable_list = disable
		self.enabled_scrapers = 0
		self.active_scrapers = []
		self.supported_scrapers = []
		self._active_scrapers = []
		self._load_scrapers()
		self._enable_scrapers()
		self.search_results = []
		self.show_scraper_progress = ADDON.get_setting('enable_scraper_progress') == 'true'
		
		expired = True
		self.filters = False
		if ADDON.get_setting('enable_result_filters') == 'true':
			cache_file = vfs.join(DATA_PATH, 'filters.cache')
			if vfs.exists(cache_file):
				self.filters = ADDON.load_data(cache_file)
		cache_file = vfs.join(DATA_PATH, 'debrid_hosts.cache')
		if vfs.exists(cache_file):
			timestamp = int(time.time())
			m_time = vfs.get_stat(cache_file).st_mtime()
			if (timestamp - m_time) < 86400: expired = False
		
		if expired:
			hosts = {"pm": [], "rd": [], "ad": [], "rp": []}
			net = Net()
			try:
				customer_id = xbmcaddon.Addon('script.module.urlresolver').getSetting('PremiumizeMeResolver_username')
				pin = xbmcaddon.Addon('script.module.urlresolver').getSetting('PremiumizeMeResolver_password')
				query = {"method": "hosterlist", "params[login]": customer_id, "params[pass]": pin}
				api_url = "http://api.premiumize.me/pm-api/v1.php?" + urllib.urlencode(query)
				response = net.http_GET(api_url).content
				data = json.loads(response)
				if 'result' in data:
					hosts['pm'] = data['result']['hosterlist']
			except: pass

			try:
				response = Net().http_GET('http://real-debrid.com/api/hosters.php').content
				hosts['rd'] = [x.strip('"') for x in response.split(',')]
			except: pass
			
			try:
				response = Net().http_GET('http://alldebrid.com/api.php?action=get_host').content
				hosts['ad'] = [x.strip('"') for x in response.split(',\n')]
			except: pass
				
			try:
				response = Net().http_GET('http://premium.rpnet.biz/hoster2.json').content
				hosts['rp'] = json.loads(response)['supported']
			except: pass

			ADDON.save_data(cache_file, hosts)
Example #21
def SEARCH(url,name,imdb,move,movegen):
        net = Net()
        link = net.http_GET(url).content
        link=link.replace('\r\n', '').replace('"},]', '"}]')
        magic = api.loads(link, encoding='latin1')
        kb = xbmc.Keyboard('', 'Search KinoLeak', False)
        kb.doModal()
        search = kb.getText()
        # search=urllib.quote(search)
        for e,i in enumerate(magic):
            if search.lower() in (i['titel'].encode('utf-8')).lower():
                try:
                    imdb=re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
                except:
                    imdb=""
                try:
                    sub=re.search("(.*?)\((\d+)\)", i['titel'])
                    addDir(sub.group(1),url,2,i['cover'],imdb,"movie".decode('utf-8'),sub.group(2),None,"")
                except:
                    addDir(i['titel'],url,2,i['cover'],imdb,"movie",'',None,"")
        xbmcplugin.setContent(int(sys.argv[1]), 'movies')
        xbmc.executebuiltin("Container.SetViewMode(%s)" % addon.getSetting('MAIN') )
Example #22
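# Prompts for a search term, scans the cached KinoLeak JSON listing for titles
# containing it, extracts the IMDb id and year where possible, and adds each match
# as a movie directory entry.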
def SEARCH(url, name, imdb, move, movegen):
    net = Net()
    link = net.http_GET(url).content
    link = link.replace('\r\n', '').replace('"},]', '"}]')
    magic = api.loads(link, encoding='latin1')
    kb = xbmc.Keyboard('', 'Search KinoLeak', False)
    kb.doModal()
    search = kb.getText()
    # search=urllib.quote(search)
    for e, i in enumerate(magic):
        if search.lower() in (i['titel'].encode('utf-8')).lower():
            try:
                imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
            except:
                imdb = ""
            try:
                sub = re.search("(.*?)\((\d+)\)", i['titel'])
                addDir(sub.group(1), url, 2, i['cover'], imdb,
                       "movie".decode('utf-8'), sub.group(2), None, "")
            except:
                addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None,
                       "")
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmc.executebuiltin("Container.SetViewMode(%s)" % addon.getSetting('MAIN'))
Example #23
def PLAYLIST(name, url, iconimage, imdb):
    net = Net()
    link = net.http_GET(url).content
    link = link.replace('\r\n', '').replace('"},]', '"}]')
    magic = api.loads(link, encoding='latin1')
    liste = []
    stream1 = []
    stream2 = []
    dialog = xbmcgui.Dialog()
    for i in magic:
        if imdb in i['imdblink'].encode('utf-8'):
            try:
                stream1 = re.search("S*R*C*s*r*c*='(.*?)'.*?",
                                    i['streamlink1']).group(1)
                vid1 = re.search(
                    "S*R*C*s*r*c*='https*://(.*?)\.*c*o*m*t*o*/.*?'.*?",
                    i['streamlink1'])
                liste.append(vid1.group(1))
            except:
                pass
            try:
                stream2 = re.search("S*R*C*s*r*c*='(.*?)'.*?",
                                    i['streamlink2']).group(1)
                vid2 = re.search(
                    "S*R*C*s*r*c*='https*://(.*?)\.*c*o*m*t*o*/.*?'.*?",
                    i['streamlink2'])
                liste.append(vid2.group(1))
            except:
                pass
    hoster = dialog.select('HOSTER', liste)
    if hoster == 0:
        HOSTER(name, stream1, iconimage)
    elif hoster == 1:
        HOSTER(name, stream2, iconimage)
    else:
        pass
Example #24
 def request(self, uri, params=None, query=None, headers=None, return_soup=False, return_json=False):
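     # Generic request helper: keeps a per-service cookie jar, sends the shared
     # Referer/Accept/User-Agent headers, POSTs when params are given and GETs
     # otherwise, saves the cookies back, and returns the body as HTML,
     # BeautifulSoup or parsed JSON depending on the flags.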
     COOKIE_JAR = vfs.join(COOKIE_PATH, self.service + ".lwp")
     net = Net()
     net.set_cookies(COOKIE_JAR)
     if headers:
         headers["Referer"] = self.referrer
         headers["Accept"] = self.ACCEPT
         headers["User-Agent"] = self.USER_AGENT
     else:
         headers = {"Referer": self.referrer, "Accept": self.ACCEPT, "User-Agent": self.USER_AGENT}
     if query:
         uri = uri % urllib.urlencode(query)
     if params:
         html = net.http_POST(self.base_url + uri, params, headers=headers).content
     else:
         html = net.http_GET(self.base_url + uri, headers=headers).content
     net.save_cookies(COOKIE_JAR)
     if return_soup:
         return BeautifulSoup(html)
     elif return_json:
         return json.loads(html)
     else:
         return html
Example #25
# also searches imdb (using http://www.imdbapi.com/) for missing info in movies or tvshows

import sys
import simplejson

import urllib, re
from datetime import datetime
import time
from addon.common.net import Net
from addon.common.addon import Addon
from threading import Thread
try:
    import Queue as queue
except ImportError:
    import queue
net = Net()
addon = Addon('script.module.metahandler')


class TMDB(object):
    '''
    This class performs TMDB and IMDB lookups.
    
    The first call is made to TMDB by either IMDB ID or Name/Year, depending on what is supplied. If the movie is
    not found, or if data is missing on TMDB, another call is made to IMDB to fill in the missing information.
    '''
    def __init__(self, api_key='', view='json', lang='en'):
        #view = yaml json xml
        self.view = view
        self.lang = lang
        self.api_key = api_key
Example #26
        from t0mm0.common.addon import Addon
    except:
        from t0mm0_common_addon import Addon
try:
    from addon.common.net import Net
except:
    try:
        from t0mm0.common.net import Net
    except:
        from t0mm0_common_net import Net
#Define common.addon
addon_id = 'plugin.video.hubwizard'
AddonTitle = 'Config Wizard'
# Global Stuff
addon = Addon(addon_id, sys.argv)
net = Net()
settings = xbmcaddon.Addon(id=addon_id)
net.set_user_agent(
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
)
AddonIcon = settings.getAddonInfo('icon')
AddonFanart = settings.getAddonInfo('fanart')
AddonPath = settings.getAddonInfo('path')


# #
def get_params():
    param = []
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        params = sys.argv[2]
Example #27
##Run Add-On Pack Installer
import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,os,sys,downloader,extract,time,shutil,subprocess
from resources.modules import main
addon_id='script.pack.installer'; AddonTitle='Add-on Pack Installer'; 
wizardUrl='https://raw.githubusercontent.com/Josh5/addon-packs/master/'; #wizardUrl='http://tribeca.xbmchub.com/tools/wizard/'
SiteDomain='TinyHTPC.co.nz'; #SiteDomain='XBMCHUB.com'
TeamName='Add-on Pack Installer'; #TeamName='Team XBMCHUB'
try:        from addon.common.addon import Addon
except:
    try:    from t0mm0.common.addon import Addon
    except: from t0mm0_common_addon import Addon
try:        from addon.common.net   import Net
except:
    try:    from t0mm0.common.net   import Net
    except: from t0mm0_common_net   import Net
addon=main.addon; net=Net(); settings=xbmcaddon.Addon(id=addon_id); net.set_user_agent('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'); 
#==========================Help WIZARD=====================================================================================================
def HELPCATEGORIES():
    if ((XBMCversion['Ver'] in ['','']) or (int(XBMCversion['two']) < 12)) and (settings.getSetting('bypass-xbmcversion')=='false'):
        eod(); addon.show_ok_dialog(["Compatibility Issue: Outdated Kodi Setup","Please upgrade to a newer version of XBMC first!","Visit %s for Support!"%SiteDomain],title="XBMC "+XBMCversion['Ver'],is_error=False); DoA('Back'); 
    else:
        link=OPEN_URL(wizardUrl+'packs.txt').replace('\n','').replace('\r','')
        match=re.compile('name="(.+?)".+?rl="(.+?)".+?mg="(.+?)".+?anart="(.+?)".+?escription="(.+?)".+?ype="(.+?)".+?nset="(.+?)".+?estart="(.+?)"').findall(link)
        for name,url,iconimage,fanart,description,filetype,skinset,restart in match:
            #if 'status' in filetype:
                #main.addHELPDir(name,url,'wizardstatus',iconimage,fanart,description,filetype)
            #else:    
                main.addHELPDir(name,url,'helpwizard',iconimage,fanart,description,filetype,skinset,restart)
                #print [name,url]
        main.AUTO_VIEW('movies')
        #main.addHELPDir('Testing','http://www.firedrive.com/file/################','helpwizard',iconimage,fanart,description,filetype) ## For Testing to test a url with a FileHost.
Example #28
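    # Scraper manager constructor: loads and enables the configured scrapers, then
    # refreshes a 24-hour on-disk cache (debrid_hosts.cache) of the hosts supported
    # by Premiumize.me, Real-Debrid, AllDebrid and RPnet.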
    def __init__(self,
                 load=None,
                 disable=None,
                 cache_results=False,
                 is_stream=False):
        self.threadpool_size = 5
        self.cache_results = cache_results
        self._load_list = load
        self._disable_list = disable
        self.enabled_scrapers = 0
        self.active_scrapers = []
        self.supported_scrapers = []
        self._active_scrapers = []
        self._load_scrapers()
        self._enable_scrapers()
        self.search_results = []
        if is_stream:
            self.show_scraper_progress = False
        else:
            self.show_scraper_progress = ADDON.get_setting(
                'enable_scraper_progress') == 'true'
        self.skip_second_search = True
        expired = True
        self.filters = False
        self.cache_queue = Queue()
        if ADDON.get_setting('enable_result_filters') == 'true':
            cache_file = vfs.join(DATA_PATH, 'filters.cache')
            if vfs.exists(cache_file):
                self.filters = ADDON.load_data(cache_file)
        cache_file = vfs.join(DATA_PATH, 'debrid_hosts.cache')

        if vfs.exists(cache_file):
            timestamp = int(time.time())
            m_time = vfs.get_stat(cache_file).st_mtime()
            if (timestamp - m_time) < 86400: expired = False

        if expired:
            hosts = {"pm": [], "rd": [], "ad": [], "rp": []}
            net = Net()
            try:
                customer_id = xbmcaddon.Addon(
                    'script.module.urlresolver').getSetting(
                        'PremiumizeMeResolver_username')
                pin = xbmcaddon.Addon('script.module.urlresolver').getSetting(
                    'PremiumizeMeResolver_password')
                query = {
                    "method": "hosterlist",
                    "params[login]": customer_id,
                    "params[pass]": pin
                }
                api_url = "http://api.premiumize.me/pm-api/v1.php?" + urllib.urlencode(
                    query)
                response = net.http_GET(api_url).content
                data = json.loads(response)
                if 'result' in data:
                    hosts['pm'] = data['result']['hosterlist']
            except:
                pass

            try:
                response = Net().http_GET(
                    'http://real-debrid.com/api/hosters.php').content
                hosts['rd'] = [x.strip('"') for x in response.split(',')]
            except:
                pass

            try:
                response = Net().http_GET(
                    'http://alldebrid.com/api.php?action=get_host').content
                hosts['ad'] = [x.strip('"') for x in response.split(',\n')]
            except:
                pass

            try:
                response = Net().http_GET(
                    'http://premium.rpnet.biz/hoster2.json').content
                hosts['rp'] = json.loads(response)['supported']
            except:
                pass

            ADDON.save_data(cache_file, hosts)
Example #29
import urlresolver
import pyxbmct.addonwindow as pyxbmct
from metahandler import metahandlers
import cookielib
import time, re
import datetime
import shutil
from resources.modules import main, vipplaylist, resolvers, nhl
from addon.common.addon import Addon
from addon.common.net import Net
try:
    from sqlite3 import dbapi2 as lite
except:
    from pysqlite2 import dbapi2 as lite

net = Net(http_debug=True)
ADDON = xbmcaddon.Addon(id='plugin.video.phstreams')
art = 'http://artpathg'
addon_id = 'plugin.video.phstreams'
addon = main.addon
addonp = xbmcaddon.Addon('plugin.video.phstreams')
selfAddon = xbmcaddon.Addon(id=addon_id)
settings = xbmcaddon.Addon(id='plugin.video.phstreams')
#===========Add Main File Here=======================================
listmaker = 'http://mecca.watchkodi.com/phstreams.xml'
#========================Alternate Param Stuff=======================
mode = addon.queries["mode"]
url = addon.queries.get('url', '')
name = addon.queries.get('name', '')
thumb = addon.queries.get('thumb', '')
favtype = addon.queries.get('favtype', '')
Example #30
# Thanks to Blazetamer, Eleazar Coding, Showgun, TheHighway, ....
siteTitle="TVADDONS.AG"; #siteTitle="XBMCHUB.COM"; 
addon_id='plugin.program.addoninstaller'; import urllib,urllib2,re,xbmcplugin,xbmcgui,xbmc,xbmcaddon,os,sys,time,shutil,downloader,extract
base_url2='http://addons.tvaddons.ag'; #'http://addons.xbmchub.com'
tribeca_url2='http://tribeca.tvaddons.ag/tools/'; #tribeca_url2='http://tribeca.xbmchub.com/tools/'; 
tribeca_url=tribeca_url2+'installer/sources/'; base_url=base_url2+'/'; 
try: 			from addon.common.addon 	import Addon
except:
    try: 		from t0mm0.common.addon import Addon
    except: from t0mm0_common_addon import Addon
addon=Addon(addon_id,sys.argv)
try: 			from addon.common.net 	import Net
except:
    try: 		from t0mm0.common.net import Net
    except: from t0mm0_common_net import Net
net=Net(); settings=xbmcaddon.Addon(id=addon_id); ADDON=xbmcaddon.Addon(id=addon_id); 
artPath=xbmc.translatePath(os.path.join('special://home','addons',addon_id,'resources','art2/')); 
def getArtwork(n): return xbmc.translatePath(os.path.join('special://home','addons',addon_id,'art2',n))
def getArtworkJ(n): return xbmc.translatePath(os.path.join('special://home','addons',addon_id,'art2',n+'.jpg'))
def catArtwork(n): return 'http://addons.tvaddons.ag/images/categories/%s.png'%n

mainPath=xbmc.translatePath(os.path.join('special://home','addons',addon_id)); 
fanart=xbmc.translatePath(os.path.join(mainPath,'fanart.jpg')); #fanart=artPath+'fanart.jpg'; #fanart=xbmc.translatePath(os.path.join('special://home','addons',addon_id+'/'))+'fanart.jpg'; #fanart=getArtworkJ('fanart')
iconart=xbmc.translatePath(os.path.join(mainPath,'icon.png')); #print ['fanart',fanart,'iconart',iconart]; 
TxtAddonUpdater='Addon Updater'; ImgAddonUpdater=getArtworkJ('autoupdater'); 
#****************************************************************
def MAININDEX():
    hubpath=xbmc.translatePath(os.path.join('special://home','addons','repository.xbmchub'))
    hubnotespath=xbmc.translatePath(os.path.join('special://home','addons','plugin.program.xbmchub.notifications'))
    try:
        if not os.path.exists(hubpath): HUBINSTALL('TVADDONS.AG.Repository','http://offshoregit.com/xbmchub/xbmc-hub-repo/raw/master/repository.xbmchub/repository.xbmchub-1.0.3.zip','','addon','none')
Example #31
import HTMLParser
from elementtree.ElementTree import parse

try:
    from addon.common.addon import Addon
    from addon.common.net import Net
except:
    xbmc.log(
        'Failed to import script.module.addon.common, attempting t0mm0.common')
    xbmcgui.Dialog().ok(
        "Import Failure", "Failed to import addon.common",
        "A component needed by this addon is missing on your system",
        "Please visit www.xbmc.org for support")

addon = Addon('plugin.video.redlettermedia', sys.argv)
net = Net()

##### Queries ##########
play = addon.queries.get('play', None)
mode = addon.queries['mode']
url = addon.queries.get('url', None)
page_num = addon.queries.get('page_num', None)

addon.log('-----------------RedLetterMedia Addon Params------------------')
addon.log('--- Version: ' + str(addon.get_version()))
addon.log('--- Mode: ' + str(mode))
addon.log('--- Play: ' + str(play))
addon.log('--- URL: ' + str(url))
addon.log('--- Page: ' + str(page_num))
addon.log('---------------------------------------------------------------')
Example #32
import xbmc, xbmcgui
import urllib, urllib2
import re
import HTMLParser
from elementtree.ElementTree import parse

try:
    from addon.common.addon import Addon
    from addon.common.net import Net
except:
    xbmc.log('Failed to import script.module.addon.common, attempting t0mm0.common')
    xbmcgui.Dialog().ok("Import Failure", "Failed to import addon.common", "A component needed by this addon is missing on your system", "Please visit www.xbmc.org for support")


addon = Addon('plugin.video.redlettermedia', sys.argv)
net = Net()

##### Queries ##########
play = addon.queries.get('play', None)
mode = addon.queries['mode']
url = addon.queries.get('url', None)
page_num = addon.queries.get('page_num', None)

addon.log('-----------------RedLetterMedia Addon Params------------------')
addon.log('--- Version: ' + str(addon.get_version()))
addon.log('--- Mode: ' + str(mode))
addon.log('--- Play: ' + str(play))
addon.log('--- URL: ' + str(url))
addon.log('--- Page: ' + str(page_num))
addon.log('---------------------------------------------------------------')
Example #33
from metahandler import metahandlers

try:
    from addon.common.addon import Addon

except:
    from t0mm0.common.addon import Addon
addon_id = 'plugin.video.twomovies'

try:
    from addon.common.net import Net

except:
    from t0mm0.common.net import Net
net = Net()
try:
    import StorageServer
except:
    import storageserverdummy as StorageServer

#addon = Addon(addon_id, sys.argv)
addon = main.addon
# Cache
cache = StorageServer.StorageServer("Two Movies", 0)

mode = addon.queries['mode']
url = addon.queries.get('url', '')
name = addon.queries.get('name', '')
thumb = addon.queries.get('thumb', '')
ext = addon.queries.get('ext', '')
Example #34
try:
        from addon.common.addon import Addon

except:
        from t0mm0.common.addon import Addon
addon_id = 'plugin.video.twomovies'
addon = main.addon


try:
        from addon.common.net import Net

except:  
        from t0mm0.common.net import Net
net = Net(http_debug=True)
newagent ='Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36'
net.set_user_agent(newagent)

base_url = 'http://www.twomovies.name'


#PATHS
artwork = xbmc.translatePath(os.path.join('http://rowthreemedia.com/xbmchub/2movies/art/', ''))
settings = xbmcaddon.Addon(id='plugin.video.twomovies')
addon_path = os.path.join(xbmc.translatePath('special://home/addons'), '')

#========================DLStuff=======================
mode = addon.queries['mode']
url = addon.queries.get('url', '')
name = addon.queries.get('name', '')
Example #35
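# Directory builder for the cached KinoLeak JSON listing: fills the "Neu im
# Programm" (new releases) view, a genre view paged 25 titles at a time with
# Next/Back navigation, and a full metadata collection pass over every film,
# updating a progress dialog along the way.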
def INDEX(url, name, imdb, move, movegen):
    net = Net()
    link = net.http_GET(url).content
    link = link.replace('\r\n', '').replace('"},]', '"}]')
    magic = api.loads(link, encoding='latin1')
    progress = xbmcgui.DialogProgress()
    progress.create('Fortschritt', 'This is a progress bar.')
    genre = []
    neu = []
    sammeln = []
    for e, i in enumerate(reversed(magic)):
        if name == "Neu im Programm" and e < 27:
            neu.append(i)
    for e, i in enumerate(magic):
        if name in i['genre'].encode('utf-8'):
            genre.append(i)
    for e, i in enumerate(magic):
        if "sammeln" in name:
            sammeln.append(i)
    #----Neu im Programm----#
    for e, i in enumerate(neu):
        if e < len(neu):
            percent = int(e * 100 / len(neu))  # multiply first so Python 2 integer division does not truncate to 0
            message = str(e) + " von " + str(len(neu)) + " Filmen geladen"
            progress.update(percent, message,
                            "Dies passiert bei noch nie eingelesenen Filmen")
        try:
            imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
        except:
            imdb = ""
        try:
            sub = re.search("(.*?)\((\d+)\)", i['titel'])
            addDir(sub.group(1), url, 2, i['cover'], imdb,
                   "movie".decode('utf-8'), sub.group(2), None, "")
        except:
            addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    #----GENRES die Filme----#
    for e, i in enumerate(sorted(genre, key=lambda genre: genre['titel'])):
        if move <= e < move + 25:
            if e - move < move + 25:
                percent = (e - move) * 100 / 25
                message = "FilmInfo des " + str(
                    e - move) + ". von 25 Filmen geladen"
                progress.update(
                    percent, message,
                    "Dies passiert bei noch nie eingelesenen Filmen")
            try:
                imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
            except:
                imdb = ""
            try:
                sub = re.search("(.*?)\((\d+)\)", i['titel'])
                addDir(sub.group(1), url, 2, i['cover'], imdb,
                       "movie".decode('utf-8'), sub.group(2), None, "")
            except:
                addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None,
                       "")
    #----FilmInfo von allen Filmen Sammeln für die Datenbank----#
    for e, i in enumerate(sorted(sammeln,
                                 key=lambda sammeln: sammeln['titel'])):
        if e < len(sammeln):
            percent = int(e * 100 / len(sammeln))
            message = "FilmInfo des " + str(e) + ". von " + str(
                len(sammeln)) + " Filmen geladen"
            progress.update(percent, message,
                            "Dies passiert bei noch nie eingelesenen Filmen")
        try:
            imdb = re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
        except:
            imdb = ""
        try:
            sub = re.search("(.*?)\((\d+)\)", i['titel'])
            addDir(sub.group(1), url, 2, i['cover'], imdb,
                   "movie".decode('utf-8'), sub.group(2), None, "")
        except:
            addDir(i['titel'], url, 2, i['cover'], imdb, "movie", '', None, "")
    #----SEITENNAVIGATION----#
    if len(genre) > 0:
        if move != None and move == 0 and len(genre) > 25:
            print "<<<----OLDU-1---->>>"
            addDir("Next-->>", url, 4, "", "", "folder", "", move + 25, name)
        if move != None and move != 0 and move + 25 <= len(
                genre) and len(genre) - move > 0:
            print "<<<----OLDU-2---->>>"
            addDir("Next-->>", url, 4, "", "", "folder", "", move + 25, name)
            addDir("<<--Back", url, 4, "", "", "folder", "", move - 25, name)
        if move + 25 >= len(genre) and move != 0:
            print "<<<----OLDU-3---->>>"
            addDir("<<--Back", url, 4, "", "", "folder", "", move - 25, name)
        addDir("Home", "", None, "", "", "folder", "", None, "")
    progress.close()
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmc.executebuiltin("Container.SetViewMode(%s)" % addon.getSetting('MAIN'))
Example #36
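 # Extends Net with a separate Mozilla cookie jar that is used only while a
 # cloudflare challenge is being solved (see _update_opener_with_cloudflare).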
 def __init__(self, cookie_file, cloudflare=False):
     Net.__init__(self, cookie_file=cookie_file)
     self._cloudflare_jar = cookielib.MozillaCookieJar() # ISSUE #5537
     self._cloudflare = cloudflare
Example #37
# -*- coding: cp1252 -*-
# Main Module by: Blazetamer and TheHighway
import urllib,urllib2,re,xbmcplugin,xbmcgui,sys,xbmc,os,xbmcaddon
try:        from addon.common.addon import Addon
except:
    try:    from t0mm0.common.addon import Addon
    except: from t0mm0_common_addon import Addon
try:        from addon.common.net   import Net
except:
    try:    from t0mm0.common.net   import Net
    except: from t0mm0_common_net   import Net
#Define common.addon
addon_id='plugin.video.phwizard'; 
AddonTitle='Config Wizard'; 
# Global Stuff
addon=Addon(addon_id,sys.argv); net=Net(); settings=xbmcaddon.Addon(id=addon_id); net.set_user_agent('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'); 
AddonIcon=settings.getAddonInfo('icon')
AddonFanart=settings.getAddonInfo('fanart')
AddonPath=settings.getAddonInfo('path')
# #
def get_params():
        param=[]
        paramstring=sys.argv[2]
        if len(paramstring)>=2:
                params=sys.argv[2]; cleanedparams=params.replace('?','')
                if (params[len(params)-1]=='/'): params=params[0:len(params)-2]
                pairsofparams=cleanedparams.split('&'); param={}
                for i in range(len(pairsofparams)):
                        splitparams={}; splitparams=pairsofparams[i].split('=')
                        if (len(splitparams))==2: param[splitparams[0]]=splitparams[1]
        return param
Example #38
def INDEX(url,name,imdb,move,movegen):
        net = Net()
        link = net.http_GET(url).content
        link=link.replace('\r\n', '').replace('"},]', '"}]')
        magic = api.loads(link, encoding='latin1')
        progress = xbmcgui.DialogProgress()
        progress.create('Fortschritt', 'This is a progress bar.')
        genre=[]
        neu=[]
        sammeln=[]
        for e,i in enumerate(reversed(magic)):
            if name == "Neu im Programm" and e<27:
                neu.append(i)
        for e,i in enumerate(magic):
            if name in i['genre'].encode('utf-8'):
                genre.append(i)
        for e,i in enumerate(magic):
            if "sammeln" in name:
                sammeln.append(i)
        #----Neu im Programm----#
        for e,i in enumerate(neu):
            if e < len(neu):
                percent = int(e*100/len(neu))  # multiply first so Python 2 integer division does not truncate to 0
                message =   str(e) + " von "+str(len(neu))+" Filmen geladen"
                progress.update(percent, message, "Dies passiert bei noch nie eingelesenen Filmen")
            try:
                imdb=re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
            except:
                imdb=""
            try:
                sub=re.search("(.*?)\((\d+)\)", i['titel'])
                addDir(sub.group(1),url,2,i['cover'],imdb,"movie".decode('utf-8'),sub.group(2),None,"")
            except:
                addDir(i['titel'],url,2,i['cover'],imdb,"movie",'',None,"")
        #----GENRES die Filme----#
        for e,i in enumerate(sorted(genre, key=lambda genre: genre['titel'])):
            if move<=e<move+25:
                if e-move < move+25:
                    percent = (e-move)*100/25
                    message =   "FilmInfo des "+str(e-move) + ". von 25 Filmen geladen"
                    progress.update(percent, message, "Dies passiert bei noch nie eingelesenen Filmen")
                try:
                    imdb=re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
                except:
                    imdb=""
                try:
                    sub=re.search("(.*?)\((\d+)\)", i['titel'])
                    addDir(sub.group(1),url,2,i['cover'],imdb,"movie".decode('utf-8'),sub.group(2),None,"")
                except:
                    addDir(i['titel'],url,2,i['cover'],imdb,"movie",'',None,"")
        #----FilmInfo von allen Filmen Sammeln für die Datenbank----#
        for e,i in enumerate(sorted(sammeln, key=lambda sammeln: sammeln['titel'])):
            if e < len(sammeln):
                percent = int(e*100/len(sammeln))
                message =   "FilmInfo des "+str(e) + ". von "+str(len(sammeln))+" Filmen geladen"
                progress.update(percent, message, "Dies passiert bei noch nie eingelesenen Filmen")
            try:
                imdb=re.search(".*?/(tt\d+)/*.*?$", i['imdblink']).group(1)
            except:
                imdb=""
            try:
                sub=re.search("(.*?)\((\d+)\)", i['titel'])
                addDir(sub.group(1),url,2,i['cover'],imdb,"movie".decode('utf-8'),sub.group(2),None,"")
            except:
                addDir(i['titel'],url,2,i['cover'],imdb,"movie",'',None,"")
        #----SEITENNAVIGATION----#
        if len(genre)>0:
            if move!=None and move==0 and len(genre)>25:
                print "<<<----OLDU-1---->>>"
                addDir("Next-->>",url,4,"","","folder","",move+25,name)
            if move!=None and move!=0 and move+25<=len(genre) and len(genre)-move>0:
                print "<<<----OLDU-2---->>>"
                addDir("Next-->>",url,4,"","","folder","",move+25,name)
                addDir("<<--Back",url,4,"","","folder","",move-25,name)
            if move+25>=len(genre) and move!=0:
                print "<<<----OLDU-3---->>>"
                addDir("<<--Back",url,4,"","","folder","",move-25,name)
            addDir("Home","",None,"","","folder","",None,"")
        progress.close()
        xbmcplugin.setContent(int(sys.argv[1]), 'movies')
        xbmc.executebuiltin("Container.SetViewMode(%s)" % addon.getSetting('MAIN') )