Ejemplo n.º 1
0
def get_main_dirs():
    """Draw the addon's top-level menu.

    Adds entries for the remote repository, the channel browser, the
    teletext viewer (only when PIL/Pillow is importable), the paid and
    program sections and, when the remote updater reports a new version,
    an update entry.
    """

    ##CONSTANTS PARTS##
    # Remote list location, read from the addon settings.
    MAIN_URL = XBMCUtils.getSettingFromContext(sys.argv[1],
                                               "remote_repository")
    BROWSE_CHANNELS = "browse_channels"
    ICON = XBMCUtils.getAddonFilePath('icon.png')

    #draw welcome menu
    # NOTE(review): the icon argument position differs between these two
    # calls (4th vs 5th positional) -- confirm against add_dir()'s signature.
    add_dir(XBMCUtils.getString(10001), MAIN_URL, 1, ICON, '', 0)
    add_dir(XBMCUtils.getString(10010), BROWSE_CHANNELS, 3, '', ICON, 0)
    try:
        from window.ImageWindow import windowImage  # teletext window library
        add_dir(name=XBMCUtils.getString(10012),
                url='teletext',
                mode=4,
                provider='teletext',
                page=0,
                thumbnailImage="",
                iconimage=ICON)
    except:
        # Import fails when PIL/Pillow is missing; the teletext entry is
        # simply not shown in that case.
        logger.info("No PIL module installed (needs Pillow 3.4.2 or less)")
        pass
    add_dir(XBMCUtils.getString(10014), 'paidonline', 3, "", 'paidonline', 0)
    add_dir(XBMCUtils.getString(10015), 'programsonline', 3, "",
            'programsonline', 0)
    try:
        # Best-effort: the updater needs network access to compare versions.
        if updater.isUpdatable():
            # NOTE(review): this call passes 5 positional args while the
            # others pass 6 -- confirm add_dir() defaults cover the missing one.
            add_dir(XBMCUtils.getString(10011), '', 0, ICON, 0)
    except:
        logger.error("Couldn't add update option: probably server is down!")
        pass
Ejemplo n.º 2
0
def drawPastebinCom():
    """List pastebin.com search results as directory entries.

    Reads the configured search term from the addon settings, queries
    Pastebin for matching lists and adds one menu entry per result.
    """
    search_term = str(
        XBMCUtils.getSettingFromContext(sys.argv[1], 'pastebin_param'))
    encoded_term = urllib.quote_plus(search_term)
    logger.debug("extracted param to be searched: " + encoded_term)
    results = Pastebin.searchLists(param=encoded_term)
    logger.debug("items obtained: " + str(len(results)))
    menu_level = 1
    for entry in results:
        add_dir(entry["title"], entry["link"], menu_level, '',
                "pastebincom", entry["link"])
Ejemplo n.º 3
0
class Spliveappcom(Downloader):
    """Channel scraper for Spliveapp lists.

    Lists come either in the app's legacy "@ /lista|/channel|/movie" text
    format or as JSON; links and thumbnails may additionally be encrypted
    with PBEWithMD5AndDES (decrypted locally or via a web decoder).
    """

    MAIN_URL = XBMCUtils.getSettingFromContext(int(sys.argv[1]), "splive_channel")
    DECODER_URL = XBMCUtils.getSettingFromContext(int(sys.argv[1]), "online_pbewithmd5anddes_decoder")

    PASSWORD = '******'

    @staticmethod
    def getChannels(page, decode=False, group=''):
        """Return channel elements for *page*.

        page -- '0' or any non-http value selects the configured MAIN_URL;
                a non-'0', non-http value is also reused as the group filter.
        decode -- when True, decrypt thumbnails that are not plain URLs.
        group -- group name used to filter grouped JSON lists.
        """
        x = []
        if str(page) == '0' or "http" not in page:
            # BUGFIX: the original used "is not" / "is" here, which tests
            # object identity, not string equality.
            if str(page) != '0' and group == '':
                group = page
            page = Spliveappcom.MAIN_URL
            # pastebin links must point at the raw view to be parseable
            if page.find("pastebin.com/") > -1 and page.find("/raw/") == -1:
                page = page.replace(".com/", ".com/raw/")
        html = Spliveappcom.getContentFromUrl(page, "", Spliveappcom.cookie, "")
        try:
            x = Spliveappcom.extractElements(html, decode)
        except Exception:
            # Not the legacy text format: fall back to the JSON layout.
            logger.debug("trying json way...")
            x = Spliveappcom.extractJSONElements(html, grouped=group, url=page)
            logger.debug("finished json way...")
        return x

    @staticmethod
    def extractJSONElements(html, grouped='', url=''):
        """Parse a JSON channel list; fall back to brute string scanning.

        grouped -- when set, only stations of that group are returned.
        url -- page url, used as the default link for group entries.
        """
        x = []
        try:
            jsonGlobal = json.loads(html)
            logger.debug("charged json...")
            groups = jsonGlobal["groups"]
            logger.debug("get groups: " + str(len(groups)))
            for group in groups:
                element = {}
                title = group["name"]
                title = re.sub(r'[^A-Za-z0-9\.]+', ' ', title)
                element["title"] = title
                element["thumbnail"] = group["image"]
                element["link"] = url
                if "url" in group:  # was has_key (py2-only)
                    logger.debug("extracted station...")
                    element["link"] = group["url"]
                    element["permaLink"] = True
                elif "stations" in group:
                    # BUGFIX: original compared with "is", i.e. identity.
                    if grouped == '':
                        element["link"] = group["name"]
                    elif group["name"] == grouped:
                        logger.debug("searching for group: " + grouped)
                        for elementLink in group["stations"]:
                            if "isAd" not in elementLink:
                                element = {}
                                title = elementLink["name"]
                                title = re.sub(r'[^A-Za-z0-9\.]+', ' ', title)
                                element["title"] = title
                                element["thumbnail"] = elementLink["image"]
                                element["link"] = elementLink["url"]
                                if len(element["link"]) > 0:
                                    logger.debug("appended grouped: " + element["title"])
                                    element["permaLink"] = True
                                    x.append(element)

                    logger.debug("group station... " + group["name"])
                if "link" in element and grouped == '':
                    logger.debug("appended: " + element["title"])
                    x.append(element)
        except Exception:
            # Malformed JSON: repair the most common encoding glitches and
            # scan the raw text for names/images/urls instead.
            logger.debug("something goes wrong, trying brute way")

            html = html.replace("\n", "").replace("\t", "").strip().replace("  ", "")
            html = html.replace("},]", "}]")  # fix trailing-comma arrays
            i = 1
            if grouped == '':
                # list the groups themselves
                for line in html.split('"stations": ['):
                    if "]" in line:
                        line = line[line.find(']'):]
                    else:
                        line = line[line.find('['):]
                    logger.debug("using line: " + line)
                    title = Decoder.extract('"name": "', '"', line)
                    image = Decoder.extract('"image": "', '"', line)
                    element = {}
                    title = re.sub(r'[^A-Za-z0-9\.]+', ' ', title)
                    element["title"] = title
                    element["thumbnail"] = image
                    element["link"] = title
                    x.append(element)
                    i += 1
            else:
                # list the stations of the requested group only
                group = Decoder.extract('"name": "' + grouped + '"', ']', html)
                i = 0
                for line in group.split('{'):
                    if i > 0:
                        logger.debug("using line from group: " + line)
                        title = Decoder.extract('"name": "', '"', line)
                        image = Decoder.extract('"image": "', '"', line)
                        link = Decoder.extract('"url": "', '"', line)
                        element = {}
                        title = re.sub(r'[^A-Za-z0-9\.]+', ' ', title)
                        element["title"] = title
                        element["thumbnail"] = image
                        element["link"] = link
                        element["permaLink"] = True
                        x.append(element)
                    i += 1
        return x

    @staticmethod
    def extractElements(table, decode=False):
        """Parse the legacy "@ /..." text list format.

        Raises ValueError when no known list marker is present, so the
        caller can fall back to the JSON parser.  (The original reached
        the same fallback via an unbound-variable NameError.)
        """
        x = []
        i = 0
        permaLink = False
        if table.find("@ /lista") > -1:
            splitter = "@ /lista"
        elif table.find("@ /channel") > -1:
            splitter = "@ /channel"
            permaLink = True
        elif table.find("@ /movie") > -1:
            splitter = "@ /movie"
            permaLink = True
        else:
            # explicit signal instead of the original's NameError
            raise ValueError("unknown list format")
        for value in table.split(splitter):
            element = {}
            title = ""
            link = ""
            referer = ""
            if value.find(" title ") > -1:
                title = Decoder.extract(" title \"", "\"", value)
            elif value.find(" name") > -1:
                title = Decoder.extract(" name \"", "\"", value)
            if value.find(" url \"") > -1:
                link = Decoder.extract(" url \"", "\"", value)
            elif value.find(" url_servidor \"") > -1:
                link = Decoder.extract(" url_servidor \"", "\"", value)
            element["title"] = title
            img = ""
            if value.find(" image \"") > -1:
                img = Decoder.extract(" image \"", "\"", value)
            elif value.find(" image_url ") > -1:
                img = Decoder.extract(" image_url \"", "\"", value)
            if decode and img.find("http") == -1:
                try:
                    img = Spliveappcom.decrypt(img)
                    element["thumbnail"] = img
                except Exception:
                    logger.error("Could not be decoded img content.")
            elif img != "":
                element["thumbnail"] = img
            # BUGFIX: original tested "if link.find(...)", which is truthy
            # for -1 (not found) and so rewrote nearly every link; only
            # rewrite real pastebin links to their raw view.
            if link.find("pastebin.com") > -1:
                link = link.replace(".com/", ".com/raw/")
            element["link"] = link
            if value.find("referer \"") > -1:
                referer = Decoder.extract("referer \"", "\"", value)
                if referer != "0":
                    element["referer"] = referer
            if permaLink:
                element["permaLink"] = True
            logger.debug("append: " + title + ", link: " + element["link"])
            if title != '' and link != '':
                x.append(element)
            i += 1
        return x

    @staticmethod
    def decodeUrl(url, referer=''):
        """Return a one-element list holding the decrypted playable link."""
        if url.find("http") == -1 and (url.find("sop://") == -1 and url.find("acestream://") == -1):
            decryptedUrl = Spliveappcom.decrypt(url)
        else:
            # plain http / sopcast / acestream links are used as-is
            decryptedUrl = url
        element = {}
        element["title"] = "Link"
        if referer != '':
            referer = Spliveappcom.decrypt(referer)
            if referer != '0':
                decryptedUrl += ", referer: " + referer
        logger.debug("brute link to be launched: " + decryptedUrl)
        element["link"] = decryptedUrl
        x = []
        x.append(element)
        return x

    @staticmethod
    def decrypt(encrypted):
        """Decrypt a PBEWithMD5AndDES payload, locally or via web service.

        Returns the input unchanged when decryption fails.
        """
        decrypted = encrypted
        try:
            logger.debug("Encrypted content is: " + encrypted)
            if not ONLINE:
                decrypted = PBEWithMD5AndDES.decrypt(encrypted, Spliveappcom.PASSWORD)
            elif len(encrypted) > 0 and encrypted.find("http://") == -1:
                # delegate to the configured online decoder endpoint
                decrypted = Downloader.getContentFromUrl(Spliveappcom.DECODER_URL + '?data=' + encrypted + "&key=" + Spliveappcom.PASSWORD + "&iterations=1000")
            logger.debug("Decrypted content is: " + decrypted)
        except Exception:
            logger.error("Could not be unencrypted: " + encrypted)

        return decrypted
Ejemplo n.º 4
0
    def getChannels(page):
        """Return channel elements scraped from cineestrenostv pages.

        NOTE(review): defined without self and without a visible
        @staticmethod decorator in this chunk -- confirm how the class
        invokes it.

        page -- '0' selects the configured start page; a '/carrusel/tv.html'
                url or the main url are parsed as channel listings, anything
                else is treated as a single channel page.
        """
        logger.debug("starting with page from cineestrenos section: " + page)
        if str(page) == '0':
            tradicionalChannels = XBMCUtils.getSettingFromContext(
                int(sys.argv[1]), "extended_channels")
            logger.debug("using carrusel: " + str(tradicionalChannels))
            # carrusel layout is used when extended channels are disabled
            if tradicionalChannels == "false":
                page = Cineestrenostv.MAIN_URL + '/carrusel/tv.html'
            else:
                page = Cineestrenostv.MAIN_URL
        html = Cineestrenostv.getContentFromUrl(page, "", "",
                                                Cineestrenostv.MAIN_URL)
        x = []
        logger.debug("page is: " + page)
        if page.find("/carrusel/tv.html") > -1:
            # carrusel layout: channels live inside nested container divs,
            # links come from javascript popUp() handlers
            table = Decoder.extract('<div class="container">',
                                    "</div></div></div></div></div>", html)
            for fieldHtml in table.split('<div class="content">'):
                element = {}
                element["link"] = Cineestrenostv.MAIN_URL + Decoder.extract(
                    "<div><a href=\"javascript:popUp('..", "')", fieldHtml)
                if element["link"] != Cineestrenostv.MAIN_URL:
                    if element["link"].find('/multi') != -1:
                        logger.debug("found multi link: " + element["link"])
                        element["title"] = Decoder.extract(
                            "/multi", "/", element["link"])
                    else:
                        # title is the last path segment, extension stripped
                        element["title"] = Decoder.rExtract(
                            "/", ".html", element["link"])
                        if element["title"].find(".") > -1:
                            element["title"] = element[
                                "title"][:element["title"].rfind(".")]
                    element["thumbnail"] = Decoder.extract(
                        ' src="', '"', fieldHtml)
                    if element["thumbnail"].find("://") == -1:
                        # relative thumbnail: prefix the site url
                        element[
                            "thumbnail"] = Cineestrenostv.MAIN_URL + element[
                                "thumbnail"]
                    element["title"] = element["title"].replace("-", " ")
                    logger.debug("found title: " + element["title"] +
                                 ", link: " + element["link"] + ", thumb: " +
                                 element["thumbnail"])
                    # skip placeholder entries ("1", "venus")
                    if element["thumbnail"].find("http") == 0 and not (
                            element["title"] == "1"
                            or element["title"] == "venus"):
                        x.append(element)
        elif page == Cineestrenostv.MAIN_URL:
            # main-page layout: channels live inside a centered table
            table = Decoder.extract('<center><table>',
                                    '</td></tr></table></center>', html)
            for fieldHtml in table.split('<td>'):
                element = {}
                element[
                    "link"] = Cineestrenostv.MAIN_URL + "/" + Decoder.extract(
                        "<a href=\"/", '"', fieldHtml)
                if element["link"].find('"') > -1:
                    element["link"] = element["link"][0:element["link"].
                                                      find('"')]
                if element["link"].find('/multi') != -1:
                    logger.debug("found multi link: " + element["link"])
                    element["title"] = Decoder.extract("/multi", "/",
                                                       element["link"])
                else:
                    logger.debug("found normal link, continue... " +
                                 element["link"])
                    element["title"] = Decoder.extract('" title="', '" target',
                                                       fieldHtml)
                    if element["title"].find('"') > -1:
                        element["title"] = element["title"][0:element["title"].
                                                            find('"')]
                    # strip common suffixes from the anchor title
                    if element["title"].find(" online") > -1:
                        element["title"] = element["title"][0:element["title"].
                                                            find(" online")]
                    if element["title"].find(" Online") > -1:
                        element["title"] = element["title"][0:element["title"].
                                                            find(" Online")]
                    if element["title"].find(" en directo") > -1:
                        element["title"] = element["title"][
                            0:element["title"].find(" en directo")]

                    element["title"] = element["title"].replace("ver ", "")

                #element["title"] = element["title"].decode('utf-8')
                element["thumbnail"] = Decoder.extract('<img src="',
                                                       '" height', fieldHtml)
                if element["thumbnail"].find('"') > -1:
                    element["thumbnail"] = element["thumbnail"][
                        0:element["thumbnail"].find('"')]
                if element["thumbnail"].find("://") == -1:
                    element["thumbnail"] = Cineestrenostv.MAIN_URL + element[
                        "thumbnail"]
                if element["thumbnail"] != Cineestrenostv.MAIN_URL:
                    logger.debug("found title: " + element["title"] +
                                 ", link: " + element["link"] + ", thumb: " +
                                 element["thumbnail"])
                    # drop known-bad/placeholder channels by title
                    if element["thumbnail"].find("http") == 0 and not (
                            element["title"] == "1"
                            or element["title"] == "gran hermano mexico"
                            or element["title"] == "alx syfy"
                            or element["title"] == "intereconomia punto pelota"
                            or element["title"] == "cine"
                            or element["title"].find("-LATINOAMERICA") > -1):
                        x.append(element)
        else:
            # any other url: treat it as a single channel page
            logger.debug('extracting channel from: ' + page)
            x.append(Cineestrenostv.extractChannel(html, page))
        return x
Ejemplo n.º 5
0
class Mobdro(Downloader):
    """Scraper for the Mobdro streaming api.

    Fetches category/search listings from the v4 api and converts the
    'relayer' payloads into signed, playable urls.
    """

    MAIN_URL = "mobdro.me"
    CHANNELS = "channels"
    API_URL = "https://api.mobdro.sx/streambot/v4/show"
    API_URL_SEARCH = "https://api.mobdro.sx/streambot/v4/search"

    TOKEN = XBMCUtils.getSettingFromContext(
        int(sys.argv[1]),
        "mobdro_api")  #stored for premium support, implementation will change
    REMOTE_TOKEN = XBMCUtils.getSettingFromContext(int(sys.argv[1]),
                                                   "remote_api_url")

    REMOTE_AUTH = "https://api.mobdro.sx/utils/auth"
    SIGNATURE = "3560652538"

    @staticmethod
    def getChannels(page):
        """Return menu entries for '0', else category/search results."""
        x = []
        logger.debug("using Mobdro...")
        if str(page) == '0':
            # Top-level categories; disabled entries kept for reference.
            categories = [
                ("Channels", "channels"),
                ("News", "news"),
                ("Shows", "shows"),
                ("Movies", "movies"),
                ("Sports", "sports"),
                ("Music", "music"),
                ("Gaming", "gaming"),
                ("Animals", "animals"),
                ("Tech", "tech"),
                #("Podcasts", "podcasts"),
                #("Spiritual", "spiritual"),
                ("Others", "others"),
                #("Search", "search"),  #TODO
            ]
            for title, link in categories:
                x.append({"title": title, "link": link})
        elif str(page) != 'search':  #action
            # BUGFIX: original used "is not", an identity test that made
            # the search branch below effectively unreachable.
            logger.debug("launching action: " + page)
            response = Mobdro.channel_list(page)
            x = Mobdro.parse_results(response)
        else:  #search
            logger.debug("launching action: SEARCH")
            # display keyboard, it will wait for result
            keyboard = XBMCUtils.getKeyboard()
            keyboard.doModal()
            if keyboard.isConfirmed():
                text = keyboard.getText()
                response = Mobdro.search_list(text)
                x = Mobdro.parse_results(response)
        return x

    @staticmethod
    def parse_results(response):
        """Convert an api JSON response into elements with final links."""
        x = []
        # parse results
        results = json.loads(response)
        for result in results:
            url = Mobdro.parse_relayer(result)
            # BUGFIX: original compared with "is not" (identity test).
            if url != "exception":
                element = {}
                element["link"] = url
                try:
                    element["thumbnail"] = result["img"]
                except Exception:
                    pass  # thumbnail is optional
                element["title"] = result["name"] + " - " + result["language"]
                element["finalLink"] = True
                logger.debug("appending: " + element["title"] + ", url: " +
                             url)
                x.append(element)
        return x

    @staticmethod
    def pretoken():
        """Ensure Mobdro.TOKEN is populated.

        Prefers a token fetched from the configured remote url; otherwise
        authenticates against the api with the static signature.
        """
        if (Mobdro.TOKEN is None or len(Mobdro.TOKEN)
                == 0) and (Mobdro.REMOTE_TOKEN is not None
                           and len(Mobdro.REMOTE_TOKEN) > 0):
            remoteToken = Downloader.getContentFromUrl(
                Mobdro.REMOTE_TOKEN).encode("utf-8").strip()
            logger.debug("mobdro token is: " + remoteToken)
            Mobdro.TOKEN = remoteToken
            logger.debug("constant is: " + Mobdro.TOKEN)
        else:
            response = Downloader.getContentFromUrl(url=Mobdro.REMOTE_AUTH,
                                                    data="signature=" +
                                                    Mobdro.SIGNATURE)
            formatedResponse = json.loads(response)
            remoteToken = formatedResponse["token"]
            logger.debug("updated mobdro token is: " + remoteToken)
            Mobdro.TOKEN = remoteToken

    @staticmethod
    def search_list(term):
        """POST a search query to the api and return the raw response.

        NOTE(review): unlike channel_list this does not call pretoken()
        first -- confirm a valid TOKEN is guaranteed by the caller.
        """
        c_headers = {"User-Agent": "Mobdro/5.0", "Referer": "api.mobdro.sx"}
        logger.debug("TOKEN is: " + Mobdro.TOKEN)
        c_data = {
            'query': term,
            'parental': 0,
            'languages': '[]',
            'alphabetical': 0,
            'token': Mobdro.TOKEN
        }
        c_data = urllib.urlencode(c_data)
        # Fetch channel list
        req = urllib2.Request(Mobdro.API_URL_SEARCH, c_data, c_headers)
        response = urllib2.urlopen(req)
        response = response.read()
        return response

    @staticmethod
    def channel_list(action):
        """POST a category request to the api and return the raw response."""
        Mobdro.pretoken()
        response = "ERROR"
        # On first page, pagination parameters are fixed
        if action is not None:
            c_headers = {
                "User-Agent": "Mobdro/5.0",
                "Referer": "api.mobdro.sx"
            }
            logger.debug("TOKEN is: " + Mobdro.TOKEN)
            c_data = {
                'data': action,
                'parental': 0,
                'languages': '[]',
                'alphabetical': 0,
                'token': Mobdro.TOKEN
            }
            c_data = urllib.urlencode(c_data)
            # Fetch channel list
            req = urllib2.Request(Mobdro.API_URL, c_data, c_headers)
            response = urllib2.urlopen(req)
            response = response.read()
        return response

    @staticmethod
    def parse_relayer(params):
        """Build a playable url from a result's direct url or relayer blob.

        Returns "exception" when a required key is missing from the blob.
        """
        url = "NonE"
        try:
            if "url" in params:  # was has_key (py2-only)
                url = params["url"]
                logger.debug("mobdro.directURL: " + url)
            elif "relayer" in params:
                params2 = json.loads(params["relayer"])
                logger.debug("RELAYED: " + repr(params2))
                protocol = "http"  #params2["protocol"]
                app = params2["app"]
                server = params2["server"]
                playpath = params2["playpath"]
                password = params2["password"]
                dire = params2["dir"]
                expiration_time = params2["expiration_time"]
                millis = int(round(time.time() * 1000))
                # expiry in seconds (floor division, same as py2 int "/")
                l = millis // 1000 + expiration_time
                # signature string: password + expiry + dir + playpath
                url = "%s%d/%s/%s" % (password, l, dire, playpath)
                # url-safe, unpadded base64 of its md5 digest
                url_md5 = md5.new(url).digest()
                url_base64 = base64.b64encode(url_md5)
                url_base64 = url_base64.replace("+", "-").replace("/",
                                                                  "_").replace(
                                                                      "=", "")
                arr = [protocol, server, app, playpath, url_base64, l]
                url = "%s://%s/%s/%s?st=%s&e=%d"  #"http://%s/live/%s/%d/%s"
                url = url % tuple(arr)
                url += "|" + Downloader.getHeaders(Mobdro.MAIN_URL)
            else:
                logger.debug("REJECTED: " + repr(params))
        except KeyError:
            url = "exception"
        return url
Ejemplo n.º 6
0
def browse_channels(url, page):  #BROWSES ALL PROVIDERS (it has been re-sorted)
    """Draw the provider menus.

    url -- 'browse_channels' draws the category menu; otherwise it names
           the category whose provider entries are drawn.
    page -- unused here; kept so the dispatcher signature stays uniform.
    """
    if str(url) == 'browse_channels':
        # top level: one entry per provider category
        add_dir(XBMCUtils.getString(10016), 'popularonline', 3, "",
                'popularonline', 0)
        add_dir(XBMCUtils.getString(10017), 'tvseriesonline', 3, "",
                'tvseriesonline', 0)
        add_dir(XBMCUtils.getString(10018), 'torrentwebsites', 3, "",
                'torrentwebsites', 0)
        #add_dir(XBMCUtils.getString(10019), 'usersonlinewebsites', 3, "", 'usersonlinewebsites', 0)
        add_dir(XBMCUtils.getString(10020), 'sportsonline', 3, "",
                'sportsonline', 0)
        add_dir(XBMCUtils.getString(10021), 'newsonlinewebsites', 3, "",
                'newsonlinewebsites', 0)
        add_dir(XBMCUtils.getString(10022), 'worldstvonlinewebsites', 3, "",
                'worldstvonlinewebsites', 0)
        add_dir(XBMCUtils.getString(10023), 'listsonlinewebsites', 3, "",
                'listsonlinewebsites', 0)
        add_dir(XBMCUtils.getString(10024), 'webcamsonlinewebsites', 3, "",
                'webcamsonlinewebsites', 0)
        add_dir(XBMCUtils.getString(10025), 'otherssonlinewebsites', 3, "",
                'otherssonlinewebsites', 0)
    else:
        # per-category menus; several providers are gated behind settings
        # (settings come back as the strings "true"/"false")
        enableNews = XBMCUtils.getSettingFromContext(int(sys.argv[1]),
                                                     "enable_news")
        enablePlexus = XBMCUtils.getSettingFromContext(int(sys.argv[1]),
                                                       "enable_plexus")
        enableMobdro = XBMCUtils.getSettingFromContext(int(sys.argv[1]),
                                                       "enable_mobdro")
        enableSplive = XBMCUtils.getSettingFromContext(int(sys.argv[1]),
                                                       "enable_splive")
        patchedFfmpeg = XBMCUtils.getSettingFromContext(
            int(sys.argv[1]), "ffmpeg_patch")
        enableDinamic = XBMCUtils.getSettingFromContext(
            int(sys.argv[1]), "enable_pastebin")

        if str(url) == 'tvseriesonline':
            add_dir("HDFull.tv", 'hdfulltv', 4,
                    "http://hdfull.tv/templates/hdfull/images/logo.png",
                    'hdfulltv', 0)
            add_dir("Peliculasid.cc", 'peliculasbiz', 4, "", 'peliculasbiz', 0)
            add_dir("Pepecine.com", 'pepecine', 4,
                    "http://pepecine.net/assets/images/logo.png", 'pepecine',
                    0)
            # BUGFIX: every other gate compares against the string "true";
            # the original bare truthiness test was True for any non-empty
            # setting value ("false" included).
            if enablePlexus == "true":
                #add_dir("[T] - Elitetorrent.net", 'elitetorrentnet', 4, "http://www.elitetorrent.net/images/logo_elite.png",'elitetorrentnet', 0)
                add_dir(
                    "[T] - TuMejorTorrent.net", 'tumejortorrent', 4,
                    "http://tumejortorrent.com/pct1/library/content/template/images/tmt_logo.jpg",
                    'tumejortorrent', 0)
                add_dir(
                    "[T] - MejorTorrent.net", 'mejortorrent', 4,
                    "http://www.mejortorrent.com/imagenes_web/cabecera.jpg",
                    'mejortorrent', 0)
        elif str(url) == 'popularonline':
            add_dir(
                "Youtube.com", 'youtube', 4,
                "https://upload.wikimedia.org/wikipedia/commons/thumb/e/ef/YouTube_logo_2015.svg/120px-YouTube_logo_2015.svg.png",
                'youtube', 0)
            add_dir(
                "RTVE AlaCarta", 'rtvealacarta', 4,
                "https://upload.wikimedia.org/wikipedia/commons/thumb/e/ee/Logo_RTVE.svg/150px-Logo_RTVE.svg.png",
                'rtvealacarta', 0)
            add_dir(
                "CLAN (rtve)", 'clan', 4,
                "https://upload.wikimedia.org/wikipedia/en/thumb/4/47/TVEClan_logo.png/150px-TVEClan_logo.png",
                'clan', 0)
            add_dir(
                "TuneIn.com", 'tunein', 4,
                "https://lh5.googleusercontent.com/-NsniPTwZFkc/AAAAAAAAAAI/AAAAAAAAOLE/qtdbWIxlF5M/s0-c-k-no-ns/photo.jpg",
                'tunein', 0)
        elif str(url) == 'paidonline':
            enableYomvi = XBMCUtils.getSettingFromContext(
                int(sys.argv[1]), "enable_yomvi")
            if enableYomvi == "true":
                add_dir(
                    "Yomvi.es", 'yomvies', 4,
                    "http://ver.movistarplus.es/img/logo-web-player-YOMVI.png",
                    'yomvies', 0)
        elif str(url) == 'programsonline':
            if enableSplive == "true":
                add_dir(
                    "Spliveapp.com", 'splive', 4,
                    "http://www.spliveapp.com/main/wp-content/uploads/footer_logo.png",
                    'splive', 0)
            if enableMobdro == 'true':
                add_dir("Mobdro.com", 'mobdro', 4,
                        "https://www.mobdro.com/favicon.ico", 'mobdro', 0)
        elif str(url) == 'torrentwebsites' and enablePlexus == "true":
            add_dir(
                "Arenavision.in", 'arenavisionin', 4,
                "http://www.arenavision.in/sites/default/files/logo_av2015.png",
                'arenavisionin', 0)
            add_dir("Acesoplisting.in", 'acesoplistingin', 4,
                    "http://acesoplisting.in/images/acesop.gif",
                    'acesoplistingin', 0)
            add_dir("Ace-tv.ru", 'acetvru', 4, "http://ace-tv.eu/logo.png",
                    'acetvru', 0)
        #elif str(url)=='usersonlinewebsites':
        #add_dir("Tvshow.me", 'tvshowme', 4, "http://www.tvshow.me/wp-content/uploads/2016/09/Icon_.png", 'tvshowme', 0)
        elif str(url) == 'sportsonline':
            add_dir("Live9.co", 'live9', 4, "", 'live9', 0)
            add_dir("Cricfree.tv", 'cricfree', 4,
                    "http://cricfree.tv/images/logosimg.png", 'cricfree', 0)
            add_dir("Mamahd.com", 'mamahdcom', 4,
                    "http://mamahd.com/images/logo.png", 'mamahdcom', 0)
            add_dir("Vipracing.net", 'vipracinginfo', 4, "", 'vipracinginfo',
                    0)
            add_dir("Zonasports.me", 'zonasportsme', 4,
                    "http://i.imgur.com/yAuKRZw.png", 'zonasportsme', 0)
        elif str(url) == 'newsonlinewebsites' and enableNews == "true":
            add_dir("Bbc.co.uk", 'bbccouk', 4, "", 'bbccouk',
                    'http://feeds.bbci.co.uk/news/rss.xml?edition=int')
            add_dir(
                "Reuters.com", 'reuters', 4,
                "http://www.thewrap.com/wp-content/uploads/2013/10/Reuters-Logo.jpg",
                'reuters', 0)
            add_dir(
                "CNN.com", 'editioncnn', 4,
                "http://i.cdn.cnn.com/cnn/.e1mo/img/4.0/logos/logo_cnn_badge_2up.png",
                'editioncnn', 0)
            add_dir("ElMundo.es", 'editionelmundo', 4,
                    "http://estaticos.elmundo.es/imagen/canalima144.gif",
                    'editionelmundo', 0)
            add_dir("ElPais.es", 'editionelpais', 4,
                    "http://ep01.epimg.net/corporativos/img/elpais2.jpg",
                    'editionelpais', 0)

        elif str(url) == 'worldstvonlinewebsites':
            add_dir(
                "Filmon.com", 'filmon', 4,
                "http://static.filmon.com/theme/img/filmon_small_logo.png",
                'filmoncom', 0)
            add_dir("Streamgaroo.com", 'streamgaroo', 4,
                    "http://www.streamgaroo.com/images/logo.png",
                    'streamgaroo', 0)
        elif str(url) == 'listsonlinewebsites':
            add_dir(
                "Ramalin.com", 'ramalin', 4,
                "http://websites-img.milonic.com/img-slide/420x257/r/ramalin.com.png",
                'ramalin', 0)
            if enableDinamic == "true":
                add_dir("Pastebin.com", 'pastebincom', 4, "", 'pastebincom', 0)
            add_dir("Redeneobux.com", 'redeneobuxcom', 4, "", 'redeneobuxcom',
                    0)
        elif str(url) == 'webcamsonlinewebsites':
            add_dir("Skylinewebcams.com", 'skylinewebcams', 4,
                    "http://www.skylinewebcams.com/website.jpg",
                    'skylinewebcams', 0)
        elif str(url) == 'otherssonlinewebsites':
            # these providers need the patched ffmpeg build
            if patchedFfmpeg == "true":
                add_dir("Cinestrenostv.tv", 'cineestrenos', 4,
                        "http://i.imgur.com/z3CINCU.jpg", 'cineestrenos', 0)
                add_dir("Vipgoal.net", 'vigoal', 4,
                        "http://vipgoal.net/VIPgoal/img/logo.png", 'vigoal', 0)
Ejemplo n.º 7
0
import urllib2,os,sys
from tvboxcore.xbmcutils import XBMCUtils
from tvboxcore import logger
from tvboxcore import downloadtools
from tvboxcore import ziptools
import time
import CommonFunctions as common

# Url of the remote xml that describes available updates (addon setting).
REMOTE_FILE_XML = XBMCUtils.getSettingFromContext(sys.argv[1],"remote_updater")

# Installed path of this addon; downloaded zips are staged here.
ROOT_DIR = XBMCUtils.getAddonInfo('path')

def install(remote_file, id, folder):
    """Download *remote_file* and unzip it into the Kodi addons dir.

    remote_file -- url of the zip to fetch.
    id -- addon id, used only for logging.
    folder -- when non-empty, the zip's main folder is replaced with this
              name while extracting (github zips carry a branch suffix).
    """
    #first check if plexus exists, and where
    logger.info("installing " + id + "... ")

    addons_dir = XBMCUtils.getAddonsDir()
    logger.debug("Addons dir set to: " + addons_dir)

    # build the staging path portably instead of concatenating with "/"
    localfile = os.path.join(ROOT_DIR, "install.zip")

    downloadtools.downloadfile(remote_file, localfile, notStop=False)
    logger.debug("Download done, now it's time to unzip")
    unzipper = ziptools.ziptools()
    if folder == '':
        unzipper.extract(localfile, addons_dir)  #github issues
    else:
        unzipper.extractReplacingMainFolder(localfile, addons_dir, folder)
    logger.debug("Unzip done! cleaning...")
    # remove the staged zip once extracted
    os.remove(localfile)
    logger.info("Additional addon clean done!")