def __init__(self, photos, download_path):
     self.len = len(photos)
     log("downloader.__init__ with %d items and path=%s" % (self.len, download_path))
     self.pDialog = xbmcgui.DialogProgress()
     self.pDialog.create(Addon.getAddonInfo("name"))
     s = Addon.getLocalizedString(32301)  # Gathering Data...
     self.pDialog.update(0, s)
     album_title = photos[0]["album_title"]
     self.sub_folder = re.sub(r"[^\w\- ]", "", album_title).replace(" ", "_")
     self.full_path = os.path.join(download_path, self.sub_folder)
     log('script.download_album using full_path="%s"' % self.full_path)
     self.__create_folder(self.full_path)
     for i, photo in enumerate(photos):
         self.current_item = i + 1
         url = photo["pic"]
         self.current_file = photo["pic"].split("/")[-1].split("?")[0]
         filename = os.path.join(self.full_path, self.current_file)
         log('downloader: Downloading "%s" to "%s"' % (url, filename))
         try:
             urllib.urlretrieve(url, filename, self.update_progress)
         except IOError as e:
             log('downloader: ERROR: "%s"' % str(e))
             break
         log("downloader: Done")
         if self.pDialog.iscanceled():
             log("downloader: Canceled")
             break
 def update_progress(self, count, block_size, total_size):
     percent = int(self.current_item * 100 / self.len)
     # guard against total_size being 0 or -1 (no Content-Length reported by the server)
     item_percent = int(count * block_size * 100 / total_size) if total_size > 0 else 0
     line1 = Addon.getLocalizedString(32302) % (self.current_item, self.len)
     line2 = Addon.getLocalizedString(32303) % (self.current_file, item_percent)
     line3 = Addon.getLocalizedString(32304) % self.sub_folder
     self.pDialog.update(percent, line1, line2, line3)
 def onInit(self):
     log('script.onInit started')
     self.show_info = True
     self.aspect_keep = True
     self.last_seen_album_id = 0
     if Addon.getSetting('show_arrows') == 'false':
         self.getControl(self.CONTROL_ARROWS).setVisible(False)
     if Addon.getSetting('aspect_ratio2') == '0':
         self.getControl(self.CONTROL_ASPECT_KEEP).setVisible(False)
     self.showHelp()
     self.showAlbums()
     self.setFocus(self.getControl(self.CONTROL_MAIN_IMAGE))
     log('script.onInit finished')
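
Note: update_progress() above is passed as the reporthook argument to urllib.urlretrieve() in __init__, so Python 2's urllib calls it after every block with (block count, block size, total size). A minimal standalone sketch of that pattern (the URL and file name here are placeholders, not from the source):

import urllib

def report(count, block_size, total_size):
    # total_size is -1 when the server sends no Content-Length header
    if total_size > 0:
        print '%d%%' % int(count * block_size * 100 / total_size)

urllib.urlretrieve('http://example.com/photo.jpg', 'photo.jpg', report)
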
Example #4
def notify(addon_id, typeq, title, message, times, line2='', line3=''):
    import xbmc
    addon_tmp = Addon(addon_id)
    if title == '':
        title = '[B]' + addon_tmp.get_name() + '[/B]'
    if typeq == 'small':
        if times == '':
            times = '5000'
        smallicon = notify_icon
        xbmc.executebuiltin("XBMC.Notification("+title+","+message+","+times+","+smallicon+")")
    elif typeq == 'big':
        dialog = xbmcgui.Dialog()
        dialog.ok(' '+title+' ', ' '+message+' ', line2, line3)
    else:
        dialog = xbmcgui.Dialog()
        dialog.ok(' '+title+' ', ' '+message+' ')        
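
A hedged usage sketch for notify() above (the addon id and strings are placeholders; it also assumes a module-level notify_icon path is defined, as the function expects): typeq='small' pops a toast via the Notification builtin, while 'big' and anything else fall back to a modal Dialog().ok().

notify('plugin.video.example', 'small', 'My Addon', 'Download finished', '5000')
notify('plugin.video.example', 'big', 'My Addon', 'Something went wrong', '', 'line two', 'line three')
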
 def download_album(self):
     log('script.download_album started')
     download_path = Addon.getSetting('download_path')
     if not download_path:
         s = Addon.getLocalizedString(32300)  # Choose default download path
         new_path = xbmcgui.Dialog().browse(3, s, 'pictures')
         if not new_path:
             return
         else:
             download_path = new_path
             Addon.setSetting('download_path', download_path)
     log('script.download_album using download_path="%s"' % download_path)
     album_url = self.getProperty('album_url')
     items = self.ScraperManager.get_photos(album_url)
     downloader.Downloader(items, download_path)
     log('script.download_album finished')
 def startSlideshow(self):
     log('script.startSlideshow started')
     params = {}
     params['scraper_id'] = self.ScraperManager.scraper_id
     params['mode'] = 'photos'
     params['album_url'] = self.getProperty('album_url')
     if Addon.getSetting('random_slideshow') == 'true':
         random = 'random'
     else:
         random = 'notrandom'
     url = 'plugin://%s/?%s' % (Addon.getAddonInfo('id'),
                                urllib.urlencode(params))
     log('script.startSlideshow using url=%s' % url)
     xbmc.executebuiltin('Slideshow(%s, recursive, %s)'
                         % (url, random))
     log('script.startSlideshow finished')
def show_scrapers():
    log('plugin.show_scrapers started')
    scrapers = get_scrapers()
    for scraper in scrapers:
        liz = xbmcgui.ListItem(scraper['title'], iconImage='DefaultImage.png',
                               thumbnailImage='DefaultFolder.png')
        params = {'mode': 'albums',
                  'scraper_id': scraper['id']}
        url = 'plugin://%s/?%s' % (Addon.getAddonInfo('id'),
                                   urllib.urlencode(params))
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url,
                                    listitem=liz, isFolder=True)
    log('plugin.show_scrapers finished')
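
The plugin:// URLs built above with urllib.urlencode() come back to the addon as sys.argv[0] (base URL), sys.argv[1] (handle) and sys.argv[2] (query string). A minimal routing sketch under that assumption, mirroring the mode names these functions use:

import sys
import urlparse

params = dict((key, value[0]) for key, value
              in urlparse.parse_qs(sys.argv[2].lstrip('?')).items())
mode = params.get('mode')
if mode == 'albums':
    show_albums(params['scraper_id'])
elif mode is None:
    show_scrapers()
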
 def __init__(self):
     import urlresolver
     self.addon = Addon()
     self.common = self.addon.common
     self.urlresolver = urlresolver
     self.urlresolver.plugnplay.plugin_dirs = []
     if self.common.resolvers:
         self.urlresolver.plugnplay.set_plugin_dirs(self.urlresolver.common.plugins_path, self.common.resolvers_path,
                                                    self.common.builtin_resolvers_path)
     else:
         self.urlresolver.plugnplay.set_plugin_dirs(self.urlresolver.common.plugins_path,
                                                    self.common.builtin_resolvers_path)
     self.urlresolver.plugnplay.load_plugins()
def show_albums(scraper_id):
    log('plugin.show_albums started with scraper_id=%s' % scraper_id)
    albums = get_albums(scraper_id)
    for album in albums:
        liz = xbmcgui.ListItem(album['title'], iconImage='DefaultImage.png',
                               thumbnailImage=album['pic'])
        liz.setInfo(type='image', infoLabels={'Title': album['title']})
        params = {'mode': 'photos',
                  'scraper_id': scraper_id,
                  'album_url': album['id']}
        url = 'plugin://%s/?%s' % (Addon.getAddonInfo('id'),
                                   urllib.urlencode(params))
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=url,
                                    listitem=liz, isFolder=True)
    log('plugin.show_albums finished')
Example #10
    
    try:
        # enable button
        CANCEL_BUTTON.setEnabled(True)
    except:
        pass
    
    pDialog.close()
    print 'Update Repo'
    xbmc.executebuiltin("UpdateAddonRepos")
    from addon import Addon

    addon_id = 'script.icechannel'

    try:
        addon = Addon(addon_id, sys.argv)
    except:
        addon = Addon(addon_id)

    addon_path = addon.get_path()
    lib_path = os.path.join(addon_path, 'lib', 'entertainment')
    plugins_path = os.path.join(lib_path, 'plugins')
    settings_file = os.path.join(addon_path, 'resources', 'settings.xml')
        
    plugin_dirs = [plugins_path]
    from glob import glob
    plugin_dirs.extend(glob(os.path.join(os.path.dirname(addon_path), addon_id + '.extn.*', 'plugins')))
Example #11
    def __init__(self, params):
        site = self.__module__
        addon = Addon()
        common = addon.common        
        mode = params['mode']

        if mode == 'main':

            sql_params = (params['sub_site'],)
            execute = 'SELECT * FROM ' + common.hist_db_table + ' WHERE sub_site=? ORDER BY id DESC'
            selected = common.db.fetchall(execute, sql_params)
            item_list = []
            if selected:
                for this_id, site, content, url, params in selected:
                    try:
                        params = AddonDict(common.addon_type()).str_update(params)
                        item_list.extend([params])
                    except:
                        pass
            if item_list:
                addon.add_items(item_list)
            addon.end_of_directory()

        elif mode == 'clear_history':
            if not params['sub_site']:
                execute = 'DROP TABLE ' + common.hist_db_table
                sql_params = ''
            else:
                execute = 'DELETE FROM ' + common.hist_db_table + ' WHERE sub_site=?'
                sql_params = (params['sub_site'],)
            clear_hist = xbmcgui.Dialog().yesno(
                common.addon_name + ' - ' + addon.language(30879, True), ' ', addon.language(30880, True),
                nolabel=addon.language(30899, True), yeslabel=addon.language(30898, True))
            if common.to_bool(clear_hist):
                cleared = common.db.execute(execute, sql_params)
                if common.to_bool(cleared):
                    common.db.execute('VACUUM ' + common.hist_db_table)
                    addon.alert(str(addon.language(30881, True)))
                    xbmc.executebuiltin('Container.Refresh')
                else:
                    addon.alert(str(addon.language(30919, True)))
Example #12
'''
    common XBMC Module
    Copyright (C) 2011 t0mm0

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

from addon import Addon

addon = Addon('script.module.addon.common')
addon_path = addon.get_path()
addon_version = addon.get_version()
    def __init__(self, __params_):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        __site_ = self.__module__
        __mode_ = __params_['mode']

        __home_url_ = 'http://www.watchxxxfree.com/'
        __search_url_ = __home_url_ + '?s='
        __false_positives_ = ['http://www.watchxxxfree.com/watch-full-movies-hd/', 'http://www.watchxxxfree.com',
                              'http://www.watchxxxfree.com/category/movies/', 'http://www.watchxxxfree.com/category/ategorized222/',
                              'http://www.watchxxxfree.com/watch-full-movies-hd/']

        if __mode_ == 'main':
            __item_list_ = [{'site': __site_, 'mode': 'list', 'title': a.language(30003), 'content': 'movies',
                             'url': __home_url_ + '?filtre=date&cat=0', 'cover_url': a.image('recent.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'categories', 'title': a.language(30005), 'content': 'movies',
                             'url': __home_url_ + 'categories/', 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'topstars', 'title': a.language(30015), 'content': 'movies',
                             'url': __home_url_ + 'top-pornstars/', 'cover_url': a.image('toppornstars.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                             'url': __search_url_, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(), 'type': 3}]
            __item_list_.extend(a.favs_hist_menu(__site_))
            __item_list_.extend(a.extended_menu())
            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'categories':
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'listing-cat'}))
            __item_list_ = []
            if __soup_:
                for __item_ in __soup_.findAll('li'):
                    if __item_:
                        if __item_.a.get('href') not in __false_positives_:
                            try:
                                __vidcount_ = __item_.findAll('span', {'class': 'nb_cat border-radius-5'})[0].string.encode('UTF-8')
                                __vidcount_ = re.sub('\svideo[s]*', '', __vidcount_)
                            except: __vidcount_ = '0'
                            if __vidcount_ and __vidcount_ != '0':
                                __title_ = __item_.a.get('title').encode('UTF-8') + ' (%s)' % __vidcount_
                                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.a.get('href'),
                                                      'content': __params_['content'], 'title': __title_,
                                                      'cover_url': a.image(__item_.img.get('src'), image), 'backdrop_url': a.art(), 'type': 3}])

            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'topstars':
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'wp-tag-cloud'}))
            __item_list_ = []
            if __soup_:
                for __item_ in __soup_.findAll('li'):
                    if __item_:
                        if __item_.a.get('href') not in __false_positives_:
                            try:
                                __vidcount_ = __item_.a.get('title')
                                __vidcount_ = re.sub('\stopic[s]*', '', __vidcount_)
                            except: __vidcount_ = '0'
                            if __vidcount_ and __vidcount_ != '0':
                                __title_ = __item_.a.string.encode('UTF-8') + ' (%s)' % __vidcount_
                                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.a.get('href'),
                                                      'content': __params_['content'], 'title': __title_,
                                                      'cover_url': a.image(), 'backdrop_url': a.art(), 'type': 3}])

            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'list':
            if __params_['content'] == 'search':
                __item_ = a.search_input()
                if __item_: __params_['url'] = __search_url_ + __item_
                else: exit(1)
            elif __params_['content'] == 'goto':
                __last_item_ = re.search('/page/([0-9]+)/', __params_['url'])
                if __last_item_: __last_item_ = int(__last_item_.group(1))
                else: __last_item_ = 10000
                __item_ = a.page_input(__last_item_)
                if __item_: __params_['url'] = re.sub('/page/[0-9]+/', '/page/' + str(__item_) + '/', __params_['url'])
                else: exit(1)
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'listing-videos listing-tube'}))
            __item_list_ = []
            __params_['mode'] = 'play'
            __params_['content'] = 'movies'
            __params_['type'] = 0
            __params_['context'] = 0
            __params_['duration'] = '7200'
            if __soup_:
                __xbmcdict_ = XBMCDict(0).update(__params_)
                for __item_ in __soup_.findAll('li', {'class': 'border-radius-5 box-shadow'}):
                    if __item_:
                        if __item_.a.get('href') not in __false_positives_:
                            __dict_ = __xbmcdict_.copy()
                            if 'full movie' not in __dict_['title'].lower() and 'xtheatre' not in __dict_['title'].lower():
                                __dict_['duration'] = '1500'
                                __dict_['content'] = 'episodes'
                            __dict_['url'] = __item_.a.get('href')
                            __dict_['title'] = __item_.a.get('title').encode('UTF-8')
                            __dict_['tvshowtitle'] = __dict_['title']
                            __dict_['originaltitle'] = __dict_['title']
                            __dict_['cover_url'] = a.image(__item_.img.get('data-lazy-src'))
                            __dict_['thumb_url'] = __dict_['cover_url']
                            __dict_['poster'] = __dict_['cover_url']
                            __dict_['sub_site'] = __site_

                            __item_list_.extend([__dict_])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'pagination'}))
            __last_item_ = False
            if __soup_:
                for __item_ in __soup_.findAll('a'):
                    if (__item_.string.encode('UTF-8') == 'Last »') or (__item_.get('class') == 'last'):
                        __last_item_ = __item_.get('href')
                        break
                if __last_item_ is False:
                    for __last_item_ in __soup_.findAll('a', {'class': 'inactive'}): pass
                    if __last_item_: __last_item_ = __last_item_.get('href')
                __item_ = __soup_.find('span', {'class': 'current'})
                if __item_:
                    if __item_.parent:
                        __item_ = __item_.parent
                        if __item_.previousSibling:
                            if __item_.previousSibling.find('a'):
                                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.previousSibling.a.get('href'), 'content': __params_['content'],
                                                      'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                                      'backdrop_url': a.art(), 'type': 3}])
                        if __item_.nextSibling:
                            if __item_.nextSibling.find('a'):
                                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.nextSibling.a.get('href'), 'content': __params_['content'],
                                                      'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                                      'backdrop_url': a.art(), 'type': 3}])
            if __last_item_:
                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_, 'content': 'goto',
                                      'title': a.language(30019, True), 'cover_url': a.image('goto.png', image),
                                      'backdrop_url': a.art(), 'type': 3}])

            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'play':
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'video-embed'}))
            __item_list_ = []
            if __soup_:
                for __script_ in __soup_.findAll('script'):
                    __item_ = ''
                    if __script_.get('src'):
                        if 'http://videomega.tv/validatehash.php' in __script_['src']: __item_ = __script_['src']
                        elif 'ref=' in __script_.get('src'):
                            __temp_ = re.search('.*ref=[\'"](.+?)[\'"]', __script_.get('src'))
                            if __temp_: __item_ = 'http://videomega.tv/iframe.php?ref=' + __temp_.group(1)
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __item_list_.extend([__dict_])
                if __soup_.find('iframe', src=True):
                    __item_ = ''
                    for __iframe_ in __soup_.findAll('iframe', src=True):
                        if __iframe_.get('data-lazy-src'):
                            __item_ = __iframe_.get('data-lazy-src')
                        else:
                            __item_ = __iframe_.get('src').replace('\\', '')
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __item_list_.extend([__dict_])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'video-infos'}))
            if __soup_:
                __item_ = ''
                for __p_ in __soup_.findAll('p'):
                    if __p_.iframe:
                        __item_ = __p_.iframe.get('src')
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __item_list_.extend([__dict_])
            if __item_list_:
                from playback import Playback
                Playback().choose_sources(__item_list_)
            else: a.alert(a.language(30904, True), sound=False)
Example #14
 def __init__(self):
     self.cpu = CPU()
     self.memory = Memory()
     self.disk = Disk()
     self.network = Network()
     self.addon = Addon()
Example #15
import os
import sys
import threading
import datetime
import platform
import cPickle as pickle
from math import ceil
from traceback import format_exc
from contextlib import closing
from requests import get
import xbmc
import xbmcvfs
from addon import Addon
from utilities import get_duration, HachoirError

monitor = xbmc.Monitor()
addon = Addon()
# This is for potential statistic and debugging purposes
addon.log_notice('sys.platform: "{0}". platform.uname: "{1}"'.format(sys.platform, str(platform.uname())))

try:
    import libtorrent  # Try to import global module
except ImportError:
    sys.path.append(os.path.join(addon.path, 'site-packages'))
    from python_libtorrent import get_libtorrent
    libtorrent = get_libtorrent()

addon.log_debug('libtorrent version: {0}'.format(libtorrent.version))


class TorrenterError(Exception):
    """Custom exception"""
Example #16
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""


import os
import sys
import re
from addon import Addon


addon = Addon()
common = addon.common

common.create_tables()

sys.path.append(common.module_path)
sys.path.append(common.site_path)

params = addon.queries
param = params.get('site', 'main')
context = params.get('context', 3)

is_allowed_access = common.disclaim()


if is_allowed_access:
Example #17
import os
import sys
import re
from traceback import format_exc
from cStringIO import StringIO
from json import dumps
from inspect import getmembers, isfunction
import xbmc
import methods
from addon import Addon
from torrenter import Streamer, libtorrent, serve_file_from_torrent
from timers import Timer, check_seeding_limits, save_resume_data, log_torrents
from onscreen_label import OnScreenLabel
from utilities import get_mime

addon = Addon()

sys.path.append(os.path.join(addon.path, 'site-packages'))
from bottle import (route, default_app, request, template, response, debug,
                    static_file, TEMPLATE_PATH, HTTPError, HTTPResponse)

# Torrent client parameters
download_dir = addon.download_dir
resume_dir = os.path.join(addon.config_dir, 'torrents')
if not os.path.exists(resume_dir):
    os.mkdir(resume_dir)
# Initialize torrent client
torrent_client = Streamer(addon.torrent_port,
                          addon.torrent_port + 10,
                          addon.persistent,
                          resume_dir)
Example #18
# -*- coding: utf-8 -*-
try:
    from BeautifulSoup import BeautifulSoup as bss
except:
    from bs4 import BeautifulSoup as bss
try:
    import urllib2
except:
    import urllib.request as urllib2

import urllib, client, control
import re, sys
from random import randint
from log_utils import log
from addon import Addon
addon = Addon('plugin.video.croatia_od', sys.argv)


def track_ga(sc):
    try:
        cid = get_cid()
        av = addon.get_version()
        sc = urllib.quote(sc)
        client.request(
            'http://www.google-analytics.com/collect?payload_data&v=1&tid=UA-79665842-1&cid=%s&t=screenview&an=Croatia%%20On%%20Demand&av=%s&cd=%s'
            % (cid, av, sc))
    except:
        pass


def get_cid():
Example #19
    def __init__(self, __params_):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        __site_ = self.__module__
        __mode_ = __params_['mode']

        __base_url_ = 'http://yespornplease.com'
        __home_url_ = __base_url_ + '/index.php'
        __popular_url_ = __base_url_ + '/index.php?p=1&m=today'
        __search_url_ = __base_url_ + '/search.php?q='
        __false_positives_ = ['']

        if __mode_ == 'main':
            __item_list_ = [{'site': __site_, 'mode': 'list', 'title': a.language(30006), 'content': 'movies',
                             'url': __home_url_, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'list', 'title': a.language(30016), 'content': 'movies',
                             'url': __popular_url_, 'cover_url': a.image('popular.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'categories', 'title': a.language(30005), 'content': 'movies',
                             'url': __home_url_, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                             'url': __search_url_, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(), 'type': 3}]
            __item_list_.extend(a.favs_hist_menu(__site_))
            __item_list_.extend(a.extended_menu())
            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'categories':
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'categories'}))
            __item_list_ = []
            if __soup_:
                for __item_ in __soup_.findAll('a'):
                    if __item_: __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href').replace(' ', '+'),
                                                      'content': __params_['content'], 'title': __item_.string.encode('UTF-8'),
                                                      'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'list':
            if __params_['content'] == 'search':
                __item_ = a.search_input()
                if __item_: __params_['url'] = __search_url_ + __item_.replace(' ', '+')
                else: exit(1)
            elif __params_['content'] == 'goto':
                __last_item_ = re.search('p=([0-9]+)', __params_['url'])
                if __last_item_: __last_item_ = int(__last_item_.group(1))
                else: __last_item_ = 10000
                __item_ = a.page_input(__last_item_)
                if __item_: __params_['url'] = re.sub('p=[0-9]+', 'p=' + str(__item_), __params_['url']).replace(' ', '+')
                else: exit(1)
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'videos'}))
            __item_list_ = []
            __params_['mode'] = 'play'
            __params_['content'] = 'movies'
            __params_['type'] = 0
            __params_['context'] = 0
            __params_['duration'] = '7200'
            if __soup_:
                __xbmcdict_ = XBMCDict(0).update(__params_)
                for __item_ in __soup_.findAll('div', {'class': 'video-preview'}):
                    if __item_:
                        __dict_ = __xbmcdict_.copy()
                        __temp_ = __item_.find('div', {'class': 'jcarousel'}).a
                        if __temp_:
                            __temp_ = __temp_.get('href')
                            if not __temp_.startswith('http://'): __temp_ = __base_url_ + __temp_
                            __dict_['url'] = __temp_
                            __dict_['title'] = __item_.find('div', {'class': 'preview-title'}).get('title').encode('UTF-8')
                            __dict_['tvshowtitle'] = __dict_['title']
                            __dict_['originaltitle'] = __dict_['title']
                            __temp_ = __item_.find('div', {'class': 'jcarousel'}).img.get('src')
                            if __temp_.startswith('//'): __temp_ = 'http:' + __temp_
                            __dict_['cover_url'] = a.image(__temp_)
                            __dict_['thumb_url'] = __dict_['cover_url']
                            __dict_['poster'] = __dict_['cover_url']
                            __temp_ = __item_.find('div', {'class': 'preview-info-box length'}).b.string
                            if __temp_:
                                __temp_ = re.search('([0-9]+):([0-9]+):([0-9]+)', __temp_)
                                __dict_['duration'] = str((int(__temp_.group(1)) * 60 * 60) + (int(__temp_.group(2)) * 60) + int(__temp_.group(3)))
                            __dict_['sub_site'] = __site_

                            __item_list_.extend([__dict_])

                __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('body'))
                if __soup_.find('a', {'id': 'prev-page'}):
                    __item_ = __soup_.find('a', {'id': 'prev-page'}).get('href').replace(' ', '+')
                    if not __item_.startswith('http://'): __item_ = __base_url_ + __item_
                    if 'index.php' in __params_['url']: __item_ = __item_.replace('search.php', 'index.php')
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_, 'content': __params_['content'],
                                          'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                          'backdrop_url': a.art(), 'type': 3}])
                if __soup_.find('a', {'id': 'next-page'}):
                    __item_ = __soup_.find('a', {'id': 'next-page'}).get('href').replace(' ', '+')
                    if 'index.php' in __params_['url']: __item_ = __item_.replace('search.php', 'index.php')
                    if not __item_.startswith('http://'): __item_ = __base_url_ + __item_
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_, 'content': __params_['content'],
                                          'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                          'backdrop_url': a.art(), 'type': 3}])

                __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'pagination'}))
                __last_item_ = False
                if __soup_:
                    for __item_ in reversed(__soup_.findAll('a')):
                        __last_item_ = __item_.get('href')
                        if not __last_item_.startswith('http://'): __last_item_ = __base_url_ + __last_item_
                        break
                if __last_item_:
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_, 'content': 'goto',
                                          'title': a.language(30019, True), 'cover_url': a.image('goto.png', image),
                                          'backdrop_url': a.art(), 'type': 3}])

            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'play':
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('object', {'id': 'videoContainer'}))
            __item_list_ = []
            if __soup_:
                __item_ = __soup_.find('param', {'name': 'flashvars'})
                __item_ = re.search('.*?video_url=(.+?)&.*?', str(__item_))
                if __item_: __item_ = __item_.group(1)
                __xbmcdict_ = XBMCDict(0).update(__params_)
                if __item_:
                    __dict_ = __xbmcdict_.copy()
                    __dict_['url'] = __item_
                    __item_list_.extend([__dict_])
                else: a.alert(a.language(30904, True), sound=False)
            if __item_list_:
                from playback import Playback
                Playback().choose_sources(__item_list_)
            else: a.alert(a.language(30904, True), sound=False)
Example #20
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://playporn.to/'
        search_url = home_url + '?submit=Search&s='
        movies_url = home_url + 'category/xxx-movie-stream/'
        scenes_url = home_url + 'category/xxx-clips-scenes-stream/'
        false_positives = ['http://playporn.to/deutsche-milfs-anonym-sex/']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30003),
                'content': '',
                'url': home_url,
                'cover_url': a.image('recent.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'sub',
                'title': a.language(30001),
                'content': '',
                'url': movies_url,
                'cover_url': a.image('movies.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'sub',
                'title': a.language(30002),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('scenes.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'sub':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': params['url'],
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'category',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'category':
            index = 1
            if 'scenes' in params['url'].lower(): index = 2
            html = a.get_page(home_url)
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer('ul', 'nav fl'))
            item_list = []
            for item in soup.findAll('ul')[index].findAll({'a': True}):
                item_list.extend([{
                    'site': 'playporn',
                    'mode': 'list',
                    'url': item.get('href'),
                    'content': '',
                    'title': item.contents[0].encode('UTF-8'),
                    'cover_url': a.image(image, image),
                    'backdrop_url': a.art(),
                    'type': 3
                }])
            if item_list:
                a.add_items(item_list)
                a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            xbmcdict = XBMCDict(0).update(params)
            for item in soup.findAll('div', 'photo-thumb-image'):
                if not item.a.get('href') in false_positives:
                    _dict = xbmcdict.copy()
                    if 'scenes' in params['url']:
                        _dict['duration'] = '2700'
                        _dict['content'] = 'episodes'
                    _dict['url'] = item.a.get('href')
                    _dict['title'] = item.a.get('title').encode('UTF-8')
                    _dict['tvshowtitle'] = _dict['title']
                    _dict['originaltitle'] = _dict['title']
                    _dict['cover_url'] = a.image(item.img.get('src'))
                    _dict['thumb_url'] = _dict['cover_url']
                    _dict['poster'] = _dict['cover_url']
                    _dict['sub_site'] = site

                    item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', 'more_entries'))
            if soup:
                item = soup.find('a', 'previouspostslink')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image('previous.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', 'nextpostslink')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', 'last')
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            if item_list:
                a.add_items(item_list)
                a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'loopedSlider'}))
            soup = soup.find(text=lambda text: isinstance(text, Comment))
            if soup:
                soup = re.sub('&lt;', '<', soup.encode('utf-8'))
                soup = re.sub('&gt;', '>', soup)
                soup = BeautifulSoup(soup,
                                     parseOnlyThese=SoupStrainer(
                                         'div', 'video'))
                if soup:
                    item_list = []
                    xbmcdict = XBMCDict(0).update(params)
                    for item in soup.findAll('iframe'):
                        _dict = xbmcdict.copy()
                        _dict['url'] = item.get('src').replace('http://playporn.to/stream/all/?file=', '').encode('UTF-8')
                        if 'flashx.tv' in _dict['url'].lower():
                            item = re.search('hash=(.+?)&', _dict['url'])
                            if item:
                                _dict['url'] = 'http://flashx.tv/video/' + item.group(1) + '/'
                        elif 'played.to' in _dict['url'].lower():
                            item = re.search('embed-([a-zA-Z0-9]+?)-.+?html', _dict['url'])
                            if item:
                                _dict['url'] = 'http://played.to/' + item.group(1)
                        item_list.extend([_dict])
                    if item_list:
                        from playback import Playback
                        Playback().choose_sources(item_list)
                    else:
                        a.alert(a.language(30904, True), sound=False)
                else:
                    a.alert(a.language(30904, True), sound=False)
            else:
                a.alert(a.language(30904, True), sound=False)
class Playback:

    def __init__(self):
        import urlresolver
        self.addon = Addon()
        self.common = self.addon.common
        self.urlresolver = urlresolver
        self.urlresolver.plugnplay.plugin_dirs = []
        if self.common.resolvers:
            self.urlresolver.plugnplay.set_plugin_dirs(self.urlresolver.common.plugins_path, self.common.resolvers_path,
                                                       self.common.builtin_resolvers_path)
        else:
            self.urlresolver.plugnplay.set_plugin_dirs(self.urlresolver.common.plugins_path,
                                                       self.common.builtin_resolvers_path)
        self.urlresolver.plugnplay.load_plugins()

    def _dialog_sources(self, source_list):
        if not isinstance(source_list, list): return
        return self.urlresolver.choose_source(source_list)

    def _directory_sources(self, source_list, dict_list):
        from addondict import AddonDict
        item_list = []
        for index, item in enumerate(source_list):
            multipart = re.search('^playlist://[a-zA-Z0-9_]+?/([0-9]+?)/$', item.get_url())
            if multipart:
                _dict = AddonDict(0).update(dict_list[int(multipart.group(1))])
            else:
                _dict = AddonDict(0).update(dict_list[0])
            _dict['title'] = str(item.title) + ' | ' + str(_dict['title'])
            _dict['site'] = 'play_this'
            _dict['sub_site'] = ''
            _dict['mode'] = ''
            _dict['type'] = 0
            _dict['context'] = 3
            _dict['url'] = str(item.get_url())
            item_list.extend([_dict])

        if item_list:
            self.addon.add_items(item_list)
            self.addon.end_of_directory()

    def _create_source_list(self, dict_list):
        source_list = []
        part_list = []
        full_list = []
        playlist_list = []
        host = re.compile('(?:http|https)://(?:.+?\.)*?([0-9a-zA-Z_\-]+?)\.[0-9a-zA-Z]{2,}(?:/|:).*')
        old_host = ''
        separator = ' | '
        for iindex, item in enumerate(dict_list):
            playlist_host = 'playlist://%s/%s/'
            source_title = ''
            quality = separator + item.get('src_quality', 'SD')
            if item['multi-part']:
                if item['src_title']: source_title = item['src_title'] + separator
                source_host = host.search(item['url'])
                if source_host: source_host = source_host.group(1)
                else: source_host = 'UID'
                for index, part in enumerate(item['parts']):
                    part_title = source_title + source_host + separator + self.common.language(30651, True) + ' ' + str(index + 1) + quality
                    source = self.urlresolver.HostedMediaFile(url=part, title=part_title.upper())
                    if source:
                        part_list.extend([source])
                if old_host != source_host:
                    old_host = source_host
                    playlist_host = playlist_host % (old_host, iindex)
                    playlist_title = source_title + old_host + separator + self.common.language(30650, True) + quality
                    playlist_source = self.urlresolver.HostedMediaFile(url=playlist_host, title=playlist_title.upper())
                    if playlist_source:
                        playlist_list.extend([playlist_source])
            else:
                if item['src_title']: source_title = item['src_title'] + separator
                source_host = host.search(item['url'])
                if source_host: source_host = source_host.group(1)
                else: source_host = 'UID'
                source_title += source_host + quality
                source = self.urlresolver.HostedMediaFile(url=item['url'], title=source_title.upper())
                if source:
                    full_list.extend([source])
        full_list.extend(playlist_list)
        source_list.extend(full_list)
        source_list.extend(part_list)
        return [source_list, full_list]

    def _sort_sources(self, dict_list):
        usehd = self.common.usehd()
        autoplay = self.common.autoplay()
        hd = []
        hq = []
        sd = []
        lq = []
        new_dict_list = []
        for item in dict_list:
            source = True
            if item['multi-part']:
                item['url'] = 'playlist://' + item['parts'][0]
            if source:
                quality = item.get('src_quality', 'sd')
                if quality.lower() == 'hd': hd.extend([item])
                elif quality.lower() == 'hq': hq.extend([item])
                elif quality.lower() == 'sd': sd.extend([item])
                elif quality.lower() == 'lq': lq.extend([item])
        if (autoplay and usehd) or not autoplay:
            new_dict_list.extend(hd)
        new_dict_list.extend(hq)
        new_dict_list.extend(sd)
        new_dict_list.extend(lq)
        if autoplay and not usehd:
            new_dict_list.extend(hd)
        return new_dict_list

    def choose_sources(self, dict_list):
        if not isinstance(dict_list, list): raise TypeError
        for item in dict_list:
            try: item.keys()
            except: raise TypeError
        autoplay = self.common.autoplay()
        edit_url = self.common.editurl()
        dict_list = self._sort_sources(dict_list)
        if not dict_list:
            self.common.alert(self.common.language(30905, True), sound=False)
            return
        lists = self._create_source_list(dict_list)
        source_list = lists[0]
        full_list = lists[1]
        chosen = None
        img = dict_list[-1].get('cover_url', '')
        if (self.common.theme_path in img) or (self.common.media_path in img):
            img = ''
        thumb = dict_list[-1].get('thumb_url', None)
        if thumb:
            if (self.common.theme_path in thumb) or (self.common.media_path in thumb):
                img = thumb
        title = dict_list[-1].get('title', '')
        found = False
        if len(dict_list) == 1:
            if source_list:
                if len(source_list) > 0:
                    stream_url = source_list[0].resolve()
                    if stream_url:
                        found = True
                        self.play_this(stream_url, dict_list[0].get('title', ''), dict_list[0].get('cover_url', ''), self.common.usedirsources(), dict_list[0])
        elif autoplay and full_list:
            for index, chosen in enumerate(full_list):
                stream_url = chosen.resolve()
                if stream_url:
                    if not stream_url.startswith('playlist://'):
                        if edit_url: stream_url = self.addon.edit_input(stream_url)
                        found = True
                        playback_item = xbmcgui.ListItem(label=title, thumbnailImage=img, path=stream_url)
                        playback_item.setProperty('IsPlayable', 'true')
                        xbmcplugin.setResolvedUrl(self.common.handle, True, playback_item)
                        break
                    else:
                        list_index = re.search('^playlist://[a-zA-Z0-9_]+?/([0-9]+?)/$', stream_url)
                        if list_index:
                            found = True
                            self.play_list(dict_list[int(list_index.group(1))], title, img)
                            break
        elif source_list:
                if self.common.usedirsources():
                    found = True
                    self._directory_sources(source_list, dict_list)
                else:
                    chosen = self._dialog_sources(source_list)
                    if chosen:
                        idx = None
                        stream_url = chosen.resolve()
                        if stream_url:
                            if not stream_url.startswith('playlist://'):
                                if edit_url: stream_url = self.addon.edit_input(stream_url)
                                part_title = re.search('.+?(\s[Pp][Aa][Rr][Tt]\s[0-9]+)', chosen.title)
                                if part_title:
                                    title += part_title.group(1)
                                playback_item = xbmcgui.ListItem(label=title, thumbnailImage=img, path=stream_url)
                                playback_item.setProperty('IsPlayable', 'true')
                                found = True
                                xbmcplugin.setResolvedUrl(self.common.handle, True, playback_item)
                            else:
                                list_index = re.search('^playlist://[a-zA-Z0-9_]+?/([0-9]+?)/$', stream_url)
                                if list_index:
                                    found = True
                                    self.play_list(dict_list[int(list_index.group(1))], title, img)
        if not found:
            try:
                failmsg = str(stream_url.msg)
                self.common.alert(failmsg, self.common.language(30923, True))
            except:
                pass

    def play_list(self, source, title='', image=''):
        try: source.keys()
        except: raise TypeError

        if source['multi-part']:
            all_resolved = True
            playlist_item = self.addon.get_playlist(1, True)
            first_item = None
            try:
                source['parts'] = ast.literal_eval(source['parts'])
            except:
                pass
            for index, part in enumerate(source['parts']):
                this_title = title
                src_title = source['title'] + ' ' + self.common.language(30651, True) + ' ' + str(index + 1)
                if this_title:
                    this_title += ' ' + self.common.language(30651, True) + ' ' + str(index + 1)
                else:
                    this_title = src_title
                stream_url = None
                hmf = self.urlresolver.HostedMediaFile(url=part, title=src_title)
                if hmf:
                    stream_url = hmf.resolve()
                if stream_url:
                    playback_item = \
                        xbmcgui.ListItem(label=this_title, thumbnailImage=image,
                                         path=stream_url)
                    playback_item.setProperty('IsPlayable', 'true')
                    if not first_item: first_item = playback_item
                    playlist_item.add(stream_url, playback_item)
                else:
                    amsg = '%s %s %s' % (self.common.language(30991, True), str(index + 1), self.common.language(30922, True))
                    self.common.alert(amsg, self.common.language(30921, True))
                    all_resolved = False
                    break
            if all_resolved and first_item:
                xbmcplugin.setResolvedUrl(self.common.handle, True, first_item)

    def play_this(self, item, title='', image='', with_player=True, meta_dict=None):
        if not isinstance(item, str):
            try: item = str(item)
            except: return
        source = self.urlresolver.HostedMediaFile(url=item, title=title)
        stream_url = source.resolve()
        if not stream_url or not isinstance(stream_url, basestring):
            stream_url = item
        if stream_url:
            multipart = False
            if meta_dict:
                multipart = meta_dict.get('multi-part', False)
                if multipart:
                    if 'playlist://' not in item:
                        multipart = False
            if multipart:
                self.play_list(meta_dict, title, image)
            else:
                playback_item = xbmcgui.ListItem(label=title, thumbnailImage=image, path=stream_url)
                playback_item.setProperty('IsPlayable', 'true')
                if with_player:
                    core = self.common.player_core()
                    xbmc.Player(core).play(stream_url, playback_item)
                else:
                    xbmcplugin.setResolvedUrl(self.common.handle, True, playback_item)
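
A hedged sketch of driving the Playback class above (the keys mirror those the scraper examples build; the URL is a placeholder): choose_sources() expects a list of dicts, orders them by src_quality, wraps each URL in a urlresolver HostedMediaFile, and then either autoplays or shows a source dialog.

sources = [{'title': 'Some Title',
            'url': 'http://example.com/embed/abc123',
            'multi-part': False,
            'src_quality': 'HD',
            'src_title': '',
            'cover_url': '',
            'thumb_url': ''}]
Playback().choose_sources(sources)
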
Example #22
    def __init__(self, params):
        site = self.__module__
        addon = Addon()
        common = addon.common
        mode = params["mode"]
        common.update_favorites_db()

        if mode == "main":
            __item_list_ = [
                {
                    "site": site,
                    "mode": "list_favorites",
                    "title": addon.language(30884, True),
                    "content": "all",
                    "sub_site": params["sub_site"],
                    "cover_url": addon.image("all.png"),
                    "backdrop_url": addon.art(),
                    "type": 3,
                },
                {
                    "site": site,
                    "mode": "list_favorites",
                    "title": addon.language(30885, True),
                    "content": "movies",
                    "sub_site": params["sub_site"],
                    "cover_url": addon.image("movies.png"),
                    "backdrop_url": addon.art(),
                    "type": 3,
                },
                {
                    "site": site,
                    "mode": "list_favorites",
                    "title": addon.language(30886, True),
                    "content": "tvshows",
                    "sub_site": params["sub_site"],
                    "cover_url": addon.image("tvshows.png"),
                    "backdrop_url": addon.art(),
                    "type": 3,
                },
                {
                    "site": site,
                    "mode": "list_favorites",
                    "title": addon.language(30888, True),
                    "content": "episodes",
                    "sub_site": params["sub_site"],
                    "cover_url": addon.image("scenes.png"),
                    "backdrop_url": addon.art(),
                    "type": 3,
                },
            ]

            addon.add_items(__item_list_)
            addon.end_of_directory()

        elif mode == "add_favorite":
            params = AddonDict(common.addon_type()).str_update(params["__params_"])
            execute = "INSERT INTO " + common.fav_db_table + " (sub_site, content, url, __params_) VALUES (?, ?, ?, ?)"
            inserted = common.db.execute(execute, (params["sub_site"], params["content"], params["url"], str(params)))
            if common.to_bool(inserted):
                if inserted == 1:
                    addon.alert(
                        str(
                            addon.language(30891, True)
                            + " "
                            + params["title"].decode("ascii", "ignore")
                            + " "
                            + addon.language(30893, True)
                        )
                    )
                if inserted == 2:
                    addon.alert(str(params["title"].decode("ascii", "ignore") + " " + addon.language(30890, True)))

        elif mode == "delete_favorite":
            params = AddonDict(common.addon_type()).str_update(params["__params_"])
            execute = "DELETE FROM " + common.fav_db_table + " WHERE sub_site=? AND content=? AND url=?"
            deleted = common.db.execute(execute, (params["sub_site"], params["content"], params["url"]))
            if common.to_bool(deleted):
                addon.alert(
                    str(
                        addon.language(30892, True)
                        + " "
                        + params["title"].decode("ascii", "ignore")
                        + " "
                        + addon.language(30894, True)
                    )
                )
                xbmc.executebuiltin("Container.Refresh")

        elif mode == "list_favorites":
            if params["content"] == "all":
                sql_params = (params["sub_site"],)
                execute = "SELECT * FROM " + common.fav_db_table + " WHERE sub_site=?"
            else:
                sql_params = (params["sub_site"], params["content"])
                execute = "SELECT * FROM " + common.fav_db_table + " WHERE sub_site=? AND content=?"
            selected = common.db.fetchall(execute, sql_params)
            item_list = []
            if selected:
                for this_id, site, content, url, params in selected:
                    params = AddonDict(common.addon_type()).str_update(params)
                    params["context"] = 4
                    item_list.extend([params])
            if item_list:
                addon.add_items(item_list)
            addon.end_of_directory()

        elif mode == "clear_favorites":
            """
            Prompt user for confirmation prior to clearing all favorites / removing favorites table
            """
            if not params["sub_site"]:
                execute = "DROP TABLE " + common.fav_db_table
                sql_params = ""
            else:
                execute = "DELETE FROM " + common.fav_db_table + " WHERE sub_site=?"
                sql_params = (params["sub_site"],)
            clear_favs = xbmcgui.Dialog().yesno(
                common.addon_name + " - " + addon.language(30895, True),
                " ",
                addon.language(30896, True),
                nolabel=addon.language(30899, True),
                yeslabel=addon.language(30898, True),
            )
            if common.to_bool(clear_favs):
                cleared = common.db.execute(execute, sql_params)
                if common.to_bool(cleared):
                    common.db.execute("VACUUM " + common.fav_db_table)
                    addon.alert(str(addon.language(30897, True)))
                    xbmc.executebuiltin("Container.Refresh")
Example #23
    try:
        # enable button
        CANCEL_BUTTON.setEnabled(True)
    except:
        pass

    pDialog.close()
    print 'Update Repo'
    xbmc.executebuiltin("UpdateAddonRepos")
    from addon import Addon

    addon_id = 'script.icechannel'

    try:
        addon = Addon(addon_id, sys.argv)
    except:
        addon = Addon(addon_id)

    addon_path = addon.get_path()
    lib_path = os.path.join(addon_path, 'lib', 'entertainment')
    plugins_path = os.path.join(lib_path, 'plugins')
    settings_file = os.path.join(addon_path, 'resources', 'settings.xml')

    plugin_dirs = [plugins_path]
    from glob import glob
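    # Plugins can also ship in sibling extension addons named
    # '<addon_id>.extn.*'; pick up their 'plugins' folders as well.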
    plugin_dirs.extend(
        glob(
            os.path.join(os.path.dirname(addon_path), addon_id + '.extn.*',
                         'plugins')))
"""
    common XBMC Module
    Copyright (C) 2011 t0mm0

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

from addon import Addon

addon = Addon("script.module.addon.common")
addon_path = addon.get_path()
addon_version = addon.get_version()
    def __init__(self, params):
        site = self.__module__
        addon = Addon()
        common = addon.common
        mode = params['mode']
        common.update_favorites_db()

        if mode == 'main':
            __item_list_ = [{'site': site, 'mode': 'list_favorites', 'title': addon.language(30884, True), 'content': 'all',
                             'sub_site': params['sub_site'], 'cover_url': addon.image('all.png'), 'backdrop_url': addon.art(), 'type': 3},
                            {'site': site, 'mode': 'list_favorites', 'title': addon.language(30885, True), 'content': 'movies',
                             'sub_site': params['sub_site'], 'cover_url': addon.image('movies.png'), 'backdrop_url': addon.art(), 'type': 3},
                            {'site': site, 'mode': 'list_favorites', 'title': addon.language(30886, True), 'content': 'tvshows',
                             'sub_site': params['sub_site'], 'cover_url': addon.image('tvshows.png'), 'backdrop_url': addon.art(), 'type': 3},
                            {'site': site, 'mode': 'list_favorites', 'title': addon.language(30888, True), 'content': 'episodes',
                             'sub_site': params['sub_site'], 'cover_url': addon.image('scenes.png'), 'backdrop_url': addon.art(), 'type': 3}]

            addon.add_items(__item_list_)
            addon.end_of_directory()

        elif mode == 'add_favorite':
            params = AddonDict(common.addon_type()).str_update(params['__params_'])
            execute = 'INSERT INTO ' + common.fav_db_table + ' (sub_site, content, url, __params_) VALUES (?, ?, ?, ?)'
            inserted = common.db.execute(execute, (params['sub_site'], params['content'], params['url'], str(params)))
            if common.to_bool(inserted):
                if inserted == 1:
                    addon.alert(str(addon.language(30891, True) + ' ' + params['title'].decode('ascii', 'ignore') + ' ' + addon.language(30893, True)))
                if inserted == 2:
                    addon.alert(str(params['title'].decode('ascii', 'ignore') + ' ' + addon.language(30890, True)))

        elif mode == 'delete_favorite':
            params = AddonDict(common.addon_type()).str_update(params['__params_'])
            execute = 'DELETE FROM ' + common.fav_db_table + ' WHERE sub_site=? AND content=? AND url=?'
            deleted = common.db.execute(execute, (params['sub_site'], params['content'], params['url']))
            if common.to_bool(deleted):
                addon.alert(str(addon.language(30892, True) + ' ' + params['title'].decode('ascii', 'ignore') + ' ' + addon.language(30894, True)))
                xbmc.executebuiltin('Container.Refresh')

        elif mode == 'list_favorites':
            if params['content'] == 'all':
                sql_params = (params['sub_site'],)
                execute = 'SELECT * FROM ' + common.fav_db_table + ' WHERE sub_site=?'
            else:
                sql_params = (params['sub_site'], params['content'])
                execute = 'SELECT * FROM ' + common.fav_db_table + ' WHERE sub_site=? AND content=?'
            selected = common.db.fetchall(execute, sql_params)
            item_list = []
            if selected:
                for this_id, site, content, url, params in selected:
                    params = AddonDict(common.addon_type()).str_update(params)
                    params['context'] = 4
                    item_list.extend([params])
            if item_list:
                addon.add_items(item_list)
            addon.end_of_directory()

        elif mode == 'clear_favorites':
            """
            Prompt user for confirmation prior to clearing all favorites / removing favorites table
            """
            if not params['sub_site']:
                execute = 'DROP TABLE ' + common.fav_db_table
                sql_params = ''
            else:
                execute = 'DELETE FROM ' + common.fav_db_table + ' WHERE sub_site=?'
                sql_params = (params['sub_site'],)
            clear_favs = xbmcgui.Dialog().yesno(
                common.addon_name + ' - ' + addon.language(30895, True), ' ', addon.language(30896, True),
                nolabel=addon.language(30899, True), yeslabel=addon.language(30898, True))
            if common.to_bool(clear_favs):
                cleared = common.db.execute(execute, sql_params)
                if common.to_bool(cleared):
                    common.db.execute('VACUUM ' + common.fav_db_table)
                    addon.alert(str(addon.language(30897, True)))
                    xbmc.executebuiltin('Container.Refresh')
Example #26
import os
import sys
import re
import time
from traceback import format_exc
from cStringIO import StringIO
from json import dumps
from inspect import getmembers, isfunction
import xbmc
import methods
from addon import Addon
from torrenter import Streamer, libtorrent
from timers import Timer, check_seeding_limits, save_resume_data
from onscreen_label import OnScreenLabel
from utilities import get_mime

monitor = xbmc.Monitor()
addon = Addon()
_ = addon.initialize_gettext()

sys.path.append(os.path.join(addon.path, 'site-packages'))
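# bottle is imported from the copy bundled in the addon's 'site-packages'
# folder, which was just added to sys.path.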
from bottle import (route, default_app, request, template, response,
                    static_file, TEMPLATE_PATH, HTTPError, HTTPResponse)

app = default_app()

# Torrent client parameters
resume_dir = os.path.join(addon.config_dir, 'torrents')
if not os.path.exists(resume_dir):
    os.mkdir(resume_dir)
# Initialize torrent client
torrent_client = Streamer(addon.torrent_port,
                          addon.torrent_port + 10,
Example #27
try:
    import simplejson as simplejson
except ImportError:
    import json as simplejson

import urllib, re
from datetime import datetime
import time
from net import Net
from addon import Addon
from threading import Thread
try:
    import Queue as queue
except ImportError:
    import queue
net = Net()
addon = Addon('script.module.metahandler')

class TMDB(object):
    '''
    This class performs TMDB and IMDB lookups.

    The first call goes to TMDB, by IMDB ID or by Name/Year, depending on what is
    supplied. If the movie is not found, or data is missing on TMDB, a second call
    is made to IMDB to fill in the missing information.
    '''
    
    def __init__(self, api_key='', view='json', lang='en'):
        #view = yaml json xml
        self.view = view
        self.lang = lang
        self.api_key = api_key
        self.url_prefix = 'http://api.themoviedb.org/3'
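
# A minimal, self-contained sketch (not part of the metahandler snippet above)
# of the lookup order the class docstring describes: query TMDB first, then
# fall back to a second source for anything still missing. The endpoint path
# follows TMDB API v3; lookup_meta() and its fallback step are illustrative
# placeholders, not the addon's real methods.
import json
import urllib
import urllib2

def tmdb_search_movie(api_key, name, year=''):
    # Search TMDB v3 by title (and optionally year); return the first match.
    query = {'api_key': api_key, 'query': name}
    if year:
        query['year'] = year
    url = 'http://api.themoviedb.org/3/search/movie?' + urllib.urlencode(query)
    data = json.loads(urllib2.urlopen(url).read())
    results = data.get('results') or []
    return results[0] if results else None

def lookup_meta(api_key, name, year=''):
    meta = tmdb_search_movie(api_key, name, year)
    if not meta or not meta.get('overview'):
        # TMDB result missing or incomplete -- this is where the real class
        # would query IMDB to fill in the remaining fields.
        pass
    return meta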
Example #28
import os
import sys

import cloudflare
import jsunpack
import net
import resolveurl
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
from addon import Addon

net = net.Net()
addon_id = 'plugin.video.anime69'
selfAddon = xbmcaddon.Addon(id=addon_id)
datapath = xbmc.translatePath(selfAddon.getAddonInfo('profile'))
addon = Addon(addon_id, sys.argv)
fanart = xbmc.translatePath(
    os.path.join('special://home/addons/' + addon_id, 'fanart.jpg'))
icon = xbmc.translatePath(
    os.path.join('special://home/addons/' + addon_id, 'icon.png'))
superc = xbmc.translatePath(
    os.path.join('special://home/addons/' + addon_id, 'superc.png'))
animexd = xbmc.translatePath(
    os.path.join('special://home/addons/' + addon_id, 'animexd.png'))
try:
    os.mkdir(datapath)
except:
    pass
# Make sure the cookie file exists, then remember its path for later requests.
cookie_file = os.path.join(datapath, 'cookie.lwp')
open(cookie_file, 'a').close()
    def __init__(self, __params_):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        __site_ = self.__module__
        __mode_ = __params_['mode']

        __home_url_ = 'http://www.freeomovie.com/'
        __movies_url_ = __home_url_ + 'category/full-movie/'
        __scenes_url_ = __home_url_ + 'category/clips/'
        __search_url_ = __home_url_ + '/?s='
        __false_positives_ = ['http://www.freeomovie.com/category/full-movie/', 'http://www.freeomovie.com/category/clips/']

        if __mode_ == 'main':
            __item_list_ = [{'site': __site_, 'mode': 'list', 'title': a.language(30006), 'content': 'movies',
                             'url': __home_url_, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'list', 'title': a.language(30001), 'content': 'movies',
                             'url': __movies_url_, 'cover_url': a.image('movies.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'list', 'title': a.language(30002), 'content': 'movies',
                             'url': __scenes_url_, 'cover_url': a.image('scenes.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'categories', 'title': a.language(30005), 'content': 'movies',
                             'url': __home_url_, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(), 'type': 3},
                            {'site': __site_, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                             'url': __search_url_, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(), 'type': 3}]
            __item_list_.extend(a.favs_hist_menu(__site_))
            __item_list_.extend(a.extended_menu())
            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'categories':
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'multi-column-taxonomy-list'}))
            __item_list_ = []
            if __soup_:
                for __item_ in __soup_.findAll('a'):
                    if __item_:
                        if __item_.get('href') not in __false_positives_:
                            __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'),
                                                  'content': __params_['content'], 'title': __item_.string.encode('UTF-8'),
                                                  'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])

            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'list':
            if __params_['content'] == 'search':
                __item_ = a.search_input()
                if __item_: __params_['url'] = __search_url_ + __item_
                else: exit(1)
            elif __params_['content'] == 'goto':
                __last_item_ = re.search('/page/([0-9]+)/', __params_['url'])
                if __last_item_: __last_item_ = int(__last_item_.group(1))
                else: __last_item_ = 10000
                __item_ = a.page_input(__last_item_)
                if __item_: __params_['url'] = re.sub('/page/[0-9]+/', '/page/' + str(__item_) + '/', __params_['url'])
                else: exit(1)
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'content'}))
            __item_list_ = []
            __params_['mode'] = 'play'
            __params_['content'] = 'movies'
            __params_['type'] = 0
            __params_['context'] = 0
            __params_['duration'] = '7200'
            if __soup_:
                __xbmcdict_ = XBMCDict(0).update(__params_)
                for __item_ in __soup_.findAll('div', {'class': 'postbox'}):
                    if __item_:
                        if __item_.h2.a.get('href') not in __false_positives_:
                            __dict_ = __xbmcdict_.copy()
                            if __scenes_url_ in __params_['url']:
                                __dict_['duration'] = '1500'
                                __dict_['content'] = 'episodes'
                            __dict_['url'] = __item_.h2.a.get('href')
                            __dict_['title'] = __item_.h2.a.get('title').encode('UTF-8')
                            __dict_['tvshowtitle'] = __dict_['title']
                            __dict_['originaltitle'] = __dict_['title']
                            __dict_['cover_url'] = a.image(__item_.img.get('src'))
                            __dict_['thumb_url'] = __dict_['cover_url']
                            __dict_['poster'] = __dict_['cover_url']
                            __dict_['sub_site'] = __site_

                            __item_list_.extend([__dict_])
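            # Pagination: wp-pagenavi supplies 'previouspostslink', 'nextpostslink'
            # and 'last' links; if no explicit 'last' link exists, fall back to the
            # last (or second-to-last) anchor so a 'go to page' entry can be built.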
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'wp-pagenavi'}))
            __last_item_ = False
            if __soup_:
                for __item_ in __soup_.findAll('a', href=True):
                    if __item_:
                        if __item_.get('class') == 'previouspostslink':
                            __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'],
                                                  'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                                  'backdrop_url': a.art(), 'type': 3}])
                        if __item_.get('class') == 'nextpostslink':
                            __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'],
                                                  'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                                  'backdrop_url': a.art(), 'type': 3}])
                        if __item_.get('class') == 'last':
                            __last_item_ = __item_.get('href')
                if not __last_item_:
                    try:
                        if not __soup_.find('a', {'class': 'nextpostslink'}): __last_item_ = __soup_.findAll('a', href=True)[-1].get('href')
                        else: __last_item_ = __soup_.findAll('a', href=True)[-2].get('href')
                    except: pass
                if __last_item_:
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_, 'content': 'goto',
                                          'title': a.language(30019, True), 'cover_url': a.image('goto.png', image),
                                          'backdrop_url': a.art(), 'type': 3}])

            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'play':
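            # Source extraction: the 'countrytabs' hoster list encodes each
            # target in a 'myURL[]=' query argument; if that list is empty,
            # fall back to plain links and iframes inside the 'videosection'
            # block, then hand everything to Playback for source selection.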
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'id': 'countrytabs'}))
            __item_list_ = []
            if __soup_:
                for __index_, __items_ in enumerate(__soup_.findAll('a', href=True)):
                    __item_ = ''
                    if not __items_.get('id') == 'jpg':
                        __item_ = __items_.get('href')
                        __item_ = re.search('.*myURL\[\]=(.+)$', __item_, re.DOTALL)
                        if __item_: __item_ = re.sub('&tab=[0-9]+', '', __item_.group(1))
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __dict_['count'] = __index_
                            __item_list_.extend([__dict_])
                if __item_list_:
                    from playback import Playback
                    Playback().choose_sources(__item_list_)
                else:
                    __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'videosection'}))
                    if __soup_:
                        for __items_ in __soup_.findAll('a', href=True):
                            __item_ = __items_.get('href')
                            __xbmcdict_ = XBMCDict(0).update(__params_)
                            if __item_:
                                __dict_ = __xbmcdict_.copy()
                                __dict_['url'] = __item_
                                __item_list_.extend([__dict_])
                        for __items_ in __soup_.findAll('iframe', src=True):
                            __item_ = __items_.get('src')
                            __xbmcdict_ = XBMCDict(0).update(__params_)
                            if __item_:
                                __dict_ = __xbmcdict_.copy()
                                __dict_['url'] = __item_
                                __item_list_.extend([__dict_])
                    else: a.alert(a.language(30904, True), sound=False)
                    if __item_list_:
                        from playback import Playback
                        Playback().choose_sources(__item_list_)
                    else: a.alert(a.language(30904, True), sound=False)
            else: a.alert(a.language(30904, True), sound=False)
    def __init__(self, __params_):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        __site_ = self.__module__
        __mode_ = __params_['mode']

        __home_url_ = 'http://pornhardx.com/'
        __movies_url_ = __home_url_ + 'category/full-movie/'
        __scenes_url_ = __home_url_ + 'video/'
        __search_url_ = __home_url_ + '?s='
        __false_positives_ = ['http://pornhardx.com/video', 'http://pornhardx.com/video/?order=viewed',
                              'http://pornhardx.com/video/?order=liked', 'http://pornhardx.com/']

        if __mode_ == 'main':
            __item_list_ = []
            __item_list_.extend([{'site': __site_, 'mode': 'list', 'title': a.language(30006), 'content': 'movies',
                                  'url': __scenes_url_, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3}])
            __item_list_.extend([{'site': __site_, 'mode': 'list', 'title': a.language(30003), 'content': 'movies',
                                  'url': __home_url_, 'cover_url': a.image('recent.png', image), 'backdrop_url': a.art(), 'type': 3}])
            __item_list_.extend([{'site': __site_, 'mode': 'categories', 'title': a.language(30005), 'content': 'movies',
                                  'url': __scenes_url_, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(), 'type': 3}])
            __item_list_.extend([{'site': __site_, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                                  'url': __search_url_, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(), 'type': 3}])
            __item_list_.extend(a.favs_hist_menu(__site_))
            __item_list_.extend(a.extended_menu())
            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'categories':
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'id': 'navigation-wrapper'}))
            __item_list_ = []
            if __soup_:
                for __item_ in __soup_.findAll('a', {'href': True}):
                    if __item_:
                        if __item_.get('href') not in __false_positives_:
                            if 'full-movie' in __params_['url']:
                                if __movies_url_ != __item_.get('href') and 'full-movie' in __item_.get('href'):
                                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'),
                                                          'content': __params_['content'], 'title':__item_.contents[0].encode('UTF-8'),
                                                          'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
                            elif 'full-movie' not in __item_.get('href'):
                                __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'),
                                                      'content': __params_['content'], 'title':__item_.contents[0].encode('UTF-8'),
                                                      'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'list':
            if __params_['content'] == 'search':
                __item_ = a.search_input()
                if __item_: __params_['url'] = __search_url_ + __item_
                else: exit(1)
            elif __params_['content'] == 'goto':
                __last_item_ = re.search('/page/([0-9]+)/', __params_['url'])
                if __last_item_: __last_item_ = int(__last_item_.group(1))
                else: __last_item_ = 10000
                __item_ = a.page_input(__last_item_)
                if __item_: __params_['url'] = re.sub('/page/[0-9]+/', '/page/' + str(__item_) + '/', __params_['url'])
                else: exit(1)
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': re.compile('col-sm-8(?:\s*main-content)*')}))
            __item_list_ = []
            __params_['mode'] = 'play'
            __params_['content'] = 'movies'
            __params_['type'] = 0
            __params_['context'] = 0
            __params_['duration'] = '7200'
            if __soup_:
                __xbmcdict_ = XBMCDict(0).update(__params_)
                for __item_ in __soup_.findAll('div', {'class': re.compile('.*(?:col-xs-6 item|post type-post status-publish).*')}):
                    if __item_:
                        if __item_.a.get('href') not in __false_positives_:
                            __dict_ = __xbmcdict_.copy()
                            if 'full-movie' not in __params_['url']:
                                __dict_['duration'] = '1500'
                                __dict_['content'] = 'episodes'
                            if __item_.h3:
                                __dict_['url'] = __item_.h3.a.get('href')
                                if __item_.h3.a.contents: __dict_['title'] = __item_.h3.a.contents[0].encode('UTF-8')
                                else: __dict_['title'] = 'Untitled'
                            elif __item_.h2:
                                __dict_['url'] = __item_.h2.a.get('href')
                                if __item_.h2.a.contents: __dict_['title'] = __item_.h2.a.contents[0].encode('UTF-8')
                                else: __dict_['title'] = 'Untitled'
                            __dict_['tvshowtitle'] = __dict_['title']
                            __dict_['originaltitle'] = __dict_['title']
                            __dict_['cover_url'] = a.image(__item_.img.get('src'))
                            __dict_['thumb_url'] = __dict_['cover_url']
                            __dict_['poster'] = __dict_['cover_url']
                            __dict_['sub_site'] = __site_

                            __item_list_.extend([__dict_])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'pagination'}))
            if __soup_.li:
                __item_ = __soup_.find('a', {'class': 'prev page-numbers'})
                if __item_:
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'],
                                          'title': a.language(30017, True), 'cover_url': a.image(image, image),
                                          'backdrop_url': a.art(), 'type': 3}])
                __item_ = __soup_.find('a', {'class': 'next page-numbers'})
                if __item_:
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.get('href'), 'content': __params_['content'],
                                          'title': a.language(30018, True), 'cover_url': a.image(image, image),
                                          'backdrop_url': a.art(), 'type': 3}])
                    if len(__soup_.findAll('a')) > 2:
                        __last_item_= __soup_.find('a', {'class': 'next page-numbers'}).parent.previousSibling.a.get('href')
                        __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_, 'content': 'goto',
                                              'title': a.language(30019, True), 'cover_url': a.image(image, image),
                                              'backdrop_url': a.art(), 'type': 3}])
                else:
                    __item_ = __soup_.find('span', {'class': 'page-numbers current'})
                    if __item_:
                        if len(__soup_.findAll('a')) > 2:
                            __last_item_ = __soup_.find('span', {'class': 'page-numbers current'}).parent.previousSibling.a.get('href')
                            __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __last_item_, 'content': 'goto',
                                                  'title': a.language(30019, True), 'cover_url': a.image('goto.png', image),
                                                  'backdrop_url': a.art(), 'type': 3}])
            else:
                __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('ul', {'class': 'pager'}))
                __item_ = __soup_.find('li', {'class': 'previous'})
                if __item_:
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.previousSibling.get('href'), 'content': __params_['content'],
                                          'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                          'backdrop_url': a.art(), 'type': 3}])
                __item_ = __soup_.find('li', {'class': 'next'})
                if __item_:
                    __item_list_.extend([{'site': __site_, 'mode': 'list', 'url': __item_.previousSibling.get('href'), 'content': __params_['content'],
                                          'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                          'backdrop_url': a.art(), 'type': 3}])
            a.add_items(__item_list_)
            a.end_of_directory()

        elif __mode_ == 'play':
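            # Several extraction passes: FlashVars 'proxy.link' values from the
            # flash player, direct <video>/<source> tags, and videomega.tv
            # script/iframe embeds inside the two player wrappers; everything
            # found is passed to Playback for source selection.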
            __html_ = a.get_page(__params_['url'])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('object', {'id': re.compile('flashplayer.+')}))
            __item_ = ''
            __item_list_ = []
            if __soup_:
                for __item_ in __soup_.findAll('param', {'name': 'FlashVars'}):
                    __item_ = __item_.get('value')
                    __item_ = re.search('.*?proxy\.link=(.+?)&(?:proxy|skin).*?', __item_)
                    if __item_:
                        __item_ = __item_.group(1)
                        # skip sources that are already in the list
                        if any(__d_['url'] == __item_ for __d_ in __item_list_): __item_ = ''
                    else: __item_ = ''
                    __xbmcdict_ = XBMCDict(0).update(__params_)
                    if __item_:
                        __dict_ = __xbmcdict_.copy()
                        __dict_['url'] = __item_
                        __item_list_.extend([__dict_])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('video'))
            __item_ = ''
            if __soup_:
                for __item_ in __soup_.findAll('source'):
                    __src_ = __item_.get('src')
                    if __src_:
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_ and ('..' not in __src_):
                            __dict_ = __xbmcdict_.copy()
                            try: __dict_['src_title'] = __item_.get('data-res') + 'p'
                            except: pass
                            __dict_['url'] = __src_
                            __item_list_.extend([__dict_])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': 'videoWrapper player'}))
            __item_ = ''
            if __soup_:
                for __script_ in __soup_.findAll('script'):
                    __item_ = ''
                    if __script_.get('src'):
                        if 'http://videomega.tv/validatehash.php' in __script_['src']: __item_ = __script_['src']
                        elif 'ref=' in __script_.get('src'):
                            __temp_ = re.search('.*ref=[\'"](.+?)[\'"]', __script_.get('src'))
                            if __temp_: __item_ = 'http://videomega.tv/iframe.php?ref=' + __temp_.group(1)
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __item_list_.extend([__dict_])
                for __iframe_ in __soup_.findAll('iframe'):
                    __item_ = ''
                    if __iframe_.get('src'):
                        if 'http://videomega.tv/validatehash.php' in __iframe_['src']: __item_ = __iframe_['src']
                        elif 'ref=' in __iframe_.get('src'):
                            __temp_ = re.search('.*ref=[\'"](.+?)[\'"]', __iframe_.get('src'))
                            if __temp_: __item_ = 'http://videomega.tv/iframe.php?ref=' + __temp_.group(1)
                        else: __item_ = __iframe_.get('src')
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __item_list_.extend([__dict_])
            __soup_ = BeautifulSoup(__html_, parseOnlyThese=SoupStrainer('div', {'class': re.compile('player player-small.*')}))
            __item_ = ''
            if __soup_:
                for __iframe_ in __soup_.findAll('iframe'):
                    __item_ = ''
                    if __iframe_.get('src'):
                        if 'http://videomega.tv/validatehash.php' in __iframe_['src']: __item_ = __iframe_['src']
                        elif 'ref=' in __iframe_.get('src'):
                            __temp_ = re.search('.*ref=[\'"](.+?)[\'"]', __iframe_.get('src'))
                            if __temp_: __item_ = 'http://videomega.tv/iframe.php?ref=' + __temp_.group(1)
                        else: __item_ = __iframe_.get('src')
                        __xbmcdict_ = XBMCDict(0).update(__params_)
                        if __item_:
                            __dict_ = __xbmcdict_.copy()
                            __dict_['url'] = __item_
                            __item_list_.extend([__dict_])
            if __item_list_:
                from playback import Playback
                Playback().choose_sources(__item_list_)
            else: a.alert(a.language(30904, True), sound=False)
Example #31
'''
    Ice Channel
'''
import os    
import sys
from addon import Addon

addon_id = 'script.icechannel'

try:
    addon = Addon(addon_id, sys.argv)
except:
    addon = Addon(addon_id)

addon_path = addon.get_path()
addon_version = addon.get_version()

lib_path = os.path.join(addon_path, 'lib', 'entertainment')
plugins_path = os.path.join(lib_path, 'plugins')
settings_file = os.path.join(addon_path, 'resources', 'settings.xml')

profile_path = addon.get_profile()

theme_name = addon.get_setting('theme')
theme_type = addon.get_setting(theme_name+'_themetype')
if theme_type == 'online':
    icon_path = addon.get_setting(theme_name+'_themeurl')
else:
    theme_addon = Addon( addon.get_setting(theme_name+'_themeaddon') )
    icon_path = os.path.join(theme_addon.get_path(), 'theme')
Example #32
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://qwertty.net'
        search_url = home_url + '/index.php?do=search&subaction=search&full_search=0&search_start=0&result_from=1&story='
        false_positives = ['']

        if mode == 'main':
            item_list = [{'site': site, 'mode': 'list', 'title': a.language(30006), 'content': '',
                          'url': home_url, 'cover_url': a.image('all.png', image), 'backdrop_url': a.art(), 'type': 3},
                         {'site': site, 'mode': 'categories', 'title': a.language(30005), 'content': '',
                          'url': home_url, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                          'url': search_url, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(),
                          'type': 3}]
            item_list.extend(a.favs_hist_menu(site))
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'navi-wrap'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item: item_list.extend([{'site': site, 'mode': 'list', 'url': home_url + item.get('href'),
                                                'content': '', 'title': item.string.encode('UTF-8'),
                                                'cover_url': a.image(image, image), 'backdrop_url': a.art(),
                                                'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
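                # Search results are paged with 'search_start'/'result_from'
                # query arguments, while category listings use '/page/N/' URLs;
                # rewrite whichever form the current URL uses.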
                if 'do=search' in params['url']:
                    last_item = re.search('search_start=([0-9]+)', params['url'])
                else:
                    last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    if 'do=search' in params['url']:
                        page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + str(item), params['url'])
                        params['url'] = re.sub(r'(result_from=)([0-9]+)', '\g<01>' + str(int(str(item)) * 10 + 1), page)
                    else:
                        params['url'] = re.sub('/page/[0-9]+/', '/page/' + str(item) + '/', params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'id': 'dle-content'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'short-item'}):
                    if item:
                        _dict = xbmcdict.copy()
                        _dict['url'] = item.a.get('href')
                        _dict['title'] = item.a.img.get('alt').encode('UTF-8')
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        item = home_url + item.a.img.get('src').replace('/thumbs', '')
                        _dict['cover_url'] = a.image(item)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['sub_site'] = site

                        item_list.extend([_dict])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'bottom-nav'}))
            if soup:
                last_item = len(soup.findAll('a', href=True)) - 1
                for index, item in enumerate(soup.findAll('a', href=True)):
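                    # Pager links with href="#" carry the target page in an
                    # onclick list_submit(N) call; translate N back into
                    # search_start/result_from URL arguments.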
                    page = ''
                    if item:
                        if index == 0 and item.string.encode('UTF-8') != 'Back': last_item -= 1
                        if item.string.encode('UTF-8') == 'Back':
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend(
                                    [{'site': site, 'mode': 'list', 'url': page, 'content': params['content'],
                                      'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                      'backdrop_url': a.art(), 'type': 3}])
                        if item.string.encode('UTF-8') == 'Next':
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend(
                                    [{'site': site, 'mode': 'list', 'url': page, 'content': params['content'],
                                      'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                      'backdrop_url': a.art(), 'type': 3}])
                        if index == last_item:
                            if item.get('href') == '#':
                                temp = re.search('.*list_submit\(([0-9]+)\).*', item.get('onclick'))
                                if temp:
                                    page = re.sub(r'(search_start=)([0-9]+)', '\g<01>' + temp.group(1), params['url'])
                                    page = re.sub(r'(result_from=)([0-9]+)',
                                                  '\g<01>' + str(int(temp.group(1)) * 10 + 1), page)
                            else:
                                page = item.get('href')
                            if page:
                                item_list.extend([{'site': site, 'mode': 'list', 'url': page, 'content': 'goto',
                                                   'title': a.language(30019, True),
                                                   'cover_url': a.image('goto.png', image),
                                                   'backdrop_url': a.art(), 'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'full-text clearfix desc-text'}))
            item = soup.find('a')
            item_list = []
            xbmcdict = XBMCDict(0).update(params)
            if item:
                _dict = xbmcdict.copy()
                _dict['url'] = item.get('href')
                item_list.extend([_dict])
            else:
                a.alert(a.language(30904, True), sound=False)
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #33
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://pornhardx.com/'
        movies_url = home_url + 'category/full-movie/'
        scenes_url = home_url + 'video/'
        search_url = home_url + '?s='
        false_positives = [
            'http://pornhardx.com/video',
            'http://pornhardx.com/video/?order=viewed',
            'http://pornhardx.com/video/?order=liked', 'http://pornhardx.com/'
        ]

        if mode == 'main':
            item_list = []
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30003),
                'content': '',
                'url': home_url,
                'cover_url': a.image('recent.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend([{
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }])
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'navigation-wrapper'}))
            item_list = []
            if soup:
                for item in soup.findAll('a', {'href': True}):
                    if item:
                        if item.get('href') not in false_positives:
                            if 'full-movie' in params['url']:
                                if movies_url != item.get('href') and 'full-movie' in item.get('href'):
                                    item_list.extend([{
                                        'site': site,
                                        'mode': 'list',
                                        'url': item.get('href'),
                                        'content': '',
                                        'title': item.contents[0].encode('UTF-8'),
                                        'cover_url': a.image(image, image),
                                        'backdrop_url': a.art(),
                                        'type': 3
                                    }])
                            elif 'full-movie' not in item.get('href'):
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': item.get('href'),
                                    'content': '',
                                    'title': item.contents[0].encode('UTF-8'),
                                    'cover_url': a.image(image, image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(
                html,
                parseOnlyThese=SoupStrainer(
                    'div',
                    {'class': re.compile('col-sm-8(?:\s*main-content)*')}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': re.compile(
                        '.*(?:col-xs-6 item|post type-post status-publish).*')}):
                    if item:
                        if item.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            if 'full-movie' not in params['url']:
                                _dict['duration'] = '1500'
                                _dict['content'] = 'episodes'
                            if item.h3:
                                _dict['url'] = item.h3.a.get('href')
                                if item.h3.a.contents:
                                    _dict['title'] = item.h3.a.contents[0].encode('UTF-8')
                                else:
                                    _dict['title'] = 'Untitled'
                            elif item.h2:
                                _dict['url'] = item.h2.a.get('href')
                                if item.h2.a.contents:
                                    _dict['title'] = item.h2.a.contents[0].encode('UTF-8')
                                else:
                                    _dict['title'] = 'Untitled'
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            _dict['cover_url'] = a.image(item.img.get('src'))
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site

                            item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'ul', {'class': 'pagination'}))
            if soup.li:
                item = soup.find('a', {'class': 'prev page-numbers'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image(image, image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('a', {'class': 'next page-numbers'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image(image, image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                    if len(soup.findAll('a')) > 2:
                        last_item = soup.find('a', {
                            'class': 'next page-numbers'
                        }).parent.previousSibling.a.get('href')
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': last_item,
                            'content': 'goto',
                            'title': a.language(30019, True),
                            'cover_url': a.image(image, image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                else:
                    item = soup.find('span', {'class': 'page-numbers current'})
                    if item:
                        if len(soup.findAll('a')) > 2:
                            last_item = soup.find(
                                'span', {'class': 'page-numbers current'}
                            ).parent.previousSibling.a.get('href')
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': last_item,
                                'content': 'goto',
                                'title': a.language(30019, True),
                                'cover_url': a.image('goto.png', image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])
            else:
                soup = BeautifulSoup(html,
                                     parseOnlyThese=SoupStrainer(
                                         'ul', {'class': 'pager'}))
                item = soup.find('li', {'class': 'previous'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.previousSibling.get('href'),
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image('previous.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                item = soup.find('li', {'class': 'next'})
                if item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item.previousSibling.get('href'),
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
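            # 'play': scrape the video page for embedded players (FlashVars params,
            # <video>/<source> tags and videomega script/iframe embeds), then hand
            # the collected candidates to Playback for source selection.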
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item = ''
            item_list = []
            if soup:
                for item in soup.findAll('param', {'name': 'FlashVars'}):
                    item = item.get('value')
                    item = re.search('.*?proxy\.link=(.+?)&(?:proxy|skin).*?',
                                     item)
                    if item:
                        item = item.group(1)
                    else:
                        item = ''
                    xbmcdict = XBMCDict(0).update(params)
                    # only queue URLs that are not already in the list
                    if item and not any(d.get('url') == item for d in item_list):
                        _dict = xbmcdict.copy()
                        _dict['url'] = item
                        item_list.extend([_dict])
                item = ''
                for item in soup.findAll('video'):
                    # <video> tags with nested <source> children
                    for source in item.findAll('source'):
                        src = source.get('src')
                        if src and ('..' not in src):
                            xbmcdict = XBMCDict(0).update(params)
                            _dict = xbmcdict.copy()
                            if source.get('data-res'):
                                _dict['src_title'] = source.get('data-res') + 'p'
                            _dict['url'] = src
                            item_list.extend([_dict])
                    # <video> tags that carry the stream directly in their src attribute
                    src = item.get('src')
                    if src and ('..' not in src):
                        xbmcdict = XBMCDict(0).update(params)
                        _dict = xbmcdict.copy()
                        if item.get('data-res'):
                            _dict['src_title'] = item.get('data-res') + 'p'
                        _dict['url'] = src
                        item_list.extend([_dict])
                for script in soup.findAll('script'):
                    item = ''
                    if script.get('src'):
                        if 'http://videomega.tv/validatehash.php' in script['src']:
                            item = script['src']
                        elif 'ref=' in script.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             script.get('src'))
                            if temp:
                                item = 'http://videomega.tv/iframe.php?ref=' + temp.group(1)
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
                for iframe in soup.findAll('iframe'):
                    item = ''
                    if iframe.get('src'):
                        if 'http://videomega.tv/validatehash.php' in iframe['src']:
                            item = iframe['src']
                        elif 'ref=' in iframe.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             iframe.get('src'))
                            if temp:
                                item = 'http://videomega.tv/iframe.php?ref=' + temp.group(1)
                        else:
                            item = iframe.get('src')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])

            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #34
0
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import os
import sys
import re
from addon import Addon

addon = Addon()
common = addon.common

common.create_tables()

sys.path.append(common.module_path)
sys.path.append(common.site_path)

params = addon.queries
param = params.get('site', 'main')
context = params.get('context', 3)

is_allowed_access = common.disclaim()

if is_allowed_access:
    if common.history_size_limit() != 0:
Example #35
0
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://xtheatre.net/'
        search_url = home_url + '?s='
        false_positives = [
            'http://watchxxxhd.net/watch-full-movies-hd/',
            'http://watchxxxhd.net', 'http://watchxxxhd.net/category/movies/',
            'http://watchxxxhd.net/category/ategorized222/',
            'http://watchxxxhd.net/watch-full-movies-hd/'
        ]

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url + '?filtre=date&cat=0',
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url + 'categories/',
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
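            # 'categories': parse the <ul class="listing-cat"> grid, skip known false
            # positives and only keep categories that report a non-zero video count.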
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'ul', {'class': 'listing-cat'}))
            item_list = []
            if soup:
                for item in soup.findAll('li'):
                    if item:
                        if item.a.get('href') not in false_positives:
                            try:
                                vidcount = item.findAll(
                                    'span',
                                    {'class': 'nb_cat border-radius-5'
                                     })[0].string.encode('UTF-8')
                                vidcount = re.sub('\svideo[s]*', '', vidcount)
                            except:
                                vidcount = '0'
                            if vidcount and vidcount != '0':
                                img = item.find('img')
                                if img:
                                    # prefer the lazy-load attribute, fall back to src
                                    img = img.get('data-lazy-src') or img.get('src')
                                if not img:
                                    img = ''
                                title = item.a.get('title').encode('UTF-8') + ' (%s)' % vidcount
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': item.a.get('href'),
                                    'content': '',
                                    'title': title,
                                    'cover_url': a.image(img, image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
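            # 'list': resolve search/goto input into a URL, scrape the video listing
            # (title, thumbnail, plot) and append previous/next/last paging entries.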
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(
                html,
                parseOnlyThese=SoupStrainer(
                    'ul', {'class': 'listing-videos listing-extract'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll(
                        'li', {'class': 'border-radius-5 box-shadow'}):
                    if item:
                        if item.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item.a.get('href')
                            _dict['title'] = item.a.get('title').encode('UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            img = item.find('img')
                            if img:
                                # prefer the lazy-load attribute, fall back to src
                                img = img.get('data-lazy-src') or img.get('src')
                            if not img:
                                img = ''
                            _dict['cover_url'] = a.image(img)
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site
                            plot = item.find('div', {'class': 'right'})
                            if plot:
                                plot = plot.p.contents[0].encode('utf-8')
                                _dict['plot'] = plot
                                _dict['plotoutline'] = plot
                            item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'pagination'}))
            last_item = False
            if soup:
                for item in soup.findAll('a'):
                    # a "Last »" link or class="last" points at the final page
                    if (item.string and item.string.encode('UTF-8') == 'Last »') \
                            or (item.get('class') == 'last'):
                        last_item = item.get('href')
                        break
                if last_item is False:
                    for last_item in soup.findAll('a', {'class': 'inactive'}):
                        pass
                    if last_item:
                        last_item = last_item.get('href')
                item = soup.find('span', {'class': 'current'})
                if item:
                    if item.parent:
                        item = item.parent
                        if item.previousSibling:
                            if item.previousSibling.find('a'):
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': item.previousSibling.a.get('href'),
                                    'content': params['content'],
                                    'title': a.language(30017, True),
                                    'cover_url': a.image('previous.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
                        if item.nextSibling:
                            if item.nextSibling.find('a'):
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': item.nextSibling.a.get('href'),
                                    'content': params['content'],
                                    'title': a.language(30018, True),
                                    'cover_url': a.image('next.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
            if last_item:
                item_list.extend([{
                    'site': site,
                    'mode': 'list',
                    'url': last_item,
                    'content': 'goto',
                    'title': a.language(30019, True),
                    'cover_url': a.image('goto.png', image),
                    'backdrop_url': a.art(),
                    'type': 3
                }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
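            # 'play': gather candidate streams from videomega scripts, embedded
            # iframes and the video-infos block, then let the user pick a source.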
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'video-embed'}))
            item_list = []
            if soup:
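                # the tag-name regex also matches underscore-obfuscated variants
                # such as <s_c_r_i_p_t>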
                for script in soup.findAll(re.compile('s_*c_*r_*i_*p_*t')):
                    item = ''
                    if script.get('src'):
                        if 'http://videomega.tv/validatehash.php' in script['src']:
                            item = script['src']
                        elif 'ref=' in script.get('src'):
                            temp = re.search('.*ref=[\'"](.+?)[\'"]',
                                             script.get('src'))
                            if temp:
                                item = 'http://videomega.tv/iframe.php?ref=' + temp.group(1)
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
                if soup.find('iframe', src=True):
                    item = ''
                    for iframe in soup.findAll('iframe', src=True):
                        if iframe.get('data-lazy-src'):
                            item = iframe.get('data-lazy-src')
                            r = re.search('.+old=(.+)$', item)
                            if r:
                                item = r.group(1)
                        else:
                            item = iframe.get('src').replace('\\', '')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'video-infos'}))
            if soup:
                item = ''
                for p in soup.findAll('p'):
                    if p.iframe:
                        item = p.iframe.get('src')
                        xbmcdict = XBMCDict(0).update(params)
                        if item:
                            _dict = xbmcdict.copy()
                            _dict['url'] = item
                            item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #36
0
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        base_url = 'http://filmikz.ch'
        home_url = base_url + '/index.php?genre=14'
        search_url = home_url + '&search='
        false_positives = ['#']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
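            # 'list': resolve search/goto input, scrape the movie tables (title,
            # thumbnail, cast) and append previous/next/last paging entries.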
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('pg=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                last_item = int(last_item / 10)
                item = a.page_input(last_item)
                if item:
                    item = str(int(item) * 10)
                    params['url'] = re.sub('pg=[0-9]+', 'pg=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'td', {'width': '490'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            params['sub_site'] = site
            if soup:
                addondict = AddonDict(0).update(params)
                for item in soup.findAll('table', {
                        'width': '100%',
                        'height': '155'
                }):
                    _dict = addondict.copy()
                    ahref = item.find('a', {'href': True})
                    if ahref:
                        url = ahref.get('href')
                        if not url.startswith('http://'):
                            url = base_url + url
                        _dict['url'] = url
                        data = item.find('strong')
                        _dict['title'] = str(data.contents[0]).rstrip(' XXX :')
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        img = item.find('img')
                        if img:
                            img = img.get('src')
                            if not img.startswith('http://'):
                                img = base_url + '/' + img
                        else:
                            img = ''
                        _dict['cover_url'] = a.image(img)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        cast = item.find('p',
                                         text=re.compile('[Ss]tarring:.+'))
                        if cast:
                            _dict['plot'] = str(cast)
                            _dict['plotoutline'] = _dict['plot']
                            cast = re.search('[Ss]tarring:\s*(.+?)\s*\.+',
                                             str(cast))
                            if cast:
                                cast = cast.group(1)
                                _dict['cast'] = cast.split(', ')
                        item_list.extend([_dict])
                pages = BeautifulSoup(html,
                                      parseOnlyThese=SoupStrainer(
                                          'table', {'width': '250'}))
                if pages:
                    previouspage = None
                    nextpage = None
                    lastpage = None
                    for ahref in pages.findAll('a', {'href': True}):
                        astr = ahref.string.encode('utf-8')
                        if astr == '‹‹ ':
                            previouspage = base_url + '/' + ahref.get('href')
                        elif astr == '››':
                            nextpage = base_url + '/' + ahref.get('href')
                        elif astr == ' Last ':
                            lastpage = base_url + '/' + ahref.get('href')
                            last_item = re.search('pg=(-*[0-9]+)',
                                                  str(lastpage))
                            if last_item:
                                last_item = int(last_item.group(1))
                                if last_item < 10:
                                    lastpage = None
                    if previouspage:
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': previouspage,
                            'content': params['content'],
                            'title': a.language(30017, True),
                            'cover_url': a.image('previous.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                    if nextpage:
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': nextpage,
                            'content': params['content'],
                            'title': a.language(30018, True),
                            'cover_url': a.image('next.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
                    if lastpage:
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': lastpage,
                            'content': 'goto',
                            'title': a.language(30019, True),
                            'cover_url': a.image('goto.png', image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
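            # 'play': resolve each host button via its popUp() link, collecting
            # single-file sources first, then multi-part sources grouped per host.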
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
            item_list = []
            _bad_hosts = ['NowDownload', 'ePornik']
            if soup:
                buttons = soup.findAll('input', {
                    'type': 'button',
                    'onclick': True
                })
                if buttons:
                    addondict = AddonDict(0).update(params)
                    for button in buttons:
                        value = button.get('value')
                        newhost = re.search('.+?-([a-zA-Z]+)', value)
                        if newhost:
                            newhost = newhost.group(1)
                        else:
                            newhost = ''
                        if newhost not in _bad_hosts:
                            item = button.get('onclick')
                            item = re.sub(
                                'javascript:popUp\([\'"](.+?)[\'"]\);*',
                                '\g<01>', item)
                            item = base_url + item
                            value = button.get('value')
                            if not re.search('[Pp]art ', value):
                                try:
                                    thtml = a.get_page(item)
                                    tsoup = BeautifulSoup(thtml)
                                    source = tsoup.find('frame')
                                    if source:
                                        source = source.get('src')
                                        if 'ads.php' not in source:
                                            _dict = addondict.copy()
                                            _dict['url'] = source
                                            item_list.extend([_dict])
                                except:
                                    continue
                    parts = []
                    oldhost = ''
                    _dict = addondict.copy()
                    _dict['multi-part'] = True
                    for button in buttons:
                        value = button.get('value')
                        newhost = re.search('.+?-([a-zA-Z]+)', value)
                        if newhost:
                            newhost = newhost.group(1)
                        else:
                            newhost = ''
                        if newhost not in _bad_hosts:
                            item = button.get('onclick')
                            item = re.sub(
                                'javascript:popUp\([\'"](.+?)[\'"]\);*',
                                '\g<01>', item)
                            item = base_url + item
                            if re.search('[Pp]art ', value):
                                if oldhost != newhost:
                                    if oldhost != '':
                                        _dict['parts'] = parts
                                        item_list.extend([_dict])
                                        _dict = addondict.copy()
                                        _dict['multi-part'] = True
                                        parts = []
                                    oldhost = newhost

                                try:
                                    thtml = a.get_page(item)
                                    tsoup = BeautifulSoup(thtml)
                                    source = tsoup.find('frame')
                                    if source:
                                        source = source.get('src')
                                        if 'ads.php' not in source:
                                            parts.extend([source])
                                except:
                                    continue
                    if parts:
                        _dict['parts'] = parts
                        item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #37
0
    def __init__(self, params):
        import re
        import json
        import urllib
        from addon import Addon
        from addondict import AddonDict

        a = Addon()
        site = self.__module__
        mode = params['mode']

        api_version = 'v5'
        recent_url = 'http://beeg.com/api/%s/index/main/0/pc' % api_version
        long_url = 'http://beeg.com/api/%s/index/tag/0/pc?tag=long%svideos' % (api_version, '%20')
        search_url = 'http://beeg.com/api/%s/index/search/0/pc?query=' % api_version
        tag_url = 'http://beeg.com/api/%s/index/tag/0/pc?tag=' % api_version
        img_url = 'http://img.beeg.com/236x177/%s.jpg'

        data_markers = 'data=pc.US'
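        # video URLs returned by the API embed a {DATA_MARKERS} placeholder that is
        # replaced with the string above before playback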

        if mode == 'main':
            item_list = [{'site': site, 'mode': 'list', 'title': a.language(30003), 'content': '',
                          'url': recent_url, 'cover_url': a.image('recent.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'categories', 'title': a.language(30005), 'content': '',
                          'url': recent_url, 'cover_url': a.image('categories.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30039), 'content': '',
                          'url': long_url, 'cover_url': a.image('longvideos.png', image), 'backdrop_url': a.art(),
                          'type': 3},
                         {'site': site, 'mode': 'list', 'title': a.language(30004), 'content': 'search',
                          'url': search_url, 'cover_url': a.image('search.png', image), 'backdrop_url': a.art(),
                          'type': 3}]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            data = json.loads(html)
            item_list = []
            tags = data.get('tags', None)
            if tags:
                popular = tags.get('popular', None)
                if popular:
                    for item in popular:
                        url_item = re.search('(.+?)-', str(item))
                        if url_item: url_item = url_item.group(1)
                        else: url_item = item
                        item_list.extend([{'site': site, 'mode': 'list', 'url': tag_url + url_item,
                                           'content': '', 'title': str(item).capitalize(),
                                           'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
                nonpopular = tags.get('nonpopular', None)
                if nonpopular:
                    for item in nonpopular:
                        url_item = re.search('(.+?)-', str(item))
                        if url_item: url_item = url_item.group(1)
                        else: url_item = item
                        item_list.extend([{'site': site, 'mode': 'list', 'url': tag_url + urllib.quote(url_item),
                                           'content': '', 'title': str(item).capitalize(),
                                           'cover_url': a.image(image, image), 'backdrop_url': a.art(), 'type': 3}])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item.replace(' ', '+')
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/([0-9]+)/pc', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/[0-9]+/pc', '/' + str(item) + '/pc', params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            item_list = []
            data = json.loads(html)
            allvideos = []
            videos = data.get('videos', None)
            if videos:
                for video in videos:
                    nt_name = video.get('nt_name', '').encode('utf-8', 'ignore')
                    ps_name = video.get('ps_name', '').encode('utf-8', 'ignore')
                    atitle = video.get('title', '').encode('utf-8', 'ignore')
                    vid_id = video.get('id', '').encode('utf-8', 'ignore')
                    if nt_name.lower() == 'na': nt_name = ''
                    if ps_name.lower() == 'na': ps_name = ''
                    atitle = '%s - %s' % (atitle, ps_name)
                    if nt_name:
                        atitle += ' (%s)' % nt_name
                    if vid_id:
                        allvideos.append([vid_id, atitle, video])

                if allvideos:
                    params['mode'] = 'play'
                    params['content'] = 'episodes'
                    params['type'] = 0
                    params['context'] = 0
                    params['duration'] = '480'
                    params['sub_site'] = site
                    addondict = AddonDict(0).update(params)

                    for number, name, idata in allvideos:
                        _dict = addondict.copy()
                        _dict['title'] = name
                        _dict['tvshowtitle'] = _dict['title']
                        _dict['originaltitle'] = _dict['title']
                        _dict['cover_url'] = a.image(img_url % number)
                        _dict['thumb_url'] = _dict['cover_url']
                        _dict['poster'] = _dict['cover_url']
                        _dict['url'] = params['url']
                        _dict['count'] = number
                        item_list.extend([_dict])
                    pages = data.get('pages', 0)
                    if pages != 0:
                        pages -= 1
                    page = re.search('/([0-9]+)/pc', params['url'])
                    if page:
                        page = int(page.group(1))
                    else:
                        page = 0
                    previouspage = None
                    nextpage = None
                    lastpage = None
                    if page > 0:
                        previouspage = re.sub('/[0-9]+/pc', '/' + str(page - 1) + '/pc', params['url'])
                    if pages > 1:
                        lastpage = re.sub('/[0-9]+/pc', '/' + str(pages) + '/pc', params['url'])
                    if page < pages:
                        nextpage = re.sub('/[0-9]+/pc', '/' + str(page + 1) + '/pc', params['url'])

                    if previouspage:
                        item_list.extend([{'site': site, 'mode': 'list', 'url': previouspage, 'content': params['content'],
                                           'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])
                    if nextpage:
                        item_list.extend([{'site': site, 'mode': 'list', 'url': nextpage, 'content': params['content'],
                                           'title': a.language(30018, True), 'cover_url': a.image('next.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])
                    if lastpage:
                        item_list.extend([{'site': site, 'mode': 'list', 'url': lastpage, 'content': 'goto',
                                           'title': a.language(30019, True), 'cover_url': a.image('goto.png', image),
                                           'backdrop_url': a.art(), 'type': 3}])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
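            # 'play': look the selected id up in the listing JSON and play the best
            # available quality (720p, then 480p, then 240p).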
            html = a.get_page(params['url'])
            data = json.loads(html)
            video = None
            videos = data.get('videos', None)
            if videos:
                for vid in videos:
                    if vid.get('id', None) == params['count']:
                        video = vid
                        break
                if video:
                    img = img_url % video.get('id')
                    name = params['title']
                    url = video.get('720p', None)
                    if not url:
                        url = video.get('480p', None)
                        if not url:
                            url = video.get('240p', None)
                    if url:
                        url = 'http:' + re.sub('\{DATA_MARKERS\}', data_markers, url)
                        from playback import Playback
                        Playback().play_this(url, name, img, a.common.usedirsources())
                    else:
                        a.alert(a.language(30904, True), sound=False)
Example #38
0
"""
    metahandler XBMC Addon
    Copyright (C) 2012 Eldorado

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import os    
from addon import Addon

addon = Addon('script.module.metahandler')
addon_path = addon.get_path()
profile_path = addon.get_profile()
settings_file = os.path.join(addon_path, 'resources', 'settings.xml')
addon_version = addon.get_version()
Example #39
0
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        base_url = 'https://chaturbate.com'
        home_url = base_url

        false_positives = ['#']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30021),
                'content': '',
                'url': home_url,
                'cover_url': a.image('featuredcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'bygender',
                'title': a.language(30017),
                'content': '',
                'cover_url': a.image('bygender.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'byage',
                'title': a.language(30018),
                'content': '',
                'cover_url': a.image('byage.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'byregion',
                'title': a.language(30019),
                'content': '',
                'cover_url': a.image('byregion.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'bystatus',
                'title': a.language(30020),
                'content': '',
                'cover_url': a.image('bystatus.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'bygender':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30022),
                'content': '',
                'url': base_url + '/female-cams/',
                'cover_url': a.image('femalecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30023),
                'content': '',
                'url': base_url + '/male-cams/',
                'cover_url': a.image('malecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30024),
                'content': '',
                'url': base_url + '/couple-cams/',
                'cover_url': a.image('couplecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30025),
                'content': '',
                'url': base_url + '/transsexual-cams/',
                'cover_url': a.image('transcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'byage':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30026),
                'content': '',
                'url': base_url + '/teen-cams/',
                'cover_url': a.image('teencams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30027),
                'content': '',
                'url': base_url + '/18to21-cams/',
                'cover_url': a.image('18to21cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30028),
                'content': '',
                'url': base_url + '/20to30-cams/',
                'cover_url': a.image('20to30cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30029),
                'content': '',
                'url': base_url + '/30to50-cams/',
                'cover_url': a.image('30to50cams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30030),
                'content': '',
                'url': base_url + '/mature-cams/',
                'cover_url': a.image('maturecams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'byregion':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30031),
                'content': '',
                'url': base_url + '/north-american-cams/',
                'cover_url': a.image('north-americancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30032),
                'content': '',
                'url': base_url + '/other-region-cams/',
                'cover_url': a.image('other-regioncams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30033),
                'content': '',
                'url': base_url + '/euro-russian-cams/',
                'cover_url': a.image('euro-russiancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30034),
                'content': '',
                'url': base_url + '/philippines-cams/',
                'cover_url': a.image('philippinescams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30035),
                'content': '',
                'url': base_url + '/asian-cams/',
                'cover_url': a.image('asiancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30036),
                'content': '',
                'url': base_url + '/south-american-cams/',
                'cover_url': a.image('south-americancams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'bystatus':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30037),
                'content': '',
                'url': base_url + '/exhibitionist-cams/',
                'cover_url': a.image('exhibitionistcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30038),
                'content': '',
                'url': base_url + '/hd-cams/',
                'cover_url': a.image('hdcams.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
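            # 'list': parse the cam grid (room name, age, 'cams' figure and room
            # title) and append previous/next/last paging entries.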
            if params.get('content', '') == 'goto':
                last_item = re.search('page=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('page=[0-9]+', 'page=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': 'c-1 endless_page_template'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'episodes'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = ''
            params['sub_site'] = site
            if soup:
                ul = soup.find('ul', {'class': 'list'})
                if ul:
                    addondict = AddonDict(0).update(params)
                    for item in ul.findAll('li'):
                        _dict = addondict.copy()
                        clip_link = item.find('a')
                        if clip_link:
                            url = clip_link.get('href')
                            if not url.startswith('http://'):
                                url = base_url + url
                            _dict['url'] = url
                            ctitle = ''
                            cage = ''
                            cname = ''
                            ccams = ''
                            details = item.find('div', {'class': 'details'})
                            if details:
                                temp = details.find('a')
                                if temp:
                                    cname = str(temp.contents[0])
                                temp = details.find(
                                    'span', {'class': re.compile('age.*')})
                                if temp:
                                    cage = temp.string.encode('utf-8')
                                temp = details.find('li', {'class': 'cams'})
                                if temp:
                                    ccams = str(temp.contents[0])
                                temp = details.find('li', {'title': True})
                                if temp:
                                    ctitle = temp.get('title').encode('UTF-8')
                            if cname:
                                usetitle = '%s [%syr, %s] %s' % (cname, cage,
                                                                 ccams, ctitle)
                                _dict['title'] = usetitle
                                _dict['tvshowtitle'] = _dict['title']
                                _dict['originaltitle'] = _dict['title']
                                img = item.find('img')
                                if img:
                                    img = img.get('src')
                                    if img.startswith('//'):
                                        img = 'http:' + img
                                else:
                                    img = ''
                                _dict['cover_url'] = a.image(img)
                                _dict['thumb_url'] = _dict['cover_url']
                                _dict['poster'] = _dict['cover_url']
                                item_list.extend([_dict])

                    pages = BeautifulSoup(html,
                                          parseOnlyThese=SoupStrainer(
                                              'ul', {'class': 'paging'}))
                    if pages:
                        previouspage = pages.find(
                            'a', {'class': re.compile('prev.*')})
                        nextpage = pages.find('a',
                                              {'class': re.compile('next.*')})
                        lastpage = pages.find('span',
                                              {'class': 'endless_separator'})
                        if lastpage:
                            lastpage = lastpage.findNext('a')

                        if previouspage:
                            previouspage = previouspage.get('href').replace(
                                ' ', '+')
                            if previouspage != '#':
                                if not previouspage.startswith('http://'):
                                    previouspage = base_url + previouspage
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': previouspage,
                                    'content': params['content'],
                                    'title': a.language(30017, True),
                                    'cover_url': a.image('previous.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
                        if nextpage:
                            nextpage = nextpage.get('href').replace(' ', '+')
                            if nextpage != '#':
                                if not nextpage.startswith('http://'):
                                    nextpage = base_url + nextpage
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': nextpage,
                                    'content': params['content'],
                                    'title': a.language(30018, True),
                                    'cover_url': a.image('next.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])
                        if lastpage:
                            lastpage = lastpage.get('href').replace(' ', '+')
                            if lastpage != '#':
                                if not lastpage.startswith('http://'):
                                    lastpage = base_url + lastpage
                                item_list.extend([{
                                    'site': site,
                                    'mode': 'list',
                                    'url': lastpage,
                                    'content': 'goto',
                                    'title': a.language(30019, True),
                                    'cover_url': a.image('goto.png', image),
                                    'backdrop_url': a.art(),
                                    'type': 3
                                }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            link = re.search('html \+= "src=\'(.+?)\'', html)
            if link:
                from playback import Playback
                Playback().play_this(link.group(1), params['title'],
                                     params['cover_url'],
                                     a.common.usedirsources())
            else:
                a.alert(a.language(30904, True), sound=False)
Example #40
0
try:
    import simplejson
except:
    import json as simplejson

import urllib, re
from datetime import datetime
import time
from net import Net
from addon import Addon
from threading import Thread

try:
    import Queue as queue
except ImportError:
    import queue
net = Net()
addon = Addon("script.module.metahandler")


class TMDB(object):
    """
    This class performs TMDB and IMDB lookups.
    
    The first call is made to TMDB by either IMDB ID or name/year, depending on what is supplied. If the movie is not
    found, or data is missing on TMDB, another call is made to IMDB to fill in the missing information.
    """

    def __init__(self, api_key="", view="json", lang="en"):
        # view = yaml json xml
        self.view = view
        self.lang = lang
        self.api_key = api_key
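
    # A hedged sketch, not part of the original class, of the lookup flow the
    # docstring describes: query TMDB first (by IMDB ID when supplied,
    # otherwise by name/year) and fall back to IMDB for whatever TMDB left
    # empty. The helpers _search_tmdb, _search_tmdb_by_name, _search_imdb and
    # _merge_meta are illustrative placeholders only.
    def _example_lookup(self, name='', year='', imdb_id=''):
        if imdb_id:
            meta = self._search_tmdb(imdb_id)
        else:
            meta = self._search_tmdb_by_name(name, year)
        # Treat an empty result or missing core fields as "incomplete".
        incomplete = not meta or any(not meta.get(key)
                                     for key in ('title', 'plot', 'year'))
        if incomplete:
            imdb_meta = self._search_imdb(imdb_id or name)
            meta = self._merge_meta(meta or {}, imdb_meta)
        return meta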
 def __init__(self):
     self.ad = Addon()
     self.create_dirs()
     self.copy_db()
Example #42
0
"""
JSON-RPC methods implementation

The methods are called via POST request at this address.
Don't forget to add the 'Content-Type: application/json' header to your HTTP request.
The API is compliant with JSON-RPC 2.0, though 'jsonrpc' and 'id' keys are optional in requests.
Example:
{"method": "pause_torrent", "params": {"info_hash":"21df87c3cc3209e3b6011a88053aec35a58582a9"}}

"params" is a JSON object (dict) containing method call parameters. Some methods do not take any parameters.
For those methods the "params" key can be null or omitted.
"""

from addon import Addon

addon = Addon()


def ping(torrent_client, params=None):
    """
    Connection test method

    :return: 'pong'
    """
    return 'pong'
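

# A hedged, client-side sketch of calling this API as the module docstring
# describes: an HTTP POST with a JSON body and a 'Content-Type:
# application/json' header. The URL below is only a placeholder (the real
# listening address is not stated here), and the 'requests' library is an
# assumption rather than a dependency of this file.
def _example_rpc_call(method, params=None,
                      url='http://127.0.0.1:8668/json-rpc'):
    import json
    import requests

    # 'jsonrpc' and 'id' are optional per the docstring; they are included
    # here to form a fully compliant JSON-RPC 2.0 request.
    payload = {'jsonrpc': '2.0', 'id': 1, 'method': method, 'params': params}
    response = requests.post(url, data=json.dumps(payload),
                             headers={'Content-Type': 'application/json'})
    return response.json()

# For example, _example_rpc_call('ping') is expected to return 'pong'.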


def add_torrent(torrent_client, params):
    """
    Add torrent method
Example #43
0
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        base_url = 'http://yespornplease.com'
        home_url = base_url + '/index.php'
        popular_url = base_url + '/index.php?p=1&m=today'
        search_url = base_url + '/search.php?q='
        false_positives = ['']

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30016),
                'content': '',
                'url': popular_url,
                'cover_url': a.image('popular.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'categories'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item:
                        item_list.extend([{
                            'site': site,
                            'mode': 'list',
                            'url': item.get('href').replace(' ', '+'),
                            'content': '',
                            'title': item.string.encode('UTF-8'),
                            'cover_url': a.image(image, image),
                            'backdrop_url': a.art(),
                            'type': 3
                        }])
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item.replace(' ', '+')
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('p=([0-9]+)', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('p=[0-9]+', 'p=' + str(item),
                                           params['url']).replace(' ', '+')
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'videos'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'video-preview'}):
                    if item:
                        _dict = xbmcdict.copy()
                        temp = item.find('div', {'class': 'jcarousel'}).a
                        if temp:
                            temp = temp.get('href')
                            if not temp.startswith('http://'):
                                temp = base_url + temp
                            _dict['url'] = temp
                            _dict['title'] = item.find('div', {
                                'class': 'preview-title'
                            }).get('title').encode('UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            temp = item.find('div', {
                                'class': 'jcarousel'
                            }).img.get('src')
                            if temp.startswith('//'): temp = 'http:' + temp
                            _dict['cover_url'] = a.image(temp)
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            temp = item.find('div', {
                                'class': 'preview-info-box length'
                            }).b.string
                            if temp:
                                temp = re.search('([0-9]+):([0-9]+):([0-9]+)',
                                                 temp)
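                                # Convert the matched hh:mm:ss groups into a
                                # total duration in seconds.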
                                _dict['duration'] = str(
                                    (int(temp.group(1)) * 60 * 60) +
                                    (int(temp.group(2)) * 60) +
                                    int(temp.group(3)))
                            _dict['sub_site'] = site

                            item_list.extend([_dict])

                soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('body'))
                if soup.find('a', {'id': 'prev-page'}):
                    item = soup.find('a', {
                        'id': 'prev-page'
                    }).get('href').replace(' ', '+')
                    if not item.startswith('http://'): item = base_url + item
                    if 'index.php' in params['url']:
                        item = item.replace('search.php', 'index.php')
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item,
                        'content': params['content'],
                        'title': a.language(30017, True),
                        'cover_url': a.image('previous.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])
                if soup.find('a', {'id': 'next-page'}):
                    item = soup.find('a', {
                        'id': 'next-page'
                    }).get('href').replace(' ', '+')
                    if 'index.php' in params['url']:
                        item = item.replace('search.php', 'index.php')
                    if not item.startswith('http://'): item = base_url + item
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': item,
                        'content': params['content'],
                        'title': a.language(30018, True),
                        'cover_url': a.image('next.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

                soup = BeautifulSoup(html,
                                     parseOnlyThese=SoupStrainer(
                                         'div', {'id': 'pagination'}))
                last_item = False
                if soup:
                    for item in reversed(soup.findAll('a')):
                        last_item = item.get('href')
                        if not last_item.startswith('http://'):
                            last_item = base_url + last_item
                        break
                if last_item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': last_item,
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'object', {'id': 'videoContainer'}))
            item_list = []
            if soup:
                item = soup.find('param', {'name': 'flashvars'})
                item = re.search('.*?video_url=(.+?)&.*?', str(item))
                if item: item = item.group(1)
                xbmcdict = XBMCDict(0).update(params)
                if item:
                    _dict = xbmcdict.copy()
                    _dict['url'] = item
                    item_list.extend([_dict])
                else:
                    a.alert(a.language(30904, True), sound=False)
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #44
0
'''
    Ice Channel
'''
import os
import sys
from addon import Addon

addon_id = 'script.icechannel'

try:
    addon = Addon(addon_id, sys.argv)
except:
    addon = Addon(addon_id)

addon_path = addon.get_path()
addon_version = addon.get_version()

lib_path = os.path.join(addon_path, 'lib', 'entertainment')
plugins_path = os.path.join(lib_path, 'plugins')
settings_file = os.path.join(addon_path, 'resources', 'settings.xml')

profile_path = addon.get_profile()

theme_name = addon.get_setting('theme')
theme_type = addon.get_setting(theme_name + '_themetype')
if theme_type == 'online':
    icon_path = addon.get_setting(theme_name + '_themeurl')
else:
    theme_addon = Addon(addon.get_setting(theme_name + '_themeaddon'))
    icon_path = os.path.join(theme_addon.get_path(), 'theme')
Example #45
0
    def __init__(self, params):
        import re
        from addon import Addon
        from addondict import AddonDict as XBMCDict
        from BeautifulSoup import BeautifulSoup, SoupStrainer, Comment

        a = Addon()
        site = self.__module__
        mode = params['mode']

        home_url = 'http://www.freeomovie.com/'
        movies_url = home_url + 'category/full-movie/'
        scenes_url = home_url + 'category/clips/'
        search_url = home_url + '/?s='
        false_positives = [
            'http://www.freeomovie.com/category/full-movie/',
            'http://www.freeomovie.com/category/clips/'
        ]

        if mode == 'main':
            item_list = [{
                'site': site,
                'mode': 'list',
                'title': a.language(30006),
                'content': '',
                'url': home_url,
                'cover_url': a.image('all.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30001),
                'content': '',
                'url': movies_url,
                'cover_url': a.image('movies.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30002),
                'content': '',
                'url': scenes_url,
                'cover_url': a.image('scenes.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'categories',
                'title': a.language(30005),
                'content': '',
                'url': home_url,
                'cover_url': a.image('categories.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }, {
                'site': site,
                'mode': 'list',
                'title': a.language(30004),
                'content': 'search',
                'url': search_url,
                'cover_url': a.image('search.png', image),
                'backdrop_url': a.art(),
                'type': 3
            }]
            item_list.extend(a.favs_hist_menu(site))
            item_list.extend(a.extended_menu())
            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'categories':
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div',
                                     {'class': 'multi-column-taxonomy-list'}))
            item_list = []
            if soup:
                for item in soup.findAll('a'):
                    if item:
                        if item.get('href') not in false_positives:
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': item.get('href'),
                                'content': '',
                                'title': item.string.encode('UTF-8'),
                                'cover_url': a.image(image, image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'list':
            if params.get('content', '') == 'search':
                item = a.search_input()
                if item:
                    params['url'] = search_url + item
                else:
                    exit(1)
            elif params.get('content', '') == 'goto':
                last_item = re.search('/page/([0-9]+)/', params['url'])
                if last_item:
                    last_item = int(last_item.group(1))
                else:
                    last_item = 10000
                item = a.page_input(last_item)
                if item:
                    params['url'] = re.sub('/page/[0-9]+/',
                                           '/page/' + str(item) + '/',
                                           params['url'])
                else:
                    exit(1)
            html = a.get_page(params['url'])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'id': 'content'}))
            item_list = []
            params['mode'] = 'play'
            params['content'] = 'movies'
            params['type'] = 0
            params['context'] = 0
            params['duration'] = '7200'
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                for item in soup.findAll('div', {'class': 'postbox'}):
                    if item:
                        if item.h2.a.get('href') not in false_positives:
                            _dict = xbmcdict.copy()
                            if scenes_url in params['url']:
                                _dict['duration'] = '1500'
                                _dict['content'] = 'episodes'
                            _dict['url'] = item.h2.a.get('href')
                            _dict['title'] = item.h2.a.get('title').encode(
                                'UTF-8')
                            _dict['tvshowtitle'] = _dict['title']
                            _dict['originaltitle'] = _dict['title']
                            _dict['cover_url'] = a.image(item.img.get('src'))
                            _dict['thumb_url'] = _dict['cover_url']
                            _dict['poster'] = _dict['cover_url']
                            _dict['sub_site'] = site
                            item_list.extend([_dict])
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'wp-pagenavi'}))
            last_item = False
            if soup:
                for item in soup.findAll('a', href=True):
                    if item:
                        if item.get('class') == 'previouspostslink':
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': item.get('href'),
                                'content': params['content'],
                                'title': a.language(30017, True),
                                'cover_url': a.image('previous.png', image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])
                        if item.get('class') == 'nextpostslink':
                            item_list.extend([{
                                'site': site,
                                'mode': 'list',
                                'url': item.get('href'),
                                'content': params['content'],
                                'title': a.language(30018, True),
                                'cover_url': a.image('next.png', image),
                                'backdrop_url': a.art(),
                                'type': 3
                            }])
                        if item.get('class') == 'last':
                            last_item = item.get('href')
                if not last_item:
                    try:
                        if not soup.find('a', {'class': 'nextpostslink'}):
                            last_item = soup.findAll('a',
                                                     href=True)[-1].get('href')
                        else:
                            last_item = soup.findAll('a',
                                                     href=True)[-2].get('href')
                    except:
                        pass
                if last_item:
                    item_list.extend([{
                        'site': site,
                        'mode': 'list',
                        'url': last_item,
                        'content': 'goto',
                        'title': a.language(30019, True),
                        'cover_url': a.image('goto.png', image),
                        'backdrop_url': a.art(),
                        'type': 3
                    }])

            a.add_items(item_list)
            a.end_of_directory()

        elif mode == 'play':
            html = a.get_page(params['url'])
            item_list = []
            soup = BeautifulSoup(html,
                                 parseOnlyThese=SoupStrainer(
                                     'div', {'class': 'videosection'}))
            if soup:
                xbmcdict = XBMCDict(0).update(params)
                pages = soup.findAll('li', {'class': re.compile('pg.')})
                if pages:
                    old_li = pages[0].get('class')
                    _dict = xbmcdict.copy()
                    _dict['multi-part'] = True
                    parts = []
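                    # Each change in the <li> class starts a new multi-part
                    # item; individual part URLs are collected in 'parts'.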
                    for li in pages:
                        if old_li != li.get('class'):
                            _dict['parts'] = parts
                            item_list.extend([_dict])
                            _dict = xbmcdict.copy()
                            _dict['multi-part'] = True
                            old_li = li.get('class')
                            parts = []
                        url = re.search('.+myurl=(.+)', li.a.get('href'),
                                        re.IGNORECASE)
                        if url:
                            url = url.group(1)
                            parts.extend([url])
                    if parts:
                        _dict['parts'] = parts
                        item_list.extend([_dict])
                alink = soup.find('a', {'target': '_blank'})
                if alink:
                    alink = alink.get('href')
                    if 'main.exoclick.com' not in alink:
                        _dict = xbmcdict.copy()
                        _dict['url'] = alink
                        item_list.extend([_dict])
                iframes = soup.findAll('iframe', {'src': True})
                if iframes:
                    for iframe in iframes:
                        iframe = iframe.get('src')
                        if 'main.exoclick.com' not in iframe:
                            _dict = xbmcdict.copy()
                            _dict['url'] = iframe
                            item_list.extend([_dict])
                if not item_list:
                    soup = BeautifulSoup(html,
                                         parseOnlyThese=SoupStrainer(
                                             'ul', {'id': 'countrytabs'}))
                    if soup:
                        xbmcdict = XBMCDict(0).update(params)
                        for index, items in enumerate(
                                soup.findAll('a', href=True)):
                            item = ''
                            if not items.get('id') == 'jpg':
                                item = items.get('href')
                                item = re.search('.*myURL\[\]=(.+)$', item,
                                                 re.DOTALL)
                                if item:
                                    item = re.sub('&tab=[0-9]+', '',
                                                  item.group(1))
                                if item:
                                    _dict = xbmcdict.copy()
                                    _dict['url'] = item
                                    _dict['count'] = index
                                    item_list.extend([_dict])
            if item_list:
                from playback import Playback
                Playback().choose_sources(item_list)
            else:
                a.alert(a.language(30904, True), sound=False)
Example #46
0
import os
import sys
import threading
import datetime
import platform
import cPickle as pickle
from math import ceil
from traceback import format_exc
from contextlib import closing
from requests import get
import xbmc
import xbmcvfs
from addon import Addon
from utilities import get_duration, HachoirError

monitor = xbmc.Monitor()
addon = Addon()
# This is for potential statistics and debugging purposes
addon.log_notice('sys.platform: "{0}". platform.uname: "{1}"'.format(
    sys.platform, str(platform.uname())))

try:
    import libtorrent  # Try to import global module
except ImportError:
    sys.path.append(os.path.join(addon.path, 'site-packages'))
    from python_libtorrent import get_libtorrent
    libtorrent = get_libtorrent()

addon.log_debug('libtorrent version: {0}'.format(libtorrent.version))


class TorrenterError(Exception):
 def showHelp(self):
     if not Addon.getSetting('dont_show_help') == 'true':
         Addon.openSettings()
Example #48
0
nextButtonState = None

globals.init()
initConfigValues()

conf = configparser.ConfigParser()
conf.read('TS5AddonInstaller.ini')

# fetch possible addons
config = configparser.ConfigParser()
try:  # try custom lookup server from config file
    configData = requests.get(str(conf['config']['url'])).text
except Exception:  # fallback to default lookup server
    configData = requests.get(
        "https://julianimhof.de/files/TS5Addons/experimental/addons.ini").text
config.read_file(io.StringIO(configData))

MAXPAGES = math.ceil((len(config.sections()) - 1) / ITEMSPERPAGE)

addons = []
for addon in [x for x in config.sections() if x != "general"]:
    addons.append(Addon(addon, config[addon]))

app = Application(master=root)
globals.app = app
globals.showPage = lambda: showPage(currpage)

showPage(0)

app.mainloop()
Example #49
0
import urllib, urllib2, sys, re, xbmcplugin, xbmcgui, xbmcaddon, xbmc, os
import datetime
from datetime import date
import time
from addon import Addon
from threading import Timer

addon_id = 'plugin.video.hushamiptv'
ADDON = xbmcaddon.Addon(id=addon_id)
ADDON_HELPER = Addon(addon_id, sys.argv)

base_url = 'http://iptv.husham.com'
api_url = 'http://api.iptvapi.com/api/v1/'
site_id = '14'

api_key = '49216ba9-f7fa-479f-8e24-7b8426694c64'

# get parameters
mode = ADDON_HELPER.queries['mode']
play = ADDON_HELPER.queries.get('play', None)
image = ADDON_HELPER.queries.get('img', '')
title = ADDON_HELPER.queries.get('title', None)
dir_end = ADDON_HELPER.queries.get('dir_end', 'true')
dir_update = ADDON_HELPER.queries.get('dir_update', 'false')
url = ADDON_HELPER.queries.get('url', '')
referer = ADDON_HELPER.queries.get('referer', base_url)
channel_id = ADDON_HELPER.queries.get('channel_id', 0)
date = ADDON_HELPER.queries.get('date', None)
date_title = ADDON_HELPER.queries.get('date_title', '')

Example #50
0
from __future__ import division
import os
import sys
import time
import threading
import datetime
import platform
import cPickle as pickle
from math import ceil
from requests import get
import xbmc
from addon import Addon
from utilities import get_duration

addon = Addon()
addon.log('Platform: "{0}"; machine: "{1}"; processor: "{2}"; system: "{3}"'.format(
            sys.platform,
            platform.machine(),
            platform.processor(),
            platform.system()), xbmc.LOGNOTICE)  # This is for potential statistics and debugging purposes

try:
    import libtorrent  # Try to import global module
except ImportError:
    sys.path.append(os.path.join(addon.path, 'site-packages'))
    from python_libtorrent import get_libtorrent
    libtorrent = get_libtorrent()

addon.log('libtorrent version: {0}'.format(libtorrent.version))
Example #51
0
import urllib,urllib2,sys,re,xbmcplugin,xbmcgui,xbmcaddon,xbmc,os
import datetime
from datetime import date
import time
from addon import Addon
from threading import Timer

addon_id='plugin.video.hushamiptv'
ADDON = xbmcaddon.Addon(id=addon_id)
ADDON_HELPER = Addon(addon_id, sys.argv)

base_url = 'http://iptv.husham.com'
api_url = 'http://api.iptvapi.com/api/v1/'
site_id = '14'

api_key = '49216ba9-f7fa-479f-8e24-7b8426694c64'

# get parameters
mode = ADDON_HELPER.queries['mode']
play = ADDON_HELPER.queries.get('play', None)
image = ADDON_HELPER.queries.get('img', '')
title = ADDON_HELPER.queries.get('title', None)
dir_end = ADDON_HELPER.queries.get('dir_end', 'true')
dir_update = ADDON_HELPER.queries.get('dir_update', 'false')
url = ADDON_HELPER.queries.get('url', '')
referer = ADDON_HELPER.queries.get('referer', base_url)
channel_id = ADDON_HELPER.queries.get('channel_id', 0)
date = ADDON_HELPER.queries.get('date', None)
date_title = ADDON_HELPER.queries.get('date_title', '')

def Exit():
Example #52
0
import re
import ast
import xbmc
import xbmcgui
import xbmcplugin
from addon import Addon


class Playback:
    def __init__(self):
        import urlresolver
        self.addon = Addon()
        self.common = self.addon.common
        self.urlresolver = urlresolver
        self.urlresolver.plugnplay.plugin_dirs = []
        if self.common.resolvers:
            self.urlresolver.plugnplay.set_plugin_dirs(
                self.urlresolver.common.plugins_path,
                self.common.resolvers_path, self.common.builtin_resolvers_path)
        else:
            self.urlresolver.plugnplay.set_plugin_dirs(
                self.urlresolver.common.plugins_path,
                self.common.builtin_resolvers_path)
        self.urlresolver.plugnplay.load_plugins()

    def _dialog_sources(self, source_list):
        if not isinstance(source_list, list): return
        return self.urlresolver.choose_source(source_list)

    def _directory_sources(self, source_list, dict_list):
        from addondict import AddonDict
        item_list = []
        for index, item in enumerate(source_list):
            multipart = re.search('^playlist://[a-zA-Z0-9_]+?/([0-9]+?)/$',
                                  item.get_url())
            if multipart:
                _dict = AddonDict(0).update(dict_list[int(multipart.group(1))])
            else:
                _dict = AddonDict(0).update(dict_list[0])
            _dict['title'] = str(item.title) + ' | ' + str(_dict['title'])
            _dict['site'] = 'play_this'
            _dict['sub_site'] = ''
            _dict['mode'] = ''
            _dict['type'] = 0
            _dict['context'] = 3
            _dict['url'] = str(item.get_url())
            item_list.extend([_dict])

        if item_list:
            self.addon.add_items(item_list)
            self.addon.end_of_directory()

    def _create_source_list(self, dict_list):
        source_list = []
        part_list = []
        full_list = []
        playlist_list = []
        host = re.compile(
            '(?:http|https)://(?:.+?\.)*?([0-9a-zA-Z_\-]+?)\.[0-9a-zA-Z]{2,}(?:/|:).*'
        )
        old_host = ''
        separator = ' | '
        for iindex, item in enumerate(dict_list):
            playlist_host = 'playlist://%s/%s/'
            source_title = ''
            quality = separator + item.get('src_quality', 'SD')
            if item['multi-part']:
                if item['src_title']:
                    source_title = item['src_title'] + separator
                source_host = host.search(item['url'])
                if source_host: source_host = source_host.group(1)
                else: source_host = 'UID'
                for index, part in enumerate(item['parts']):
                    part_title = source_title + source_host + separator + self.common.language(
                        30651, True) + ' ' + str(index + 1) + quality
                    source = self.urlresolver.HostedMediaFile(
                        url=part, title=part_title.upper())
                    if source:
                        part_list.extend([source])
                if old_host != source_host:
                    old_host = source_host
                    playlist_host = playlist_host % (old_host, iindex)
                    playlist_title = source_title + old_host + separator + self.common.language(
                        30650, True) + quality
                    playlist_source = self.urlresolver.HostedMediaFile(
                        url=playlist_host, title=playlist_title.upper())
                    if playlist_source:
                        playlist_list.extend([playlist_source])
            else:
                if item['src_title']:
                    source_title = item['src_title'] + separator
                source_host = host.search(item['url'])
                if source_host: source_host = source_host.group(1)
                else: source_host = 'UID'
                source_title += source_host + quality
                source = self.urlresolver.HostedMediaFile(
                    url=item['url'], title=source_title.upper())
                if source:
                    full_list.extend([source])
        full_list.extend(playlist_list)
        source_list.extend(full_list)
        source_list.extend(part_list)
        return [source_list, full_list]

    def _sort_sources(self, dict_list):
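        # Bucket sources by quality (HD, HQ, SD, LQ) and return them in
        # preference order; when autoplay is enabled but HD is not, HD
        # sources are moved to the end of the list rather than dropped.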
        usehd = self.common.usehd()
        autoplay = self.common.autoplay()
        hd = []
        hq = []
        sd = []
        lq = []
        new_dict_list = []
        for item in dict_list:
            source = True
            if item['multi-part']:
                item['url'] = 'playlist://' + item['parts'][0]
            if source:
                quality = item.get('src_quality', 'sd')
                if quality.lower() == 'hd': hd.extend([item])
                elif quality.lower() == 'hq': hq.extend([item])
                elif quality.lower() == 'sd': sd.extend([item])
                elif quality.lower() == 'lq': lq.extend([item])
        if (autoplay and usehd) or not autoplay:
            new_dict_list.extend(hd)
        new_dict_list.extend(hq)
        new_dict_list.extend(sd)
        new_dict_list.extend(lq)
        if autoplay and not usehd:
            new_dict_list.extend(hd)
        return new_dict_list

    def choose_sources(self, dict_list):
        if not isinstance(dict_list, list): raise TypeError
        for item in dict_list:
            try:
                item.keys()
            except:
                raise TypeError
        autoplay = self.common.autoplay()
        edit_url = self.common.editurl()
        dict_list = self._sort_sources(dict_list)
        if not dict_list:
            self.common.alert(self.common.language(30905, True), sound=False)
            return
        lists = self._create_source_list(dict_list)
        source_list = lists[0]
        full_list = lists[1]
        chosen = None
        img = dict_list[-1].get('cover_url', '')
        if (self.common.theme_path in img) or (self.common.media_path in img):
            img = ''
        thumb = dict_list[-1].get('thumb_url', None)
        if thumb:
            if (self.common.theme_path in thumb) or (self.common.media_path
                                                     in thumb):
                img = thumb
        title = dict_list[-1].get('title', '')
        found = False
        if len(dict_list) == 1:
            stream_url = source_list[0].resolve()
            if stream_url:
                found = True
                self.play_this(stream_url, dict_list[0].get('title', ''),
                               dict_list[0].get('cover_url', ''),
                               self.common.usedirsources(), dict_list[0])
        elif autoplay and full_list:
            for index, chosen in enumerate(full_list):
                stream_url = chosen.resolve()
                if stream_url:
                    if not stream_url.startswith('playlist://'):
                        if edit_url:
                            stream_url = self.addon.edit_input(stream_url)
                        found = True
                        playback_item = xbmcgui.ListItem(label=title,
                                                         thumbnailImage=img,
                                                         path=stream_url)
                        playback_item.setProperty('IsPlayable', 'true')
                        xbmcplugin.setResolvedUrl(self.common.handle, True,
                                                  playback_item)
                        break
                    else:
                        list_index = re.search(
                            '^playlist://[a-zA-Z0-9_]+?/([0-9]+?)/$',
                            stream_url)
                        if list_index:
                            found = True
                            self.play_list(dict_list[int(list_index.group(1))],
                                           title, img)
                            break
        elif source_list:
            if self.common.usedirsources():
                found = True
                self._directory_sources(source_list, dict_list)
            else:
                chosen = self._dialog_sources(source_list)
                if chosen:
                    idx = None
                    stream_url = chosen.resolve()
                    if stream_url:
                        if not stream_url.startswith('playlist://'):
                            if edit_url:
                                stream_url = self.addon.edit_input(stream_url)
                            part_title = re.search(
                                '.+?(\s[Pp][Aa][Rr][Tt]\s[0-9]+)',
                                chosen.title)
                            if part_title:
                                title += part_title.group(1)
                            playback_item = xbmcgui.ListItem(
                                label=title,
                                thumbnailImage=img,
                                path=stream_url)
                            playback_item.setProperty('IsPlayable', 'true')
                            found = True
                            xbmcplugin.setResolvedUrl(self.common.handle, True,
                                                      playback_item)
                        else:
                            list_index = re.search(
                                '^playlist://[a-zA-Z0-9_]+?/([0-9]+?)/$',
                                stream_url)
                            if list_index:
                                found = True
                                self.play_list(
                                    dict_list[int(list_index.group(1))], title,
                                    img)
        if not found:
            try:
                failmsg = str(stream_url.msg)
            except:
                failmsg = self.common.language(30905, True)
            else:
                self.common.alert(failmsg, self.common.language(30923, True))

    def play_list(self, source, title='', image=''):
        try:
            source.keys()
        except:
            raise TypeError

        if source['multi-part']:
            all_resolved = True
            playlist_item = self.addon.get_playlist(1, True)
            first_item = None
            try:
                source['parts'] = ast.literal_eval(source['parts'])
            except:
                pass
            for index, part in enumerate(source['parts']):
                this_title = title
                src_title = source['title'] + ' ' + self.common.language(
                    30651, True) + ' ' + str(index + 1)
                if this_title:
                    this_title += ' ' + self.common.language(
                        30651, True) + ' ' + str(index + 1)
                else:
                    this_title = src_title
                stream_url = None
                hmf = self.urlresolver.HostedMediaFile(url=part,
                                                       title=src_title)
                if hmf:
                    stream_url = hmf.resolve()
                if stream_url:
                    playback_item = \
                        xbmcgui.ListItem(label=this_title, thumbnailImage=image,
                                         path=stream_url)
                    playback_item.setProperty('IsPlayable', 'true')
                    if not first_item: first_item = playback_item
                    playlist_item.add(stream_url, playback_item)
                else:
                    amsg = '%s %s %s' % (self.common.language(
                        30991, True), str(index + 1),
                                         self.common.language(30922, True))
                    self.common.alert(amsg, self.common.language(30921, True))
                    all_resolved = False
                    break
            if all_resolved and first_item:
                xbmcplugin.setResolvedUrl(self.common.handle, True, first_item)

    def play_this(self,
                  item,
                  title='',
                  image='',
                  with_player=True,
                  meta_dict=None):
        if not isinstance(item, str):
            try:
                item = str(item)
            except:
                return
        source = self.urlresolver.HostedMediaFile(url=item, title=title)
        stream_url = source.resolve()
        if not stream_url:
            try:
                failmsg = str(stream_url.msg)
            except:
                failmsg = self.common.language(30905, True)
            else:
                self.common.alert(failmsg, self.common.language(30923, True))
            finally:
                stream_url = item
        if stream_url:
            multipart = False
            if meta_dict:
                multipart = meta_dict.get('multi-part', False)
                if multipart:
                    if 'playlist://' not in item:
                        multipart = False
            if multipart:
                self.play_list(meta_dict, title, image)
            else:
                playback_item = xbmcgui.ListItem(label=title,
                                                 thumbnailImage=image,
                                                 path=stream_url)
                playback_item.setProperty('IsPlayable', 'true')
                if with_player:
                    core = self.common.player_core()
                    xbmc.Player(core).play(stream_url, playback_item)
                else:
                    xbmcplugin.setResolvedUrl(self.common.handle, True,
                                              playback_item)