Example #1
    def worker(self):
        try:
            total = len(self.list)

            threads = []
            for i in range(0, total):
                threads.append(workers.Thread(self.get_fanart, i))
            [i.start() for i in threads]

            timeout = 30
            progress = control.progressDialog
            progress.create(control.addonInfo('name'), '')
            progress.update(0,
                            line1="%s shows found." % total,
                            line2="Loading information.")

            print "Adding progress dialog with %s total shows" % total
            for i in range(timeout * 2):
                if xbmc.abortRequested: return sys.exit()

                progress.update(
                    int((100 / float(len(threads))) *
                        len([x for x in threads if x.is_alive() is False])),
                    line3="%s remaining." %
                    len([x for x in threads if x.is_alive() is True]))
                if progress.iscanceled(): break

                is_alive = [x.is_alive() for x in threads]
                if all(x is False for x in is_alive): break
                xbmc.sleep(100)
            progress.close()
        except:
            pass
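
All of these examples rely on a `workers.Thread` helper rather than on `threading.Thread` directly. Its definition is not part of this listing; in add-ons of this kind it is usually a thin wrapper that binds a target callable to its positional arguments, along the lines of this sketch (the class body is an assumption, not the actual module):

import threading

class Thread(threading.Thread):
    # minimal sketch of a workers.Thread-style wrapper: bind a target
    # callable and its positional arguments, run them on start()
    def __init__(self, target, *args):
        threading.Thread.__init__(self)
        self._target = target
        self._args = args

    def run(self):
        self._target(*self._args)
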
Example #2
    def keep_sending_video(self, dest_stream):
        try:
            # self.average_download_speed = 0.0
            self.average_download_speed = float(
                control.setting('average_download_speed')) if control.setting(
                    'average_download_speed') else 0.0

            queue = Queue.Queue(self.max_queue_size)
            worker = workers.Thread(self.queue_processor, queue, dest_stream,
                                    self.g_stopEvent)
            worker.daemon = True
            worker.start()

            try:
                self.download_loop(self.url, queue, self.maxbitrate,
                                   self.g_stopEvent)
            except Exception as ex:
                self.log_error("ERROR DOWNLOADING: %s" % ex)

            control.setSetting('average_download_speed',
                               str(self.average_download_speed))

            if not self.g_stopEvent.isSet():
                self.log("WAITING FOR QUEUE...")
                queue.join()
                self.log("DONE.")
                self.g_stopEvent.set()
                self.log("WAITING FOR WORKER THREAD...")
                worker.join()
                self.log("DONE.")
        except:
            traceback.print_exc()
        finally:
            self.g_stopEvent.set()
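
The consumer side, `self.queue_processor`, is not shown in this listing. Because the producer calls `queue.join()`, the consumer must call `task_done()` once per item it pulls off the queue; a minimal sketch of such a consumer (parameter order taken from the Thread call above, the body itself assumed) could look like:

    def queue_processor(self, queue, dest_stream, stop_event):
        # drain chunks from the queue into the output stream until asked to
        # stop; task_done() is what lets the producer's queue.join() return
        while not stop_event.isSet():
            try:
                chunk = queue.get(timeout=1)
            except Queue.Empty:
                continue
            try:
                dest_stream.write(chunk)
            finally:
                queue.task_done()
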
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.search.format('8', urllib.quote(query))
            else:
                url = self.search.format('4', urllib.quote(query))
            self.hostDict = hostDict + hostprDict
            headers = {'User-Agent': client.agent()}
            _html = client.request(url, headers=headers)
            threads = []
            for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
                threads.append(workers.Thread(self._get_items, i))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self._sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Torrdown Testing - Exception: \n' + str(failure))
            return self._sources
Example #4
    def worker(self, level=1):
        self.meta = []
        total = len(self.list)

        self.fanart_tv_headers = {'api-key': 'db10619c97af83891690f4cfd7205d62'}
        if not self.fanart_tv_user == '':
            self.fanart_tv_headers.update({'client-key': self.fanart_tv_user})

        for i in range(0, total):
            self.list[i].update({'metacache': False})

        self.list = metacache.fetch(self.list, self.lang, self.user)

        for r in range(0, total, 40):
            threads = []
            for i in range(r, r + 40):
                if i < total:
                    threads.append(workers.Thread(self.super_info, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            if self.meta:
                metacache.insert(self.meta)

        self.list = [i for i in self.list if not i['imdb'] == '0']

        self.list = metacache.local(self.list, self.tm_img_link, 'poster3', 'fanart2')

        if self.fanart_tv_user == '':
            for i in self.list:
                i.update({'clearlogo': '0', 'clearart': '0'})
Example #5
    def worker(self):
        total = len(self.list)

        threads = []
        for r in range(0, total, 30):
            for i in range(r, r + 30):
                if i < total:
                    threads.append(workers.Thread(self.get_fanart, i))
        [i.start() for i in threads]

        timeout = 60
        progress = control.progressDialog
        progress.create(control.addonInfo('name'), '')
        progress.update(0,
                        line1="%s episodes found." % total,
                        line2="Loading recent information.")

        for i in range(timeout * 10):
            if xbmc.abortRequested: return sys.exit()

            progress.update(
                int((100 / float(len(threads))) *
                    len([x for x in threads if x.is_alive() is False])),
                line3="%s animes remaining." %
                len([x for x in threads if x.is_alive() is True]))
            if progress.iscanceled(): break

            is_alive = [x.is_alive() for x in threads]
            if all(x is False for x in is_alive): break
            xbmc.sleep(100)
        progress.close()
Example #6
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            self.items = []
            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search.format(urllib.quote(query))
            self._get_items(url)
            self.hostDict = hostDict + hostprDict
            threads = []
            for i in self.items:
                threads.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self._sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('---Kickass2 Testing - Exception: \n' + str(failure))
            return self._sources
Example #7
def get_4k():

    config = cache.get(client.request, 1, GLOBOPLAY_CONFIGURATION)

    if not config or 'features' not in config or 'videos4k' not in config['features']:
        return []

    video_ids = config['features']['videos4k']

    if not video_ids:
        return []

    threads = []
    programs = []

    for video_id in video_ids:
        threads.append(
            workers.Thread(__add_search_results, __get_program_info, programs,
                           video_id))

    [i.start() for i in threads]
    [i.join() for i in threads]

    return programs
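
`__add_search_results` is referenced above but not defined in this excerpt. Judging from the call signature, it applies the passed fetch function to one id and collects the result into the shared `programs` list; a plausible sketch (implementation assumed):

def __add_search_results(fetch_fn, results, video_id):
    # run one fetch on this worker thread and collect the result;
    # list.append is effectively atomic under CPython's GIL
    info = fetch_fn(video_id)
    if info:
        results.append(info)
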
Example #8
    def keep_sending_video(self, dest_stream):
        global average_download_speed
        try:
            average_download_speed = float(
                control.setting('average_download_speed')) if control.setting(
                    'average_download_speed') else 0.0
            queue = Queue.Queue()
            worker = workers.Thread(queue_processor, queue, dest_stream,
                                    self.g_stopEvent)
            worker.daemon = True
            worker.start()
            download_internal(self.url, queue, self.maxbitrate,
                              self.g_stopEvent)
            control.setSetting('average_download_speed',
                               str(average_download_speed))

            if not self.g_stopEvent.isSet():
                log("WAITING FOR QUEUE...")
                queue.join()
                log("DONE.")
                self.g_stopEvent.set()
                log("WAITING FOR WORKER THREAD...")
                worker.join()
                log("DONE.")
        except:
            traceback.print_exc()
        finally:
            self.g_stopEvent.set()
Example #9
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None:
                return self._sources
            if debrid.status() is False:
                raise Exception()
            if debrid.tor_enabled() is False:
                raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.search.format('8', urllib.quote(query))
            else:
                url = self.search.format('4', urllib.quote(query))
            self.hostDict = hostDict + hostprDict
            headers = {'User-Agent': client.agent()}
            _html = client.request(url, headers=headers)
            threads = []
            for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
                threads.append(workers.Thread(self._get_items, i))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self._sources
        except Exception:
            return self._sources
Example #10
    def get(self):
        self.items = []
        threads = []

        try:
            lib = control.jsonrpc(
                '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": {"properties" : ["thumbnail", "imdbnumber", "title", "year"]}, "id": 1}'
            )
            lib = unicode(lib, 'utf-8', errors='ignore')
            lib = json.loads(lib)['result']['tvshows']

            for item in lib:
                poster = item['thumbnail']
                title = item['title'].encode('utf-8')
                imdb = item['imdbnumber']

                w = workers.Thread(self.getTvdbAiring, imdb, title, poster)
                threads.append(w)

            [i.start() for i in threads]

            for i in range(0, 60):
                is_alive = [x for x in threads if x.is_alive()]
                if not is_alive: break
                time.sleep(1)
        except:
            pass
        for title, poster, label in self.items:
            control.infoDialog(label, heading=title.upper(), icon=poster)
            time.sleep(3)
Example #11
def PLAY_FULL(name, url, iconimage):
    albumlist = []
    link = client.request(url)
    soup = bs(link)
    threads = []
    album_icon = iconimage
    print("ALBUM ICON", album_icon)
    r = soup.find('div', {'class': 'artist-songs'})
    global count

    reg = re.compile(
        '<div class="song-name"><a href="([^"]+)">(.*?)</a></div>')
    result = re.findall(reg, str(r))
    count = 0
    playlist = xbmc.PlayList(0)
    playlist.clear()
    progressDialog = control.progressDialog
    progressDialog.create('Karma', '')
    progressDialog.update(0)
    for url, title in result:
        if progressDialog.iscanceled(): break
        count += 1
        url = re.sub('/track/', '/download/', url)
        url = base_url + url
        title = client.replaceHTMLCodes(title)
        progress = (float(count) / float(len(result))) * 100
        progressDialog.update(int(progress),
                              'Retrieving and Checking Songs...', title)
        w = workers.Thread(fetch_album, url, title, album_icon)
        w.start()
        w.join()  # joining immediately makes each fetch run sequentially
    xbmc.Player().play(playlist)
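
Note that `w.start()` followed immediately by `w.join()` inside the loop checks the songs one at a time. If concurrency were actually wanted here, the batch pattern used by the other examples would apply; a sketch, where `prepared` stands for a pre-built list of `(url, title)` pairs (a hypothetical name, not in the original):

    # concurrent variant (sketch): start every fetch, then wait for all
    threads = [workers.Thread(fetch_album, u, t, album_icon)
               for u, t in prepared]
    [i.start() for i in threads]
    [i.join() for i in threads]
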
Example #12
    def worker(self, level=1):
        self.meta = []
        total = len(self.list)

        self.fanart_tv_headers = {}
        fanart_tv_level = 'user'
        fanart_tv_user = control.setting('fanart.tv.user')
        self.fanart_tv_headers.update({'api-key': 'YTc2MGMyMTEzYTM1OTk5NzFiN2FjMWU0OWUzMTAyMGQ='.decode('base64')})
        if level == 1 and not fanart_tv_user == '':
            self.fanart_tv_headers.update({'client-key': fanart_tv_user})
            #try: fanart_tv_level = json.loads(client.request(self.fanart_tv_level_link, headers=self.fanart_tv_headers))['level']
            #except: pass

        for i in range(0, total): self.list[i].update({'metacache': False})
        self.list = metacache.fetch(self.list, self.lang)

        for r in range(0, total, 40):
            threads = []
            for i in range(r, r+40):
                if i < total: threads.append(workers.Thread(self.super_info, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            if len(self.meta) > 0: metacache.insert(self.meta)

        if fanart_tv_level == 'user':
            for i in self.list: i.update({'poster2': '0', 'fanart2': '0', 'banner2': '0', 'clearlogo': '0', 'clearart': '0'})

        self.list = [i for i in self.list if not i['tvdb'] == '0']
Example #13
    def worker(self, level=1):
        self.meta = []
        total = len(self.list)

        self.fanart_tv_headers = {'api-key': 'NDZkZmMyN2M1MmE0YTc3MjY3NWQ4ZTMyYjdiY2E2OGU='.decode('base64')}
        if not self.fanart_tv_user == '':
            self.fanart_tv_headers.update({'client-key': self.fanart_tv_user})

        for i in range(0, total): self.list[i].update({'metacache': False})

        self.list = metacache.fetch(self.list, self.lang, self.user)

        for r in range(0, total, 40):
            threads = []
            for i in range(r, r+40):
                if i < total: threads.append(workers.Thread(self.super_info, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            if self.meta: metacache.insert(self.meta)

        self.list = [i for i in self.list if not i['imdb'] == '0']

        self.list = metacache.local(self.list, self.tm_img_link, 'poster3', 'fanart2')

        if self.fanart_tv_user == '':
            for i in self.list: i.update({'clearlogo': '0', 'clearart': '0'})
Example #14
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            self.items = []
            if url is None:
                return self._sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search.format(urllib.quote(query))

            self._get_items(url)
            self.hostDict = hostDict + hostprDict
            threads = []
            for i in self.items:
                threads.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except Exception:
            return self._sources
Example #15
    def worker(self):
        threads = []
        for item_position, item in enumerate(self.list):
            threads.append(workers.Thread(self.set_info, item_position, item))
        [i.start() for i in threads]
        [i.join() for i in threads]
        self.build_tvshow_content()
Example #16
    def __download_single_segment(self, segment):
        if self.g_stopEvent.isSet():
            return

        segment_number = self.get_segment_number(segment.absolute_uri)

        if str(segment_number) in self.media_buffer or int(
                self.requested_segment) > int(segment_number):
            log("SKIPPING SEGMENT %s" % segment_number)
            return

        start = datetime.datetime.now()
        segment_size = 0.0
        segment_data = []
        for chunk in self.download_chunks(segment.absolute_uri):
            if self.g_stopEvent.isSet():
                return

            segment_size += len(chunk)
            segment_data.append(chunk)

        log("SEGMENT %s READY!" % segment_number)

        self.media_buffer[str(segment_number)] = b''.join(segment_data)

        stop = datetime.datetime.now()

        worker = workers.Thread(self.__calculate_bitrate, start, stop,
                                segment_size, segment.duration)
        worker.daemon = True
        worker.start()
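
`__calculate_bitrate` runs on its own daemon thread but is not shown. Given its arguments (wall-clock start/stop, bytes downloaded, and the segment's nominal duration), it presumably derives a download speed and folds it into `self.average_download_speed`; one way it could look (the smoothing weights are an assumption, not sourced):

    def __calculate_bitrate(self, start, stop, segment_size, duration):
        # derive bytes per second for this segment and fold it into a
        # running average; the 0.8/0.2 weights are assumed
        elapsed = (stop - start).total_seconds()
        if elapsed <= 0:
            return
        speed = segment_size / elapsed
        self.average_download_speed = (0.8 * self.average_download_speed +
                                       0.2 * speed)
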
Example #17
def get_content(category, subcategory):

    response = _get_page(category)

    item = next((item for item in response.get('response', {}).get('categories', []) if item.get('title', '') == subcategory), {})

    if not item.get('contents', []):
        if item.get('type') == 'continue_watching':
            url = 'https://www.nowonline.com.br/AGL/1.0/R/ENG/{platform}/ALL/NET/USER/BOOKMARKS'.format(platform=PLATFORM)
            response = request_logged_in(url, False)
            contents = [result.get('content', {}) for result in response.get('resultObj', []) if result.get('content', {})]
        else:
            item_id = item.get('id', -1)
            url = 'https://www.nowonline.com.br/avsclient/categories/{id}/contents?offset=1&channel={platform}&limit=30'.format(platform=PLATFORM, id=item_id)
            response = request_logged_in(url)
            contents = response.get('response', {}).get('contents', [])
    else:
        contents = item.get('contents', [])

    threads = [{
                'thread': workers.Thread(_get_content, content.get('id', -1)),
                'id': content.get('id', -1)
                } for content in contents if content.get('type', '') in ['tvshow', 'movie', 'episode']]

    [i['thread'].start() for i in threads]
    [i['thread'].join() for i in threads]

    results = []
    for content in contents:
        match = next((next(iter(t['thread'].get_result().get('response', [])), {})
                      for t in threads if t['id'] == content.get('id', -1)), content)
        results.append(_hydrate_content(match))
    return results
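
This example calls `get_result()` on its threads, so the `workers.Thread` used here must capture the target's return value, unlike the fire-and-forget wrapper sketched after Example #1. A variant that would satisfy those calls (a sketch, not the actual module):

import threading

class Thread(threading.Thread):
    # result-capturing variant implied by the get_result() calls above
    def __init__(self, target, *args):
        threading.Thread.__init__(self)
        self._target = target
        self._args = args
        self._result = None

    def run(self):
        self._result = self._target(*self._args)

    def get_result(self):
        # meaningful only after join(); stays None if the target raised
        return self._result
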
Example #18
    def worker(self, level=1):
        self.meta = []
        total = len(self.list)

        self.fanart_tv_headers = {'api-key': 'YTc2MGMyMTEzYTM1OTk5NzFiN2FjMWU0OWUzMTAyMGQ='.decode('base64')}
        if not self.fanart_tv_user == '':
            self.fanart_tv_headers.update({'client-key': self.fanart_tv_user})

        for i in range(0, total): self.list[i].update({'metacache': False})

        self.list = metacache.fetch(self.list, self.lang, self.user)

        for r in range(0, total, 40):
            threads = []
            for i in range(r, r+40):
                if i < total: threads.append(workers.Thread(self.super_info, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            if self.meta: metacache.insert(self.meta)

        self.list = [i for i in self.list if not i['tvdb'] == '0']

        if self.fanart_tv_user == '':
            for i in self.list: i.update({'clearlogo': '0', 'clearart': '0'})
Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []

            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')

            hdlr = 'S%02d' % (int(data['season'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = self.scraper.get(url).content

            posts = client.parseDOM(r, 'figure')

            items = []
            for post in posts:
                try:
                    tit = client.parseDOM(post, 'img', ret='title')[0]
                    tit = client.replaceHTMLCodes(tit)
                    t = tit.split(hdlr)[0].replace(data['year'], '').replace(
                        '(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue

                    if hdlr not in tit:
                        continue

                    url = client.parseDOM(post, 'a', ret='href')[0]

                    items.append((url, tit))

                except:
                    pass

            threads = []
            for i in items:
                threads.append(
                    workers.Thread(self._get_sources, i[0], i[1], hostDict,
                                   hostprDict))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self._sources
        except:
            return self._sources
Example #20
    def get(self):
        channels = [
            ('ActionWomen', '1811'), ('ActionWomen HD', '4020'),
            ('Christmas 24', '4420'), ('Christmas 24+', '4421'),
            ('Film4', '1627'), ('Film4 HD', '4044'), ('Film4+', '1629'),
            ('Horror Channel', '3605'), ('Horror Channel+', '4502'),
            ('ROK', '3542'),
            ('Sky Action', '1001'), ('Sky Action HD', '4014'),
            ('Sky Christmas', '1816'), ('Sky Christmas HD', '4016'),
            ('Sky Comedy', '1002'), ('Sky Comedy HD', '4019'),
            ('Sky Family', '1808'), ('Sky Family HD', '4018'),
            ('Sky Greats', '1815'), ('Sky Greats HD', '4015'),
            ('Sky Hits', '1814'), ('Sky Hits HD', '4033'),
            ('Sky Premiere', '1409'), ('Sky Premiere HD', '4021'), ('Sky Premiere+', '1823'),
            ('Sky ScFi/Horror', '1807'), ('Sky ScFi/Horror HD', '4017'),
            ('Sky Thriller', '1818'), ('Sky Thriller HD', '4062'),
            ('Sony Action', '3708'), ('Sony Action+', '3721'),
            ('Sony Christmas', '3643'), ('Sony Christmas+', '3751'),
            ('Sony Movies', '3709'), ('Sony Movies+', '3771'),
            ('TalkingPictures', '5252'),
            ('TCM Movies', '5605'), ('TCM Movies+', '5275')
        ]

        threads = []
        for i in channels: threads.append(workers.Thread(self.sky_list, i[0], i[1]))
        [i.start() for i in threads]
        [i.join() for i in threads]
        del threads

        self_items = []
        filtered_items = set()

        for t, y, c, r in self.items:
            if t not in filtered_items:
                filtered_items.add(t)
                self_items.append((t, y, c, r))

        threads = []
        for i in self_items: threads.append(workers.Thread(self.items_list, i))
        [i.start() for i in threads]
        [i.join() for i in threads]
        del threads

        self.list = sorted(self.list, key=lambda k: k['channel'].lower())

        self.channelDirectory(self.list)
        return self.list
Example #21
	def tmdb_list(self, url):
		try:
			result = get_request(url % API_key)
			items = result['results']
		except: return
		self.list = []
		sortList = []
		try:
			page = int(result['page'])
			total = int(result['total_pages'])
			if page >= total: raise Exception()
			if 'page=' not in url: raise Exception()
			next = '%s&page=%s' % (url.split('&page=', 1)[0], page+1)
		except: next = ''
		for item in items:
			try:
				values = {}
				values['next'] = next 
				values['tmdb'] = str(item.get('id', '')) if item.get('id') else ''
				sortList.append(values['tmdb'])
				values['imdb'] = ''
				values['tvdb'] = ''
				values['metacache'] = False
				self.list.append(values)
			except:
				log_utils.error()

		def items_list(i):
			if self.list[i]['metacache']: 	return
			try:
				values = {}
				tmdb = self.list[i].get('tmdb', '')
				# movie_meta = self.get_movie_meta(tmdb)
				movie_meta = cache.get(self.get_movie_meta, 96, tmdb)
				values.update(movie_meta)
				imdb = values['imdb']
				if not self.disable_fanarttv:
					extended_art = cache.get(fanarttv.get_movie_art, 168, imdb, tmdb)
					if extended_art: values.update(extended_art)
				values = dict((k,v) for k, v in control.iteritems(values) if v is not None and v != '') # remove empty keys so .update() doesn't over-write good meta with empty values.
				self.list[i].update(values)
				meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': '', 'lang': self.lang, 'user': self.user, 'item': values}
				self.meta.append(meta)
			except:
				log_utils.error()

		self.list = metacache.fetch(self.list, self.lang, self.user)
		threads = []
		for i in range(0, len(self.list)):
			threads.append(workers.Thread(items_list, i))
		[i.start() for i in threads]
		[i.join() for i in threads]
		if self.meta:
			self.meta = [i for i in self.meta if i.get('tmdb')] # without this "self.list=" below removes missing tmdb but here still writes these cases to metacache?
			metacache.insert(self.meta)
		sorted_list = []
		self.list = [i for i in self.list if i.get('tmdb')]
		for i in sortList:
			sorted_list += [item for item in self.list if item['tmdb'] == i] # resort to match the TMDb list, because threading will lose the order.
		return sorted_list
Example #22
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []

            if url is None:
                return self._sources

            if debrid.status() is False:
                raise Exception()

            self.hostDict = hostDict + hostprDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')

            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            self.year = data['year']

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url)

            # switch to client.parseDOM() to rid import
            posts = dom_parser.parse_dom(r, 'div', {'class': 'eTitle'})
            posts = [
                dom_parser.parse_dom(i.content, 'a', req='href') for i in posts
                if i
            ]
            posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content))
                     for i in posts if i]
            posts = [[i[0], i[1]] for i in posts]

            threads = []
            for i in posts:
                threads.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except:
            source_utils.scraper_error('ONLINESERIES')
            return self._sources
Example #23
    def get_channels(self):

        live = []

        threads = []

        if control.is_globoplay_available():
            threads.append(workers.Thread(self.append_result, globoplay.Indexer().get_live_channels, live))

        if control.is_globosat_available():
            threads.append(workers.Thread(self.append_result, globosat.Indexer().get_live, live))
            if control.show_adult_content:
                threads.append(workers.Thread(self.append_result, sexyhotplay.get_live_channels, live))

        threads.append(workers.Thread(self.append_result, futuraplay.get_live_channels, live))

        [i.start() for i in threads]
        [i.join() for i in threads]

        # live.append({
        #     'slug': 'bandnews',
        #     'name': 'Bandnews',
        #     'title': 'Bandnews',
        #     'sorttitle': 'Bandnews',
        #     'logo': os.path.join(control.artPath(), 'logo_bandnews.png'),
        #     # 'clearlogo': os.path.join(control.artPath(), 'logo_bandnews.png'),
        #     'color': None,
        #     'fanart': os.path.join(control.artPath(), 'fanart_bandnews.jpg'),
        #     'thumb': None,
        #     'playable': 'true',
        #     'plot': None,
        #     'id': -1,
        #     'channel_id': -1,
        #     'duration': None,
        #     'url': 'http://evcv.mm.uol.com.br:1935/band/bandnews/playlist.m3u8'
        # })

        # live = sorted(live, key=lambda k: k['sorttitle'])
        # live = sorted(live, key=lambda k: '1' if 'isFolder' in k and k['isFolder'] == 'true' else '0')
        # live = sorted(live, key=lambda k: k['dateadded'] if 'dateadded' in k else None, reverse=True)

        # shuffle(live)

        self.channel_directory(live)

        return live
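
`self.append_result` is the glue that lets each indexer run on its own thread while feeding one shared list. It is not defined in this excerpt, but the call shape suggests something like the following sketch (body assumed):

    def append_result(self, fetch_fn, results):
        # call one indexer on this worker thread and merge whatever it
        # returns into the shared list; extending a list with another list
        # is effectively atomic under CPython's GIL, so no lock is needed
        items = fetch_fn()
        if items:
            results.extend(items)
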
Example #24
	def popupAbout(self, wait=False):
		try:
			# So that the interface can load in the background while the splash loads.
			thread = workers.Thread(self.__popupAbout)
			thread.start()
			if wait: thread.join()
		except:
			pass
Example #25
    def get_channels(self):

        live = []

        threads = []

        if control.is_globoplay_available():
            threads.append(
                workers.Thread(self.append_result,
                               globoplay.Indexer().get_live_channels, live))

        if control.is_globosat_available():
            threads.append(
                workers.Thread(self.append_result,
                               globosat.Indexer().get_live, live))
            if control.show_adult_content:
                threads.append(
                    workers.Thread(self.append_result,
                                   sexyhotplay.Indexer().get_live_channels,
                                   live))

        if control.is_oiplay_available():
            threads.append(
                workers.Thread(self.append_result, oiplay.get_live_channels,
                               live))

        if control.is_tntplay_available():
            threads.append(
                workers.Thread(self.append_result, tntplay.get_live_channels,
                               live))

        [i.start() for i in threads]
        [i.join() for i in threads]

        live = sorted(live, key=lambda k: k['sorttitle'])
        # live = sorted(live, key=lambda k: '1' if 'isFolder' in k and k['isFolder'] == 'true' else '0')
        live = sorted(live,
                      key=lambda k: k['dateadded']
                      if 'dateadded' in k else None,
                      reverse=True)

        # shuffle(live)

        self.channel_directory(live)

        return live
Example #26
    def init(self, out_stream, url, proxy=None, stopEvent=None, maxbitrate=0):

        try:
            from requests.packages.urllib3.exceptions import InsecureRequestWarning
            requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
            self.session = requests.Session()
            self.session.cookies = self.cookieJar
            self.clientHeader = None
            self.proxy = proxy

            if self.proxy is not None and len(self.proxy) == 0:
                self.proxy = None

            self.use_proxy = False

            if stopEvent: stopEvent.clear()

            self.g_stopEvent = stopEvent
            self.maxbitrate = maxbitrate

            if '|' in url:
                sp = url.split('|')
                url = sp[0]
                self.clientHeader = sp[1]
                log(self.clientHeader)
                self.clientHeader = urlparse.parse_qsl(self.clientHeader)
                log('header received now url and headers are %s | %s' % (url, self.clientHeader))

            self.url = url

            self.queue = Queue.Queue()

            worker = workers.Thread(self.__bandwidth_selector)
            worker.daemon = True
            worker.start()

            if self.DOWNLOAD_IN_BACKGROUND:
                worker = workers.Thread(self.__load_buffer)
                worker.daemon = True
                worker.start()

            return True
        except:
            traceback.print_exc()

        return False
Example #27
	def popupDonations(self, currency, wait=False):
		try:
			# So that the interface can load in the background while the splash loads.
			thread = workers.Thread(self.__popupDonations, currency)
			thread.start()
			if wait: thread.join()
		except:
			pass
Example #28
	def tmdb_collections_list(self, url):
		if not url: return
		try:
			result = get_request(url)
			if '/3/' in url: items = result['items']
			else: items = result['results']
		except: return
		self.list = []
		try:
			page = int(result['page'])
			total = int(result['total_pages'])
			if page >= total: raise Exception()
			if 'page=' not in url: raise Exception()
			next = '%s&page=%s' % (url.split('&page=', 1)[0], page+1)
		except: next = ''
		for item in items:
			try:
				values = {}
				values['next'] = next 
				media_type = item.get('media_type', '')
				if media_type == 'movie': 	continue
				values['tmdb'] = str(item.get('id', '')) if item.get('id') else ''
				values['metacache'] = False 
				self.list.append(values)
			except:
				log_utils.error()

		def items_list(i):
			if self.list[i]['metacache']: return
			try:
				values = {}
				tmdb = self.list[i].get('tmdb', '')
				# showSeasons_meta =self.get_showSeasons_meta(tmdb)
				showSeasons_meta = cache.get(self.get_showSeasons_meta, 96, tmdb)
				values.update(showSeasons_meta)
				imdb = values['imdb']
				tvdb = values['tvdb']
				if not self.disable_fanarttv:
					extended_art = cache.get(fanarttv.get_tvshow_art, 168, tvdb)
					if extended_art: values.update(extended_art)
				values = dict((k,v) for k, v in control.iteritems(values) if v is not None and v != '') # remove empty keys so .update() doesn't over-write good meta with empty values.
				self.list[i].update(values)
				meta = {'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'lang': self.lang, 'user': self.user, 'item': values}
				self.meta.append(meta)
			except:
				log_utils.error()

		self.list = metacache.fetch(self.list, self.lang, self.user)
		threads = []
		for i in range(0, len(self.list)):
			threads.append(workers.Thread(items_list, i))
		[i.start() for i in threads]
		[i.join() for i in threads]
		if self.meta:
			self.meta = [i for i in self.meta if i.get('tmdb')] # without this "self.list=" below removes missing tmdb but here still writes these cases to metacache?
			metacache.insert(self.meta)
		self.list = [i for i in self.list if i.get('tmdb')]
		return self.list
Example #29
    def addSearch(self, url=None):
        try:
            link = 'http://cosmix.offshorepastebin.com/Weblinks/adult/search.xml'

            if url is None or url == '':
                keyboard = control.keyboard(
                    '',
                    control.lang(30702).encode('utf-8'))
                keyboard.doModal()
                if not (keyboard.isConfirmed()): return
                url = keyboard.getText()

            if url is None or url == '': return

            def search():
                return [url]

            # read the cached search history (the huge timeout means "never refresh")
            query = cache.get(search, 600000000, table='rel_srch')

            def search():
                # merge the new term into the history, dropping duplicates
                return [
                    x for y, x in enumerate((query + [url]))
                    if x not in (query + [url])[:y]
                ]

            # a timeout of 0 forces a refresh, so the merged list is written back
            cache.get(search, 0, table='rel_srch')

            links = client.request(link)
            links = re.findall('<link>(.+?)</link>', links)
            links = [i for i in links if str(i).startswith('http')]

            self.list = []
            threads = []
            for link in links:
                threads.append(workers.Thread(self.jizzplanet_list, link))
            [i.start() for i in threads]
            [i.join() for i in threads]

            self.list = [
                i for i in self.list if url.lower() in i['name'].lower()
            ]

            for i in self.list:
                try:
                    name = ''
                    if not i['vip'] in ['jizzplanet TV']:
                        name += '[B]%s[/B] | ' % i['vip'].upper()
                    name += i['name']
                    i.update({'name': name})
                except:
                    pass

            for i in self.list:
                i.update({'content': 'videos'})
            self.addDirectory(self.list)
        except:
            pass
Example #30
    def get(self, netloc, ua, timeout):
        threads = []

        for i in range(0, 15):
            threads.append(workers.Thread(self.get_cookie, netloc, ua, timeout))
        [i.start() for i in threads]

        for i in range(0, 30):
            if self.cookie is not None: return self.cookie
            time.sleep(1)
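
This last pattern polls a shared attribute that fifteen racing `get_cookie` threads may set. An equivalent formulation with `threading.Event` (not from the source; it assumes, as the original implies, that `get_cookie` stores its result in `self.cookie`) avoids the one-second polling granularity:

    def get(self, netloc, ua, timeout):
        # variant sketch using threading.Event (import threading) instead of
        # the 1-second polling loop; the first successful worker wakes us up
        done = threading.Event()

        def probe():
            self.get_cookie(netloc, ua, timeout)
            if self.cookie is not None:
                done.set()

        for _ in range(15):
            workers.Thread(probe).start()

        done.wait(30)  # cap the wait at 30 seconds, like the polling loop above
        return self.cookie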