    def parse(self):
        helper.start('MediaContainerList.parse')
        if self.soup is None:
            return

        timestamper = t_s.TimeStamper('MediaContainerList.parse')
        table = self.soup.find('table', class_='listing')
        if table is None:
            self.links = self.__parse_upcoming()
            timestamper.stamp_and_dump()
            return

        self.links = table.find_all('a', {'href': re.compile(r'/Anime/')})
        helper.log_debug('# of links found with href=/Anime/: %d' %
                         len(self.links))

        # Pagination support
        pager_section = self.soup.find('ul', class_='pager')
        if pager_section is not None:
            page_links = pager_section.find_all('a')
            if "Next" in page_links[-2].string and "Last" in page_links[
                    -1].string:
                self.links.append(page_links[-2])
                self.links.append(page_links[-1])
                self.has_next_page = True
        helper.end('MediaContainerList.parse')
        timestamper.stamp_and_dump()
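
The pager check above only inspects the last two anchors inside the ul.pager element. A minimal standalone sketch of that check against made-up markup (the HTML below is hypothetical, not taken from the real site):

from bs4 import BeautifulSoup

sample_html = '''
<ul class="pager">
  <li><a href="/AnimeList?page=1">First</a></li>
  <li><a href="/AnimeList?page=1">Previous</a></li>
  <li><a href="/AnimeList?page=3">Next</a></li>
  <li><a href="/AnimeList?page=10">Last</a></li>
</ul>
'''

soup = BeautifulSoup(sample_html, 'html.parser')
pager_section = soup.find('ul', class_='pager')
has_next_page = False
if pager_section is not None:
    page_links = pager_section.find_all('a')
    # parse() treats the second-to-last anchor as "Next" and the last as "Last"
    if 'Next' in page_links[-2].string and 'Last' in page_links[-1].string:
        has_next_page = True
print(has_next_page)  # True for the sample markup above
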
Code example #2
    def __init__(self, url_val=args.value, form_data=None):
        timestamper = t_s.TimeStamper('MediaList.init')
        WebList.__init__(self, url_val, form_data)
        self.has_next_page = False
        self.data_tips = []
        self.media_type_list = []
        timestamper.stamp_and_dump()
    def add_items(self, title_prefix=None):
        timestamper = t_s.TimeStamper('MediaContainerList.add_items')
        end_dir = title_prefix is None
        title_prefix = title_prefix if title_prefix else ''
        if end_dir:
            helper.set_content('tvshows')
        iter_links = self.links[:-2] if self.has_next_page else self.links

        # Filter out the episode links for ongoing series
        mc_links = []
        idx = 0
        for link in iter_links:
            url = link['href']
            if re.search(r'(\?id=)', url) is not None:
                continue
            mc_links.append((link.string.strip(), url, idx))
            idx += 1
        timestamper.stamp('Initial loop')

        self.links_with_metadata = [None] * idx
        pool = threadpool.ThreadPool(1 if helper.debug_metadata_threads() else 4)
        pool.map(self.worker, mc_links)
        pool.wait_completion()

        timestamper.stamp('Grabbing metadata with threads')

        for (name, url, metadata, media_type) in self.links_with_metadata:
            icon, fanart = self._get_art_from_metadata(metadata)
            if media_type == 'tvshow' and (
                    ' (OVA)' in name or ' Specials' in name
                    or re.search(r'( OVA)( \(((Sub)|(Dub))\))?$', name) is not None
                    or re.search(r' (Special)$', name) is not None):
                media_type = 'special'
            query = self._construct_query(url, 'mediaList', metadata, name,
                                          media_type)

            contextmenu_items = self._get_contextmenu_items(
                url, name, metadata, media_type)
            metadata['title'] = title_prefix + name  # adjust title for sub and dub
            helper.add_directory(query,
                                 metadata,
                                 img=icon,
                                 fanart=fanart,
                                 contextmenu_items=contextmenu_items,
                                 total_items=len(mc_links))

        if self.has_next_page:
            query = self._construct_query(self.links[-2]['href'],
                                          'mediaContainerList')
            helper.add_directory(query, {'title': 'Next'})
            query = self._construct_query(self.links[-1]['href'],
                                          'mediaContainerList')
            helper.add_directory(query, {'title': 'Last'})

        if end_dir:
            helper.end_of_directory()
        timestamper.stamp_and_dump('Adding all items')
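
The tvshow-to-special override in the loop above hinges on two substring checks plus two regexes. A small standalone sketch of the same classification (the sample titles are made up):

import re

def classify(name, media_type='tvshow'):
    # Same checks as add_items(): OVAs and specials are downgraded from 'tvshow'
    if media_type == 'tvshow' and (
            ' (OVA)' in name or ' Specials' in name
            or re.search(r'( OVA)( \(((Sub)|(Dub))\))?$', name) is not None
            or re.search(r' (Special)$', name) is not None):
        return 'special'
    return media_type

for title in ['Example Show', 'Example Show OVA (Sub)', 'Example Show Special']:
    print(title, '->', classify(title))
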
Code example #4
    def parse(self):
        if self.soup is None:
            return

        helper.start('MediaList.parse')
        timestamper = t_s.TimeStamper('MediaList.parse')
        self.links, self.data_tips, self.media_type_list = self._parse_links_from_grid()
        helper.log_debug('# of links found with class=name: %d' % len(self.links))
        self._find_next_page_link()
        helper.end('MediaList.parse')
        timestamper.stamp_and_dump()
Code example #5
    def add_items(self, title_prefix=None):
        helper.start('MediaList.add_items')
        timestamper = t_s.TimeStamper('MediaList.add_items')
        end_dir = (title_prefix is None)
        title_prefix = title_prefix if title_prefix else ''
        if end_dir:
            helper.set_content('tvshows')
        mlinks = self.links[:-1] if self.has_next_page else self.links
        # Grabbing metadata is a huge bottleneck, so grabbing it in parallel speeds things up
        self.links_with_metadata = [None] * len(mlinks)
        if helper.debug_metadata_threads():
            # Run the workers serially so metadata problems are easier to debug
            for item in enumerate(mlinks):
                self.worker(item)
        else:
            # ThreadPool is presumably multiprocessing.pool.ThreadPool (import not shown here)
            pool = ThreadPool(8)
            pool.map(self.worker, enumerate(mlinks))
            pool.close()
            pool.join()
        timestamper.stamp('Grabbing metadata with threads')

        try:
            for (name, url, metadata, media_type) in self.links_with_metadata:
                icon, fanart = self._get_art_from_metadata(metadata)
                query = self._construct_query(url, 'episodeList', metadata, name, media_type)
                contextmenu_items = self._get_contextmenu_items(url, name, metadata, media_type)
                metadata['title'] = title_prefix + name  # adjust title for sub and dub
                helper.add_directory(query, metadata, img=icon, fanart=fanart,
                                     contextmenu_items=contextmenu_items, total_items=len(mlinks))
        except Exception:
            # Swallow errors from malformed metadata rather than failing the whole listing
            pass
        self._add_next_page_link()

        if end_dir:
            helper.end_of_directory()
        timestamper.stamp_and_dump('Adding all items')
        helper.end('MediaList.add_items')
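
Both add_items() variants use the same pattern for the parallel metadata lookups: preallocate a results list and have each worker write its result at its own index, so the output order matches the input order no matter which thread finishes first. A standalone sketch of that pattern, assuming ThreadPool is multiprocessing.pool.ThreadPool (the import is not shown in the snippet) and using a hypothetical fetch_metadata stand-in for the real per-link lookup:

from multiprocessing.pool import ThreadPool

links = ['/Anime/example-a', '/Anime/example-b', '/Anime/example-c']
results = [None] * len(links)  # mirrors self.links_with_metadata

def fetch_metadata(url):
    # Hypothetical placeholder for the real (slow) metadata lookup
    return {'url': url, 'title': url.rsplit('/', 1)[-1]}

def worker(indexed_link):
    idx, url = indexed_link
    results[idx] = fetch_metadata(url)

pool = ThreadPool(8)
pool.map(worker, enumerate(links))
pool.close()
pool.join()
print(results)
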
Code example #6
    You should have received a copy of the GNU General Public License
    along with UKAP.  If not, see <http://www.gnu.org/licenses/>.
'''


import time
t_start = time.time()

from resources.lib.common.helpers import helper
from resources.lib.common.args import args
from resources.lib.common import controller
import dat1guy.shared.timestamper as t_s
t_s.timestamps_on = helper.debug_timestamp()

timestamper = t_s.TimeStamper('default.py', t0=t_start, t1_msg='Default imports')
helper.location("Default entry point, version %s" % helper.get_version())

if helper.debug_import():
    from resources.lib.list_types import local_list, media_container_list, episode_list, movie_listing, specials_list, bookmarklist
    from resources.lib.players import videoplayer, qualityplayer
    from resources.lib.metadata import metadatafinder
    from resources.lib.metadata.metadatahandler import meta
    timestamper.stamp('Importing everything else')

if args.action is None:
    controller.Controller().main_menu()
elif args.action == 'localList':
    controller.Controller().show_local_list()
elif args.action == 'mediaContainerList':
    controller.Controller().show_media_container_list()
    def __init__(self, url_val=args.value, form_data=None):
        timestamper = t_s.TimeStamper('MediaContainerList.init')
        WebList.__init__(self, url_val, form_data)
        self.has_next_page = False
        timestamper.stamp_and_dump()
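
The dat1guy.shared.timestamper module itself is not included in these examples. Based only on the calls that appear above (TimeStamper(name, t0=..., t1_msg=...), stamp(msg), stamp_and_dump(msg=None), and the module-level timestamps_on flag), a minimal illustrative stand-in could look like the following; the real module may differ:

import time

timestamps_on = True  # the entry point sets this from helper.debug_timestamp()

class TimeStamper(object):
    # Illustrative stand-in only, not the real dat1guy.shared.timestamper
    def __init__(self, name, t0=None, t1_msg=None):
        self.name = name
        self.entries = []
        self.last = t0 if t0 is not None else time.time()
        if t0 is not None and t1_msg is not None:
            self.stamp(t1_msg)

    def stamp(self, msg):
        now = time.time()
        self.entries.append('%s | %s: %.3fs' % (self.name, msg, now - self.last))
        self.last = now

    def stamp_and_dump(self, msg=None):
        if msg is not None:
            self.stamp(msg)
        if timestamps_on:
            for entry in self.entries:
                print(entry)
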