Example #1
 def __get_scrapers(self, include_disabled, exclude):
     klasses = universalscrapers.relevant_scrapers(self.host, include_disabled, exclude=exclude)
     scrapers = []
     for klass in klasses:
         if klass in scraper_cache:
             scrapers.append(scraper_cache[klass])
         else:
             scraper_cache[klass] = klass()
             scrapers.append(scraper_cache[klass])
     return scrapers
Example #2
File: hl.py Project: varunrai/scrapers
 def __get_scrapers(self, include_disabled, exclude):
     klasses = universalscrapers.relevant_scrapers(self.host, include_disabled, exclude=exclude)
     scrapers = []
     for klass in klasses:
         if klass in scraper_cache:
             scrapers.append(scraper_cache[klass])
         else:
             scraper_cache[klass] = klass()
             scrapers.append(scraper_cache[klass])
     return scrapers
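Examples #1 and #2 are the same memoization helper from two projects: universalscrapers.relevant_scrapers() returns scraper classes, and each class is instantiated at most once, with the instance kept in a module-level scraper_cache dict so later lookups skip construction. The explicit if/else matters: a one-liner with dict.setdefault would still build a throwaway scraper on every cache hit, since arguments are evaluated eagerly. A minimal sketch of the context the method assumes (only scraper_cache is named in the snippet; the comment below flags the non-equivalent shortcut):

import universalscrapers

# Shared across all callers; maps scraper class -> its single instance.
scraper_cache = {}

# Not equivalent to the loop above: klass() runs even when klass is cached.
# scraper_cache.setdefault(klass, klass())
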
Example #3
 def instanceEnabled(self):
     # Get the latest setting.
     if not self.name == '':
         try:
             return universalscrapers.relevant_scrapers(
                 names_list=self.name.lower(),
                 include_disabled=False,
                 exclude=None)[0]()._is_enabled()
         except:
             return False
     return self.enabled
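Example #3 relies on relevant_scrapers() accepting a names_list filter and returning classes: [0]() instantiates the first match and _is_enabled() reads that scraper's current enabled setting. The bare except also hides the case where no scraper matches; a hedged variant that makes that path explicit (the helper name is illustrative):

import universalscrapers

def scraper_enabled(name):
    # Returns False when the name matches no scraper, instead of relying
    # on an IndexError being swallowed by a bare except.
    matches = universalscrapers.relevant_scrapers(
        names_list=name.lower(), include_disabled=False, exclude=None)
    return bool(matches) and matches[0]()._is_enabled()
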
Example #4
	def instances(self):
		result = []
		try:
			get_scrapers = universalscrapers.relevant_scrapers(names_list = None, include_disabled = True, exclude = None)
			for scraper in get_scrapers:
				scraper = scraper()
				id = scraper.name.replace(' ', '').lower()
				if id == 'orion' or id == 'orionoid': continue
				scraperNew = source()
				scraperNew.id = id
				scraperNew.name = scraper.name
				try: scraperNew.language[0] = scraper.language[0]
				except: pass
				if not hasattr(scraper, '_base_link'): # _base_link: Do not use base_link that is defined as a property (eg: KinoX), since this can make additional HTTP requests, slowing down the process.
					if not scraperNew.base_link or scraperNew.base_link == '':
						try: scraperNew.base_link = scraper.base_link
						except: pass
				scraperNew.enabled = scraper._is_enabled()
				result.append(scraperNew)
		except:
			tools.Logger.error()
		return result
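Example #4 copies scraper metadata into source() objects defined elsewhere in that project. A minimal stand-in with just the attributes the loop assigns might look like this (every default below is an assumption):

class source(object):
    def __init__(self):
        self.id = ''            # scraper name, lowercased, spaces removed
        self.name = ''
        self.language = ['en']  # assumed default; overwritten when the scraper declares one
        self.base_link = ''
        self.enabled = False
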
Example #5
    def get_sources(
        title,
        year,
        imdb,
        tvdb,
        season,
        episode,
        tvshowtitle,
        premiered,
        timeout=30,
        preset="search",
        dialog=None,
        exclude=None,
        scraper_title=False,
        listitem=None,
        output_function=koding.Play_Video,
        skip_selector=False,
        player=None,
    ):
        """
        scrapes for video sources using NaN scraper library
        Args:
            title: movie or episode title
            year: year movie/episode came out
            imdb: imdb identifier
            tvdb:  tvdb identifier
            season: season number
            episode: episode number
            tvshowtitle: title of tv show
            premiered: year tv show premiered
            timeout: timeout for scraping link
            preset: preferred quality of stream
            dialog: dialog to use for displaying messages
            exclude: list of scrapers to exclude
            scraper_title: extra movie/tv show title to search first.
                           required if scrapers use an alternate spelling
        Returns:
            Boolean indicating playback success
        """
        year = str(year)
        content = "movie" if tvshowtitle is None else "episode"
        allow_debrid = ADDON.getSetting("allow_debrid") == "true"

        if ADDON.getSetting("use_link_dialog") == "true" and not skip_selector:
            # use link selector
            if content == "movie":
                scraper = universalscrapers.scrape_movie_with_dialog
                link, rest = scraper(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    extended=True,
                    sort_function=Sources.sort_function,
                    enable_debrid=allow_debrid,
                )
            elif content == "episode":
                scraper = universalscrapers.scrape_episode_with_dialog
                link, rest = scraper(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    extended=True,
                    sort_function=Sources.sort_function,
                    enable_debrid=allow_debrid,
                )
            else:
                return

            if type(link) == dict and "path" in link:
                link = link["path"]
            if link is None:
                return False
            url = link["url"]
            if ADDON.getSetting("link_fallthrough") == "true":
                played = False
                index = 0
                links = []
                for item in rest:
                    if type(item) == dict and "path" in item:
                        links.extend(item["path"][1])
                    else:
                        links.extend(item[1])
                index = links.index(link)
                links = links[index + 1 :]
                num_results = len(rest) + 1
                while not played:
                    try:
                        if dialog is not None and dialog.iscanceled():
                            return False
                        if dialog is not None:
                            index = index + 1
                            percent = int((index * 100) / num_results)
                            line = "%s - %s (%s)" % (
                                link["scraper"],
                                link["source"],
                                link["quality"],
                            )
                            dialog.update(percent, line)
                    except:
                        pass
                    try:
                        resolved_link = universalscrapers.resolve(
                            link["scraper"], link["url"]
                        )
                        played = output_function(
                            resolved_link,
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player,
                            resolver=resolveurl,
                        )
                        link = links[0]
                        links = links[1:]
                    except:
                        return False
                return played
            else:
                resolved_link = universalscrapers.resolve(link["scraper"], link["url"])
                return output_function(
                    resolved_link,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl,
                )
        else:
            if content == "movie":
                title = title
                scraper = universalscrapers.scrape_movie
                links_scraper = scraper(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    enable_debrid=allow_debrid,
                )

            elif content == "episode":
                if scraper_title:
                    tvshowtitle = title
                tvshowtitle = tvshowtitle
                scraper = universalscrapers.scrape_episode
                links_scraper = scraper(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    enable_debrid=allow_debrid,
                )
            else:
                return

        sd_links = []
        non_direct_links = []
        non_direct_sd_links = []
        num_scrapers = len(universalscrapers.relevant_scrapers())
        index = 0
        try:
            for scraper_links in links_scraper():
                if dialog is not None and dialog.iscanceled():
                    return
                if dialog is not None:
                    index = index + 1
                    percent = int((index * 100) / num_scrapers)
                    dialog.update(percent)
                if scraper_links is not None:
                    random.shuffle(scraper_links)
                    for scraper_link in scraper_links:
                        if dialog is not None and dialog.iscanceled():
                            return False

                        if Sources().__check_skip_pairing(scraper_link):
                            continue

                        quality = Sources.__determine_quality(scraper_link["quality"])
                        preset = preset.lower()
                        if preset == "searchsd":
                            if quality == "HD":
                                continue
                        elif preset == "search":
                            if quality == "SD":
                                sd_links.append(scraper_link)

                        if scraper_link["direct"]:
                            resolved_link = universalscrapers.resolve(scraper_link["scraper"], scraper_link["url"])
                            result = output_function(
                                resolved_link,
                                showbusy=False,
                                ignore_dp=True,
                                item=listitem,
                                player=player,
                                resolver=resolveurl,
                            )
                            if result:
                                return result
                        else:
                            non_direct_links.append(scraper_link)

            for scraper_link in non_direct_links:
                if dialog is not None and dialog.iscanceled():
                    return False
                resolved_link = universalscrapers.resolve(scraper_link["scraper"], scraper_link["url"])
                result = output_function(
                    resolved_link,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl,
                )
                if result:
                    return result

            for scraper_link in sd_links:
                if dialog is not None and dialog.iscanceled():
                    return

                if scraper_link["direct"]:
                    resolved_link = universalscrapers.resolve(scraper_link["scraper"], scraper_link["url"])
                    result = output_function(
                        resolved_link,
                        showbusy=False,
                        ignore_dp=True,
                        item=listitem,
                        player=player,
                        resolver=resolveurl,
                    )
                    if result:
                        return result
                else:
                    non_direct_sd_links.append(scraper_link)

            for scraper_link in non_direct_sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                resolved_link = universalscrapers.resolve(scraper_link["scraper"], scraper_link["url"])
                result = output_function(
                    resolved_link,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl,
                )
                if result:
                    return result

            return False
        except:
            return False
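get_sources is written without self, so it is presumably a staticmethod on the surrounding Sources class. A hedged call sketch for the movie path, usable only inside a Kodi addon since the default output_function is koding.Play_Video (title, year and imdb values are illustrative; the TV-only positionals are passed as None so content resolves to "movie"):

# Returns True/False for playback success, or None when content cannot
# be determined.
played = Sources.get_sources(
    'Inception', 2010, 'tt1375666',    # title, year, imdb
    None, None, None, None, None,      # tvdb, season, episode, tvshowtitle, premiered
    timeout=30, preset='search')
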
Example #6
    def get_music_sources(
        title,
        artist,
        timeout=30,
        preset="search",
        dialog=None,
        exclude=None,
        listitem=None,
        output_function=koding.Play_Video,
        skip_selector=False,
        player=None,
    ):
        """
        scrapes for music sources using NaN scraper library
        Args:
            title: song title
            artist: song artist
            timeout: timeout for scraping link
            preset: preferred quality of stream
            dialog: dialog to use for displaying messages
            exclude: list of scrapers to exclude
        Returns:
            Boolean indicating playback success
        """
        title = title
        allow_debrid = ADDON.getSetting("allow_debrid") == "true"
        if ADDON.getSetting("use_link_dialog") == "true" and not skip_selector:
            link, rest = universalscrapers.scrape_song_with_dialog(
                title,
                artist,
                timeout=timeout,
                exclude=exclude,
                enable_debrid=allow_debrid,
                extended=True,
            )
            if type(link) == dict and "path" in link:
                link = link["path"]
            if link is None:
                return False
            url = link["url"]
            if ADDON.getSetting("link_fallthrough") == "true":
                played = False
                index = 0
                links = []
                for item in rest:
                    if type(item) == dict and "path" in item:
                        links.extend(item["path"][1])
                    else:
                        links.extend(item[1])
                index = links.index(link)
                links = links[index + 1 :]
                num_results = len(rest) + 1
                while not played:
                    try:
                        if dialog is not None and dialog.iscanceled():
                            return
                        if dialog is not None:
                            index = index + 1
                            percent = int((index * 100) / num_results)
                            line = "%s - %s (%s)" % (
                                link["scraper"],
                                link["source"],
                                link["quality"],
                            )
                            dialog.update(percent, line)
                    except:
                        pass
                    try:
                        played = output_function(
                            url,
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player,
                            resolver=resolveurl,
                        )
                        link = links[0]
                        links = links[1:]
                    except:
                        return False
                return played
            else:
                return output_function(
                    url,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl,
                )
        links_scraper = universalscrapers.scrape_song(
            title, artist, timeout=timeout, exclude=exclude, enable_debrid=allow_debrid
        )

        sd_links = []
        num_scrapers = len(universalscrapers.relevant_scrapers())
        index = 0
        try:
            for scraper_links in links_scraper():
                if dialog is not None and dialog.iscanceled():
                    return
                if dialog is not None:
                    index = index + 1
                    percent = int((index * 100) / num_scrapers)
                    dialog.update(percent)
                if scraper_links is not None:
                    random.shuffle(scraper_links)
                    for scraper_link in scraper_links:
                        if dialog is not None and dialog.iscanceled():
                            return

                        if Sources().__check_skip_pairing(scraper_link):
                            continue

                        quality = Sources.__determine_quality(scraper_link["quality"])
                        preset = preset.lower()
                        if preset == "searchsd":
                            if quality == "HD":
                                continue
                        elif preset == "search":
                            if quality == "SD":
                                sd_links.append(scraper_link)

                        result = output_function(
                            scraper_link["url"],
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player,
                            resolver=resolveurl,
                        )
                        if result:
                            return result

            for scraper_link in sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                result = output_function(
                    scraper_link["url"],
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl,
                )
                if result:
                    return result
        except:
            pass
        return False
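The music variant has the same shape with a much smaller signature; a matching hedged call (song and artist are illustrative):

played = Sources.get_music_sources(
    'Yellow Submarine', 'The Beatles', timeout=30, preset='search')
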
Example #7
scraper_results_path = xbmc.translatePath(full_file)
if not os.path.exists(scraper_results_path):
    Open = open(scraper_results_path, 'w+')

scrapers_path = xbmc.translatePath(
    'special://home/addons/script.module.universalscrapers/lib/universalscrapers/scraperplugins'
)
for Root, Dir, Files in os.walk(scrapers_path):
    for File in Files:
        if ('py' in File and 'pyo' not in File and '__' not in File
                and not any(x in Root for x in ('broken', 'slow', 'ok', 'unsure', 'test'))):
            No_of_scrapers.append('1')
            scraper_paths.append(File)

params = dict(urlparse.parse_qsl(sys.argv[2].replace('?', '')))
mode = params.get('mode')
if mode == "DisableAll":
    scrapers = sorted(
        universalscrapers.relevant_scrapers(include_disabled=True),
        key=lambda x: x.name.lower())
    for scraper in scrapers:
        key = "%s_enabled" % scraper.name
        xbmcaddon.Addon('script.module.universalscrapers').setSetting(
            key, "false")
    sys.exit()
elif mode == "EnableAll":
    scrapers = sorted(
        universalscrapers.relevant_scrapers(include_disabled=True),
        key=lambda x: x.name.lower())
    for scraper in scrapers:
        key = "%s_enabled" % scraper.name
        xbmcaddon.Addon('script.module.universalscrapers').setSetting(
            key, "true")
    sys.exit()
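The DisableAll and EnableAll branches differ only in the value written, so the logic can be read as one parameterized helper; a hedged sketch of the key scheme they share ("<scraper name>_enabled" in the script.module.universalscrapers settings):

import xbmcaddon
import universalscrapers

def set_all_scrapers(enabled):
    addon = xbmcaddon.Addon('script.module.universalscrapers')
    value = 'true' if enabled else 'false'
    for scraper in universalscrapers.relevant_scrapers(include_disabled=True):
        # Same per-scraper key the DisableAll/EnableAll branches write.
        addon.setSetting('%s_enabled' % scraper.name, value)
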
Example #8
    def get_sources(title,
                    year,
                    imdb,
                    tvdb,
                    season,
                    episode,
                    tvshowtitle,
                    premiered,
                    timeout=30,
                    preset="search",
                    dialog=None,
                    exclude=None,
                    scraper_title=False,
                    listitem=None,
                    output_function=koding.Play_Video,
                    skip_selector=False,
                    player=None):
        """
        scrapes for video sources using NaN scraper library
        Args:
            title: movie or episode title
            year: year movie/episode came out
            imdb: imdb identifier
            tvdb:  tvdb identifier
            season: season number
            episode: episode number
            tvshowtitle: title of tv show
            premiered: year tv show premiered
            timeout: timeout for scraping link
            preset: preferred quality of stream
            dialog: dialog to use for displaying messages
            exclude: list of scrapers to exclude
            scraper_title: extra movie/tv show title to search first.
                           required if scrapers use an alternate spelling
        Returns:
            Boolean indicating playback success
        """
        year = str(year)
        content = 'movie' if tvshowtitle is None else 'episode'
        allow_debrid = ADDON.getSetting('allow_debrid') == "true"

        if ADDON.getSetting('use_link_dialog') == 'true' and not skip_selector:
            # use link selector
            if content == 'movie':
                scraper = universalscrapers.scrape_movie_with_dialog
                link, rest = scraper(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    extended=True,
                    sort_function=Sources.sort_function,
                    enable_debrid=allow_debrid)
            elif content == "episode":
                scraper = universalscrapers.scrape_episode_with_dialog
                link, rest = scraper(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    extended=True,
                    sort_function=Sources.sort_function,
                    enable_debrid=allow_debrid)
            else:
                return

            if type(link) == dict and "path" in link:
                link = link["path"]
            if link is None:
                return False
            url = link['url']
            if ADDON.getSetting('link_fallthrough') == 'true':
                played = False
                index = 0
                links = []
                for item in rest:
                    if type(item) == dict and "path" in item:
                        links.extend(item["path"][1])
                    else:
                        links.extend(item[1])
                index = links.index(link)
                links = links[index + 1:]
                num_results = len(rest) + 1
                while not played:
                    try:
                        if dialog is not None and dialog.iscanceled():
                            return False
                        if dialog is not None:
                            index = index + 1
                            percent = int((index * 100) / num_results)
                            line = "%s - %s (%s)" % (link['scraper'],
                                                     link['source'],
                                                     link['quality'])
                            dialog.update(percent, line)
                    except:
                        pass
                    try:
                        played = output_function(
                            link["url"],
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player,
                            resolver=resolveurl)
                        link = links[0]
                        links = links[1:]
                    except:
                        return False
                return played
            else:
                return output_function(
                    url,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl)
        else:
            if content == 'movie':
                title = title
                scraper = universalscrapers.scrape_movie
                links_scraper = scraper(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    enable_debrid=allow_debrid)

            elif content == 'episode':
                if scraper_title:
                    tvshowtitle = title
                tvshowtitle = tvshowtitle
                scraper = universalscrapers.scrape_episode
                links_scraper = scraper(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    enable_debrid=allow_debrid)
            else:
                return

        sd_links = []
        non_direct_links = []
        non_direct_sd_links = []
        num_scrapers = len(universalscrapers.relevant_scrapers())
        index = 0
        try:
            for scraper_links in links_scraper():
                if dialog is not None and dialog.iscanceled():
                    return
                if dialog is not None:
                    index = index + 1
                    percent = int((index * 100) / num_scrapers)
                    dialog.update(percent)
                if scraper_links is not None:
                    random.shuffle(scraper_links)
                    for scraper_link in scraper_links:
                        if dialog is not None and dialog.iscanceled():
                            return False

                        if Sources().__check_skip_pairing(scraper_link):
                            continue

                        quality = Sources.__determine_quality(
                            scraper_link["quality"])
                        preset = preset.lower()
                        if preset == 'searchsd':
                            if quality == "HD":
                                continue
                        elif preset == "search":
                            if quality == "SD":
                                sd_links.append(scraper_link)

                        if scraper_link["direct"]:
                            result = output_function(
                                scraper_link["url"],
                                showbusy=False,
                                ignore_dp=True,
                                item=listitem,
                                player=player,
                                resolver=resolveurl)
                            if result:
                                return result
                        else:
                            non_direct_links.append(scraper_link)

            for scraper_link in non_direct_links:
                if dialog is not None and dialog.iscanceled():
                    return False
                result = output_function(
                    scraper_link["url"],
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl)
                if result:
                    return result

            for scraper_link in sd_links:
                if dialog is not None and dialog.iscanceled():
                    return

                if scraper_link['direct']:
                    result = output_function(
                        scraper_link["url"],
                        showbusy=False,
                        ignore_dp=True,
                        item=listitem,
                        player=player,
                        resolver=resolveurl)
                    if result:
                        return result
                else:
                    non_direct_sd_links.append(scraper_link)

            for scraper_link in non_direct_sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                result = output_function(
                    scraper_link["url"],
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl)
                if result:
                    return result

            return False
        except:
            return False
Example #9
    def get_music_sources(title,
                          artist,
                          timeout=30,
                          preset="search",
                          dialog=None,
                          exclude=None,
                          listitem=None,
                          output_function=koding.Play_Video,
                          skip_selector=False,
                          player=None):
        """
        scrapes for music sources using NaN scraper library
        Args:
            title: song title
            artist: song artist
            timeout: timeout for scraping link
            preset: preferred quality of stream
            dialog: dialog to use for displaying messages
            exclude: list of scrapers to exclude
        Returns:
            Boolean indicating playback success
        """
        title = title
        allow_debrid = ADDON.getSetting('allow_debrid') == "true"
        if ADDON.getSetting('use_link_dialog') == 'true' and not skip_selector:
            link, rest = universalscrapers.scrape_song_with_dialog(
                title,
                artist,
                timeout=timeout,
                exclude=exclude,
                enable_debrid=allow_debrid,
                extended=True)
            if type(link) == dict and "path" in link:
                link = link["path"]
            if link is None:
                return False
            url = link['url']
            if ADDON.getSetting('link_fallthrough') == 'true':
                played = False
                index = 0
                links = []
                for item in rest:
                    if type(item) == dict and "path" in item:
                        links.extend(item["path"][1])
                    else:
                        links.extend(item[1])
                index = links.index(link)
                links = links[index + 1:]
                num_results = len(rest) + 1
                while not played:
                    try:
                        if dialog is not None and dialog.iscanceled():
                            return
                        if dialog is not None:
                            index = index + 1
                            percent = int((index * 100) / num_results)
                            line = "%s - %s (%s)" % (link['scraper'],
                                                     link['source'],
                                                     link['quality'])
                            dialog.update(percent, line)
                    except:
                        pass
                    try:
                        played = output_function(
                            url,
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player,
                            resolver=resolveurl)
                        link = links[0]
                        links = links[1:]
                    except:
                        return False
                return played
            else:
                return output_function(
                    url,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl)
        links_scraper = universalscrapers.scrape_song(
            title,
            artist,
            timeout=timeout,
            exclude=exclude,
            enable_debrid=allow_debrid)

        sd_links = []
        num_scrapers = len(universalscrapers.relevant_scrapers())
        index = 0
        try:
            for scraper_links in links_scraper():
                if dialog is not None and dialog.iscanceled():
                    return
                if dialog is not None:
                    index = index + 1
                    percent = int((index * 100) / num_scrapers)
                    dialog.update(percent)
                if scraper_links is not None:
                    random.shuffle(scraper_links)
                    for scraper_link in scraper_links:
                        if dialog is not None and dialog.iscanceled():
                            return

                        if Sources().__check_skip_pairing(scraper_link):
                            continue

                        quality = Sources.__determine_quality(
                            scraper_link["quality"])
                        preset = preset.lower()
                        if preset == 'searchsd':
                            if quality == "HD":
                                continue
                        elif preset == "search":
                            if quality == "SD":
                                sd_links.append(scraper_link)

                        result = output_function(
                            scraper_link["url"],
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player,
                            resolver=resolveurl)
                        if result:
                            return result

            for scraper_link in sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                result = output_function(
                    scraper_link["url"],
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl)
                if result:
                    return result
        except:
            pass
        return False
Example #10
scraper_results_path = xbmc.translatePath(full_file)
if not os.path.exists(scraper_results_path):
	Open = open(scraper_results_path,'w+')
	
scrapers_path = xbmc.translatePath('special://home/addons/script.module.universalscrapers/lib/universalscrapers/scraperplugins')
for Root, Dir, Files in os.walk(scrapers_path):
	for File in Files:
		if ('py' in File and 'pyo' not in File and '__' not in File
				and not any(x in Root for x in ('broken', 'slow', 'ok', 'unsure', 'test'))):
			No_of_scrapers.append('1')
			scraper_paths.append(File)

params = dict(urlparse.parse_qsl(sys.argv[2].replace('?', '')))
mode = params.get('mode')
if mode == "DisableAll":
    scrapers = sorted(
        universalscrapers.relevant_scrapers(include_disabled=True), key=lambda x: x.name.lower())
    for scraper in scrapers:
        key = "%s_enabled" % scraper.name
        xbmcaddon.Addon('script.module.universalscrapers').setSetting(key, "false")
    sys.exit()
elif mode == "EnableAll":
    scrapers = sorted(
        universalscrapers.relevant_scrapers(include_disabled=True), key=lambda x: x.name.lower())
    for scraper in scrapers:
        key = "%s_enabled" % scraper.name
        xbmcaddon.Addon('script.module.universalscrapers').setSetting(key, "true")
    sys.exit()
elif mode == "Deletelog":
    from universalscrapers.common import Del_LOG
    Del_LOG()
    sys.exit()
Example #11
	def sources(self, url, hostDict, hostprDict):
		sources = []
		try:
			debridHas = False
			if not debridHas:
				premiumize = debrid.Premiumize()
				debridHas = premiumize.accountEnabled() and premiumize.accountValid()
				if not debridHas:
					offcloud = debrid.OffCloud()
					debridHas = offcloud.accountEnabled() and offcloud.accountValid()
					if not debridHas:
						realdebrid = debrid.RealDebrid()
						debridHas = realdebrid.accountEnabled() and realdebrid.accountValid()
						if not debridHas:
							alldebrid = debrid.AllDebrid()
							debridHas = alldebrid.accountEnabled() and alldebrid.accountValid()
							if not debridHas:
								rapidpremium = debrid.RapidPremium()
								debridHas = rapidpremium.accountEnabled() and rapidpremium.accountValid()

			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			movie = False if 'tvshowtitle' in data else True
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			year = str(data['year']) if 'year' in data and not data['year'] == None else ''
			season = str(data['season']) if 'season' in data and not data['season'] == None else ''
			episode = str(data['episode']) if 'episode' in data and not data['episode'] == None else ''
			imdb = data['imdb'] if 'imdb' in data else ''
			tvdb = data['tvdb'] if 'tvdb' in data else ''

			scraper = universalscrapers.relevant_scrapers(names_list = self.name.lower(), include_disabled = True, exclude = None)[0]()
			if self.base_link and not self.base_link == '': scraper.base_link = self.base_link
			if movie:
				result = scraper.scrape_movie(title = title, year = year, imdb = imdb, debrid = debridHas)
			else:
				showYear = year
				try:
					if 'premiered' in data and not data['premiered'] == None and not data['premiered'] == '':
						for format in ['%Y-%m-%d', '%Y-%d-%m', '%d-%m-%Y', '%m-%d-%Y']:
							try:
								showYear = str(int(convert.ConverterTime(value = data['premiered'], format = format).string(format = '%Y')))
								if len(showYear) == 4: break
							except:
								pass
				except:
					pass
				result = scraper.scrape_episode(title = title, year = year, show_year = showYear, season = season, episode = episode, imdb = imdb, tvdb = tvdb, debrid = debridHas)

			if result:
				for item in result:
					item['external'] = True
					item['language']= self.language[0]
					item['debridonly'] = False
					item['url'] = item['url'].replace('http:http:', 'http:').replace('https:https:', 'https:').replace('http:https:', 'https:').replace('https:http:', 'http:') # Some of the links start with a double http.

					# External providers (eg: "Get Out"), sometimes has weird characters in the URL.
					# Ignore the links that have non-printable ASCII or UTF8 characters.
					try: item['url'].decode('utf-8')
					except: continue

					source = item['source'].lower().replace(' ', '')
					if source == 'direct' or source == 'directlink':
						source = urlparse.urlsplit(item['url'])[1].split(':')[0]
						if network.Networker.ipIs(source):
							source = 'Anonymous'
						else:
							split = source.split('.')
							for i in split:
								i = i.lower()
								if i in ['www', 'ftp']: continue
								source = i
								break
						item['source'] = source
					sources.append(item)

			return sources
		except:
			tools.Logger.error()
			return sources
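The replace() chain near the end of Example #11 repairs links that arrive with a doubled scheme ('http:http://…' and the three other combinations), always keeping the second scheme. A hedged regex equivalent, anchored at the start of the URL where the duplication occurs, fixing one level of doubling just as the chain does:

import re

def fix_doubled_scheme(url):
    # 'http:https://x' -> 'https://x', 'https:http://x' -> 'http://x', etc.
    return re.sub(r'^https?:(?=https?:)', '', url)
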
Example #12
    def get_sources(
        title,
        year,
        imdb,
        tvdb,
        season,
        episode,
        tvshowtitle,
        premiered,
        timeout=30,
        preset="search",
        dialog=None,
        exclude=None,
        scraper_title=False,
        listitem=None,
        output_function=koding.Play_Video,
        skip_selector=False,
        player=None,
        icon=' ',
        fanart=' ',
    ):
        """
        scrapes for video sources using NaN scraper library
        Args:
            title: movie or episode title
            year: year movie/episode came out
            imdb: imdb identifier
            tvdb:  tvdb identifier
            season: season number
            episode: episode number
            tvshowtitle: title of tv show
            premiered: year tv show premiered
            timeout: timeout for scraping link
            preset: preferred quality of stream
            dialog: dialog to use for displaying messages
            exclude: list of scrapers to exclude
            scraper_title: extra movie/tv show title to search first.
                           required if scrapers use an alternate spelling
        Returns:
            Boolean indicating playback success
        """
        
        year = str(year)
        content = "movie" if tvshowtitle is None else "episode"
        allow_debrid = ADDON.getSetting("allow_debrid") == "true"
        if ADDON.getSetting("jen_sources") == '1':
            if content == "movie":
                original_title = title
                episode = '%20'
                season = '%20'
            else:
                original_title = tvshowtitle
            logging.warning('movie info')
            logging.warning(content)
            logging.warning(original_title)
            logging.warning(year)
            logging.warning(imdb)
            logging.warning(tvshowtitle)
            
            
            try:
                xbmc.executebuiltin(('Container.update("plugin://plugin.video.destinyds/?data=%s&dates=EMPTY&description=%s-NEXTUP-&eng_name=%s&episode=%s&fanart=%s&heb_name=%s&iconimage=%s&id=%s&isr=%s&mode2=4&name=%s&original_title=%s&season=%s&show_original_year=%s&tmdbid=EMPTY&url=%s&fast_link=%s&url=www",return)'%(str(year),urllib.quote_plus(' '),original_title,episode,urllib.quote_plus(fanart),original_title,urllib.quote_plus(icon),imdb,' ',original_title,original_title,season,str(year),urllib.quote_plus(''),urllib.quote_plus(''))).replace('EMPTY','%20'))
                            
                #play(name,fast_link,iconimage,image,description,data,season,episode,original_title,name,heb_name,show_original_year,eng_name,isr,original_title,id,windows_play=True,auto_fast=False,nextup=True)
                
                logging.warning('PLAY NEXTUP FULLSCREEN')
                xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
                return '0'
            except Exception as e:
                logging.warning(e)
        if ADDON.getSetting("use_link_dialog") == "true" and not skip_selector:
            # use link selector
            if content == "movie":
                scraper = universalscrapers.scrape_movie_with_dialog
                link, rest = scraper(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    extended=True,
                    sort_function=Sources.sort_function,
                    enable_debrid=allow_debrid,
                )
            elif content == "episode":
                scraper = universalscrapers.scrape_episode_with_dialog
                link, rest = scraper(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    extended=True,
                    sort_function=Sources.sort_function,
                    enable_debrid=allow_debrid,
                )
            else:
                return

            if type(link) == dict and "path" in link:
                link = link["path"]
            if link is None:
                return False
            url = link["url"]
            if ADDON.getSetting("link_fallthrough") == "true":
                played = False
                index = 0
                links = []
                for item in rest:
                    if type(item) == dict and "path" in item:
                        links.extend(item["path"][1])
                    else:
                        links.extend(item[1])
                index = links.index(link)
                links = links[index + 1 :]
                num_results = len(rest) + 1
                while not played:
                    try:
                        if dialog is not None and dialog.iscanceled():
                            return False
                        if dialog is not None:
                            index = index + 1
                            percent = int((index * 100) / num_results)
                            line = "%s - %s (%s)" % (
                                link["scraper"],
                                link["source"],
                                link["quality"],
                            )
                            dialog.update(percent, line)
                    except:
                        pass
                    try:
                        resolved_link = universalscrapers.resolve(
                            link["scraper"], link["url"]
                        )
                        played = output_function(
                            resolved_link,
                            showbusy=False,
                            ignore_dp=True,
                            item=listitem,
                            player=player,
                            resolver=resolveurl,
                        )
                        link = links[0]
                        links = links[1:]
                    except:
                        return False
                return played
            else:
                resolved_link = universalscrapers.resolve(link["scraper"], link["url"])
                return output_function(
                    resolved_link,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl,
                )
        else:
            if content == "movie":
                title = title
                scraper = universalscrapers.scrape_movie
                links_scraper = scraper(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    enable_debrid=allow_debrid,
                )

            elif content == "episode":
                if scraper_title:
                    tvshowtitle = title
                tvshowtitle = tvshowtitle
                scraper = universalscrapers.scrape_episode
                links_scraper = scraper(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    enable_debrid=allow_debrid,
                )
            else:
                return

        sd_links = []
        non_direct_links = []
        non_direct_sd_links = []
        num_scrapers = len(universalscrapers.relevant_scrapers())
        index = 0
        try:
            for scraper_links in links_scraper():
                if dialog is not None and dialog.iscanceled():
                    return
                if dialog is not None:
                    index = index + 1
                    percent = int((index * 100) / num_scrapers)
                    dialog.update(percent)
                if scraper_links is not None:
                    random.shuffle(scraper_links)
                    for scraper_link in scraper_links:
                        if dialog is not None and dialog.iscanceled():
                            return False

                        if Sources().__check_skip_pairing(scraper_link):
                            continue

                        quality = Sources.__determine_quality(scraper_link["quality"])
                        preset = preset.lower()
                        if preset == "searchsd":
                            if quality == "HD":
                                continue
                        elif preset == "search":
                            if quality == "SD":
                                sd_links.append(scraper_link)

                        if scraper_link["direct"]:
                            resolved_link = universalscrapers.resolve(scraper_link["scraper"], scraper_link["url"])
                            result = output_function(
                                resolved_link,
                                showbusy=False,
                                ignore_dp=True,
                                item=listitem,
                                player=player,
                                resolver=resolveurl,
                            )
                            if result:
                                return result
                        else:
                            non_direct_links.append(scraper_link)

            for scraper_link in non_direct_links:
                if dialog is not None and dialog.iscanceled():
                    return False
                resolved_link = universalscrapers.resolve(scraper_link["scraper"], scraper_link["url"])
                result = output_function(
                    resolved_link,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl,
                )
                if result:
                    return result

            for scraper_link in sd_links:
                if dialog is not None and dialog.iscanceled():
                    return

                if scraper_link["direct"]:
                    resolved_link = universalscrapers.resolve(scraper_link["scraper"], scraper_link["url"])
                    result = output_function(
                        resolved_link,
                        showbusy=False,
                        ignore_dp=True,
                        item=listitem,
                        player=player,
                        resolver=resolveurl,
                    )
                    if result:
                        return result
                else:
                    non_direct_sd_links.append(scraper_link)

            for scraper_link in non_direct_sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                resolved_link = universalscrapers.resolve(scraper_link["scraper"], scraper_link["url"])
                result = output_function(
                    resolved_link,
                    showbusy=False,
                    ignore_dp=True,
                    item=listitem,
                    player=player,
                    resolver=resolveurl,
                )
                if result:
                    return result

            return False
        except:
            return False
Example #13
scraper_results_path = xbmc.translatePath(full_file)
if not os.path.exists(scraper_results_path):
    Open = open(scraper_results_path,'w+')

scrapers_path = xbmc.translatePath('special://home/addons/script.module.universalscrapers/lib/universalscrapers/scraperplugins')
for Root, Dir, Files in os.walk(scrapers_path):
    for File in Files:
        if ('py' in File and 'pyo' not in File and '__' not in File
                and not any(x in Root for x in ('broken', 'slow', 'ok', 'unsure', 'test'))):
            No_of_scrapers.append('1')
            scraper_paths.append(File)

params = dict(urlparse.parse_qsl(sys.argv[2].replace('?', '')))
mode = params.get('mode')
if mode == "DisableAll":
    scrapers = sorted(
        universalscrapers.relevant_scrapers(include_disabled=True), key=lambda x: x.name.lower())
    for scraper in scrapers:
        key = "%s_enabled" % scraper.name
        xbmcaddon.Addon('script.module.universalscrapers').setSetting(key, "false")
    sys.exit()
elif mode == "EnableAll":
    scrapers = sorted(
        universalscrapers.relevant_scrapers(include_disabled=True), key=lambda x: x.name.lower())
    for scraper in scrapers:
        key = "%s_enabled" % scraper.name
        xbmcaddon.Addon('script.module.universalscrapers').setSetting(key, "true")
    sys.exit()
elif mode == "Deletelog":
    from universalscrapers.common import Del_LOG
    Del_LOG()
    sys.exit()