Example #1
def scrape_movie(name, year):
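	# Strip a bracketed year from the title, run the NaN scrapers for the movie,
	# label every returned link with source/scraper/quality and hand it to process.Play.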
	import process, xbmcgui, re, xbmcplugin, sys
	from nanscrapers import scrape_movie
	if '(' in name:
		year_remover = re.compile(r'\((.+?)\)').findall(str(name))
		for item in year_remover:
			name = name.replace(item,'').replace('(','').replace(')','')
	year = year.replace('(','').replace(')','').replace(' ','')
	if name != "" and year != "":
		progress = []
		item = []
		dp = xbmcgui.DialogProgress()
		dp.create('Initiating Scrapers')
		links_scraper = scrape_movie(name, year, '', timeout=60)
		if links_scraper is False:
			pass
		else:
			scraped_links = []
			for scraper_links in links_scraper():
				item.append(scraper_links)
			items = len(item)
			# iterate the links collected above instead of running every scraper a second time
			for scraper_links in item:
				if scraper_links is not None:
					progress.append(scraper_links)
					dp_add = len(progress) / float(items) * 100
					dp.create('Checking for stream')
					dp.update(int(dp_add), 'Checking sources', "Checking Nan Scrapers", 'Please Wait')
					scraped_links.extend(scraper_links)
			for link in scraped_links:
				if link["quality"]=='SD': name = link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				elif link["quality"]=='CAM': name = link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				elif link["quality"]=='360': name = link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				elif link["quality"]=='480': name = link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				elif link["quality"]=='560': name = ' '+link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				elif link["quality"]=='720': name = ' '+link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				elif link["quality"]=='1080': name = ' '+link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				elif link["quality"]=='HD': name = ' '+link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				elif 'vidzi' in link["source"]: name = '  '+link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				else: name = link["source"] + " - " + link["scraper"] + " (" + link["quality"] + ")"
				process.Play(name,link["url"],906,'','','','')
				xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
Example #2
def test():
    global movies, shows
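    # Load the movie/episode test cases from testings.xml in the addon profile folder,
    # scrape each one, then query nanscrapers' url_cache.db to report scrapers and
    # titles that produced no usable links.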
    try:
        test_movies = []
        test_episodes = []
        profile_path = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile')).decode('utf-8')
        test_file = xbmcvfs.File(os.path.join(profile_path, "testings.xml"))
        xml = BeautifulStoneSoup(test_file.read())
        test_file.close()
        items = xml.findAll("item")
        for item in items:
            try:
                content = item.find("content")
                if content:
                    if "movie" in content.text:
                        meta = item.find("meta")
                        test_movies.append({
                            'title': meta.find("title").text,
                            'imdb': meta.find("imdb").text,
                            'year': meta.find("year").text,
                        })
                    elif "episode" in content.text:
                        meta = item.find("meta")
                        test_episodes.append({
                            'title': meta.find("tvshowtitle").text,
                            'show_year': int(meta.find("premiered").text[0:4]),
                            'year': meta.find("year").text,
                            'season': meta.find("season").text,
                            'episode': meta.find("season").text,
                            'imdb': meta.find("imdb").text,
                        })
            except:
                pass

        movies = test_movies
        shows = test_episodes
    except:
        pass

    dialog = xbmcgui.Dialog()
    pDialog = xbmcgui.DialogProgress()
    if dialog.yesno("NaNscrapers Testing Mode", 'Clear cache?'):
        nanscrapers.clear_cache()
    try:
        dbcon = database.connect(os.path.join(
            xbmc.translatePath(xbmcaddon.Addon("script.module.nanscrapers").getAddonInfo('profile')).decode('utf-8'),
            'url_cache.db'))
        dbcur = dbcon.cursor()
    except:
        dialog.ok("NaNscrapers Testing Mode", 'Error connecting to db')
        sys.exit()

    num_movies = len(movies)
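    # Scrape every test movie; the results land in the rel_src table of url_cache.db,
    # which is analysed below to spot scrapers that returned nothing.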
    if num_movies > 0:
        pDialog.create('NaNscrapers Testing mode active', 'please wait')
        index = 0
        for movie in movies:
            index += 1
            title = movie['title']
            year = movie['year']
            imdb = movie['imdb']
            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(int(index * 100.0 / num_movies), "Scraping movie {} of {}".format(index, num_movies), title)
            links_scraper = nanscrapers.scrape_movie(title, year, imdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
        dbcur.execute("SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode = ''")
        match = dbcur.fetchone()
        num_movie_scrapers = match[0]

        dbcur.execute("SELECT scraper, count(distinct(urls)) FROM rel_src where episode = '' group by scraper")
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                failed.append(match[0])

        if len(failed) > 0:
            failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                failedstring += "\n        - {}".format(str(fail))
        else:
            failedstring = ""

        dbcur.execute("SELECT title, count(distinct(urls)) FROM rel_src where episode = '' group by title")
        matches = dbcur.fetchall()
        failed_movies = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode == '' and title == '{}' group by scraper".format(
                            match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_movies.append(match[0])
                else:
                    failed_movies.append(match[0])

        if len(failed_movies) > 0:
            failed_movie_string = "Failed movies: {}".format(len(failed_movies))
            for fail in failed_movies:
                for movie in movies:
                    if clean_title(movie['title']).upper() == str(fail):
                        failed_movie_string += "\n        - {}".format(movie["title"])

        else:
            failed_movie_string = ""

    num_shows = len(shows)
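    # Same procedure for the test episodes: scrape them, then check rel_src for
    # scrapers and shows without links.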
    if num_shows > 0:
        pDialog.create('NaNscrapers Testing mode active', 'please wait')
        index = 0
        for show in shows:
            index += 1
            title = show['title']
            show_year = show['show_year']
            year = show['year']
            season = show['season']
            episode = show['episode']
            imdb = show['imdb']
            tvdb = show.get('tvdb', '')

            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(int(index * 100.0 / num_shows), "Scraping show {} of {}".format(index, num_shows), title)
            links_scraper = nanscrapers.scrape_episode(title, show_year, year, season, episode, imdb, tvdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
        dbcur.execute("SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode != ''")
        match = dbcur.fetchone()
        num_show_scrapers = match[0]

        dbcur.execute("SELECT scraper, count(distinct(urls)) FROM rel_src where episode != '' group by scraper")
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and scraper == '{}' group by scraper".format(
                            match[0]))
                    match = dbcur.fetchone()
                    if match[1] == "[]":
                        failed.append(match[0])
                else:
                    failed.append(match[0])

        if len(failed) > 0:
            show_scraper_failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                show_scraper_failedstring += "\n        - {}".format(str(fail))
        else:
            show_scraper_failedstring = ""

        dbcur.execute("SELECT title, count(distinct(urls)) FROM rel_src where episode != '' group by title")
        matches = dbcur.fetchall()
        failed_shows = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and title == '{}' group by scraper".format(
                            match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_shows.append(match[0])
                else:
                    failed_shows.append(match[0])

        if len(failed_shows) > 0:
            failed_show_string = "Failed shows: {}".format(len(failed_shows))
            for fail in failed_shows:
                for show in shows:
                    if clean_title(show['title']).upper() == str(fail):
                        failed_show_string += "\n        - {} S{}-E{}".format(show["title"], show["season"],
                                                                              show["episode"])

        else:
            failed_show_string = ""

    resultstring = 'Results:\n'
    if num_movies > 0:
        resultstring = resultstring + \
                       '    Movie Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_movie_scrapers, failedstring, failed_movie_string)
    if num_shows > 0:
        resultstring = resultstring + \
                       '    Episode Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_show_scrapers, show_scraper_failedstring, failed_show_string)

    dialog.textviewer("NaNscrapers Testing Mode", resultstring)
Example #3
    def getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, timeout=30,
                   progress=True, preset="search", dialog=None, exclude=None, scraper_title=False, queueing=False):
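        # Scrape links for a movie or an episode with nanscrapers, optionally let the
        # user pick a source via the link dialog, and return the first URL that
        # urlresolver9 resolves to something playable. HD direct links are tried first;
        # SD and non-direct links are kept for the fallback passes further down.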

        year = str(year)
        content = 'movie' if tvshowtitle is None else 'episode'
        allow_debrid = control.setting('allow_debrid') == "true"

        if content == 'movie':
            title = cleantitle.normalize(title)
            links_scraper = nanscrapers.scrape_movie(title, year, imdb, timeout=timeout, exclude=exclude,
                                                     enable_debrid=allow_debrid)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            links_scraper = nanscrapers.scrape_episode(tvshowtitle, year, premiered, season, episode, imdb, tvdb,
                                                       timeout=timeout, exclude=exclude, enable_debrid=allow_debrid)
        else:
            return

        if not queueing and control.setting('use_link_dialog') == 'true':
            if control.setting('check_url') == "true":
                check_url = True
            else:
                check_url = False
            if content == 'movie':
                link, items = nanscrapers.scrape_movie_with_dialog(title, year, imdb, timeout=timeout, exclude=exclude,
                                                                   sort_function=sources.sort_function,
                                                                   check_url=check_url, extended=True,
                                                                   enable_debrid=allow_debrid)
            elif content == "episode":
                link, items = nanscrapers.scrape_episode_with_dialog(tvshowtitle, year, premiered, season, episode,
                                                                     imdb, tvdb,
                                                                     timeout=timeout, exclude=exclude,
                                                                     sort_function=sources.sort_function,
                                                                     check_url=check_url, extended=True,
                                                                     enable_debrid=allow_debrid)
            else:
                return

            if link is None:
                return False

            if type(link) == dict and "path" in link:
                link = link["path"]
            url = link['url']
            if allow_debrid:
                new_url = debrid.resolve(url)
                if new_url:
                    url = new_url
            import urlresolver9
            try:
                hmf = urlresolver9.HostedMediaFile(url=url, include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url() == True:
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = url
            except:
                resolved_url = None

            if resolved_url and sources().check_playable(resolved_url) is not None:
                url = resolved_url
            else:
                if control.setting('link_fallthtough') == 'true':
                    try:
                        links = []
                        for item in items:
                            if type(item) == dict and "path" in item:
                                links.extend(item["path"][1])
                            else:
                                links.extend(item[1])
                        index = links.index(link)
                        links = links[index + 1:]
                        for link in links:
                            try:
                                hmf = urlresolver9.HostedMediaFile(url=link["url"], include_disabled=False,
                                                                   include_universal=allow_debrid)
                                if hmf.valid_url() == True:
                                    resolved_url = hmf.resolve()
                                else:
                                    resolved_url = None
                            except:
                                resolved_url = None

                            if resolved_url and sources().check_playable(resolved_url) is not None:
                                url = resolved_url
                                return url
                        url = None
                    except:
                        pass
                else:
                    url = None
            return url

        sd_links = []
        non_direct = []
        sd_non_direct = []
        links_scraper = links_scraper()
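        # First pass: walk the scraper results as they arrive, deferring SD and
        # non-direct links and returning the first playable HD direct link.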
        for scraper_links in links_scraper:
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return

                    if allow_debrid:
                        new_url = debrid.resolve(scraper_link['url'])
                        if new_url:
                            scraper_link['url'] = new_url

                    if (not control.setting('allow_openload') == 'true' and 'openload' in scraper_link['url']) or (
                                not control.setting('allow_the_video_me') == 'true' and 'thevideo.me' in scraper_link[
                                'url']):
                        continue
                    if preset.lower() == 'searchsd':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality > 576:
                                continue
                        except:
                            if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                                continue
                    elif preset.lower() == 'search':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality <= 576:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue
                        except:
                            if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue

                    if "m4u" in scraper_link['url']:
                        if sources().check_playable(scraper_link['url']) is not None:
                            return scraper_link['url']

                    else:
                        try:
                            if scraper_link['direct']:
                                import urlresolver9
                                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                                   include_universal=allow_debrid)
                                if hmf.valid_url():
                                    resolved_url = hmf.resolve()
                                else:
                                    resolved_url = None
                                # resolved_url = urlresolver9.resolve(scraper_link['url'])
                                if resolved_url and sources().check_playable(resolved_url) is not None:
                                    url = resolved_url
                                    return url
                                else:
                                    if sources().check_playable(scraper_link['url']):
                                        return scraper_link['url']
                            else:
                                non_direct.append(scraper_link)
                        except:
                            if scraper_link['direct']:
                                url = scraper_link['url']
                                if sources().check_playable(url) is not None:
                                    return url
                            else:
                                non_direct.append(scraper_link)

        for scraper_link in non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            if (not control.setting('allow_openload') == 'true' and 'openload' in scraper_link['url']) or (
                        not control.setting('allow_the_video_me') == 'true' and 'thevideo.me' in scraper_link[
                        'url']):
                continue
            if preset.lower() == 'searchsd':
                try:
                    quality = int(scraper_link['quality'])
                    if quality > 576:
                        continue
                except:
                    if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                        continue
            elif preset.lower() == 'search':
                try:
                    quality = int(scraper_link['quality'])
                    if quality <= 576:
                        sd_non_direct.append(scraper_link)
                        continue
                except:
                    if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                        sd_non_direct.append(scraper_link)
                        continue

            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (
                        resolved_url.startswith("plugin://") or sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url

        for scraper_link in sd_links:

            if dialog is not None and dialog.iscanceled():
                return

            if "m4u" in scraper_link['url']:
                return scraper_link['url']

            else:
                try:
                    if scraper_link['direct']:
                        import urlresolver9
                        hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                           include_universal=allow_debrid)
                        if hmf.valid_url():
                            resolved_url = hmf.resolve()
                        else:
                            resolved_url = None
                        # resolved_url = urlresolver9.resolve(scraper_link['url'])
                        if resolved_url and sources().check_playable(resolved_url) is not None:
                            url = resolved_url
                            return url
                        else:
                            if sources().check_playable(scraper_link['url']):
                                return scraper_link['url']
                    else:
                        non_direct.append(scraper_link)
                except:
                    if scraper_link['direct']:
                        url = scraper_link['url']
                        if sources().check_playable(url) is not None:
                            return url
                    else:
                        non_direct.append(scraper_link)

        try:
            import urlresolver9
        except:
            control.dialog.ok("Dependency missing",
                              "please install script.mrknow.urlresolver to resolve non-direct links")
            return

        for scraper_link in sd_non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (
                        resolved_url.startswith("plugin://") or sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url
Example #4
    def getSources(title,
                   year,
                   imdb,
                   tvdb,
                   season,
                   episode,
                   tvshowtitle,
                   premiered,
                   timeout=20,
                   progress=True,
                   preset="search",
                   dialog=None,
                   exclude=None,
                   scraper_title=False,
                   queueing=False):
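        # Scrape movie/episode links with nanscrapers behind a progress dialog, then
        # try HD direct links first and fall back to non-direct and SD links,
        # resolving hosters through urlresolver9.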
        progressDialog = control.progressDialog
        progressDialog.create(control.addonInfo('name'), '')
        progressDialog.update(
            0, '[COLOR yellow]Scraping the net for Streams[/COLOR]',
            'Please Be Patient...')
        year = str(year)
        content = 'movie' if tvshowtitle is None else 'episode'
        allow_debrid = control.setting('allow_debrid') == "true"

        if content == 'movie':
            title = cleantitle.normalize(title)
            links_scraper = nanscrapers.scrape_movie(
                title,
                year,
                imdb,
                timeout=timeout,
                exclude=exclude,
                enable_debrid=allow_debrid)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            links_scraper = nanscrapers.scrape_episode(
                tvshowtitle,
                year,
                premiered,
                season,
                episode,
                imdb,
                tvdb,
                timeout=timeout,
                exclude=exclude,
                enable_debrid=allow_debrid)
        else:
            return

        sd_links = []
        non_direct = []
        sd_non_direct = []
        links_scraper = links_scraper()
        for scraper_links in links_scraper:
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return

                    if allow_debrid:
                        new_url = debrid.resolve(scraper_link['url'])
                        if new_url:
                            scraper_link['url'] = new_url

                    if (not control.setting('allow_openload') == 'true'
                            and 'openload' in scraper_link['url']
                        ) or (
                            not control.setting('allow_the_video_me') == 'true'
                            and 'thevideo.me' in scraper_link['url']):
                        continue
                    if preset.lower() == 'searchsd':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality > 576:
                                continue
                        except:
                            if scraper_link['quality'] not in [
                                    "SD", "CAM", "SCR"
                            ]:
                                continue
                    elif preset.lower() == 'search':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality <= 576:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue
                        except:
                            if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue

                    if "m4u" in scraper_link['url']:
                        if sources().check_playable(
                                scraper_link['url']) is not None:
                            return scraper_link['url']

                    else:
                        try:
                            if scraper_link['direct']:
                                import urlresolver9
                                hmf = urlresolver9.HostedMediaFile(
                                    url=scraper_link['url'],
                                    include_disabled=False,
                                    include_universal=allow_debrid)
                                if hmf.valid_url():
                                    resolved_url = hmf.resolve()
                                else:
                                    resolved_url = None
                                # resolved_url = urlresolver9.resolve(scraper_link['url'])
                                if resolved_url and sources().check_playable(
                                        resolved_url) is not None:
                                    url = resolved_url
                                    return url
                                else:
                                    if sources().check_playable(
                                            scraper_link['url']):
                                        return scraper_link['url']
                            else:
                                non_direct.append(scraper_link)
                        except:
                            if scraper_link['direct']:
                                url = scraper_link['url']
                                if sources().check_playable(url) is not None:
                                    return url
                            else:
                                non_direct.append(scraper_link)

        for scraper_link in non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            if (not control.setting('allow_openload') == 'true'
                    and 'openload' in scraper_link['url']) or (
                        not control.setting('allow_the_video_me') == 'true'
                        and 'thevideo.me' in scraper_link['url']):
                continue
            if preset.lower() == 'searchsd':
                try:
                    quality = int(scraper_link['quality'])
                    if quality > 576:
                        continue
                except:
                    if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                        continue
            elif preset.lower() == 'search':
                try:
                    quality = int(scraper_link['quality'])
                    if quality <= 576:
                        sd_non_direct.append(scraper_link)
                        continue
                except:
                    if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                        sd_non_direct.append(scraper_link)
                        continue

            try:
                hmf = urlresolver9.HostedMediaFile(
                    url=scraper_link['url'],
                    include_disabled=False,
                    include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://")
                                 or sources().check_playable(resolved_url)
                                 is not None):
                url = resolved_url
                return url

        for scraper_link in sd_links:

            if dialog is not None and dialog.iscanceled():
                return

            if "m4u" in scraper_link['url']:
                return scraper_link['url']

            else:
                try:
                    if scraper_link['direct']:
                        import urlresolver9
                        hmf = urlresolver9.HostedMediaFile(
                            url=scraper_link['url'],
                            include_disabled=False,
                            include_universal=allow_debrid)
                        if hmf.valid_url():
                            resolved_url = hmf.resolve()
                        else:
                            resolved_url = None
                        # resolved_url = urlresolver9.resolve(scraper_link['url'])
                        if resolved_url and sources().check_playable(
                                resolved_url) is not None:
                            url = resolved_url
                            return url
                        else:
                            if sources().check_playable(scraper_link['url']):
                                return scraper_link['url']
                    else:
                        non_direct.append(scraper_link)
                except:
                    if scraper_link['direct']:
                        url = scraper_link['url']
                        if sources().check_playable(url) is not None:
                            return url
                    else:
                        non_direct.append(scraper_link)

        try:
            import urlresolver9
        except:
            control.dialog.ok(
                "Dependency missing",
                "please install script.mrknow.urlresolver to resolve non-direct links"
            )
            return

        for scraper_link in sd_non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            try:
                hmf = urlresolver9.HostedMediaFile(
                    url=scraper_link['url'],
                    include_disabled=False,
                    include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://")
                                 or sources().check_playable(resolved_url)
                                 is not None):
                url = resolved_url
                return url
        try:
            progressDialog.close()
        except:
            pass
Example #5
    def getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, timeout=20,
                   progress=True, preset="search", dialog=None, exclude=None, scraper_title=False, queueing=False):
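        # Compact formatting of the same getSources flow: progress dialog, scrape with
        # nanscrapers, then return the first playable link (HD direct, then non-direct,
        # then SD).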
        progressDialog = control.progressDialog
        progressDialog.create(control.addonInfo('name'), '')
        progressDialog.update(0,'[COLOR yellow]Scraping the net for Streams[/COLOR]' , 'Please Be Patient...')
        year = str(year)
        content = 'movie' if tvshowtitle is None else 'episode'
        allow_debrid = control.setting('allow_debrid') == "true"

        if content == 'movie':
            title = cleantitle.normalize(title)
            links_scraper = nanscrapers.scrape_movie(title, year, imdb, timeout=timeout, exclude=exclude,
                                                     enable_debrid=allow_debrid)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            links_scraper = nanscrapers.scrape_episode(tvshowtitle, year, premiered, season, episode, imdb, tvdb,
                                                       timeout=timeout, exclude=exclude, enable_debrid=allow_debrid)
        else:
            return

        sd_links = []
        non_direct = []
        sd_non_direct = []
        links_scraper = links_scraper()
        for scraper_links in links_scraper:
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return

                    if allow_debrid:
                        new_url = debrid.resolve(scraper_link['url'])
                        if new_url:
                            scraper_link['url'] = new_url

                    if (not control.setting('allow_openload') == 'true' and 'openload' in scraper_link['url']) or (
                                not control.setting('allow_the_video_me') == 'true' and 'thevideo.me' in scraper_link[
                                'url']):
                        continue
                    if preset.lower() == 'searchsd':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality > 576:
                                continue
                        except:
                            if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                                continue
                    elif preset.lower() == 'search':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality <= 576:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue
                        except:
                            if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                                if scraper_link["direct"]:
                                    sd_links.append(scraper_link)
                                else:
                                    sd_non_direct.append(scraper_link)
                                continue

                    if "m4u" in scraper_link['url']:
                        if sources().check_playable(scraper_link['url']) is not None:
                            return scraper_link['url']

                    else:
                        try:
                            if scraper_link['direct']:
                                import urlresolver9
                                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                                   include_universal=allow_debrid)
                                if hmf.valid_url():
                                    resolved_url = hmf.resolve()
                                else:
                                    resolved_url = None
                                # resolved_url = urlresolver9.resolve(scraper_link['url'])
                                if resolved_url and sources().check_playable(resolved_url) is not None:
                                    url = resolved_url
                                    return url
                                else:
                                    if sources().check_playable(scraper_link['url']):
                                        return scraper_link['url']
                            else:
                                non_direct.append(scraper_link)
                        except:
                            if scraper_link['direct']:
                                url = scraper_link['url']
                                if sources().check_playable(url) is not None:
                                    return url
                            else:
                                non_direct.append(scraper_link)

        for scraper_link in non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            if (not control.setting('allow_openload') == 'true' and 'openload' in scraper_link['url']) or (
                        not control.setting('allow_the_video_me') == 'true' and 'thevideo.me' in scraper_link[
                        'url']):
                continue
            if preset.lower() == 'searchsd':
                try:
                    quality = int(scraper_link['quality'])
                    if quality > 576:
                        continue
                except:
                    if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                        continue
            elif preset.lower() == 'search':
                try:
                    quality = int(scraper_link['quality'])
                    if quality <= 576:
                        sd_non_direct.append(scraper_link)
                        continue
                except:
                    if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                        sd_non_direct.append(scraper_link)
                        continue

            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (
                        resolved_url.startswith("plugin://") or sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url

        for scraper_link in sd_links:

            if dialog is not None and dialog.iscanceled():
                return

            if "m4u" in scraper_link['url']:
                return scraper_link['url']

            else:
                try:
                    if scraper_link['direct']:
                        import urlresolver9
                        hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                           include_universal=allow_debrid)
                        if hmf.valid_url():
                            resolved_url = hmf.resolve()
                        else:
                            resolved_url = None
                        # resolved_url = urlresolver9.resolve(scraper_link['url'])
                        if resolved_url and sources().check_playable(resolved_url) is not None:
                            url = resolved_url
                            return url
                        else:
                            if sources().check_playable(scraper_link['url']):
                                return scraper_link['url']
                    else:
                        non_direct.append(scraper_link)
                except:
                    if scraper_link['direct']:
                        url = scraper_link['url']
                        if sources().check_playable(url) is not None:
                            return url
                    else:
                        non_direct.append(scraper_link)

        try:
            import urlresolver9
        except:
            control.dialog.ok("Dependency missing",
                              "please install script.mrknow.urlresolver to resolve non-direct links")
            return

        for scraper_link in sd_non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (
                        resolved_url.startswith("plugin://") or sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url
        try:
            progressDialog.close()
        except:
            pass
Example #6
    def getSources(title,
                   year,
                   imdb,
                   tvdb,
                   season,
                   episode,
                   tvshowtitle,
                   premiered,
                   timeout=30,
                   progress=True,
                   preset="search",
                   dialog=None,
                   exclude=None,
                   scraper_title=False):
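        # Variant without debrid resolution or queueing support; here the link dialog
        # returns a single link rather than a (link, items) tuple.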

        year = str(year)

        content = 'movie' if tvshowtitle is None else 'episode'

        if content == 'movie':
            title = cleantitle.normalize(title)
            links_scraper = nanscrapers.scrape_movie(title,
                                                     year,
                                                     imdb,
                                                     timeout=timeout,
                                                     exclude=exclude)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            links_scraper = nanscrapers.scrape_episode(tvshowtitle,
                                                       year,
                                                       premiered,
                                                       season,
                                                       episode,
                                                       imdb,
                                                       tvdb,
                                                       timeout=timeout,
                                                       exclude=exclude)
        else:
            return

        # Kodi settings come back as strings, so compare against 'true' rather than
        # relying on bool(), which is truthy even for the string 'false'.
        allow_debrid = control.setting('allow_debrid') == 'true'

        if control.setting('use_link_dialog') == 'true':
            if content == 'movie':
                link = nanscrapers.scrape_movie_with_dialog(
                    title,
                    year,
                    imdb,
                    timeout=timeout,
                    exclude=exclude,
                    sort_function=sources.sort_function)
            elif content == "episode":
                link = nanscrapers.scrape_episode_with_dialog(
                    tvshowtitle,
                    year,
                    premiered,
                    season,
                    episode,
                    imdb,
                    tvdb,
                    timeout=timeout,
                    exclude=exclude,
                    sort_function=sources.sort_function)
            else:
                return

            if link is None:
                return
            url = link['url']
            import urlresolver9
            hmf = urlresolver9.HostedMediaFile(url=url,
                                               include_disabled=False,
                                               include_universal=allow_debrid)
            if hmf.valid_url() == True:
                resolved_url = hmf.resolve()
            else:
                resolved_url = None
            if resolved_url and sources().check_playable(
                    resolved_url) is not None:
                url = resolved_url
            return url

        sd_links = []
        non_direct = []
        sd_non_direct = []
        links_scraper = links_scraper()
        for scraper_links in links_scraper:
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return

                    if (not control.setting('allow_openload') == 'true'
                            and 'openload' in scraper_link['url']
                        ) or (
                            not control.setting('allow_the_video_me') == 'true'
                            and 'thevideo.me' in scraper_link['url']):
                        continue
                    if preset.lower() == 'searchsd':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality > 576:
                                continue
                        except:
                            if scraper_link['quality'] not in [
                                    "SD", "CAM", "SCR"
                            ]:
                                continue
                    elif preset.lower() == 'search':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality <= 576:
                                sd_links.append(scraper_link)
                                continue
                        except:
                            if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                                sd_links.append(scraper_link)
                                continue

                    if "m4u" in scraper_link['url']:
                        if sources().check_playable(
                                scraper_link['url']) is not None:
                            return scraper_link['url']

                    else:
                        try:
                            if scraper_link['direct']:
                                import urlresolver9
                                hmf = urlresolver9.HostedMediaFile(
                                    url=scraper_link['url'],
                                    include_disabled=False,
                                    include_universal=allow_debrid)
                                if hmf.valid_url():
                                    resolved_url = hmf.resolve()
                                else:
                                    resolved_url = None
                                # resolved_url = urlresolver9.resolve(scraper_link['url'])
                                if resolved_url and sources().check_playable(
                                        resolved_url) is not None:
                                    url = resolved_url
                                    return url
                                else:
                                    if sources().check_playable(
                                            scraper_link['url']):
                                        return scraper_link['url']
                            else:
                                non_direct.append(scraper_link)
                        except:
                            if scraper_link['direct']:
                                url = scraper_link['url']
                                if sources().check_playable(url) is not None:
                                    return url
                            else:
                                non_direct.append(scraper_link)

        for scraper_link in non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            if (not control.setting('allow_openload') == 'true'
                    and 'openload' in scraper_link['url']) or (
                        not control.setting('allow_the_video_me') == 'true'
                        and 'thevideo.me' in scraper_link['url']):
                continue
            if preset.lower() == 'searchsd':
                try:
                    quality = int(scraper_link['quality'])
                    if quality > 576:
                        continue
                except:
                    if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                        continue
            elif preset.lower() == 'search':
                try:
                    quality = int(scraper_link['quality'])
                    if quality <= 576:
                        sd_non_direct.append(scraper_link)
                        continue
                except:
                    if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                        sd_non_direct.append(scraper_link)
                        continue

            try:
                hmf = urlresolver9.HostedMediaFile(
                    url=scraper_link['url'],
                    include_disabled=False,
                    include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://")
                                 or sources().check_playable(resolved_url)
                                 is not None):
                url = resolved_url
                return url

        for scraper_link in sd_links:

            if dialog is not None and dialog.iscanceled():
                return

            if "m4u" in scraper_link['url']:
                return scraper_link['url']

            else:
                try:
                    if scraper_link['direct']:
                        import urlresolver9
                        hmf = urlresolver9.HostedMediaFile(
                            url=scraper_link['url'],
                            include_disabled=False,
                            include_universal=allow_debrid)
                        if hmf.valid_url():
                            resolved_url = hmf.resolve()
                        else:
                            resolved_url = None
                        # resolved_url = urlresolver9.resolve(scraper_link['url'])
                        if resolved_url and sources().check_playable(
                                resolved_url) is not None:
                            url = resolved_url
                            return url
                        else:
                            if sources().check_playable(scraper_link['url']):
                                return scraper_link['url']
                    else:
                        non_direct.append(scraper_link)
                except:
                    if scraper_link['direct']:
                        url = scraper_link['url']
                        if sources().check_playable(url) is not None:
                            return url
                    else:
                        non_direct.append(scraper_link)

        try:
            import urlresolver9
        except:
            control.dialog.ok(
                "Dependency missing",
                "please install script.mrknow.urlresolver to resolve non-direct links"
            )
            return

        for scraper_link in sd_non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            try:
                hmf = urlresolver9.HostedMediaFile(
                    url=scraper_link['url'],
                    include_disabled=False,
                    include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (resolved_url.startswith("plugin://")
                                 or sources().check_playable(resolved_url)
                                 is not None):
                url = resolved_url
                return url
Example #7
def test():
    global movies, shows
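    # Same testing routine as above, reformatted: load test cases from testings.xml,
    # scrape them, then report failing scrapers from url_cache.db.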
    try:
        test_movies = []
        test_episodes = []
        profile_path = xbmc.translatePath(
            xbmcaddon.Addon().getAddonInfo('profile')).decode('utf-8')
        test_file = xbmcvfs.File(os.path.join(profile_path, "testings.xml"))
        xml = BeautifulStoneSoup(test_file.read())
        test_file.close()
        items = xml.findAll("item")
        for item in items:
            try:
                content = item.find("content")
                if content:
                    if "movie" in content.text:
                        meta = item.find("meta")
                        test_movies.append({
                            'title': meta.find("title").text,
                            'imdb': meta.find("imdb").text,
                            'year': meta.find("year").text,
                        })
                    elif "episode" in content.text:
                        meta = item.find("meta")
                        test_episodes.append({
                            'title':
                            meta.find("tvshowtitle").text,
                            'show_year':
                            int(meta.find("premiered").text[0:4]),
                            'year':
                            meta.find("year").text,
                            'season':
                            meta.find("season").text,
                            'episode':
                            meta.find("season").text,
                            'imdb':
                            meta.find("imdb").text,
                        })
            except:
                pass

        movies = test_movies
        shows = test_episodes
    except:
        pass

    dialog = xbmcgui.Dialog()
    pDialog = xbmcgui.DialogProgress()
    if dialog.yesno("NaNscrapers Testing Mode", 'Clear cache?'):
        nanscrapers.clear_cache()
    try:
        dbcon = database.connect(
            os.path.join(
                xbmc.translatePath(
                    xbmcaddon.Addon("script.module.nanscrapers").getAddonInfo(
                        'profile')).decode('utf-8'), 'url_cache.db'))
        dbcur = dbcon.cursor()
    except:
        dialog.ok("NaNscrapers Testing Mode", 'Error connecting to db')
        sys.exit()

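    # Scrape each test movie; the progress dialog can be cancelled at any point.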
    num_movies = len(movies)
    if num_movies > 0:
        pDialog.create('NaNscrapers Testing mode active', 'please wait')
        index = 0
        for movie in movies:
            index += 1
            title = movie['title']
            year = movie['year']
            imdb = movie['imdb']
            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(index * 100 // num_movies,
                           "Scraping movie {} of {}".format(index, num_movies),
                           title)
            links_scraper = nanscrapers.scrape_movie(title, year, imdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
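        # Rows with an empty episode field appear to be movie results: count the distinct
        # scrapers and flag any scraper that produced at most one distinct URL list.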
        dbcur.execute(
            "SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode = ''")
        match = dbcur.fetchone()
        num_movie_scrapers = match[0]

        dbcur.execute(
            "SELECT scraper, count(distinct(urls)) FROM rel_src where episode = '' group by scraper"
        )
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                failed.append(match[0])

        if len(failed) > 0:
            failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                failedstring += "\n        - {}".format(str(fail))
        else:
            failedstring = ""

        dbcur.execute(
            "SELECT title, count(distinct(urls)) FROM rel_src where episode = '' group by title"
        )
        matches = dbcur.fetchall()
        failed_movies = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode == '' and title == '{}' group by scraper"
                        .format(match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_movies.append(match[0])
                else:
                    failed_movies.append(match[0])

        if len(failed_movies) > 0:
            failed_movie_string = "Failed movies: {}".format(
                len(failed_movies))
            for fail in failed_movies:
                for movie in movies:
                    if clean_title(movie['title']).upper() == str(fail):
                        failed_movie_string += "\n        - {}".format(
                            movie["title"])

        else:
            failed_movie_string = ""

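    # Repeat the same scrape-and-summarise pass for the test episodes.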
    num_shows = len(shows)
    if num_shows > 0:
        pDialog.create('NaNscrapers Testing mode active', 'please wait')
        index = 0
        for show in shows:
            index += 1
            title = show['title']
            show_year = show['show_year']
            year = show['year']
            season = show['season']
            episode = show['episode']
            imdb = show['imdb']
            tvdb = show.get('tvdb', '')

            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(index * 100 // num_shows,
                           "Scraping show {} of {}".format(index, num_shows),
                           title)
            links_scraper = nanscrapers.scrape_episode(title, show_year, year,
                                                       season, episode, imdb,
                                                       tvdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
        dbcur.execute(
            "SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode != ''")
        match = dbcur.fetchone()
        num_show_scrapers = match[0]

        dbcur.execute(
            "SELECT scraper, count(distinct(urls)) FROM rel_src where episode != '' group by scraper"
        )
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and scraper == '{}' group by scraper"
                        .format(match[0]))
                    match = dbcur.fetchone()
                    if match[1] == "[]":
                        failed.append(match[0])
                else:
                    failed.append(match[0])

        if len(failed) > 0:
            show_scraper_failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                show_scraper_failedstring += "\n        - {}".format(str(fail))
        else:
            show_scraper_failedstring = ""

        dbcur.execute(
            "SELECT title, count(distinct(urls)) FROM rel_src where episode != '' group by title"
        )
        matches = dbcur.fetchall()
        failed_shows = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and title == '{}' group by scraper"
                        .format(match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_shows.append(match[0])
                else:
                    failed_shows.append(match[0])

        if len(failed_shows) > 0:
            failed_show_string = "Failed shows: {}".format(len(failed_shows))
            for fail in failed_shows:
                for show in shows:
                    if clean_title(show['title']).upper() == str(fail):
                        failed_show_string += "\n        - {} S{}-E{}".format(
                            show["title"], show["season"], show["episode"])

        else:
            failed_show_string = ""

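    # Assemble the summary shown in the text viewer below.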
    resultstring = 'Results:\n'
    if num_movies > 0:
        resultstring = resultstring + \
                       '    Movie Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_movie_scrapers, failedstring, failed_movie_string)
    if num_shows > 0:
        resultstring = resultstring + \
                       '    Episode Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_show_scrapers, show_scraper_failedstring, failed_show_string)

    dialog.textviewer("NaNscrapers Testing Mode", resultstring)
Example #8
0
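    # Find a playable URL for a movie or an episode: scrape links with nanscrapers,
    # filter them by quality according to 'preset', and resolve hosted (non-direct)
    # links with urlresolver9.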
    def getSources(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, timeout=30,
                   progress=True, preset="search", dialog=None, exclude=None, scraper_title=False):

        year = str(year)

        content = 'movie' if tvshowtitle is None else 'episode'

        if content == 'movie':
            title = cleantitle.normalize(title)
            links_scraper = nanscrapers.scrape_movie(title, year, imdb, timeout=timeout, exclude=exclude)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            links_scraper = nanscrapers.scrape_episode(tvshowtitle, year, premiered, season, episode, imdb, tvdb,
                                                       timeout=timeout, exclude=exclude)
        else:
            return

        # settings are stored as 'true'/'false' strings, so compare explicitly
        allow_debrid = control.setting('allow_debrid') == 'true'

        if control.setting('use_link_dialog') == 'true':
            if content == 'movie':
                link = nanscrapers.scrape_movie_with_dialog(title, year, imdb, timeout=timeout, exclude=exclude, sort_function=sources.sort_function)
            elif content == "episode":
                link = nanscrapers.scrape_episode_with_dialog(tvshowtitle, year, premiered, season, episode, imdb, tvdb,
                                                       timeout=timeout, exclude=exclude, sort_function=sources.sort_function)
            else:
                return

            url = link['url']
            import urlresolver9
            hmf = urlresolver9.HostedMediaFile(url=url, include_disabled=False,
                                               include_universal=allow_debrid)
            if hmf.valid_url():
                resolved_url = hmf.resolve()
            else:
                resolved_url = None
            if resolved_url and sources().check_playable(resolved_url) is not None:
                url = resolved_url
            return url

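        # First pass over the freshly scraped links: return the first playable direct link,
        # queueing hosted links (and, for preset 'search', SD links) for the later passes.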
        sd_links = []
        non_direct = []
        sd_non_direct = []
        links_scraper = links_scraper()
        for scraper_links in links_scraper:
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return

                    if (control.setting('allow_openload') != 'true' and 'openload' in scraper_link['url']) or (
                            control.setting('allow_the_video_me') != 'true' and 'thevideo.me' in scraper_link['url']):
                        continue
                    if preset.lower() == 'searchsd':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality > 576:
                                continue
                        except:
                            if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                                continue
                    elif preset.lower() == 'search':
                        try:
                            quality = int(scraper_link['quality'])
                            if quality <= 576:
                                sd_links.append(scraper_link)
                                continue
                        except:
                            if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                                sd_links.append(scraper_link)
                                continue

                    if "m4u" in scraper_link['url']:
                        if sources().check_playable(scraper_link['url']) is not None:
                            return scraper_link['url']

                    else:
                        try:
                            if scraper_link['direct']:
                                import urlresolver9
                                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                                   include_universal=allow_debrid)
                                if hmf.valid_url():
                                    resolved_url = hmf.resolve()
                                else:
                                    resolved_url = None
                                # resolved_url = urlresolver9.resolve(scraper_link['url'])
                                if resolved_url and sources().check_playable(resolved_url) is not None:
                                    url = resolved_url
                                    return url
                                else:
                                    if sources().check_playable(scraper_link['url']):
                                        return scraper_link['url']
                            else:
                                non_direct.append(scraper_link)
                        except:
                            if scraper_link['direct']:
                                url = scraper_link['url']
                                if sources().check_playable(url) is not None:
                                    return url
                            else:
                                non_direct.append(scraper_link)

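        # Second pass: try to resolve the queued hosted (non-direct) links.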
        for scraper_link in non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            if (control.setting('allow_openload') != 'true' and 'openload' in scraper_link['url']) or (
                    control.setting('allow_the_video_me') != 'true' and 'thevideo.me' in scraper_link['url']):
                continue
            if preset.lower() == 'searchsd':
                try:
                    quality = int(scraper_link['quality'])
                    if quality > 576:
                        continue
                except:
                    if scraper_link['quality'] not in ["SD", "CAM", "SCR"]:
                        continue
            elif preset.lower() == 'search':
                try:
                    quality = int(scraper_link['quality'])
                    if quality <= 576:
                        sd_non_direct.append(scraper_link)
                        continue
                except:
                    if scraper_link['quality'] in ["SD", "CAM", "SCR"]:
                        sd_non_direct.append(scraper_link)
                        continue

            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (
                        resolved_url.startswith("plugin://") or sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url

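        # Third pass: fall back to the SD links collected earlier.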
        for scraper_link in sd_links:

            if dialog is not None and dialog.iscanceled():
                return

            if "m4u" in scraper_link['url']:
                return scraper_link['url']

            else:
                try:
                    if scraper_link['direct']:
                        import urlresolver9
                        hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                           include_universal=allow_debrid)
                        if hmf.valid_url():
                            resolved_url = hmf.resolve()
                        else:
                            resolved_url = None
                        # resolved_url = urlresolver9.resolve(scraper_link['url'])
                        if resolved_url and sources().check_playable(resolved_url) is not None:
                            url = resolved_url
                            return url
                        else:
                            if sources().check_playable(scraper_link['url']):
                                return scraper_link['url']
                    else:
                        non_direct.append(scraper_link)
                except:
                    if scraper_link['direct']:
                        url = scraper_link['url']
                        if sources().check_playable(url) is not None:
                            return url
                    else:
                        non_direct.append(scraper_link)

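        # urlresolver9 (from script.mrknow.urlresolver) is needed for the remaining hosted SD links.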
        try:
            import urlresolver9
        except:
            control.dialog.ok("Dependency missing",
                              "please install script.mrknow.urlresolver to resolve non-direct links")
            return

        for scraper_link in sd_non_direct:
            if dialog is not None and dialog.iscanceled():
                return

            try:
                hmf = urlresolver9.HostedMediaFile(url=scraper_link['url'], include_disabled=False,
                                                   include_universal=allow_debrid)
                if hmf.valid_url():
                    resolved_url = hmf.resolve()
                else:
                    resolved_url = None
                # resolved_url = urlresolver9.resolve(scraper_link['url'])
            except:
                continue
            if resolved_url and (
                        resolved_url.startswith("plugin://") or sources().check_playable(resolved_url) is not None):
                url = resolved_url
                return url