Example 1
def clear_cache():
    import xbmcgui
    skip_prompt = xbmcaddon.Addon().getSetting("quiet_cache")
    dialog = xbmcgui.Dialog()
    if skip_prompt == 'false':
        if dialog.yesno(addon_name, _("Clear Metadata?")):
            koding.Remove_Table("meta")
            koding.Remove_Table("episode_meta")
        if dialog.yesno(addon_name, _("Clear Scraper Cache?")):
            import universalscrapers
            universalscrapers.clear_cache()
        if dialog.yesno(addon_name, _("Clear GIF Cache?")):
            dest_folder = os.path.join(
                xbmc.translatePath(
                    xbmcaddon.Addon().getSetting("cache_folder")), "artcache")
            koding.Delete_Folders(dest_folder)
    else:
        koding.Remove_Table("meta")
        koding.Remove_Table("episode_meta")
        import universalscrapers
        universalscrapers.clear_cache()
        dest_folder = os.path.join(
            xbmc.translatePath(xbmcaddon.Addon().getSetting("cache_folder")),
            "artcache")
        koding.Delete_Folders(dest_folder)

    xbmc.log("running hook: clear cache", xbmc.LOGNOTICE)
    run_hook("clear_cache")
    xbmcgui.Dialog().notification('Clear Cache', 'Cache has been cleared',
                                  xbmcaddon.Addon().getAddonInfo("icon"), 4000)
Example 2
    def get_muscic_url(scraper, title, artist, cache_location, maximum_age, debrid = False):
        cache_enabled = xbmcaddon.Addon('script.module.universalscrapers').getSetting("cache_enabled") == 'true'
        try:
            dbcon = database.connect(cache_location)
            dbcur = dbcon.cursor()

            try:
                dbcur.execute("SELECT * FROM version")
                match = dbcur.fetchone()
            except:
                universalscrapers.clear_cache()
                dbcur.execute("CREATE TABLE version (""version TEXT)")
                dbcur.execute("INSERT INTO version Values ('0.5.4')")
                dbcon.commit()

            dbcur.execute(
                "CREATE TABLE IF NOT EXISTS rel_music_src ("
                "scraper TEXT, title TEXT, artist TEXT, urls TEXT, added TEXT, "
                "UNIQUE(scraper, title, artist));")
        except:
            pass

        if cache_enabled:
            try:
                sources = []
                dbcur.execute(
                    "SELECT * FROM rel_music_src WHERE scraper = '%s' AND title = '%s' AND artist = '%s'" % (
                        scraper.name, clean_title(title).upper(), artist.upper()))
                match = dbcur.fetchone()
                t1 = int(re.sub('[^0-9]', '', str(match[4])))
                t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
                update = abs(t2 - t1) > maximum_age
                if not update:
                    sources = json.loads(match[3])
                    return sources
            except:
                pass

        try:
            sources = scraper.scrape_music(title, artist, debrid = debrid)
            if sources is None:
                sources = []
            else:
                if cache_enabled:
                    dbcur.execute(
                        "DELETE FROM rel_music_src WHERE scraper = '%s' AND title = '%s' AND artist = '%s'" % (
                            scraper.name, clean_title(title).upper(), artist.upper()))
                    dbcur.execute("INSERT INTO rel_music_src Values (?, ?, ?, ?, ?)", (
                        scraper.name, clean_title(title).upper(), artist.upper(), json.dumps(sources),
                        datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
                    dbcon.commit()

            return sources
        except:
            pass
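
The SELECT and DELETE above splice values into the SQL with % formatting, while the INSERT already uses ? placeholders. A minimal sketch of the same lookup with bound parameters throughout, which avoids quoting problems (a title containing an apostrophe would break the interpolated form); the table layout follows the snippet above:

import sqlite3

def lookup_cached_sources(dbcur, scraper_name, title, artist):
    # Bound parameters let sqlite3 handle quoting and escaping.
    dbcur.execute(
        "SELECT urls, added FROM rel_music_src "
        "WHERE scraper = ? AND title = ? AND artist = ?",
        (scraper_name, title.upper(), artist.upper()))
    return dbcur.fetchone()

# Illustrative round trip against an in-memory database:
con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.execute("CREATE TABLE rel_music_src (scraper TEXT, title TEXT, "
            "artist TEXT, urls TEXT, added TEXT, "
            "UNIQUE(scraper, title, artist))")
cur.execute("INSERT INTO rel_music_src VALUES (?, ?, ?, ?, ?)",
            ("demo", "SONG", "ARTIST", "[]", "2020-01-01 00:00"))
print(lookup_cached_sources(cur, "demo", "song", "artist"))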
Example 3
def test():
    pDialog = xbmcgui.DialogProgress()
    dialog = xbmcgui.Dialog()
    if dialog.yesno("universalscrapers Testing Mode", 'Clear Scraper Log?'):
        clear_scraper_log()
    if dialog.yesno("universalscrapers Testing Mode", 'Clear cache?'):
        universalscrapers.clear_cache()
    test_type = xbmcgui.Dialog().select("Choose type of test",
                                        ["Single Scraper", "Full Test"])
    if test_type == 0:
        single_test(0, 0)
    elif test_type == 1:
        full_test()
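
Dialog().select() returns the index of the chosen entry, or -1 when the user backs out, which is why the if/elif chain above silently does nothing on cancel. A minimal sketch that names the cancel case explicitly (labels as above; the caller decides what to run):

import xbmcgui

def choose_test():
    # select() returns -1 if the dialog is cancelled.
    choice = xbmcgui.Dialog().select("Choose type of test",
                                     ["Single Scraper", "Full Test"])
    return None if choice == -1 else choice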
Example 4
def clear_cache():
    import xbmcgui
    dialog = xbmcgui.Dialog()
    if dialog.yesno(addon_name, _("Clear Metadata?")):
        koding.Remove_Table("meta")
        koding.Remove_Table("episode_meta")
    if dialog.yesno(addon_name, _("Clear Scraper Cache?")):
        import universalscrapers
        universalscrapers.clear_cache()
    if dialog.yesno(addon_name, _("Clear GIF Cache?")):
        dest_folder = os.path.join(
            xbmc.translatePath(xbmcaddon.Addon().getSetting("cache_folder")),
            "artcache")
        koding.Delete_Folders(dest_folder)
    xbmc.log("running hook:", xbmc.LOGNOTICE)
    run_hook("clear_cache")
Example 5
def clear_cache():
    import xbmcgui
    dialog = xbmcgui.Dialog()
    if dialog.yesno(addon_name, _("Clear Metadata?")):
        koding.Remove_Table("meta")
        koding.Remove_Table("episode_meta")
    if dialog.yesno(addon_name, _("Clear Scraper Cache?")):
        import universalscrapers
        universalscrapers.clear_cache()
    if dialog.yesno(addon_name, _("Clear GIF Cache?")):
        dest_folder = os.path.join(
            xbmc.translatePath(xbmcaddon.Addon().getSetting("cache_folder")),
            "artcache")
        koding.Delete_Folders(dest_folder)
    xbmc.log("running hook: clear cache", xbmc.LOGNOTICE)
    run_hook("clear_cache")
Example 6
def test():
    pDialog = xbmcgui.DialogProgress()
    dialog = xbmcgui.Dialog()
    if dialog.yesno("Universalscrapers Testing Mode", 'Clear Scraper Log?'):
        clear_scraper_log()
    if dialog.yesno("Universalscrapers Testing Mode", 'Clear cache?'):
        universalscrapers.clear_cache()
    if tmdb_test == '':
        test_type = xbmcgui.Dialog().select("Choose type of test", ["Single Scraper", "Full Test"])
    else:
        test_type = xbmcgui.Dialog().select("Choose type of test", ["Single Scraper", "Full Test", "TMDB Test"])
    if test_type == 0:
        single_test(0, 0)
    elif test_type == 1:
        full_test()
    if tmdb_test != '':
        if test_type == 2:
            tmdb_test_menu()
Example 7
def clear_scraper_cache():
    import xbmcgui
    dialog = xbmcgui.Dialog()
    try:
        import universalscrapers
        universalscrapers.clear_cache()
    except:
        pass
    db = sqlite3.connect(database_loc)
    db.execute("vacuum")
    db.commit()
    db.close()
    xbmc.log("running hook: clear cache", xbmc.LOGNOTICE)
    run_hook("clear_cache")
    xbmcgui.Dialog().notification('Clear Scraper Cache',
                                  'Scraper Cache has been cleared',
                                  xbmcaddon.Addon().getAddonInfo("icon"), 4000)
Example 8
    def get_url(scraper,
                title,
                show_year,
                year,
                season,
                episode,
                imdb,
                tvdb,
                type,
                cache_location,
                maximum_age,
                check_url=False,
                debrid=False):
        cache_enabled = xbmcaddon.Addon('script.module.universalscrapers'
                                        ).getSetting("cache_enabled") == 'true'
        try:
            dbcon = database.connect(cache_location)
            dbcur = dbcon.cursor()
            try:
                dbcur.execute("SELECT * FROM version")
                match = dbcur.fetchone()
            except:
                universalscrapers.clear_cache()
                dbcur.execute("CREATE TABLE version (" "version TEXT)")
                dbcur.execute("INSERT INTO version Values ('0.5.4')")
                dbcon.commit()

            dbcur.execute("CREATE TABLE IF NOT EXISTS rel_src ("
                          "scraper TEXT, "
                          "title Text, show_year TEXT, year TEXT, "
                          "season TEXT, "
                          "episode TEXT, "
                          "imdb_id TEXT, "
                          "urls TEXT, "
                          "added TEXT, "
                          "UNIQUE(scraper, title, year, season, episode)"
                          ");")
        except:
            pass

        if cache_enabled:
            try:
                sources = []
                dbcur.execute(
                    "SELECT * FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'"
                    % (scraper.name, clean_title(title).upper(), show_year,
                       year, season, episode))
                match = dbcur.fetchone()
                t1 = int(re.sub('[^0-9]', '', str(match[8])))
                t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
                update = abs(t2 - t1) > maximum_age
                if not update:
                    sources = json.loads(match[7])
                    return sources
            except:
                pass

        try:
            sources = []
            if type == "movie":
                sources = scraper.scrape_movie(title,
                                               year,
                                               imdb,
                                               debrid=debrid)
            elif type == "episode":
                sources = scraper.scrape_episode(title,
                                                 show_year,
                                                 year,
                                                 season,
                                                 episode,
                                                 imdb,
                                                 tvdb,
                                                 debrid=debrid)
            if sources is None:
                sources = []
            else:
                if cache_enabled:
                    try:
                        dbcur.execute(
                            "DELETE FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'"
                            % (scraper.name, clean_title(title).upper(),
                               show_year, year, season, episode))
                        dbcur.execute(
                            "INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
                            (scraper.name, clean_title(title).upper(),
                             show_year, year, season, episode, imdb,
                             json.dumps(sources),
                             datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
                             ))
                        dbcon.commit()
                    except:
                        pass

            if check_url:
                noresolver = False
                try:
                    import resolveurl as urlresolver
                except ImportError:
                    try:
                        import urlresolver
                    except ImportError:
                        noresolver = True
                new_sources = []
                from common import check_playable
                for source in sources:
                    if source["direct"]:
                        check = check_playable(source["url"])
                        if check:
                            new_sources.append(source)
                    elif not noresolver:
                        try:
                            hmf = urlresolver.HostedMediaFile(
                                url=source['url'],
                                include_disabled=False,
                                include_universal=False)
                            if hmf.valid_url():
                                resolved_url = hmf.resolve()
                                check = check_playable(resolved_url)
                                if check:
                                    new_sources.append(source)
                        except:
                            pass
                    else:
                        new_sources.append(source)
                sources = new_sources
            return sources
        except:
            pass
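
The nested imports above implement an optional-dependency fallback: try the current resolveurl module first, then the legacy urlresolver name, and only disable resolving when neither is present. The same idiom in isolation, as a sketch that branches on the module object rather than a flag (both packages are assumed to expose the same interface, as the snippet implies):

try:
    import resolveurl as urlresolver  # current module name
except ImportError:
    try:
        import urlresolver  # legacy module name
    except ImportError:
        urlresolver = None  # no resolver installed; pass links through

# Later: if urlresolver is not None: hmf = urlresolver.HostedMediaFile(...)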
Example 9
    if os.path.exists(progressFile):
        if control.yesnoDialog(control.lang(32056).encode('utf-8'), '', ''):
            try:
                os.remove(progressFile)
                dialog.ok('Clear Progress', 'Clear Progress Complete', '', '')
            except:
                dialog.ok('Clear Progress',
                          'There was an error Deleting the Database', '', '')
    else:
        control.infoDialog(control.lang2(161).encode('utf-8'),
                           heading='"Progress Database"',
                           sound=False,
                           icon=thumbnail)
elif action == 'clearSources':
    import universalscrapers
    universalscrapers.clear_cache()
elif action == 'deleteFavourite':
    favourites.deleteFavourite(meta, content)
elif action == 'deleteProgress':
    favourites.deleteProgress(meta, content)
elif action == 'download':
    try:
        downloader.download(
            name, image,
            sources().sourcesResolve(json.loads(source)[0], True))
    except:
        pass
elif action == 'downloadNavigator':
    navigator.navigator().downloads()
elif action == 'episodePlaycount':
    playcount.episodes(imdb, tvdb, season, episode, query)
Example 10
def clear_cache():
    import xbmcgui
    skip_prompt = xbmcaddon.Addon().getSetting("quiet_cache")
    dialog = xbmcgui.Dialog()
    if skip_prompt == 'false':
        if dialog.yesno(addon_name, _("Clear Metadata?")):
            koding.Remove_Table("meta")
            koding.Remove_Table("episode_meta")
        if dialog.yesno(addon_name, _("Clear Scraper Cache?")):
            import universalscrapers
            try:
                universalscrapers.clear_cache()
            except:
                pass
        if dialog.yesno(addon_name, _("Clear GIF Cache?")):
            dest_folder = os.path.join(
                xbmc.translatePath(
                    xbmcaddon.Addon().getSetting("cache_folder")), "artcache")
            koding.Delete_Folders(dest_folder)
        if dialog.yesno(addon_name, _("Clear Main Cache?")):
            res = koding.Get_All_From_Table("Table_names")
            for results in res:
                table_nm = results['name']
                print(table_nm)
                koding.Remove_Table(table_nm)
        if dialog.yesno(addon_name, _("Clear Plugin Cache?")):
            res = koding.Get_All_From_Table("Plugin_table_names")
            for results in res:
                table_nm = results['name']
                print(table_nm)
                koding.Remove_Table(table_nm)
    else:
        koding.Remove_Table("meta")
        koding.Remove_Table("episode_meta")
        import universalscrapers
        try:
            universalscrapers.clear_cache()
        except:
            pass
        dest_folder = os.path.join(
            xbmc.translatePath(xbmcaddon.Addon().getSetting("cache_folder")),
            "artcache")
        koding.Delete_Folders(dest_folder)
        res = koding.Get_All_From_Table("Table_names")
        for results in res:
            table_nm = results['name']
            print(table_nm)
            koding.Remove_Table(table_nm)
        res = koding.Get_All_From_Table("Plugin_table_names")
        for results in res:
            table_nm = results['name']
            print(table_nm)
            koding.Remove_Table(table_nm)

    db = sqlite3.connect('%s' % (database_loc))
    cursor = db.cursor()
    db.execute("vacuum")
    db.commit()
    db.close()
    xbmc.log("running hook: clear cache", xbmc.LOGNOTICE)
    run_hook("clear_cache")
    xbmcgui.Dialog().notification('Clear Cache', 'Cache has been cleared',
                                  xbmcaddon.Addon().getAddonInfo("icon"), 4000)
Example 11
    def get_url(scraper, title, show_year, year, season, episode, imdb, tvdb, type, cache_location, maximum_age, check_url = False, debrid = False):
        cache_enabled = xbmcaddon.Addon('script.module.universalscrapers').getSetting("cache_enabled") == 'true'
        try:
            dbcon = database.connect(cache_location)
            dbcur = dbcon.cursor()
            try:
                dbcur.execute("SELECT * FROM version")
                match = dbcur.fetchone()
            except:
                universalscrapers.clear_cache()
                dbcur.execute("CREATE TABLE version (""version TEXT)")
                dbcur.execute("INSERT INTO version Values ('0.5.4')")
                dbcon.commit()

            dbcur.execute(
                "CREATE TABLE IF NOT EXISTS rel_src ("
                "scraper TEXT, title TEXT, show_year TEXT, year TEXT, "
                "season TEXT, episode TEXT, imdb_id TEXT, urls TEXT, added TEXT, "
                "UNIQUE(scraper, title, year, season, episode));")
        except:
            pass

        if cache_enabled:
            try:
                sources = []
                dbcur.execute(
                    "SELECT * FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'" % (
                        scraper.name, clean_title(title).upper(), show_year, year, season, episode))
                match = dbcur.fetchone()
                t1 = int(re.sub('[^0-9]', '', str(match[8])))
                t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
                update = abs(t2 - t1) > maximum_age
                if not update:
                    sources = json.loads(match[7])
                    return sources
            except:
                pass

        try:
            sources = []
            if type == "movie":
                sources = scraper.scrape_movie(title, year, imdb, debrid = debrid)
            elif type == "episode":
                sources = scraper.scrape_episode(title, show_year, year, season, episode, imdb, tvdb, debrid = debrid)
            if sources is None:
                sources = []
            else:
                if cache_enabled:
                    try:
                        dbcur.execute(
                            "DELETE FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'" % (
                                scraper.name, clean_title(title).upper(), show_year, year, season, episode))
                        dbcur.execute("INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?, ?, ?, ?)", (
                            scraper.name, clean_title(title).upper(), show_year, year, season, episode, imdb,
                            json.dumps(sources),
                            datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
                        dbcon.commit()
                    except:
                        pass

            if check_url:
                noresolver = False
                try:
                    import resolveurl as urlresolver
                except ImportError:
                    try:
                        import urlresolver
                    except ImportError:
                        noresolver = True
                new_sources = []
                from common import check_playable
                for source in sources:
                    if source["direct"]:
                        check = check_playable(source["url"])
                        if check:
                            new_sources.append(source)
                    elif not noresolver:
                        try:
                            hmf = urlresolver.HostedMediaFile(url=source['url'], include_disabled=False,
                                                              include_universal=False)
                            if hmf.valid_url():
                                resolved_url = hmf.resolve()
                                check = check_playable(resolved_url)
                                if check:
                                    new_sources.append(source)
                        except:
                            pass
                    else:
                        new_sources.append(source)
                sources = new_sources
            return sources
        except:
            pass
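
The freshness check above flattens timestamps into integers such as 202001312359 and subtracts them, which only approximates minutes and overshoots across day or month boundaries (202002010001 minus 202001312359 is far larger than the two real minutes elapsed). A minimal sketch of an exact check with datetime, assuming the added column keeps the "%Y-%m-%d %H:%M" format written by the INSERT above:

import datetime

def is_fresh(added_text, maximum_age_minutes):
    # Parse the stored timestamp and compare real elapsed minutes.
    added = datetime.datetime.strptime(added_text, "%Y-%m-%d %H:%M")
    age = datetime.datetime.now() - added
    return age.total_seconds() / 60.0 <= maximum_age_minutes

# e.g. is_fresh("2020-01-31 23:59", maximum_age_minutes=120)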
Example 12
def test():
    global movies, shows
    try:
        test_movies = []
        test_episodes = []
        profile_path = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile')).decode('utf-8')
        test_file = xbmcvfs.File(os.path.join(profile_path, "testings.xml"))
        xml = BeautifulStoneSoup(test_file.read())
        test_file.close()
        items = xml.findAll("item")
        for item in items:
            try:
                content = item.find("content")
                if content:
                    if "movie" in content.text:
                        meta = item.find("meta")
                        test_movies.append({
                            'title': meta.find("title").text,
                            'imdb': meta.find("imdb").text,
                            'year': meta.find("year").text,
                        })
                    elif "episode" in content.text:
                        meta = item.find("meta")
                        test_episodes.append({
                            'title': meta.find("tvshowtitle").text,
                            'show_year': int(meta.find("premiered").text[0:4]),
                            'year': meta.find("year").text,
                            'season': meta.find("season").text,
                            'episode': meta.find("season").text,
                            'imdb': meta.find("imdb").text,
                        })
            except:
                pass

        movies = test_movies
        shows = test_episodes
    except:
        pass

    dialog = xbmcgui.Dialog()
    pDialog = xbmcgui.DialogProgress()
    if dialog.yesno("universalscrapers Testing Mode", 'Clear cache?'):
        universalscrapers.clear_cache()
    try:
        dbcon = database.connect(os.path.join(
            xbmc.translatePath(xbmcaddon.Addon("script.module.universalscrapers").getAddonInfo('profile')).decode('utf-8'),
            'url_cache.db'))
        dbcur = dbcon.cursor()
    except:
        dialog.ok("universalscrapers Testing Mode", 'Error connecting to db')
        sys.exit()

    num_movies = len(movies)
    if num_movies > 0:
        pDialog.create('universalscrapers Testing mode active', 'please wait')
        index = 0
        for movie in movies:
            index += 1
            title = movie['title']
            year = movie['year']
            imdb = movie['imdb']
            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(int(index * 100.0 / num_movies), "Scraping movie {} of {}".format(index, num_movies), title)
            links_scraper = universalscrapers.scrape_movie(title, year, imdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
        dbcur.execute("SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode = ''")
        match = dbcur.fetchone()
        num_movie_scrapers = match[0]

        dbcur.execute("SELECT scraper, count(distinct(urls)) FROM rel_src where episode = '' group by scraper")
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                failed.append(match[0])

        if len(failed) > 0:
            failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                failedstring += "\n        - {}".format(str(fail))
        else:
            failedstring = ""

        dbcur.execute("SELECT title, count(distinct(urls)) FROM rel_src where episode = '' group by title")
        matches = dbcur.fetchall()
        failed_movies = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode == '' and title == '{}' group by scraper".format(
                            match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_movies.append(match[0])
                else:
                    failed_movies.append(match[0])

        if len(failed_movies) > 0:
            failed_movie_string = "Failed movies: {}".format(len(failed_movies))
            for fail in failed_movies:
                for movie in movies:
                    if clean_title(movie['title']).upper() == str(fail):
                        failed_movie_string += "\n        - {}".format(movie["title"])

        else:
            failed_movie_string = ""

    num_shows = len(shows)
    if num_shows > 0:
        pDialog.create('universalscrapers Testing mode active', 'please wait')
        index = 0
        for show in shows:
            index += 1
            title = show['title']
            show_year = show['show_year']
            year = show['year']
            season = show['season']
            episode = show['episode']
            imdb = show['imdb']
            tvdb = show.get('tvdb', '')

            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(int(index * 100.0 / num_shows), "Scraping show {} of {}".format(index, num_shows), title)
            links_scraper = universalscrapers.scrape_episode(title, show_year, year, season, episode, imdb, tvdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
        dbcur.execute("SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode != ''")
        match = dbcur.fetchone()
        num_show_scrapers = match[0]

        dbcur.execute("SELECT scraper, count(distinct(urls)) FROM rel_src where episode != '' group by scraper")
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and scraper == '{}' group by scraper".format(
                            match[0]))
                    match = dbcur.fetchone()
                    if match[1] == "[]":
                        failed.append(match[0])
                else:
                    failed.append(match[0])

        if len(failed) > 0:
            show_scraper_failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                show_scraper_failedstring += "\n        - {}".format(str(fail))
        else:
            show_scraper_failedstring = ""

        dbcur.execute("SELECT title, count(distinct(urls)) FROM rel_src where episode != '' group by title")
        matches = dbcur.fetchall()
        failed_shows = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and title == '{}' group by scraper".format(
                            match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_shows.append(match[0])
                else:
                    failed_shows.append(match[0])

        if len(failed_shows) > 0:
            failed_show_string = "Failed shows: {}".format(len(failed_shows))
            for fail in failed_shows:
                for show in shows:
                    if clean_title(show['title']).upper() == str(fail):
                        failed_show_string += "\n        - {} S{}-E{}".format(show["title"], show["season"],
                                                                              show["episode"])

        else:
            failed_show_string = ""

    resultstring = 'Results:\n'
    if num_movies > 0:
        resultstring = resultstring + \
                       '    Movie Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_movie_scrapers, failedstring, failed_movie_string)
    if num_shows > 0:
        resultstring = resultstring + \
                       '    Episode Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_show_scrapers, show_scraper_failedstring, failed_show_string)

    dialog.textviewer("universalscrapers Testing Mode", resultstring)
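
The XML above is parsed with BeautifulStoneSoup from the legacy BeautifulSoup 3 package. A minimal sketch of the same extraction using the standard library's xml.etree.ElementTree, assuming the <item><content>...</content><meta>...</meta></item> layout the code above implies:

import xml.etree.ElementTree as ET

def load_test_movies(xml_text):
    movies = []
    for item in ET.fromstring(xml_text).iter("item"):
        if "movie" in item.findtext("content", default=""):
            meta = item.find("meta")
            movies.append({
                "title": meta.findtext("title"),
                "imdb": meta.findtext("imdb"),
                "year": meta.findtext("year"),
            })
    return movies

# Illustrative document shaped like the testings.xml the test expects:
sample = ("<items><item><content>movie</content>"
          "<meta><title>Demo</title><imdb>tt0000000</imdb>"
          "<year>2000</year></meta></item></items>")
print(load_test_movies(sample))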