Example #1
def clear(table=None):
    try:
        control.idle()

        # Default to the known cache tables; accept a single name or a list.
        if table is None: table = ['rel_list', 'rel_lib']
        elif not isinstance(table, list): table = [table]

        yes = control.yesnoDialog(control.lang(30401).encode('utf-8'), '', '')
        if not yes: return

        dbcon = database.connect(control.cacheFile)
        dbcur = dbcon.cursor()

        for t in table:
            try:
                dbcur.execute("DROP TABLE IF EXISTS %s" % t)
                dbcur.execute("VACUUM")
                dbcon.commit()
            except:
                pass

        nanscrapers.clear_cache()

        control.infoDialog(control.lang(30402).encode('utf-8'))
    except:
        pass
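A minimal usage sketch, assuming the surrounding Kodi add-on context (control, database, nanscrapers) is already imported. The table names end up interpolated into DROP TABLE, so callers should pass only the cache tables this module manages:

clear()                          # prompt, then drop both default cache tables
clear('rel_list')                # drop a single table
clear(['rel_list', 'rel_lib'])   # same as the default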
Example #2
    def get_url(scraper, title, show_year, year, season, episode, imdb, tvdb, type, cache_location, maximum_age):
        cache_enabled = xbmcaddon.Addon('script.module.nanscrapers').getSetting("cache_enabled") == 'true'
        try:
            dbcon = database.connect(cache_location)
            dbcur = dbcon.cursor()
            try:
                dbcur.execute("SELECT * FROM version")
                match = dbcur.fetchone()
            except:
                nanscrapers.clear_cache()
                dbcur.execute("CREATE TABLE version (""version TEXT)")
                dbcur.execute("INSERT INTO version Values ('0.5.4')")
                dbcon.commit()

            dbcur.execute(
                "CREATE TABLE IF NOT EXISTS rel_src (scraper TEXT, title TEXT, show_year TEXT, year TEXT, season TEXT, episode TEXT, imdb_id TEXT, urls TEXT, added TEXT, UNIQUE(scraper, title, year, season, episode));")
        except:
            pass

        if cache_enabled:
            try:
                sources = []
                dbcur.execute(
                    "SELECT * FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'" % (
                        scraper.name, clean_title(title).upper(), show_year, year, season, episode))
                match = dbcur.fetchone()
                t1 = int(re.sub('[^0-9]', '', str(match[8])))
                t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
                update = abs(t2 - t1) > maximum_age
                if not update:
                    sources = json.loads(match[7])
                    return sources
            except:
                pass

        try:
            sources = []
            if type == "movie":
                sources = scraper.scrape_movie(title, year, imdb)
            elif type == "episode":
                sources = scraper.scrape_episode(title, show_year, year, season, episode, imdb, tvdb)
            if sources is None:
                sources = []
            else:
                if cache_enabled:
                    dbcur.execute(
                        "DELETE FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'" % (
                            scraper.name, clean_title(title).upper(), show_year, year, season, episode))
                    dbcur.execute("INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?, ?, ?, ?)", (
                        scraper.name, clean_title(title).upper(), show_year, year, season, episode, imdb,
                        json.dumps(sources),
                        datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
                    dbcon.commit()

            return sources
        except:
            pass
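The cache lookup and delete above splice values straight into the SQL with %, while the insert already uses sqlite's ? placeholders. A sketch of the lookup written the same way (same table and columns), which also survives titles containing quotes:

dbcur.execute(
    "SELECT * FROM rel_src WHERE scraper = ? AND title = ? AND show_year = ? "
    "AND year = ? AND season = ? AND episode = ?",
    (scraper.name, clean_title(title).upper(), show_year, year, season, episode))
match = dbcur.fetchone()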
Example #3
    def get_muscic_url(scraper, title, artist, cache_location, maximum_age, debrid=False):
        cache_enabled = xbmcaddon.Addon('script.module.nanscrapers').getSetting("cache_enabled") == 'true'
        try:
            dbcon = database.connect(cache_location)
            dbcur = dbcon.cursor()

            try:
                dbcur.execute("SELECT * FROM version")
                match = dbcur.fetchone()
            except:
                nanscrapers.clear_cache()
                dbcur.execute("CREATE TABLE version (""version TEXT)")
                dbcur.execute("INSERT INTO version Values ('0.5.4')")
                dbcon.commit()

            dbcur.execute(
                "CREATE TABLE IF NOT EXISTS rel_music_src (scraper TEXT, title TEXT, artist TEXT, urls TEXT, added TEXT, UNIQUE(scraper, title, artist));")
        except:
            pass

        if cache_enabled:
            try:
                sources = []
                dbcur.execute(
                    "SELECT * FROM rel_music_src WHERE scraper = '%s' AND title = '%s' AND artist = '%s'" % (
                        scraper.name, clean_title(title).upper(), artist.upper()))
                match = dbcur.fetchone()
                t1 = int(re.sub('[^0-9]', '', str(match[4])))
                t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
                update = abs(t2 - t1) > maximum_age
                if not update:
                    sources = json.loads(match[3])
                    return sources
            except:
                pass

        try:
            sources = scraper.scrape_music(title, artist, debrid=debrid)
            if sources is None:
                sources = []
            else:
                if cache_enabled:
                    dbcur.execute(
                        "DELETE FROM rel_music_src WHERE scraper = '%s' AND title = '%s' AND artist = '%s'" % (
                            scraper.name, clean_title(title).upper(), artist.upper()))
                    dbcur.execute("INSERT INTO rel_music_src Values (?, ?, ?, ?, ?)", (
                        scraper.name, clean_title(title).upper(), artist.upper(), json.dumps(sources),
                        datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
                    dbcon.commit()

            return sources
        except:
            pass
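The freshness test above compares maximum_age against the difference of two %Y%m%d%H%M integers, which only approximates elapsed minutes across hour and day boundaries. A sketch of an exact check, assuming maximum_age is meant in minutes and that added is stored as %Y-%m-%d %H:%M the way the insert writes it:

added = datetime.datetime.strptime(match[4], "%Y-%m-%d %H:%M")
age_minutes = (datetime.datetime.now() - added).total_seconds() / 60
update = age_minutes > maximum_age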
Example #4
def clear_cache():
    import xbmcgui
    dialog = xbmcgui.Dialog()
    if dialog.yesno(addon_name, "Clear Metadata?"):
        koding.Remove_Table("meta")
        koding.Remove_Table("episode_meta")
    if dialog.yesno(addon_name, "Clear Scraper Cache?"):
        import nanscrapers
        nanscrapers.clear_cache()
    if dialog.yesno(addon_name, "Clear GIF Cache?"):
        dest_folder = os.path.join(
            xbmc.translatePath(xbmcaddon.Addon().getSetting("cache_folder")),
            "artcache")
        koding.Delete_Folders(dest_folder)
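A slightly defensive variant of the GIF-cache branch, assuming the cache_folder setting can be empty on a fresh install (same xbmc and koding calls as above):

cache_setting = xbmcaddon.Addon().getSetting("cache_folder")
if cache_setting:
    dest_folder = os.path.join(xbmc.translatePath(cache_setting), "artcache")
    koding.Delete_Folders(dest_folder)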
Example #5
    if os.path.exists(progressFile):
        if control.yesnoDialog(control.lang(32056).encode('utf-8'), '', ''):
            try:
                os.remove(progressFile)
                dialog.ok('Clear Progress', 'Clear Progress Complete', '', '')
            except:
                dialog.ok('Clear Progress',
                          'There was an error Deleting the Database', '', '')
    else:
        control.infoDialog(control.lang2(161).encode('utf-8'),
                           heading='"Progress Database"',
                           sound=False,
                           icon=thumbnail)
elif action == 'clearSources':
    import nanscrapers
    nanscrapers.clear_cache()
elif action == 'deleteFavourite':
    favourites.deleteFavourite(meta, content)
elif action == 'deleteProgress':
    favourites.deleteProgress(meta, content)
elif action == 'download':
    try:
        downloader.download(
            name, image,
            sources().sourcesResolve(json.loads(source)[0], True))
    except:
        pass
elif action == 'downloadNavigator':
    navigator.navigator().downloads()
elif action == 'episodePlaycount':
    playcount.episodes(imdb, tvdb, season, episode, query)
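This fragment is part of a plugin-URL router; the action value is conventionally parsed from sys.argv along these lines (a common Kodi idiom, assumed rather than taken from this add-on):

import sys
import urlparse  # Python 2, matching the rest of the codebase

params = dict(urlparse.parse_qsl(sys.argv[2].lstrip('?')))
action = params.get('action')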
Example #6
    def get_url(scraper,
                title,
                show_year,
                year,
                season,
                episode,
                imdb,
                tvdb,
                type,
                cache_location,
                maximum_age,
                check_url=False,
                debrid=False):
        cache_enabled = xbmcaddon.Addon(
            'script.module.nanscrapers').getSetting("cache_enabled") == 'true'
        try:
            dbcon = database.connect(cache_location)
            dbcur = dbcon.cursor()
            try:
                dbcur.execute("SELECT * FROM version")
                match = dbcur.fetchone()
            except:
                nanscrapers.clear_cache()
                dbcur.execute("CREATE TABLE version (" "version TEXT)")
                dbcur.execute("INSERT INTO version Values ('0.5.4')")
                dbcon.commit()

            dbcur.execute("CREATE TABLE IF NOT EXISTS rel_src ("
                          "scraper TEXT, "
                          "title Text, show_year TEXT, year TEXT, "
                          "season TEXT, "
                          "episode TEXT, "
                          "imdb_id TEXT, "
                          "urls TEXT, "
                          "added TEXT, "
                          "UNIQUE(scraper, title, year, season, episode)"
                          ");")
        except:
            pass

        if cache_enabled:
            try:
                sources = []
                dbcur.execute(
                    "SELECT * FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'"
                    % (scraper.name, clean_title(title).upper(), show_year,
                       year, season, episode))
                match = dbcur.fetchone()
                t1 = int(re.sub('[^0-9]', '', str(match[8])))
                t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
                update = abs(t2 - t1) > maximum_age
                if not update:
                    sources = json.loads(match[7])
                    return sources
            except:
                pass

        try:
            sources = []
            if type == "movie":
                sources = scraper.scrape_movie(title,
                                               year,
                                               imdb,
                                               debrid=debrid)
            elif type == "episode":
                sources = scraper.scrape_episode(title,
                                                 show_year,
                                                 year,
                                                 season,
                                                 episode,
                                                 imdb,
                                                 tvdb,
                                                 debrid=debrid)
            if sources is None:
                sources = []
            else:
                if cache_enabled:
                    dbcur.execute(
                        "DELETE FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'"
                        % (scraper.name, clean_title(title).upper(), show_year,
                           year, season, episode))
                    dbcur.execute(
                        "INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
                        (scraper.name, clean_title(title).upper(), show_year,
                         year, season, episode, imdb, json.dumps(sources),
                         datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
                    dbcon.commit()

            if check_url:
                noresolver = False
                try:
                    import urlresolver
                except:
                    try:
                        import urlresolver9 as urlresolver
                    except:
                        noresolver = True
                new_sources = []
                from common import check_playable
                for source in sources:
                    if source["direct"]:
                        check = check_playable(source["url"])
                        if check:
                            new_sources.append(source)
                    elif not noresolver:
                        try:
                            hmf = urlresolver.HostedMediaFile(
                                url=source['url'],
                                include_disabled=False,
                                include_universal=False)
                            if hmf.valid_url():
                                resolved_url = hmf.resolve()
                                check = check_playable(resolved_url)
                                if check:
                                    new_sources.append(source)
                        except:
                            pass
                    else:
                        new_sources.append(source)
                sources = new_sources
            return sources
        except:
            pass
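check_playable is imported from the add-on's common module and is not shown here. A hypothetical stand-in, only to make the filtering loop above self-contained; the real implementation may differ:

import urllib2  # Python 2, matching the rest of the codebase

def check_playable(url):
    # Hypothetical: treat a URL as playable if it answers an HTTP HEAD request.
    try:
        req = urllib2.Request(url)
        req.get_method = lambda: 'HEAD'
        urllib2.urlopen(req, timeout=10)
        return url
    except Exception:
        return None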
Example #7
def test():
    global movies, shows
    try:
        test_movies = []
        test_episodes = []
        profile_path = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile')).decode('utf-8')
        test_file = xbmcvfs.File(os.path.join(profile_path, "testings.xml"))
        xml = BeautifulStoneSoup(test_file.read())
        test_file.close()
        items = xml.findAll("item")
        for item in items:
            try:
                content = item.find("content")
                if content:
                    if "movie" in content.text:
                        meta = item.find("meta")
                        test_movies.append({
                            'title': meta.find("title").text,
                            'imdb': meta.find("imdb").text,
                            'year': meta.find("year").text,
                        })
                    elif "episode" in content.text:
                        meta = item.find("meta")
                        test_episodes.append({
                            'title': meta.find("tvshowtitle").text,
                            'show_year': int(meta.find("premiered").text[0:4]),
                            'year': meta.find("year").text,
                            'season': meta.find("season").text,
                            'episode': meta.find("season").text,
                            'imdb': meta.find("imdb").text,
                        })
            except:
                pass

        movies = test_movies
        shows = test_episodes
    except:
        pass

    dialog = xbmcgui.Dialog()
    pDialog = xbmcgui.DialogProgress()
    if dialog.yesno("NaNscrapers Testing Mode", 'Clear cache?'):
        nanscrapers.clear_cache()
    try:
        dbcon = database.connect(os.path.join(
            xbmc.translatePath(xbmcaddon.Addon("script.module.nanscrapers").getAddonInfo('profile')).decode('utf-8'),
            'url_cache.db'))
        dbcur = dbcon.cursor()
    except:
        dialog.ok("NaNscrapers Testing Mode", 'Error connecting to db')
        sys.exit()

    num_movies = len(movies)
    if num_movies > 0:
        pDialog.create('NaNscrapers Testing mode active', 'please wait')
        index = 0
        for movie in movies:
            index += 1
            title = movie['title']
            year = movie['year']
            imdb = movie['imdb']
            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(index * 100 / num_movies, "Scraping movie {} of {}".format(index, num_movies), title)
            links_scraper = nanscrapers.scrape_movie(title, year, imdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
        dbcur.execute("SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode = ''")
        match = dbcur.fetchone()
        num_movie_scrapers = match[0]

        dbcur.execute("SELECT scraper, count(distinct(urls)) FROM rel_src where episode = '' group by scraper")
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                failed.append(match[0])

        if len(failed) > 0:
            failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                failedstring += "\n        - {}".format(str(fail))
        else:
            failedstring = ""

        dbcur.execute("SELECT title, count(distinct(urls)) FROM rel_src where episode = '' group by title")
        matches = dbcur.fetchall()
        failed_movies = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode == '' and title == '{}' group by scraper".format(
                            match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_movies.append(match[0])
                else:
                    failed_movies.append(match[0])

        if len(failed_movies) > 0:
            failed_movie_string = "Failed movies: {}".format(len(failed_movies))
            for fail in failed_movies:
                for movie in movies:
                    if clean_title(movie['title']).upper() == str(fail):
                        failed_movie_string += "\n        - {}".format(movie["title"])

        else:
            failed_movie_string = ""

    num_shows = len(shows)
    if num_shows > 0:
        pDialog.create('NaNscrapers Testing mode active', 'please wait')
        index = 0
        for show in shows:
            index += 1
            title = show['title']
            show_year = show['show_year']
            year = show['year']
            season = show['season']
            episode = show['episode']
            imdb = show['imdb']
            tvdb = show.get('tvdb', '')

            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(index * 100 / num_shows, "Scraping show {} of {}".format(index, num_shows), title)
            links_scraper = nanscrapers.scrape_episode(title, show_year, year, season, episode, imdb, tvdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
        dbcur.execute("SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode != ''")
        match = dbcur.fetchone()
        num_show_scrapers = match[0]

        dbcur.execute("SELECT scraper, count(distinct(urls)) FROM rel_src where episode != '' group by scraper")
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and scraper == '{}' group by scraper".format(
                            match[0]))
                    match = dbcur.fetchone()
                    if match[1] == "[]":
                        failed.append(match[0])
                else:
                    failed.append(match[0])

        if len(failed) > 0:
            show_scraper_failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                show_scraper_failedstring += "\n        - {}".format(str(fail))
        else:
            show_scraper_failedstring = ""

        dbcur.execute("SELECT title, count(distinct(urls)) FROM rel_src where episode != '' group by title")
        matches = dbcur.fetchall()
        failed_shows = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and title == '{}' group by scraper".format(
                            match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_shows.append(match[0])
                else:
                    failed_shows.append(match[0])

        if len(failed_shows) > 0:
            failed_show_string = "Failed shows: {}".format(len(failed_shows))
            for fail in failed_shows:
                for show in shows:
                    if clean_title(show['title']).upper() == str(fail):
                        failed_show_string += "\n        - {} S{}-E{}".format(show["title"], show["season"],
                                                                              show["episode"])

        else:
            failed_show_string = ""

    resultstring = 'Results:\n'
    if num_movies > 0:
        resultstring = resultstring + \
                       '    Movie Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_movie_scrapers, failedstring, failed_movie_string)
    if num_shows > 0:
        resultstring = resultstring + \
                       '    Episode Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_show_scrapers, show_scraper_failedstring, failed_show_string)

    dialog.textviewer("NaNscrapers Testing Mode", resultstring)
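The parser at the top of test() implies a testings.xml shaped roughly as follows; the tag names come from the find() calls above, and the values are placeholders:

<item>
    <content>movie</content>
    <meta>
        <title>Some Movie</title>
        <imdb>tt0000000</imdb>
        <year>2000</year>
    </meta>
</item>
<item>
    <content>episode</content>
    <meta>
        <tvshowtitle>Some Show</tvshowtitle>
        <premiered>2001-01-01</premiered>
        <year>2001</year>
        <season>1</season>
        <episode>1</episode>
        <imdb>tt0000001</imdb>
    </meta>
</item>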
Example #8
    def get_url(scraper, title, show_year, year, season, episode, imdb, tvdb, type, cache_location, maximum_age, check_url=False, debrid=False):
        cache_enabled = xbmcaddon.Addon('script.module.nanscrapers').getSetting("cache_enabled") == 'true'
        try:
            dbcon = database.connect(cache_location)
            dbcur = dbcon.cursor()
            try:
                dbcur.execute("SELECT * FROM version")
                match = dbcur.fetchone()
            except:
                nanscrapers.clear_cache()
                dbcur.execute("CREATE TABLE version (""version TEXT)")
                dbcur.execute("INSERT INTO version Values ('0.5.4')")
                dbcon.commit()

            dbcur.execute(
                "CREATE TABLE IF NOT EXISTS rel_src (scraper TEXT, title TEXT, show_year TEXT, year TEXT, season TEXT, episode TEXT, imdb_id TEXT, urls TEXT, added TEXT, UNIQUE(scraper, title, year, season, episode));")
        except:
            pass

        if cache_enabled:
            try:
                sources = []
                dbcur.execute(
                    "SELECT * FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'" % (
                        scraper.name, clean_title(title).upper(), show_year, year, season, episode))
                match = dbcur.fetchone()
                t1 = int(re.sub('[^0-9]', '', str(match[8])))
                t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
                update = abs(t2 - t1) > maximum_age
                if not update:
                    sources = json.loads(match[7])
                    return sources
            except:
                pass

        try:
            sources = []
            if type == "movie":
                sources = scraper.scrape_movie(title, year, imdb, debrid=debrid)
            elif type == "episode":
                sources = scraper.scrape_episode(title, show_year, year, season, episode, imdb, tvdb, debrid=debrid)
            if sources is None:
                sources = []
            else:
                if cache_enabled:
                    try:
                        dbcur.execute(
                            "DELETE FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'" % (
                                scraper.name, clean_title(title).upper(), show_year, year, season, episode))
                        dbcur.execute("INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?, ?, ?, ?)", (
                            scraper.name, clean_title(title).upper(), show_year, year, season, episode, imdb,
                            json.dumps(sources),
                            datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
                        dbcon.commit()
                    except:
                        pass

            if check_url:
                noresolver = False
                try:
                    import resolveurl as urlresolver
                except:
                    try:
                        import urlresolver
                    except:
                        noresolver = True
                new_sources = []
                from common import check_playable
                for source in sources:
                    if source["direct"]:
                        check = check_playable(source["url"])
                        if check:
                            new_sources.append(source)
                    elif not noresolver:
                        try:
                            hmf = urlresolver.HostedMediaFile(
                                url=source['url'], include_disabled=False,
                                include_universal=False)
                            if hmf.valid_url():
                                resolved_url = hmf.resolve()
                                check = check_playable(resolved_url)
                                if check:
                                    new_sources.append(source)
                        except:
                            pass
                    else:
                        new_sources.append(source)
                sources = new_sources
            return sources
        except:
            pass
Example #9
def test():
    global movies, shows
    try:
        test_movies = []
        test_episodes = []
        profile_path = xbmc.translatePath(
            xbmcaddon.Addon().getAddonInfo('profile')).decode('utf-8')
        test_file = xbmcvfs.File(os.path.join(profile_path, "testings.xml"))
        xml = BeautifulStoneSoup(test_file.read())
        test_file.close()
        items = xml.findAll("item")
        for item in items:
            try:
                content = item.find("content")
                if content:
                    if "movie" in content.text:
                        meta = item.find("meta")
                        test_movies.append({
                            'title': meta.find("title").text,
                            'imdb': meta.find("imdb").text,
                            'year': meta.find("year").text,
                        })
                    elif "episode" in content.text:
                        meta = item.find("meta")
                        test_episodes.append({
                            'title':
                            meta.find("tvshowtitle").text,
                            'show_year':
                            int(meta.find("premiered").text[0:4]),
                            'year':
                            meta.find("year").text,
                            'season':
                            meta.find("season").text,
                            'episode':
                            meta.find("season").text,
                            'imdb':
                            meta.find("imdb").text,
                        })
            except:
                pass

        movies = test_movies
        shows = test_episodes
    except:
        pass

    dialog = xbmcgui.Dialog()
    pDialog = xbmcgui.DialogProgress()
    if dialog.yesno("NaNscrapers Testing Mode", 'Clear cache?'):
        nanscrapers.clear_cache()
    try:
        dbcon = database.connect(
            os.path.join(
                xbmc.translatePath(
                    xbmcaddon.Addon("script.module.nanscrapers").getAddonInfo(
                        'profile')).decode('utf-8'), 'url_cache.db'))
        dbcur = dbcon.cursor()
    except:
        dialog.ok("NaNscrapers Testing Mode", 'Error connecting to db')
        sys.exit()

    num_movies = len(movies)
    if num_movies > 0:
        pDialog.create('NaNscrapers Testing mode active', 'please wait')
        index = 0
        for movie in movies:
            index += 1
            title = movie['title']
            year = movie['year']
            imdb = movie['imdb']
            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(index * 100 / num_movies,
                           "Scraping movie {} of {}".format(index, num_movies),
                           title)
            links_scraper = nanscrapers.scrape_movie(title, year, imdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
        dbcur.execute(
            "SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode = ''")
        match = dbcur.fetchone()
        num_movie_scrapers = match[0]

        dbcur.execute(
            "SELECT scraper, count(distinct(urls)) FROM rel_src where episode = '' group by scraper"
        )
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                failed.append(match[0])

        if len(failed) > 0:
            failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                failedstring += "\n        - {}".format(str(fail))
        else:
            failedstring = ""

        dbcur.execute(
            "SELECT title, count(distinct(urls)) FROM rel_src where episode = '' group by title"
        )
        matches = dbcur.fetchall()
        failed_movies = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode == '' and title == '{}' group by scraper"
                        .format(match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_movies.append(match[0])
                else:
                    failed_movies.append(match[0])

        if len(failed_movies) > 0:
            failed_movie_string = "Failed movies: {}".format(
                len(failed_movies))
            for fail in failed_movies:
                for movie in movies:
                    if clean_title(movie['title']).upper() == str(fail):
                        failed_movie_string += "\n        - {}".format(
                            movie["title"])

        else:
            failed_movie_string = ""

    num_shows = len(shows)
    if num_shows > 0:
        pDialog.create('NaNscrapers Testing mode active', 'please wait')
        index = 0
        for show in shows:
            index += 1
            title = show['title']
            show_year = show['show_year']
            year = show['year']
            season = show['season']
            episode = show['episode']
            imdb = show['imdb']
            tvdb = show.get('tvdb', '')

            if pDialog.iscanceled():
                pDialog.close()
                break
            pDialog.update(index * 100 / num_shows,
                           "Scraping show {} of {}".format(index, num_shows),
                           title)
            links_scraper = nanscrapers.scrape_episode(title, show_year, year,
                                                       season, episode, imdb,
                                                       tvdb)
            links_scraper = links_scraper()
            for scraper_links in links_scraper:
                if pDialog.iscanceled():
                    break
                if scraper_links:
                    random.shuffle(scraper_links)

        pDialog.close()
        dbcur.execute(
            "SELECT COUNT(DISTINCT(scraper)) FROM rel_src where episode != ''")
        match = dbcur.fetchone()
        num_show_scrapers = match[0]

        dbcur.execute(
            "SELECT scraper, count(distinct(urls)) FROM rel_src where episode != '' group by scraper"
        )
        matches = dbcur.fetchall()
        failed = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and scraper == '{}' group by scraper"
                        .format(match[0]))
                    match = dbcur.fetchone()
                    if match[1] == "[]":
                        failed.append(match[0])
                else:
                    failed.append(match[0])

        if len(failed) > 0:
            show_scraper_failedstring = "Failed: {}".format(len(failed))
            for fail in failed:
                show_scraper_failedstring += "\n        - {}".format(str(fail))
        else:
            show_scraper_failedstring = ""

        dbcur.execute(
            "SELECT title, count(distinct(urls)) FROM rel_src where episode != '' group by title"
        )
        matches = dbcur.fetchall()
        failed_shows = []
        for match in matches:
            if int(match[1]) <= 1:
                if int(match[1]) == 1:
                    dbcur.execute(
                        "SELECT scraper, urls FROM rel_src where episode != '' and title == '{}' group by scraper"
                        .format(match[0]))
                    new_matches = dbcur.fetchall()
                    found = False
                    for new_match in new_matches:
                        if new_match[1] == "[]":
                            continue
                        else:
                            found = True
                    if not found:
                        failed_shows.append(match[0])
                else:
                    failed_shows.append(match[0])

        if len(failed_shows) > 0:
            failed_show_string = "Failed shows: {}".format(len(failed_shows))
            for fail in failed_shows:
                for show in shows:
                    if clean_title(show['title']).upper() == str(fail):
                        failed_show_string += "\n        - {} S{}-E{}".format(
                            show["title"], show["season"], show["episode"])

        else:
            failed_show_string = ""

    resultstring = 'Results:\n'
    if num_movies > 0:
        resultstring = resultstring + \
                       '    Movie Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_movie_scrapers, failedstring, failed_movie_string)
    if num_shows > 0:
        resultstring = resultstring + \
                       '    Episode Scrapers: {}\n' \
                       '    {}\n' \
                       '    {}\n'.format(num_show_scrapers, show_scraper_failedstring, failed_show_string)

    dialog.textviewer("NaNscrapers Testing Mode", resultstring)
Example #10
    def get_url(scraper, title, show_year, year, season, episode, imdb, tvdb,
                type, cache_location, maximum_age):
        cache_enabled = xbmcaddon.Addon(
            'script.module.nanscrapers').getSetting("cache_enabled") == 'true'
        try:
            dbcon = database.connect(cache_location)
            dbcur = dbcon.cursor()
            try:
                dbcur.execute("SELECT * FROM version")
                match = dbcur.fetchone()
            except:
                nanscrapers.clear_cache()
                dbcur.execute("CREATE TABLE version (" "version TEXT)")
                dbcur.execute("INSERT INTO version Values ('0.5.4')")
                dbcon.commit()

            dbcur.execute("CREATE TABLE IF NOT EXISTS rel_src ("
                          "scraper TEXT, "
                          "title Text, show_year TEXT, year TEXT, "
                          "season TEXT, "
                          "episode TEXT, "
                          "imdb_id TEXT, "
                          "urls TEXT, "
                          "added TEXT, "
                          "UNIQUE(scraper, title, year, season, episode)"
                          ");")
        except:
            pass

        if cache_enabled:
            try:
                sources = []
                dbcur.execute(
                    "SELECT * FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'"
                    % (scraper.name, clean_title(title).upper(), show_year,
                       year, season, episode))
                match = dbcur.fetchone()
                t1 = int(re.sub('[^0-9]', '', str(match[8])))
                t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
                update = abs(t2 - t1) > maximum_age
                if not update:
                    sources = json.loads(match[7])
                    return sources
            except:
                pass

        try:
            sources = []
            if type == "movie":
                sources = scraper.scrape_movie(title, year, imdb)
            elif type == "episode":
                sources = scraper.scrape_episode(title, show_year, year,
                                                 season, episode, imdb, tvdb)
            if sources is None:
                sources = []
            else:
                if cache_enabled:
                    dbcur.execute(
                        "DELETE FROM rel_src WHERE scraper = '%s' AND title = '%s' AND show_year= '%s' AND year = '%s' AND season = '%s' AND episode = '%s'"
                        % (scraper.name, clean_title(title).upper(), show_year,
                           year, season, episode))
                    dbcur.execute(
                        "INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?, ?, ?, ?)",
                        (scraper.name, clean_title(title).upper(), show_year,
                         year, season, episode, imdb, json.dumps(sources),
                         datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
                    dbcon.commit()

            return sources
        except:
            pass
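A hypothetical invocation sketch for the method above (here treated as a plain function; in the add-on it sits on a class). some_scraper stands in for a real scraper object exposing .name, .scrape_movie and .scrape_episode, and the literal values are placeholders:

# Movie lookup: show_year, season, episode and tvdb are unused for movies,
# so empty strings flow through into the cache key.
sources = get_url(some_scraper, "Some Movie", "", "2000", "", "",
                  "tt0000000", "", "movie", cache_location, 180)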