Example #1
def GetMetadata(media, movie, TVDBid, TMDbid, IMDbid):
  Log.Info("=== TheMovieDb.GetMetadata() ===".ljust(157, '='))
  TheMovieDb_dict = {}
  TSDbid          = ""
  
  Log.Info("TVDBid: '{}', TMDbid: '{}', IMDbid: '{}'".format(TVDBid, TMDbid, IMDbid))
  if   TMDbid:            url, filename = TMDB_MOVIE_SEARCH_BY_TMDBID.format(id=TMDbid), "TMDB-"+TMDbid+".json"
  elif IMDbid:            url, filename = TMDB_MOVIE_SEARCH_BY_TMDBID.format(id=IMDbid), "IMDb-"+IMDbid+".json"
  elif TVDBid.isdigit():  url, filename = TMDB_SERIE_SEARCH_BY_TVDBID.format(id=TVDBid), "TVDB-"+TVDBid+".json"
  else:                   return TheMovieDb_dict, TSDbid, TMDbid, IMDbid
  
  mode           = "movie" if movie else "tv"
  Log.Info(("--- %s ---" % mode).ljust(157, '-'))
  json           = common.LoadFile(filename=filename,               relativeDirectory=os.path.join('TheMovieDb', 'json'), url=url)
  config_dict    = common.LoadFile(filename="TMDB_CONFIG_URL.json", relativeDirectory="TheMovieDb",                       url=TMDB_CONFIG_URL, cache=CACHE_1MONTH)
  image_base_url = Dict(config_dict, 'images', 'secure_base_url')
  if not json:  Log.Info("TMDB - failed to get json - url: " + url)
  else:  
    if   Dict(json, 'tv_results'   ):  json, mode = json['tv_results'   ][0], "tv"
    elif Dict(json, 'movie_results'):  json, mode = json['movie_results'][0], "movie"
    
    Log.Info("[ ] title: {}"                  .format(SaveDict( Dict(json, 'title') or Dict(json, 'name'),                  TheMovieDb_dict, 'title'                  )))`
    Log.Info("[ ] rating: {}"                 .format(SaveDict( Dict(json, 'vote_average'),                                 TheMovieDb_dict, 'rating'                 )))  #if 'vote_count' in json and json['vote_count'] > 3:  SaveDict( Dict(json, 'vote_average'), TheMovieDb_dict, 'rating')
    Log.Info("[ ] tagline: {}"                .format(SaveDict( Dict(json, 'tagline'),                                      TheMovieDb_dict, 'tagline'                )))
    Log.Info("[ ] summary: {}"                .format(SaveDict( Dict(json, 'overview'),                                     TheMovieDb_dict, 'summary'                )))
    Log.Info("[ ] duration: {}"               .format(SaveDict( Dict(json, 'runtime'),                                      TheMovieDb_dict, 'duration'               )))
    Log.Info("[ ] countries: {}"              .format(SaveDict( Dict(json, 'origin_country'),                               TheMovieDb_dict, 'countries'              )))
    Log.Info("[ ] originally_available_at: {}".format(SaveDict( Dict(json, 'first_air_date') or Dict(json, 'release_date'), TheMovieDb_dict, 'originally_available_at')))
    if Dict(json, 'belongs_to_collection', 'name'):  Log.Info("[ ] collections: {}".format(SaveDict( [ Dict(json, 'belongs_to_collection', 'name')],                                TheMovieDb_dict, 'collections')))
    if Dict(json, 'genres'                       ):  Log.Info("[ ] genres: {}"     .format(SaveDict( sorted([ Dict(genre, 'name') for genre in Dict(json, 'genres', default=[]) ]), TheMovieDb_dict, 'genres'     )))
    if Dict(json, 'poster_path'                  ):  Log.Info("[ ] poster: {}"     .format(image_base_url + 'original' + json['poster_path']  )); SaveDict( (os.path.join('TheMovieDb', 'poster',  json['poster_path'  ].lstrip('/')), common.poster_rank('TheMovieDb', 'posters'), None),                                            TheMovieDb_dict, 'posters', image_base_url + 'original' + json['poster_path']  )
    if Dict(json, 'backdrop_path'                ):  Log.Info("[ ] art: {}"        .format(image_base_url + 'original' + json['backdrop_path'])); SaveDict( (os.path.join('TheMovieDb', 'artwork', json['backdrop_path'].lstrip('/')), common.poster_rank('TheMovieDb', 'art'    ), image_base_url + 'w300' + json['backdrop_path']), TheMovieDb_dict, 'art',     image_base_url + 'original' + json['backdrop_path'])
    try:     Log.Info("[ ] duration: {}".format(SaveDict( int(Dict(json, 'duration')) * 60 * 1000,  TheMovieDb_dict, 'duration')))
    except:  pass
    if mode=='tv':   TSDbid = str(Dict(json, 'id'))
    elif not TMDbid: TMDbid = str(Dict(json, 'id'))
    if not IMDbid:   IMDbid = Dict(json, 'imdb_id')
    
    for studio in Dict(json, 'production_companies', default=[]):
      if studio['id'] <= json['production_companies'][0]['id']:
        Log.Info("[ ] studio: {}".format(SaveDict( studio['name'].strip(), TheMovieDb_dict, 'studio')))
  
  ### More pictures ###
  Log.Info("--- pictures.more ---".ljust(157, '-'))
  Log.Info("TMDbid: '{}', TSDbid: '{}', IMDbid: '{}'".format(TMDbid, TSDbid, IMDbid))
  for id in IMDbid.split(',') if ',' in IMDbid else []:
    json                  = common.LoadFile(filename="TMDB-"+(IMDbid or TMDbid)+".json", relativeDirectory="TMDB", url=TMDB_MOVIE_IMAGES_URL.format(id=id, mode=mode))
    for index, poster in enumerate(Dict(json, 'posters', default=[])):
      if Dict(poster,   'file_path'):  Log.Info("[ ] poster: {}" .format(image_base_url + 'original' + poster['file_path'] )); SaveDict((os.path.join('TheMovieDb', 'poster',  "%s-%s.jpg" % (TMDbid, index)),     common.poster_rank('TheMovieDb', 'posters'), None),                                            TheMovieDb_dict, 'posters', image_base_url + 'original' + poster['file_path']  )
    for index, backdrop in enumerate(Dict(json, 'backdrops', default=[])):
      if Dict(backdrop, 'file_path'):  Log.Info("[ ] artwork: {}".format(image_base_url + 'original'+ backdrop['file_path'])); SaveDict((os.path.join('TheMovieDb', 'artwork', "%s-%s-art.jpg" % (TMDbid, index)), common.poster_rank('TheMovieDb', 'art'),     image_base_url + 'w300' + backdrop['file_path']), TheMovieDb_dict, 'art',     image_base_url + 'original' + backdrop['file_path'])
  
  Log.Info("--- return ---".ljust(157, '-'))
  Log.Info("TheMovieDb_dict: {}".format(DictString(TheMovieDb_dict, 4)))
  return TheMovieDb_dict, TSDbid, TMDbid, IMDbid
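
Note: most examples on this page rely on HAMA's Dict() and SaveDict() helpers for nested reads/writes, which are not reproduced here. The sketch below is only an assumption of how such helpers could behave (safe nested lookup with a default, create-as-you-go nested store that returns the value it stored); it is not the project's actual implementation.

def Dict(source, *keys, **kwargs):              # hypothetical stand-in, not HAMA's real helper
  default = kwargs.get('default', '')
  for key in keys:
    try:               source = source[key]
    except Exception:  return default
  return source

def SaveDict(value, target, *keys):             # hypothetical stand-in, not HAMA's real helper
  if not value:  return value                   # assumed behaviour: empty values are not stored
  for key in keys[:-1]:  target = target.setdefault(key, {})
  target[keys[-1]] = value
  return value

meta = {}
print("[ ] title: {}".format(SaveDict({'name': 'Example'}.get('name'), meta, 'title')))  # -> [ ] title: Example
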
Example #2
def GetAniDBTVDBMap():  
  MAPPING       = 'https://raw.githubusercontent.com/ScudLee/anime-lists/master/anime-list-master.xml'                                  # ScudLee mapping file url
  MAPPING_FIX   = 'https://raw.githubusercontent.com/ZeroQI/Absolute-Series-Scanner/master/anime-list-corrections.xml'                  # ScudLee mapping file url online override
  MAPPING_LOCAL = os.path.join(common.CachePath, 'AnimeLists', 'anime-list-custom.xml')                                            # Local custom mapping file (user overrides)
  AniDBTVDBMap  = common.LoadFile(filename=os.path.basename(MAPPING), relativeDirectory="AnimeLists", url=MAPPING, cache= CACHE_1DAY*6)  # 
  if not AniDBTVDBMap:  Log.Critical("GetAniDBTVDBMap() - Failed to load core file '{file}'".format(file=os.path.basename(MAPPING))); AniDB_Movie_Set = XML.ElementFromString("<anime-set-list></anime-set-list>")  #; raise Exception("HAMA Fatal Error Hit")
  MergeMaps(AniDBTVDBMap, common.LoadFile(filename=os.path.basename(MAPPING_FIX), relativeDirectory="AnimeLists", url=MAPPING_FIX, cache= CACHE_1DAY*6))  #Online ScudLee anidb to tvdb mapping list
    
  if os.path.exists(MAPPING_LOCAL):  #Local  ScudLee anidb to tvdb mapping list
    Log.Info("GetAniDBTVDBMap() - Loading local custom mapping - url: " + MAPPING_LOCAL)
    try:                    MergeMaps(AniDBTVDBMap, XML.ElementFromString(Core.storage.load(MAPPING_LOCAL)))
    except Exception as e:  Log.Info("GetAniDBTVDBMap() - Failed open scudlee_filename_custom, error: '%s'" % e)
  else:                     Log.Info("GetAniDBTVDBMap() - Local custom mapping file not present: {}".format(MAPPING_LOCAL))
  return AniDBTVDBMap
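
Note: common.LoadFile() itself is never shown in these examples. As an illustration only, a download-or-reuse-cache helper with a comparable signature might look like the sketch below; the real HAMA version also handles XML parsing, POST data and Plex's storage layer, so every name here is an assumption.

import json, os, time
try:                 from urllib.request import Request, urlopen   # Python 3
except ImportError:  from urllib2 import Request, urlopen          # Python 2 (Plex plugin runtime)

CACHE_ROOT = os.path.join(os.path.expanduser('~'), '.metadata-cache')  # hypothetical cache location

def LoadFile(filename='', relativeDirectory='', url='', cache=60*60*24, headers=None):
  path = os.path.join(CACHE_ROOT, relativeDirectory, filename)
  if os.path.isfile(path) and time.time() - os.path.getmtime(path) < cache:    # cache still fresh: reuse it
    with open(path, 'rb') as f:  payload = f.read()
  else:                                                                         # stale or missing: download then cache
    payload = urlopen(Request(url, headers=headers or {})).read()
    if not os.path.isdir(os.path.dirname(path)):  os.makedirs(os.path.dirname(path))
    with open(path, 'wb') as f:  f.write(payload)
  try:                return json.loads(payload)                                # JSON callers get a dict/list back
  except ValueError:  return payload                                            # XML callers parse the raw bytes themselves
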
Example #3
def GetMetadata(movie=False, TVDBid="", tmdbid="", imdbid="", season=0, num=100):  #Fetch from fanart.tv - Posters Seasons Fanarts Banner
  Log.Info("=== FanartTv.GetMetadata() ===".ljust(157, '='))
  FanartTV_dict = {}

  Log.Info("movie:'{movie}', TVDBid: '{TVDBid}', tmdbid: '{tmdbid}', imdbid:'{imdbid}', season: '{season}', num: '{num}'".format(movie=movie, TVDBid=TVDBid, tmdbid=tmdbid, imdbid=imdbid, season=season, num=num))
  if "," in imdbid:  (GetMetadata(movie, "", "", imdbid_unique, season, num) for imdbid_unique in (tmdbid or imdbid).split(",")); return  #recusive call for each imdbid to reduce complexity
  if "," in tmdbid:  (GetMetadata(movie, "", tmdbid_unique, "", season, num) for tmdbid_unique in tmdbid.split(","));             return  #recusive call for each tmdbid to reduce complexity
  if not movie and TVDBid.isdigit():  id, relativeDirectory, url = TVDBid,           "FanartTV/tv/"   +TVDBid,               FTV_API_TV_URL.format(id=TVDBid)
  elif movie and (imdbid or tmdbid):  id, relativeDirectory, url = imdbid or tmdbid, "FanartTV/movie/"+(imdbid or tmdbid), FTV_API_MOVIES_URL.format(id=imdbid or tmdbid)
  else:                               return FanartTV_dict
  if TVDBid or tmdbid or imdbid:
    Log.Info(("--- %s.images ---" % id).ljust(157, '-'))
    json = common.LoadFile(filename=id+".json", relativeDirectory=relativeDirectory, url=url, cache=CACHE_1WEEK)
    
    #Movies
    if json and (imdbid or tmdbid):
      for item in Dict(json, 'movieposter'    ) or []:  Log.Info("[ ] poster: {}".format(SaveDict((relativeDirectory+"/{id}/movieposter/{filename}.jpg".format(    id=id, filename=item['id']), num, None), FanartTV_dict, 'posters', item['url'])))
      for item in Dict(json, 'moviebackground') or []:  Log.Info("[ ] art: {}"   .format(SaveDict((relativeDirectory+"/{id}/moviebackground/{filename}.jpg".format(id=id, filename=item['id']), num, None), FanartTV_dict, 'art',     item['url'])))
    
    #Series
    if json and TVDBid.isdigit():
      for item in Dict(json, 'tvposter'       ) or []:  Log.Info("[ ] poster: {}"       .format(SaveDict((relativeDirectory+"/{id}/tvposter/{filename}.jpg".format(       id=id, filename=item['id']), num, None), FanartTV_dict, 'posters', item['url'])))
      for item in Dict(json, 'showbackground' ) or []:  Log.Info("[ ] art: {}"          .format(SaveDict((relativeDirectory+"/{id}/showbackground/{filename}.jpg".format( id=id, filename=item['id']), num, None), FanartTV_dict, 'art',     item['url'])))
      for item in Dict(json, 'tvbanner'       ) or []:  Log.Info("[ ] banner: {}"       .format(SaveDict((relativeDirectory+"/{id}/tvbanner/{filename}.jpg".format(       id=id, filename=item['id']), num, None), FanartTV_dict, 'banners', item['url'])))
      for item in Dict(json, 'seasonposter'   ) or []:  Log.Info("[ ] season poster: {}".format(SaveDict((relativeDirectory+"/{id}/seasonposter/{filename}.jpg".format(   id=id, filename=item['id']), num, None), FanartTV_dict, 'seasons', item['season'], 'posters', item['url'])))

  Log.Info("--- return ---".ljust(157, '-'))
  Log.Info("FanartTV_dict: {}".format(DictString(FanartTV_dict, 4)))
  return FanartTV_dict
Example #4
def GetAniDBTitlesDB():
  ''' Get the AniDB title database
  '''
  global AniDBTitlesDB
  ANIDB_TITLES  = 'http://anidb.net/api/anime-titles.xml.gz'               # AniDB title database file contain all ids, all languages  #http://bakabt.info/anidb/animetitles.xml
  AniDBTitlesDB = common.LoadFile(filename='anime-titles.xml', relativeDirectory="AniDB", url=ANIDB_TITLES, cache=CACHE_1DAY * 6)  # AniDB title database, cached for 6 days
  if not AniDBTitlesDB:  raise Exception("Failed to load core file '{url}'".format(url=os.path.splitext(os.path.basename(ANIDB_TITLES))[0]))
Example #5
def GetMetadata(movie=False, TVDBid="", tmdbid="", imdbid="", season=0):  #Fetch from fanart.tv - Posters Seasons Fanarts Banner
  Log.Info("=== FanartTv.GetMetadata() ===".ljust(157, '='))
  FanartTV_dict = {}

  Log.Info("movie:'{movie}', TVDBid: '{TVDBid}', tmdbid: '{tmdbid}', imdbid:'{imdbid}', season: '{season}'".format(movie=movie, TVDBid=TVDBid, tmdbid=tmdbid, imdbid=imdbid, season=season))
  if "," in imdbid:  (GetMetadata(movie, "", "", imdbid_unique, season) for imdbid_unique in (tmdbid or imdbid).split(",")); return  #recusive call for each imdbid to reduce complexity
  if "," in tmdbid:  (GetMetadata(movie, "", tmdbid_unique, "", season) for tmdbid_unique in tmdbid.split(","));             return  #recusive call for each tmdbid to reduce complexity
  if not movie and TVDBid.isdigit():  id, relativeDirectory, url = TVDBid,           os.path.join("FanartTV", "tv",    TVDBid),           FTV_API_TV_URL.format(id=TVDBid)
  elif movie and (imdbid or tmdbid):  id, relativeDirectory, url = imdbid or tmdbid, os.path.join("FanartTV", "movie", imdbid or tmdbid), FTV_API_MOVIES_URL.format(id=imdbid or tmdbid)
  else:                               return FanartTV_dict
  if TVDBid or tmdbid or imdbid:
    Log.Info(("--- %s.images ---" % id).ljust(157, '-'))
    json = common.LoadFile(filename=id+".json", relativeDirectory=relativeDirectory, url=url)
    
    #Movies
    if json and (imdbid or tmdbid):
      for item in Dict(json, 'movieposter'    , default=[]):  Log.Info("[ ] poster: {}".format(Dict(item, 'url'))); SaveDict((os.path.join(relativeDirectory, id, "movieposter",     "{filename}.jpg".format(filename=Dict(item, 'id'))), common.poster_rank('FanartTV', 'posters'), None), FanartTV_dict, 'posters', Dict(item, 'url'))
      for item in Dict(json, 'moviebackground', default=[]):  Log.Info("[ ] art: {}"   .format(Dict(item, 'url'))); SaveDict((os.path.join(relativeDirectory, id, "moviebackground", "{filename}.jpg".format(filename=Dict(item, 'id'))), common.poster_rank('FanartTV', 'art'    ), None), FanartTV_dict, 'art',     Dict(item, 'url'))
    
    #Series
    if json and TVDBid.isdigit():
      for item in Dict(json, 'tvposter'       , default=[]):  Log.Info("[ ] poster: {}"       .format(Dict(item, 'url'))); SaveDict((os.path.join(relativeDirectory, id, "tvposter",       "{filename}.jpg".format(filename=Dict(item, 'id'))), common.poster_rank('FanartTV', 'posters'), None), FanartTV_dict, 'posters', Dict(item, 'url'))
      for item in Dict(json, 'showbackground' , default=[]):  Log.Info("[ ] art: {}"          .format(Dict(item, 'url'))); SaveDict((os.path.join(relativeDirectory, id, "showbackground", "{filename}.jpg".format(filename=Dict(item, 'id'))), common.poster_rank('FanartTV', 'art'    ), None), FanartTV_dict, 'art',     Dict(item, 'url'))
      for item in Dict(json, 'tvbanner'       , default=[]):  Log.Info("[ ] banner: {}"       .format(Dict(item, 'url'))); SaveDict((os.path.join(relativeDirectory, id, "tvbanner",       "{filename}.jpg".format(filename=Dict(item, 'id'))), common.poster_rank('FanartTV', 'banners'), None), FanartTV_dict, 'banners', Dict(item, 'url'))
      for item in Dict(json, 'seasonposter'   , default=[]):  Log.Info("[ ] season poster: {}".format(Dict(item, 'url'))); SaveDict((os.path.join(relativeDirectory, id, "seasonposter",   "{filename}.jpg".format(filename=Dict(item, 'id'))), common.poster_rank('FanartTV', 'posters'), None), FanartTV_dict, 'seasons', Dict(item, 'season'), 'posters', Dict(item, 'url'))

  Log.Info("--- return ---".ljust(157, '-'))
  Log.Info("FanartTV_dict: {}".format(DictString(FanartTV_dict, 4)))
  return FanartTV_dict
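
A hypothetical call showing the shape of the returned dictionary (the TVDB id below is a placeholder): each image URL maps to a (local path, rank, thumbnail) tuple.

FanartTV_dict = GetMetadata(movie=False, TVDBid="81797")                 # placeholder TVDB id
for url, (local_path, rank, thumb) in Dict(FanartTV_dict, 'posters', default={}).items():
  print("rank {}: {} -> {}".format(rank, url, local_path))
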
Example #6
def GetMetadata(movie, IMDbid):  # IMDb returns 200 but doesn't download correctly - IMDb has a single poster, downloaded through the OMDb JSON API, preferred by the mapping file
  Log.Info("=== OMDb.GetMetadata() ===".ljust(157, '='))
  url       = OMDB_HTTP_API_URL.format(api_key=Prefs['OMDbApiKey']) #'
  OMDb_dict = {}

  if Prefs['OMDbApiKey'] in ('None', '', 'N/A'):  Log.Info("No api key found - Prefs['OMDbApiKey']: '{}'".format(Prefs['OMDbApiKey']));  return OMDb_dict
  
  Log.Info("IMDbid: '%s'" % IMDbid)
  for imdbid_single in IMDbid.split(",") if IMDbid.startswith("tt") else []:
    Log.Info(("--- %s.series ---" % imdbid_single).ljust(157, '-'))
    json = common.LoadFile(filename=imdbid_single+".json", relativeDirectory=os.path.join('OMDb', 'json'), url=url + imdbid_single)
    if json:
      Log.Info("[ ] title: {}"                  .format(SaveDict( Dict(json,'title')     , OMDb_dict, 'title'                  )))
      Log.Info("[ ] summary: {}"                .format(SaveDict( Dict(json,'Plot')      , OMDb_dict, 'summary'                )))
      Log.Info("[ ] originally_available_at: {}".format(SaveDict( Dict(json,'Released')  , OMDb_dict, 'originally_available_at')))
      Log.Info("[ ] countries: {}"              .format(SaveDict( Dict(json,'Country')   , OMDb_dict, 'countries'              )))
      Log.Info("[ ] directors: {}"              .format(SaveDict( Dict(json,'Director')  , OMDb_dict, 'directors'              )))
      Log.Info("[ ] genres: {}"                 .format(SaveDict( sorted([x.strip() for x in Dict(json,'Genre').split(',')]), OMDb_dict, 'genres')))
      Log.Info("[ ] writers: {}"                .format(SaveDict( Dict(json,'Writer')    , OMDb_dict, 'writers'                )))
      SaveDict( Dict(json,'imdbRating'), OMDb_dict, 'rating')
      if Dict(json,'Metascore').isdigit() and not Dict(OMDb_dict,'rating'):
        SaveDict( float(json['Metascore'])/10, OMDb_dict, 'rating')
      Log.Info("[ ] rating: {}".format(Dict(OMDb_dict,'rating')))
      if SaveDict( Dict(json,'Rated'), OMDb_dict, 'content_rating') in common.Movie_to_Serie_US_rating and not movie and Dict(json,'Type')=="movie":
        Log.Info("[ ] content_rating: {}".format(SaveDict(common.Movie_to_Serie_US_rating[json['Rated']], OMDb_dict, 'content_rating')))
      if Dict(json,'Poster'):  Log.Info("[ ] poster: {}".format(json['Poster'])); SaveDict((os.path.join('OMDb', 'poster', imdbid_single+'.jpg'), common.poster_rank('OMDb', 'posters'), None), OMDb_dict, 'posters', json['Poster'])
      try:     Log.Info("[ ] duration: {}".format(SaveDict( int(Dict(json,'Runtime').replace(' min','')) * 60 * 1000, OMDb_dict, 'duration')))  # Plex save duration in millisecs
      except:  pass

  Log.Info("--- return ---".ljust(157, '-'))
  Log.Info("OMDb_dict: {}".format(DictString(OMDb_dict, 4)))
  return OMDb_dict
Example #7
def GetAniDBTitlesDB():
  ''' Get the AniDB title database
  '''
  global AniDBTitlesDB
  AniDBTitlesDB = common.LoadFile(filename='anime-titles.xml', relativeDirectory="AniDB", url=ANIDB_TITLES)  # AniDB title database loaded once every 2 weeks
  if not AniDBTitlesDB:  raise Exception("Failed to load core file '{url}'".format(url=os.path.splitext(os.path.basename(ANIDB_TITLES))[0]))
  else: Log.Info("Entries loaded: {}, File: {}".format(len(AniDBTitlesDB), ANIDB_TITLES))
Example #8
def GetAniDBTVDBMap():
    global AniDBTVDBMap
    AniDBTVDBMap = common.LoadFile(filename=os.path.basename(SCHUDLEE_MASTER),
                                   relativeDirectory="AnimeLists",
                                   url=SCHUDLEE_MASTER,
                                   cache=CACHE_1DAY * 6)  #
    if not AniDBTVDBMap:
        Log.Critical(
            "GetAniDBTVDBMap() - Failed to load core file '{file}'".format(
                url=os.path.splitext(os.path.basename(SCHUDLEE_MASTER)))
        )  #; AniDB_Movie_Set = XML.ElementFromString("<anime-set-list></anime-set-list>")  #; raise Exception("HAMA Fatal Error Hit")
    AniDBTVDBMap = MergeMaps(
        AniDBTVDBMap,
        common.LoadFile(filename=os.path.basename(ASS_SCHUDLEE_CORRECTIONS),
                        relativeDirectory="AnimeLists",
                        url=ASS_SCHUDLEE_CORRECTIONS,
                        cache=CACHE_1DAY *
                        6))  #Online ScudLee anidb to tvdb mapping list
Example #9
def GetMetadata(movie, MALid):
    Log.Info("".ljust(157, '-'))
    Log.Info("MyAnimeList.GetMetadata() - MALid: '%s'" % MALid)
    if not MALid or not MALid.isdigit(): return {}

    MAL_HTTP_API_URL = "http://fribbtastic-api.net/fribbtastic-api/services/anime?id="
    MAL_PREFIX = "https://myanimelist.cdn-dena.com"  # Some links in the XML will come from TheTVDB, not adding those....
    MyAnimeList_dict = {}
    xml = common.LoadFile(filename=MALid + ".xml",
                          relativeDirectory=os.path.join('MyAnimeList', 'xml'),
                          url=MAL_HTTP_API_URL + MALid,
                          cache=CACHE_1DAY * 7)
    if xml:
        SaveDict(GetXml(xml, 'title'), MyAnimeList_dict, 'title')
        SaveDict(GetXml(xml, 'synopsis'), MyAnimeList_dict, 'summary')
        SaveDict(GetXml(xml, 'rating'), MyAnimeList_dict, 'score')
        #SaveDict( GetXml(xml, 'content_rating').split(" ")[0], MyAnimeList_dict, 'rating'   )
        SaveDict(GetXml(xml, 'firstAired'), MyAnimeList_dict,
                 'originally_available_at')

        #for item in xml.xpath('//anime/genres/genre' or []):  SaveDict([item.text], MyAnimeList_dict, 'genres')
        if GetXml(xml, '//anime/genres/genre'):
            SaveDict([item.text for item in xml.xpath('//anime/genres/genre')],
                     MyAnimeList_dict, 'genres')
        if GetXml(xml, 'status') == 'Currently Airing':
            SaveDict("Continuing", MyAnimeList_dict, 'genres')
        if GetXml(xml, 'status') == 'Finished Airing':
            SaveDict("Ended", MyAnimeList_dict, 'genres')
        if GetXml(xml, 'type') == 'TV':
            SaveDict("Serie", MyAnimeList_dict, 'genres')
        if GetXml(xml, 'type') == 'Movie':
            SaveDict("Movie", MyAnimeList_dict, 'genres')
        if GetXml(xml, 'type') == 'Special':
            SaveDict("Special", MyAnimeList_dict, 'genres')

        for item in xml.xpath('//anime/episodes/episode') or []:
            episode = GetXml(item, 'episodeNumber')
            SaveDict(GetXml(item, 'engTitle'), MyAnimeList_dict, 'seasons', "1",
                     'episodes', episode, 'title')
            SaveDict(GetXml(item, 'aired'), MyAnimeList_dict, 'seasons', "1",
                     'episodes', episode, 'originally_available_at')

        for item in xml.xpath('//anime/covers/cover'):
            SaveDict(("MyAnimeList/" + "/".join(item.text.split('/')[3:]), 50,
                      None) if item.text.startswith(MAL_PREFIX) else "",
                     MyAnimeList_dict, 'posters', item.text)
        for item in xml.xpath('//anime/backgrounds/background'):
            SaveDict(("MyAnimeList/" + "/".join(item.text.split('/')[3:]), 50,
                      None) if item.text.startswith(MAL_PREFIX) else "",
                     MyAnimeList_dict, 'art', item.text)
        for item in xml.xpath('//anime/banners/banner'):
            SaveDict(("MyAnimeList/" + "/".join(item.text.split('/')[3:]), 50,
                      None) if item.text.startswith(MAL_PREFIX) else "",
                     MyAnimeList_dict, 'banners', item.text)

    return MyAnimeList_dict
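
Note: GetXml() is another project helper that is not shown on this page; presumably it wraps an XPath lookup and returns the matched node's text, or an empty string when nothing matches. A minimal sketch under that assumption:

from lxml import etree

def GetXml(xml, node):                                    # hypothetical stand-in, not HAMA's real helper
    try:
        result = xml.xpath(node if node.startswith('/') else './/' + node)
        return (result[0].text or '') if result else ''
    except Exception:
        return ''

root = etree.fromstring("<anime><title>Example</title><type>TV</type></anime>")
print("{} / {}".format(GetXml(root, 'title'), GetXml(root, 'type')))   # -> Example / TV
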
Example #10
def GetAniDBTVDBMap():
    global AniDBTVDBMap
    MAPPING = 'https://raw.githubusercontent.com/ScudLee/anime-lists/master/anime-list-master.xml'  # ScudLee mapping file url
    MAPPING_FIX = 'https://raw.githubusercontent.com/ZeroQI/Absolute-Series-Scanner/master/anime-list-corrections.xml'  # ScudLee mapping file url online override
    AniDBTVDBMap = common.LoadFile(filename=os.path.basename(MAPPING),
                                   relativeDirectory="AnimeLists",
                                   url=MAPPING,
                                   cache=CACHE_1DAY * 6)  #
    if not AniDBTVDBMap:
        Log.Critical(
            "GetAniDBTVDBMap() - Failed to load core file '{file}'".format(
                url=os.path.splitext(os.path.basename(MAPPING)))
        )  #; AniDB_Movie_Set = XML.ElementFromString("<anime-set-list></anime-set-list>")  #; raise Exception("HAMA Fatal Error Hit")
    AniDBTVDBMap = MergeMaps(
        AniDBTVDBMap,
        common.LoadFile(filename=os.path.basename(MAPPING_FIX),
                        relativeDirectory="AnimeLists",
                        url=MAPPING_FIX,
                        cache=CACHE_1DAY *
                        6))  #Online ScudLee anidb to tvdb mapping list
Example #11
def GetAniDBMovieSets():
    global AniDBMovieSets
    AniDBMovieSets = common.LoadFile(
        filename=os.path.basename(SCHUDLEE_MOVIESET),
        relativeDirectory="AnimeLists",
        url=SCHUDLEE_MOVIESET,
        cache=CACHE_1MONTH)
    if not AniDBMovieSets:
        Log.Error(
            "GetAniDBMovieSets() - Failed to load core file '%s'" %
            os.path.basename(SCHUDLEE_MOVIESET)
        )  #;  AniDB_Movie_Set = XML.ElementFromString("<anime-set-list></anime-set-list>")
Example #12
def GetAniDBMovieSets():
    ANIME_MOVIESET = 'https://raw.githubusercontent.com/ScudLee/anime-lists/master/anime-movieset-list.xml'
    AniDBMovieSets = common.LoadFile(filename=os.path.basename(ANIME_MOVIESET),
                                     relativeDirectory="AnimeLists",
                                     url=ANIME_MOVIESET,
                                     cache=CACHE_1WEEK * 4)
    if not AniDBMovieSets:
        Log.Error(
            "Failed to load core file '%s'" %
            os.path.basename(ANIME_MOVIESET)
        )  #;  AniDB_Movie_Set = XML.ElementFromString("<anime-set-list></anime-set-list>")
    return AniDBMovieSets
Example #13
def GetAniDBTitlesDB():
    ''' Get the AniDB title database
  '''
    global AniDBTitlesDB
    AniDBTitlesDB = common.LoadFile(
        filename='anime-titles.xml',
        relativeDirectory="AniDB",
        url=ANIDB_TITLES,
        cache=CACHE_1DAY * 6)  # AniDB title database, cached for 6 days
    if not AniDBTitlesDB:
        raise Exception("Failed to load core file '{url}'".format(
            url=os.path.splitext(os.path.basename(ANIDB_TITLES))[0]))
Example #14
def GetAniDBTVDBMap():
    global AniDBTVDBMap
    AniDBTVDBMap = common.LoadFile(filename=os.path.basename(SCUDLEE_MASTER),
                                   relativeDirectory="AnimeLists",
                                   url=SCUDLEE_MASTER)  #
    if not AniDBTVDBMap:
        raise Exception(
            "GetAniDBTVDBMap() - Failed to load core file '{file}'".format(
                url=os.path.splitext(os.path.basename(SCUDLEE_MASTER)))
        )  #; AniDB_Movie_Set = XML.ElementFromString("<anime-set-list></anime-set-list>")  #; raise Exception("HAMA Fatal Error Hit")
    else:
        Log.Info("Entries loaded: {}, File: {}".format(len(AniDBTVDBMap),
                                                       SCUDLEE_MASTER))
Example #15
def GetAniDBMovieSets():
    global AniDBMovieSets
    AniDBMovieSets = common.LoadFile(
        filename=os.path.basename(SCHUDLEE_MOVIESET),
        relativeDirectory="AnimeLists",
        url=SCHUDLEE_MOVIESET,
        cache=CACHE_1MONTH)
    if not AniDBMovieSets:
        raise Exception(
            "GetAniDBMovieSets() - Failed to load core file '%s'" %
            os.path.basename(SCHUDLEE_MOVIESET)
        )  #;  AniDB_Movie_Set = XML.ElementFromString("<anime-set-list></anime-set-list>")
    else:
        Log.Info("Entries loaded: {}, File: {}".format(len(AniDBMovieSets),
                                                       SCHUDLEE_MOVIESET))
Example #16
def GetMetadata(AniDBid, MALid):
    Log.Info("=== AniList.GetMetadata() ===".ljust(157, '='))
    AniList_dict = {}

    # Try to match the AniDB id to an AniList id as it has a higher chance of being correct
    ALid = Dict(common.LoadFile(filename=AniDBid + '.json',
                                relativeDirectory=os.path.join(
                                    'AniList', 'json', 'AniDBid'),
                                url=ARM_SERVER_URL.format(id=AniDBid)),
                "anilist",
                default=None)

    Log.Info("AniDBid={}, MALid={}, ALid={}".format(AniDBid, MALid, ALid))
    if not MALid or not MALid.isdigit(): return AniList_dict

    Log.Info("--- series ---".ljust(157, "-"))

    # Use the AniList id if we got one, but fall back to the MAL id
    variables = {}
    if ALid is not None: SaveDict(ALid, variables, "id")
    else: SaveDict(int(MALid), variables, "malId")

    # Fetch data
    data = MakeGraphqlQuery(ANIME_DATA_DOCUMENT, variables)

    if data:
        Log.Info("--- images ---".ljust(157, "-"))

        posterUrl = Dict(data, "anime", "coverImage", "url")
        if posterUrl:
            Log.Info("[ ] poster: {}".format(posterUrl))
            SaveDict((os.path.join('AniList', 'poster',
                                   os.path.basename(posterUrl)),
                      common.poster_rank('AniList', 'posters'), None),
                     AniList_dict, 'posters', posterUrl)

        bannerUrl = Dict(data, "anime", "bannerImage")
        if bannerUrl:
            Log.Info("[ ] banner: {}".format(bannerUrl))
            SaveDict((os.path.join('AniList', 'banners',
                                   os.path.basename(bannerUrl)),
                      common.poster_rank('AniList', 'banners'), None),
                     AniList_dict, 'banners', bannerUrl)

    Log.Info("--- return ---".ljust(157, '-'))
    Log.Info("AniList_dict: {}".format(DictString(AniList_dict, 4)))
    return AniList_dict
Example #17
def GetMetadata(
    movie,
    imdbid,
    num=98
):  # IMDb returns 200 but doesn't download correctly - IMDb has a single poster, downloaded through the OMDb JSON API, preferred by the mapping file
    OMDB_HTTP_API_URL = "http://www.omdbapi.com/?apikey={api_key}&i=".format(
        api_key=Prefs['OMDbApiKey'])  #'
    OMDb_dict = {}

    Log.Info("".ljust(157, '-'))
    Log.Info("OMDb.GetMetadata() - background, Poster - imdbid: '%s'" % imdbid)
    for imdbid_single in imdbid.split(",") if imdbid.startswith("tt") else []:
        json = common.LoadFile(filename=imdbid_single + ".json",
                               relativeDirectory=os.path.join('OMDb', 'json'),
                               url=OMDB_HTTP_API_URL + imdbid_single,
                               cache=CACHE_1WEEK)
        if json:
            SaveDict(Dict(json, 'Title'), OMDb_dict, 'title')
            SaveDict(Dict(json, 'Plot'), OMDb_dict, 'summary')
            SaveDict(Dict(json, 'Released'), OMDb_dict,
                     'originally_available_at')
            SaveDict(Dict(json, 'Country'), OMDb_dict, 'countries')
            SaveDict(Dict(json, 'Director'), OMDb_dict, 'directors')
            SaveDict(Dict(json, 'Genre'), OMDb_dict, 'genres')
            SaveDict(Dict(json, 'Writer'), OMDb_dict, 'writers')
            SaveDict(Dict(json, 'imdbRating'), OMDb_dict, 'rating')
            if Dict(json,
                    'Metascore').isdigit() and not Dict(OMDb_dict, 'rating'):
                SaveDict(float(json['Metascore']) / 10, OMDb_dict, 'rating')
            if SaveDict(
                    Dict(json, 'Rated'), OMDb_dict, 'content_rating'
            ) in common.Movie_to_Serie_US_rating and not movie and Dict(
                    json, 'Type') == "movie":
                SaveDict(common.Movie_to_Serie_US_rating[json['Rated']],
                         OMDb_dict, 'content_rating')
            if Dict(json, 'Poster'):
                SaveDict(
                    (os.path.join('OMDb', 'poster', imdbid_single + '.jpg'),
                     num, None), OMDb_dict, 'posters', json['Poster'])
            try:
                SaveDict(
                    int(Dict(json, 'Runtime').replace(' min', '')) * 60 * 1000,
                    OMDb_dict, 'duration')  # Plex save duration in millisecs
            except:
                pass
    return OMDb_dict
Example #18
def MakeGraphqlQuery(document, variables):
    Log.Info("Query: {}".format(document))
    Log.Info("Variables: {}".format(variables))

    source = variables.keys()[0]
    data = JSON.StringFromObject({"query": document, "variables": variables})
    response = common.LoadFile(filename=str(variables[source]) + '.json',
                               relativeDirectory=os.path.join(
                                   'AniList', 'json', source),
                               url=GRAPHQL_API_URL,
                               data=data,
                               cache=CACHE_1DAY)

    # EX: {"data":null,"errors":[{"message":"Not Found.","hint":"Use POST request to access graphql subdomain.","status":404}]}
    if len(Dict(response, 'errors', default=[])) > 0:
        Log.Error("Got error: {}".format(Dict(response, 'errors')[0]))
        return None

    return Dict(response, "data")
Example #19
def LoadFileTVDB(id="", filename="", url="", headers={}):
  """ Wrapper around "common.LoadFile()" to remove the need to consistently define arguments 'relativeDirectory'/'cache'/'headers'
  """
  global TVDB_AUTH_TIME

  while 'LoadFileTVDB' in netLocked and netLocked['LoadFileTVDB'][0]:
    Log.Root("TheTVDBv2.LoadFileTVDB() - Waiting for lock: 'LoadFileTVDB'"); time.sleep(1)
  netLocked['LoadFileTVDB'] = (True, int(time.time())) #Log.Root("Lock acquired: 'LoadFile'")

  # If no auth or auth is >12hrs old, authenticate from scratch
  if 'Authorization' not in TVDB_HEADERS or (TVDB_AUTH_TIME and (time.time()-TVDB_AUTH_TIME) > CACHE_1DAY/2):
    try:
      TVDB_HEADERS['Authorization'] = 'Bearer ' + JSON.ObjectFromString(HTTP.Request(TVDB_LOGIN_URL, data=JSON.StringFromObject( {'apikey':TVDB_API_KEY} ), headers=common.UpdateDict(headers, common.COMMON_HEADERS), cacheTime=0).content)['token']
      TVDB_AUTH_TIME = time.time()
    except Exception as e:  Log.Root('TheTVDBv2.LoadFileTVDB() - Authorization Error: {}'.format(e))
    else:                   Log.Root('TheTVDBv2.LoadFileTVDB() - URL {}, headers: {}'.format(TVDB_LOGIN_URL, headers))

  netLocked['LoadFileTVDB'] = (False, 0)  #Log.Root("Lock released: 'LoadFile'")

  return common.LoadFile(filename=filename, relativeDirectory=os.path.join("TheTVDB", "json", id), url=url, headers=common.UpdateDict(headers, TVDB_HEADERS))
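
A hypothetical call (the URL below is an assumption based on TheTVDB's v2 REST API; the agent normally builds it from its own TVDB_*_URL constants):

serie_json = LoadFileTVDB(id="81797", filename="series_en.json",
                          url="https://api.thetvdb.com/series/81797",   # assumed v2 endpoint
                          headers={'Accept-Language': 'en'})
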
Example #20
def getAltName(typ, id, default):
    try:
        headers = {
            'pragma':
            'no-cache',
            'x-lcontrol':
            'x-no-cache',
            'user-agent':
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
        }
        API = 'http://anidb.net/perl-bin/animedb.pl?show=json&action=info_popup&type={}&id={}'
        c = common.LoadFile(filename="{}-{}.json".format(typ, id),
                            relativeDirectory=os.path.join("AniDB", "json"),
                            url=API.format(typ, id),
                            cache=CACHE_1DAY * 365 * 100,
                            headers=headers)
        regex = r"alternateName.*?>(.*?)<"
        matches = list(re.finditer(regex, c['html'], re.MULTILINE))
        match = matches[-1].group(1)
        assert match != ""
        return match
    except Exception:
        return default
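
A hypothetical call: the type string and id below are placeholders, and on any failure the helper simply returns the supplied default.

print(getAltName('character', '1', default='Unknown'))   # placeholder type/id; falls back to 'Unknown'
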
Example #21
def GetMetadata(media, movie, error_log, lang, metadata_source, AniDBid,
                TVDBid, IMDbid, mappingList, AniDB_movie):
    ''' TVDB - Load serie JSON
  '''
    Log.Info("".ljust(157, '-'))
    if not TVDBid.isdigit():
        Log.Info('TheTVDB.GetMetadata() - TVDBid empty')
        return {}, IMDbid
    TheTVDB_dict = {}
    anidb_numbering = metadata_source == "anidb" and (
        movie or max(map(int, media.seasons.keys())) <= 1)
    anidb_prefered = anidb_numbering and Dict(
        mappingList, 'defaulttvdbseason') not in ('a', '1')
    language_series = [
        language.strip() if language.strip() not in ('x-jat', 'zh-Hans',
                                                     'zh-Hant', 'zh-x-yue',
                                                     'zh-x-cmn',
                                                     'zh-x-nan') else ''
        for language in Prefs['SerieLanguagePriority'].split(',')
    ]
    language_episodes = [
        language.strip() if language.strip() not in ('x-jat', 'zh-Hans',
                                                     'zh-Hant', 'zh-x-yue',
                                                     'zh-x-cmn',
                                                     'zh-x-nan') else ''
        for language in Prefs['EpisodeLanguagePriority'].split(',')
    ]
    Log.Info(
        "TheTVDB.GetMetadata() - TVDBid: '{}', IMDbid: '{}', language_series : {}, language_episodes: {}"
        .format(TVDBid, IMDbid, language_series, language_episodes))

    ### TVDB Series JSON ###
    serie_json = Dict(
        common.LoadFile(filename='series_{}.json'.format(lang),
                        relativeDirectory="TheTVDB/json/" + TVDBid,
                        url=(TVDB_SERIES_URL % TVDBid) + '?' + lang,
                        cache=CACHE_1DAY,
                        headers={
                            'Content-type': 'application/json',
                            'Accept-Language': lang
                        }), 'data')
    if serie_json:
        #serie_json { "id","seriesId", "airsDayOfWeek", "imdbId", "zap2itId", "added", "addedBy", "lastUpdated", "seriesName", "aliases", "banner", "status",
        #             "firstAired", "network", "networkId", "runtime", "genre, "overview", "airsTime", "rating" , "siteRating", "siteRatingCount" }
        SaveDict(
            language_series.index(lang) if lang in language_series
            and not anidb_prefered else len(language_series), TheTVDB_dict,
            'language_rank')
        SaveDict(Dict(serie_json, 'seriesName'), TheTVDB_dict, 'title')
        SaveDict(Dict(serie_json, 'seriesName'), TheTVDB_dict,
                 'original_title')
        SaveDict(Dict(serie_json, 'imdbId') or IMDbid, TheTVDB_dict, 'IMDbid')
        SaveDict(Dict(serie_json, 'zap2itId'), TheTVDB_dict, 'zap2itId')
        SaveDict(Dict(serie_json, 'rating'), TheTVDB_dict, 'content_rating')
        if not anidb_prefered:
            SaveDict(Dict(serie_json, 'overview'), TheTVDB_dict, 'summary')
        SaveDict(Dict(serie_json, 'firstAired'), TheTVDB_dict,
                 'originally_available_at')
        SaveDict(Dict(serie_json, 'genre'), TheTVDB_dict, 'genres')
        SaveDict(Dict(serie_json, 'network'), TheTVDB_dict, 'studio')
        SaveDict(Dict(serie_json, 'siteRating'), TheTVDB_dict, 'rating')
        SaveDict(Dict(serie_json, 'status'), TheTVDB_dict, 'status')
        if Dict(serie_json, 'runtime') and Dict(serie_json,
                                                'runtime').isdigit():
            Log.Info('[ ] duration: {}'.format(Dict(serie_json, 'runtime')))
            SaveDict(
                int(Dict(serie_json, 'runtime')) * 60 * 1000, TheTVDB_dict,
                'duration')  #in ms in plex
        if Dict(serie_json, 'banner'):
            Log.Info('[ ] banner: {}'.format(TVDB_IMG_ROOT +
                                             Dict(serie_json, 'banner')))
            SaveDict((os.path.join('TheTVDB', 'banner',
                                   Dict(serie_json, 'banner')), 1, None),
                     TheTVDB_dict, 'banners',
                     TVDB_IMG_ROOT + Dict(serie_json, 'banner'))

        ### TVDB Series Actors JSON ###
        actor_json = Dict(
            common.LoadFile(filename='actors_{}.json'.format(lang),
                            relativeDirectory="TheTVDB/json/" + TVDBid,
                            url=TVDB_ACTORS_URL % TVDBid,
                            cache=CACHE_1DAY,
                            headers={
                                'Content-type': 'application/json',
                                'Accept-Language': lang
                            }), 'data')
        if actor_json:  #JSON format: 'data': [{"seriesId", "name", "image", "lastUpdated", "imageAuthor", "role", "sortOrder", "id", "imageAdded", },...]
            for role in actor_json or []:
                try:
                    SaveDict([{
                        'role':
                        Dict(role, 'role'),
                        'name':
                        Dict(role, 'name'),
                        'photo':
                        TVDB_IMG_ROOT +
                        role['image'] if Dict(role, 'image') else ''
                    }], TheTVDB_dict, 'roles')
                except Exception as e:
                    Log.Info("'roles' - error: '{}', role: '{}'".format(
                        str(e), str(role)))
            #common.DisplayDict(actor_json, ['role', 'name', 'image'])

        ### Load pages of episodes ###
        episodes_json, page = [], 1
        while page not in (None, '', 'null'):
            episodes_json_page = common.LoadFile(
                filename='episodes_page{}_{}.json'.format(page, lang),
                relativeDirectory="TheTVDB/json/" + TVDBid,
                url=TVDB_EPISODES_URL % (TVDBid, page),
                cache=CACHE_1DAY,
                headers={
                    'Content-type': 'application/json',
                    'Accept-Language': lang
                })
            episodes_json.extend(
                Dict(episodes_json_page, 'data')
            )  #Log.Info('TVDB_EPISODES_URL: {}, links: {}'.format(TVDB_EPISODES_URL % (TVDBid, page), Dict(episodes_json_page, 'links')))
            page = Dict(episodes_json_page, 'links', 'next')

        sorted_episodes_json = {}
        for episode_json in episodes_json:
            sorted_episodes_json['s{:02d}e{:03d}'.format(
                Dict(episode_json, 'airedSeason'),
                Dict(episode_json, 'airedEpisodeNumber'))] = episode_json
        sorted_episodes_index_list = sorted(
            sorted_episodes_json, key=natural_sort_key
        )  #Log.Info('len: {}, sorted_episodes_index_list: {}'.format(len(sorted_episodes_index_list), sorted_episodes_index_list))

        ### Build list_abs_eps for tvdb 3/4/5 ###
        list_abs_eps, list_sp_eps = {}, []
        if metadata_source in ('tvdb3', 'tvdb4'):
            for s in media.seasons:
                for e in media.seasons[s].episodes:
                    if s == '0': list_sp_eps.append(e)
                    else: list_abs_eps[e] = s
            Log.Info('Present abs eps: {}'.format(list_abs_eps))

        absolute_numering = metadata_source in ('tvdb3', 'tvdb4', 'tvdb5')

        ### episode loop ###
        tvdb_special_missing, summary_missing_special, summary_missing, summary_present, episode_missing, abs_manual_placement_info = [], [], [], [], [], []
        abs_number, missing_abs_nb, ep_count = 0, False, 0
        for index in sorted_episodes_index_list:

            # Episode and Absolute number calculation engine, episode translation
            episode_json = sorted_episodes_json[
                index]  #Log.Info('s{:02d}e{:03d} abs: {:03d} ep: {}'.format(Dict(episode_json, 'airedSeason') or 0, Dict(episode_json, 'airedEpisodeNumber') or 0, Dict(episode_json, 'absoluteNumber') or 0, episode_json))
            season = str(Dict(episode_json, 'airedSeason'))
            episode = str(Dict(episode_json, 'airedEpisodeNumber'))
            if season != '0':
                abs_number = abs_number + 1
                if not Dict(episode_json, 'absoluteNumber'):
                    missing_abs_nb = True
                elif missing_abs_nb or Dict(episode_json,
                                            'absoluteNumber') != abs_number:
                    Log.Error(
                        "(s{}e{}) Abs number {:>3} different from readings ({}), possibly tv special"
                        .format(season, episode, abs_number,
                                Dict(episode_json, 'absoluteNumber')))
                    abs_manual_placement_info.append(
                        "s{}e{} = json abs ep {} / abs_number {}".format(
                            season, episode,
                            Dict(episode_json, 'absoluteNumber'), abs_number))
                    missing_abs_nb = False
                    abs_number = Dict(episode_json, 'absoluteNumber')

            ### Missing summaries logs ###
            numbering = "s{}e{}".format(season, episode)
            if Dict(episode_json, 'overview'):
                summary_present.append(numbering)
            elif season != '0':
                summary_missing.append(numbering)
            else:
                summary_missing_special.append(numbering)

            ### ep translation
            anidbid = ""
            #if anidb_numbering:
            #  if Dict(mappingList, 'defaulttvdbseason')!="a" or season=='0':      season, episodex, anidbid = anidb_ep(mappingList, season, episode)
            #  else:                                                               season, episode = '1', str(abs_number)
            #elif metadata_source in ('tvdb3', "tvdb4", "tvdb5") and season!='0':  season, episode = '1', str(abs_number)
            if anidb_numbering:
                season, episode, anidbid = anidb_ep(mappingList, season,
                                                    episode)
            elif metadata_source in ('tvdb3', "tvdb4",
                                     "tvdb5") and season != '0':
                season, episode = '1', str(abs_number)

            ### Check for Missing Episodes ###
            if not(season =='0' and episode in list_sp_eps) and \
               not(metadata_source in ('tvdb3', 'tvdb4') and str(abs_number) in list_abs_eps) and \
               not(not movie and season in media.seasons and episode in media.seasons[season].episodes):
                Log.Info(
                    '[ ] {:>7} s{:0>2}e{:0>3} anidbid: {:>7} air_date: {}'.
                    format(numbering, season, episode, anidbid,
                           Dict(episode_json, 'firstAired')))
                air_date = Dict(episode_json, 'firstAired')
                air_date = int(air_date.replace(
                    '-', '')) if air_date.replace('-', '').isdigit() and int(
                        air_date.replace('-', '')) > 10000000 else 99999999
                if int(time.strftime("%Y%m%d")) <= air_date + 1:
                    pass  #Log.Info("TVDB - Episode '{}' missing but not aired/missing '{}'".format(numbering, air_date))
                elif season == '0':
                    Log.Info("TVDB - type of episode_missing: " +
                             type(episode_missing).__name__)
                    tvdb_special_missing.append(episode)
                else:
                    Log.Info("TVDB - type of tvdb_special_missing: " +
                             type(tvdb_special_missing).__name__)
                    episode_missing.append(
                        str(abs_number) + " (" + numbering +
                        ")" if metadata_source in ('tvdb3',
                                                   'tvdb4') else numbering)

            ### File present on disk
            else:
                #Log.Info('[?] episode_json: {}'.format(episode_json))
                Log.Info(
                    '[X] {:>7} s{:0>2}e{:0>3} anidbid: {:>7} air_date: {} abs_number: {}, title: {}'
                    .format(numbering, season, episode, anidbid,
                            Dict(episode_json, 'firstAired'), abs_number,
                            Dict(episode_json, 'episodeName')))
                if not anidb_numbering:
                    SaveDict(abs_number, TheTVDB_dict, 'seasons', season,
                             'episodes', episode, 'absolute_index')
                SaveDict(Dict(serie_json, 'rating'), TheTVDB_dict, 'seasons',
                         season, 'episodes', episode, 'content_rating')
                SaveDict(Dict(serie_json, 'runtime'), TheTVDB_dict, 'seasons',
                         season, 'episodes', episode, 'duration')
                SaveDict(Dict(episode_json, 'overview'), TheTVDB_dict,
                         'seasons', season, 'episodes', episode, 'summary')
                Log.Info('[ ] summary: {}'.format(
                    Dict(episode_json, 'overview')))
                SaveDict(Dict(episode_json,
                              'firstAired'), TheTVDB_dict, 'seasons', season,
                         'episodes', episode, 'originally_available_at')

                # Title from serie page
                if Dict(episode_json, 'episodeName'):
                    rank = language_episodes.index(
                        lang) if lang in language_episodes else len(
                            language_episodes)
                    title = Dict(episode_json, 'episodeName')
                else:
                    rank, title = len(language_episodes) + 1, ''
                Log.Info("[1] rank: {:>1}, language: {:>4}, title: {}".format(
                    rank, Dict(episode_json, 'language', 'episodeName'),
                    title))

                ### Ep advance information ###
                ep_count += 1
                for lang2 in language_episodes:
                    if lang2 not in (lang, ''): break
                else: lang2 = 'en'
                episode_details_json = Dict(
                    common.LoadFile(filename='episode_{}_{}.json'.format(
                        Dict(episode_json, 'id'), lang2),
                                    relativeDirectory="TheTVDB/json/" + TVDBid,
                                    url=TVDB_EPISODE_DETAILS_URL +
                                    str(Dict(episode_json, 'id')),
                                    cache=CACHE_1DAY,
                                    headers={
                                        'Content-type': 'application/json',
                                        'Accept-Language': lang2
                                    }), 'data')
                if episode_details_json:

                    # Std ep info loaded for Library language, then details for 1st language, loading other languages if needed
                    if lang2 in language_episodes and language_episodes.index(
                            lang2) < rank and Dict(episode_details_json,
                                                   'language',
                                                   'episodeName') == lang2:
                        rank = language_episodes.index(lang2)
                        title = Dict(episode_details_json, 'episodeName')
                        Log.Info(
                            '[2] language_rank: {:>1}, language: {:>4}, title: {}'
                            .format(rank, lang2, title))

                    SaveDict(Dict(episode_details_json,
                                  'writers'), TheTVDB_dict, 'seasons', season,
                             'episodes', episode, 'writers')
                    SaveDict(Dict(episode_details_json,
                                  'directors'), TheTVDB_dict, 'seasons',
                             season, 'episodes', episode, 'directors')
                    SaveDict(Dict(episode_details_json,
                                  'guestStars'), TheTVDB_dict, 'seasons',
                             season, 'episodes', episode, 'guest_stars')
                    SaveDict(Dict(episode_details_json,
                                  'siteRating'), TheTVDB_dict, 'seasons',
                             season, 'episodes', episode, 'rating')

                    # Episode screenshoT/Thumbnail
                    if Dict(episode_details_json, 'filename'):
                        SaveDict((str("TheTVDB/episodes/" + os.path.basename(
                            Dict(episode_details_json, 'filename'))), 1, None),
                                 TheTVDB_dict, 'seasons', season, 'episodes',
                                 episode, 'thumbs',
                                 str(TVDB_IMG_ROOT +
                                     Dict(episode_details_json, 'filename')))
                    else:
                        Log.Info('[!] episode_details_json: {}'.format(
                            episode_details_json))

                    Log.Info('[ ] thumb: {}'.format(
                        TVDB_IMG_ROOT + Dict(episode_details_json, 'filename')
                        if Dict(episode_details_json, 'filename') else ''))

                #
                for lang_rank, language in enumerate(
                        language_episodes[1:rank - 1]
                        if len(language_episodes) > 1 and rank >= 2 else []):
                    if not language: continue
                    episode_details_json = Dict(common.LoadFile(
                        filename='episode_{}_{}.json'.format(
                            Dict(episode_json, 'id'), language),
                        relativeDirectory="TheTVDB/json/" + TVDBid,
                        url=TVDB_EPISODE_DETAILS_URL +
                        str(Dict(episode_json, 'id')),
                        cache=CACHE_1DAY,
                        headers={
                            'Content-type': 'application/json',
                            'Accept-Language': language
                        }),
                                                'data',
                                                default={})
                    if Dict(episode_details_json, 'episodeName'):
                        title = Dict(episode_details_json, 'episodeName')
                        rank = lang_rank
                        Log.Info('[3] language_rank: {}, title: {}'.format(
                            rank, title))
                        break
                    else:
                        Log.Info('no ep title in language: {}'.format(
                            language_episodes[lang_rank]))
                SaveDict(title, TheTVDB_dict, 'seasons', season, 'episodes',
                         episode, 'title')
                SaveDict(rank, TheTVDB_dict, 'seasons', season, 'episodes',
                         episode, 'language_rank')
                #Log.Info('[?] numbering: {} => s{:>1}e{:>3} language_rank: {:>1}, title: "{}"'.format(numbering, season, episode, rank, title))
                Log.Info('-------------')

        ### Collection ###  # get all anidbids sharing the same tvdbids
        if not movie:
            ### Logging ###
            if summary_missing:
                error_log['Missing Episode Summaries'].append(
                    "TVDBid: %s | Title: '%s' | Missing Episode Summaries: %s"
                    % (common.WEB_LINK %
                       (common.TVDB_SERIE_URL + TVDBid, TVDBid),
                       Dict(TheTVDB_dict, 'title'), str(summary_missing)))
            if summary_missing_special:
                error_log['Missing Special Summaries'].append(
                    "TVDBid: %s | Title: '%s' | Missing Special Summaries: %s"
                    % (common.WEB_LINK %
                       (common.TVDB_SERIE_URL + TVDBid, TVDBid),
                       Dict(TheTVDB_dict,
                            'title'), str(summary_missing_special)))
            if episode_missing:
                error_log['Missing Episodes'].append(
                    "TVDBid: %s | Title: '%s' | Missing Episodes: %s" %
                    (common.WEB_LINK %
                     (common.TVDB_SERIE_URL + TVDBid, TVDBid),
                     Dict(TheTVDB_dict, 'title'), str(episode_missing)))
            if tvdb_special_missing:
                error_log['Missing Specials'].append(
                    "TVDBid: %s | Title: '%s' | Missing Specials: %s" %
                    (common.WEB_LINK %
                     (common.TVDB_SERIE_URL + TVDBid, TVDBid),
                     Dict(TheTVDB_dict, 'title'), str(tvdb_special_missing)))
            #Log.Info("abs_manual_placement_info: " + str(abs_manual_placement_info))
            #Log.Debug("Episodes without Summary: " + str(sorted(summary_missing, key=natural_sort_key)))

        ### Picture types JSON download ###
        language_posters = [
            language.strip()
            for language in Prefs['PosterLanguagePriority'].split(',')
        ]
        priority_posters = [
            source.strip() for source in Prefs['posters'].split(',')
        ]
        Log.Info('language_posters: {}'.format(language_posters))
        Log.Info('==========================')
        for language in language_posters:
            try:
                if not language == 'ja':
                    bannerTypes = Dict(common.LoadFile(
                        filename='images_{}.json'.format(language),
                        relativeDirectory="TheTVDB/json/" + TVDBid,
                        url=(TVDB_SERIES_IMG_INFO_URL % TVDBid),
                        cache=0,
                        headers={
                            'Content-type': 'application/json',
                            'Accept-Language': language
                        }),
                                       'data',
                                       default={})
            except:
                Log.Info("Invalid image JSON from url: " +
                         TVDB_SERIES_IMG_INFO_URL % TVDBid)
            else:  #JSON format = {"fanart", "poster", "season", "seasonwide", "series"}
                metanames = {
                    'fanart': "art",
                    'poster': "posters",
                    'series': "banners",
                    'season': "seasons",
                    'seasonwide': 'seasonwide'
                }  #
                count_valid = {key: 0 for key in metanames}
                anidb_offset = sorted(
                    (Dict(mappingList, 'poster_id_array', TVDBid) or
                     {}).keys()).index(AniDBid) if AniDBid and AniDBid in Dict(
                         mappingList, 'poster_id_array', TVDBid) else 0
                language_priority = [
                    item.strip()
                    for item in Prefs['EpisodeLanguagePriority'].split(',')
                ]
                Log.Info(
                    "bannerTypes: {}, anidb_offset: {}, AniDBid: {}, anidb_array: {}"
                    .format(
                        bannerTypes, anidb_offset, AniDBid,
                        str((Dict(mappingList, 'poster_id_array', TVDBid)
                             or {}).keys())))

                #Loop per banner type ("fanart", "poster", "season", "series") skip 'seasonwide' - Load bannerType images list JSON
                for bannerType in bannerTypes or []:
                    if bannerTypes[bannerType] == 0 or bannerType in (
                            'seasonwide', 'series'
                    ) or movie and not bannerType in ('fanart', 'poster'):
                        continue  #Loop if no images
                    #if anidb_numbering and Dict(mappingList, 'defaulttvdbseason') not in ('a', '1') and bannerType=='poster':  continue  #skip if anidb numbered serie mapping to season 0 or 2+

                    Log.Info('------------------------------')
                    try:
                        images = Dict(common.LoadFile(
                            filename='images_{}_{}.json'.format(
                                bannerType, language),
                            relativeDirectory="TheTVDB/json/" + TVDBid,
                            url=TVDB_SERIES_IMG_QUERY_URL.format(
                                TVDBid, bannerType),
                            cache=CACHE_1DAY,
                            headers={'Accept-Language': language}),
                                      'data',
                                      default={})
                    except:
                        images = {}
                        Log("Bad image type query data for TVDB id: %s (bannerType: %s)"
                            % (TVDBid, bannerType))
                    else:
                        images = sorted(
                            images,
                            key=lambda x: Dict(
                                x, "ratingsInfo", "average", default=0),
                            reverse=True)
                        for image in images:  #JSON format = {"data": [{"id", "keyType", "subKey"(season/graphical/text), "fileName", "resolution", "ratingsInfo": {"average", "count"}, "thumbnail"}]}

                            #rank
                            rank = 1 if bannerType == 'poster' and anidb_offset == divmod(
                                count_valid['poster'],
                                Dict(bannerTypes, 'poster', default=0)
                            )[1] else count_valid[bannerType] + 2
                            if language in language_posters:
                                rank = (rank //
                                        30) * 30 * language_posters.index(
                                            language) + rank % 30
                            if 'TheTVDB' in priority_posters:
                                rank = rank + 6 * priority_posters.index(
                                    'TheTVDB')
                            rank = rank + language_posters.index(language) * 20
                            if AniDB_movie:
                                rank = rank + Dict(
                                    bannerTypes, 'poster',
                                    default=0) if rank + Dict(
                                        bannerTypes, 'poster',
                                        default=0) < 99 else 99

                            ### Adding picture ###
                            thumbnail = TVDB_IMG_ROOT + image[
                                'thumbnail'] if Dict(image,
                                                     'thumbnail') else None
                            Log.Info(
                                "[!] bannerType: {:>7} subKey: {:>9} rank: {:>3} filename: {} thumbnail: {} resolution: {} average: {} count: {}"
                                .format(
                                    metanames[bannerType],
                                    Dict(image, 'subKey'), rank,
                                    TVDB_IMG_ROOT + Dict(image, 'fileName'),
                                    TVDB_IMG_ROOT + Dict(image, 'thumbnail'),
                                    Dict(image, 'resolution'),
                                    Dict(image, 'ratingsInfo', 'average'),
                                    Dict(image, 'ratingsInfo', 'count')))
                            if bannerType == 'season':  #tvdb season posters or anidb specials and defaulttvdb season  ## season 0 et empty+ season ==defaulttvdbseason(a=1)
                                if not anidb_numbering:
                                    SaveDict(('TheTVDB/' + image['fileName'],
                                              rank, thumbnail),
                                             TheTVDB_dict, 'seasons',
                                             str(image['subKey']), 'posters',
                                             TVDB_IMG_ROOT + image['fileName'])
                                elif str(image['subKey']
                                         or 0) in ('1' if Dict(
                                             mappingList,
                                             'defaulttvdbseason') == 'a' else
                                                   Dict(mappingList,
                                                        'defaulttvdbseason'), '0'):
                                    SaveDict(
                                        ('TheTVDB/' + image['fileName'],
                                         1 if rank == 3 else
                                         3 if rank == 1 else rank, thumbnail),
                                        TheTVDB_dict, 'seasons',
                                        '0' if str(image['subKey']) == '0' else
                                        '1' if anidb_numbering else str(
                                            image['subKey']), 'posters',
                                        TVDB_IMG_ROOT + image['fileName'])
                                    SaveDict(('TheTVDB/' + image['fileName'],
                                              rank, thumbnail), TheTVDB_dict,
                                             'posters',
                                             TVDB_IMG_ROOT + image['fileName'])
                            elif bannerType == 'fanart' or not anidb_numbering or Dict(
                                    mappingList,
                                    'defaulttvdbseason') in ('a', '1') or str(
                                        image['subKey'] or '1') == Dict(
                                            mappingList,
                                            'defaulttvdbseason') or not Dict(
                                                bannerTypes, 'season'
                                            ) and bannerType == 'poster':
                                SaveDict(
                                    ('TheTVDB/' + image['fileName'], rank,
                                     thumbnail), TheTVDB_dict,
                                    metanames[bannerType], TVDB_IMG_ROOT +
                                    image['fileName'])  #use art + posters tvdb
                            #if bannerType == 'season':
                            #  if anidb_numbering and ('1' if Dict(mappingList, 'defaulttvdbseason')=='a' else Dict(mappingList, 'defaulttvdbseason'))==str(image['subKey']):
                            #    SaveDict(('TheTVDB/'+image['fileName'], 1 if rank==3 else 3 if rank==1 else rank, thumbnail), TheTVDB_dict, 'seasons', '0' if str(image['subKey'])=='0' else '1', 'posters', TVDB_IMG_ROOT + image['fileName'])
                            #  elif not anidb_numbering:
                            #    season = str(int(image['subKey'])+(0 if Dict(mappingList, 'defaulttvdbseason')=="0" or not Dict(mappingList, 'defaulttvdbseason').isdigit() else int(Dict(mappingList, 'defaulttvdbseason'))-1))
                            #    SaveDict(   ('TheTVDB/'+image['fileName'], rank, thumbnail), TheTVDB_dict, 'seasons', season, 'posters', TVDB_IMG_ROOT + image['fileName'])
                            #if bannerType != 'season':
                            #  SaveDict(('TheTVDB/'+image['fileName'], rank, thumbnail), TheTVDB_dict, metanames[bannerType],        TVDB_IMG_ROOT + image['fileName'])
                            count_valid[bannerType] = count_valid[
                                bannerType] + 1  #Otherwise with += SyntaxError: Line 142: Augmented assignment of object items and slices is not allowed

                #Log.Info("Posters : {}/{}, Season posters: {}/{}, Art: {}/{}".format(count_valid['poster'], Dict(bannerTypes, 'poster'), count_valid['season'], Dict(bannerTypes, 'season') or 0, count_valid['fanart'], Dict(bannerTypes, 'fanart')))
                if not Dict(bannerTypes, 'poster'):
                    error_log['TVDB posters missing'].append(
                        "TVDBid: %s | Title: '%s'" %
                        (common.WEB_LINK %
                         (common.TVDB_SERIE_URL + TVDBid, TVDBid),
                         Dict(TheTVDB_dict, 'title')))
                if not Dict(bannerTypes, 'season'):
                    error_log['TVDB season posters missing'].append(
                        "TVDBid: %s | Title: '%s'" %
                        (common.WEB_LINK %
                         (common.TVDB_SERIE_URL + TVDBid, TVDBid),
                         Dict(TheTVDB_dict, 'title')))
            Log.Info('==========================')

        Log.Info(
            "url: '{}', IMDbid: {}, Present episodes: {}, Missing: {}".format(
                TVDB_SERIES_URL % TVDBid, IMDbid, ep_count,
                sorted(episode_missing, key=natural_sort_key)))

    Log.Info('TheTVDB_dict: {}'.format(TheTVDB_dict))
    return TheTVDB_dict, IMDbid
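
These examples lean heavily on two helpers from the agent's common module, Dict() and SaveDict(), which are not included in the snippets. Below is a minimal sketch of the behaviour they are assumed to have (safe nested reads, nested writes that return the stored value); the real implementations also merge lists and strings and handle more edge cases, so treat this only as an approximation.

# Minimal sketch of the Dict()/SaveDict() helpers assumed by these examples
# (hypothetical simplification; the agent's actual common.py versions do more,
# e.g. merging lists/strings and logging).
def Dict(source, *keys, **kwargs):
    """Safely walk nested dict/list keys; return `default` ('' unless given) when a level is missing."""
    default = kwargs.get('default', '')
    for key in keys:
        try:
            source = source[key]
        except (KeyError, IndexError, TypeError):
            return default
    return source if source is not None else default

def SaveDict(value, target, *keys):
    """Store a non-empty `value` at the nested key path in `target`, creating levels as needed, and return it."""
    if not value and value != 0:
        return value
    for key in keys[:-1]:
        target = target.setdefault(key, {})
    target[keys[-1]] = value
    return value
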
Example #22
0
def GetMetadata(media, movie, TVDBid, TMDbid, IMDbid):
    TMDB_MOVIE_SEARCH_BY_TMDBID = 'http://api.tmdb.org/3/movie/%s?api_key=7f4a0bd0bd3315bb832e17feda70b5cd&append_to_response=releases,credits,trailers,external_ids&language=en'  #Work with IMDbid
    TMDB_SERIE_SEARCH_BY_TVDBID = "http://api.TheMovieDb.org/3/find/%s?api_key=7f4a0bd0bd3315bb832e17feda70b5cd&external_source=tvdb_id&append_to_response=releases,credits,trailers,external_ids&language=en"
    TMDB_CONFIG_URL = 'http://api.tmdb.org/3/configuration?api_key=7f4a0bd0bd3315bb832e17feda70b5cd'
    #TMDB_MOVIE_GENRE_LIST       = "https://api.TheMovieDb.org/3/genre/movie/list?api_key=7f4a0bd0bd3315bb832e17feda70b5cd&language=en-US"
    #TMDB_SERIE_GENRE_LIST       = "https://api.TheMovieDb.org/3/genre/tv/list?api_key=7f4a0bd0bd3315bb832e17feda70b5cd&language=en-US"
    dict_TheMovieDb = {}
    TSDbid = ""

    Log.Info("".ljust(157, '-'))
    Log("TheMovieDb.GetMetadata() - TVDBid: {}, TMDbid: {}, IMDbid: {}".format(
        TVDBid, TMDbid, IMDbid))
    if TMDbid:
        url, filename = TMDB_MOVIE_SEARCH_BY_TMDBID % TMDbid, "TMDB-" + TMDbid + ".json"
    elif IMDbid and TVDBid == 'movie':
        url, filename = TMDB_MOVIE_SEARCH_BY_TMDBID % IMDbid, "IMDb-" + IMDbid + ".json"
    elif TVDBid.isdigit():
        url, filename = TMDB_SERIE_SEARCH_BY_TVDBID % TVDBid, "TVDB-" + TVDBid + ".json"
    else:
        return dict_TheMovieDb, TSDbid, TMDbid, IMDbid

    json = common.LoadFile(filename=filename,
                           relativeDirectory=os.path.join(
                               'TheMovieDb', 'json'),
                           url=url,
                           cache=CACHE_1WEEK)
    config_dict = common.LoadFile(filename="TMDB_CONFIG_URL.json",
                                  relativeDirectory="TheMovieDb",
                                  url=TMDB_CONFIG_URL,
                                  cache=CACHE_1DAY * 30)
    mode = "movie" if movie else "tv"
    if not json:
        Log.Info("TMDB - url: failed to get json" +
                 TMDB_MOVIE_SEARCH_BY_TMDBID % TMDbid)
    else:
        if 'tv_results' in json and json['tv_results']:
            json, mode = json['tv_results'][0], "tv"
        elif 'movie_results' in json and json['movie_results']:
            json, mode = json['movie_results'][0], "movie"

        SaveDict(Dict(json, 'title'), dict_TheMovieDb, 'title')
        SaveDict(
            Dict(json, 'vote_average'), dict_TheMovieDb, 'rating'
        )  #if 'vote_count' in json and json['vote_count'] > 3:  SaveDict( Dict(json, 'vote_average'), dict_TheMovieDb, 'rating')
        SaveDict(Dict(json, 'tagline'), dict_TheMovieDb, 'tagline')
        SaveDict(Dict(json, 'overview'), dict_TheMovieDb, 'summary')
        SaveDict(Dict(json, 'runtime'), dict_TheMovieDb, 'duration')
        SaveDict(Dict(json, 'origin_country'), dict_TheMovieDb, 'countries')
        SaveDict(Dict(json, 'first_air_date'), dict_TheMovieDb,
                 'originally_available_at')
        if Dict(json, 'belongs_to_collection', 'name'):
            SaveDict([
                Dict(json, 'belongs_to_collection', 'name').replace(
                    ' Collection', '')
            ], dict_TheMovieDb, 'collections')
        if Dict(json, 'genres'):
            SaveDict(
                [Dict(genre, 'name') for genre in Dict(json, 'genres') or []],
                dict_TheMovieDb, 'genres')
        if Dict(json, 'poster_path'):
            dict_TheMovieDb['posters'] = {
                config_dict['images']['base_url'] + 'original' + json['poster_path']:
                (os.path.join('TheMovieDb', 'poster',
                              json['poster_path'].lstrip('/')), 90, None)
            }
        if Dict(json, 'backdrop_path'):
            dict_TheMovieDb['art'] = {
                config_dict['images']['base_url'] + 'original' + json['backdrop_path']:
                (os.path.join('TheMovieDb', 'artwork',
                              json['backdrop_path'].lstrip('/')), 90,
                 config_dict['images']['base_url'] + 'w300' +
                 json['backdrop_path'])
            }
        try:
            SaveDict(
                int(Dict(json, 'duration')) * 60 * 1000, dict_TheMovieDb,
                'duration')
        except:
            pass
        #Log.Info(str(dict_TheMovieDb))
        if mode == 'tv': TSDbid = Dict(json, 'id')
        elif not TMDbid: TMDbid = Dict(json, 'id')
        if not IMDbid: IMDbid = Dict(json, 'imdb_id')

        #SaveDict( studio['name'].strip(), dict_TheMovieDb, 'studio')
        for studio in Dict(json, 'production_companies') or []:
            if studio['id'] <= json['production_companies'][0]['id']:
                SaveDict(studio['name'].strip(), dict_TheMovieDb, 'studio')

    ### More pictures ###
    Log.Info(
        "TheMovieDb.GetMetadata() - TMDbid: '{}', TSDbid: '{}', IMDbid: '{}'".
        format(TMDbid, TSDbid, IMDbid))
    for id in IMDbid.split(',') if ',' in IMDbid else []:
        TMDB_MOVIE_IMAGES_URL = 'https://api.tmdb.org/3/{mode}/{id}/images?api_key=7f4a0bd0bd3315bb832e17feda70b5cd'
        json = common.LoadFile(filename="TMDB-" + (IMDbid or TMDbid) + ".json",
                               relativeDirectory="TMDB",
                               url=TMDB_MOVIE_IMAGES_URL.format(id=id,
                                                                mode=mode),
                               cache=CACHE_1WEEK)
        for index, poster in enumerate(Dict(json, 'posters') or []):
            if Dict(json, 'posters', index, 'file_path'):
                SaveDict((os.path.join('TheMovieDb', 'poster', "%s-%s.jpg" %
                                       (TMDbid, index)), 40, None),
                         dict_TheMovieDb, 'posters',
                         config_dict['images']['base_url'] + 'original' +
                         json['posters'][index]['file_path'])
        for index, poster in enumerate(Dict(json, 'backdrops') or []):
            if Dict(json, 'backdrops', index, 'file_path'):
                SaveDict((os.path.join(
                    'TheMovieDb', 'artwork', "%s-%s-art.jpg" %
                    (TMDbid, index)), 40, config_dict['images']['base_url'] +
                          'w300' + json['backdrops'][index]['file_path']),
                         dict_TheMovieDb, 'art',
                         config_dict['images']['base_url'] + 'original' +
                         json['backdrops'][index]['file_path'])

    return dict_TheMovieDb, TSDbid, TMDbid, IMDbid
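
For context on what the LoadFile() call in the example above actually retrieves, here is a standalone sketch of the TMDb find-by-TVDB-id request using plain requests; the requests dependency and the helper name find_by_tvdbid() are assumptions, while the endpoint and API key are taken from the example.

# Standalone sketch (outside the Plex plug-in framework) of the TMDb "find by TVDB id"
# lookup cached by common.LoadFile() above.
import requests

TMDB_SERIE_SEARCH_BY_TVDBID = ("https://api.themoviedb.org/3/find/%s"
                               "?api_key=7f4a0bd0bd3315bb832e17feda70b5cd"
                               "&external_source=tvdb_id&language=en")

def find_by_tvdbid(tvdbid):
    """Return the first tv_results/movie_results entry for a TVDB id, plus its mode, or ({}, None)."""
    data = requests.get(TMDB_SERIE_SEARCH_BY_TVDBID % tvdbid, timeout=10).json()
    for mode, key in (("tv", 'tv_results'), ("movie", 'movie_results')):
        if data.get(key):
            return data[key][0], mode
    return {}, None
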
Example #23
0
def GetMetadata(media, movie, error_log, source, AniDBid, TVDBid,
                AniDBMovieSets, mappingList):
    ''' Download metadata to dict_AniDB, ANNid, MALid
  '''
    Log.Info("=== AniDB.GetMetadata() ===".ljust(157, '='))
    AniDB_dict, ANNid, MALid = {}, "", ""
    original = AniDBid
    anidb_numbering = source == "anidb" and (
        movie or max(map(int, media.seasons.keys())) <= 1)
    language_posters = [
        language.strip()
        for language in Prefs['PosterLanguagePriority'].split(',')
    ]
    priority_posters = [
        provider.strip() for provider in Prefs['posters'].split(',')
    ]

    ### Build the list of anidbids for files present ####
    if source.startswith("tvdb") or source.startswith(
            "anidb") and not movie and max(map(int, media.seasons.keys(
            ))) > 1:  #multi anidbid required only for tvdb numbering
        full_array = [
            anidbid for season in Dict(mappingList, 'TVDB') or []
            for anidbid in Dict(mappingList, 'TVDB', season)
            if season and 'e' not in season and anidbid.isdigit()
        ]
        AniDB_array = {
            AniDBid: []
        } if Dict(mappingList,
                  'defaulttvdbseason') == '1' and source != 'tvdb4' else {}
        for season in sorted(
                media.seasons, key=common.natural_sort_key
        ) if not movie else []:  # For each season, media, then use metadata['season'][season]...
            for episode in sorted(media.seasons[season].episodes,
                                  key=common.natural_sort_key):
                if int(episode) > 99:
                    continue  # AniDB non-normal special (op/ed/t/o) that is not mapable
                if source == 'tvdb3' and season != 0:
                    new_season, new_episode, anidbid = AnimeLists.anidb_ep(
                        mappingList, season,
                        Dict(mappingList,
                             'absolute_map',
                             episode,
                             default=(None, episode))
                        [1])  # Pull absolute number then try to map
                elif source == 'tvdb4' and season != 0:
                    new_season, new_episode = Dict(mappingList,
                                                   'absolute_map',
                                                   episode,
                                                   default=(season, episode))
                    anidbid = 'UNKN'  # Not TVDB mapping. Use custom ASS mapping to pull season/episode
                else:
                    new_season, new_episode, anidbid = AnimeLists.anidb_ep(
                        mappingList, season, episode)  # Try to map
                numbering = 's{}e{}'.format(season, episode) + (
                    '(s{}e{})'.format(new_season, new_episode)
                    if season != new_season and episode != new_episode else '')
                if anidbid and not (new_season == '0' and new_episode == '0'):
                    SaveDict([numbering], AniDB_array, anidbid)
            else:
                continue
    elif source.startswith('anidb') and AniDBid != "":
        full_array, AniDB_array = [AniDBid], {AniDBid: []}
    else:
        full_array, AniDB_array = [], {}

    active_array = full_array if Dict(
        mappingList, 'possible_anidb3'
    ) or source in ("tvdb4", "tvdb6") else AniDB_array.keys(
    )  # anidb3(tvdb)/anidb4(tvdb6) for full relation_map data | tvdb4 bc the above will not be able to know the AniDBid
    Log.Info(
        "Source: {}, AniDBid: {}, Full AniDBids list: {}, Active AniDBids list: {}"
        .format(source, AniDBid, full_array, active_array))
    for anidbid in sorted(AniDB_array, key=common.natural_sort_key):
        Log.Info('[+] {:>5}: {}'.format(anidbid, AniDB_array[anidbid]))
    Log.Info('language_posters: {}'.format(language_posters))

    ### Build list_abs_eps for tvdb 3/4/5 ###
    list_abs_eps, list_sp_eps = {}, []
    if source in ('tvdb3', 'tvdb4'):
        for s in media.seasons:
            for e in media.seasons[s].episodes:
                if s == '0': list_sp_eps.append(e)
                else: list_abs_eps[e] = s
        Log.Info('Present abs eps: {}'.format(list_abs_eps))

    ### Load anidb xmls in tvdb numbering format if needed ###
    for AniDBid in sorted(active_array, key=common.natural_sort_key):
        is_primary_entry = AniDBid == original or len(active_array) == 1

        Log.Info(("--- %s ---" % AniDBid).ljust(157, '-'))
        Log.Info('AniDBid: {}, IsPrimary: {}, url: {}'.format(
            AniDBid, is_primary_entry, ANIDB_HTTP_API_URL + AniDBid))
        Log.Info(("--- %s.series ---" % AniDBid).ljust(157, '-'))

        xml, cache = None, CACHE_1DAY * 6
        xml_cache = common.LoadFileCache(filename=AniDBid + ".xml",
                                         relativeDirectory=os.path.join(
                                             "AniDB", "xml"))[0]
        if xml_cache:  # Pull the enddate and adjust max cache age based on series enddate in relation to now
            ed = GetXml(
                xml_cache,
                'enddate') or datetime.datetime.now().strftime("%Y-%m-%d")
            enddate = datetime.datetime.strptime(
                "{}-12-31".format(ed) if len(ed) == 4 else "{}-{}".format(
                    ed,
                    ([30, 31] if int(ed[-2:]) <= 7 else [31, 30]
                     )[int(ed[-2:]) %
                       2] if ed[-2:] != '02' else 28) if len(ed) == 7 else ed,
                '%Y-%m-%d')
            days_old = (datetime.datetime.now() - enddate).days
            if days_old > 1825:
                cache = CACHE_1DAY * 365  # enddate > 5 years ago => 1 year cache
            elif days_old > 30:
                cache = (
                    days_old * CACHE_1DAY * 365
                ) / 1825  # enddate > 30 days ago => (days_old/5yrs ended = x/1yrs cache)
        if AniDBBan:
            xml = xml_cache  # Ban has been hit in this process' life span (which is transient)
        else:
            xml = common.LoadFile(filename=AniDBid + ".xml",
                                  relativeDirectory=os.path.join(
                                      "AniDB", "xml"),
                                  url=ANIDB_HTTP_API_URL + AniDBid,
                                  cache=cache,
                                  sleep=6,
                                  throttle=['AniDB', CACHE_1HOUR, 100])
        if isinstance(xml, str) and 'banned' in xml:
            global AniDBBan
            AniDBBan = True  # Set ban hit on process level
        if AniDBBan:
            SaveDict(True, AniDB_dict, 'Banned')  # Set ban hit on series level

        if not xml or isinstance(xml, str):
            title, original_title, language_rank = GetAniDBTitle(
                AniDBTitlesDB.xpath(
                    '/animetitles/anime[@aid="{}"]/title'.format(AniDBid)))
            if is_primary_entry:
                Log.Info("[ ] title: {}".format(
                    SaveDict(title, AniDB_dict, 'title')))
                Log.Info("[ ] original_title: {}".format(
                    SaveDict(original_title, AniDB_dict, 'original_title')))
                Log.Info("[ ] language_rank: {}".format(
                    SaveDict(language_rank, AniDB_dict, 'language_rank')))

        elif xml:
            title, original_title, language_rank = GetAniDBTitle(
                xml.xpath('/anime/titles/title'))
            if is_primary_entry:  ### for each main anime AniDBid ###
                Log.Info("[ ] title: {}".format(
                    SaveDict(title, AniDB_dict, 'title')))
                Log.Info("[ ] original_title: {}".format(
                    SaveDict(original_title, AniDB_dict, 'original_title')))
                Log.Info("[ ] language_rank: {}".format(
                    SaveDict(language_rank, AniDB_dict, 'language_rank')))
                if SaveDict(GetXml(xml, 'startdate'), AniDB_dict,
                            'originally_available_at'):
                    Log.Info("[ ] originally_available_at: '{}'".format(
                        AniDB_dict['originally_available_at']))
                if SaveDict(summary_sanitizer(GetXml(
                        xml, 'description')), AniDB_dict, 'summary'
                            ) and not movie and not anidb_numbering and Dict(
                                mappingList,
                                'defaulttvdbseason').isdigit() and mappingList[
                                    'defaulttvdbseason'] in media.seasons:
                    SaveDict(AniDB_dict['summary'], AniDB_dict, 'seasons',
                             mappingList['defaulttvdbseason'], 'summary')

                Log.Info("[ ] rating: '{}'".format(
                    SaveDict(GetXml(xml, 'ratings/permanent'), AniDB_dict,
                             'rating')))

                ### Posters
                if GetXml(xml, 'picture'):
                    rank = 1
                    if 'en' in language_posters:
                        rank = (rank // 30) * 30 * language_posters.index(
                            'en') + rank % 30
                    if 'AniDB' in priority_posters:
                        rank = rank + 6 * priority_posters.index('AniDB')
                    AniDB_dict['posters'] = {
                        ANIDB_PIC_BASE_URL + GetXml(xml, 'picture'):
                        (os.path.join('AniDB', 'poster',
                                      GetXml(xml, 'picture')), rank, None)
                    }  # ANIDB_PIC_THUMB_URL.format(name=GetXml(xml, 'picture').split('.')[0])

                ### genre ###
                RESTRICTED_GENRE = {
                    "18 restricted": 'X',
                    "pornography": 'X',
                    "tv censoring": 'TV-MA',
                    "borderline p**n": 'TV-MA'
                }
                for tag in xml.xpath('tags/tag'):
                    if GetXml(tag, 'name') and tag.get(
                            'weight', '').isdigit() and int(
                                tag.get('weight', '') or '200') >= int(
                                    Prefs['MinimumWeight'] or '200'):
                        SaveDict([string.capwords(GetXml(tag, 'name'), '-')],
                                 AniDB_dict, 'genres')
                        if GetXml(tag, 'name').lower() in RESTRICTED_GENRE:
                            AniDB_dict['content_rating'] = RESTRICTED_GENRE[
                                GetXml(tag, 'name').lower()]
                if Dict(AniDB_dict, 'genres'): AniDB_dict['genres'].sort()
                SaveDict(
                    "Continuing" if GetXml(xml, 'Anime/enddate')
                    == "1970-01-01" else "Ended", AniDB_dict, 'status')
                Log.Info("[ ] genres ({}/{} above {} weight): {}".format(
                    len(Dict(AniDB_dict, 'genres')),
                    len(xml.xpath('tags/tag')),
                    int(Prefs['MinimumWeight'] or 200),
                    Dict(AniDB_dict, 'genres')))
                for element in AniDBMovieSets.xpath(
                        "/anime-set-list/set/anime"):
                    if element.get('anidbid') == AniDBid or element.get(
                            'anidbid') in full_array:
                        node = element.getparent()
                        title, main, language_rank = GetAniDBTitle(
                            node.xpath('titles')[0])
                        if title not in Dict(AniDB_dict,
                                             'collections',
                                             default=[]):
                            Log.Info(
                                "[ ] title: {}, main: {}, language_rank: {}".
                                format(title, main, language_rank))
                            SaveDict([title], AniDB_dict, 'collections')
                            Log.Info(
                                "[ ] collection: AniDBid '%s' is part of movie collection: '%s', related_anime_list: %s"
                                % (AniDBid, title, str(full_array)))
                if not Dict(AniDB_dict, 'collections'):
                    Log.Info(
                        "[ ] collection: AniDBid '%s' is not part of any collection, related_anime_list: %s"
                        % (AniDBid, str(full_array)))

                #roles  ### NEW, NOT IN Plex FrameWork Documentation 2.1.1 ###
                Log.Info(("--- %s.actors ---" % AniDBid).ljust(157, '-'))
                for role in xml.xpath(
                        'characters/character[(@type="secondary cast in") or (@type="main character in")]'
                ):
                    try:
                        if GetXml(role, 'seiyuu') and GetXml(role, 'name'):
                            role_dict = {
                                'role':
                                role.find('name').text,
                                'name':
                                role.find('seiyuu').text,
                                'photo':
                                ANIDB_PIC_BASE_URL +
                                role.find('seiyuu').get('picture')
                            }
                            SaveDict([role_dict], AniDB_dict, 'roles')
                            Log.Info(
                                '[ ] role: {:<20}, name: {:<20}, photo: {}'.
                                format(role_dict['role'], role_dict['name'],
                                       role_dict['photo']))
                    except Exception as e:
                        Log.Info("Seyiuu error: {}".format(e))

            ### Creators ###
            creator_tags = {
                "Animation Work": "studio",
                "Work": "studio",
                "Direction": "directors",
                "Series Composition": "producers",
                "Original Work": "writers",
                "Script": "writers",
                "Screenplay": "writers"
            }
            studios = {}
            creators = {}
            for creator in xml.xpath('creators/name'):
                for tag in creator_tags:
                    if tag != creator.get('type'): continue
                    if creator_tags[tag] == "studio":
                        studios[tag] = creator.text
                    else:
                        SaveDict([creator.text], creators, creator_tags[tag])
            if is_primary_entry:
                Log.Info("[ ] studio: {}".format(
                    SaveDict(
                        Dict(studios,
                             "Animation Work",
                             default=Dict(studios, "Work")), AniDB_dict,
                        'studio')))

            Log.Info("[ ] movie: {}".format(
                SaveDict(GetXml(xml, 'type') == 'Movie', AniDB_dict, 'movie')))
            ### Movie ###
            if movie:
                Log.Info("[ ] year: '{}'".format(
                    SaveDict(
                        GetXml(xml, 'startdate')[0:4], AniDB_dict, 'year')))

                if is_primary_entry:
                    for creator in creators:
                        Log.Info("[ ] {}: {}".format(
                            creator,
                            SaveDict(creators[creator], AniDB_dict, creator)))

                Log.Info(("--- %s.summary info ---" % AniDBid).ljust(157, '-'))

            ### Series ###
            else:
                ### Translate into season/episode mapping
                numEpisodes, totalDuration, mapped_eps, ending_table, op_nb = 0, 0, [], {}, 0
                specials = {
                    'S': [0, 'Special'],
                    'C': [100, 'Opening/Ending'],
                    'T': [200, 'Trailer'],
                    'P': [300, 'Parody'],
                    'O': [400, 'Other']
                }
                movie_ep_groups = {}
                ending_offset = 99
                missing = {'0': [], '1': []}

                ### Episodes (and specials) not always in right order ###
                Log.Info(("--- %s.episodes ---" % AniDBid).ljust(157, '-'))
                Log.Info("[ ] ep creators (creators tag): " + str(creators))
                for ep_obj in sorted(
                        xml.xpath('episodes/episode'),
                        key=lambda x: [
                            int(x.xpath('epno')[0].get('type')),
                            int(
                                x.xpath('epno')[0].text
                                if x.xpath('epno')[0].text.isdigit() else x.
                                xpath('epno')[0].text[1:])
                        ]):

                    ### Title, Season, Episode number, Specials
                    title, main, language_rank = GetAniDBTitle(
                        ep_obj.xpath('title'), [
                            language.strip() for language in
                            Prefs['EpisodeLanguagePriority'].split(',')
                        ])
                    if not anidb_numbering and title == 'Complete Movie':
                        title = ""  # For mapping use meanningful titles
                    epNum = ep_obj.xpath('epno')[0]
                    epNumType = epNum.get('type')
                    season = "1" if epNumType == "1" else "0"
                    if epNumType == "3" and ep_obj.xpath(
                            'title')[0].text.startswith('Ending') and int(
                                epNum.text[1:]) - 1 < ending_offset:
                        ending_offset = int(epNum.text[1:]) - 1
                    if epNumType == "3" and int(
                            epNum.text[1:]) > ending_offset:
                        episode = str(
                            int(epNum.text[1:]) + 150 -
                            ending_offset)  #shifted to 150 for 1st ending.
                    elif epNumType == "1":
                        episode = epNum.text
                    else:
                        episode = str(specials[epNum.text[0]][0] +
                                      int(epNum.text[1:]))
                    numbering = "s{}e{:>3}".format(season, episode)

                    #If tvdb numbering used, save anidb episode meta using tvdb numbering
                    if source.startswith("tvdb") or source.startswith(
                            "anidb") and not movie and max(
                                map(int, media.seasons.keys())) > 1:
                        season, episode = AnimeLists.tvdb_ep(
                            mappingList, season, episode, AniDBid)

                        # Get episode number to absolute number
                        if source in ('tvdb3',
                                      'tvdb4') and season not in ['-1', '0']:
                            if source == 'tvdb4' or season == '1':
                                ms, usl = (
                                    season, True) if source == 'tvdb3' else (
                                        Dict(mappingList, 'absolute_map',
                                             'max_season'),
                                        Dict(mappingList, 'absolute_map',
                                             'unknown_series_length'))
                                if ms and usl:
                                    season = Dict(mappingList,
                                                  'absolute_map',
                                                  episode,
                                                  default=(ms if usl else
                                                           str(int(ms) + 1),
                                                           None))[0]
                            else:
                                try:
                                    episode = list(
                                        Dict(mappingList,
                                             'absolute_map',
                                             default={}).keys())[list(
                                                 Dict(mappingList,
                                                      'absolute_map',
                                                      default={}).values()
                                             ).index((season, episode))]
                                except:
                                    pass

                        if not(season =='0' and episode in list_sp_eps) and \
                           not(source in ('tvdb3', 'tvdb4') and episode in list_abs_eps) and \
                           not(season in media.seasons and episode in media.seasons[season].episodes):
                            Log.Info(
                                '[ ] {} => s{:>1}e{:>3} epNumType: {}'.format(
                                    numbering, season, episode, epNumType))
                            continue

                        ### Series poster as season poster
                        if GetXml(xml, 'picture') and not Dict(
                                AniDB_dict, 'seasons', season, 'posters',
                                ANIDB_PIC_BASE_URL + GetXml(xml, 'picture')):
                            rank = 1
                            if 'en' in language_posters:
                                rank = (rank //
                                        30) * 30 * language_posters.index(
                                            'en') + rank % 30
                            if 'AniDB' in priority_posters:
                                rank = rank + 6 * priority_posters.index(
                                    'AniDB')
                            SaveDict(
                                (os.path.join('AniDB', 'poster',
                                              GetXml(xml,
                                                     'picture')), rank, None),
                                AniDB_dict, 'seasons', season, 'posters',
                                ANIDB_PIC_BASE_URL + GetXml(xml, 'picture'))

                    ### In AniDB numbering, Movie episode group, create key and create key in dict with empty list if doesn't exist ###
                    else:  #if source.startswith("anidb") and not movie and max(map(int, media.seasons.keys()))<=1:

                        ### Movie episode group, create key and create key in dict with empty list if doesn't exist ###
                        key = ''
                        if epNumType == '1' and GetXml(
                                xml, '/anime/episodecount') == '1' and GetXml(
                                    xml, '/anime/type') in ('Movie', 'OVA'):
                            key = '1' if title in (
                                'Complete Movie', 'OVA'
                            ) else title[-1] if title.startswith(
                                'Part ') and title[-1].isdigit() else ''  #'-1'
                            if key: SaveDict([], movie_ep_groups, key)

                        #Episode missing from disk
                        if not season in media.seasons or not episode in media.seasons[
                                season].episodes:
                            Log.Info(
                                '[ ] {} => s{:>1}e{:>3} air_date: {}'.format(
                                    numbering, season, episode,
                                    GetXml(ep_obj, 'airdate')))
                            current_air_date = GetXml(ep_obj,
                                                      'airdate').replace(
                                                          '-', '')
                            current_air_date = int(
                                current_air_date
                            ) if current_air_date.isdigit() and int(
                                current_air_date) > 10000000 else 99999999
                            if int(time.strftime(
                                    "%Y%m%d")) > current_air_date + 1:
                                if epNumType == '1' and key:
                                    SaveDict([numbering], movie_ep_groups, key)
                                elif epNumType in ['1', '2']:
                                    SaveDict([episode], missing, season)
                            continue

                    ### Episodes
                    SaveDict(language_rank, AniDB_dict, 'seasons', season,
                             'episodes', episode, 'language_rank')
                    SaveDict(title, AniDB_dict, 'seasons', season, 'episodes',
                             episode, 'title')
                    Log.Info(
                        '[X] {} => s{:>1}e{:>3} air_date: {} language_rank: {}, title: "{}"'
                        .format(numbering, season, episode,
                                GetXml(ep_obj, 'airdate'), language_rank,
                                title))

                    if GetXml(ep_obj, 'length').isdigit():
                        SaveDict(
                            int(GetXml(ep_obj, 'length')) * 1000 * 60,
                            AniDB_dict, 'seasons', season, 'episodes', episode,
                            'duration'
                        )  # AniDB stores it in minutes, Plex save duration in millisecs
                        if season == "1":
                            numEpisodes, totalDuration = numEpisodes + 1, totalDuration + int(
                                GetXml(ep_obj, 'length'))

                    SaveDict(GetXml(ep_obj, 'rating'), AniDB_dict, 'seasons',
                             season, 'episodes', episode, 'rating')
                    SaveDict(GetXml(ep_obj,
                                    'airdate'), AniDB_dict, 'seasons', season,
                             'episodes', episode, 'originally_available_at')
                    ep_summary = SaveDict(
                        summary_sanitizer(GetXml(ep_obj,
                                                 'summary')), AniDB_dict,
                        'seasons', season, 'episodes', episode, 'summary')
                    Log.Info(' - [ ] summary: {}'.format(
                        (ep_summary[:200]
                         ).replace("\n", "\\n").replace("\r", "\\r") +
                        '..' if len(ep_summary) > 200 else ep_summary))
                    for creator in creators:
                        SaveDict(",".join(creators[creator]), AniDB_dict,
                                 'seasons', season, 'episodes', episode,
                                 creator)

                ### End of for ep_obj...
                Log.Info(("--- %s.summary info ---" % AniDBid).ljust(157, '-'))
                if SaveDict((int(totalDuration) / int(numEpisodes)) * 60 *
                            1000 if int(numEpisodes) else 0, AniDB_dict,
                            'duration'):
                    Log.Info(
                        "Duration: {}, numEpisodes: {}, average duration: {}".
                        format(str(totalDuration), str(numEpisodes),
                               AniDB_dict['duration']))

                ### AniDB numbering Missing Episodes ###
                if source.startswith("anidb") and not movie and max(
                        map(int, media.seasons.keys())) <= 1:
                    if movie_ep_groups:
                        Log.Info(
                            "Movie/OVA Ep Groups: %s" % movie_ep_groups
                        )  #movie_ep_groups: {'1': ['s1e1'], '3': ['s1e4', 's1e5', 's1e6'], '2': ['s1e3'], '-1': []}
                        SaveDict([
                            value for key in movie_ep_groups
                            for value in movie_ep_groups[key]
                            if 0 < len(movie_ep_groups[key]) < int(key)
                        ], missing, '1')
                    for season in sorted(missing):
                        missing_eps = sorted(missing[season],
                                             key=common.natural_sort_key)
                        Log.Info('Season: {} Episodes: {} not on disk'.format(
                            season, missing_eps))
                        if missing_eps:
                            error_log[
                                'Missing Specials' if season ==
                                '0' else 'Missing Episodes'].append(
                                    "AniDBid: %s | Title: '%s' | Missing Episodes: %s"
                                    % (common.WEB_LINK %
                                       (common.ANIDB_SERIE_URL + AniDBid,
                                        AniDBid), AniDB_dict['title'],
                                       str(missing_eps)))

            ### End of if not movie ###

            # Generate relations_map for anidb3/4(tvdb1/6) modes
            for relatedAnime in xml.xpath('/anime/relatedanime/anime'):
                if relatedAnime.get('id') not in Dict(mappingList,
                                                      'relations_map',
                                                      AniDBid,
                                                      relatedAnime.get('type'),
                                                      default=[]):
                    SaveDict([relatedAnime.get('id')],
                             mappingList, 'relations_map', AniDBid,
                             relatedAnime.get('type'))

            # External IDs
            ANNid = GetXml(
                xml,
                "/anime/resources/resource[@type='1']/externalentity/identifier"
            )
            MALid = GetXml(
                xml,
                "/anime/resources/resource[@type='2']/externalentity/identifier"
            )
            #ANFOid = GetXml(xml, "/anime/resources/resource[@type='3']/externalentity/identifier"), GetXml(xml, "/anime/resources/resource[@type='3']/externalentity/identifier")

            # Logs
            if not Dict(AniDB_dict, 'summary'):
                error_log['AniDB summaries missing'].append(
                    "AniDBid: %s" %
                    (common.WEB_LINK %
                     (common.ANIDB_SERIE_URL + AniDBid, AniDBid) +
                     " | Title: '%s'" % Dict(AniDB_dict, 'title')))
            if not Dict(AniDB_dict, 'posters'):
                error_log['AniDB posters missing'].append(
                    "AniDBid: %s" %
                    (common.WEB_LINK %
                     (common.ANIDB_SERIE_URL + AniDBid, AniDBid) +
                     " | Title: '%s'" % Dict(AniDB_dict, 'title')))
            #if not Dict(AniDB_dict, 'studio' ):                                                                                          error_log['anime-list studio logos'].append("AniDBid: %s | Title: '%s' | AniDB has studio '%s' and anime-list has '%s' | "    % (common.WEB_LINK % (ANIDB_SERIE_URL % AniDBid, AniDBid), title, metadata.studio, mapping_studio) + common.WEB_LINK % (ANIDB_TVDB_MAPPING_FEEDBACK % ("aid:" + metadata.id + " " + title, String.StripTags( XML.StringFromElement(xml, encoding='utf8'))), "Submit bug report (need GIT account)"))
            #if metadata.studio       and 'studio' in AniDB_dict and AniDB_dict ['studio'] and AniDB_dict ['studio'] != metadata.studio:  error_log['anime-list studio logos'].append("AniDBid: %s | Title: '%s' | AniDB has studio '%s' and anime-list has '%s' | "    % (common.WEB_LINK % (ANIDB_SERIE_URL % AniDBid, AniDBid), title, metadata.studio, mapping_studio) + common.WEB_LINK % (ANIDB_TVDB_MAPPING_FEEDBACK % ("aid:" + metadata.id + " " + title, String.StripTags( XML.StringFromElement(xml, encoding='utf8'))), "Submit bug report (need GIT account)"))
            #if metadata.studio == "" and 'studio' in AniDB_dict and AniDB_dict ['studio'] == "":                                         error_log['anime-list studio logos'].append("AniDBid: %s | Title: '%s' | AniDB and anime-list are both missing the studio" % (common.WEB_LINK % (ANIDB_SERIE_URL % AniDBid, AniDBid), title) )

            Log.Info("ANNid: '%s', MALid: '%s', xml loaded: '%s'" %
                     (ANNid, MALid, str(xml is not None)))

    Log.Info("--- return ---".ljust(157, '-'))
    Log.Info("relations_map: {}".format(
        DictString(Dict(mappingList, 'relations_map', default={}), 1)))
    Log.Info("AniDB_dict: {}".format(DictString(AniDB_dict, 4)))
    return AniDB_dict, ANNid, MALid
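
The episode-numbering logic in the example above compresses AniDB specials into season 0 with per-type offsets. The sketch below restates just that mapping under stated assumptions (the helper name is illustrative, and the ending shift to 150+ applied in the example is omitted).

# Minimal sketch of the AniDB episode numbering used above: regular episodes go to
# season 1, specials to season 0 with an offset chosen by the epno prefix (S/C/T/P/O).
SPECIAL_OFFSETS = {'S': 0, 'C': 100, 'T': 200, 'P': 300, 'O': 400}

def anidb_season_episode(epno_type, epno_text):
    """Map an AniDB <epno type=...> value to the (season, episode) strings built above."""
    if epno_type == '1':                       # regular episode
        return '1', epno_text
    return '0', str(SPECIAL_OFFSETS.get(epno_text[0], 400) + int(epno_text[1:]))

# e.g. anidb_season_episode('1', '12') -> ('1', '12'); anidb_season_episode('2', 'S2') -> ('0', '2')
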
Example #24
0
def GetMetadata(movie, MALid):
    Log.Info("=== MyAnimeList.GetMetadata() ===".ljust(157, '='))
    MAL_HTTP_API_URL = "http://fribbtastic-api.net/fribbtastic-api/services/anime?id="
    MAL_PREFIX = "https://myanimelist.cdn-dena.com"  # Some links in the XML will come from TheTVDB, not adding those....
    MyAnimeList_dict = {}

    Log.Info("MALid: '%s'" % MALid)
    if not MALid or not MALid.isdigit(): return MyAnimeList_dict

    Log.Info("--- series ---".ljust(157, '-'))
    xml = common.LoadFile(filename=MALid + ".xml",
                          relativeDirectory=os.path.join('MyAnimeList', 'xml'),
                          url=MAL_HTTP_API_URL + MALid,
                          cache=CACHE_1DAY * 7)
    if isinstance(xml, str):
        Log.Error('Invalid str returned: "{}"'.format(xml))
    elif xml:
        Log.Info("[ ] title: {}".format(
            SaveDict(GetXml(xml, 'title'), MyAnimeList_dict, 'title')))
        Log.Info("[ ] summary: {}".format(
            SaveDict(GetXml(xml, 'synopsis'), MyAnimeList_dict, 'summary')))
        Log.Info("[ ] score: {}".format(
            SaveDict(GetXml(xml, 'rating'), MyAnimeList_dict, 'score')))
        #Log.Info("[ ] rating: {}"                 .format(SaveDict( GetXml(xml, 'content_rating').split(" ")[0], MyAnimeList_dict, 'rating'   )))
        Log.Info("[ ] originally_available_at: {}".format(
            SaveDict(GetXml(xml, 'firstAired'), MyAnimeList_dict,
                     'originally_available_at')))

        #for item in xml.xpath('//anime/genres/genre' or []):  SaveDict([item.text], MyAnimeList_dict, 'genres')
        if GetXml(xml, '//anime/genres/genre'):
            Log.Info("[ ] genres: {}".format(
                SaveDict(
                    sorted([
                        item.text for item in xml.xpath('//anime/genres/genre')
                    ]), MyAnimeList_dict, 'genres')))
        if GetXml(xml, 'status') == 'Currently Airing':
            Log.Info("[ ] status: {}".format(
                SaveDict("Continuing", MyAnimeList_dict, 'status')))
        if GetXml(xml, 'status') == 'Finished Airing':
            Log.Info("[ ] status: {}".format(
                SaveDict("Ended", MyAnimeList_dict, 'status')))

        Log.Info("--- episodes ---".ljust(157, '-'))
        for item in xml.xpath('//anime/episodes/episode') or []:
            ep_number, ep_title, ep_air = GetXml(
                item,
                'episodeNumber'), GetXml(item,
                                         'engTitle'), GetXml(item, 'aired')
            Log.Info('[ ] s1e{:>3} air_date: {}, title: "{}"'.format(
                ep_number, ep_air, ep_title))
            SaveDict(ep_title, MyAnimeList_dict, 'seasons', "1", 'episodes',
                     ep_number, 'title')
            SaveDict(ep_air, MyAnimeList_dict, 'seasons', "1", 'episodes',
                     ep_number, 'originally_available_at')

        Log.Info("--- images ---".ljust(157, '-'))
        for item in xml.xpath('//anime/covers/cover'):
            Log.Info("[ ] poster: {}".format(
                SaveDict(
                    ("MyAnimeList/" + "/".join(item.text.split('/')[3:]), 50,
                     None) if item.text.startswith(MAL_PREFIX) else "",
                    MyAnimeList_dict, 'posters', item.text)))
        for item in xml.xpath('//anime/backgrounds/background'):
            Log.Info("[ ] art: {}".format(
                SaveDict(
                    ("MyAnimeList/" + "/".join(item.text.split('/')[3:]), 50,
                     None) if item.text.startswith(MAL_PREFIX) else "",
                    MyAnimeList_dict, 'art', item.text)))
        for item in xml.xpath('//anime/banners/banner'):
            Log.Info("[ ] banner: {}".format(
                SaveDict(
                    ("MyAnimeList/" + "/".join(item.text.split('/')[3:]), 50,
                     None) if item.text.startswith(MAL_PREFIX) else "",
                    MyAnimeList_dict, 'banners', item.text)))

    Log.Info("--- return ---".ljust(157, '-'))
    Log.Info("MyAnimeList_dict: {}".format(DictString(MyAnimeList_dict, 4)))
    return MyAnimeList_dict
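
As a rough illustration of the data the example above consumes, the following sketch fetches the same proxy XML outside the Plex framework and lists episode fields; requests, lxml and the helper name list_mal_episodes() are assumptions, and the endpoint (taken from the example) may no longer be reachable.

# Standalone sketch: fetch the MAL proxy XML and yield per-episode fields.
import requests
from lxml import etree

MAL_HTTP_API_URL = "http://fribbtastic-api.net/fribbtastic-api/services/anime?id="

def list_mal_episodes(malid):
    """Yield (episodeNumber, engTitle, aired) tuples from the proxy XML for a MAL id."""
    root = etree.fromstring(requests.get(MAL_HTTP_API_URL + malid, timeout=10).content)
    for ep in root.xpath('//anime/episodes/episode'):
        yield (ep.findtext('episodeNumber'), ep.findtext('engTitle'), ep.findtext('aired'))
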
Example #25
0
def GetMetadata(media, movie, error_log, lang, metadata_source, AniDBid, TVDBid, IMDbid, mappingList, AniDB_movie):
  ''' TVDB - Load serie JSON
  '''
  Log.Info("=== TheTVDB.GetMetadata() ===".ljust(157, '='))
  TheTVDB_dict      = {}
  max_season        = 0
  anidb_numbering   = metadata_source=="anidb" and (movie or max(map(int, media.seasons.keys()))<=1)
  anidb_prefered    = anidb_numbering and Dict(mappingList, 'defaulttvdbseason') != '1'
  language_series   = [language.strip() for language in Prefs['SerieLanguagePriority'  ].split(',') if language.strip() not in ('x-jat', 'zh-Hans', 'zh-Hant', 'zh-x-yue', 'zh-x-cmn', 'zh-x-nan', 'main')]
  language_episodes = [language.strip() for language in Prefs['EpisodeLanguagePriority'].split(',') if language.strip() not in ('x-jat', 'zh-Hans', 'zh-Hant', 'zh-x-yue', 'zh-x-cmn', 'zh-x-nan', 'main')]
  Log.Info("TVDBid: '{}', IMDbid: '{}', language_series : {}, language_episodes: {}".format(TVDBid, IMDbid, language_series , language_episodes))
  
  if not TVDBid.isdigit(): Log.Info('TVDBid non-digit');  return TheTVDB_dict, IMDbid

  ### TVDB Series JSON ###
  Log.Info("--- series ---".ljust(157, '-'))
  json = {}
  if lang not in language_series:    language_series.insert(0, lang) #for summary in lang (library setting) language
  if 'en' not in language_series:    language_series.insert(0, 'en') #for failover title
  if lang not in language_episodes:  language_episodes.append(lang) #for summary in lang (library setting) language
  if 'en' not in language_episodes:  language_episodes.append('en') #for failover title
  for language in language_series:
    json[language] = Dict(common.LoadFile(filename='series_{}.json'.format(language), relativeDirectory="TheTVDB/json/"+TVDBid, url=(TVDB_SERIES_URL % TVDBid)+'?'+language, cache=CACHE_1DAY, headers={'Content-type': 'application/json', 'Accept-Language': language}), 'data')
    if Dict(json[language], 'seriesName'):  # and not Dict(TheTVDB_dict, 'language_rank'):
      SaveDict( language_series.index(language) if not anidb_prefered else len(language_series), TheTVDB_dict, 'language_rank')
      Log.Info("[ ] language_rank: {}"          .format(Dict(TheTVDB_dict, 'language_rank')))
      Log.Info("[ ] title: {}"                  .format(SaveDict( Dict(json[language], 'seriesName') or Dict(serie2_json, 'seriesName'), TheTVDB_dict, 'title'                  )))
      Log.Info("[ ] original_title: {}"         .format(SaveDict( Dict(json[language], 'seriesName') or Dict(serie2_json, 'seriesName'), TheTVDB_dict, 'original_title'         )))
    if Dict(json, lang) and (Dict(json, lang, 'overview') or Dict(TheTVDB_dict, 'language_rank')):  break  #only need json in lang for summary, in 'en' for most things
  if not anidb_prefered:  SaveDict( Dict(json, lang, 'overview'  ).strip(" \n\r") or Dict(json, 'en', 'overview').strip(" \n\r"), TheTVDB_dict, 'summary')  
  if Dict(json, lang):
    #format:   { "id","seriesId", "airsDayOfWeek", "imdbId", "zap2itId", "added", "addedBy", "lastUpdated", "seriesName", "aliases", "banner", "status", 
    #             "firstAired", "network", "networkId", "runtime", "genre, "overview", "airsTime", "rating" , "siteRating", "siteRatingCount" }
    Log.Info("[ ] IMDbid: {}"                 .format(SaveDict(        Dict(json[lang], 'imdbId' or IMDbid),                              TheTVDB_dict, 'IMDbid'                 )))
    Log.Info("[ ] zap2itId: {}"               .format(SaveDict(        Dict(json[lang], 'zap2it_id' ),                                    TheTVDB_dict, 'zap2itId'               )))
    Log.Info("[ ] content_rating: {}"         .format(SaveDict(        Dict(json[lang], 'rating'    ),                                    TheTVDB_dict, 'content_rating'         )))
    Log.Info("[ ] originally_available_at: {}".format(SaveDict(        Dict(json[lang], 'firstAired'),                                    TheTVDB_dict, 'originally_available_at')))
    Log.Info("[ ] studio: {}"                 .format(SaveDict(        Dict(json[lang], 'network'   ),                                    TheTVDB_dict, 'studio'                 )))
    Log.Info("[ ] rating: {}"                 .format(SaveDict(        Dict(json[lang], 'siteRating'),                                    TheTVDB_dict, 'rating'                 )))
    Log.Info("[ ] status: {}"                 .format(SaveDict(        Dict(json[lang], 'status'    ),                                    TheTVDB_dict, 'status'                 )))
    Log.Info("[ ] genres: {}"                 .format(SaveDict( sorted(Dict(json[lang], 'genre')),                                        TheTVDB_dict, 'genres'                 )))
    if Dict(json[lang], 'runtime') and Dict(json[lang], 'runtime').isdigit():
      Log.Info('[ ] duration: {}'             .format(SaveDict(     int(Dict(json[lang], 'runtime'))*60*1000, TheTVDB_dict, 'duration'               )))  #in ms in plex
    if Dict(json[lang], 'banner'):
      SaveDict((os.path.join('TheTVDB', 'banner', Dict(json[lang], 'banner')), 1, None), TheTVDB_dict, 'banners', TVDB_IMG_ROOT+Dict(json[lang], 'banner'))
      Log.Info('[ ] banner: {}'               .format(Dict(TheTVDB_dict, 'banners')))
    
    ### TVDB Series Actors JSON ###
    Log.Info("--- actors ---".ljust(157, '-'))
    actor_json = Dict(common.LoadFile(filename='actors_{}.json'.format(lang), relativeDirectory="TheTVDB/json/"+TVDBid, url=TVDB_ACTORS_URL % TVDBid, cache=CACHE_1DAY, headers={'Content-type': 'application/json', 'Accept-Language': lang}), 'data')
    if actor_json:               #JSON format: 'data': [{"seriesId", "name", "image", "lastUpdated", "imageAuthor", "role", "sortOrder", "id", "imageAdded", },...]
      for role in actor_json or []:
        try:
          role_dict = {'role': Dict(role, 'role'), 'name': Dict(role, 'name'), 'photo': TVDB_IMG_ROOT + role['image'] if Dict(role, 'image') else ''}
          SaveDict([role_dict], TheTVDB_dict, 'roles')
          Log.Info("[ ] role: {:<50}, name: {:<20}, photo: {}".format(role_dict['role'], role_dict['name'], role_dict['photo']))
        except Exception as e:  Log.Info(" role: {}, error: '{}'".format(str(role), str(e)))
      #common.DisplayDict(actor_json, ['role', 'name', 'image'])
      
    ### Load pages of episodes ###
    Log.Info("--- episodes ---".ljust(157, '-'))
    episodes_json, sorted_episodes_json, next_page = [], {}, 1
    while next_page not in (None, '', 'null'):
      episodes_json_page = common.LoadFile(filename='episodes_page{}_{}.json'.format(next_page, lang), relativeDirectory="TheTVDB/json/"+TVDBid, url=TVDB_EPISODES_URL % (TVDBid, next_page), cache=CACHE_1DAY, headers={'Content-type': 'application/json', 'Accept-Language': lang})
      next_page          = Dict(episodes_json_page, 'links', 'next')
      episodes_json.extend(Dict(episodes_json_page, 'data'))
    for episode_json in episodes_json: sorted_episodes_json['s{:02d}e{:03d}'.format(Dict(episode_json, 'airedSeason'), Dict(episode_json, 'airedEpisodeNumber'))] = episode_json
    
    ### Build list_abs_eps for tvdb 3/4/5 ###
    list_abs_eps, list_sp_eps={}, []
    if metadata_source in ('tvdb3', 'tvdb4'):
      for s in media.seasons:
        for e in media.seasons[s].episodes:
          if s=='0':  list_sp_eps.append(e)
          else:       list_abs_eps[e]=s
      Log.Info('Present abs eps: {}'.format(list_abs_eps))
    
    ### episode loop ###
    tvdb_special_missing, summary_missing_special, summary_missing, summary_present, episode_missing, episode_missing_season, episode_missing_season_all, abs_number, ep_count = [], [], [], [], [], [], True, 0, 0
    for key in sorted(sorted_episodes_json):
      
      # Episode and Absolute number calculation engine, episode translation
      episode_json = sorted_episodes_json[key]
      episode      = str(Dict(episode_json, 'airedEpisodeNumber'))
      season       = str(Dict(episode_json, 'airedSeason'       ))
      numbering    = "s{}e{}".format(season, episode)
      
      # Replace all the individual episodes reported as missing with a single season 'sX' entry
      if episode=="1":
        if not episode_missing_season_all:  episode_missing.extend(episode_missing_season)
        elif episode_missing_season:
          first_entry, last_entry = episode_missing_season[0], episode_missing_season[-1]
          fm = re.match(r'((?P<abs>\d+) \()?s(?P<s>\d+)e(?P<e>\d+)\)?', first_entry).groupdict()
          lm = re.match(r'((?P<abs>\d+) \()?s(?P<s>\d+)e(?P<e>\d+)\)?', last_entry ).groupdict()
          episode_missing.append("s{}e{}-{}".format(fm['s'], fm['e'], lm['e']) if fm['abs'] is None else "{}-{} (s{}e{}-{})".format(fm['abs'], lm['abs'], fm['s'], fm['e'], lm['e']))
        episode_missing_season, episode_missing_season_all = [], True
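        # Worked example (illustrative values): episode_missing_season == ['2 (s1e2)', '3 (s1e3)', '4 (s1e4)']
        # yields fm == {'abs':'2','s':'1','e':'2'} and lm == {'abs':'4','s':'1','e':'4'}, so the single
        # entry appended above is '2-4 (s1e2-4)'; without absolute numbers it would be 's1e2-4'.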

      # Get the max season number from TVDB API
      if int(season) > max_season:  max_season = int(season)
      
      ### ep translation
      anidbid=""
      abs_number = Dict(episode_json, 'absoluteNumber', default=0 if season=='0' else abs_number+1)
      if anidb_numbering:
        if Dict(mappingList, 'defaulttvdbseason_a'):  season, episode          = '1', str(abs_number)
        else:                                         season, episode, anidbid = anidb_ep(mappingList, season, episode)
      elif metadata_source=='tvdb3':  
        episode             = str(abs_number)
      elif metadata_source=='tvdb4':  
        ms, usl                         = Dict(mappingList, 'absolute_map', 'max_season'), Dict(mappingList, 'absolute_map', 'unknown_series_length')
        if ms and usl:  season, episode = Dict(mappingList, 'absolute_map', str(abs_number), default=(ms if usl else str(int(ms)+1), None))[0], str(abs_number)
        if season not in media.seasons or episode not in media.seasons[season].episodes:  #tvdb4 with custom season folder mapping
          for s in media.seasons:
            if str(abs_number) in media.seasons[s].episodes:  #if str(abs_number) in list_abs_eps
              season, episode = s, str(abs_number)
              break      
      elif metadata_source=='tvdb5':  
        episode, abs_number = str(Dict(episode_json, 'absoluteNumber') or abs_number), int(Dict(episode_json, 'absoluteNumber') or abs_number)
      
      # Record absolute number mapping for AniDB metadata pull
      if metadata_source=='tvdb3':  SaveDict((str(Dict(episode_json, 'airedSeason')), str(Dict(episode_json, 'airedEpisodeNumber'))), mappingList, 'absolute_map', str(abs_number))

      ### Missing summaries logs ###
      if Dict(episode_json, 'overview'):  summary_present.append(numbering)
      elif season!='0':                   summary_missing.append(numbering)
      else:                       summary_missing_special.append(numbering)
      
      ### Check for Missing Episodes ###
      is_missing = False
      if not(str(Dict(episode_json, 'airedSeason'))=='0' and str(Dict(episode_json, 'airedEpisodeNumber')) in list_sp_eps) and \
         not(metadata_source in ('tvdb3', 'tvdb4') and str(abs_number) in list_abs_eps) and \
         not(not movie and season in media.seasons and episode in media.seasons[season].episodes):
        is_missing = True
        Log.Info('[ ] {:>7} s{:0>2}e{:0>3} anidbid: {:>7} air_date: {}'.format(numbering, season, episode, anidbid, Dict(episode_json, 'firstAired')))
        air_date = Dict(episode_json, 'firstAired')
        air_date = int(air_date.replace('-','')) if air_date.replace('-','').isdigit() and int(air_date.replace('-','')) > 10000000 else 99999999
        if int(time.strftime("%Y%m%d")) <= air_date+1:  pass #Log.Info("TVDB - Episode '{}' missing but not aired/missing '{}'".format(numbering, air_date))
        elif season=='0':                               tvdb_special_missing.append(episode)
        elif metadata_source!='tvdb6':                  episode_missing_season.append( str(abs_number)+" ("+numbering+")" if metadata_source in ('tvdb3', 'tvdb4') else numbering)
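        # Note (illustrative dates): a 'firstAired' of '2019-05-07' becomes the int 20190507 above; missing
        # or non-date values fall back to 99999999, so unaired episodes are never reported as missing.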
        
      ### File present on disk
      if not is_missing or metadata_source in ["tvdb", "tvdb6"]:  # Only pull all if anidb3(tvdb)/anidb4(tvdb6) usage for tvdb ep/season adjustments
        episode_missing_season_all = False
        if not is_missing:       Log.Info('[X] {:>7} s{:0>2}e{:0>3} anidbid: {:>7} air_date: {} abs_number: {}, title: {}'.format(numbering, season, episode, anidbid, Dict(episode_json, 'firstAired'), abs_number, Dict(episode_json, 'episodeName')))
        if not anidb_numbering:  SaveDict( abs_number, TheTVDB_dict, 'seasons', season, 'episodes', episode, 'absolute_index')
        SaveDict( Dict(json[lang]  , 'rating'    ), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'content_rating'         )
        SaveDict( Dict(TheTVDB_dict, 'duration'  ), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'duration'               )
        SaveDict( Dict(episode_json, 'firstAired'), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'originally_available_at')
        
        # Title from serie page
        rank, title = len(language_episodes)+1, ''
        if Dict(episode_json, 'episodeName'):
          rank  = language_episodes.index(lang) if lang in language_episodes else len(language_episodes)
          title = Dict(episode_json, 'episodeName')
          Log.Info(" - [1] title:   [{}] {}".format(language_episodes[rank], title))
        
        #Summary from serie page
        if Dict(episode_json, 'overview').strip(" \n\r"):
          SaveDict( Dict(episode_json, 'overview').strip(" \n\r"), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'summary' )
          Log.Info(' - [1] summary: [{}] {}'.format(lang, Dict(TheTVDB_dict, 'seasons', season, 'episodes', episode, 'summary' )))
                
        ### Ep advance information ###
        ep_count += 1
        lang2 = 'en' if len(language_episodes)<=1 else language_episodes[1]
        episode_details_json = Dict(common.LoadFile(filename='episode_{}_{}.json'.format(Dict(episode_json, 'id'), lang2), relativeDirectory="TheTVDB/json/"+TVDBid, url=TVDB_EPISODE_DETAILS_URL + str(Dict(episode_json, 'id')), cache=CACHE_1DAY, headers={'Content-type': 'application/json', 'Accept-Language': lang2}), 'data')
        if episode_details_json:
          
          # Std ep info loaded for Library language, then details for 1st language, loading other languages if needed
          if lang2 in language_episodes and language_episodes.index(lang2)<rank and Dict(episode_details_json, 'language', 'episodeName')==lang2 and Dict(episode_details_json, 'episodeName'):
            rank  = language_episodes.index(lang2)
            title = Dict(episode_details_json, 'episodeName')
            Log.Info(" - [2] title:   [{}] {}".format(language_episodes[rank], title))
            
          #Summary
          if not Dict(TheTVDB_dict, 'seasons', season, 'episodes', episode, 'summary') and Dict(episode_details_json, 'overview'):
            SaveDict( Dict(episode_details_json, 'overview').strip(" \n\r"), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'summary')
            Log.Info(' - [2] summary: [{}] {}'.format(lang2, Dict(TheTVDB_dict, 'seasons', season, 'episodes', episode, 'summary' )))
        
          SaveDict( Dict(episode_details_json, 'writers'            ), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'writers'    )
          SaveDict( Dict(episode_details_json, 'directors'          ), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'directors'  )
          SaveDict( Dict(episode_details_json, 'siteRating'         ), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'rating'     )
          #SaveDict( Dict(episode_details_json, 'guestStars'         ), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'guest_stars') 
          
          # Episode screenshot/Thumbnail
          if Dict(episode_details_json, 'filename'):
            SaveDict((str("TheTVDB/episodes/"+ os.path.basename(Dict(episode_details_json, 'filename'))), 1, None), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'thumbs', str(TVDB_IMG_ROOT+Dict(episode_details_json, 'filename')))
            Log.Info(' - [ ] thumb: {}'.format(TVDB_IMG_ROOT+Dict(episode_details_json, 'filename') if Dict(episode_details_json, 'filename') else ''))
        
        #Ep title fallback (first lang title comes from ep list, second from ep details)
        for lang_rank, language in enumerate(language_episodes[2:rank-1] if len(language_episodes)>1 and rank>=2 and not title else []):
          if not language:  continue
          episode_details_json = Dict(common.LoadFile(filename='episode_{}_{}.json'.format(Dict(episode_json, 'id'), language), relativeDirectory="TheTVDB/json/"+TVDBid, url=TVDB_EPISODE_DETAILS_URL + str(Dict(episode_json, 'id')), cache=CACHE_1DAY, headers={'Content-type': 'application/json', 'Accept-Language': language}), 'data', default={})
          if Dict(episode_details_json, 'episodeName') :  
            title = Dict(episode_details_json, 'episodeName')
            rank  = lang_rank
            Log.Info(" - [3] title:   [{}] {}".format(language_episodes[rank], title))
          if not Dict(TheTVDB_dict, 'seasons', season, 'episodes', episode, 'summary') and Dict(episode_details_json, 'overview'):
            SaveDict( Dict(episode_details_json, 'overview')[:160].strip(" \n\r"), TheTVDB_dict, 'seasons', season, 'episodes', episode, 'summary')
            Log.Info(' - [3] summary: [{}] {}'.format(language_episodes[lang_rank], Dict(TheTVDB_dict, 'seasons', season, 'episodes', episode, 'summary' )))
          if title and Dict(TheTVDB_dict, 'seasons', season, 'episodes', episode, 'summary'):  break
        SaveDict( title, TheTVDB_dict, 'seasons', season, 'episodes', episode, 'title'        )
        SaveDict( rank , TheTVDB_dict, 'seasons', season, 'episodes', episode, 'language_rank')      
    
    # (last season) Replace all the individual episodes reported as missing with a single season 'sX' entry
    if not episode_missing_season_all:  episode_missing.extend(episode_missing_season)
    elif episode_missing_season:
      first_entry, last_entry = episode_missing_season[0], episode_missing_season[-1]
      fm = re.match(r'((?P<abs>\d+) \()?s(?P<s>\d+)e(?P<e>\d+)\)?', first_entry).groupdict()
      lm = re.match(r'((?P<abs>\d+) \()?s(?P<s>\d+)e(?P<e>\d+)\)?', last_entry ).groupdict()
      episode_missing.append("s{}e{}-{}".format(fm['s'], fm['e'], lm['e']) if fm['abs'] is None else "{}-{} (s{}e{}-{})".format(fm['abs'], lm['abs'], fm['s'], fm['e'], lm['e']))

    # Set the min/max season to ints & update max value to the next min-1 to handle multi tvdb season anidb entries
    map_min_values = [int(Dict(mappingList, 'season_map')[x]['min']) for x in Dict(mappingList, 'season_map', default={}) for y in Dict(mappingList, 'season_map')[x] if y=='min']
    for entry in Dict(mappingList, 'season_map', default={}):
      entry_min, entry_max = int(mappingList['season_map'][entry]['min']), int(mappingList['season_map'][entry]['max'])
      while entry_min!=0 and entry_max+1 not in map_min_values and entry_max < max_season:  entry_max += 1
      mappingList['season_map'][entry] = {'min': entry_min, 'max': entry_max}
    SaveDict(max_season, mappingList, 'season_map', 'max_season')
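    # Worked example (illustrative): season_map {'2001': {'min': 1, 'max': 1}, '2002': {'min': 2, 'max': 2}}
    # with max_season 4 leaves '2001' at max 1 (since 2 is another entry's min) and widens '2002' to max 4.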

    ### Logging ###
    if not movie:
      if summary_missing:          error_log['Missing Episode Summaries'].append("TVDBid: %s | Title: '%s' | Missing Episode Summaries: %s" % (common.WEB_LINK % (common.TVDB_SERIE_URL + TVDBid, TVDBid), Dict(TheTVDB_dict, 'title'), str(summary_missing        )))
      if summary_missing_special:  error_log['Missing Special Summaries'].append("TVDBid: %s | Title: '%s' | Missing Special Summaries: %s" % (common.WEB_LINK % (common.TVDB_SERIE_URL + TVDBid, TVDBid), Dict(TheTVDB_dict, 'title'), str(summary_missing_special)))
    if metadata_source.startswith("tvdb") or metadata_source.startswith("anidb") and not movie and max(map(int, media.seasons.keys()))>1:
      if episode_missing:          error_log['Missing Episodes'         ].append("TVDBid: %s | Title: '%s' | Missing Episodes: %s"          % (common.WEB_LINK % (common.TVDB_SERIE_URL + TVDBid, TVDBid), Dict(TheTVDB_dict, 'title'), str(episode_missing        )))
      if tvdb_special_missing:     error_log['Missing Specials'         ].append("TVDBid: %s | Title: '%s' | Missing Specials: %s"          % (common.WEB_LINK % (common.TVDB_SERIE_URL + TVDBid, TVDBid), Dict(TheTVDB_dict, 'title'), str(tvdb_special_missing   )))
      #Log.Debug("Episodes without Summary: " + str(sorted(summary_missing, key=natural_sort_key)))
      
    ### Picture types JSON download ###
    Log.Info("--- images ---".ljust(157, '-'))
    language_posters = [language.strip() for language in Prefs['PosterLanguagePriority'].split(',')]
    priority_posters = [  source.strip() for source   in Prefs['posters'               ].split(',')]
    Log.Info('language_posters: {}'.format(language_posters))
    for language in language_posters:
      try:     bannerTypes = Dict(common.LoadFile(filename='images_{}.json'.format(language), relativeDirectory="TheTVDB/json/"+TVDBid, url=(TVDB_SERIES_IMG_INFO_URL % TVDBid), cache=0, headers={'Content-type': 'application/json', 'Accept-Language': language}), 'data', default={})
      except:  Log.Info("Invalid image JSON from url: " + TVDB_SERIES_IMG_INFO_URL % TVDBid)
      else:             #JSON format = {"fanart", "poster", "season", "seasonwide", "series"}
        metanames         = {'fanart': "art", 'poster': "posters", 'series': "banners", 'season': "seasons", 'seasonwide': 'seasonwide'}#
        count_valid       = {key: 0 for key in metanames}
        anidb_offset      = sorted((Dict(mappingList, 'poster_id_array', TVDBid) or {}).keys()).index(AniDBid) if AniDBid and AniDBid in Dict(mappingList, 'poster_id_array', TVDBid) else 0  
        language_priority = [item.strip() for item in Prefs['EpisodeLanguagePriority'].split(',')]
        Log.Info("bannerTypes: {}, anidb_offset: {}, AniDBid: {}, anidb_array: {}".format(bannerTypes, anidb_offset, AniDBid, str((Dict(mappingList, 'poster_id_array', TVDBid) or {}).keys())))
        
        #Loop per banner type ("fanart", "poster", "season", "series") skip 'seasonwide' - Load bannerType images list JSON
        for bannerType in bannerTypes or []:
          if bannerTypes[bannerType]==0 or bannerType in ('seasonwide', 'series') or movie and not bannerType in ('fanart', 'poster'):  continue  #Loop if no images
          #if anidb_numbering and Dict(mappingList, 'defaulttvdbseason') != '1' and bannerType=='poster':  continue  #skip if anidb numbered serie mapping to season 0 or 2+
          
          Log.Info(("--- images.%s ---" % bannerType).ljust(157, '-'))
          try:     images = Dict( common.LoadFile(filename='images_{}_{}.json'.format(bannerType, language), relativeDirectory="TheTVDB/json/"+TVDBid, url=TVDB_SERIES_IMG_QUERY_URL.format(TVDBid, bannerType), cache=CACHE_1DAY, headers={'Accept-Language': language}), 'data', default={})
          except:  images = {};  Log.Info("Bad image type query data for TVDB id: %s (bannerType: %s)" % (TVDBid, bannerType)) 
          else:
            images = sorted(images, key = lambda x: Dict(x, "ratingsInfo", "average", default=0), reverse=True)
            for image in images:  #JSON format = {"data": [{"id", "keyType", "subKey"(season/graphical/text), "fileName", "resolution", "ratingsInfo": {"average", "count"}, "thumbnail"}]}
              
              #rank
              rank = 1 if bannerType=='poster' and anidb_offset == divmod(count_valid['poster'], Dict(bannerTypes, 'poster', default=0))[1] else count_valid[bannerType]+2
              if language  in language_posters:  rank = (rank//30)*30*language_posters.index(language)+rank%30
              if 'TheTVDB' in priority_posters:  rank = rank+ 6*priority_posters.index('TheTVDB')
              rank = rank + language_posters.index(language)*20
              if AniDB_movie: rank = rank+Dict(bannerTypes, 'poster', default=0) if rank+Dict(bannerTypes, 'poster', default=0)<99 else 99
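              # Rank example (illustrative, AniDB_movie false): first fanart, language_posters ['en','ja'] with
              # language 'en', priority_posters ['FanartTV','TheTVDB']: rank starts at 0+2=2, stays 2 after the
              # language remap, becomes 2+6*1=8 for the TheTVDB priority, then +0*20 leaves it at 8.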
              
              ### Adding picture ###
              thumbnail = TVDB_IMG_ROOT + image['thumbnail'] if Dict(image, 'thumbnail') else None
              Log.Info("[!] bannerType: {:>7} subKey: {:>9} rank: {:>3} filename: {} thumbnail: {} resolution: {} average: {} count: {}".format( metanames[bannerType], Dict(image, 'subKey'), rank, TVDB_IMG_ROOT + Dict(image, 'fileName'), TVDB_IMG_ROOT + Dict(image, 'thumbnail'), Dict(image, 'resolution'), Dict(image, 'ratingsInfo','average'), Dict(image, 'ratingsInfo', 'average', 'count') ))
              if bannerType=='season':  #tvdb season posters or anidb specials and defaulttvdb season  ## season 0 et empty+ season ==defaulttvdbseason(a=1)
                if not anidb_numbering:  SaveDict(('TheTVDB/'+image['fileName'], rank, thumbnail), TheTVDB_dict, 'seasons', str(image['subKey']), 'posters', TVDB_IMG_ROOT + image['fileName'])
                else:
                  if str(image['subKey']) in [Dict(mappingList, 'defaulttvdbseason')]:
                    SaveDict(('TheTVDB/'+image['fileName'], rank, thumbnail), TheTVDB_dict, 'posters', TVDB_IMG_ROOT + image['fileName'])
                  if str(image['subKey']) in ['0', Dict(mappingList, 'defaulttvdbseason')]:
                    SaveDict(('TheTVDB/'+image['fileName'], 1 if rank==3 else 3 if rank==1 else rank, thumbnail), TheTVDB_dict, 'seasons', '0' if str(image['subKey'])=='0' else '1', 'posters', TVDB_IMG_ROOT + image['fileName'])  #if anidb_numbering else str(image['subKey'])
              else:
                new_rank = rank + 10 if anidb_numbering and Dict(mappingList, 'defaulttvdbseason') != '1' else rank
                SaveDict(('TheTVDB/'+image['fileName'], new_rank, thumbnail), TheTVDB_dict, metanames[bannerType], TVDB_IMG_ROOT + image['fileName'])   #use art + posters tvdb
              #if bannerType == 'season':  
              #  if anidb_numbering and Dict(mappingList, 'defaulttvdbseason')==str(image['subKey']):
              #    SaveDict(('TheTVDB/'+image['fileName'], 1 if rank==3 else 3 if rank==1 else rank, thumbnail), TheTVDB_dict, 'seasons', '0' if str(image['subKey'])=='0' else '1', 'posters', TVDB_IMG_ROOT + image['fileName'])
              #  elif not anidb_numbering:  
              #    season = str(int(image['subKey'])+(0 if Dict(mappingList, 'defaulttvdbseason')=="0" or not Dict(mappingList, 'defaulttvdbseason').isdigit() else int(Dict(mappingList, 'defaulttvdbseason'))-1))
              #    SaveDict(   ('TheTVDB/'+image['fileName'], rank, thumbnail), TheTVDB_dict, 'seasons', season, 'posters', TVDB_IMG_ROOT + image['fileName'])
              #if bannerType != 'season':
              #  SaveDict(('TheTVDB/'+image['fileName'], rank, thumbnail), TheTVDB_dict, metanames[bannerType],        TVDB_IMG_ROOT + image['fileName'])
              count_valid[bannerType] = count_valid[bannerType] + 1  #Otherwise with += SyntaxError: Line 142: Augmented assignment of object items and slices is not allowed
              
        #Log.Info("Posters : {}/{}, Season posters: {}/{}, Art: {}/{}".format(count_valid['poster'], Dict(bannerTypes, 'poster'), count_valid['season'], Dict(bannerTypes, 'season') or 0, count_valid['fanart'], Dict(bannerTypes, 'fanart')))
        if not Dict(bannerTypes, 'poster'):  error_log['TVDB posters missing'       ].append("TVDBid: %s | Title: '%s'" % (common.WEB_LINK % (common.TVDB_SERIE_URL + TVDBid, TVDBid), Dict(TheTVDB_dict, 'title')))
        if not Dict(bannerTypes, 'season'):  error_log['TVDB season posters missing'].append("TVDBid: %s | Title: '%s'" % (common.WEB_LINK % (common.TVDB_SERIE_URL + TVDBid, TVDBid), Dict(TheTVDB_dict, 'title')))
          
    Log.Info("--- final summary info ---".ljust(157, '-'))
    Log.Info("url: '{}', IMDbid: {}, Present episodes: {}, Missing: {}".format(TVDB_SERIES_URL % TVDBid, IMDbid, ep_count, sorted(episode_missing, key=natural_sort_key)))
    
  Log.Info("--- return ---".ljust(157, '-'))
  Log.Info("absolute_map: {}".format(DictString(Dict(mappingList, 'absolute_map', default={}), 0)))
  Log.Info("season_map: {}".format(DictString(Dict(mappingList, 'season_map', default={}), 0)))
  Log.Info("TheTVDB_dict: {}".format(DictString(TheTVDB_dict, 4)))
  return TheTVDB_dict, IMDbid
Example #26
def GetMetadata(media, movie, source, TVDBid, mappingList, num=0):
    """ [tvdb4.mapping.xml] Attempt to get the ASS's episode mapping data
        [tvdb4.posters.xml] Attempt to get the ASS's image data
    """
    Log.Info('=== tvdb4.GetMetadata() ==='.ljust(157, '='))
    TVDB4_dict, TVDB4_mapping, TVDB4_xml = {}, None, None

    if movie or not source == "tvdb4":
        Log.Info("not tvdb4 mode")
        return TVDB4_dict
    Log.Info("tvdb4 mode")

    def find_tvdb4_file(file_to_find):
        try:
            folder = common.GetMediaDir(media, movie)
            while folder and folder[-1] not in ["/", "\\"]:
                filename = os.path.join(folder, file_to_find)
                if os.path.exists(filename):
                    file = Core.storage.load(os.path.realpath(filename))
                    try:
                        return XML.ElementFromString(file)
                    except:
                        return file
                folder = os.path.dirname(folder)
            else:
                Log.Info("No '{}' file detected locally".format(file_to_find))
        except Exception as e:
            Log.Error(
                "Issues in finding setup info as directories have most likely changed post scan into Plex, Exception: '%s'"
                % e)
        return ""

    Log.Info("--- tvdb4.mapping.xml ---".ljust(157, '-'))
    TVDB4_mapping = find_tvdb4_file("tvdb4.mapping")

    if TVDB4_mapping: Log.Debug("'tvdb4.mapping' file detected locally")
    else:
        TVDB4_mapping = TVDB4_mapping or common.LoadFile(
            filename=os.path.basename(TVDB4_MAPPING_URL),
            url=TVDB4_MAPPING_URL,
            cache=CACHE_1DAY *
            6)  # tvdb4 mapping file cached for 6 days
    entry = ""
    if isinstance(TVDB4_mapping, str): entry = TVDB4_mapping
    else:
        entry = common.GetXml(TVDB4_mapping,
                              "/tvdb4entries/anime[@tvdbid='%s']" % TVDBid)
        if not entry:
            Log.Error("TVDBid '%s' is not found in mapping file" % TVDBid)
    if entry:
        for line in filter(None, entry.strip().splitlines()):
            season = line.strip().split("|")
            for absolute_episode in range(int(season[1]), int(season[2]) + 1):
                SaveDict((str(int(season[0])), str(absolute_episode)),
                         mappingList, 'absolute_map', str(absolute_episode))
            SaveDict(True if "(unknown length)" in season[3] else False,
                     mappingList, 'absolute_map', 'unknown_series_length')
            SaveDict(str(int(season[0])), mappingList, 'absolute_map',
                     'max_season')

    Log.Info("--- tvdb4.posters.xml ---".ljust(157, '-'))
    TVDB4_xml = find_tvdb4_file(os.path.basename(TVDB4_POSTERS_URL))

    if TVDB4_xml: Log.Debug("'tvdb4.posters.xml' file detected locally")
    else:
        TVDB4_xml = TVDB4_xml or common.LoadFile(
            filename=os.path.basename(TVDB4_POSTERS_URL),
            url=TVDB4_POSTERS_URL,
            cache=CACHE_1DAY *
            6)  # tvdb4 posters file cached for 6 days
    if TVDB4_xml:
        seasonposternum = 0
        entry = common.GetXml(TVDB4_xml,
                              "/tvdb4entries/posters[@tvdbid='%s']" % TVDBid)
        if not entry:
            Log.Error("TVDBid '%s' is not found in posters file" % TVDBid)
        for line in filter(None, entry.strip().splitlines()):
            season, url = line.strip().split("|", 1)
            season = season.lstrip("0") if season.lstrip("0") else "0"
            seasonposternum += 1
            SaveDict(("TheTVDB/seasons/%s-%s-%s" %
                      (TVDBid, season, os.path.basename(url)), 1, None),
                     TVDB4_dict, 'seasons', season, 'posters', url)

    Log.Info("--- return ---".ljust(157, '-'))
    Log.Info("absolute_map: {}".format(
        DictString(Dict(mappingList, 'absolute_map', default={}), 0)))
    Log.Info("TVDB4_dict: {}".format(DictString(TVDB4_dict, 4)))
    return TVDB4_dict
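The tvdb4.mapping entries parsed above are pipe-separated 'season|first_absolute_episode|last_absolute_episode|note' lines. The short standalone sketch below (sample data invented for illustration) shows the absolute_map that this parsing produces:

# Illustrative only: a made-up mapping entry in the same 'season|start|end|note' format parsed above.
sample_entry = """1|1|26|(2007-2008)
2|27|52|(unknown length)"""

absolute_map = {}
for line in filter(None, sample_entry.strip().splitlines()):
    season = line.strip().split("|")
    for absolute_episode in range(int(season[1]), int(season[2]) + 1):
        absolute_map[str(absolute_episode)] = (str(int(season[0])), str(absolute_episode))
    absolute_map['unknown_series_length'] = "(unknown length)" in season[3]
    absolute_map['max_season'] = str(int(season[0]))

print(absolute_map['30'])          # ('2', '30') -> absolute episode 30 lives in tvdb season 2
print(absolute_map['max_season'])  # '2'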
Example #27
def GetMetadata(
        movie=False,
        TVDBid="",
        tmdbid="",
        imdbid="",
        season=0,
        num=100):  #Fetch from fanart.tv - Posters Seasons Fanarts Banner
    API_MOVIES_URL = 'http://webservice.fanart.tv/v3/movies/{id}?api_key={api_key}'
    API_TV_URL = 'http://webservice.fanart.tv/v3/tv/{id}?api_key={api_key}'
    API_KEY = 'cfa9dc054d221b8d107f8411cd20b13f'
    FanartTV_dict = {}
    Log.Info("".ljust(157, '-'))
    Log.Info(
        "FanartTv.GetMetadata() - movie:'{movie}', TVDBid: '{TVDBid}', tmdbid: '{tmdbid}', imdbid:'{imdbid}', season: '{season}', num: '{num}'"
        .format(movie=movie,
                TVDBid=TVDBid,
                tmdbid=tmdbid,
                imdbid=imdbid,
                season=season,
                num=num))
    if "," in imdbid:
        (GetMetadata(metadata, movie, "", "", imdbid_unique, season, num)
         for imdbid_unique in (tmdbid or imdbid).split(","))
        return  #recusive call for each imdbid to reduce complexity
    if "," in tmdbid:
        (GetMetadata(metadata, movie, "", tmdbid_unique, "", season, num)
         for tmdbid_unique in tmdbid.split(","))
        return  #recusive call for each tmdbid to reduce complexity
    if not movie and TVDBid.isdigit():
        id, relativeDirectory, url = TVDBid, "FanartTV/tv/" + TVDBid, API_TV_URL.format(
            id=TVDBid, api_key=API_KEY)
    elif movie and (imdbid or tmdbid):
        id, relativeDirectory, url = imdbid or tmdbid, "FanartTV/movie/" + (imdbid or tmdbid), API_MOVIES_URL.format(
            id=imdbid or tmdbid, api_key=API_KEY)
    else:
        return
    if TVDBid or tmdbid or imdbid:
        json = common.LoadFile(filename=id + ".json",
                               relativeDirectory=relativeDirectory,
                               url=url,
                               cache=CACHE_1WEEK)

        #Movies
        if json and (imdbid or tmdbid):
            for item in Dict(json, 'movieposter') or []:
                SaveDict((relativeDirectory +
                          "{id}/movieposter/{filename}.jpg".format(
                              id=id, filename=item['id']), num, None),
                         FanartTV_dict, 'posters', item['url'])
            for item in Dict(json, 'moviebackground') or []:
                SaveDict((relativeDirectory +
                          "{id}/moviebackground/{filename}.jpg".format(
                              id=id, filename=item['id']), num, None),
                         FanartTV_dict, 'art', item['url'])

        #Series
        if json and TVDBid.isdigit():
            for item in Dict(json, 'tvposter') or []:
                SaveDict(
                    (relativeDirectory + "{id}/tvposter/{filename}.jpg".format(
                        id=id, filename=item['id']), num, None), FanartTV_dict,
                    'posters', item['url'])
            for item in Dict(json, 'showbackground') or []:
                SaveDict((relativeDirectory +
                          "{id}/showbackground/{filename}.jpg".format(
                              id=id, filename=item['id']), num, None),
                         FanartTV_dict, 'art', item['url'])
            for item in Dict(json, 'tvbanner') or []:
                SaveDict(
                    (relativeDirectory + "{id}/tvbanner/{filename}.jpg".format(
                        id=id, filename=item['id']), num, None), FanartTV_dict,
                    'banners', item['url'])
            for item in Dict(json, 'seasonposter') or []:
                SaveDict((relativeDirectory +
                          "{id}/seasonposter/{filename}.jpg".format(
                              id=id, filename=item['id']), num, None),
                         FanartTV_dict, 'seasons', item['season'], 'posters',
                         item['url'])
    return FanartTV_dict
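Outside the Plex plug-in sandbox, the same fanart.tv endpoint and JSON keys used above can be exercised directly. A minimal sketch, assuming the requests library is available; the API key is the one already embedded in the example, and the TVDB id in the usage comment is purely illustrative:

import requests

API_TV_URL = 'http://webservice.fanart.tv/v3/tv/{id}?api_key={api_key}'
API_KEY    = 'cfa9dc054d221b8d107f8411cd20b13f'  # key embedded in the example above

def list_tv_posters(tvdbid):
    """ Return the 'tvposter' URLs fanart.tv reports for a TVDB series id. """
    data = requests.get(API_TV_URL.format(id=tvdbid, api_key=API_KEY), timeout=30).json()
    return [item['url'] for item in data.get('tvposter', []) or []]

# e.g. list_tv_posters('81797')  # illustrative TVDB id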
Example #28
def GetMetadata(media, movie, error_log, source, AniDBid, TVDBid, AniDBMovieSets, mappingList):
  ''' Download metadata to dict_AniDB, ANNid, MALid
  '''
  Log.Info("=== AniDB.GetMetadata() ===".ljust(157, '='))
  ANIDB_HTTP_API_URL       = 'http://api.anidb.net:9001/httpapi?request=anime&client=hama&clientver=1&protover=1&aid='
  ANIDB_PIC_BASE_URL       = 'http://img7.anidb.net/pics/anime/'                                                                # AniDB picture directory
  ANIDB_PIC_THUMB_URL      = 'http://img7.anidb.net/pics/anime/thumbs/150/{}.jpg-thumb.jpg' 
  AniDB_dict, ANNid, MALid = {}, "", ""
  original                 = AniDBid
  language_posters         = [language.strip() for language in Prefs['PosterLanguagePriority'].split(',')]
  priority_posters         = [provider.strip() for provider in Prefs['posters'               ].split(',')]
  
  ### Build the list of anidbids for files present ####
  if source.startswith("tvdb") or source.startswith("anidb") and not movie and max(map(int, media.seasons.keys()))>1:  #multi anidbid required only for tvdb numbering
    full_array  = [ anidbid for season in Dict(mappingList, 'TVDB') or [] for anidbid in Dict(mappingList, 'TVDB', season) if season and 'e' not in season and anidbid.isdigit() ]
    AniDB_array = { AniDBid: [] } if Dict(mappingList, 'defaulttvdbseason')=='1' or Dict(mappingList, 'TVDB', 'sa') else {}
    for season in sorted(media.seasons, key=common.natural_sort_key) if not movie else []:  # For each season, media, then use metadata['season'][season]...
      for episode in sorted(media.seasons[season].episodes, key=common.natural_sort_key):
        new_season, new_episode, anidbid = AnimeLists.anidb_ep(mappingList, season, episode)
        numbering                        = 's{}e{}'.format(season, episode)
        if anidbid and not (new_season=='0' and new_episode=='0'):  SaveDict([numbering], AniDB_array, anidbid)
      else:  continue
  elif source.startswith('anidb') and AniDBid != "":  full_array, AniDB_array = [AniDBid], {AniDBid:[]}
  else:                                               full_array, AniDB_array = [], {}
  Log.Info("AniDBid: {}, AniDBids list: {}, source: {}".format(AniDBid, full_array, source))
  for anidbid in AniDB_array:
    Log.Info('[+] {:>5}: {}'.format(anidbid, AniDB_array[anidbid]))
  Log.Info('language_posters: {}'.format(language_posters))
  
  ### Load anidb xmls in tvdb numbering format if needed ###
  for AniDBid in full_array:
    Log.Info(("--- %s ---" % AniDBid).ljust(157, '-'))
    Log.Info('AniDBid: {}, url: {}'.format(AniDBid, ANIDB_HTTP_API_URL+AniDBid))
    Log.Info(("--- %s.series ---" % AniDBid).ljust(157, '-'))
    xml = common.LoadFile(filename=AniDBid+".xml", relativeDirectory=os.path.join("AniDB", "xml"), url=ANIDB_HTTP_API_URL+AniDBid)  # AniDB title database loaded once every 2 weeks

    if not xml or isinstance(xml, str):
      if not xml:               SaveDict(True, AniDB_dict, 'Banned')
      if isinstance(xml, str):  Log.Error('Invalid str returned: "{}"'.format(xml))

      title, original_title, language_rank = GetAniDBTitle(AniDBTitlesDB.xpath('/animetitles/anime[@aid="{}"]/title'.format(AniDBid)))
      if AniDBid==original or len(full_array)==1:
        Log.Info("[ ] title: {}"         .format(SaveDict(title,          AniDB_dict, 'title'         )))
        Log.Info("[ ] original_title: {}".format(SaveDict(original_title, AniDB_dict, 'original_title')))
        Log.Info("[ ] language_rank: {}" .format(SaveDict(language_rank,  AniDB_dict, 'language_rank' )))

    elif xml:
      title, original_title, language_rank = GetAniDBTitle(xml.xpath('/anime/titles/title'))
      if AniDBid==original or len(full_array)==1: #Dict(mappingList, 'poster_id_array', TVDBid, AniDBid)[0]in ('1', 'a'):  ### for each main anime AniDBid ###
        Log.Info("[ ] title: {}"         .format(SaveDict(title,          AniDB_dict, 'title'         )))
        Log.Info("[ ] original_title: {}".format(SaveDict(original_title, AniDB_dict, 'original_title')))
        Log.Info("[ ] language_rank: {}" .format(SaveDict(language_rank,  AniDB_dict, 'language_rank' )))
        if SaveDict( GetXml(xml, 'startdate'  ), AniDB_dict, 'originally_available_at'):  Log.Info("[ ] originally_available_at: '{}'".format(AniDB_dict['originally_available_at']))
        if SaveDict(summary_sanitizer(GetXml(xml, 'description')), AniDB_dict, 'summary') and not movie and Dict(mappingList, 'defaulttvdbseason').isdigit() and mappingList['defaulttvdbseason'] in media.seasons:
          SaveDict(AniDB_dict['summary'], AniDB_dict, 'seasons', mappingList['defaulttvdbseason'], 'summary') 
            
        Log.Info("[ ] rating: '{}'".format(SaveDict( GetXml(xml, 'ratings/temporary'), AniDB_dict, 'rating')))
        
        ### Posters
        if GetXml(xml, 'picture'):
          rank = 1
          if 'en'     in language_posters:  rank = (rank//30)*30*language_posters.index('en')+rank%30
          if 'AniDB'  in priority_posters:  rank = rank+ 6*priority_posters.index('AniDB')
          AniDB_dict['posters'] = {ANIDB_PIC_BASE_URL + GetXml(xml, 'picture'): ( os.path.join('AniDB', 'poster', GetXml(xml, 'picture')), rank, ANIDB_PIC_THUMB_URL.format(GetXml(xml, 'picture').split('.')[0]))}
        
        ### genre ###
        RESTRICTED_GENRE     = {"18 restricted": 'X', "pornography": 'X', "tv censoring": 'TV-MA', "borderline p**n": 'TV-MA'}
        for tag in xml.xpath('tags/tag'):
          if GetXml(tag, 'name') and tag.get('weight', '').isdigit() and int(tag.get('weight', '') or '200') >= int(Prefs['MinimumWeight'] or '200'):
            SaveDict( [string.capwords(GetXml(tag, 'name'), '-')], AniDB_dict, 'genres')
            if GetXml(tag, 'name').lower() in RESTRICTED_GENRE:  AniDB_dict['content_rating'] = RESTRICTED_GENRE[ GetXml(tag, 'name').lower() ]
        if Dict(AniDB_dict, 'genres'): AniDB_dict['genres'].sort()
        SaveDict( "Continuing" if GetXml(xml, 'Anime/enddate')=="1970-01-01" else "Ended", AniDB_dict, 'status')
        Log.Info("[ ] genres ({}/{} above {} weight): {}".format(len(Dict(AniDB_dict, 'genres')), len(xml.xpath('tags/tag')), int(Prefs['MinimumWeight'] or 200), Dict(AniDB_dict, 'genres')))
        for element in AniDBMovieSets.xpath("/anime-set-list/set/anime"):
          if element.get('anidbid') == AniDBid or element.get('anidbid') in full_array:
            node              = element.getparent()
            title, main, language_rank = GetAniDBTitle(node.xpath('titles')[0])
            if title not in Dict(AniDB_dict, 'collections', default=[]):
              Log.Info("[ ] title: {}, main: {}, language_rank: {}".format(title, main, language_rank))
              SaveDict([title], AniDB_dict, 'collections')
              Log.Info("[ ] collection: AniDBid '%s' is part of movie collection: '%s', related_anime_list: %s" % (AniDBid, title, str(full_array)))
        if not Dict(AniDB_dict, 'collections'):  Log.Info("[ ] collection: AniDBid '%s' is not part of any collection, related_anime_list: %s" % (AniDBid, str(full_array))) 
      
        #roles  ### NEW, NOT IN Plex FrameWork Documentation 2.1.1 ###
        Log.Info(("--- %s.actors ---" % AniDBid).ljust(157, '-'))
        for role in xml.xpath('characters/character[(@type="secondary cast in") or (@type="main character in")]'):
          try:
            if GetXml(role, 'seiyuu') and GetXml(role, 'name'):  
              role_dict = {'role': role.find('name').text, 'name': role.find('seiyuu').text, 'photo': ANIDB_PIC_BASE_URL + role.find('seiyuu').get('picture')}
              SaveDict([role_dict], AniDB_dict, 'roles')
              Log.Info('[ ] role: {:<20}, name: {:<20}, photo: {}'.format(role_dict['role'], role_dict['name'], role_dict['photo']))
          except Exception as e:  Log.Info("Seyiuu error: {}".format(e))
        
      if  movie:
        Log.Info("[ ] year: '{}'".format(SaveDict(GetXml(xml, 'startdate')[0:4], AniDB_dict, 'year')))
        Log.Info(("--- %s.summary info ---" % AniDBid).ljust(157, '-'))
          
      ### Series ###
      else:
        
        ### not listed for serie but is for eps
        roles    = { "Animation Work":"studio", "Direction":"directors", "Series Composition":"producers", "Original Work":"writers", "Script":"writers", "Screenplay":"writers" }
        ep_roles = {}
        for creator in xml.xpath('creators/name'):
          for role in roles: 
            if not role in creator.get('type'):  continue
            if roles[role]=="studio":  SaveDict(creator.text, AniDB_dict, 'studio')
            else:                      SaveDict([creator.text], ep_roles, roles[role])
        Log.Info("[ ] roles (creators tag): " +str(ep_roles))
        if SaveDict(GetXml(xml, 'type')=='Movie', AniDB_dict, 'movie'):  Log.Info("'movie': '{}'".format(AniDB_dict['movie']))
      
        ### Translate into season/episode mapping
        numEpisodes, totalDuration, mapped_eps, ending_table, op_nb = 0, 0, [], {}, 0 
        specials = {'S': [0, 'Special'], 'C': [100, 'Opening/Ending'], 'T': [200, 'Trailer'], 'P': [300, 'Parody'], 'O': [400, 'Other']}
        movie_ep_groups = {}
        missing={'0': [], '1':[]}
                
        ### Episodes (and specials) not always in right order ###
        Log.Info(("--- %s.episodes ---" % AniDBid).ljust(157, '-'))
        ending_offset = 99
        for ep_obj in sorted(xml.xpath('episodes/episode'), key=lambda x: [int(x.xpath('epno')[0].get('type')), int(x.xpath('epno')[0].text if x.xpath('epno')[0].text.isdigit() else x.xpath('epno')[0].text[1:])]):
          
          ### Title, Season, Episode number, Specials
          title, main, language_rank = GetAniDBTitle (ep_obj.xpath('title'), [language.strip() for language in Prefs['EpisodeLanguagePriority'].split(',')])
          epNum     = ep_obj.xpath('epno')[0]
          epNumType = epNum.get('type')
          season    = "1" if epNumType == "1" else "0"
          if   epNumType=="3" and ep_obj.xpath('title')[0].text.startswith('Ending') and int(epNum.text[1:])-1<ending_offset:  ending_offset = int(epNum.text[1:])-1
          if   epNumType=="3" and int(epNum.text[1:])>ending_offset:  episode = str(int(epNum.text[1:])+150-ending_offset)  #shifted to 150 for 1st ending.  
          elif epNumType=="1":                                        episode = epNum.text
          else:                                                       episode = str( specials[ epNum.text[0] ][0] + int(epNum.text[1:]))
          numbering = "s{}e{:>3}".format(season, episode)
          
          #If tvdb numbering used, save anidb episode meta using tvdb numbering
          if source.startswith("tvdb") or source.startswith("anidb") and not movie and max(map(int, media.seasons.keys()))>1:
            season, episode = AnimeLists.tvdb_ep(mappingList, season, episode, AniDBid) ###Broken for tvdbseason='a'

            # Get season from absolute number OR convert episode number to absolute number
            if source in ('tvdb3', 'tvdb4') and season not in ('-1', '0'):
              if season=='a' or source=='tvdb4':  season = Dict(mappingList, 'absolute_map', episode, default=(season, episode))[0]
              elif episode!='0':
                try:  episode = list(Dict(mappingList, 'absolute_map', default={}).keys())[list(Dict(mappingList, 'absolute_map', default={}).values()).index((season, episode))]
                except Exception as e:  Log.Error("Exception: {}".format(e))
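                # e.g. (illustrative) with absolute_map {'13': ('2', '1')} and (season, episode)==('2', '1'),
                # the reverse lookup above rewrites episode to the absolute number '13'.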

            if season=='0' and episode=='0' or not season in media.seasons or not episode in media.seasons[season].episodes:   Log.Info('[ ] {} => s{:>1}e{:>3} epNumType: {}'.format(numbering, season, episode, epNumType));  continue
            
            ### Series poster as season poster
            if GetXml(xml, 'picture') and not Dict(AniDB_dict, 'seasons', season, 'posters', ANIDB_PIC_BASE_URL + GetXml(xml, 'picture')):
              rank = 1
              if 'en'     in language_posters:  rank = (rank//30)*30*language_posters.index('en')+rank%30
              if 'AniDB'  in priority_posters:  rank = rank+ 6*priority_posters.index('AniDB')
              SaveDict((os.path.join('AniDB', 'poster', GetXml(xml, 'picture')), rank, ANIDB_PIC_THUMB_URL.format(GetXml(xml, 'picture').split('.')[0])), AniDB_dict, 'seasons', season, 'posters', ANIDB_PIC_BASE_URL + GetXml(xml, 'picture'))

          ### In AniDB numbering, Movie episode group, create key and create key in dict with empty list if doesn't exist ###
          else:  #if source.startswith("anidb") and not movie and max(map(int, media.seasons.keys()))<=1:
                     
            ### Movie episode group, create key and create key in dict with empty list if doesn't exist ###
            key = ''
            if epNumType=='1' and GetXml(xml, '/anime/episodecount')=='1' and GetXml(xml, '/anime/type') in ('Movie', 'OVA'):
              key = '1' if title in ('Complete Movie', 'OVA') else title[-1] if title.startswith('Part ') and title[-1].isdigit() else '' #'-1'
              if key:  SaveDict([], movie_ep_groups, key)
            
            #Episode missing from disk
            if not season in media.seasons or not episode in media.seasons[season].episodes:
              Log.Info('[ ] {} => s{:>1}e{:>3} air_date: {}'.format(numbering, season, episode, GetXml(ep_obj, 'airdate')))
              current_air_date = GetXml(ep_obj, 'airdate').replace('-','')
              current_air_date = int(current_air_date) if current_air_date.isdigit() and int(current_air_date) > 10000000 else 99999999
              if int(time.strftime("%Y%m%d")) > current_air_date+1:
                if   epNumType == '1' and key:  SaveDict([numbering], movie_ep_groups, key   )
                elif epNumType in ['1', '2']:   SaveDict([episode],   missing,         season)
              continue
                    
          ### Episodes
          SaveDict(language_rank, AniDB_dict, 'seasons', season, 'episodes', episode, 'language_rank')
          SaveDict(title,         AniDB_dict, 'seasons', season, 'episodes', episode, 'title'        )
          Log.Info('[X] {} => s{:>1}e{:>3} air_date: {} language_rank: {}, title: "{}"'.format(numbering, season, episode, GetXml(ep_obj, 'airdate'), language_rank, title))
          
          if GetXml(ep_obj, 'length').isdigit():
            SaveDict(int(GetXml(ep_obj, 'length'))*1000*60, AniDB_dict, 'seasons', season, 'episodes', episode, 'duration')  # AniDB stores it in minutes, Plex save duration in millisecs
            if season == "1":  numEpisodes, totalDuration = numEpisodes+1, totalDuration + int(GetXml(ep_obj, 'length'))
          
          SaveDict(GetXml(ep_obj, 'rating' ), AniDB_dict, 'seasons', season, 'episodes', episode, 'rating'                 )
          SaveDict(GetXml(ep_obj, 'airdate'), AniDB_dict, 'seasons', season, 'episodes', episode, 'originally_available_at')
          if SaveDict(summary_sanitizer(GetXml(ep_obj, 'summary')), AniDB_dict, 'seasons', season, 'episodes', episode, 'summary'):  Log.Info(" - [ ] summary: {}".format(Dict(AniDB_dict, 'seasons', season, 'episodes', episode, 'summary')))
          #for role in ep_roles: SaveDict(",".join(ep_roles[role]), AniDB_dict, 'seasons', season, 'episodes', episode, role)
            #Log.Info("role: '%s', value: %s " % (role, str(ep_roles[role])))
                  
        ### End of for ep_obj...
        Log.Info(("--- %s.summary info ---" % AniDBid).ljust(157, '-'))
        if SaveDict(int(totalDuration)/int(numEpisodes) if int(numEpisodes) else 0, AniDB_dict, 'duration'):
          Log.Info("Duration: {}, numEpisodes: {}, average duration: {}".format(str(totalDuration), str(numEpisodes), AniDB_dict['duration']))

        ### AniDB numbering Missing Episodes ###
        if source.startswith("anidb") and not movie and max(map(int, media.seasons.keys()))<=1:
          if movie_ep_groups:
            Log.Info("Movie/OVA Ep Groups: %s" % movie_ep_groups)  #movie_ep_groups: {'1': ['s1e1'], '3': ['s1e4', 's1e5', 's1e6'], '2': ['s1e3'], '-1': []}
            SaveDict([value for key in movie_ep_groups for value in movie_ep_groups[key] if 0 < len(movie_ep_groups[key]) < int(key)], missing, '1')
          for season in sorted(missing):
            missing_eps = sorted(missing[season], key=common.natural_sort_key)
            Log.Info('Season: {} Episodes: {} not on disk'.format(season, missing_eps))
            if missing_eps:  error_log['Missing Specials' if season=='0' else 'Missing Episodes'].append("AniDBid: %s | Title: '%s' | Missing Episodes: %s" % (common.WEB_LINK % (common.ANIDB_SERIE_URL + AniDBid, AniDBid), AniDB_dict['title'], str(missing_eps)))
          
      ### End of if not movie ###
    
      # Generate relations_map for anidb3/4(tvdb1/6) modes
      for relatedAnime in xml.xpath('/anime/relatedanime/anime'):
        if relatedAnime.get('id') not in Dict(mappingList, 'relations_map', AniDBid, relatedAnime.get('type'), default=[]): SaveDict([relatedAnime.get('id')], mappingList, 'relations_map', AniDBid, relatedAnime.get('type'))

      # External IDs
      ANNid = GetXml(xml, "/anime/resources/resource[@type='1']/externalentity/identifier")
      MALid = GetXml(xml, "/anime/resources/resource[@type='2']/externalentity/identifier")
      #ANFOid = GetXml(xml, "/anime/resources/resource[@type='3']/externalentity/identifier"), GetXml(xml, "/anime/resources/resource[@type='3']/externalentity/identifier")
    
      # Logs
      if not Dict(AniDB_dict, 'summary'):  error_log['AniDB summaries missing'].append("AniDBid: %s" % (common.WEB_LINK % (common.ANIDB_SERIE_URL + AniDBid, AniDBid) + " | Title: '%s'" % Dict(AniDB_dict, 'title')))
      if not Dict(AniDB_dict, 'posters'):  error_log['AniDB posters missing'  ].append("AniDBid: %s" % (common.WEB_LINK % (common.ANIDB_SERIE_URL + AniDBid, AniDBid) + " | Title: '%s'" % Dict(AniDB_dict, 'title')))
      #if not Dict(AniDB_dict, 'studio' ):                                                                                          error_log['anime-list studio logos'].append("AniDBid: %s | Title: '%s' | AniDB has studio '%s' and anime-list has '%s' | "    % (common.WEB_LINK % (ANIDB_SERIE_URL % AniDBid, AniDBid), title, metadata.studio, mapping_studio) + common.WEB_LINK % (ANIDB_TVDB_MAPPING_FEEDBACK % ("aid:" + metadata.id + " " + title, String.StripTags( XML.StringFromElement(xml, encoding='utf8'))), "Submit bug report (need GIT account)"))
      #if metadata.studio       and 'studio' in AniDB_dict and AniDB_dict ['studio'] and AniDB_dict ['studio'] != metadata.studio:  error_log['anime-list studio logos'].append("AniDBid: %s | Title: '%s' | AniDB has studio '%s' and anime-list has '%s' | "    % (common.WEB_LINK % (ANIDB_SERIE_URL % AniDBid, AniDBid), title, metadata.studio, mapping_studio) + common.WEB_LINK % (ANIDB_TVDB_MAPPING_FEEDBACK % ("aid:" + metadata.id + " " + title, String.StripTags( XML.StringFromElement(xml, encoding='utf8'))), "Submit bug report (need GIT account)"))
      #if metadata.studio == "" and 'studio' in AniDB_dict and AniDB_dict ['studio'] == "":                                         error_log['anime-list studio logos'].append("AniDBid: %s | Title: '%s' | AniDB and anime-list are both missing the studio" % (common.WEB_LINK % (ANIDB_SERIE_URL % AniDBid, AniDBid), title) )
    
      Log.Info("ANNid: '%s', MALid: '%s', xml loaded: '%s'" % (ANNid, MALid, str(xml is not None)))
  
  Log.Info("--- return ---".ljust(157, '-'))
  Log.Info("relations_map: {}".format(DictString(Dict(mappingList, 'relations_map', default={}), 1)))
  Log.Info("AniDB_dict: {}".format(DictString(AniDB_dict, 4)))
  return AniDB_dict, ANNid, MALid
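The specials table above maps AniDB episode-number prefixes onto Plex season 0 with fixed offsets. A minimal standalone sketch of that renumbering (same offsets as the example; it deliberately ignores the extra 'Ending' shift applied above, and the sample epno values are illustrative):

SPECIALS = {'S': (0, 'Special'), 'C': (100, 'Opening/Ending'), 'T': (200, 'Trailer'), 'P': (300, 'Parody'), 'O': (400, 'Other')}

def plex_numbering(epno):
  """ Map an AniDB epno string to the (season, episode) strings used above. """
  if epno.isdigit():  return '1', epno                    # regular episodes go to season 1
  offset, _label = SPECIALS[epno[0]]
  return '0', str(offset + int(epno[1:]))                 # specials land in season 0 with a per-type offset

print(plex_numbering('12'))   # ('1', '12')
print(plex_numbering('S2'))   # ('0', '2')
print(plex_numbering('C1'))   # ('0', '101')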