def buildList(query, opts):
    """Search TMDB for movies matching *query*, print the matches to
    stdout as a <metadata> XML document, and exit the process.
    """
    # TEMPORARY FIX:
    # the search backend treats a dash as negating all text that comes
    # after it, so replace dashes with spaces before querying.
    query = query.replace("-", " ")
    from MythTV.tmdb3 import searchMovie
    from MythTV import VideoMetadata
    from lxml import etree
    results = searchMovie(query)
    tree = etree.XML(u"<metadata></metadata>")
    # (VideoMetadata attribute, tmdb3 result attribute) pairs copied
    # straight across whenever the source value is truthy.
    field_map = (
        ("runtime", "runtime"),
        ("title", "originaltitle"),
        ("releasedate", "releasedate"),
        ("tagline", "tagline"),
        ("description", "overview"),
        ("homepage", "homepage"),
        ("userrating", "userrating"),
        ("popularity", "popularity"),
    )
    emitted = 0
    for movie in results:
        if movie is None:
            # faulty search entry -- nothing to emit
            continue
        meta = VideoMetadata()
        for dest, src in field_map:
            value = getattr(movie, src)
            if value:
                setattr(meta, dest, value)
        meta.inetref = str(movie.id)
        if movie.title:
            meta.title = movie.title
        # TODO:
        # should releasedate and year be pulled from the country-specific
        # data, or left at the defaults to cut down on search traffic?
        if movie.releasedate:
            meta.year = movie.releasedate.year
        backdrop = movie.backdrop
        if backdrop:
            meta.images.append({"type": "fanart",
                                "url": backdrop.geturl(),
                                "thumb": backdrop.geturl(backdrop.sizes()[0])})
        poster = movie.poster
        if poster:
            meta.images.append({"type": "coverart",
                                "url": poster.geturl(),
                                "thumb": poster.geturl(poster.sizes()[0])})
        tree.append(meta.toXML())
        emitted += 1
        if emitted >= 60:
            # page limiter, dont want to overload the server
            break
    sys.stdout.write(etree.tostring(tree, encoding="UTF-8",
                                    pretty_print=True, xml_declaration=True))
    sys.exit(0)
def buildList(query, opts):
    """Search TMDB for movies matching *query*, print the matches to
    stdout as a <metadata> XML document, and exit the process.
    """
    # TEMPORARY FIX:
    # the search backend treats a dash as negating all text that comes
    # after it, so replace dashes with spaces before querying.
    query = query.replace('-', ' ')
    from MythTV.tmdb3 import searchMovie
    from MythTV import VideoMetadata
    from lxml import etree
    results = searchMovie(query)
    tree = etree.XML(u'<metadata></metadata>')
    # (VideoMetadata attribute, tmdb3 result attribute) pairs copied
    # straight across whenever the source value is truthy.
    field_map = (
        ('runtime', 'runtime'),
        ('title', 'originaltitle'),
        ('releasedate', 'releasedate'),
        ('tagline', 'tagline'),
        ('description', 'overview'),
        ('homepage', 'homepage'),
        ('userrating', 'userrating'),
        ('popularity', 'popularity'),
    )
    emitted = 0
    for movie in results:
        meta = VideoMetadata()
        for dest, src in field_map:
            value = getattr(movie, src)
            if value:
                setattr(meta, dest, value)
        meta.inetref = str(movie.id)
        # NOTE: lstrip removes every leading 't', which drops the whole
        # "tt" prefix from IMDB ids (leaving just the digits).
        meta.imdb = str(movie.imdb).lstrip("t")
        if movie.title:
            meta.title = movie.title
        # TODO:
        # should releasedate and year be pulled from the country-specific
        # data, or left at the defaults to cut down on search traffic?
        if movie.releasedate:
            meta.year = movie.releasedate.year
        backdrop = movie.backdrop
        if backdrop:
            meta.images.append({'type': 'fanart',
                                'url': backdrop.geturl(),
                                'thumb': backdrop.geturl(backdrop.sizes()[0])})
        poster = movie.poster
        if poster:
            meta.images.append({'type': 'coverart',
                                'url': poster.geturl(),
                                'thumb': poster.geturl(poster.sizes()[0])})
        tree.append(meta.toXML())
        emitted += 1
        if emitted >= 60:
            # page limiter, dont want to overload the server
            break
    sys.stdout.write(etree.tostring(tree, encoding='UTF-8',
                                    pretty_print=True, xml_declaration=True))
    sys.exit(0)
def buildList(query):
    """Search TMDB for movies matching *query* and write the matches to
    stdout as a <metadata> XML document, then exit the process.

    Results that are missing optional fields (release date, backdrop,
    poster) are emitted without those elements instead of raising.
    """
    results = searchMovie(query)
    tree = etree.XML(u'<metadata></metadata>')
    # (VideoMetadata attribute, tmdb3 result attribute) copy map.
    mapping = [['runtime', 'runtime'], ['title', 'originaltitle'],
               ['releasedate', 'releasedate'], ['tagline', 'tagline'],
               ['description', 'overview'], ['homepage', 'homepage'],
               ['userrating', 'userrating'], ['popularity', 'popularity']]
    for res in results:
        m = VideoMetadata()
        for i, j in mapping:
            # Only copy truthy values so metadata defaults are not
            # clobbered with None/empty fields (matches the other
            # buildList variants in this file).
            if getattr(res, j):
                setattr(m, i, getattr(res, j))
        m.inetref = str(res.id)
        # BUGFIX: these fields can be None on TMDB results; the
        # unguarded accesses previously raised AttributeError.
        if res.releasedate:
            m.year = res.releasedate.year
        if res.backdrop:
            m.images.append({'type': 'fanart',
                             'url': res.backdrop.geturl()})
        if res.poster:
            m.images.append({'type': 'coverart',
                             'url': res.poster.geturl()})
        tree.append(m.toXML())
    sys.stdout.write(etree.tostring(tree, encoding='UTF-8',
                                    pretty_print=True))
    sys.exit(0)
def buildList(query):
    """Search TMDB for movies matching *query*, write the matches to
    stdout as a <metadata> XML document, and exit the process.
    """
    # TEMPORARY FIX:
    # the search backend treats a dash as negating all text that comes
    # after it, so replace dashes with spaces before querying.
    query = query.replace('-', ' ')
    results = searchMovie(query)
    tree = etree.XML(u'<metadata></metadata>')
    # (VideoMetadata attribute, tmdb3 result attribute) pairs copied
    # straight across whenever the source value is truthy.
    field_map = (
        ('runtime', 'runtime'),
        ('title', 'originaltitle'),
        ('releasedate', 'releasedate'),
        ('tagline', 'tagline'),
        ('description', 'overview'),
        ('homepage', 'homepage'),
        ('userrating', 'userrating'),
        ('popularity', 'popularity'),
    )
    emitted = 0
    for movie in results:
        meta = VideoMetadata()
        for dest, src in field_map:
            value = getattr(movie, src)
            if value:
                setattr(meta, dest, value)
        meta.inetref = str(movie.id)
        # Optional fields: only present on some results.
        if movie.releasedate:
            meta.year = movie.releasedate.year
        backdrop = movie.backdrop
        if backdrop:
            meta.images.append({'type': 'fanart',
                                'url': backdrop.geturl(),
                                'thumb': backdrop.geturl(backdrop.sizes()[0])})
        poster = movie.poster
        if poster:
            meta.images.append({'type': 'coverart',
                                'url': poster.geturl(),
                                'thumb': poster.geturl(poster.sizes()[0])})
        tree.append(meta.toXML())
        emitted += 1
        if emitted >= 60:
            # page limiter, dont want to overload the server
            break
    sys.stdout.write(etree.tostring(tree, encoding='UTF-8',
                                    pretty_print=True, xml_declaration=True))
    sys.exit(0)
# if ep_id in lib and (ep_season != '00' or ep_num != '00'): # print "[WARN] Matched program ID" + ep_id + ", skipping " + episode_name # continue # else: # print "[INFO] Adding " + episode_name + " to library" # lib.append(ep_id) # Handle specials, movies, etc. if ep_season == '00' and ep_num == '00': #Fallback 1: Check TheMovieDB print "[WARN] no season or episode info - trying fallbacks" moviedb_successful = False moviedb_run = False if (config.moviedb_enabled): print "[INFO] (fallback 1) Querying TheMovieDB for " + title res = searchMovie(title) if (len(res) is 0): moviedb_successful = False print "[WARN] " + episode_name + "not found in TheMovieDB" else: print "[INFO] Successfully looked up in MovieDB" moviedb_successful = True print (res[0].title) title = re.sub('[\[\]/\\;><&*%=+@!#^()|?]', '_', res[0].title) episode_name = title #Fallback 2: start time if (ep_start_time is not None): if (moviedb_run is False or (moviedb_run is True and moviedb_successful is False)): print "[INFO] (fallback 2) using start time"
def buildList(query, opts):
    """Search TMDB for movies matching *query*, emit the matches as a
    <metadata> XML document via print_etree(), and exit the process.
    """
    # TEMPORARY FIX:
    # the search backend treats a dash as negating all text that comes
    # after it, so replace dashes with spaces before querying.
    query = query.replace('-', ' ')
    from MythTV.tmdb3 import searchMovie
    from MythTV import VideoMetadata
    from lxml import etree
    results = iter(searchMovie(query))
    tree = etree.XML(u'<metadata></metadata>')
    # (VideoMetadata attribute, tmdb3 result attribute) pairs copied
    # straight across whenever the source value is truthy.
    field_map = (
        ('runtime', 'runtime'),
        ('title', 'originaltitle'),
        ('releasedate', 'releasedate'),
        ('tagline', 'tagline'),
        ('description', 'overview'),
        ('homepage', 'homepage'),
        ('userrating', 'userrating'),
        ('popularity', 'popularity'),
    )
    emitted = 0
    while True:
        try:
            movie = next(results)
        except (StopIteration, IndexError):
            # StopIteration: normal end of results.
            # IndexError: unexpected end of results -- either way we
            # still want to return whatever we have so far.
            break
        if movie is None:
            # faulty data, skip it and continue
            continue
        meta = VideoMetadata()
        for dest, src in field_map:
            value = getattr(movie, src)
            if value:
                setattr(meta, dest, value)
        meta.inetref = str(movie.id)
        if movie.title:
            meta.title = movie.title
        # TODO:
        # should releasedate and year be pulled from the country-specific
        # data, or left at the defaults to cut down on search traffic?
        if movie.releasedate:
            meta.year = movie.releasedate.year
        backdrop = movie.backdrop
        if backdrop:
            meta.images.append({'type': 'fanart',
                                'url': backdrop.geturl(),
                                'thumb': backdrop.geturl(backdrop.sizes()[0])})
        poster = movie.poster
        if poster:
            meta.images.append({'type': 'coverart',
                                'url': poster.geturl(),
                                'thumb': poster.geturl(poster.sizes()[0])})
        tree.append(meta.toXML())
        emitted += 1
        if emitted >= 60:
            # page limiter, dont want to overload the server
            break
    print_etree(
        etree.tostring(tree, encoding='UTF-8', pretty_print=True,
                       xml_declaration=True))
    sys.exit(0)
def buildList(query, opts):
    """Search TMDB for movies matching *query*, write the matches to
    stdout as a <metadata> XML document, and exit the process.

    A trailing year in the query (with or without parentheses) is
    peeled off and passed to the search as the release year.
    """
    # TEMPORARY FIX:
    # the search backend treats a dash as negating all text that comes
    # after it, so replace dashes with spaces before querying.
    query = query.replace('-',' ')
    query = query.strip()  # trim extra whitespace
    # Extract the year from the name.  A space before the year is
    # required so movie names that *are* years aren't consumed
    # (example: "2012 - 2009", a 2009 movie); optional parentheses
    # around the year are accepted.
    yearRegex = re.compile(r"\s\(?((19|20)[0-9]{2})\)?$")
    found = yearRegex.search(query)
    # If no year was found we pass year=None to searchMovie, which is
    # its default anyway.
    year = found.group(1) if found is not None else None
    # Drop the year (if any) from the query text -- it causes bad TMDB
    # results.
    query = yearRegex.sub("", query).strip()
    from MythTV.tmdb3 import searchMovie
    from MythTV import VideoMetadata
    from lxml import etree
    results = iter(searchMovie(query, locale=None, adult=False, year=year))
    tree = etree.XML(u'<metadata></metadata>')
    # (VideoMetadata attribute, tmdb3 result attribute) pairs copied
    # straight across whenever the source value is truthy.
    field_map = (
        ('runtime', 'runtime'),
        ('title', 'originaltitle'),
        ('releasedate', 'releasedate'),
        ('tagline', 'tagline'),
        ('description', 'overview'),
        ('homepage', 'homepage'),
        ('userrating', 'userrating'),
        ('popularity', 'popularity'),
    )
    emitted = 0
    while True:
        try:
            movie = results.next()
        except (StopIteration, IndexError):
            # StopIteration: normal end of results.
            # IndexError: unexpected end of results -- either way we
            # still want to return whatever we have so far.
            break
        if movie is None:
            # faulty data, skip it and continue
            continue
        meta = VideoMetadata()
        for dest, src in field_map:
            value = getattr(movie, src)
            if value:
                setattr(meta, dest, value)
        meta.inetref = str(movie.id)
        if movie.title:
            meta.title = movie.title
        # TODO:
        # should releasedate and year be pulled from the country-specific
        # data, or left at the defaults to cut down on search traffic?
        if movie.releasedate:
            meta.year = movie.releasedate.year
        backdrop = movie.backdrop
        if backdrop:
            meta.images.append({'type': 'fanart',
                                'url': backdrop.geturl(),
                                'thumb': backdrop.geturl(backdrop.sizes()[0])})
        poster = movie.poster
        if poster:
            meta.images.append({'type': 'coverart',
                                'url': poster.geturl(),
                                'thumb': poster.geturl(poster.sizes()[0])})
        tree.append(meta.toXML())
        emitted += 1
        if emitted >= 60:
            # page limiter, dont want to overload the server
            break
    sys.stdout.write(etree.tostring(tree, encoding='UTF-8',
                                    pretty_print=True, xml_declaration=True))
    sys.exit(0)
def main():
    """Mirror MythTV recordings into a Plex library via symlinks.

    Queries the MythTV backend's Dvr/GetRecordedList service, then for
    each recording builds a file-system-safe name, picks a Plex target
    directory (TV / movie / specials), and either symlinks the source
    file or hands it to avconv for re-encode/remux.  Progress is
    reported on stdout; processed program IDs are persisted in the
    library so reruns skip them.
    """
    print "[INFO] Starting mythEx"
    # Library of already-processed program IDs (persisted between runs).
    lib = open_library()
    url = "http://" + config.host_url + ":" + config.host_port
    print "[INFO] Looking up from MythTV: " + url + '/Dvr/GetRecordedList'
    # MythTV Services API returns XML with one <Program> per recording.
    tree = ET.parse(urllib.urlopen(url + '/Dvr/GetRecordedList'))
    root = tree.getroot()
    for program in root.iter('Program'):
        title = program.find('Title').text
        ep_title = program.find('SubTitle').text
        # Zero-pad to two digits for SxxEyy naming.
        ep_season = program.find('Season').text.zfill(2)
        ep_num = program.find('Episode').text.zfill(2)
        # NOTE(review): [-4:] assumes a dot plus 3-character extension
        # (".mpg", ".mkv"); longer extensions would be truncated -- confirm.
        ep_file_extension = program.find('FileName').text[-4:]
        ep_file_name = program.find('FileName').text
        ep_id = program.find('ProgramId').text
        ep_airdate = program.find('Airdate').text
        ep_temp = program.find('StartTime').text
        ep_start_time = utc_to_local(datetime.strptime(ep_temp, '%Y-%m-%dT%H:%M:%SZ'))
        # parse start time for file-system safe name
        ep_start_time = datetime.strftime(ep_start_time, '%Y-%m-%d %H%M')
        # parse show name for file-system safe name
        title = re.sub('[\[\]/\\;><&*%=+@!#^()|?]', '_', title)
        episode_name = title + " - S" + ep_season + "E" + ep_num
        print "[INFO] Processing " + episode_name + " ..."
        # Skip previously finished files
        if ep_id in lib:
            print "[WARN] Matched program ID" + ep_id + ", skipping " + episode_name
            continue
        elif ep_id is not None:
            print "[INFO] Adding " + episode_name + " to library [" + ep_id + "]"
            lib.append(ep_id)
        # Handle specials, movies, etc.
        if ep_season == '00' and ep_num == '00':
            # Fallback 1: Check TheMovieDB
            print "[WARN] no season or episode info - trying fallbacks"
            moviedb_successful = False
            moviedb_run = False
            if (config.moviedb_enabled):
                print "[INFO] (fallback 1) Querying TheMovieDB for " + title
                res = searchMovie(title)
                moviedb_run = True
                # NOTE(review): 'is 0' is an identity test; works for
                # small ints in CPython but should be '== 0'.
                if (len(res) is 0):
                    moviedb_successful = False
                    # NOTE(review): missing space before "not found" in
                    # the message text.
                    print "[WARN] " + episode_name + "not found in TheMovieDB"
                else:
                    print "[INFO] Successfully looked up in MovieDB"
                    moviedb_successful = True
                    print (res[0].title)
                    # Re-sanitize using the canonical TMDB title.
                    title = re.sub('[\[\]/\\;><&*%=+@!#^()|?]', '_', res[0].title)
                    episode_name = title
                    link_path = (config.plex_movie_directory + title + separator + episode_name + ep_file_extension)
            # Fallback 2: start time
            if (ep_start_time is not None):
                if (moviedb_run is False or (moviedb_run is True and moviedb_successful is False)):
                    print "[INFO] (fallback 2) using start time"
                    episode_name = title + " - " + ep_start_time
                    print "[INFO] Changed to " + episode_name
                    link_path = (config.plex_specials_directory + title + separator + episode_name + ep_file_extension)
            else:
                print "[WARN] no start time available"
                # NOTE(review): if both fallbacks fail, link_path is
                # never assigned and its use below raises NameError --
                # confirm whether that can happen in practice.
        else:
            print "[INFO] have season and episode."
            link_path = (config.plex_tv_directory + title + separator + episode_name + ep_file_extension)
        # Symlink path
        print "[INFO] symlink processing.."
        # Watch for orphaned recordings: find which configured recording
        # directory actually contains the source file.
        source_dir = None
        for myth_dir in config.mythtv_recording_directories[:]:
            source_path = myth_dir + ep_file_name
            if os.path.isfile(source_path):
                source_dir = myth_dir
                break
        if source_dir is None:
            print ("[ERROR] Cannot create symlink for " + episode_name + ", no valid source directory. Skipping.")
            continue
        if os.path.exists(link_path) or os.path.islink(link_path):
            print "[WARN] Symlink " + link_path + " already exists. Skipping."
            continue
        # Create the show folder under the matching Plex directory if it
        # does not exist yet.
        if (config.plex_tv_directory in link_path) and (not os.path.exists(config.plex_tv_directory + title)):
            print "[INFO] Show folder does not exist, creating."
            os.makedirs(config.plex_tv_directory + title)
        if (config.plex_movie_directory in link_path) and (not os.path.exists(config.plex_movie_directory + title)):
            print "[INFO] Show folder does not exist, creating."
            os.makedirs(config.plex_movie_directory + title)
        if (config.plex_specials_directory in link_path) and (not os.path.exists(config.plex_specials_directory + title)):
            print "[INFO] Show folder does not exist, creating."
            os.makedirs(config.plex_specials_directory + title)
        # avconv (next-gen ffmpeg) support -- convert files to MP4
        # so smaller devices (eg Roku, AppleTV, FireTV, Chromecast)
        # support native playback.
        if config.avconv_enabled is True:
            mthcommflag_exists = False  # NOTE(review): never read -- confirm dead
            # MythTV's mythcommflag can be used to remove commercials,
            # shrinking recordings and improving viewing.
            if config.avconv_mythcommflag_enabled is True:
                run_mythcommflag(source_path)
            # Re-encode with avconv
            run_avconv(source_path, link_path)
        elif config.avconv_remux_enabled:
            run_avconv_remux(source_path, link_path)
        else:
            print "[INFO] Linking " + source_path + " ==> " + link_path
            os.symlink(source_path, link_path)
    close_library(lib)