def webpage(wfdir, outdir, locate_merger):
    """Generate the run's summary index.html and install the static HTML assets.

    Parameters:
        wfdir         -- waveform directory (passed through to DataDir/metadata)
        locate_merger -- merger-location input (passed through)
        outdir        -- output directory (passed through)

    Side effects: writes <datadir>/../index.html and, when missing, copies the
    local 'HTML' tree to <datadir>/../HTML.
    """
    datadir = DataDir(wfdir, outdir)
    # NOTE(review): datadir is concatenated with path strings below, so
    # DataDir presumably yields a path-like string -- confirm.
    # 'with' guarantees the handle is closed even if metadata()/webpage_data()
    # or write() raises; the original opened/closed manually with no
    # try/finally and would leak the descriptor on error.  Statement order
    # (open/truncate before metadata computation) is preserved.
    with open(os.path.join(datadir + '/..', 'index.html'), 'w+') as index_file:
        meta_data, parfile = metadata(wfdir, outdir, locate_merger)
        webdata = webpage_data(meta_data, parfile, locate_merger)
        index_file.write(webdata)
    if not os.path.exists(datadir + "/../HTML"):
        copytree('HTML', datadir + '/../HTML')
def get_metadata(self, filename):
    """Extract (title, release date, quality) metadata from a media filename.

    The title is taken to be everything before whichever marker (release date
    or quality tag) appears first in the cleaned filename; a trailing '[' or
    '.' separator is stripped.

    Relies on instance helpers clean/find_quality/find_release_date, which set
    self.release_date_index_start and self.quality_index_start as side effects,
    and on the module-level metadata() constructor.
    """
    filename = self.clean(filename)
    quality = self.find_quality(filename)
    release_date = self.find_release_date(filename)
    # Title ends at whichever marker occurs earliest in the filename.
    index_title_end = (self.release_date_index_start
                       if self.release_date_index_start < self.quality_index_start
                       else self.quality_index_start)
    title = filename[:index_title_end]
    if title.endswith(('[', '.')):
        # fix: the original sliced with [:index_title_end - 1], which only
        # drops the trailing separator when len(title) == index_title_end;
        # [:-1] states the intent directly and is correct even when the
        # slice came up short.
        title = title[:-1]
    m = metadata(title, release_date, quality)
    return m
def reSub():
    """Find .m4v files under ReSubSearchPaths that lack a subtitle track and
    download/attach subtitles for them via SublerCLI.

    Relies on module-level names: ReSubSearchPaths, HandleBarBinPath,
    hasSubtitle, hasComments, metadata, subs.  Python 2 only (popen2).
    """
    import popen2

    media = []
    for media_path in ReSubSearchPaths:
        for root, dirs, names in os.walk(media_path):
            # fix: original rebound os.walk's 'files' variable as the glob
            # pattern; use a distinct name for clarity.
            for pattern in ["*.m4v"]:
                media.extend(glob.glob(root + "/" + pattern))

    for path in media:
        # NOTE(review): SublerCLI is invoked through a shell command string;
        # a path containing '"' would break quoting -- consider subprocess
        # with an argument list if this ever sees untrusted names.
        pipes = popen2.popen3(HandleBarBinPath + '/bin/SublerCLI -source "' + path + '" -listtracks')
        tracks = pipes[0].readlines()
        pipes[0].close()
        # Skip files that already carry at least one subtitle track.
        if len(filter(hasSubtitle, tracks)) > 0:
            continue

        pipes = popen2.popen3(HandleBarBinPath + '/bin/SublerCLI -source "' + path + '" -listmetadata')
        comments = pipes[0].readlines()
        pipes[0].close()  # fix: the original never closed this handle (fd leak)
        matches = filter(hasComments, comments)
        if len(matches) > 0:
            # The original filename is embedded in the Comments metadata field.
            start = len("Comments: Original filename ")
            original_name = matches[0][start:].strip()
            md = metadata(original_name, 0)
            sub = subs(original_name, md.guess["type"])
            sub.downloadSubtitles()
            md.addSubtitles(path)
def parseFailedFiles(): media = [] for root, dirs, files in os.walk(HandleBarConfigPath + DebugFailedPath): for files in ["*.m4v"]: fp = root + "/" + files media.extend(glob.glob(fp)) for path in media: md = metadata(path, 0) """ SUBS """ sub = subs(path, md.guess["type"]) sub.downloadSubtitles() result = md.parseFile() if result != True: print "FAILED --" return False moveToItunes(md.filePath)