def handle_video_dir(d, dry_run=True):
    """Sort a downloaded video directory into the local video library.

    If one file dominates the directory (>= 95% of the total size) it is
    treated as the single feature: it is renamed from guessit metadata and
    moved to Videos/Movies (movies) or "Videos/TO WATCH" (episodes), and the
    directory is removed.  Otherwise every file is renamed in place and the
    whole directory is moved to "Videos/TO WATCH".

    :param d: directory to process — assumes a path.py-style object with
        ``walkfiles``/``files``/``rmtree`` (TODO confirm against caller)
    :param dry_run: when True, no filesystem changes are made by the helpers
    """
    file_sizes = [f.size for f in d.walkfiles()]
    # * 1.0 forces float division below (Python 2 style)
    total_size = sum(file_sizes) * 1.0
    # NOTE(review): raises ZeroDivisionError/IndexError on an empty directory
    size_ratios = sorted([s / total_size for s in file_sizes], reverse=True)
    if size_ratios[0] >= 0.95:
        # single dominant file: the largest one is the video
        vid = sorted(d.walkfiles(), key=lambda f: f.size)[-1]
        info = guessit.guess_video_info(str(vid))
        if info["type"] == "movie":
            fp = vid.rename("%s%s" % (info["title"], vid.ext))
            move_file(fp, HOME_DIR("Videos/Movies"), dry_run)
        elif info["type"] == "episode":
            fname = "%(series)s S%(season)02dE%(episodeNumber)02d" % info
            fp = vid.rename("%s%s" % (fname, vid.ext))
            move_file(fp, HOME_DIR("Videos/TO WATCH"), dry_run)
        #remove the directory
        if not dry_run:
            d.rmtree()
    else:
        #multiple video files, rename them
        for f in d.files():
            rename(f, dry_run)
        #move the directory
        if not dry_run:
            fp = rename(d, dry_run)
            fp.move(HOME_DIR("Videos/TO WATCH"))
def main() -> None:
    """Entry point: archive the previous output, scrape the site, clean up.

    Rotates OUTPUT_FILE to a dated name, then drives the scraper through
    login, mail discovery, scraping, saving and logout, and finally removes
    the temporary PAGE_URLS file.
    """
    # keep the previous run's output under a dated name
    utils.rename(OUTPUT_FILE, utils.get_date())
    browser = set_scraper()
    site_login(browser)
    get_mail_link(browser)
    scrape_mail(browser)
    save_result(browser)
    site_logout(browser)
    # drop the intermediate URL list
    utils.remove(PAGE_URLS)
    print("Scraping has finished successfully.")
def fix_file_names(dir):
    """
    The current version of xslproc doesn't correctly handle spaces. We have
    to manually go through the result set and decode encoded spaces (%20).
    """
    utils.log('Fixing encoded file names...')
    for root, _dirs, names in os.walk(dir):
        for fname in names:
            if "%20" not in fname:
                continue
            decoded = fname.replace("%20", " ")
            utils.rename(utils.log,
                         os.path.join(root, fname),
                         os.path.join(root, decoded))
def generate():
    """Run the COCO dataset-preparation step selected by ``opt.do``.

    Reads the YAML config named by ``opt.cfg`` and dispatches to the
    matching ``utils`` helper ('filter', 'split', 'concat', 'match' or
    'rename').  Unknown actions are silently ignored, as before.
    """
    # Fix: close the config file instead of leaking the handle from
    # yaml.load(open(...)).
    with open(opt.cfg) as cfg_file:
        config = yaml.load(cfg_file, Loader=yaml.FullLoader)
    # opt.do holds a single action, so the checks are mutually exclusive.
    if opt.do == 'filter':
        utils.coco_filter(label_num=opt.n, config=config)
    elif opt.do == 'split':
        utils.coco_split(config=config)
    elif opt.do == 'concat':
        utils.concat_label(config=config)
    elif opt.do == 'match':
        utils.label_match_image(config=config)
    elif opt.do == 'rename':
        utils.rename(config=config)
def correct_ac3_delay(delaycut, file_in, file_out, delay, bitrate):
    '''
    Just correct a single delay.

    Runs delaycut with -startcut on the negated delay, writing to a
    temporary ".part1" file, then moves that file over ``file_out``.

    :param delaycut: path to the delaycut executable
    :param file_in: source AC3 file
    :param file_out: destination for the corrected file
    :param delay: delay in ms; negated for delaycut's -startcut
    :param bitrate: unused here; kept for interface compatibility with
        callers (see _fuck_the_orange_bricks)
    '''
    # Fix: dropped the unused ``file_out_2`` / '.part2' temp name.
    file_out_1 = file_in + '.part1'
    args = [delaycut, '-i', file_in,
            '-startcut', str(-delay),
            '-o', file_out_1]
    _run_delaycut(args)
    rename(file_out_1, file_out)
def clone(self, origin):
    """Clone the remote homefiles repository for *origin* into place.

    Low-level ``git`` errors are translated into this module's exception
    types; the freshly cloned directory is then moved to ``self.repo_path``.
    """
    remote_url = self._make_remote_url(origin)
    try:
        self.git.clone(remote_url, dry_run=self.dry_run)
    except git.NotAuthorizedToClone:
        raise NotAuthorizedToClone(
            'Permission denied. Add SSH key to GitHub.')
    except git.RepoAlreadyExists:
        raise RepoAlreadyExists('.homefiles repo already exists')
    except git.GitExecutableNotFound:
        raise GitExecutableNotFound('git needs to be installed first')
    # git clones into a directory named after the repo; relocate it
    cloned_dir = remote_url.split('/')[-1].replace('.git', '')
    utils.rename(cloned_dir, self.repo_path, dry_run=self.dry_run)
def _audio_correction(episode, source_ifo, src_dir, dest_dir, demux_map,
                      novid=False):
    '''
    DB 138
    DB 137 and 138 are on the same VID. 137 plays properly, but 138 has
    an audio delay if the episode is ripped by PGC. Need to rip the audio
    from the VID, trim it, then combine it with the OP for proper audio.

    Steps: rip OP audio by VID, rip episode audio by VID, retime the
    episode audio to start_frame, concatenate both into AudioFile_80.ac3,
    then demux the video normally by PGC with audio suppressed.
    '''
    op_vid, ep_vid = demux_map['complex']['vid']
    start_frame = demux_map['complex']['start']
    # get OP audio
    logger.debug('Ripping OP audio...')
    _run_pgcdemux(
        episode.pgcdemux, source_ifo, dest_dir, 'vid', op_vid, None, None,
        novid=True)
    # rename the file
    # files_index() presumably rescans dest_dir; [0] assumes exactly one
    # audio track was demuxed — TODO confirm
    op_audio = files_index(dest_dir)['audio'][0]
    op_newfname = os.path.join(dest_dir, 'op_audio.ac3')
    rename(op_audio, op_newfname)
    # get episode audio
    logger.debug('Ripping episode audio...')
    _run_pgcdemux(
        episode.pgcdemux, source_ifo, dest_dir, 'vid', ep_vid, None, None,
        novid=True)
    ep_audio = files_index(dest_dir)['audio'][0]
    # trim the audio
    logger.debug('Trimming audio...')
    ep_newfname = os.path.join(dest_dir, 'ep_audio.ac3')
    # 448 looks like the AC3 bitrate in kbps — TODO confirm with retime_ac3
    retime_ac3(
        episode, ep_audio, ep_newfname, 448,
        offset_override=[{'frame': 0, 'offset': start_frame}])
    # smash them together
    logger.debug('Combining audio...')
    final_file = os.path.join(dest_dir, 'AudioFile_80.ac3')
    combine_files([op_newfname, ep_newfname], final_file)
    logger.debug('Audio processing complete.')
    # normal demux
    logger.debug('Demuxing normally from now on.')
    _run_pgcdemux(
        episode.pgcdemux, source_ifo, dest_dir, 'pgc', None,
        demux_map['pgc'], None, novid=novid, noaud=True)
def fix_file_names(dir):
    """
    The current version of xslproc doesn't correctly handle spaces. We have
    to manually go through the result set and decode encoded spaces (%20).
    """
    utils.log('Fixing encoded file names...')
    for root, _subdirs, entries in os.walk(dir):
        encoded = [e for e in entries if e.find("%20") > -1]
        for entry in encoded:
            utils.rename(utils.log,
                         os.path.join(root, entry),
                         os.path.join(root, entry.replace("%20", " ")))
def successors(self, node):
    """Yield specializations of the clause in ``node`` for the search.

    Two successor families are produced: (1) binding a single clause
    variable to its matched constant, and (2) adding one generalized
    literal drawn from a positive example's bottom clause.  Each successor
    is scored by coverage with a small complexity penalty.
    """
    h = node.state
    constraints, c_length, pset, nset, gensym = node.extra
    if len(pset) == 0:
        return
    # pick one covered positive example (and its mapping) at random
    p, pm = choice(pset)
    p_index = build_index(p)
    operator = Operator(tuple(('Rule', )), h.union(constraints), [])
    found = False
    # only the first match is used; the loop exists to detect "no match"
    for m in operator.match(p_index, initial_mapping=pm):
        reverse_m = {m[a]: a for a in m}
        # bottom clause of p under the inverse substitution
        pos_partial = set([rename(reverse_m, x) for x in p])
        found = True
        break
    if not found:
        return
    # specialize current variables using pset?
    for var in m:
        limited_m = {var: m[var]}
        new_h = frozenset([subst(limited_m, l) for l in h])
        new_pset, new_nset = test_coverage(new_h, constraints, pset, nset)
        new_c_length = c_length + 1
        # 0.01 weights clause length lightly against coverage
        score = self.score(len(new_pset), len(new_nset),
                           0.01 * new_c_length)
        # negated: the search minimizes cost
        yield Node(new_h, node, ('specializing var', var, m[var]),
                   -1 * score,
                   extra=(constraints, new_c_length, new_pset, new_nset,
                          gensym))
    # add new literals from pset
    for l in pos_partial:
        if l not in h:
            # replace arguments with fresh variables from gensym
            l = generate_literal(l[0], len(l) - 1, gensym)
            # l = generalize_literal(l, gensym)
            new_h = h.union([l])
            new_pset, new_nset = test_coverage(new_h, constraints, pset,
                                               nset)
            new_c_length = c_length + 1
            score = self.score(len(new_pset), len(new_nset),
                               0.01 * new_c_length)
            yield Node(new_h, node, ('add literal', l), -1 * score,
                       extra=(constraints, new_c_length, new_pset,
                              new_nset, gensym))
def fix_audio(delaycut, file_in):
    '''
    Attempt to fix broken AC3 files.

    Runs delaycut with CRC fixing enabled, keeps the original as
    "<name>.old.ac3", and puts the repaired stream under the original name.
    '''
    logger.info('Fixing %s', file_in)
    parent = os.path.dirname(file_in)
    base = os.path.basename(file_in)
    file_out = os.path.join(parent, base.replace('.ac3', '.fixed.ac3'))
    file_in_old = os.path.join(parent, base.replace('.ac3', '.old.ac3'))
    _run_delaycut(
        [delaycut, '-i', file_in, '-o', file_out, '-fixcrc', 'silence'])
    # keep the broken original, then promote the fixed file
    rename(file_in, file_in_old)
    rename(file_out, file_in)
    logger.info('Fix complete.')
def untrack(self, path):
    """Stop tracking *path*: replace its symlink with the real file and
    remove the repo copy from git.
    """
    link_path = utils.truepath(path)
    if not os.path.exists(link_path):
        raise Exception("Path '%s' not found" % link_path)
    if not os.path.islink(link_path):
        raise Exception("Path '%s' is not a symlink" % link_path)
    real_path = os.path.realpath(link_path)
    undo_log = []
    utils.remove_symlink(link_path, dry_run=self.dry_run,
                         undo_log=undo_log)
    try:
        utils.rename(real_path, link_path, dry_run=self.dry_run)
    except:
        # roll back the symlink removal before re-raising
        utils.undo_operations(undo_log)
        raise
    self.git.rm(real_path)
    self.git.commit(message="Untracking '%s'" % path)
def track(self, path, bundle=None): """Track a file or a directory.""" # We don't use kwarg default, because None represents default to # callers bundle = bundle or 'Default' src_path = utils.truepath(path) is_directory = os.path.isdir(src_path) if self.root_path not in src_path: raise Exception('Cannot track files outside of root path') bundle_path = os.path.join(self.repo_path, bundle) dst_path = os.path.join( bundle_path, utils.relpath(self.root_path, src_path)) undo_log = [] dst_dir = os.path.dirname(dst_path) if not os.path.exists(dst_dir): utils.makedirs(dst_dir, dry_run=self.dry_run, undo_log=undo_log) try: utils.rename(src_path, dst_path, dry_run=self.dry_run, undo_log=undo_log) try: utils.symlink(dst_path, src_path, dry_run=self.dry_run) except: utils.undo_operations(undo_log) raise except: utils.undo_operations(undo_log) raise self.git.add(dst_path) if is_directory: marker = self._track_directory(dst_path) self.git.add(marker) self.git.commit(message="Tracking '%s'" % path)
def build_ui_caps(self):
    """Build the episode map from the directory and refresh the list widget.

    Walks the virtual filesystem produced by make_temp_fs, records one
    metadata dict per file in ``self.capsmap`` (keyed by original name),
    then updates ``self.li`` in place: reusing existing items, trimming
    extras, or appending new ones.
    """
    dirr = self.dirr
    with OSFS(dirr) as f:
        data = make_temp_fs(f)
    self.vfs = data
    capsmap = {}
    vfs = self.vfs
    # vfs.tree()
    for path, _, files in vfs.walk():
        for i in files:
            dd = {}
            nn = i.name
            pp = rename(nn)
            dd['fixname'] = nn
            dd['cap'] = pp.episode
            dd['season'] = pp.season
            # each virtual file's text content is the original path
            opth = vfs.gettext(join(path, nn))
            oon = split(opth)[1]
            dd['original'] = oon
            dd['ext'] = pp.ext.lower()
            dd['vpath'] = join(path, nn)
            dd['state'] = True
            dd['fold'] = split(path)[1]
            capsmap[oon] = dd
    self.capsmap = capsmap
    # "numbers only" display mode — TODO confirm meaning of id 3
    nonly = self.move.checkedId() == 3
    li = self.li
    lic = li.count()
    cps = list(capsmap.values())
    cpl = len(cps)
    if cpl <= lic:
        # fewer (or equal) entries than list items: reuse, then trim
        for n, i in enumerate(cps):
            name = i['fixname']
            if nonly:
                name = i['cap'] + i['ext']
            ll = li.item(n)
            ll.setText(name + "\t" + i['original'])
        for i in range(lic - cpl):
            ll = li.takeItem(0)
            del ll
    else:
        # more entries than list items: reuse all, then append the rest
        for i in range(lic):
            name = cps[i]['fixname']
            if nonly:
                # Fix: was i['cap'] + i['ext'] — i is an int index here
                name = cps[i]['cap'] + cps[i]['ext']
            ll = li.item(i)
            ll.setText(name + "\t" + cps[i]['original'])
        for i in range(cpl - lic):
            name = cps[lic + i]['fixname']
            if nonly:
                # Fix: was i['cap'] + i['ext'] — i is an int index here
                name = cps[lic + i]['cap'] + cps[lic + i]['ext']
            li.addItem(name + "\t" + cps[lic + i]['original'])
def check_inputs(self, inputs, delete):
    """Validate *inputs*, then delete or rename a model file.

    Shows a warning box when any required input is empty; otherwise runs
    ``utils.delete_model`` (delete mode, first input only) or
    ``utils.rename`` (rename mode) and reports the outcome in a message box.
    """
    # empty input list, or any required field blank, is invalid
    if not inputs:
        invalid_inputs = True
    elif delete:
        invalid_inputs = inputs[0] == ""
    else:
        invalid_inputs = any(i == "" for i in inputs)
    alert = QtWidgets.QMessageBox()
    if invalid_inputs:
        # error message only appears once
        alert.setText("Input must be non-empty")
        alert.setWindowTitle("Invalid input")
        alert.setIcon(QtWidgets.QMessageBox.Warning)
    elif delete:
        successful_delete, error_msg = utils.delete_model(inputs[0])
        if successful_delete:
            alert.setText("File deleted")
            alert.setWindowTitle("Confirmation")
            alert.setIcon(QtWidgets.QMessageBox.Information)
        else:
            alert.setText("File not deleted. " + str(error_msg))
            alert.setWindowTitle("Error")
            alert.setIcon(QtWidgets.QMessageBox.Warning)
    else:
        # rename
        new_name = os.path.join(os.path.dirname(inputs[0]), inputs[1])
        successful_rename, error_msg = utils.rename(new_name, inputs[0])
        if successful_rename:
            alert.setText("File renamed")
            alert.setWindowTitle("Confirmation")
            alert.setIcon(QtWidgets.QMessageBox.Information)
        else:
            alert.setText("File not renamed. " + str(error_msg))
            alert.setWindowTitle("Error")
            alert.setIcon(QtWidgets.QMessageBox.Warning)
    alert.setStandardButtons(QtWidgets.QMessageBox.Ok)
    alert.exec_()
def test_rename():
    """testing the renaming functionality"""
    folder = path("/tmp")
    cases = (
        ("Arrow.S01E20.HDTV.x264-LOL.mp4", "Arrow S01E20.mp4"),
        ("Techsnap-0106.mp4", "Techsnap-0106.mp4"),
        ("[HorribleSubs] Aku no Hana - 03 [720p].mkv",
         "Aku No Hana 03.mkv"),
        #anime with version numbers
        ("[HorribleSubs] Aku no Hana - 03v2 [720p].mkv",
         "Aku No Hana 03.mkv"),
        ("[HorribleSubs] Aku no Hana - 03 [720p].mkv",
         "Aku No Hana 03.mkv"),
        ("Fluent 2012_ Steve Souders, _Your Script Just Killed My Site_(720p_H.264-AAC).mp4",
         "Fluent 2012 - Steve Souders, Your Script Just Killed My Site.mp4"),
    )
    for original, expected in cases:
        result = rename(folder.joinpath(original), dry_run=True)
        eq_(str(result.name), expected)
def find_nexts(self, top='/', deep=0, maxdeep=2):
    """Recursively scan *top* for video files that are newer episodes than
    the ones recorded in ``self.caps_list``; matches are appended to
    ``self.results`` as (series, filename, directory, display-name) tuples.

    :param top: directory to scan in ``self.filesystem``
    :param deep: current recursion depth (0 resets ``self.results``)
    :param maxdeep: maximum recursion depth
    """
    if deep == 0:
        self.results = []
    # print(top)
    if deep > maxdeep:
        return
    # if self.logger:
    #     self.logger.emit(top, INFORMATION)
    dirs, nondirs = [], []
    for name in self.filesystem.scandir(top):
        if name.is_dir:
            dirs.append(name)
        elif splitext(name.name)[1].lower() in video_formats:
            nondirs.append(name)
    # print(dirs,nondirs)
    for fil in nondirs:
        pp = rename(fil.name)
        if pp.error:
            # Fix: was parse(j.name) — j is undefined at this point, which
            # raised NameError whenever rename() failed.
            pp = parse(fil.name)
        t1 = ''
        t2 = 0
        try:
            if pp.is_video:
                if pp.episode:
                    t1 = transform(pp.title)
                    fill = t1
                    if pp.season:
                        fill += ' - ' + str(pp.season) + \
                            'x' + str(pp.episode)
                    else:
                        fill += ' - ' + str(pp.episode)
                    fill += pp.ext
                else:
                    continue
                t2 = pp.episode
            else:
                continue
        except KeyError:
            if self.logger:
                self.logger.emit("Error procesando: " + fil.name, WARNING)
            continue
        # fuzzy-match the parsed title against known series names
        bedd = 100
        gap = 2
        near = ''
        for j in self.caps_list.keys():
            edd = editDistance(t1, j, True)
            if edd <= gap and edd < bedd:
                near = j
                bedd = edd
                if edd == 0:
                    break
        if near != '':
            # episode may be 'SxE'-style; keep only the episode part
            if isinstance(t2, str):
                if 'x' in t2:
                    t2 = t2.split('x')[1]
            if int(t2) > self.caps_list[near]:
                best = (near, fil.name, top, fill)
                self.results.append(best)
                if self.logger:
                    self.logger.emit('Encontrado: ' + str(best),
                                     INFORMATION)
    for name in sorted(dirs, key=skey):
        path = join(top, name.name)
        if not self.filesystem.islink(path):
            try:
                self.find_nexts(path, deep + 1, maxdeep)
            except (PermissionError, fs.errors.DirectoryExpected) as e:
                # print(e)
                self.logger.emit(str(e), ERROR)
def optimize_clause(h, constraints, pset, nset):
    """
    Returns the set of most specific generalization of h that do NOT
    cover x.

    Builds, from one covered positive example, the space of literal
    variablizations reachable from h, encodes h as a vector over that
    space, and searches it with simulated annealing; the best clause
    found is returned.
    """
    c_length = clause_length(h)
    p_covered, n_covered = test_coverage(h, constraints, pset, nset)
    p_uncovered = [p for p in pset if p not in p_covered]
    n_uncovered = [n for n in nset if n not in n_covered]
    initial_score = clause_score(clause_accuracy_weight, len(p_covered),
                                 len(p_uncovered), len(n_covered),
                                 len(n_uncovered), c_length)
    # one covered positive example (and its mapping), chosen at random
    p, pm = choice(p_covered)
    pos_partial = list(compute_bottom_clause(p, pm))
    # print('POS PARTIAL', pos_partial)
    # TODO if we wanted we could add the introduction of new variables to the
    # get_variablizations function.
    # index -> [None (drop literal), ground literal, variablizations...]
    possible_literals = {}
    for i, l in enumerate(pos_partial):
        possible_literals[i] = [None, l] + [v for v in
                                            get_variablizations(l)]
    partial_literals = set(
        [l for i in possible_literals for l in possible_literals[i]])
    # literals of h not explained by the bottom clause of p
    additional_literals = h - partial_literals
    if len(additional_literals) > 0:
        p_index = build_index(p)
        operator = Operator(tuple(('Rule', )), h.union(constraints), [])
        # only the first match is needed
        for add_m in operator.match(p_index, initial_mapping=pm):
            break
        additional_lit_mapping = {
            rename(add_m, l): l for l in additional_literals
        }
        for l in additional_lit_mapping:
            new_l = additional_lit_mapping[l]
            # NOTE(review): debug prints left in; presumably intentional
            print(pos_partial)
            print(add_m)
            print(l)
            print(new_l)
            possible_literals[pos_partial.index(l)].append(new_l)
    # literal -> (vector index, choice index)
    reverse_pl = {
        l: (i, j)
        for i in possible_literals
        for j, l in enumerate(possible_literals[i])
    }
    # encode h as a choice vector over possible_literals
    clause_vector = [0 for i in range(len(possible_literals))]
    for l in h:
        i, j = reverse_pl[l]
        clause_vector[i] = j
    clause_vector = tuple(clause_vector)
    # (number of alternative choices, vector index) per position
    flip_weights = [(len(possible_literals[i]) - 1, i)
                    for i in possible_literals]
    # size = 1
    # for w, _ in flip_weights:
    #     size *= (w + 1)
    # print("SIZE OF SEARCH SPACE:", size)
    num_successors = sum([w for w, c in flip_weights])
    temp_length = 2 * num_successors
    # print("TEMP LENGTH", temp_length)
    # print('INITIAL SCORE', initial_score)
    problem = ClauseOptimizationProblem(
        clause_vector, initial_cost=-1 * initial_score,
        extra=(possible_literals, flip_weights, constraints, pset, nset))
    # for sol in hill_climbing(problem):
    for sol in simulated_annealing(problem, temp_length=temp_length):
        # print("SOLUTION FOUND", sol.state)
        # return the first (best) solution yielded by the search
        return build_clause(sol.state, possible_literals)
# Draw the 'merge' bars (plus one extra 'custom' value, chopgm) next to the
# existing group at offset 0.4.
cbars = ax.bar(ind + 0.4, df['merge'].values.tolist() + [chopgm],
               width, ecolor='k', color=u.getcolor(4), edgecolor='k')
# Put the x labels if the value is less than (-10)
c = 0
for t in df['paths']:
    if t < -10:
        # annotate strongly negative values inside the plot, rotated 90°
        ax.text(c + 0.05, -5, str(int(t)), fontsize=20, rotation=90)
    c += 1
ax.set_ylabel('Y Label', fontsize=32)
ax.set_xticks(ind + 0.4)
# Adding extra name to the x labels
# rotation='degree' for rotating the text
ax.set_xticklabels(u.rename(df['name']) + ['custom'])
# Set the background color
# ax.set_axis_bgcolor('white')
# horizontal grid only, black lines on white background
plt.gca().xaxis.grid(False)
plt.gca().yaxis.grid(True, color='black')
# hide tick marks on both axes ('off' is the legacy string form)
plt.tick_params(axis='x', which='both', bottom='off', top='off')
plt.tick_params(axis='y', which='both', right='off')
ax.spines['bottom'].set_color('black')
ax.spines['top'].set_color('black')
ax.spines['right'].set_color('black')
ax.spines['left'].set_color('black')
def compute_bottom_clause(self, x, mapping):
    """Return *x* with *mapping* inverted and applied to every literal,
    as a frozenset.
    """
    inverse = {v: k for k, v in mapping.items()}
    return frozenset(rename(inverse, literal) for literal in x)
def compute_bottom_clause(x, mapping):
    """Invert *mapping* and apply it to each literal of *x*; return the
    resulting literals as a frozenset.
    """
    inverse = {value: key for key, value in mapping.items()}
    # print("REVERSEM", inverse)
    renamed = [rename(inverse, literal) for literal in x]
    return frozenset(renamed)
def gen_specializations(self, node):
    """Yield scored specializations of the clause in ``node``.

    Like ``successors``: (1) bind one variable to its matched constant, or
    (2) add one generalized literal from a positive example's bottom
    clause.  Coverage bookkeeping (covered/uncovered pos and neg sets) is
    threaded through each child's ``extra``.
    """
    h = node.state
    (constraints, c_length, p_covered, p_uncovered, n_covered,
     n_uncovered, gensym) = node.extra
    if len(p_covered) == 0:
        return
    # one covered positive example (and its mapping), chosen at random
    p, pm = choice(p_covered)
    p_index = build_index(p)
    operator = Operator(tuple(('Rule', )), h.union(constraints), [])
    found = False
    # only the first match is used; the loop detects "no match"
    for m in operator.match(p_index, initial_mapping=pm):
        reverse_m = {m[a]: a for a in m}
        # bottom clause of p under the inverse substitution
        pos_partial = set([rename(reverse_m, x) for x in p])
        found = True
        break
    if not found:
        return
    # specialize current variables using pset?
    for var in m:
        limited_m = {var: m[var]}
        new_h = frozenset([subst(limited_m, l) for l in h])
        # coverage can only shrink, so test against the covered subsets
        new_p_subset, new_n_subset = test_coverage(new_h, constraints,
                                                   p_covered, n_covered)
        new_p_covered = new_p_subset
        new_p_uncovered = p_uncovered + [
            p for p in p_covered if p not in new_p_subset
        ]
        new_n_covered = new_n_subset
        new_n_uncovered = n_uncovered + [
            n for n in n_covered if n not in new_n_subset
        ]
        new_c_length = c_length + 1
        score = self.score(len(new_p_covered), len(new_p_uncovered),
                           len(new_n_covered), len(new_n_uncovered),
                           new_c_length)
        # negated score: the search minimizes cost
        yield Node(new_h, None, None, -1 * score,
                   extra=(constraints, new_c_length, new_p_covered,
                          new_p_uncovered, new_n_covered, new_n_uncovered,
                          gensym))
    # add new literals from pset
    for l in pos_partial:
        if l not in h:
            # replace arguments with fresh variables from gensym
            l = generate_literal(l[0], len(l) - 1, gensym)
            # l = generalize_literal(l, gensym)
            new_h = h.union([l])
            new_p_subset, new_n_subset = test_coverage(
                new_h, constraints, p_covered, n_covered)
            new_p_covered = new_p_subset
            new_p_uncovered = p_uncovered + [
                p for p in p_covered if p not in new_p_subset
            ]
            new_n_covered = new_n_subset
            new_n_uncovered = n_uncovered + [
                n for n in n_covered if n not in new_n_subset
            ]
            new_c_length = c_length + 1
            score = self.score(len(new_p_covered), len(new_p_uncovered),
                               len(new_n_covered), len(new_n_uncovered),
                               new_c_length)
            yield Node(new_h, None, None, -1 * score,
                       extra=(constraints, new_c_length, new_p_covered,
                              new_p_uncovered, new_n_covered,
                              new_n_uncovered, gensym))
#folders where the files will be renamed RENAME_DIRS = [ HOME_DIR('Videos/WATCHED'), DOWNLOAD_DIR, ] MOVE_DIRS = [ HOME_DIR('Videos/WATCHED'), ] #renaming files for d in RENAME_DIRS: with d: #do the renaming of files for f in d.files(): rename(f, DEBUG) #check for subtitles and merge them if possible sub_files = [f for f in d.files() if f.ext in (".sub", ".srt")] for sub_file in sub_files: sub_name = sub_file.namebase #search for the corresponding video file for g in d.files(): if sub_name.startswith(g.namebase) and sub_file != g: #add the subtitles file and remove the originals add_subtitles_mkv(g, sub_file, DEBUG) break #copy files if the storage is connected if HDD_DIR.exists(): #try copying
sys.stderr.write("Only linux based systems are currently supported!") exit(1) # args = { 'device': 'wifi0', 'ssid': 'NAOCAM', 'password':'******' } args = parseArguments() log_directory = os.path.join(os.path.dirname(__file__), 'logs/') Logger.setupLogger(args.quiet, args.verbose, args.syslog, log_directory) # call different functions based on the arguments if args.gc_check: CheckGameController(args.gc_source) elif args.check_bt != False: CheckBluetooth(args.check_bt if args.check_bt else 'D6:B9:D4:D7:B7:40') elif args.rename: rename(args.videos[0], args.logs[0]) else: if args.config: # use config for network setup try: import config args.ssid = config.ssid args.passwd = config.passwd args.retries = config.retries args.mac = config.mac args.gc_source = config.gc_source except: # no config available! Logger.error("No config available OR invalid config!") exit(2)
# Writing text on top of the bars c = 0 ll = df['data'].values.tolist() for t in df['data']: if (int(t) > 50): ax.text(c + 0.65, 62 + 0.2, str(int(t)), fontsize=18, rotation=0) else: ax.text(c + 0.65, ll[c] + 0.2, str(int(t)), fontsize=18, rotation=0) c += 1 # Set X label values ax.set_ylabel('Y LABEL', fontsize=32) ax.set_xticks(ind + 0.8) # Put the labels from 'app' coulmn ax.set_xticklabels(u.rename(df['app'])) ax.set_facecolor('white') plt.gca().xaxis.grid(False) plt.gca().yaxis.grid(True, color='black') plt.tick_params(axis='x', which='both', bottom='off', top='off') plt.tick_params(axis='y', which='both', right='off') ax.spines['bottom'].set_color('black') ax.spines['top'].set_color('black') ax.spines['right'].set_color('black') ax.spines['left'].set_color('black') # Saving the plot fig.savefig('test.pdf', facecolor=fig.get_facecolor(), bbox_inches='tight')
def _interlacing_correction(episode, source_ifo, src_dir, dest_dir,
                            demux_map, novid=False):
    '''
    DB 26, DB 41, DBZ 24
    Parts of these episodes are encoded interlaced, but flagged progressive.
    Need to rip each piece one at a time, correct the flag, then combine.

    Cells marked 'fix' require a manual step in the ReStream GUI; the user
    is prompted interactively.  All cell videos are then merged with
    DGIndex and the audio is demuxed normally by PGC.
    '''
    cells = demux_map['complex']['cells']
    output_files = []
    for cell in cells:
        # demux cell
        logger.debug('Ripping cell %s...', cell)
        _run_pgcdemux(episode.pgcdemux, source_ifo, dest_dir, 'cell',
                      [cell['vid']], None, [cell['cell']], noaud=True)
        # assumes exactly one fresh video file in dest_dir — TODO confirm
        output = files_index(dest_dir)['video'][0]
        renamed = os.path.join(
            dest_dir, str(cell['vid']) + '_' + str(cell['cell']) + '.m2v')
        rename(output, renamed)
        # fix the messed up cell
        # need to open ReStream GUI for this, ugh
        if cell['fix']:
            logger.info('Launching ReStream...')
            proc = subprocess.Popen(episode.restream)
            proc.poll()
            # user prompt
            print('\n1. In the ReStream window, copy & paste\n\n'
                  ' {0}\n\n'
                  ' into "MPEG-2 Source" box at the top.\n\n'
                  ' In the command prompt, you can double left-click the line to highlight it,\n'
                  ' then right-click to copy it.\n\n'
                  '2. Uncheck the checkbox which says '
                  '\"Frametype progressive.\"\n'
                  '3. Click the button which says \"Write!\"\n'
                  '4. When finished, you may close the '
                  'ReStream window.\n'.format(renamed))
            input('Once completed, press enter to continue...')
            # take the fixed one and run with it
            # ReStream writes its output as "<name>.0<ext>"
            fixed_cell = ('%s.0%s' % os.path.splitext(renamed))
            logger.debug('Looking for fixed cell...')
            if not os.path.isfile(fixed_cell):
                logger.error(
                    '%s not found! Please follow the ReStream instructions.',
                    fixed_cell)
                sys.exit(1)
            logger.debug('Fixed cell found! Continuing.')
            output_files.append(fixed_cell)
        else:
            output_files.append(renamed)
    # use dgindex to merge the files
    logger.info('Combining cells...')
    final_file = os.path.join(dest_dir, 'VideoFile.m2v')
    # dgindex adds .demuxed to the file so we have to rename it
    final_dgd = os.path.join(dest_dir, 'VideoFile.demuxed.m2v')
    args = [episode.dgindex, '-i']
    args.extend(output_files)
    args.extend(['-od', os.path.splitext(final_file)[0],
                 '-minimize', '-exit'])
    with open(os.devnull, 'w') as devnull:
        dgindex = subprocess.call(args, stdout=devnull,
                                  stderr=subprocess.STDOUT)
    check_abort(dgindex, 'DGIndex')
    logger.info('Cell combination finished.')
    rename(final_dgd, final_file)
    # normal demux
    logger.debug('Demuxing normally from now on.')
    _run_pgcdemux(episode.pgcdemux, source_ifo, dest_dir, 'pgc', None,
                  demux_map['pgc'], None, novid=True)
def _fuck_the_orange_bricks(episode, source_ifo, src_dir, dest_dir,
                            demux_map, novid=False):
    '''
    DBZ OBs
    Each episode is split into VIDs for OP, recap, episode, and ED.
    Each of these VIDs has its own independent audio delay. They are mostly
    the same so it only needs to be corrected once -- and correcting each
    one would actually produce gaps in the audio.
    Need to rip each VID, correct the delay, then combine video & audio.
    Delay only needs to be corrected on Season 1
    '''
    output_files = {'v': [], 'a': []}
    # 3 audio tracks, so 3 indices each
    output_files['a'] = [[], [], []]
    # per-track flag: has the one-time delay correction been applied yet?
    delay_set = [False, False, False]
    for v_idx, vid in enumerate(demux_map['vid']):
        # demux audio/video
        logger.debug('Demuxing VID %s.', vid)
        _run_pgcdemux(episode.pgcdemux, source_ifo, dest_dir, 'vid',
                      [vid], None, None, novid=False)
        files = files_index(dest_dir)
        output_v = files['video'][0]
        renamed_v = os.path.join(dest_dir, str(vid) + '.m2v')
        rename(output_v, renamed_v)
        output_files['v'].append(renamed_v)
        logger.debug('Demxed to %s', renamed_v)
        # get audio delay from pgcdemux logfile
        logfile = os.path.join(dest_dir, 'LogFile.txt')
        delay = ''
        with open(logfile, 'r') as log:
            delay_section = False
            for line in log:
                if 'Audio Delays' in line:
                    delay_section = True
                if not delay_section:
                    continue
                # first Audio_1= entry after the section header
                delay = re.findall(r'Audio_1=(.+)', line)
                if (delay):
                    delay = int(delay[0])
                    break
        logger.debug('Audio delay is %s ms.', delay)
        # correct audio delay
        for idx, a in enumerate(files['audio']):
            # only English/US tracks are kept — TODO confirm intent
            if demux_map['audio'][idx] in ['en', 'us']:
                if demux_map['audio'][idx] == 'en':
                    bitrate = '51_448'
                else:
                    bitrate = '20_192'
                output_a = a
                renamed_a = os.path.join(
                    dest_dir, str(vid) + '_' + str(idx) + '.ac3')
                # season 1 only (episode < 40): correct the delay once per
                # track, on the first VID if it has a positive delay,
                # otherwise on the second
                if (int(episode.number) < 40 and
                        ((v_idx == 0 and delay > 0 and
                          not delay_set[idx]) or
                         (v_idx == 1 and not delay_set[idx]))):
                    delay_set[idx] = True
                    logger.debug('Correcting the audio for VID %s (#%s)',
                                 vid, v_idx)
                    correct_ac3_delay(
                        episode.delaycut, output_a, renamed_a, delay,
                        bitrate)
                else:
                    rename(output_a, renamed_a)
                output_files['a'][idx].append(renamed_a)
    # combine audio
    for idx, a_files in enumerate(output_files['a']):
        final_file = os.path.join(
            dest_dir, 'AudioFile_8' + str(idx) + '.ac3')
        combine_files(a_files, final_file)
    final_file = os.path.join(dest_dir, 'VideoFile.m2v')
    # dgindex adds .demuxed to the file so we have to rename it
    final_dgd = os.path.join(dest_dir, 'VideoFile.demuxed.m2v')
    args = [episode.dgindex, '-i']
    args.extend(output_files['v'])
    args.extend(['-od', os.path.splitext(final_file)[0],
                 '-minimize', '-exit'])
    with open(os.devnull, 'w') as devnull:
        dgd = subprocess.call(args, stdout=devnull,
                              stderr=subprocess.STDOUT)
    check_abort(dgd, 'DGIndex')
    logger.info('VID combination finished.')
    rename(final_dgd, final_file)
def make_temp_fs(fff):
    """Build an in-memory filesystem with the final organization.

    Walks the top level of *fff*, groups video files into per-title
    folders (fuzzy-matched with best_ed), and places subtitle files next
    to the video they most likely belong to; unmatched subtitles go to
    '/subs'.  Each virtual file's text content is the original path.

    :param fff: source filesystem (PyFilesystem object)
    :return: a MemoryFS mirroring the desired layout
    """
    # make virtual filesystem in ram with the final
    # organization of the filesystem
    ff = cache_directory(read_only(fff))
    ram = MemoryFS()
    # for path, dirs, files in ff.walk():
    posprocsub = []        # subtitle file names, handled after the videos
    fils = set()           # titles of all videos placed so far
    files = ff.scandir('/')
    path = '/'
    folds = set([i.name for i in files if i.is_dir])
    files = ff.scandir('/')
    for j in files:
        if not j.is_file:
            continue
        if splitext(j.name)[1] in subs_formats:
            posprocsub.append(j.name)
            continue
        pp = rename(j.name)
        if pp.error:
            pp = parse(j.name)
        try:
            if pp.is_video:
                fold = transform(pp.title)
                # reuse an existing folder when the name is close enough
                if not (fold in folds):
                    fold = best_ed(fold, folds)
                folds.add(fold)
                pth = join('/', fold)
                if not ram.exists(pth):
                    ram.makedir(fold)
                fils.add(pp.title)
                if pp.episode:
                    if pp.season:
                        fill = pp.title + ' - ' + \
                            str(pp.season) + 'X' + str(pp.episode)
                    else:
                        fill = pp.title + ' - ' + str(pp.episode)
                else:
                    fill = pp.title
                if pp.episode_title:
                    fill += ' - ' + str(pp.episode_title)
                fill += pp.ext
                ram.settext(join(pth, fill), join(path, j.name))
        except KeyError:
            continue
    for j in posprocsub:
        pp = rename(j)
        if pp.error:
            # Fix: was parse(j.name) — j is a plain string here (names were
            # appended above), so .name raised AttributeError.
            pp = parse(j)
        fold = transform(pp.title)
        pth = join('/', fold)
        if pp.episode:
            if pp.season:
                fill = pp.title + ' - ' + \
                    str(pp.season) + 'X' + str(pp.episode)
            else:
                fill = fold + ' - ' + str(pp.episode)
        else:
            fill = fold
        if pp.episode_title:
            fill = fill + ' - ' + str(pp.episode_title)
        fill += pp.ext
        if ram.exists(pth):
            # a folder with the subtitle's title already exists
            ram.settext(join(pth, fill), join(path, j))
        elif len(fils) == 1:
            # only one video title: assume the subtitle belongs to it
            pth = join('/', list(fils)[0])
            ram.settext(join(pth, fill), join(path, j))
        elif len(fils) > 1:
            # pick the closest title within edit distance < 3
            best = None
            gap = 3
            for i in fils:
                n = editDistance(i, fold)
                if n < 3 and n < gap:
                    best = i
                    gap = n
                elif n == 0:
                    best = i
                    break
            if best:
                pth = join('/', best)
                ram.settext(join(pth, fill), join(path, j))
            else:
                if not (ram.exists('/subs')):
                    ram.makedir('/subs')
                ram.settext(join('/subs', j), join(path, j))
        else:
            # no videos at all: park the subtitle in /subs
            if not (ram.exists('/subs')):
                ram.makedir('/subs')
            ram.settext(join('/subs', j), join(path, j))
    return ram