def cli():
    """Entry point: dispatch the `image` / `info` sub-commands."""
    parser_main = ArgumentParser()
    # With no sub-command given, fall back to printing usage.
    parser_main.set_defaults(func=parser_main.print_usage)
    sub_parsers = parser_main.add_subparsers(help='sub-command')

    # sub_parser: img
    parser_img = sub_parsers.add_parser('image', help='convert image file format')
    parser_img.add_argument('path', nargs='?', help='image file path')
    parser_img.add_argument('-o', '--outdir', help='output directory')
    parser_img.add_argument('-ext', '--ext', help='to this format like png gif ...')
    parser_img.set_defaults(func=main_img)

    # sub_parser: info
    parser_info = sub_parsers.add_parser('info', help='recognise file format')
    parser_info.add_argument('path', nargs='?', help='file path')
    parser_info.set_defaults(func=main_info)

    parse_result = parser_main.parse_args()
    # Drop the dispatch callable and unset options before forwarding as kwargs.
    args = remove_value(remove_key(parse_result.__dict__, 'func'), None)
    try:
        parse_result.func(**args)
    except Exception as ex:
        color.print_err(type(ex), ex)
        color.print_err('Invalid args')
def cut(fn, output, start_time, end_time, debug=False):
    """Cut the span [start_time, end_time] out of video `fn` into `output`.

    Uses stream copy (`-c:v copy -c:a copy`), so no re-encoding happens.
    Times are given as strings and converted via video_time_str2int.
    """
    if not assert_output_has_ext(output):
        color.print_err('Failed.')
        return

    start_time_int = video_time_str2int(start_time)
    end_time_int = video_time_str2int(end_time)
    duration = end_time_int - start_time_int
    if duration <= 0:
        color.print_err('end-time:%s is before than start-time:%s'
                        % (end_time, start_time))
        return

    # Work on a uuid-renamed input so odd path characters can't break ffmpeg.
    fn_tmp = path2uuid(fn)
    output_tmp = path2uuid(output, rename=False, quiet=True)
    try:
        # NOTE(review): ffmpeg writes to `output` directly while `output_tmp`
        # is only registered and deregistered -- confirm this asymmetry with
        # the other ffmpeg helpers (convert writes to the tmp name) is intended.
        cmd = 'ffmpeg -ss %d -i "%s" -t %d -c:v copy -c:a copy "%s" ' \
              % (start_time_int, fn_tmp, duration, output)
        info_lines, err_lines = exec_cmd(cmd)
        if debug:
            print(cmd)
            print('Info: %s' % '\n'.join(err_lines))
        path2uuid(output_tmp, d=True, rename=False)
    except Exception:
        raise
    else:
        color.print_ok('cut the video %s to %s from %s to %s'
                       % (fn, output, start_time, end_time))
    finally:
        # Always restore the input file's original name.
        path2uuid(fn_tmp, d=True)
def main(command, unit='s'):
    """Run `command` in a shell, echo its stdout/stderr, report elapsed time."""
    with timeme(unit=unit) as t:
        out_lines, err_lines = exec_cmd(command)
    color.print_info('\n'.join(out_lines))
    color.print_err('\n'.join(err_lines))
    color.print_info(t)
async def _main_fetch(loop, p, url, num, outdir, captcha_pattern, ext=None):
    """Download `num` captcha images from `url` into `outdir`.

    Each saved file name is built from `captcha_pattern` by substituting
    `$(NO)` (sequence number) and `$(UUID)`.  Every saved path is written,
    one per line, to subprocess `p`'s stdin for post-processing.
    `ext` is accepted for signature compatibility; it is consumed by the caller.
    """
    async def fetch(session, url):
        # 5-second cap on each individual request.
        with async_timeout.timeout(5):
            async with session.get(url) as response:
                return await response.read()

    # FIX: the session was created and never closed (connection-pool leak);
    # `async with` guarantees it is released even on error.
    async with aiohttp.ClientSession(loop=loop, headers=headers) as session:
        async for i in AsyncIteratorWrapper(range(num)):
            captcha_name = captcha_pattern.replace('$(NO)', str(i)) \
                                          .replace('$(UUID)', str(uuid.uuid4()))
            path = os.path.join(outdir, captcha_name)
            # Retry until one fetch succeeds within the timeout.
            while True:
                try:
                    content = await fetch(session, url)
                except asyncio.TimeoutError as ex:
                    color.print_err(ex)
                else:
                    break
            with open(path, 'wb') as fw:
                fw.write(content)
            print('download %s' % path)
            p.stdin.write(path.encode() + b'\n')
            p.stdin.flush()
def getPage(self, pageNum):
    """Fetch page `pageNum` of the thread and return it decoded as UTF-8.

    Returns None when the connection to BaiDuTieBa fails.
    """
    # Build the page URL.
    url = self.baseURL + self.seeLZ_str + '&pn=' + str(pageNum)
    try:
        request = urllib.request.Request(url, headers=headers)
        response = urllib.request.urlopen(request, timeout=600)
        content = None
        # Keep reading until a complete body arrives.
        # NOTE(review): re-reading the same response object after an
        # IncompleteRead may not make progress -- confirm this retry helps.
        while True:
            try:
                content = response.read()
            except http.client.IncompleteRead as ex:
                color.print_warn(ex)
                color.print_info('retry...')
            else:
                break
        # Return the body as UTF-8 text.
        return content.decode('utf-8')
    except urllib.error.URLError as e:
        # Connection failure: report and signal with None.
        if hasattr(e, "reason"):
            color.print_err(
                "Failed to connect to BaiDuTieBa, Error Reason", e.reason)
        return None
def cli():
    """Entry point of the git `untrack` tool (docopt-driven)."""
    arguments = docopt(__doc__, version=__version__)
    if not in_toplevel_of_repo():
        color.print_err('Not in toplevel of a git repo.')
        return

    if arguments['list']:
        # Show the n biggest objects recorded in history.
        n = int(arguments['-n'])
        result = max_file_hash_name(n)
        if result is not None:
            for item in result:
                color.print_info(item)
    elif arguments['rm']:
        path_pattern = arguments['<path-pattern>']
        force = bool(arguments['-f'])
        result = remove_from_history(path_pattern, force)
        color.print_ok(result)
        color.print_warn('run `untrack confirm` to confirm the op')
    elif arguments['reset']:
        try:
            reset()
        except ErrorReturnCode_1 as ex:
            color.print_err(ex)
        except RebundantResetException as ex:
            color.print_warn(ex)
        else:
            color.print_ok('reset.')
    elif arguments['confirm']:
        confirm_remove()
        color.print_ok('confirm remove.')
def main(path, n, encoding=None, no_more=False):
    """Print the first `n` lines of `path`, guessing the charset if needed.

    encoding: explicit codec; when None it is detected with chardet and only
    trusted above 0.7 confidence. no_more: dump directly instead of paging.
    """
    try:
        with open(path, 'rb') as f:
            head_lines = fileecho.head(f, n)
        raw = b'\n'.join(head_lines)
        detect_result = chardet.detect(raw)
        if encoding is not None:
            codec = encoding
        elif detect_result['confidence'] > 0.7:
            codec = detect_result['encoding']
        else:
            # Detection too uncertain: ask the user to specify the codec.
            color.print_warn('Not Known encoding, may be %s.\n'
                             'Please point it explictly'
                             % detect_result['encoding'])
            return
        text = raw.decode(codec, errors='ignore')
        if no_more:
            color.print_info(text)
        else:
            more(text, print_color=True)
    except FileNotFoundError:
        color.print_err('%s not found' % path)
    except PermissionError:
        color.print_err('Permission denied: %s' % path)
def recognise_tesseract(path, args=None):
    """Run tesseract OCR on the image at `path` and print the result."""
    try:
        result = rg.tesseract(path, args)
    except Exception as ex:
        color.print_err(ex)
    else:
        # Print the recognised text together with its length.
        color.print_info(result, len(result))
async def _main_fetch(loop, p, url, num, outdir, captcha_pattern, ext=None):
    """Download `num` captcha images from `url` into `outdir`.

    File names come from `captcha_pattern` with `$(NO)` and `$(UUID)`
    substituted.  Each saved path is piped (one per line) into subprocess
    `p` for post-processing.  `ext` is unused here; the caller consumes it.
    """
    async def fetch(session, url):
        # Cap each request at 5 seconds.
        with async_timeout.timeout(5):
            async with session.get(url) as response:
                return await response.read()

    # FIX: the ClientSession was never closed (connection-pool leak);
    # `async with` releases it even when an error escapes the loop.
    async with aiohttp.ClientSession(loop=loop, headers=headers) as session:
        async for i in AsyncIteratorWrapper(range(num)):
            captcha_name = captcha_pattern.replace(
                '$(NO)', str(i)).replace('$(UUID)', str(uuid.uuid4()))
            path = os.path.join(outdir, captcha_name)
            # Retry until one fetch succeeds within the timeout.
            while True:
                try:
                    content = await fetch(session, url)
                except asyncio.TimeoutError as ex:
                    color.print_err(ex)
                else:
                    break
            with open(path, 'wb') as fw:
                fw.write(content)
            print('download %s' % path)
            p.stdin.write(path.encode() + b'\n')
            p.stdin.flush()
def __init__(self, dbname=None, debug=False):
    """Open (or create) the proxy sqlite database.

    dbname: database file path; None or the reserved name selects the
    bundled default under `resource_path`.
    Raises Exception (chained to the sqlite error) when the db can't open.
    """
    self.debug = debug
    self.header = headers
    # Schema: one row per proxy endpoint, keyed by (IP, PORT).
    create_tb = ('\n'
                 ' CREATE TABLE IF NOT EXISTS PROXY\n'
                 ' (DATE DATETIME NOT NULL,\n'
                 ' IP CHARACTER(15),\n'
                 ' PORT INTEGER,\n'
                 ' REGION TEXT NOT NULL,\n'
                 ' PRIMARY KEY(IP, PORT)\n'
                 ' );\n'
                 ' ')
    if dbname is None or dbname == RESERVERD_DB_NAME:
        dbname = os.path.join(resource_path, RESERVERD_DB_NAME)
    try:
        conn = sqlite3.connect(dbname)
    except sqlite3.OperationalError as opex:
        color.print_err(opex)
        color.print_err('dbname : {0:s}'.format(dbname))
        # FIX: was a bare `raise Exception()` -- carry a message and chain
        # the original sqlite error so callers can see the cause.
        raise Exception('cannot open proxy db: {0}'.format(dbname)) from opex
    else:
        self.conn = conn
        self.dbname = dbname
        conn.execute(create_tb)
        color.print_ok('conect to the db {0}'.format(dbname))
    # Prefer the lxml-based parser when lxml is importable.
    try:
        import lxml
    except ImportError:
        self.parse_ip_port_region = proxy_ip.parse_ip_port_region_httpparser
    else:
        self.parse_ip_port_region = proxy_ip.parse_ip_port_region_lxml
def main(path, n, encoding=None, no_more=False):
    """Print the last `n` lines of `path`, guessing the charset if needed.

    encoding: explicit codec; when None it is detected with chardet and only
    trusted above 0.7 confidence. no_more: dump directly instead of paging.
    """
    try:
        with open(path, 'rb') as f:
            tail_lines = fileecho.tail(f, n)
        raw = b'\n'.join(tail_lines)
        detect_result = chardet.detect(raw)
        if encoding is not None:
            codec = encoding
        elif detect_result['confidence'] > 0.7:
            codec = detect_result['encoding']
        else:
            # Detection too uncertain: ask the user to specify the codec.
            color.print_warn('Not Known encoding, may be %s.\n'
                             'Please point it explictly'
                             % detect_result['encoding'])
            return
        text = raw.decode(codec, errors='ignore')
        if no_more:
            color.print_info(text)
        else:
            more(text, print_color=True)
    except FileNotFoundError:
        color.print_err('%s not found' % path)
    except PermissionError:
        color.print_err('Permission denied: %s' % path)
def loop(self, page=2, timeout=4):
    """Crawl pages 1..page, logging (and surviving) per-page failures."""
    for page_no in range(1, page + 1):
        try:
            self.getContent(page_no, timeout=timeout)
        except Exception as ex:
            color.print_err(ex)
            if self.debug:
                traceback.print_stack()
def convert(fn, output, size: str = None, rate: (int, float) = None,
            fps: (int, float) = None):
    """Re-encode video `fn` into `output` with optional property changes.

    size: target resolution as 'WIDTHxHEIGHT' (e.g. '1280x720')
    rate: playback-speed multiplier applied to the source frame rate
    fps:  explicit target frame rate
    ffmpeg only runs when at least one property (or the container format)
    actually changes; otherwise the file is just renamed.
    """
    if not assert_output_has_ext(output):
        color.print_err('Failed.')
        return
    # Work on uuid-renamed names so odd path characters can't break ffmpeg.
    fn_tmp = path2uuid(fn, quiet=True)
    output_tmp = path2uuid(output, quiet=True, rename=False)
    try:
        json_obj = load_video_info_json(fn_tmp)
        video_site, audio_site = get_video_audio_info_site_injson(json_obj)
        color.print_info('start convert %s to %s' % (fn, output))
        cmd_list = ['ffmpeg', '-i', fn_tmp]
        need_convert = False
        if rate is not None and rate != 1:
            # Speed change: feed the input at source_fps * rate.
            source_origin_fps = load_fps_from_json(json_obj)
            source_fps = source_origin_fps * float(rate)
            # Inserted before '-i' so '-r' acts as an *input* option.
            cmd_list.insert(1, '-r')
            cmd_list.insert(2, str(source_fps))
            need_convert = True
        if size is not None:
            width = json_obj['streams'][video_site]['width']
            height = json_obj['streams'][video_site]['height']
            origin_size = '%sx%s' % (width, height)
            if origin_size != size:
                color.print_info('convert size from %s to %s'
                                 % (origin_size, size))
                cmd_list.append('-s')
                cmd_list.append(size)
                need_convert = True
        if fps is not None:
            # Compare at 3-decimal precision to dodge float noise.
            origin_fps = round(load_fps_from_json(json_obj), 3)
            if round(fps, 3) != origin_fps:
                cmd_list.append('-r')
                cmd_list.append(str(fps))
                color.print_info('convert fps from %f to %f'
                                 % (origin_fps, fps))
                need_convert = True
        # A container change alone (e.g. .mkv -> .mp4) also needs ffmpeg.
        _, ext_i = os.path.splitext(fn)
        _, ext_out = os.path.splitext(output)
        if ext_i != ext_out:
            need_convert = True
        if need_convert:
            cmd_list.append(output_tmp)
            for line in CommandRunner.run(' '.join(cmd_list)):
                print(line)
        else:
            # Nothing to change: a rename is enough.
            os.rename(fn_tmp, output_tmp)
        # Map the temporary output back to the requested name.
        path2uuid(output_tmp, d=True)
    except Exception as ex:
        # Drop the pending output mapping, then re-raise for the caller.
        path2uuid(output_tmp, d=True, rename=False)
        raise
    finally:
        # Always restore the input file's original name.
        path2uuid(fn_tmp, d=True)
def shell_loop():
    """Main read-eval loop of the mini shell; never dies on a command error."""
    status = SHELL_STATUS_RUN
    while status == SHELL_STATUS_RUN:
        display_cmd_prompt()
        ignore_signals()
        try:
            line = sys.stdin.readline()
            tokens = preprocess(tokenize(line))
            status = execute(tokens)
        except BaseException as err:
            # Equivalent to the classic bare `except:` + sys.exc_info() idiom:
            # report the error and keep the shell alive.
            color.print_err(err)
def main(n, ext):
    """Read `n` image paths from stdin and convert each to format `ext`.

    Each converted file replaces its source (original is removed).
    """
    total = int(n)
    for idx in range(total):
        # stdin is already decoded with the correct encoding by the parent.
        path = input().strip()
        newpath = path + ext
        try:
            with Image.open(path) as img_obj:
                img_obj.save(newpath)
                os.remove(path)
        except Exception as ex:
            color.print_err(ex)
        color.print_ok('fetched %s. no.%d' % (newpath, idx + 1))
def cli():
    """Entry point of jvirtualenv: manage JDK tags and project activation."""
    global GLOBAL_MODE
    global CONFIG_DIR
    global TAG_LIST_CONFIG_PATH
    global SEARCH_PATTERN_CONFIG_PATH
    arguments = docopt(__doc__, version=__version__)
    GLOBAL_MODE = bool(arguments['--global'])
    # Resolve the config directory: system-wide or per-user.
    if GLOBAL_MODE:
        if iswin():
            CONFIG_DIR = os.path.join(os.path.dirname(get_home_dir()),
                                      'All Users', '.jvirtualenv.d')
        else:
            CONFIG_DIR = '/etc/jvirtualenv.d'
    else:
        CONFIG_DIR = os.path.join(get_home_dir(), '.jvirtualenv.d')
    TAG_LIST_CONFIG_PATH = os.path.join(CONFIG_DIR, 'tag-list.json')
    SEARCH_PATTERN_CONFIG_PATH = os.path.join(CONFIG_DIR,
                                              'search-pattern.json')

    if arguments['list-tag']:
        pretty_print_config(get_config())
    elif arguments['reinit-tag']:
        if not iswin():
            # Refresh the locate database before rescanning for JDKs.
            with sh.contrib.sudo:
                sh.updatedb()
        init_config()
        color.print_ok('reinit config in %s' % TAG_LIST_CONFIG_PATH)
    elif arguments['--java']:
        version_info = find_version(arguments['--java'])
        if version_info is None:
            color.print_err('No matched tag')
            return
        project_path = arguments['<project>']
        write_activate_file(project_path, version_info['home'],
                            version_info['tag'], bool(arguments['--force']))
def main_fetch(url, num, outdir, captcha_pattern, ext=None):
    """Fetch `num` captchas and pipe each saved path into a converter process.

    Spawns convert_image_p.py as a subprocess, then drives the async
    downloader `_main_fetch` on the event loop.
    """
    kwargs = locals()  # snapshot of all parameters of main_fetch
    if ext is not None and re.match(pattern_ext, ext) is None:
        color.print_err('error -ext arg, should be .png, .jpg ect.')
        return
    # NOTE(review): when ext is None the argv below contains None, which
    # Popen rejects -- confirm callers always pass ext here.
    pyfile_path = os.path.join(os.path.dirname(__file__),
                               'convert_image_p.py')
    p = Popen([sys.executable, pyfile_path, str(num), ext],
              stdin=PIPE, stderr=sys.stderr, stdout=sys.stdout, bufsize=1000)
    # FIX: removed `p.stdin.encoding = 'utf8'` -- BufferedWriter does not
    # accept attribute assignment (AttributeError); paths are written as
    # bytes in _main_fetch anyway.
    loop = asyncio.get_event_loop()
    # FIX: `url` was passed both positionally and inside **kwargs, raising
    # "TypeError: got multiple values for argument 'url'".
    tasks = [asyncio.ensure_future(_main_fetch(loop, p, **kwargs))]
    loop.run_until_complete(asyncio.wait(tasks))
    p.kill()
def start(self):
    """Crawl the whole tie: resolve metadata, then write every page to file."""
    color.print_info('start analyse..., url {0:s}'.format(self.baseURL))
    content = self.getPage(0)
    pageNum = self.getPageNum(content)
    title = self.getTitle(content)
    self.openFile(title)
    # Write LZ ID and NAME header: the first post div carries the LZ's id.
    pattern = re.compile('<div id="post_content_.*?>(.*?)</div>')
    matches = re.finditer(pattern, content)
    first_post = next(matches)
    id_pattern = r'(?<=post_content_)(\d)+(?=")'
    lz_id = re.search(id_pattern, first_post.group(0)).group(0)
    lz_name = BDTB.get_name_by_id(lz_id)
    splitLine = "=" * 80 + '\n'
    self.file.write(splitLine)
    self.file.write('LZ {0} {1}\n'.format(lz_id, lz_name))
    self.file.write(splitLine)
    if pageNum is None:
        color.print_err(
            "the URL {0:s} might be invalidated".format(self.baseURL))
        return
    try:
        color.print_info(
            "This tie {0:s} has {1:d} pages".format(title, pageNum))
        for page_no in range(1, int(pageNum) + 1):
            color.print_info("write to page {0:d}".format(page_no))
            page = self.getPage(page_no)
            content = self.getContent(page)
            self.writeData(content)
    except IOError as e:
        # Write failure (出现写入异常).
        color.print_err(e)
    else:
        color.print_ok("Successful!")
    finally:
        self.closeFile()
def start(self):
    """Crawl the whole tie: header first, then every page appended to file."""
    color.print_info('start analyse..., url {0:s}'.format(self.baseURL))
    content = self.getPage(0)
    pageNum = self.getPageNum(content)
    title = self.getTitle(content)
    self.openFile(title)
    # Write LZ ID and NAME: the first post-content div names the LZ.
    pattern = re.compile('<div id="post_content_.*?>(.*?)</div>')
    post_iter = re.finditer(pattern, content)
    first_post = next(post_iter)
    id_pattern = r'(?<=post_content_)(\d)+(?=")'
    lz_id = re.search(id_pattern, first_post.group(0)).group(0)
    lz_name = BDTB.get_name_by_id(lz_id)
    splitLine = "=" * 80 + '\n'
    self.file.write(splitLine)
    self.file.write('LZ {0} {1}\n'.format(lz_id, lz_name))
    self.file.write(splitLine)
    if pageNum is None:
        color.print_err("the URL {0:s} might be invalidated".format(
            self.baseURL))
        return
    try:
        color.print_info("This tie {0:s} has {1:d} pages".format(
            title, pageNum))
        for page_no in range(1, int(pageNum) + 1):
            color.print_info("write to page {0:d}".format(page_no))
            page = self.getPage(page_no)
            content = self.getContent(page)
            self.writeData(content)
    except IOError as e:
        # Write failure (出现写入异常).
        color.print_err(e)
    else:
        color.print_ok("Successful!")
    finally:
        self.closeFile()
def try_get_root(self, num, in_out='nn', timeout=5):
    """Open xicidaili proxy-list page `num`, preferring an installed proxy.

    in_out: site section ('nn' = 国内高匿). Returns the response object on
    success, None on failure.
    """
    url = "http://www.xicidaili.com/{0}/{1:d}".format(in_out, num)
    req = urllib.request.Request(url, headers=headers)
    # First try through a working proxy, if one can be installed.
    result = proxy_ip.install_proxy_opener(test_url=url)
    if result is not None:
        try:
            resp = urllib.request.urlopen(req, timeout=timeout)
        except Exception as ex:
            color.print_err(ex)
            if self.debug:
                traceback.print_stack()
            return None
        else:
            color.print_ok('Connect server {0} OK!'.format(url))
            return resp
    # Fall back to a direct connection (globally installed no-proxy opener).
    try:
        proxy_support = urllib.request.ProxyHandler(proxies=None)
        opener = urllib.request.build_opener(proxy_support)
        urllib.request.install_opener(opener)
        resp = urllib.request.urlopen(req, timeout=timeout)
    except urllib.error.URLError:
        return None
    except Exception as ex:
        color.print_err(ex)
        if self.debug:
            traceback.print_stack()
    else:
        color.print_ok('{0} Connect server {1} OK!'.format('origin ip', url))
        return resp
def main_fetch(url, num, outdir, captcha_pattern, ext=None):
    """Fetch `num` captchas and pipe each saved path into a converter process.

    Spawns convert_image_p.py as a subprocess, then runs the async
    downloader `_main_fetch` to completion on the event loop.
    """
    kwargs = locals()  # snapshot of all parameters of main_fetch
    if ext is not None and re.match(pattern_ext, ext) is None:
        color.print_err('error -ext arg, should be .png, .jpg ect.')
        return
    # NOTE(review): a None ext ends up inside Popen's argv, which Popen
    # rejects -- confirm callers always supply ext.
    pyfile_path = os.path.join(os.path.dirname(__file__),
                               'convert_image_p.py')
    p = Popen([sys.executable, pyfile_path, str(num), ext],
              stdin=PIPE, stderr=sys.stderr, stdout=sys.stdout, bufsize=1000)
    # FIX: removed `p.stdin.encoding = 'utf8'` -- attribute assignment on a
    # BufferedWriter raises AttributeError; _main_fetch writes bytes anyway.
    loop = asyncio.get_event_loop()
    # FIX: `url` was passed positionally AND inside **kwargs ->
    # "TypeError: got multiple values for argument 'url'".
    tasks = [asyncio.ensure_future(_main_fetch(loop, p, **kwargs))]
    loop.run_until_complete(asyncio.wait(tasks))
    p.kill()
def extract(fn, output, type, **other_kwargs):
    """Extract one stream (audio/video/subtitle) or a single frame from `fn`.

    type: 'audio' | 'video' | 'subtitle' | 'frame'
    other_kwargs: for 'frame', 'start-time' selects the frame position.
    """
    if not assert_output_has_ext(output):
        color.print_err('Failed.')
        return
    # uuid-renamed names keep odd path characters away from ffmpeg.
    fn_tmp = path2uuid(fn, quiet=True)
    output_tmp = path2uuid(output, quiet=True, rename=False)
    extract_cmd_list = ['ffmpeg', '-i', fn_tmp]
    if type == 'audio':
        extract_cmd_list.extend(['-acodec', 'copy', '-vn', output_tmp])
    elif type == 'video':
        extract_cmd_list.extend(['-vcodec', 'copy', '-an', output_tmp])
    elif type == 'subtitle':
        extract_cmd_list.extend(['-scodec', 'copy', '-an', '-vn', output_tmp])
    elif type == 'frame':
        # Grab exactly one frame at the requested position.
        start_time = video_time_str2int(other_kwargs['start-time'])
        extract_cmd_list.extend(['-y', '-f', 'image2', '-ss', str(start_time),
                                 '-vframes', '1', output_tmp])
    else:
        color.print_err('error type: %s' % type)
        return
    # print(extract_cmd_list)
    for line in CommandRunner.run(' '.join(extract_cmd_list)):
        print(line)
    # Restore the input's original name.
    path2uuid(fn_tmp, d=True)
    try:
        path2uuid(output_tmp, d=True)
    except:
        # NOTE(review): bare except -- presumably path2uuid raises when ffmpeg
        # produced no output file; confirm which exception type is expected.
        path2uuid(output_tmp, d=True, rename=True)
        color.print_err('extract Failed.')
    else:
        color.print_ok('extract Done.')
def isAlive(self, ip, port, region='中国大陆', test_url=None, timeout=4,
            allow_delete=True):
    """Probe proxy ip:port by fetching `test_url` through it.

    Returns True when the proxy answers 200 and the page is not an
    'unauthorized' interstitial; False otherwise.
    """
    proxy = {'http': '{0}:{1}'.format(ip, port)}
    print(proxy['http'], region)
    # FIX: removed the dead local `inside` region set -- it was built but
    # never used (region-based dispatch was never implemented here).
    # Install the proxy globally for urllib.
    proxy_support = urllib.request.ProxyHandler(proxy)
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)
    # google.com not work ... -> probe a reachable site instead.
    if test_url is None:
        test_url = "http://www.qq.com"
    req = urllib.request.Request(test_url, headers=headers)
    try:
        resp = urllib.request.urlopen(req, timeout=timeout)
        if resp.code == 200:
            import bs4
            content = resp.read()
            soup = bs4.BeautifulSoup(content, 'html.parser')
            # Some broken proxies answer 200 with an 'Unauthorized' page.
            s = soup.find('h1')
            if s is not None and \
                    s.contents[0].lower().find('unauthorized') != -1:
                color.print_err("Can't use")
                return False
            else:
                color.print_ok("work")
                return True
        else:
            color.print_err("not work")
            return False
    except Exception as ex:
        color.print_err("Not work")
        if self.debug:
            color.print_err(ex)
        return False
def main(path, pwd, check_username=False, username=None):
    """Interactive password-keeper shell (query/add/del/update/list accounts).

    NOTE(review): this block was mangled by secret-scrubbing -- the literal
    `'******'` below replaced the code that constructed `pwd_keeper` and
    `base_prompt`; it is preserved verbatim and is NOT valid Python.
    Recover the missing statements from version control before editing.
    """
    if pwd is None:
        pwd = getpass.getpass('Input your master password: '******'<%s>' % username
    interactive_help = split_blankline(__doc__)[0]
    while True:
        input_result = input(base_prompt).strip()  # STRIP !!
        if 'q' == input_result:
            return
        try:
            op = input_result.split(' ')[0]
            if input_result.startswith('query') or op == '?':
                label = split_whitespace(input_result)[1]
                all_match = pwd_keeper.query_account(label)
                if all_match is None:
                    print('None')
                    # Suggest labels sharing a prefix/suffix with the query.
                    possiable_labels = []
                    for each_label in pwd_keeper.get_labels():
                        each_label2 = each_label.lower()
                        label2 = label.lower()
                        if each_label2.startswith(label2) or \
                                each_label2.endswith(label2):
                            possiable_labels.append(each_label)
                    if len(possiable_labels) != 0:
                        print('Maybe: ', end='')
                        for each_label in possiable_labels:
                            color.print_info(each_label, end='')
                        color.print_info()
                else:
                    for item in all_match:
                        try:
                            color.print_info(
                                'usrename:{0} passowrd:{1}'.format(*item))
                        except UnicodeEncodeError:
                            # Garbled output usually means a wrong master pwd.
                            username, password = item[0], \
                                item[1]
                            color.print_err('usrename:{0} passowrd:{1}'.format(
                                username, password))
                            color.print_err(
                                'Warning: Master Password {0} may be Error'.
                                format(desensitization_pwd(pwd)))
            elif op in ('add', '+'):
                _, label, username, password = split_whitespace(input_result)
                pwd_keeper.add_account(label, username, password)
            elif input_result.startswith('del-account'):
                _, label, username = split_whitespace(input_result)
                pwd_keeper.del_account(label, username)
            elif input_result.startswith('del-label'):
                _, label = split_whitespace(input_result)
                pwd_keeper.del_label(label)
            elif input_result.startswith('update-account'):
                _, label, username, password = split_whitespace(input_result)
                pwd_keeper.update_account(label, username, password)
            elif input_result.startswith('update-label'):
                _, old_label, new_label = split_whitespace(input_result)
                pwd_keeper.update_label(old_label, new_label)
            elif input_result.startswith(
                    'list') or input_result == 'ls' or input_result == 'l':
                [
                    color.print_info(label)
                    for label in pwd_keeper.get_labels() if pwd_keeper
                ]
            elif op == 'h':
                color.print_info(interactive_help)
            elif input_result == '':
                pass
            else:  # '?'
                color.print_err('\nInvalid Input')
                print(input_result)
                color.print_info(interactive_help)
        except Exception:
            color.print_err('\nInvalid Input:')
            print(input_result)
            color.print_info(interactive_help)
            print()
def handle_directory_conflict(ex):
    """Report the directory-name conflict carried in `ex.args[0]`."""
    conflict_dir = ex.args[0]
    color.print_err('directory %s conflict' % conflict_dir)
def assert_output_has_ext(fn):
    """Return True when `fn` carries a file extension; complain and return
    False otherwise."""
    _, ext = os.path.splitext(fn)
    if ext:
        return True
    color.print_err('you are supposed to point to the output format explicitly!')
    return False
def cli():
    """Entry point of the text tool: charset detect, charset convert, merge.

    Driven by docopt `arguments`; every input file is opened in binary mode.
    """
    arguments = docopt(__doc__, version=minghu6.__version__)
    path_list = arguments['<filename>']
    try:
        fr_list = []
        [fr_list.append(open(path, 'rb')) for path in path_list]
    except FileNotFoundError:
        # The whole list (not the single failing path) is reported.
        color.print_err('%s not found' % path_list)
        return
    else:
        if arguments['charset']:
            # Guess and report the charset of the first file.
            fr = fr_list[0]
            result = fileecho.guess_charset(fr)
            encoding, confidence = result['encoding'], result['confidence']
            if encoding is None:
                color.print_err('unknown')
            else:
                color.print_info('{0}, {1:.2f}'.format(encoding, confidence))
            fr.close()
        elif arguments['convert']:
            fr = fr_list[0]
            path = path_list[0]
            to_charset = arguments['<to_charset>']
            from_charset = arguments['--from_charset']
            if from_charset is None:
                # Fall back to detection; only trust it above 0.7 confidence.
                result = fileecho.guess_charset(fr)
                encoding, confidence = result['encoding'], result['confidence']
                if confidence is None:
                    color.print_err('unknown from_charset, '
                                    'you must point it explicity')
                    return
                elif confidence < 0.7:
                    color.print_warn('uncertained from_charset, '
                                     'maybe %s\n'
                                     'you must point it explicity' % encoding)
                    return
                else:
                    from_charset = encoding
            # rename(name_old, name_new)
            # name_a, name_b must same driver in windows
            dir = os.path.dirname(os.path.abspath(path))
            fwn = tempfile.mktemp(dir=dir)
            # Transcode line by line into the temp file.
            with open(fwn, 'wb') as fw:
                for line in fr:
                    fw.write(line.decode(from_charset, errors='ignore')
                             .encode(to_charset, errors='ignore'))
            fr.close()
            # Replace the source (or write --output), then drop the temp file.
            if arguments['--output'] is None:
                shutil.copy(fwn, path)
            else:
                shutil.copy(fwn, arguments['--output'])
            os.remove(fwn)
        elif arguments['merge']:
            if arguments['--regex'] is not None:
                # color.print_info(arguments)
                merge_file_path_list = findlist(startdir=os.curdir,
                                                pattern=arguments['--regex'],
                                                regex_match=True,
                                                dosort=True)
            else:
                merge_file_path_list = arguments['<filename>']
            # Concatenate every input into --output, newline-separated.
            with open(arguments['--output'], 'wb') as outfile:
                for infile_path in merge_file_path_list:
                    with open(infile_path, 'rb') as infile:
                        outfile.write(infile.read())
                        outfile.write(b'\n')
                    color.print_ok('have merged file %s' % infile_path)
def main_info(path):
    """Print the recognised file format of `path` (normal and ext names)."""
    fmt = fileformat.fileformat(path)
    if fmt == fileformat.UNKNOWN_TYPE:
        color.print_err(fmt)
    else:
        color.print_info('%s<===>%s' % (fmt.normal_name, fmt.ext_name))
def merge(pattern_list, output, type, **other_kwargs):
    """Merge media files into `output` with ffmpeg.

    type: 'video' | 'audio' | 'va' (mux video+audio) | 'vs' (burn subtitle)
          | 'gif' (frames -> gif)
    other_kwargs: `isprefix` (treat patterns as filename prefixes),
                  `framerate` (gif only).
    """
    isprefix = other_kwargs.get('isprefix', False)
    if not assert_output_has_ext(output):
        color.print_err('Failed.')
        return

    base_dir = os.curdir
    merge_file_list = []
    merge_file_list2 = []
    # FIX: was `('vedio', 'audio', 'gif')` -- the 'vedio' typo sent video
    # merges into the else branch, so pattern collection never ran for them
    # (the function later tests `type == 'video'` consistently).
    if type in ('video', 'audio', 'gif'):
        for fn in os.listdir(base_dir):
            if os.path.isdir(fn):
                continue
            if fn == '.path2uuid.sqlite3':  # bookkeeping db, never merge it
                continue
            for pattern in pattern_list:
                if isprefix:
                    if fn.lower().startswith(pattern.lower()):
                        merge_file_list.append(fn)
                else:
                    if fnmatch.fnmatch(fn, pattern):
                        merge_file_list.append(fn)
    else:  # 'va', 'vs': explicit file list, order given by the caller
        merge_file_list = pattern_list

    # Sort: version-aware when a single prefix is given, stable otherwise.
    # common_prefix_pattern = r'^(\w)+\+$'
    if isprefix and len(pattern_list) == 1:
        def key(fn):
            base = os.path.splitext(os.path.basename(fn))[0]
            v = LooseVersion(base.split(pattern_list[0])[1])
            return v
    elif type in ('va', 'vs'):
        key = lambda x: 0
    else:
        key = lambda fn: fn
    merge_file_list = sorted(merge_file_list, key=key)

    color.print_info('The following file will be merged in order')
    for i, file_to_merge in enumerate(merge_file_list):
        color.print_info('%3d. %s' % (i, file_to_merge))
    if len(merge_file_list) <= 1:
        color.print_info('Do nothing.')
        return
    args = input('press enter to continue, q to quit')
    if args in ('q', 'Q'):
        return

    merge_file_tmp_list = list(map(lambda x: path2uuid(x, quiet=True),
                                   merge_file_list))
    merge_file_tmp_list2 = []
    if type == 'video':
        # Check whether all inputs agree on width/height/fps (required for
        # lossless concat); otherwise offer to convert them first.
        FileInfo = namedtuple('FileInfo', ['width', 'height', 'fps'])
        merge_file_info_list = []
        for fn in merge_file_tmp_list:
            json_obj = load_video_info_json(fn)
            video_site, audio_site = get_video_audio_info_site_injson(json_obj)
            codec_name = json_obj['streams'][video_site]['codec_name']
            width = int(json_obj['streams'][video_site]['width'])
            height = int(json_obj['streams'][video_site]['height'])
            fps = round(load_fps_from_json(json_obj), 3)
            merge_file_info_list.append(FileInfo(width, height, fps))
        if not each_same(merge_file_info_list,
                         key=lambda x: (x.width, x.height, x.fps)):
            color.print_err('width, height, fps should be same of all video')
            min_width = sorted(merge_file_info_list,
                               key=lambda x: x.width)[0].width
            min_height = sorted(merge_file_info_list,
                                key=lambda x: x.height)[0].height
            min_resolution = '%dx%d' % (min_width, min_height)
            min_fps = sorted(merge_file_info_list, key=lambda x: x.fps)[0].fps
            color.print_warn('all_to_resolution: %s' % min_resolution)
            color.print_warn('all_to_fps: %s' % min_fps)
            if askyesno('convert to fix?'):
                merge_file_tmp_list2 = list(map(
                    lambda x: add_postfix(x, 'tmp'), merge_file_tmp_list))

                def tmp(fn_tuple):
                    convert(*fn_tuple, size=min_resolution, fps=min_fps)

                list(map(lambda x: tmp(x),
                         zip(merge_file_tmp_list, merge_file_tmp_list2)))
            else:
                return
    elif type == 'audio':
        pass
    elif type == 'va':
        pass
    elif type == 'gif':
        pass

    output_tmp = path2uuid(output, rename=False, quiet=True)
    if len(merge_file_tmp_list2) == 0:
        input_file_list = merge_file_tmp_list
    else:
        input_file_list = merge_file_tmp_list2  # only for converted video
    try:
        # FIX: write the concat list via a context manager so the handle is
        # closed even when an error hits mid-write (was fw=open(); fw.close()).
        with open('.mylist', 'w') as fw:
            for fn in input_file_list:
                fw.write("file '%s' \n" % fn)
        if type in ('video', 'audio'):
            merge_cmd = 'ffmpeg -f concat -i %s -c copy %s' \
                        % ('.mylist', output_tmp)
        elif type == 'va':
            merge_cmd = 'ffmpeg -i %s -i %s -vcodec copy -acodec copy %s ' \
                        % (input_file_list[0], input_file_list[1], output_tmp)
        elif type == 'vs':
            # ffmpeg's subtitles filter needs a utf-8/ascii subtitle file.
            with open(input_file_list[1]) as f_subtitle:
                encoding = guess_charset(f_subtitle)['encoding']
            if encoding.lower() not in ('utf-8', 'ascii'):
                info, err = exec_cmd(
                    '%s -m minghu6.tools.text convert %s utf-8'
                    % (sys.executable, input_file_list[1]))
                if len(err) > 1 or err[0] != '':  # exec failed
                    color.print_err(
                        'error codec of the subtitle %s (need utf-8)')
            merge_cmd = 'ffmpeg -i %s -vf subtitles=%s %s' \
                        % (input_file_list[0], input_file_list[1], output_tmp)
        elif type == 'gif':
            framerate = other_kwargs['framerate']
            merge_cmd = 'ffmpeg -f image2 -framerate %d -i %s %s' \
                        % (int(framerate), '.mylist', output_tmp)
        for line in CommandRunner.run(merge_cmd):
            print(line)
        path2uuid(output_tmp, d=True)
    except Exception:
        raise
    else:
        color.print_ok('Done.')
    finally:
        try:
            os.remove('.mylist')
        except OSError:
            pass
        # Restore every input file's original name.
        for fn in input_file_list:
            path2uuid(fn, d=True)
def main(path, pwd, check_username=False, username=None):
    """Interactive password-keeper shell (earlier variant of the tool).

    NOTE(review): this block was mangled by secret-scrubbing -- the literal
    `'******'` below replaced the code that constructed `pwd_keeper` and
    `base_prompt`; it is preserved verbatim and is NOT valid Python.
    Recover the missing statements from version control before editing.
    """
    if pwd is None:
        pwd = getpass.getpass('Input your master password: '******'<%s>' % username
    interactive_help = split_blankline(__doc__)[0]
    while True:
        input_result = input(base_prompt).strip()  # STRIP !!
        if 'q!' in input_result:
            return
        try:
            if input_result.startswith('query'):
                label = split_whitespace(input_result)[1]
                all_match = pwd_keeper.query_account(label)
                if all_match is None:
                    print('None')
                    # Suggest labels sharing a prefix/suffix with the query.
                    possiable_labels = []
                    for each_label in pwd_keeper.get_labels():
                        each_label2 = each_label.lower()
                        label2 = label.lower()
                        if each_label2.startswith(label2) or \
                                each_label2.endswith(label2):
                            possiable_labels.append(each_label)
                    if len(possiable_labels) != 0:
                        print('Maybe: ', end='')
                        for each_label in possiable_labels:
                            color.print_info(each_label, end='')
                        color.print_info()
                else:
                    for item in all_match:
                        try:
                            color.print_info(
                                'usrename:{0} passowrd:{1}'.format(*item))
                        except UnicodeEncodeError:
                            # Garbled output usually means a wrong master pwd.
                            username, password = item[0], \
                                item[1]
                            color.print_err('usrename:{0} passowrd:{1}'.
                                            format(username, password))
                            color.print_err(
                                'Warning: Master Password {0} may be Error'.format(
                                    desensitization_pwd(pwd))
                            )
            elif input_result.startswith('add'):
                _, label, username, password = split_whitespace(input_result)
                pwd_keeper.add_account(label, username, password)
            elif input_result.startswith('del-account'):
                _, label, username = split_whitespace(input_result)
                pwd_keeper.del_account(label, username)
            elif input_result.startswith('del-label'):
                _, label = split_whitespace(input_result)
                pwd_keeper.del_label(label)
            elif input_result.startswith('update-account'):
                _, label, username, password = split_whitespace(input_result)
                pwd_keeper.update_account(label, username, password)
            elif input_result.startswith('update-label'):
                _, old_label, new_label = split_whitespace(input_result)
                pwd_keeper.update_label(old_label, new_label)
            elif input_result.startswith('list'):
                [color.print_info(label)
                 for label in pwd_keeper.get_labels() if pwd_keeper]
            elif input_result.startswith('?'):
                color.print_info(interactive_help)
            elif input_result == '':
                pass
            else:  # '?'
                color.print_err('\nInvalid Input')
                print(input_result)
                color.print_info(interactive_help)
        except ValueError:
            color.print_err('\nInvalid Input:')
            print(input_result)
            color.print_info(interactive_help)
            print()
def cli():
    """Entry point of the text tool: charset detect, charset convert, merge.

    Driven by docopt `arguments`; every input file is opened in binary mode.
    """
    arguments = docopt(__doc__, version=minghu6.__version__)
    path_list = arguments['<filename>']
    try:
        fr_list = [open(path, 'rb') for path in path_list]
    except FileNotFoundError:
        color.print_err('%s not found' % path_list)
        return
    else:
        if arguments['charset']:
            # Guess and report the charset of the first file.
            fr = fr_list[0]
            result = fileecho.guess_charset(fr)
            encoding, confidence = result['encoding'], result['confidence']
            if encoding is None:
                color.print_err('unknown')
            else:
                color.print_info('{0}, {1:.2f}'.format(encoding, confidence))
            fr.close()
        elif arguments['convert']:
            fr = fr_list[0]
            output = os.path.abspath(arguments['--output'])
            to_charset = arguments['<to_charset>']
            from_charset = arguments['--from_charset']
            if from_charset is None:
                # Fall back to detection; only trust it above 0.7 confidence.
                result = fileecho.guess_charset(fr)
                encoding, confidence = result['encoding'], result['confidence']
                if confidence is None:
                    color.print_err('unknown from_charset, '
                                    'you must point it explicity')
                    return
                elif confidence < 0.7:
                    color.print_warn('uncertained from_charset, '
                                     'maybe %s\n'
                                     'you must point it explicity' % encoding)
                    return
                else:
                    from_charset = encoding
            # FIX: the old pipeline did decode(from) -> encode(from) ->
            # decode(to) -> encode(from), i.e. it never actually transcoded
            # and corrupted data whenever the charsets disagreed; a
            # conversion is decode(from) -> encode(to), matching the sibling
            # implementation of this tool.  The stray debug print() per line
            # is removed as well.
            with open(output, 'wb') as fw:
                for line in fr:
                    fw.write(line.decode(from_charset, errors='ignore')
                                 .encode(to_charset, errors='ignore'))
            fr.close()
        elif arguments['merge']:
            if arguments['--regex'] is not None:
                merge_file_path_list = findlist(startdir=os.curdir,
                                                pattern=arguments['--regex'],
                                                regex_match=True,
                                                dosort=True)
                color.print_normal('merge file:')
                pprint(merge_file_path_list)
            else:
                merge_file_path_list = arguments['<filename>']
            # Concatenate every input into --output, newline-separated.
            with open(arguments['--output'], 'wb') as outfile:
                for infile_path in merge_file_path_list:
                    with open(infile_path, 'rb') as infile:
                        outfile.write(infile.read())
                        outfile.write(b'\n')
                    color.print_ok('have merged file %s' % infile_path)