def copy_file_to_sdcard(src_dir, dst_dir):
    """Copy files onto an SD card.

    Some devices do not read the card in file-name order (possibly in
    physical storage order?), so the card is formatted first and then
    one or more source directories are copied in, in sequence.

    :param src_dir: one directory path, or a list of directory paths
    :param dst_dir: destination root directory on the card
    """
    multiple_sources = isinstance(src_dir, list)
    source_dirs = src_dir if multiple_sources else [src_dir]
    files = []
    for source in source_dirs:
        files.extend(filex.list_file(source))
    length = len(files)
    print(f'共 {length} 个文件')
    if not files:
        return
    print(f'读取文件大小')
    sum_size = 0
    for i, file in enumerate(files):
        size = os.path.getsize(file)
        sum_size += size
        print(f'{i + 1}/{length},{file} {filex.parse_file_size(size)}')
    print(f'文件总大小 {filex.parse_file_size(sum_size)}')
    print()
    print(f'开始复制文件')
    copy_size = 0
    start_time = time.time()
    for i, file in enumerate(files):
        if multiple_sources:
            # Multiple source dirs: keep the immediate parent dir name.
            parent = os.path.dirname(file)
            dst_file = os.path.join(dst_dir, os.path.basename(parent), os.path.basename(file))
        else:
            # Single source dir: keep the path relative to it.
            dst_file = os.path.join(dst_dir, os.path.relpath(file, src_dir))
        print(
            f'复制文件 {i + 1}/{length},{filex.get_file_size_str(file)},{file} -> {dst_file}'
        )
        filex.check_and_create_dir(dst_file)
        shutil.copyfile(file, dst_file)
        # The copies are large, so report progress after every file.
        copy_size += os.path.getsize(file)
        print(
            f'已复制 {filex.parse_file_size(copy_size)}/{filex.parse_file_size(sum_size)}'
        )
        # Report elapsed/estimated time in megabytes.
        FileUtils.count_time(start_time, copy_size / 1024 / 1024, sum_size / 1024 / 1024, 'M')
def check_and_append_tips_name(file_path, tips_name_file, result_file=None):
    """Check whether the tips-name file covers every html file; append the rest.

    Used for IdeTipsAndTricks: not every file is listed there, but every
    file name still needs a translation line, so the missing entries are
    generated and appended to *result_file*.
    """
    if result_file is None:
        result_file = filex.get_result_file_name(tips_name_file, '_append')
    remaining_files = filex.list_file(file_path)
    print('共%d个文件' % len(remaining_files))
    lines = filex.read_lines(tips_name_file)
    known_names = []
    for line in lines:
        if '=' not in line:
            continue
        name = line.split('=')[0]
        known_names.append(name)
        # Only one directory level is checked; files under 'excluded' are not handled.
        expected = '%s\\%s.html' % (file_path, name)
        if expected in remaining_files:
            remaining_files.remove(expected)
        else:
            print('文件不存在%s' % expected)
    print('共有%d个tip名' % len(known_names))
    print('还缺%d个文件' % len(remaining_files))
    # Append a generated translation line for every file that had no entry.
    lines.append('\n# append\n')
    for missing in remaining_files:
        base = os.path.splitext(os.path.split(missing)[1])[0]
        word = Tips.camel_word_to_words(base)
        lines.append('%s=%s\n' % (base, word))
    filex.write_lines(result_file, lines)
def check_use_layout_res(self):
    """Check which layout resources the previously-collected java files use."""
    java_files = filex.read_lines(self.rv_java_files_path, ignore_line_separator=True)
    # R.layout.xxx references, except ones passed to setContentView().
    usage_pattern = re.compile(r'(?<!setContentView\()R\.layout\.(.*?)[;),]')
    # Layout names starting with these prefixes are filtered out.
    keep_pattern = re.compile(r'^(?!fragment|dialog|high|pop|layout|address)')
    layout_names = []
    for java_file in java_files:
        matches = usage_pattern.findall(filex.read(java_file))
        if not matches:
            continue
        print('%s 找到布局使用' % java_file)
        for layout in matches:
            if keep_pattern.search(layout):
                print(layout)
                if 'item' not in layout:
                    print('不包含 item')
                if layout not in layout_names:
                    layout_names.append(layout)
            else:
                print('过滤', layout)
    print('共使用了 %d 个文件' % len(layout_names))
    print('查找对应的 xml 文件')
    xml_files = filex.list_file(self.work_space, 'xml$')
    matched_files = []
    for layout in layout_names:
        for xml_file in xml_files:
            if os.path.splitext(os.path.split(xml_file)[1])[0] == layout:
                matched_files.append(xml_file)
                break
    print('共找到 %d 个文件' % len(matched_files))
    filex.write_lines(self.item_xml_files_path, matched_files, add_line_separator=True)
def process_jar_by_translation(translation_dir, source_dir, target_dir):
    """Back up jars according to the translation pack.

    Collects the paths of every file in *translation_dir* relative to it,
    then copies the matching files from *source_dir* to *target_dir*.
    """
    relative_names = []
    for jar_path in filex.list_file(translation_dir):
        relative_names.append(jar_path.replace(translation_dir, ''))
    TranslationFile.copy_dir(source_dir, target_dir, relative_names)
def update_dirs_by_xmind(self):
    """Synchronise the on-disk directory tree with the xmind outline.

    Compares the latest backup of the xmind file (previous layout) with
    the current xmind file (desired layout), then moves, creates and
    deletes directories so the disk matches, and finally writes a new
    backup.
    """
    files = filex.list_file(self.back_dir)
    if not files:
        print(f'没有找到备份数据')
        return
    # Backup names sort chronologically, so the last one is the newest.
    back_file = sorted(files)[-1]
    print(f'选择备份文件 {back_file}')
    origin_dirs = self.collect_xmind_topics(back_file)
    # Sort by path with longer (deeper) paths first, so moving a parent
    # directory cannot invalidate a child path processed later.
    origin_dirs = sorted(origin_dirs, key=lambda x: x.path, reverse=True)
    print(f'原始目录 {len(origin_dirs)} 个')
    print(origin_dirs)
    new_dirs = self.collect_xmind_topics(self.xmind_file_path)
    new_dirs = sorted(new_dirs, key=lambda x: x.path, reverse=True)
    print(f'新的目录 {len(new_dirs)} 个')
    print(new_dirs)
    # Abort on blank/invalid names before touching the file system.
    if not self.valid_dir_names(new_dirs):
        return
    print()
    print('1-检查是否需要移动目录')
    origin_dirs_dict = self.dirs_to_dict(origin_dirs)
    new_dirs_dict = self.dirs_to_dict(new_dirs)
    for k, v in origin_dirs_dict.items():
        if k in new_dirs_dict:
            new_v = new_dirs_dict[k]
            if v.path != new_v.path:
                if os.path.exists(v.path):
                    if not os.path.exists(new_v.path):
                        print(f'目录移动 {v.path} -> {new_v.path}')
                        shutil.move(v.path, new_v.path)
                    else:
                        # Target already exists: drop the (empty) old dir
                        # instead of moving. os.rmdir fails if non-empty.
                        print(f'目录变化 {v.path} -> {new_v.path}')
                        print(f'但目标目录已存在 {new_v.path}')
                        os.rmdir(v.path)
                else:
                    print(f'目录变化 {v.path} -> {new_v.path}')
                    print(f'但源目录已不存在 {v.path}')
        else:
            # Topic no longer exists in the xmind outline.
            print(f'目录已不存在于 xmind 中,尝试删除 {v.path}')
            self.delete_empty_dir(v.path)
    print('\n2-检查并创建目录')
    self.create_dirs(new_dirs, False)
    print('\n3-检查多余的目录')
    dirs = self.collect_dirs(self.root_dir)
    print(f'共有目录 {len(dirs)} 个')
    dict_values = [topic_dir.path for topic_dir in new_dirs]
    for dir_name in dirs:
        if dir_name not in dict_values:
            self.delete_empty_dir(dir_name)
    self.back_file()
def rename(dir_path):
    """Rename every file in *dir_path* to a sequential '平方X-NN' name,
    keeping each file's original extension."""
    for index, old_path in enumerate(filex.list_file(dir_path), start=1):
        extension = os.path.splitext(old_path)[1]
        new_path = dir_path + os.path.sep + '平方X-%02d%s' % (index, extension)
        print('%s -> %s' % (old_path, new_path))
        os.rename(old_path, new_path)
def process_dir_for_translation(process_dir, result_dir=None, name_pattern=None):
    """Process every matching file in the directory for translation.

    :param process_dir: directory to scan
    :param result_dir: output directory; defaults to process_dir + '_delete'
    :param name_pattern: optional file-name filter passed to filex.list_file
    """
    if result_dir is None:
        result_dir = process_dir + '_delete'
    file_list = filex.list_file(process_dir, name_pattern)
    total = len(file_list)
    for index, file in enumerate(file_list, start=1):
        print('process %d/%d' % (index, total))
        result_file = file.replace(process_dir, result_dir)
        ActionsBundle.process_file_for_translation(file, result_file)
def check_same_en_difference_cn(en_dir, cn_dir, print_msg=False, suffix='', trans_unicode=True):
    """Find English strings that were translated inconsistently.

    Pairs each English file with its Chinese counterpart and collects an
    en->cn mapping.  When the same English text maps to different Chinese
    texts in different files, every variant is recorded (newline
    separated) in ``diff_translation`` and the entry is removed from
    ``all_translation`` at the end.

    :param en_dir: directory of English source files
    :param cn_dir: directory of the Chinese translations
    :param print_msg: print a message for every conflict found
    :param suffix: suffix used when deriving the Chinese file name
    :param trans_unicode: decode unicode escapes when reading the Chinese files
    :return: (all_translation, diff_translation), both keyed by English text
    """
    all_translation = dict()
    diff_translation = dict()
    # Skip binary image resources.
    en_file_list = filex.list_file(en_dir, '\.(?!png|gif)')
    for en_file in en_file_list:
        print('\ncheck ' + en_file)
        cn_file = Translator.get_cn_file_name(en_dir, cn_dir, en_file, suffix)
        if not os.path.exists(cn_file):
            print('中文文件不存在' + cn_file)
            continue
        en_dict = Tools.get_dict_from_file(en_file)
        cn_dict = Tools.get_dict_from_file(cn_file, delete_cn_shortcut=True, trans_unicode=trans_unicode)
        for key, en_value in en_dict.items():
            if key in cn_dict.keys():
                # The key exists in both languages.
                cn_value = cn_dict[key]
                if cn_value != en_value:
                    # Only counts as a translation when cn differs from en.
                    if en_value in all_translation.keys():
                        pre_translation = all_translation[en_value]
                        if pre_translation != cn_value:
                            if en_value not in diff_translation.keys():
                                diff_translation[
                                    en_value] = pre_translation + '\n' + cn_value
                            else:
                                pre_diff_translation = diff_translation[
                                    en_value]
                                if cn_value not in pre_diff_translation.split(
                                        '\n'):
                                    # Only record variants not seen before.
                                    diff_translation[
                                        en_value] = pre_diff_translation + '\n' + cn_value
                            if print_msg:
                                print(
                                    '\n词典中已经存在%s,但翻译不相同\n%s\n%s' %
                                    (en_value, pre_translation, cn_value))
                    else:
                        all_translation[en_value] = cn_value
    if print_msg:
        print('the size is %d' % len(sorted(all_translation.keys())))
    # Remove conflicting entries from the unified dictionary.
    for key in diff_translation.keys():
        all_translation.pop(key)
    return all_translation, diff_translation
def generate_need_translation_file2(en_dir, result_file):
    """Merge every English properties file into one file of key=value lines."""
    merged = dict()
    for en_file in filex.list_file(en_dir):
        merged.update(Tools.get_dict_from_file(en_file))
    lines = ['%s=%s\n' % (key, value) for key, value in merged.items()]
    filex.write_lines(result_file, lines)
def check_adapter_files(self):
    """Find java files extending RecyclerView.Adapter, plus their subclasses."""
    base_name = 'RecyclerView.Adapter'
    marker = 'extends ' + base_name
    all_java_files = filex.list_file(self.work_space, 'java$')
    print('共有 %d 个 java 文件' % len(all_java_files))
    adapter_files = []
    for java_file in all_java_files:
        if marker in filex.read(java_file):
            print('%s 继承了 %s' % (java_file, base_name))
            adapter_files.append(java_file)
    # Also pull in subclasses of the adapters found above.
    self.check_subclass(all_java_files, adapter_files)
    return adapter_files
def process_dir_translation_result(en_dir, cn_dir, result_dir=None, name_pattern=None):
    """Merge translation results for every matching file in the directory.

    :param en_dir: directory of English source files
    :param cn_dir: directory of Chinese '_zh_CN' files
    :param result_dir: output directory; defaults to cn_dir + '_add'
    :param name_pattern: optional file-name filter passed to filex.list_file
    """
    if result_dir is None:
        result_dir = cn_dir + '_add'
    en_files = filex.list_file(en_dir, name_pattern)
    total = len(en_files)
    for index, en_file in enumerate(en_files, start=1):
        print('process %d/%d' % (index, total))
        cn_file = filex.get_result_file_name(en_file.replace(en_dir, cn_dir), '_zh_CN')
        result_file = en_file.replace(en_dir, result_dir)
        ActionsBundle.add_ellipsis_and_shortcut(en_file, cn_file, result_file)
def check_rv_xml_files(self):
    """Find xml layouts containing a RecyclerView and save the list to disk."""
    xml_files = filex.list_file(self.work_space, 'xml$')
    total = len(xml_files)
    print('共有文件 %d 个' % total)
    rv_xml_files = []
    for index, xml_file in enumerate(xml_files, start=1):
        print('%d/%d %s' % (index, total, xml_file))
        # Keep files whose content mentions a recycler view.
        if 'RecyclerView' in filex.read(xml_file):
            rv_xml_files.append(xml_file)
    print('共有 %d 个文件包含 recycler view' % len(rv_xml_files))
    filex.write_lines(self.rv_xml_files_path, rv_xml_files, add_line_separator=True)
def auto_process_file2(text, add_note=True):
    """Wrap a discuz post file with [md] tags and optionally a reprint notice.

    :param text: text copied from discuz (used to derive title and tid)
    :param add_note: whether to insert the reprint notice
    :return: the processed file content, or *text* unchanged when no
        matching file is found
    """
    title, tid = BlogXTools.get_title_and_tid(text)
    if tid is None:
        return text
    file_name = '[%s]%s.md' % (tid, title)
    found_file = None
    for file in filex.list_file(BlogXTools.process_dir):
        parent_dir, base_name = os.path.split(file)
        if base_name == file_name:
            print('打到文件')
            found_file = file
            break
        if base_name == '%s.txt' % title:
            # An un-renamed draft: rename it to the canonical name first.
            print('找到未命名的文件,执行重命名')
            renamed = parent_dir + '/' + file_name
            os.rename(file, renamed)
            found_file = renamed
            break
    if not found_file:
        return text
    lines = filex.read_lines(found_file)
    first_line = lines[0]
    need_process = True
    if first_line.startswith('[md]'):
        print('第一行已经包含[md]标签,不处理')
        need_process = False
    elif first_line.startswith('>本文由平方X'):
        print('第一行已经包含转载申明')
    elif add_note:
        # Insert the reprint notice at the top.
        url = 'http://blog.pingfangx.com/%s.html' % tid
        notice = '>本文由平方X发表于平方X网,转载请注明出处。[%s](%s)\n\n' % (url, url)
        lines.insert(0, notice)
        print('已写入转载申明')
    if need_process:
        # Wrap the whole file in [md] ... [/md] tags.
        lines.insert(0, '[md]\n\n')
        lines.append('\n\n[/md]')
    filex.write_lines(found_file, lines)
    # Return the joined content so it can be copied.
    return ''.join(lines)
def list_file(self, dir_path, ignore_compressed=True):
    """List image files under *dir_path*, applying the configured filters."""
    # jpg or png, excluding nine-patch (.9.png) images.
    files = filex.list_file(dir_path, '(jpg)|((?<!\.9\.)png)$')
    print(f'过滤掉非图片文件,还剩 {len(files)} 个')
    files = [f for f in files if self.not_in_keep_files(f)]
    print(f'过滤掉保留的文件,还剩 {len(files)} 个')
    files = [f for f in files if os.path.getsize(f) >= self.minimum_size]
    print(f'过滤掉小于 {self.minimum_size} 的文件,还剩 {len(files)} 个')
    if ignore_compressed:
        files = [f for f in files if self.not_compress_yet(f)]
        print(f'过滤掉已压缩的文件,还剩 {len(files)} 个')
    return files
def check_same_key_difference_value(dir_path):
    """Report keys that map to different values across the files in *dir_path*.

    Inconsistent values really do occur, so translations must be kept
    per-file rather than merged blindly.
    """
    merged = dict()
    for properties_file in filex.list_file(dir_path):
        print('\ncheck ' + properties_file)
        current = Tools.get_dict_from_file(
            properties_file, delete_value_and_symbol=True, trans_unicode=True)
        # Nothing to compare against until the first file has been merged.
        if merged:
            for key, value in current.items():
                if key in merged and merged[key] != value:
                    print('key相同%s,但value不一致\n%s\n%s' % (key, merged[key], value))
        merged.update(current)
def jar_file(jar_file_dir, jar_content_dir):
    """Pack translated files back into their jar archives.

    The jars could be packed in the source directory directly, but
    packing in the work directory keeps a distributable copy of each jar.
    """
    for relative_name in TranslationFile.file_list:
        jar_path = '%s/%s' % (jar_file_dir, relative_name)
        print('处理%s' % jar_path)
        if not os.path.exists(jar_path):
            print('文件不存在%s' % jar_path)
            continue
        # Content directory is the jar name with its extension stripped.
        content_dir = os.path.splitext('%s/%s' % (jar_content_dir, relative_name))[0]
        with zipfile.ZipFile(jar_path, 'a') as archive:
            content_files = filex.list_file(content_dir)
            print('压缩%d个文件' % len(content_files))
            for content_file in content_files:
                # Store paths relative to the jar root.
                # NOTE: appending to an existing jar can leave duplicate entries.
                archive.write(content_file, arcname=content_file.replace(content_dir, ''))
def rename_file_index(dir_path):
    """Normalise numeric indexes in file names to a common zero-padded width."""
    files = filex.list_file(dir_path)
    if not files:
        print('文件为空')
        return
    print(f'1 读取序号模式')
    index_pattern = FileUtils.get_file_index_pattern(files)
    print(f'读取到序号正则为 {index_pattern}')
    print()
    print(f'2 取最长序号')
    max_index = FileUtils.get_max_length_index(files, index_pattern)
    width = len(max_index)
    print(f'最长序号为 {max_index},取最大长度为 {width}')
    print()
    print(f'3 尝试重命名')
    FileUtils.rename_file_by_max_length_index(files, index_pattern, width)
    print(f'重命名完成')
def check_rv_java_files(self):
    """Find java files that use any xml layout containing a RecyclerView."""
    listed = filex.read_lines(self.rv_xml_files_path, ignore_line_separator=True)
    # Keep just the layout names (base file name without the .xml extension).
    rv_xml_names = [os.path.split(path)[1].replace('.xml', '') for path in listed]
    print('共有 %d 个 xml 文件包含 recycler view' % len(rv_xml_names))
    print(rv_xml_names)
    java_files = filex.list_file(self.work_space, 'java$')
    print('共有 %d 个 java 文件' % len(java_files))
    print('检查使用了资源文件的类')
    rv_java_files = [
        java_file for java_file in java_files
        if self.check_java_file(java_file, rv_xml_names)
    ]
    print('共有 %d 个 java 文件使用了资源文件' % len(rv_java_files))
    print('添加子类')
    self.check_subclass(java_files, rv_java_files)
    return rv_java_files
def generate_need_translation_file(en_dir, result_dir):
    """Merge every English properties file and write the entries in chunks.

    Entries are written to numbered '.properties' files of roughly 3000
    lines each under *result_dir*.
    """
    merged = dict()
    for en_file in filex.list_file(en_dir):
        merged.update(Tools.get_dict_from_file(en_file))
    count = 0
    chunk_size = 3000
    buffered = list()
    for key, value in merged.items():
        count += 1
        buffered.append('%s=%s\n' % (key, value))
        # NOTE(review): this flushes when count % 3000 == 2999, so the first
        # chunk holds 2999 entries, and '%2d' pads the index with a space
        # (looks like '%02d' was intended) — kept as-is to preserve the
        # existing output file names; confirm before changing.
        if count % chunk_size == chunk_size - 1:
            chunk_index = int(count / chunk_size)
            chunk_file = '%s\\%2d.properties' % (result_dir, chunk_index)
            filex.write_lines(chunk_file, buffered)
            buffered.clear()
    if buffered:
        chunk_index = int(count / chunk_size)
        chunk_file = '%s\\%2d.properties' % (result_dir, chunk_index)
        filex.write_lines(chunk_file, buffered)
        buffered.clear()
def unpack_all_jar(dir_path):
    """Extract every jar under *dir_path* into a folder named after the jar."""
    for archive_path in filex.list_file(dir_path, '.jar$'):
        print('解包%s' % archive_path)
        target_dir = os.path.splitext(archive_path)[0]
        with zipfile.ZipFile(archive_path) as archive:
            archive.extractall(target_dir)
def process_tips_translation_result(tips_names_file, tips_cn_dir, result_type=RESULT_TYPE_ANDROID_STUDIO, result_dir=None, result_file_type=0, language='cn'):
    """Process the OmegaT-translated tips into their final output form.

    :param tips_cn_dir: directory of the translated tip html files
    :param tips_names_file: file of 'en_name=cn_name' lines defining order
    :param result_type: 0 for AndroidStudio, 1 for GitHub Pages
    :param result_dir: output directory; derived from result_type when None
    :param result_file_type: result file naming, 0 = number only, 1 = number + name
    :param language: output language, 'cn' or 'en'
    :return:
    """
    if result_dir is None:
        if result_type == Tips.RESULT_TYPE_GITHUB_PAGES:
            result_dir = tips_cn_dir + '_github_page'
        else:
            result_dir = tips_cn_dir + "_android_studio"
    # Select the keymap table matching the output language.
    if language == 'en':
        Tips.KEYMAP_DICT = Tips.KEYMAP_EN_DICT
    else:
        Tips.KEYMAP_DICT = Tips.KEYMAP_CN_DICT
    print('处理' + tips_cn_dir)
    file_dict = Tips.get_file_dict_in_dir(tips_cn_dir, ignore_excluded=True)
    if file_dict is None:
        return
    all_lines = filex.read_lines(tips_names_file, ignore_line_separator=True)
    if all_lines is None:
        return
    # Drop lines without '=' and remember where the appended section starts.
    lines = []
    append_line = -1
    for line in all_lines:
        if '=' in line:
            lines.append(line)
        else:
            if 'append' in line:
                # Entries from this index on came from the '# append' section.
                append_line = len(lines)
    length = len(lines)
    print('共%d行,添加行是%d' % (length, append_line))
    all_files = filex.list_file(tips_cn_dir)
    print('共%d文件' % len(all_files))
    for i in range(length):
        line = lines[i]
        en_name, cn_name = line.split('=')
        file_path = r'%s\%s.html' % (tips_cn_dir, en_name)
        if i >= append_line or not os.path.exists(file_path):
            # Appended or missing entries may live in the 'excluded' folder.
            excluded_file_path = r'%s\excluded\%s.html' % (tips_cn_dir, en_name)
            if os.path.exists(excluded_file_path):
                # Only switch when it exists; otherwise keep the original path.
                file_path = excluded_file_path
        if not os.path.exists(file_path):
            print('文件不存在%s' % en_name)
            continue
        # Processed: remove from the pending list.
        if file_path in all_files:
            all_files.remove(file_path)
        else:
            # Happens when IdeTipsAndTricks lists the same name twice.
            print('文件不存于列表中%s' % file_path)
        if language == 'cn':
            add_cn_title = '(%s)' % cn_name
        else:
            add_cn_title = ''
        header = '<h1>%s%s</h1>\n' % (en_name, add_cn_title)
        if result_type == Tips.RESULT_TYPE_ANDROID_STUDIO:
            # AndroidStudio output: header links to the feedback page.
            author_url = '<a href=\'%s\'>[%s]</a>' % (
                'https://www.pingfangx.com/xx/translation/feedback?from=tips', '汉化反馈')
            header = '<h1>%s%s %s</h1>\n' % (en_name, add_cn_title, author_url)
            footer = None
            result_name = file_path.replace(tips_cn_dir, result_dir)
        else:
            # GitHub Pages output: build previous/next navigation links.
            # Previous page.
            pre_page = ''
            if i > 0:
                pre_name = lines[i - 1].split('=')[0]
                if result_file_type == 1:
                    pre_file = '%03d-%s.html' % (i, pre_name)
                else:
                    pre_file = '%03d.html' % i
                pre_page = '<a href=\'%s\'><<%s</a>' % (pre_file, pre_name)
            # Next page.
            next_page = ''
            if i < length - 1:
                next_name = lines[i + 1].split('=')[0]
                if result_file_type == 1:
                    next_file = '%03d-%s.html' % (i + 2, next_name)
                else:
                    next_file = '%03d.html' % (i + 2)
                next_page = '<a href=\'%s\'>>>%s</a>' % (next_file, next_name)
            # Current file name and result name.
            dir_name, base_name = os.path.split(file_path)
            name, ext = os.path.splitext(base_name)
            if result_file_type == 1:
                current_file = '%03d-%s.html' % (i + 1, name)
            else:
                current_file = '%03d%s' % (i + 1, ext)
            result_name = '%s\\%s' % (result_dir, current_file)
            # Home page link.
            home_page = '<a href=\'%s\'>homepage</a>' % '../index.html'
            # Link to the same page in the other language.
            if language == 'cn':
                to_another_page = '<a href=\'../%s/%s\'>English</a>' % (
                    'en', current_file)
            else:
                to_another_page = '<a href=\'../%s/%s\'>中文</a>' % (
                    'cn', current_file)
            add_homepage = '<p>%s | %s</p>\n' % (home_page, to_another_page)
            header = add_homepage + header
            footer = '<p>%s %s</p>\n<p> </p>' % (pre_page, next_page)
        Tips.process_tips_translation_file(file_path, result_name, result_type, header, footer, language)
    print('剩余文件%d个' % len(all_files))
    if len(all_files) > 0:
        print('**请补全文件**')
def compare_translation(en_dir, compare_dir_list, omegat_dict_file=None, dict_file=None, dict_diff_file=None, by_index=False, trans_unicode=True):
    """Merge translations from several sources and record conflicting entries.

    Builds one dictionary per compare directory (plus an optional OmegaT
    dictionary at the front), then either merges them by priority order
    (``by_index=True``, later lists updated first so earlier lists win)
    or cross-checks every key against the later dictionaries, writing
    agreed translations to *dict_file* and conflicts to *dict_diff_file*.

    :param en_dir: directory of English source files
    :param compare_dir_list: list of translated directories to compare
    :param omegat_dict_file: optional OmegaT dictionary, highest priority
    :param dict_file: output dictionary file, default 'data/dict.txt'
    :param dict_diff_file: output file for conflicting entries
    :param by_index: True = merge by priority; False = cross-check keys
    :param trans_unicode: decode unicode escapes while reading files
    """
    if dict_file is None:
        dict_file = 'data/dict.txt'
    if dict_diff_file is None:
        dict_diff_file = filex.get_result_file_name(dict_file, '_diff')
    separator = '[xx|]'
    dict_list = list()
    for i in compare_dir_list:
        # Special case: this directory already holds merged .properties files.
        if i == r'C:\Users\Admin\Desktop\AndroidStudio汉化\汉化包\整理':
            t_dict = dict()
            for i_file in filex.list_file(i, '.properties'):
                t_dict.update(filex.get_dict_from_file(i_file))
            dict_list.append(t_dict)
            continue
        i_all_translation, i_diff_translation = Translator.check_same_en_difference_cn(
            en_dir, i, False, '', trans_unicode=trans_unicode)
        dict_list.append(i_all_translation)
    if omegat_dict_file is not None:
        dict_list.insert(0, Translator.get_omegat_dict(omegat_dict_file))
    for i in range(len(dict_list)):
        print('%d中共包含翻译%d条' % (i + 1, len(sorted(dict_list[i].keys()))))
    all_translation = dict()
    diff_translation = dict()
    print_i = True
    if by_index:
        # Update in reverse order so earlier dictionaries take priority.
        for i in range(len(dict_list) - 1, -1, -1):
            all_translation.update(dict_list[i])
            print('更新%d后,size是%d' % (i + 1, len(sorted(all_translation.keys()))))
    else:
        for i in range(len(dict_list)):
            i_dict = dict_list[i]
            index = 0
            length = len(sorted(i_dict.keys()))
            for key, i_value in i_dict.items():
                index += 1
                if print_i:
                    print('\n检查%d/%d,%s' % (index, length, key))
                    print('词典%d中是%s' % (i, i_value))
                has_diff = False
                # Compare against all later dictionaries.
                for j in range(i + 1, len(dict_list)):
                    j_dict = dict_list[j]
                    if key in j_dict:
                        j_value = j_dict[key]
                        if i_value == j_value:
                            if print_i:
                                print('词典%d中相同' % j)
                        else:
                            has_diff = True
                            if key in diff_translation.keys():
                                pre_translation = diff_translation[key]
                                # Only append variants not recorded yet.
                                if j_value not in pre_translation.split(
                                        separator):
                                    diff_translation[
                                        key] = pre_translation + separator + j_value.replace(
                                            '\n', '')
                            else:
                                diff_translation[key] = (i_value +
                                                         separator +
                                                         j_value).replace(
                                                             '\n', '')
                            if print_i:
                                print('词典%d中是%s' % (j, j_value))
                        # Remove the key once it has been compared, so later
                        # dictionaries only contribute keys not seen before.
                        j_dict.pop(key)
                    else:
                        if print_i:
                            print('词典%d中缺少' % j)
                if not has_diff:
                    if print_i:
                        print('统一翻译')
                    if i_value:
                        # Only keep non-empty translations.
                        all_translation[key] = i_value
            print('%d中处理%d条,其中%d条翻译相同,%d条不同' % (i, len(sorted(
                i_dict.keys())), len(sorted(all_translation.keys())),
                len(sorted(diff_translation.keys()))))
    print('size is %d' % len(sorted(all_translation.keys())))
    if all_translation:
        # A .tmx / .tmx.xml target is written as an OmegaT dictionary.
        if dict_file.endswith('.tmx') or dict_file.endswith('.tmx.xml'):
            Tools.save_omegat_dict(all_translation, dict_file)
        else:
            result = list()
            for key, value in all_translation.items():
                result.append('%s=%s\n' % (key, value))
            filex.write_lines(dict_file, result)
    print('diff size is %d' % len(sorted(diff_translation.keys())))
    if diff_translation:
        result = list()
        for key, value in diff_translation.items():
            result.append('%s=%s\n' % (key, value))
        filex.write_lines(dict_diff_file, result)
def check_translation_complete(en_dir, cn_dir, out_put=None, suffix=''):
    """Report how completely each English file has been translated.

    Files are classified as missing, identical (or effectively
    untranslated), incompletely translated, or fully translated; the
    untranslated entries can optionally be written to *out_put*.
    """
    untranslated = dict()
    missing_files = []
    identical_files = []
    incomplete_files = []
    completed_files = []
    translated_total = 0
    # Skip binary image resources.
    for en_file in filex.list_file(en_dir, '\.(?!png|gif)'):
        cn_file = Translator.get_cn_file_name(en_dir, cn_dir, en_file, suffix)
        if not os.path.exists(cn_file):
            missing_files.append(cn_file)
            continue
        if filecmp.cmp(en_file, cn_file):
            # Byte-identical file: nothing was translated.
            identical_files.append(en_file)
            continue
        en_dict = Tools.get_dict_from_file(en_file)
        cn_dict = Tools.get_dict_from_file(cn_file, trans_unicode=True)
        fully_translated = True
        translated_in_file = 0
        for key, en_value in en_dict.items():
            if key in cn_dict.keys() and cn_dict[key] != en_value:
                translated_in_file += 1
                translated_total += 1
            else:
                # Key missing, or its value was left in English.
                fully_translated = False
                untranslated[key] = en_value
        if not fully_translated:
            print('文件未完全翻译' + en_file)
            incomplete_files.append(en_file)
        elif translated_in_file == 0:
            # Not a single entry translated: treat like an identical file.
            identical_files.append(en_file)
        else:
            completed_files.append(en_file)
            print('文件翻译完整' + en_file)
    print('缺少%d个文件' % len(missing_files))
    print(missing_files)
    print('有%d个文件完全相同' % len(identical_files))
    print('有%d个文件未翻译完整' % len(incomplete_files))
    print(incomplete_files)
    print('有%d个文件完整翻译,共%d条翻译' % (len(completed_files), translated_total))
    if out_put is not None:
        result = ['%s=%s\n\n' % (key, value) for key, value in untranslated.items()]
        print('incomplete size is %d' % len(sorted(untranslated.keys())))
        filex.write_lines(out_put, result)