def replace():
    """Replace ambiguous Chinese terms in a TMX file based on the English source line.

    For each line containing the ambiguous Chinese term, look at the English
    line 3 rows above it and decide which precise translation to substitute.
    NOTE(review): cn_type_param, cn_type_parameter, cn_type_argument,
    en_type_parameter and en_type_argument are module-level names defined
    elsewhere in the file — verify they are in scope before running.
    """
    in_file = r'ignore/project_save.tmx'
    out_file = r'ignore/project_save_result.tmx'
    lines = filex.read_lines(in_file)
    sep = '=' * 10
    for i, line in enumerate(lines):
        if cn_type_param in line:
            print(f'在 {i + 1} 行找到 {cn_type_param}')
            # In the TMX layout the English segment sits 3 lines above the Chinese one.
            en_line = lines[i - 3]
            en_type_parameter_count = en_line.lower().count(en_type_parameter)
            en_type_argument_count = en_line.lower().count(en_type_argument)
            if not en_type_parameter_count and not en_type_argument_count:
                # Neither word appears in the English line.
                print(sep + f' 英文中没有相关单词 {en_line}' + sep)
            elif en_type_parameter_count and en_type_argument_count:
                # Both words appear — ambiguous, do not translate.
                print(sep + f'英文中两者都有,不翻译 {en_line}' + sep)
            elif en_type_parameter_count:
                # Only "parameter" appears — use the "parameter" translation.
                print(f'英文中仅有 {en_type_parameter},替换 {cn_type_param}->{cn_type_parameter}')
                lines[i] = line.replace(cn_type_param, cn_type_parameter)
            elif en_type_argument_count:
                # Only "argument" appears — use the "argument" translation.
                print(f'英文中仅有 {en_type_argument},替换 {cn_type_param}->{cn_type_argument}')
                lines[i] = line.replace(cn_type_param, cn_type_argument)
            else:
                # Unreachable given the branches above; kept as a safety net.
                print(sep + f'异常情况' + sep)
    filex.write_lines(out_file, lines)
def process_keymap_reference_card(file_path, result_file=None):
    """
    Process the keymap reference card file.

    Source: https://resources.jetbrains.com/storage/products/intellij-idea/docs/IntelliJIDEA_ReferenceCard.pdf
    After saving, copy the text out, split shortcuts on "【", then run one more pass.
    :param file_path: input text file
    :param result_file: output path; defaults to a '_modified' .properties name
    :return: None
    """
    if result_file is None:
        result_file = filex.get_result_file_name(file_path, '_modified', 'properties')
    lines = filex.read_lines(file_path)
    if lines is None:
        return
    result = []
    # One or more leading '#' followed by optional space and content (a heading).
    p_title = re.compile(r'^(#+)\s?(.*)')
    for line in lines:
        line = line.replace('\n', '')
        # Turn '## Title' into '[##] Title' so the properties filter keeps it.
        line = re.sub(p_title, r'[\1] \2', line)
        if '【' in line:
            # Keep only the text before the shortcut marker, as a list item.
            split_result = line.split('【')
            line = '* %s' % split_result[0]
        result.append(line + '\n')
    filex.write_lines(result_file, result)
def get_all_translation(en_dir, cn_dir, dict_file=None, dict_diff_file=None, suffix=''):
    """Read all translations and write them out as key=value dictionaries.

    Collects every (en, cn) pair plus the pairs whose Chinese differs for the
    same English, then dumps both maps to text files.
    :param en_dir: directory of English files
    :param cn_dir: directory of Chinese files
    :param dict_file: output for all translations; defaults to data/<cn_dir name>_dict.txt
    :param dict_diff_file: output for conflicting translations; defaults to dict_file + '_diff'
    :param suffix: file suffix filter passed through to the comparison
    :return: None
    """
    if dict_file is None:
        base_name = os.path.split(cn_dir)[1]
        dict_file = 'data/%s_dict.txt' % base_name
    if dict_diff_file is None:
        dict_diff_file = filex.get_result_file_name(dict_file, '_diff')
    all_translation, diff_translation = Translator.check_same_en_difference_cn(
        en_dir, cn_dir, False, suffix)
    # Original sorted the keys only to count them — len() alone is enough.
    result = ['%s=%s\n' % (key, value) for key, value in all_translation.items()]
    print('size is %d' % len(all_translation))
    filex.write_lines(dict_file, result)
    result = ['%s=%s\n\n' % (key, value) for key, value in diff_translation.items()]
    print('size is %d' % len(diff_translation))
    filex.write_lines(dict_diff_file, result)
def process_keymap_reference_card_translation(en_file, cn_file, result_file=None):
    """
    Convert the translation result into a markdown file.
    :param en_file: English source file (carries the 【shortcut】 markers)
    :param cn_file: translated Chinese file (same line count as en_file assumed)
    :param result_file: output path; defaults to en_file + '_result' .md
    :return: None
    """
    if result_file is None:
        result_file = filex.get_result_file_name(en_file, '_result', 'md')
    en_lines = filex.read_lines(en_file)
    cn_lines = filex.read_lines(cn_file)
    if en_lines is None or cn_lines is None:
        return None
    # '[' + one or more '#' + ']' then content — converts back to a md heading.
    p_title = re.compile(r'^\[(#+)\]\s?(.*)')
    result = []
    for i in range(len(cn_lines)):
        line = cn_lines[i]
        line = encodex.unicode_str_to_chinese(line)
        line = re.sub(p_title, r'\1 \2', line)
        # NOTE(review): assumes en_lines has at least as many lines as cn_lines.
        en_line = en_lines[i].replace('\n', '')
        if '【' in en_line:
            # Prepend the shortcut (left-padded to 30 chars) from the English line.
            shortcut = en_line.split('【')[1]
            line = line.replace('* ', "")
            line = '* %-30s%s' % ('【%s】' % shortcut, line)
        result.append(line)
    filex.write_lines(result_file, result)
def modify_api(self):
    """Rewrite retrofit-style @Method("url") annotations in self.path using the
    changed-api mappings, then report any mappings that were never applied.
    """
    lines = filex.read_lines(self.path, ignore_line_separator=True)
    if not lines:
        print(f'行为空')
        return
    changed_apis = self.get_changed_apis()
    if not changed_apis:
        print(f'api 为空')
        return
    changed_dealer_apis = self.get_changed_apis(True)
    if not changed_dealer_apis:
        print(f'dealer api 为空')
        return
    # Start processing.
    new_lines = []
    api_urls = []
    # Leading whitespace, '@' + method name, '(', optional extras (may contain
    # a DEALER marker), then "url") at end of line.
    pattern = re.compile(r'\s+@(\w+)\((.*)"(.+)"\)$')
    for i, line in enumerate(lines):
        line = line.replace('\n', '')
        match = pattern.match(line)
        if not match:
            # Not an annotation line — keep unchanged.
            new_lines.append(line)
            continue
        api_urls.append(line)
        print(f'处理第 {len(api_urls)} 个 {line}')
        method, add, url = match.groups()
        if 'DEALER' in add:
            # Dealer endpoint.
            new_url = self.modify_line(url, changed_dealer_apis)
        else:
            # Regular api endpoint.
            new_url = self.modify_line(url, changed_apis)
        if url == new_url:
            new_lines.append(line)
            print(f'没有变化')
        else:
            new_lines.append(line.replace(url, new_url))
            print(f'变为了 {new_url}')
    print(f'共 {len(api_urls)} 个 api')
    # Report mappings that never matched any line.
    remain_apis = list(filter(lambda x: not x.replaced, changed_apis))
    print(f'有 {len(remain_apis)} 个接口未替换')
    for api in remain_apis:
        print(api.old)
    remain_apis = list(filter(lambda x: not x.replaced, changed_dealer_apis))
    print(f'有 {len(remain_apis)} 个 dealer 接口未替换')
    for api in remain_apis:
        print(api.old)
    # Save the file in place.
    filex.write_lines(self.path, new_lines, add_line_separator=True)
def check_and_append_tips_name(file_path, tips_name_file, result_file=None):
    """
    Check that the tips-name file covers every tip file; append missing entries.

    Used for IdeTipsAndTricks: not every file is listed, but the file names
    still need translation, so the list is completed here.
    :param file_path: directory containing the tip .html files
    :param tips_name_file: properties file of name=translation lines
    :param result_file: output path; defaults to tips_name_file + '_append'
    """
    if result_file is None:
        result_file = filex.get_result_file_name(tips_name_file, '_append')
    file_list = filex.list_file(file_path)
    print('共%d个文件' % len(file_list))
    lines = filex.read_lines(tips_name_file)
    tips_name = []
    for line in lines:
        if '=' in line:
            name = line.split('=')[0]
            tips_name.append(name)
            # Only one directory level is joined; entries under exclude are not handled.
            # NOTE(review): Windows path separator hard-coded — confirm list_file
            # returns backslash-separated paths on this platform.
            file_name = '%s\\%s.html' % (file_path, name)
            if file_name in file_list:
                file_list.remove(file_name)
            else:
                print('文件不存在%s' % file_name)
    print('共有%d个tip名' % len(tips_name))
    print('还缺%d个文件' % len(file_list))
    # Write the result: original lines plus appended entries for uncovered files.
    lines.append('\n# append\n')
    for file_name in file_list:
        name = os.path.splitext(os.path.split(file_name)[1])[0]
        word = Tips.camel_word_to_words(name)
        lines.append('%s=%s\n' % (name, word))
    filex.write_lines(result_file, lines)
def get_ysl_list(result_file): """读取口红列表""" # 官网的读不出来列表,反正也不多,手动加一下 url_list = [ 'http://www.yslbeautycn.com/product/00030YSL.html', 'http://www.yslbeautycn.com/product/00031YSL.html', ] result = list() i = 0 for details_url in url_list: page = netx.get(details_url, need_print=False) soup = BeautifulSoup(page, "html.parser") category = soup.select_one('.pdp_top_content_wrapper').select_one('.product_subtitle').string category = category.replace('圣罗兰', '') # image = soup.select_one('.primary_image')['src'] # color_2 = soup.select_one('.product_image.b-product_img')['src'] color_list = soup.select_one('.swatches.js_swatches.color.contentcarousel_list') for color_li in color_list.select('li'): for color_div in color_li.select('div'): url = color_div.select_one('a')['href'] color_image = color_div.select_one('img')['src'] name = color_div.select_one('span').string name = name.replace('(', '(').replace(')', ')') split_list = name.split('(', 1) if len(split_list) > 1: name = split_list[0].strip() other = '(' + split_list[1].strip() else: other = '' i += 1 lipstick = Lipstick('%03d' % i, category, name, url, '', other, color_image) result.append(str(lipstick)) filex.write_lines(result_file, result, add_line_separator=True)
def get_dior_list(result_file):
    """Fetch the Dior lipstick list page and write one serialized Lipstick per line."""
    url = 'https://www.dior.cn/beauty/zh_cn/%E9%A6%99%E6%B0%9B%E4%B8%8E%E7%BE%8E%E5%AE%B9/%E5%BD%A9%E5%A6%86/%E5' \
          '%94%87%E9%83%A8/%E5%94%87%E8%86%8F/fr-lipsticks-%E5%94%87%E8%86%8F.html '
    page = netx.get(url, need_print=False)
    # Parse the page.
    soup = BeautifulSoup(page, "html.parser")
    result = list()
    i = 0
    for category in soup.select('.category.js-category'):
        # Top-level grouping.
        '大的分组'
        category_name = category.select_one('.category-title').string.replace('Dior迪奥', '')
        print('\n分组:%s' % category_name)
        for column in category.select('.column.product'):
            # One product series.
            '每一个系列'
            legend_name = column.select_one('.legend-name').string.replace('Dior迪奥', '')
            legend_desc = column.select_one('.legend-description').string.strip()
            print('系列名:' + legend_name)
            legend_swatches_list = column.select_one('.legend-swatches-list')
            for legend_li in legend_swatches_list.select('li'):
                a = legend_li.find('a')
                url = a['href']
                color = a.find('img')
                image = ChooseLipstick.dior_host + color['src']
                i += 1
                lipstick = Lipstick('%03d' % i, category_name + '-' + legend_name, '', url, '', legend_desc, image)
                result.append(str(lipstick) + '\n')
    filex.write_lines(result_file, result)
def get_dior_details(source_file, result_file):
    """Fetch detail data (cover image, price, colour name) for each lipstick
    listed in source_file, appending one result line at a time to result_file.
    """
    lipstick_list = filex.read_lines(source_file, ignore_line_separator=True)
    length = len(lipstick_list)
    for i in range(length):
        lipstick = Lipstick.from_string(lipstick_list[i])
        print('获取第 %d/%d个口红信息' % (i + 1, length))
        # url field holds a relative, unescaped path — quote and prefix the host.
        url = ChooseLipstick.dior_host + urllib.parse.quote(lipstick.url)
        page = netx.get(url, need_print=False)
        soup = BeautifulSoup(page, "html.parser")
        cover_img_tag = soup.select_one('.png-bg.cover-bg')
        # all_image = cover_img['data-zoom-views']
        cover_img = cover_img_tag.select_one('.js-cover-img')['src']
        cover_img = ChooseLipstick.dior_host + cover_img
        # name = soup.select_one('.quickbuy-title').string
        # desc = soup.select_one('.quickbuy-subtitle').string
        price = soup.select_one('.details-price.js-order-value').string.strip()
        color_name = soup.select_one('.swatches-list').select_one('li.selected').select_one('a')['data-swatch-name']
        # color_span = soup.select_one('.swatch-name.js-swatch-name')
        # color = color_span.select_one('span').string
        # swatches_list = soup.select_one('.swatches-list.js-products-selector')
        # swatches = swatches_list.select_one('li.selected')
        lipstick.url = url
        lipstick.price = price
        lipstick.name = color_name
        lipstick.img = ','.join((lipstick.img, cover_img))
        # Append per item so progress survives a mid-run failure.
        filex.write_lines(result_file, [str(lipstick)], mode='a', add_line_separator=True)
def check_use_layout_res(self):
    """From the previously detected java files, collect the layout resources they
    use (excluding setContentView targets), then resolve them to xml file paths.
    """
    java_files = filex.read_lines(self.rv_java_files_path, ignore_line_separator=True)
    # R.layout.xxx references, excluding ones preceded by 'setContentView('.
    pattern = re.compile(r'(?<!setContentView\()R\.layout\.(.*?)[;),]')
    # Names passing this filter do NOT start with any of the listed prefixes.
    name_pattern = re.compile(r'^(?!fragment|dialog|high|pop|layout|address)')
    xml_name_list = []
    for file in java_files:
        content = filex.read(file)
        all_match = re.findall(pattern, content)
        if all_match:
            print('%s 找到布局使用' % file)
            for match in all_match:
                if re.search(name_pattern, match):
                    print(match)
                    if 'item' not in match:
                        # Informational only — non-item layouts are still collected.
                        print('不包含 item')
                    if match not in xml_name_list:
                        xml_name_list.append(match)
                else:
                    print('过滤', match)
    print('共使用了 %d 个文件' % len(xml_name_list))
    print('查找对应的 xml 文件')
    # Map each layout name to an actual xml file under the workspace.
    files = filex.list_file(self.work_space, 'xml$')
    xml_file_list = []
    for xml_name in xml_name_list:
        for file in files:
            name = os.path.splitext(os.path.split(file)[1])[0]
            if xml_name == name:
                xml_file_list.append(file)
                break
    print('共找到 %d 个文件' % len(xml_file_list))
    filex.write_lines(self.item_xml_files_path, xml_file_list, add_line_separator=True)
def modify_xml_file(item_xml_file):
    """Adjust the root element of an item layout xml in place:
    layout_width wrap_content -> match_parent, and
    layout_height match_parent/fill_parent -> wrap_content.

    Only lines up to the first '>' (end of the root tag) are examined.
    :return: True if the file was modified and saved, False otherwise.
    """
    lines = filex.read_lines(item_xml_file)
    modified = False
    length = len(lines)
    wrap_content = 'wrap_content'
    match_parent = 'match_parent'
    fill_parent = 'fill_parent'
    for i in range(length):
        line = lines[i]
        # print(line)
        if '?>' in line:
            # Skip the xml declaration line.
            continue
        if 'android:layout_width' in line:
            if wrap_content in line:
                modified = True
                print('layout_width 设置为 %s,改为 %s' % (wrap_content, match_parent))
                lines[i] = line.replace(wrap_content, match_parent)
        if 'android:layout_height' in line:
            if match_parent in line:
                modified = True
                print('layout_height 设置为 %s,改为 %s' % (match_parent, wrap_content))
                lines[i] = line.replace(match_parent, wrap_content)
            elif fill_parent in line:
                modified = True
                print('layout_height 设置为 %s,改为 %s' % (fill_parent, wrap_content))
                lines[i] = line.replace(fill_parent, wrap_content)
        if '>' in line:
            # Root tag closed — stop; only the root element is processed.
            break
    if modified:
        print('作出了修改,需要保存')
        filex.write_lines(item_xml_file, lines)
        return True
    return False
def generate_md5_of_current_source_files(self, md5_file):
    """Write one "md5,path" line per current source image file into md5_file."""
    entries = [
        f'{self.get_file_md5(path)},{path}'
        for path in self.list_source_image_file()
    ]
    filex.write_lines(md5_file, entries, add_line_separator=True)
def add_ellipsis_and_shortcut(en_file, cn_file, result_file=None):
    """
    Process shortcuts (replace _letter with (_letter)) and align the trailing
    ellipsis/period of each Chinese value with its English counterpart.
    :param en_file: English properties file
    :param cn_file: Chinese properties file
    :param result_file: output path; defaults to cn_file + '_add_ellipsis_and_shortcut'
    :return: None
    """
    if result_file is None:
        result_file = filex.get_result_file_name(cn_file, '_add_ellipsis_and_shortcut')
    en_dict = Tools.get_dict_from_file(en_file, delete_value_ellipsis=False, delete_value_underline=False)
    cn_dict = Tools.get_dict_from_file(cn_file, delete_value_ellipsis=False, delete_value_underline=False)
    count = 0
    # Raw strings for the regexes: the original '\.'-style escape in a plain
    # string is an invalid escape sequence (DeprecationWarning, SyntaxWarning
    # on newer Python). Behavior is unchanged.
    p_ellipsis = re.compile(r'……|…$')
    p_period = re.compile(r'\.')
    for (k, v) in en_dict.items():
        if v.endswith('.'):
            # English value ends with '.'; normalize the Chinese ending to match.
            if k in cn_dict.keys():
                cn_value = cn_dict[k]
                old_value = cn_value
                if v.endswith('...'):
                    # Ellipsis ending: convert Chinese ellipsis forms, ensure '...'.
                    cn_value = re.sub(p_ellipsis, '...', cn_value)
                    if not cn_value.endswith('...'):
                        cn_value += '...'
                elif v.endswith('.'):
                    # Period ending: convert '.' to '。', ensure trailing '。'.
                    cn_value = re.sub(p_period, '。', cn_value)
                    if not cn_value.endswith('。'):
                        cn_value += '。'
                if cn_value != old_value:
                    print('修改【%s】为【%s】' % (old_value, cn_value))
                    cn_dict[k] = cn_value
        if '_' in v:
            # English value carries a mnemonic shortcut (letter after '_').
            index = v.find('_')
            shortcut = v[index + 1:index + 2]
            if k in cn_dict.keys():
                # Key present in both dictionaries.
                cn_value = cn_dict[k]
                count += 1
                # Already ends with (_letter): replace it with the current shortcut.
                p = re.compile(r'(.*)(\(_\w\))')
                if re.match(p, cn_value) is not None:
                    replace_result = re.sub(p, r'\1' + '(_%s)' % shortcut, cn_value)
                    print('替换%d,key=%s,v=%s,cn=%s,r=%s' % (count, shortcut, v, cn_value, replace_result))
                else:
                    replace_result = cn_value.replace('_', '') + '(_%s)' % shortcut
                    print('添加%d,key=%s,v=%s,cn=%s,r=%s' % (count, shortcut, v, cn_value, replace_result))
                cn_dict[k] = replace_result
    # Re-translate the English file with the adjusted dictionary.
    result = Tools.translate_file_by_dict(en_file, cn_dict, '')
    result.insert(0, '# from:[AndroidStudio翻译(3)-ActionsBundle中文翻译](http://blog.pingfangx.com/2355.html)\n')
    filex.write_lines(result_file, result)
def check_rv_java_and_adapter_files(self):
    """Merge the recycler-view java files with the adapter files (deduplicated)
    and save the combined list to rv_java_files_path."""
    rv_files = self.check_rv_java_files()
    adapter_files = self.check_adapter_files()
    print('大小分别为 %d,%d' % (len(rv_files), len(adapter_files)))
    # Append only adapter files not already present, preserving order.
    rv_files.extend(path for path in adapter_files if path not in rv_files)
    print('合并为 %d' % len(rv_files))
    filex.write_lines(self.rv_java_files_path, rv_files, add_line_separator=True)
def modify_use_layout_res(self):
    """Run modify_xml_file over every listed item xml and record which files changed."""
    xml_paths = filex.read_lines(self.item_xml_files_path, ignore_line_separator=True)
    changed = []
    for path in xml_paths:
        print('检查', path)
        if self.modify_xml_file(path):
            changed.append(path)
    print('共修改 %d 个 xml 文件' % len(changed))
    filex.write_lines(self.modified_xml_files_path, changed, add_line_separator=True)
def download_android_source(self, out_file=None):
    """
    Download the Android source.
    :param out_file: output file. Running clone directly showed no progress
        (possibly my mistake — a quick search turned up nothing), so the
        commands are first written to a .bat file which is then executed.
        [Correction] some blog posts do show progress; comparing, running the
        .py file directly (i.e. via cmd) does show progress. It is only inside
        PyCharm that there is none — hence the effort spent generating the
        .bat and computing times inside it, ha.
    :return: None
    """
    if not os.path.exists(self.source_root):
        os.mkdir(self.source_root)
    # NOTE(review): if out_file is None this line raises — despite the default,
    # callers appear expected to pass out_file; verify.
    timex_bat_file = os.path.split(out_file)[0] + os.path.sep + 'timex.bat'
    root = Et.parse(self.manifest_file)
    project_list = root.findall('project')
    length = len(project_list)
    result = list()
    for i in range(length):
        project = project_list[i]
        dir_path = project.attrib['path']
        last = dir_path.rfind("/")
        if last != -1:
            # The last path segment is created by git clone; cut it off.
            dir_path = self.source_root + os.path.sep + dir_path[:last]
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            work_dir = dir_path
        else:
            # No '/': clone directly under the source root.
            work_dir = self.source_root
        # Build and run (or record) the clone command.
        name = project.attrib['name']
        cmd = '%s clone %s/%s.git' % (self.git_path, self.project_root, name)
        if out_file:
            result.append('\n@echo.')
            result.append('@echo cloning %d/%d' % (i + 1, length))
            result.append('cd /d %s' % work_dir)
            result.append(cmd)
            result.append('call %s' % timex_bat_file)
        else:
            print('clone %d/%d' % (i + 1, length))
            os.chdir(work_dir)
            self.run_cmd(cmd)
    if out_file:
        # Record the start time first.
        result.insert(0, 'call %s save' % timex_bat_file)
        result.append('\n@ echo download complete.')
        result.append('@pause')
        filex.write_lines(out_file, result, add_line_separator=True)
        # Copy the timing helper script next to the generated .bat.
        shutil.copy('data/timex.bat', timex_bat_file)
        print('复制 timex.bat 完成')
def export_omegat_dictionary_to_file(file_path, result_file=None):
    """Export the OmegaT glossary as key=value lines.
    :param file_path: glossary file path
    :param result_file: output path; defaults to file_path with a .txt extension
    :return: None
    """
    if result_file is None:
        result_file = filex.get_result_file_name(file_path, '', 'txt')
    omegat_dict = Translator.get_omegat_dict(file_path)
    # Comprehension instead of append-loop; the original sorted the keys only
    # to count them — len() alone suffices.
    result = ['%s=%s\n' % (key, value) for key, value in omegat_dict.items()]
    print('size is %d' % len(omegat_dict))
    filex.write_lines(result_file, result)
def generate_need_translation_file2(en_dir, result_file):
    """Merge every English dictionary under en_dir and write key=value lines."""
    merged = dict()
    for en_path in filex.list_file(en_dir):
        merged.update(Tools.get_dict_from_file(en_path))
    lines = ['%s=%s\n' % (key, value) for key, value in merged.items()]
    filex.write_lines(result_file, lines)
def handle_keymap_file(en_file, cn_file, comment_file, result_file=None):
    """
    Re-process line-by-line keymap entries.
    Exported as .properties so OmegaT processes it according to the filter
    configuration. Lines start with [#] or [desc] plus a space, which the
    filter parses correctly.
    :param en_file: English file providing action descriptions
    :param cn_file: Chinese keymap file to process
    :param comment_file: file providing per-action comments
    :param result_file: output path; defaults to cn_file + '_add_desc_and_comment'
    :return: None
    """
    if result_file is None:
        result_file = filex.get_result_file_name(cn_file, '_add_desc_and_comment', 'properties')
    lines = filex.read_lines(cn_file)
    if lines is None:
        return
    desc_dict = KeymapList.get_action_desc_dict(en_file)
    comment_dict = KeymapList.get_comment_dict(comment_file)
    count = 0
    desc_count = 0
    result = []
    for line in lines:
        line = line.replace('\n', '')
        if line.startswith('#'):
            old_line = line
            # A '#' prefix was added earlier — strip it off here.
            line = line.lstrip('# ')
            # The stripped-off part is the prefix; note this happens after
            # the '\n' has already been removed.
            prefix = old_line[0:len(old_line) - len(line)].rstrip()
        else:
            prefix = '#' * 5
        append = ''
        count += 1
        if line in desc_dict.keys():
            desc = desc_dict[line]
            desc_count += 1
            print('%d/%d,line=%s,desc=%s' % (count, desc_count, line, desc))
            # A description exists — append a [desc] block.
            append += '\n\n%s %s' % ('[desc]', desc)
        if line in comment_dict.keys():
            comment = comment_dict[line]
            print('%s的描述为%s' % (line, comment))
            append += '\n\n%s' % comment
        line = '\n\n[%s] %s%s' % (prefix, line, append)
        result.append(line)
    filex.write_lines(result_file, result)
def filter_word(self):
    """Clean the source lines (drop whitespace and Chinese punctuation), keep
    lines passing filter_line, persist them, then scan for duplicates."""
    raw_lines = filex.read_lines(self.source_file, ignore_line_separator=True)
    # Strip whitespace, comma and full stop characters from every line.
    cleaned = [re.sub(r'[\s,。]', '', entry) for entry in raw_lines]
    # Keep only lines accepted by the filter.
    kept = [entry for entry in cleaned if self.filter_line(entry)]
    # Persist the intermediate result.
    filex.write_lines(self.target_file, kept, add_line_separator=True)
    # Concatenate for the character count report.
    all_text = ''.join(kept)
    print(f'整理完毕,共 {len(all_text)} 字')
    self.find_duplicate(kept)
def check_rv_xml_files(self):
    """Scan every xml file in the workspace and record those containing 'RecyclerView'."""
    xml_files = filex.list_file(self.work_space, 'xml$')
    total = len(xml_files)
    print('共有文件 %d 个' % total)
    matched = []
    for index, path in enumerate(xml_files):
        print('%d/%d %s' % (index + 1, total, path))
        # A file qualifies if its content mentions RecyclerView anywhere.
        if 'RecyclerView' in filex.read(path):
            matched.append(path)
    print('共有 %d 个文件包含 recycler view' % len(matched))
    filex.write_lines(self.rv_xml_files_path, matched, add_line_separator=True)
def get_num_in_multi_thread(self):
    """Fetch numbers with a multi-threaded queue worker and persist the
    accumulated list back to num_file_path."""
    self.numbers = filex.read_lines(self.num_file_path, ignore_line_separator=True)
    if self.numbers is None:
        self.numbers = []
    length = len(self.numbers)
    print(f'获取前共 {length} 个')
    # 1000 task indexes for the workers to consume.
    q = queue.Queue()
    for i in range(1000):
        q.put(i)
    t = threadx.HandleQueueMultiThread(q, self.get_in_thread, thread_num=10, print_before_task=True)
    # NOTE(review): the code reads results immediately after start(), so
    # HandleQueueMultiThread.start presumably blocks until the queue is
    # drained — confirm in threadx.
    t.start()
    current_length = len(self.numbers)
    print(f'获取结束,获取到 {current_length-length}号码,当前共 {current_length} 个')
    filex.write_lines(self.num_file_path, self.numbers, add_line_separator=True)
def auto_process_file2(text, add_note=True):
    """
    Auto-process a file.
    :param text: text copied from discuz
    :param add_note: whether to add the reprint notice
    :return: the processed file content joined as one string, or the original
        text when no matching file is found
    """
    title, tid = BlogXTools.get_title_and_tid(text)
    if tid is not None:
        file_name = '[%s]%s.md' % (tid, title)
        found_file = None
        for file in filex.list_file(BlogXTools.process_dir):
            parent_dir, base_name = os.path.split(file)
            if base_name == file_name:
                print('打到文件')
                found_file = file
                break
            elif base_name == '%s.txt' % title:
                # Unnamed file found — rename it to the canonical name.
                print('找到未命名的文件,执行重命名')
                new_name = parent_dir + '/' + file_name
                os.rename(file, new_name)
                found_file = new_name
                break
        if found_file:
            lines = filex.read_lines(found_file)
            first_line = lines[0]
            need_process = True
            if first_line.startswith('[md]'):
                print('第一行已经包含[md]标签,不处理')
                need_process = False
            elif first_line.startswith('>本文由平方X'):
                print('第一行已经包含转载申明')
            else:
                if add_note:
                    # Prepend the reprint notice.
                    url = 'http://blog.pingfangx.com/%s.html' % tid
                    result = '>本文由平方X发表于平方X网,转载请注明出处。[%s](%s)\n\n' % (url, url)
                    lines.insert(0, result)
                    print('已写入转载申明')
            if need_process:
                # Wrap the content with the [md] tag pair.
                lines.insert(0, '[md]\n\n')
                lines.append('\n\n[/md]')
            filex.write_lines(found_file, lines)
            # Join the content so the caller can copy it.
            text = ''.join(lines)
    return text
def process_tips_translation_file(file_path, result_file, result_type, add_header=None, add_footer=None, language='cn'):
    """
    Process a translated tip file: remove
    <meta http-equiv="content-type" content="text/html; charset=UTF-8">
    which OmegaT adds automatically but which stops AndroidStudio from loading
    the file, and unescape '&' back.
    :param file_path: input html file
    :param result_file: output path
    :param result_type: the meta must be removed for AndroidStudio
    :param add_header: header to insert after <body>
    :param add_footer: footer to append at the end
    :param language: language; for 'en' the meta is (re)inserted before <link rel=
    :return: None
    """
    lines = filex.read_lines(file_path)
    if lines is None:
        return
    if add_header and add_header in lines:
        # Header already present — do not add it twice.
        add_header = None
    meta = r'<meta http-equiv="content-type" content="text/html; charset=UTF-8">'
    result = []
    add_meta = False
    for line in lines:
        # Keep the line unless it carries the meta (GitHub Pages output keeps everything).
        if result_type == Tips.RESULT_TYPE_GITHUB_PAGES or meta not in line:
            if not add_meta and language == 'en':
                if '<link rel=' in line:
                    # Insert the meta once, before the first <link rel= line.
                    result.append(meta)
                    add_meta = True
            # Unescape entities and append.
            line = Tips.parse_line(line, result_type)
            result.append(line)
            # Insert the header right after the <body ...> line.
            if add_header is not None and line.lstrip().startswith('<body'):
                result.append(add_header)
    if add_footer is not None:
        # (original note: might be better placed above) — appended at the end.
        result.append(add_footer)
    filex.write_lines(result_file, result, print_msg=False)
def get_num(self):
    """Fetch the number list from the JSONP endpoint, drop already-known
    numbers, and append the remainder to num_file_path."""
    url = self.url
    # Cache-busting timestamp appended to the url.
    url += str(int(time.time() * 1000))
    print(url)
    result = netx.get(url)
    # Strip the JSONP wrapper: keep the text between the first '(' and ')'.
    result = result[result.index('(') + 1:result.index(')')]
    result = json.loads(result)
    # Keep only 11-digit entries.
    num_list = list(filter(lambda x: len(str(x)) == 11, result['numArray']))
    num_list = [str(x) for x in num_list]
    print(num_list)
    print(f'获取到号码 {len(num_list)} 个')
    old_numbers = filex.read_lines(self.num_file_path, ignore_line_separator=True)
    print(f'之前有号码 {len(old_numbers)} 个')
    # Drop numbers that are already recorded.
    num_list = list(filter(lambda x: x not in old_numbers, num_list))
    print(f'过滤后,剩号码 {len(num_list)} 个')
    # 'a': append mode — existing numbers are kept.
    filex.write_lines(self.num_file_path, num_list, 'a', add_line_separator=True)
def get_ysl_details(source_file, result_file):
    """Fetch detail data (cover image, swatch image, price) for each YSL
    lipstick in source_file, appending results one line at a time."""
    lines = filex.read_lines(source_file, ignore_line_separator=True)
    length = len(lines)
    for i in range(length):
        print('获取 %d/%d ' % (i + 1, length))
        line = lines[i]
        lipstick = Lipstick.from_string(line)
        # Some colours are not specified; opening redirects to the default colour.
        page = netx.get(lipstick.url, need_print=False)
        soup = BeautifulSoup(page, "html.parser")
        cover_image = soup.select_one('.primary_image')['src']
        color_image2 = soup.select_one('.product_tab_shades_left').select_one('.product_image.b-product_img')['src']
        price = soup.select_one('.product_price.price_sale.b-product_price-sale').text.strip()
        lipstick.img = ','.join((lipstick.img, color_image2, cover_image))
        lipstick.price = price
        # Append per item so progress survives a mid-run failure.
        filex.write_lines(result_file, [str(lipstick)], mode='a', add_line_separator=True)
def update_indent_file(self):
    """Update the indent-format export of the xmind workbook, re-inserting
    lines that exist in the old export but are missing from the new one."""
    workbook = xmind.load(self.xmind_file_path)
    out_file = self.get_indent_out_file(workbook)
    if not os.path.exists(out_file):
        # No previous export — write a fresh one and stop.
        print(f'{out_file} 不存在,直接写入')
        self.print_workbook_as_indent()
        return
    else:
        print(f'更新 {out_file}')
    old_lines = filex.read_lines(out_file, ignore_line_separator=True)
    new_lines = self.collect_sheet_as_indent(workbook.getPrimarySheet())
    for i, line in enumerate(old_lines):
        if line not in new_lines:
            # Old line missing from the new export — insert it back.
            print(f'需要插入行\n{line}')
            self.insert_line(i, old_lines, new_lines)
    filex.write_lines(out_file, new_lines, add_line_separator=True)
def delete_symbol_of_file(file, result_file, replace_list):
    """Apply a list of (pattern, replacement) regex pairs to every line of a
    file and write the result.
    :param file: input file path
    :param result_file: output file path
    :param replace_list: iterable of (pattern, replacement) pairs for re.sub
    :return: None
    """
    lines = filex.read_lines(file)
    if lines is None:
        return
    result = []
    for line in lines:
        line = line.replace('\n', '')
        # (Removed dead check: str.replace never returns None, so the original
        # 'if line is None: continue' was unreachable.)
        old_line = line
        for replace_pattern in replace_list:
            line = re.sub(replace_pattern[0], replace_pattern[1], line)
        if old_line != line:
            print('处理【%s】为【%s】' % (old_line, line))
        result.append(line + '\n')
    filex.write_lines(result_file, result)
def start(self):
    """Repeatedly inline candidate lines until none remain, then write the result."""
    lines = filex.read_lines(self.file, ignore_line_separator=True)
    if not lines:
        print(f'文件内容为空:{self.file}')
        return
    next_line = self.get_next_line(lines, 0)
    while next_line.valid():
        print(f'处理第 {next_line.index + 1} 行【{next_line.line}】')
        if self.need_inline(next_line):
            self.inline(lines, next_line)
            # After inlining, re-scan starting from pre_index.
            next_line = self.get_next_line(lines, next_line.pre_index)
        else:
            # Not inlined — continue scanning from the current index.
            next_line = self.get_next_line(lines, next_line.index)
    print(f'处理完毕,写入结果')
    filex.write_lines(self.output, lines, add_line_separator=True)
def process_tips_manifest_file(self, file_path, result_file=None):
    """
    Process the manifest file, collecting ordered tip names into a properties
    file so they can be translated.
    :param file_path: manifest file path
    :param result_file: output path; defaults to file_path + '_en' .properties
    :return: None
    """
    if result_file is None:
        result_file = filex.get_result_file_name(file_path, '_en', 'properties')
    entries = []
    for tip_file in self.get_tips_order_files(file_path):
        # Base name without extension on the left, words derived from it on the right.
        base = tip_file.split('.')[0]
        entries.append('%s=%s\n' % (base, self.camel_word_to_words(base)))
    filex.write_lines(result_file, entries)