Example #1
    def process_keymap_reference_card_translation(en_file,
                                                  cn_file,
                                                  result_file=None):
        """
        Convert the translation result into a Markdown file
        :param en_file:
        :param cn_file:
        :param result_file:
        :return:
        """
        if result_file is None:
            result_file = filex.get_result_file_name(en_file, '_result', 'md')
        en_lines = filex.read_lines(en_file)
        cn_lines = filex.read_lines(cn_file)
        if en_lines is None or cn_lines is None:
            return None

        # Starts with one or more '#' inside '[]', followed by the content
        p_title = re.compile(r'^\[(#+)\]\s?(.*)')
        result = []
        for i in range(len(cn_lines)):
            line = cn_lines[i]
            line = encodex.unicode_str_to_chinese(line)
            line = re.sub(p_title, r'\1 \2', line)
            en_line = en_lines[i].replace('\n', '')
            if '【' in en_line:
                shortcut = en_line.split('【')[1]
                line = line.replace('* ', "")
                line = '* %-30s%s' % ('【%s】' % shortcut, line)
            result.append(line)
        filex.write_lines(result_file, result)
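All of these examples call into a small filex helper module whose implementation is not shown on this page. A minimal sketch that is consistent with how it is called here (the names, defaults and None-on-missing behaviour are assumptions, not the real module):

    import os


    def read_lines(path, encoding='utf-8', ignore_line_separator=False):
        """Read a file into a list of lines; return None if the file does not exist."""
        if not os.path.exists(path):
            return None
        with open(path, encoding=encoding) as f:
            lines = f.readlines()
        if ignore_line_separator:
            lines = [line.rstrip('\r\n') for line in lines]
        return lines


    def write_lines(path, lines, mode='w', encoding='utf-8', add_line_separator=False):
        """Write a list of lines, optionally appending a newline to each."""
        with open(path, mode, encoding=encoding) as f:
            for line in lines:
                f.write(line + '\n' if add_line_separator else line)


    def get_result_file_name(path, suffix, new_extension=None):
        """Build an output name such as 'name_suffix.ext' next to the input file."""
        base, ext = os.path.splitext(path)
        if new_extension:
            ext = '.' + new_extension
        return '%s%s%s' % (base, suffix, ext)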
Example #2
    def modify_api(self):
        lines = filex.read_lines(self.path, ignore_line_separator=True)
        if not lines:
            print('lines are empty')
            return

        changed_apis = self.get_changed_apis()
        if not changed_apis:
            print('api list is empty')
            return
        changed_dealer_apis = self.get_changed_apis(True)
        if not changed_dealer_apis:
            print('dealer api list is empty')
            return

        # start processing
        new_lines = []
        api_urls = []
        # Starts with whitespace, then '@' plus the HTTP method, '(', possibly a DEALER constant, then a quoted url and ')' (a standalone check of this pattern follows the example)
        pattern = re.compile(r'\s+@(\w+)\((.*)"(.+)"\)$')
        for i, line in enumerate(lines):
            line = line.replace('\n', '')
            match = pattern.match(line)
            if not match:
                # no match, keep the line as-is
                new_lines.append(line)
                continue

            api_urls.append(line)
            print(f'processing api #{len(api_urls)}: {line}')

            method, add, url = match.groups()
            if 'DEALER' in add:
                # dealer api
                new_url = self.modify_line(url, changed_dealer_apis)
            else:
                # normal api
                new_url = self.modify_line(url, changed_apis)
            if url == new_url:
                new_lines.append(line)
                print('no change')
            else:
                new_lines.append(line.replace(url, new_url))
                print(f'changed to {new_url}')

        print(f'{len(api_urls)} apis in total')

        remain_apis = list(filter(lambda x: not x.replaced, changed_apis))
        print(f'{len(remain_apis)} apis were not replaced')
        for api in remain_apis:
            print(api.old)

        remain_apis = list(
            filter(lambda x: not x.replaced, changed_dealer_apis))
        print(f'{len(remain_apis)} dealer apis were not replaced')
        for api in remain_apis:
            print(api.old)

        # save the file
        filex.write_lines(self.path, new_lines, add_line_separator=True)
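A quick standalone check of the annotation pattern used in modify_api above; the sample lines are invented and only illustrate what the regex is meant to match:

    import re

    pattern = re.compile(r'\s+@(\w+)\((.*)"(.+)"\)$')
    samples = [
        '    @GET(DEALER_HOST + "dealer/order/list")',
        '    @POST("api/user/login")',
        '    private String name;',
    ]
    for sample in samples:
        match = pattern.match(sample)
        if match:
            method, add, url = match.groups()
            print(method, repr(add), url)
        else:
            print('no match:', sample)
    # GET 'DEALER_HOST + ' dealer/order/list  -> treated as a dealer api ('DEALER' in add)
    # POST '' api/user/login                  -> treated as a normal api
    # the plain java line does not match and would be copied through unchanged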
Example #3
    def replace():
        in_file = r'ignore/project_save.tmx'
        out_file = r'ignore/project_save_result.tmx'
        lines = filex.read_lines(in_file)

        sep = '=' * 10
        for i, line in enumerate(lines):
            if cn_type_param in line:
                print(f'found {cn_type_param} at line {i + 1}')
                en_line = lines[i - 3]
                en_type_parameter_count = en_line.lower().count(
                    en_type_parameter)
                en_type_argument_count = en_line.lower().count(
                    en_type_argument)
                if not en_type_parameter_count and not en_type_argument_count:
                    print(sep + f' no related word in the English line: {en_line} ' + sep)
                elif en_type_parameter_count and en_type_argument_count:
                    print(sep + f' the English line contains both, not translating: {en_line} ' + sep)
                elif en_type_parameter_count:
                    print(
                        f'only {en_type_parameter} in the English line, replacing {cn_type_param}->{cn_type_parameter}'
                    )
                    lines[i] = line.replace(cn_type_param, cn_type_parameter)
                elif en_type_argument_count:
                    print(
                        f'only {en_type_argument} in the English line, replacing {cn_type_param}->{cn_type_argument}'
                    )
                    lines[i] = line.replace(cn_type_param, cn_type_argument)
                else:
                    print(sep + ' unexpected case ' + sep)
        filex.write_lines(out_file, lines)
Example #4
 def process_keymap_reference_card(file_path, result_file=None):
     """
     Process the keymap reference card
     Source: https://resources.jetbrains.com/storage/products/intellij-idea/docs/IntelliJIDEA_ReferenceCard.pdf
     After saving, copy the text out, split the shortcuts with '【', then run another pass over it
     :param file_path:
     :param result_file:
     :return:
     """
     if result_file is None:
         result_file = filex.get_result_file_name(file_path, '_modified',
                                                  'properties')
     lines = filex.read_lines(file_path)
     if lines is None:
         return
     result = []
     # Starts with one or more '#', followed by the content
     p_title = re.compile(r'^(#+)\s?(.*)')
     for line in lines:
         line = line.replace('\n', '')
         line = re.sub(p_title, r'[\1] \2', line)
         if '【' in line:
             split_result = line.split('【')
             line = '* %s' % split_result[0]
         result.append(line + '\n')
     filex.write_lines(result_file, result)
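The heading rewrite in process_keymap_reference_card turns a leading run of '#' characters into a bracketed level tag that Example #1 later reverses. A minimal check of that substitution (sample headings invented):

    import re

    p_title = re.compile(r'^(#+)\s?(.*)')
    print(re.sub(p_title, r'[\1] \2', '## Editing'))  # -> "[##] Editing"
    print(re.sub(p_title, r'[\1] \2', 'Ctrl + B Go to declaration'))  # no leading '#', left unchanged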
Example #5
 def pares_files(file_path):
     """解析文件"""
     dir_list = []
     file_list = []
     lines = filex.read_lines(file_path, ignore_line_separator=True)
     current_dir = ''
     for line in lines:
         if ':' in line:
             # has a timestamp, so it's a file
             is_dir = False
         else:
             if line == '.' or '/' in line or '.' not in line:
                 # no '.', treat it as a directory, though a few files such as "install" also lack one
                 is_dir = True
             else:
                 is_dir = False
         if is_dir:
             current_dir = line
         else:
             # it's a file
             if current_dir not in dir_list:
                 dir_list.append(current_dir)
             file_name = line.split('\t')[0]
             file_path = f'{current_dir}/{file_name}'
             if file_path.startswith('./'):
                 file_path = file_path[2:]
             file_list.append(file_path)
     return dir_list, file_list
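A hypothetical listing to illustrate the input format pares_files seems to expect, directories on their own line and files followed by a tab-separated timestamp; the file name and contents below are invented, and the call assumes pares_files and the filex sketch above are importable:

    import tempfile

    listing = '\n'.join([
        '.',
        'readme.txt\t2020-01-02 10:30',
        './docs',
        'guide.md\t2020-01-02 10:31',
        'install\t2020-01-02 10:32',
    ])
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write(listing)
    dirs, files = pares_files(f.name)
    print(dirs)   # expected: ['.', './docs']
    print(files)  # expected: ['readme.txt', 'docs/guide.md', 'docs/install']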
Example #6
 def check_use_layout_res(self):
     """根据检查出的 java 文件检查使用的布局资源"""
     java_files = filex.read_lines(self.rv_java_files_path, ignore_line_separator=True)
     pattern = re.compile(r'(?<!setContentView\()R\.layout\.(.*?)[;),]')
     name_pattern = re.compile(r'^(?!fragment|dialog|high|pop|layout|address)')
     xml_name_list = []
     for file in java_files:
         content = filex.read(file)
         all_match = re.findall(pattern, content)
         if all_match:
             print('layout usage found in %s' % file)
             for match in all_match:
                 if re.search(name_pattern, match):
                     print(match)
                     if 'item' not in match:
                         print("does not contain 'item'")
                     if match not in xml_name_list:
                         xml_name_list.append(match)
                 else:
                     print('filtered out', match)
     print('%d files used in total' % len(xml_name_list))
     print('looking for the matching xml files')
     files = filex.list_file(self.work_space, 'xml$')
     xml_file_list = []
     for xml_name in xml_name_list:
         for file in files:
             name = os.path.splitext(os.path.split(file)[1])[0]
             if xml_name == name:
                 xml_file_list.append(file)
                 break
     print('found %d files in total' % len(xml_file_list))
     filex.write_lines(self.item_xml_files_path, xml_file_list, add_line_separator=True)
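The two regular expressions in check_use_layout_res do most of the work: the negative lookbehind skips the layout passed to setContentView, and the negative lookahead drops names with excluded prefixes. A standalone check (the java line below is invented):

    import re

    pattern = re.compile(r'(?<!setContentView\()R\.layout\.(.*?)[;),]')
    name_pattern = re.compile(r'^(?!fragment|dialog|high|pop|layout|address)')

    content = 'setContentView(R.layout.activity_main); mInflater.inflate(R.layout.item_order, parent, false);'
    for match in re.findall(pattern, content):
        print(match, bool(re.search(name_pattern, match)))
    # only "item_order True" is printed: activity_main is rejected by the lookbehind,
    # and item_order does not start with any of the excluded prefixes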
Example #7
    def get_dior_details(source_file, result_file):
        """获取口红详情"""
        lipstick_list = filex.read_lines(source_file, ignore_line_separator=True)
        length = len(lipstick_list)
        for i in range(length):
            lipstick = Lipstick.from_string(lipstick_list[i])
            print('fetching lipstick info %d/%d' % (i + 1, length))

            url = ChooseLipstick.dior_host + urllib.parse.quote(lipstick.url)
            page = netx.get(url, need_print=False)
            soup = BeautifulSoup(page, "html.parser")
            cover_img_tag = soup.select_one('.png-bg.cover-bg')
            # all_image = cover_img['data-zoom-views']
            cover_img = cover_img_tag.select_one('.js-cover-img')['src']
            cover_img = ChooseLipstick.dior_host + cover_img

            # name = soup.select_one('.quickbuy-title').string
            # desc = soup.select_one('.quickbuy-subtitle').string
            price = soup.select_one('.details-price.js-order-value').string.strip()
            color_name = soup.select_one('.swatches-list').select_one('li.selected').select_one('a')['data-swatch-name']
            # color_span = soup.select_one('.swatch-name.js-swatch-name')
            # color = color_span.select_one('span').string
            # swatches_list = soup.select_one('.swatches-list.js-products-selector')
            # swatches = swatches_list.select_one('li.selected')
            lipstick.url = url
            lipstick.price = price
            lipstick.name = color_name
            lipstick.img = ','.join((lipstick.img, cover_img))
            filex.write_lines(result_file, [str(lipstick)], mode='a', add_line_separator=True)
Example #8
 def check_and_append_tips_name(file_path,
                                tips_name_file,
                                result_file=None):
     """
     Check whether the tips files are complete
     Used because IdeTipsAndTricks does not list every file, yet the file names still need translating, so the missing ones are appended
     """
     if result_file is None:
         result_file = filex.get_result_file_name(tips_name_file, '_append')
     file_list = filex.list_file(file_path)
     print('%d files in total' % len(file_list))
     lines = filex.read_lines(tips_name_file)
     tips_name = []
     for line in lines:
         if '=' in line:
             name = line.split('=')[0]
             tips_name.append(name)
             # only one level of names is added; files under exclude are not handled
             file_name = '%s\\%s.html' % (file_path, name)
             if file_name in file_list:
                 file_list.remove(file_name)
             else:
                 print('file does not exist: %s' % file_name)
     print('%d tip names in total' % len(tips_name))
     print('%d files still missing' % len(file_list))
     # write the result
     lines.append('\n# append\n')
     for file_name in file_list:
         name = os.path.splitext(os.path.split(file_name)[1])[0]
         word = Tips.camel_word_to_words(name)
         lines.append('%s=%s\n' % (name, word))
     filex.write_lines(result_file, lines)
Example #9
    def order_tips_file(tips_names_file, processed_dir, result_dir):
        """
        Order the translated tips files
        :param tips_names_file:
        :param processed_dir:
        :param result_dir:
        :return:
        """

        file_dict = Tips.get_file_dict_in_dir(processed_dir)
        if file_dict is None:
            return

        lines = filex.read_lines(tips_names_file, ignore_line_separator=True)
        if lines is None:
            return

        length = len(lines)
        for i in range(length):
            line = lines[i]
            en_name, cn_name = line.split('=')
            if en_name in file_dict.keys():
                old_name = file_dict[en_name]
                dir_name, file_name = os.path.split(old_name)
                new_name = '%s\\%03d-%s' % (result_dir, i + 1, file_name)
                print('copying %s to %s' % (old_name, new_name))
                filex.check_and_create_dir(new_name)
                shutil.copy(old_name, new_name)
            else:
                print('no file for ' + en_name)
Example #10
 def modify_xml_file(item_xml_file):
     lines = filex.read_lines(item_xml_file)
     modified = False
     length = len(lines)
     wrap_content = 'wrap_content'
     match_parent = 'match_parent'
     fill_parent = 'fill_parent'
     for i in range(length):
         line = lines[i]
         # print(line)
         if '?>' in line:
             # skip the xml declaration on the first line
             continue
         if 'android:layout_width' in line:
             if wrap_content in line:
                 modified = True
                 print('layout_width is %s, changing to %s' % (wrap_content, match_parent))
                 lines[i] = line.replace(wrap_content, match_parent)
         if 'android:layout_height' in line:
             if match_parent in line:
                 modified = True
                 print('layout_height is %s, changing to %s' % (match_parent, wrap_content))
                 lines[i] = line.replace(match_parent, wrap_content)
             elif fill_parent in line:
                 modified = True
                 print('layout_height is %s, changing to %s' % (fill_parent, wrap_content))
                 lines[i] = line.replace(fill_parent, wrap_content)
         if '>' in line:
             # end of the opening tag
             break
     if modified:
         print('changes made, saving')
         filex.write_lines(item_xml_file, lines)
         return True
     return False
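A small hypothetical item layout to show the swap modify_xml_file performs on the root tag; the file contents are made up, and the call assumes the filex helpers sketched earlier:

    import tempfile

    xml = '\n'.join([
        '<?xml version="1.0" encoding="utf-8"?>',
        '<LinearLayout',
        '    android:layout_width="wrap_content"',
        '    android:layout_height="match_parent">',
        '</LinearLayout>',
    ])
    with tempfile.NamedTemporaryFile('w', suffix='.xml', delete=False) as f:
        f.write(xml)
    print(modify_xml_file(f.name))
    # prints the two change messages and True: layout_width becomes match_parent,
    # layout_height becomes wrap_content, and the file is written back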
Example #11
 def get_comment_dict(file_path, print_msg=False):
     """
     Read the comment dict, used when reading comments for the new keymap
     :param file_path:
     :param print_msg:
     :return:
     """
     lines = filex.read_lines(file_path, ignore_line_separator=True)
     if lines is None:
         return
     comment_dict = dict()
     p_title = re.compile(r'\[(#+)\]\s?')
     for i in range(len(lines)):
         line = lines[i]
         if line.startswith('#[x'):
             # it's a comment line
             pre_index = i - 1
             action = ''
             while action == '' and pre_index >= 0:
                 pre_line = lines[pre_index]
                 if pre_line.startswith('[#'):
                     action = re.sub(p_title, '', pre_line)
                     comment = line
                     if action in comment_dict.keys():
                         if print_msg:
                             print('duplicate comment for %s' % action)
                         comment = comment_dict[action] + '\n' + comment
                     comment_dict[action] = comment
                     if print_msg:
                         print('the comment for %s is %s' % (action, comment))
                 pre_index -= 1
     return comment_dict
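A tiny invented keymap excerpt to show how get_comment_dict pairs a '#[x' comment line with the nearest preceding '[#...]' action line (it assumes the function and the filex sketch above are importable):

    import tempfile

    content = '\n'.join([
        '[#] EditorCopy',
        '#[x] works in the editor only',
        '[##] EditorPaste',
    ])
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False, encoding='utf-8') as f:
        f.write(content)
    print(get_comment_dict(f.name, print_msg=True))
    # expected: {'EditorCopy': '#[x] works in the editor only'}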
Example #12
    def __init__(self, api_key, source_dir, target_dir=None, old_target_dir=None, minimum_size=1024, keep_files=None,
                 offset=0):
        """

        :param api_key: api key
        :param source_dir: source directory
        :param target_dir: target directory; if empty, the source directory is used
        :param old_target_dir: target directory of the old version
        :param minimum_size: minimum file size; files smaller than this are not compressed
        :param keep_files: file names to keep, regex matching is supported
        :param offset: offset to start from when compressing everything
        """
        tinify.key = api_key
        self.source_dir = source_dir
        self.target_dir = target_dir
        if not self.target_dir:
            self.target_dir = self.source_dir
        self.old_target_dir = old_target_dir
        self.minimum_size = minimum_size
        self.keep_files = keep_files
        if isinstance(self.keep_files, str):
            # if it is a string, treat it as a file path
            self.keep_files = filex.read_lines(self.keep_files, ignore_line_separator=True)
        self.offset = offset
        self.md5_file = 'ignore/md5.txt'
Example #13
 def copy_tiny_png_by_md5(self, md5_file, reference_target_dir):
     """
     Copy already-compressed files based on their md5
     If the md5 is known and the reference target directory holds a result file, copy it directly into the result directory
     :param md5_file: md5 file
     :param reference_target_dir: reference target directory
     :return:
     """
     lines = filex.read_lines(md5_file, ignore_line_separator=True)
     if not lines:
         print('no md5 entries')
         return
     md5_dict = {}
     for line in lines:
         md5, path = line.split(',')
         md5_dict[md5] = path
     files = self.list_need_compress_file()
     for file in files:
         md5 = self.get_file_md5(file)
         if not (md5 in md5_dict.keys()):
             continue
         reference_source_file = md5_dict[md5]
         reference_target_file = reference_source_file.replace(self.source_dir, reference_target_dir)
         if not os.path.exists(reference_target_file):
             # the reference file does not exist
             print(f'compressed file does not exist: {reference_target_file}')
             continue
         target_file = self.get_target_file(file)
         print(f'same file, copying the compressed one {reference_target_file} -> {target_file}')
         self.copy_file(reference_target_file, target_file)
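The md5 file consumed above appears to hold one '<md5>,<source path>' pair per line; an invented two-line sample parsed the same way the method does:

    sample_lines = [
        '5d41402abc4b2a76b9719d911017c592,res/drawable/icon_a.png',
        '7d793037a0760186574b0282f2f435e7,res/drawable/icon_b.png',
    ]
    md5_dict = {}
    for line in sample_lines:
        md5, path = line.split(',')
        md5_dict[md5] = path
    print(md5_dict)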
Example #14
 def iter_pan_url(self):
     """从每一页中读取百度网盘地址"""
     lines = filex.read_lines(self.url_list_file,
                              ignore_line_separator=True)
     length = len(lines)
     for i in range(length):
         print('fetching %d/%d' % (i + 1, length))
         self.get_pan_url(lines[i])
Example #15
 def iter_save_pan(self):
     """保存进百度网盘"""
     lines = filex.read_lines(self.pan_list_file,
                              ignore_line_separator=True)
     length = len(lines)
     for i in range(length):
         print('saving %d/%d' % (i + 1, length))
         self.save_pan_url(lines[i], self.driver)
     print('done')
Example #16
 def read_and_show(self, file_path):
     """
     Read the file and show it
     :param file_path: file path
     :return: 
     """
     lines = filex.read_lines(file_path)
     root = self.parse_file_from_lines(lines)
     if root is not None:
         print(self.parse_file_to_text(root))
Example #17
 def get_tree_data_from_file(self, file_path):
     """
     Read tree data from the file
     :param file_path: 
     :return: 
     """
     lines = filex.read_lines(file_path)
     if lines is None:
         return None
     return self.get_tree_from_lines(lines)
Example #18
 def modify_use_layout_res(self):
     """修改所使用的资源文件"""
     item_xml_files = filex.read_lines(self.item_xml_files_path, ignore_line_separator=True)
     modified_xml_files = []
     for item_xml_file in item_xml_files:
         print('checking', item_xml_file)
         if self.modify_xml_file(item_xml_file):
             modified_xml_files.append(item_xml_file)
     print('modified %d xml files in total' % len(modified_xml_files))
     filex.write_lines(self.modified_xml_files_path, modified_xml_files, add_line_separator=True)
Example #19
    def export_html(source_file, result_file):
        """导出 html"""
        lipstick_type = os.path.splitext(os.path.basename(result_file))[0].split('_')[0]
        current_path = os.path.dirname(result_file) + '/'
        lines = filex.read_lines(source_file, ignore_line_separator=True)
        length = len(lines)
        html = '''
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>选呀选呀选口红~</title>
    <link rel="stylesheet" href="https://cdn.bootcss.com/bootstrap/3.3.7/css/bootstrap.min.css"
          integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
    <link rel="stylesheet" href="css/lipstick.css">
</head>
<body>
<div class="container-fluid">
    [content]
</div>
</body>
</html>
        '''
        content = list()
        for i in range(length):
            content.append('<div class="row">')
            lipstick = Lipstick.from_string(lines[i])
            # left column
            content.append('<div class="%s">' % 'col-xs-3')
            item_list = ['编号:' + lipstick.index, '色号:' + lipstick.name, '类别:' + lipstick.category, lipstick.other]
            for cell in item_list:
                content.append('<h4>%s</h4>' % cell)
            content.append('</div>')

            # images on the right
            image_list = lipstick.img.split(',')
            image_count = len(image_list)
            for j in range(image_count):
                if image_count == 2:
                    if j == 0:
                        col_style = 'col-xs-3'
                    else:
                        col_style = 'col-xs-5'
                else:
                    col_style = 'col-xs-3'
                img = image_list[j]
                image_path = '%simage/%s_%03d_%d.jpg' % (current_path, lipstick_type, i + 1, j + 1)
                ima_tag = '<img class="%s" src="%s"/>' % (col_style, image_path.replace(current_path, ''))
                if not os.path.exists(image_path):
                    netx.get_file(img, image_path)
                content.append(ima_tag)
            content.append('</div>')

        content = html.replace('[content]', '\n'.join(content))
        filex.write(result_file, content)
Example #20
 def read_params(file_path):
     lines = filex.read_lines(file_path, ignore_line_separator=True)
     if not lines:
         return None
     p = re.compile(r'name="(.*)"\t(.*)')
     result = {}
     for line in lines:
         match = re.search(p, line)
         if match:
             result[match.group(1)] = match.group(2)
     return result
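read_params expects lines of the form name="..." followed by a tab and the value; a quick check of the pattern with invented lines:

    import re

    p = re.compile(r'name="(.*)"\t(.*)')
    for line in ['name="appid"\t10001', 'name="sign"\tabcdef', 'unrelated line']:
        match = re.search(p, line)
        if match:
            print(match.group(1), '->', match.group(2))
    # appid -> 10001
    # sign -> abcdef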
Example #21
 def filter_num(self):
     pattern = '87910'
     pattern = re.compile(pattern)
     numbers = filex.read_lines(self.num_file_path, ignore_line_separator=True)
     result = []
     for num in numbers:
         if re.search(pattern, num):
             result.append(num)
     print(f'{len(numbers)} numbers in total, filtered with {pattern}, {len(result)} matched')
     for num in result:
         print(num)
Example #22
    def process_file(self):
        """处理文件"""
        folder, file_name = os.path.split(self.file_path)
        title = os.path.splitext(file_name)[0]
        if title.startswith('[%s]' % self.tid):
            new_name = title + '.md'
        else:
            new_name = '[%s]%s.md' % (self.tid, title)
        new_path = folder + os.path.sep + new_name
        if file_name != new_name:
            print('%s → %s' % (self.file_path, new_path))
            os.rename(self.file_path, new_path)
            self.file_path = new_path
            print('file_path set to %s' % self.file_path)

        print('processing file %s' % new_path)
        try:
            lines = filex.read_lines(new_path)
        except UnicodeDecodeError:
            lines = filex.read_lines(
                new_path, encoding=locale.getpreferredencoding(False))
        first_line = lines[0]
        need_process = True
        if first_line.startswith('[md]'):
            print('the first line already contains the [md] tag, skipping')
            need_process = False
        elif first_line.startswith('>本文由平方X'):
            print('the first line already contains the reprint notice')
        else:
            if self.add_note:
                # add the reprint notice
                url = 'http://blog.pingfangx.com/%s.html' % self.tid
                result = '>本文由平方X发表于平方X网,转载请注明出处。[%s](%s)\n\n' % (url, url)
                lines.insert(0, result)
                print('reprint notice written')
        if need_process:
            # write the [md] tag
            if self.add_md_in_file:
                lines.insert(0, '[md]\n\n')
                lines.append('\n\n[/md]')
        filex.write_lines(new_path, lines)
Example #23
    def validate_version(self):
        """校验是否已更新软件"""

        build_file_path = self.path + os.sep + 'build.txt'
        if not os.path.exists(build_file_path):
            print('build.txt does not exist')
            return

        pre_build_file_path = '%s/jars/%s/英文包/%s/%s' % (self.work_dir, self.name, self.pre_version, 'build.txt')
        if not os.path.exists(pre_build_file_path):
            print("the previous version's build.txt does not exist")
            return

        print('comparing %s and %s' % (build_file_path, pre_build_file_path))
        if filecmp.cmp(build_file_path, pre_build_file_path):
            print('current version is the same as the previous one, please update the software first')
            return
        else:
            print('different')
            print(filex.read_lines(build_file_path))
            print(filex.read_lines(pre_build_file_path))
Example #24
    def handle_keymap_file(en_file, cn_file, comment_file, result_file=None):
        """
        Reprocess the keymap line by line
        Export it as .properties so that OmegaT handles it according to the filter configuration
        Each line starts with [#] or [desc] plus a space, so the filter parses it correctly
        :param en_file:
        :param cn_file:
        :param comment_file:
        :param result_file:
        :return:
        """
        if result_file is None:
            result_file = filex.get_result_file_name(cn_file,
                                                     '_add_desc_and_comment',
                                                     'properties')

        lines = filex.read_lines(cn_file)
        if lines is None:
            return

        desc_dict = KeymapList.get_action_desc_dict(en_file)
        comment_dict = KeymapList.get_comment_dict(comment_file)

        count = 0
        desc_count = 0
        result = []
        for line in lines:
            line = line.replace('\n', '')
            if line.startswith('#'):
                old_line = line
                # a '#' prefix was added, so strip it here
                line = line.lstrip('# ')
                # the length difference is what was stripped; note this runs after '\n' was removed
                prefix = old_line[0:len(old_line) - len(line)].rstrip()
            else:
                prefix = '#' * 5
            append = ''
            count += 1
            if line in desc_dict.keys():
                desc = desc_dict[line]
                desc_count += 1
                print('%d/%d, line=%s, desc=%s' %
                      (count, desc_count, line, desc))
                # has a description, append it
                append += '\n\n%s %s' % ('[desc]', desc)
            if line in comment_dict.keys():
                comment = comment_dict[line]
                print('the comment for %s is %s' % (line, comment))
                append += '\n\n%s' % comment
            line = '\n\n[%s] %s%s' % (prefix, line, append)
            result.append(line)
        filex.write_lines(result_file, result)
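The prefix handling in handle_keymap_file recovers whatever lstrip('# ') removed by comparing lengths; a standalone check with an invented line:

    old_line = '### Navigation'
    line = old_line.lstrip('# ')
    prefix = old_line[0:len(old_line) - len(line)].rstrip()
    print(repr(prefix), repr(line))  # '###' 'Navigation'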
Example #25
    def filter_word(self):
        lines = filex.read_lines(self.source_file, ignore_line_separator=True)
        # strip whitespace, commas and periods
        lines = [re.subn(r'[\s,。]', '', line)[0] for line in lines]
        # filter lines
        lines = list(filter(self.filter_line, lines))
        # write to a file as temporary storage
        filex.write_lines(self.target_file, lines, add_line_separator=True)

        # join the lines for further processing
        all_text = ''.join(lines)
        print(f'clean-up finished, {len(all_text)} characters in total')
        self.find_duplicate(lines)
Example #26
 def write_to_excel(result_file, excel_file=None):
     """
     Write to excel
     :param result_file: 
     :param excel_file: 
     :return: 
     """
     if excel_file is None:
         excel_file = filex.get_result_file_name(result_file, '', 'xls')
     lines = filex.read_lines(result_file, ignore_line_separator=True)
     if lines is None:
         return
     excelx.write_list_to_excel(excel_file, lines, title=Floor.get_excel_title())
Example #27
 def get_num_in_multi_thread(self):
     self.numbers = filex.read_lines(self.num_file_path, ignore_line_separator=True)
     if self.numbers is None:
         self.numbers = []
     length = len(self.numbers)
     print(f'{length} numbers before fetching')
     q = queue.Queue()
     for i in range(1000):
         q.put(i)
     t = threadx.HandleQueueMultiThread(q, self.get_in_thread, thread_num=10, print_before_task=True)
     t.start()
     current_length = len(self.numbers)
     print(f'fetching finished, got {current_length - length} new numbers, {current_length} in total')
     filex.write_lines(self.num_file_path, self.numbers, add_line_separator=True)
Example #28
 def get_all_cmd(self):
     """获取 help 输出的所有命令,处理换行"""
     lines = filex.read_lines(self.help_list_file,
                              encoding='gbk',
                              ignore_line_separator=True)
     result = list()
     for i in range(1, len(lines)):
         line = lines[i]
         if line.startswith(' '):
             result[-1] += line.lstrip()
         else:
             if ' ' in line:
                 result.append(line)
     return result
Example #29
 def export_excel(self):
     """导出 excel"""
     data = []
     list1 = filex.read_lines(self.rv_java_files_path, ignore_line_separator=True)
     list2 = filex.read_lines(self.modified_xml_files_path, ignore_line_separator=True)
     list3 = filex.read_lines(self.item_xml_files_path, ignore_line_separator=True)
     list1.sort()
     list2.sort()
     list3.sort()
     title = ["是否修改", "是否已检查", "使用场景", "文件名", "文件名"]
     for file in list1:
         short_name = os.path.splitext(os.path.split(file)[1])[0]
         comment = self.read_file_comment(file)
         data.append(['', '', comment, short_name, file])
     for file in list2:
         short_name = os.path.splitext(os.path.split(file)[1])[0]
         data.append([1, '', '', short_name, file])
     for file in list3:
         # avoid duplicates
         if file not in list2:
             short_name = os.path.splitext(os.path.split(file)[1])[0]
             data.append(['', '', '', short_name, file])
     excelx.write_list_to_excel(self.excel_path, data, title)
Example #30
    def auto_process_file2(text, add_note=True):
        """
        Process the file automatically
        :param text: text copied from discuz
        :param add_note: whether to add the reprint notice
        :return: 
        """
        title, tid = BlogXTools.get_title_and_tid(text)
        if tid is not None:
            file_name = '[%s]%s.md' % (tid, title)
            found_file = None
            for file in filex.list_file(BlogXTools.process_dir):
                parent_dir, base_name = os.path.split(file)
                if base_name == file_name:
                    print('found the file')
                    found_file = file
                    break
                elif base_name == '%s.txt' % title:
                    print('found a file without the tid prefix, renaming it')
                    new_name = parent_dir + '/' + file_name
                    os.rename(file, new_name)
                    found_file = new_name
                    break
            if found_file:
                lines = filex.read_lines(found_file)
                first_line = lines[0]
                need_process = True
                if first_line.startswith('[md]'):
                    print('the first line already contains the [md] tag, skipping')
                    need_process = False
                elif first_line.startswith('>本文由平方X'):
                    print('the first line already contains the reprint notice')
                else:
                    if add_note:
                        # add the reprint notice
                        url = 'http://blog.pingfangx.com/%s.html' % tid
                        result = '>本文由平方X发表于平方X网,转载请注明出处。[%s](%s)\n\n' % (url,
                                                                          url)
                        lines.insert(0, result)
                        print('reprint notice written')
                if need_process:
                    # write the [md] tag
                    lines.insert(0, '[md]\n\n')
                    lines.append('\n\n[/md]')
                    filex.write_lines(found_file, lines)
                # join the lines so the result can be copied
                text = ''.join(lines)

        return text