import hashlib
import os
import time

from bs4 import BeautifulSoup

# Project-local helpers (tools, dao_lib, log, css_precompiled_lib, and the
# template/minify functions) are defined elsewhere in this package.


def minify_file_in_script(data, offset_path='.', argv=None):
    """
    Minify files
    :param data: rule dict
    :param offset_path: current path, relative to the source directory
    :param argv: input arguments
    :return:
    """
    now_path = data['source_path'] + '/' + offset_path
    if os.path.isdir(now_path):
        fps = os.listdir(now_path)
        for fp in fps:
            minify_file_in_script(data, offset_path + '/' + fp, argv=argv)
    else:
        # Skip every file except the one that actually changed
        if os.path.realpath(now_path) != os.path.realpath(
                argv['change_file_path']):
            return
        fp_type = tools.get_file_type(now_path)
        # Text assets are read/written as text, everything else as bytes
        if fp_type in ('html', 'css', 'js'):
            read_type = 'r'
        else:
            read_type = 'rb'
        with open(now_path, read_type) as fp:
            fp_data = fp.read()
            if fp_type in ('html', 'css', 'js'):
                fp_data = minify_file(fp_data, fp_type, data['encryption'],
                                      data['encryption'])
                write_type = 'w'
            else:
                write_type = 'wb'
            tools.output_file(data['target_path'] + '/' + offset_path,
                              fp_data, write_type)

def separate_javascript(html_str, data):
    """
    Separate out inline JavaScript
    :param html_str: HTML source
    :param data: rule dict
    :return: HTML with inline scripts replaced by a single <script src> tag
    """
    js_data = ''
    soup = BeautifulSoup(html_str, "html.parser")
    scripts = soup.find_all("script")
    for obj in scripts:
        if obj.string and obj.string.strip():
            js_data += ('\n' + obj.string)
            obj.string = ''
    if not js_data:
        return html_str
    # Minify the collected JavaScript
    js_data = handle_javascript(js_data, False, False)
    # Write the separated JavaScript to a content-hashed file
    version = hashlib.md5(js_data.encode("UTF-8")).hexdigest()
    file_path = data['tools_obtain_static_path'] + '/' + str_to_file_path(
        version, 8) + '.min.js'
    tools.output_file(file_path, js_data)
    link_str = '<script src="%s"></script>' % (
        data['tools_obtain_static_path_prefix'] + '/' +
        str_to_file_path(version, 8) + '.min.js')
    soup.find('body').append(BeautifulSoup(link_str, "html.parser"))
    return str(soup)

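# A minimal usage sketch for separate_javascript. The dict keys are the ones
# the function reads above; the concrete paths and HTML are hypothetical:
#
#   data = {
#       'tools_obtain_static_path': 'build/static',    # where the .min.js is written
#       'tools_obtain_static_path_prefix': '/static',  # URL prefix for the <script src>
#   }
#   html = separate_javascript(
#       '<html><body><script>var a = 1;</script></body></html>', data)
#   # html now loads the extracted, minified script via
#   # <script src="/static/....min.js"></script> appended to <body>.
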
def handle_render_html(data, offset_path='.', argv=None):
    """
    Render HTML
    :param data: rule dict
    :param offset_path: current path, relative to the source directory
    :param argv: input arguments
    :return:
    """
    now_path = data['source_path'] + '/' + offset_path
    if os.path.isdir(now_path):
        fps = os.listdir(now_path)
        for fp in fps:
            handle_render_html(data, '/'.join([offset_path, fp]), argv)
    else:
        # Only .html files are rendered
        if not now_path.endswith('.html'):
            return
        # Skip the file if the changed file does not affect its build
        output_file = data['target_path'] + '/' + offset_path
        d = get_the_file_all_use_file(now_path, output_file,
                                      argv['project_path'])
        if argv['change_file_path'] not in d:
            return
        with open(now_path, 'r') as fp:
            html = render_html_use_template(fp.read(), output_file,
                                            argv['project_path'], now_path)
            tools.output_file(output_file, html)

def build_html(data, offset_path='.', argv=None):
    """
    Build production HTML
    :param data: rule dict
    :param offset_path: current path, relative to the source directory
    :param argv: input arguments
    :return:
    """
    now_path = data['source_path'] + '/' + offset_path
    if os.path.isdir(now_path):
        fps = os.listdir(now_path)
        for fp in fps:
            build_html(data, '/'.join([offset_path, fp]), argv)
    else:
        # Only .html files are compiled
        if not now_path.endswith('.html'):
            return
        # Compile the HTML file
        print("Compiling HTML file: %s" % now_path)
        log.info("Compiling HTML file: %s" % now_path)
        with open(now_path, 'r') as fp:
            output_path = data['target_path'] + '/' + offset_path
            html = build_html_use_template(fp.read(), output_path,
                                           argv['project_path'], now_path,
                                           data)
            tools.output_file(output_path, html)

def css_precompiled(data, offset_path='.', argv=None):
    """
    Precompile CSS
    :param data: rule dict
    :param offset_path: current path, relative to the source directory
    :param argv: input arguments
    :return:
    """
    now_path = data['source_path'] + '/' + offset_path
    if os.path.isdir(now_path):
        fps = os.listdir(now_path)
        for fp in fps:
            css_precompiled(data, '/'.join([offset_path, fp]), argv)
    else:
        # Skip every file except the one that actually changed
        if os.path.realpath(now_path) != os.path.realpath(
                argv['change_file_path']):
            return
        with open(now_path, 'r') as fp:
            file_type = tools.get_file_type(now_path)
            if file_type not in ['styl', 'less', 'scss', 'css', 'sass']:
                return
            if file_type in ('scss', 'sass'):
                fp_data = css_precompiled_lib.handle_scss(fp.read())
            elif file_type == 'less':
                fp_data = css_precompiled_lib.handle_less(fp.read())
            else:
                # .css and .styl files are passed through unchanged
                fp_data = fp.read()
            tools.output_file(data['target_path'] + '/' + offset_path +
                              '.css', fp_data)

def merge_file(data):
    """
    Merge files
    :param data: rule dict
    :return:
    """
    text = get_after_merge_file_str(data['source_path'])
    text = minify_file(text, data['minify'], data['encryption'],
                       data['encryption'])
    tools.output_file(data['target_path'], text)

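# A minimal sketch of the rule dict merge_file expects. Only the keys read
# above are listed; the values are hypothetical:
#
#   data = {
#       'source_path': 'src/js',           # directory whose files get merged
#       'minify': 'js',                    # file type handed to minify_file
#       'encryption': False,               # forwarded twice to minify_file, as above
#       'target_path': 'build/all.min.js'  # merged output file
#   }
#   merge_file(data)
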
def make_component(data):
    """
    Build a component
    :param data: rule dict
    :return:
    """
    js, code = get_component_js_str(data['source_path'])
    if code == 0:
        tools.output_file(data['target_path'], js)
    elif code == -1:
        log.info('No component config file found, skipping this build')

def handle_rule(data, argv):
    """
    Rule dispatcher
    :param data: rule dict
    :param argv: dynamic arguments
    :return:
    """
    data = handle_rule_data(data, argv)
    # Decide whether the rule needs to run (render_html is exempt)
    change_file_path = argv['change_file_path']
    if data['rule'] != 'render_html' and not change_file_path.startswith(
            os.path.realpath(data['source_path'])):
        return
    if data['rule'] == 'make_component':
        log.info('Component build triggered, source file: %s' %
                 change_file_path)
        dao_lib.make_component(data)
        print('Component build finished ' + time.strftime('%H:%M:%S'))
    elif data['rule'] == 'merge':
        log.info('File merge triggered, source file: %s' % change_file_path)
        dao_lib.merge_file(data)
        print('File merge finished ' + time.strftime('%H:%M:%S'))
    elif data['rule'] == 'minify':
        log.info('File minification triggered, source file: %s' %
                 change_file_path)
        dao_lib.minify_file_in_script(data, argv=argv)
        print('File minification finished ' + time.strftime('%H:%M:%S'))
    elif data['rule'] == 'css_precompiled':
        log.info('CSS precompilation triggered, source file: %s' %
                 change_file_path)
        dao_lib.css_precompiled(data, argv=argv)
        print('CSS precompilation finished ' + time.strftime('%H:%M:%S'))
    elif data['rule'] == 'css_precompiled_and_merge':
        log.info('CSS precompilation triggered, source file: %s' %
                 change_file_path)
        # Get the merged CSS
        if data['compiled_first']:
            # Compile first, then merge
            d = dao_lib.css_precompiled_get_str(data)
        else:
            # Merge first, then compile the merged source
            d = dao_lib.get_after_merge_file_str(data['source_path'],
                                                 file_type='[scss|less]$')
            if data['compiled_type'] == 'scss':
                d = dao_lib.css_precompiled_lib.handle_scss(d)
            elif data['compiled_type'] == 'less':
                d = dao_lib.css_precompiled_lib.handle_less(d)
        tools.output_file(data['target_path'], d)
        print('CSS precompilation (merged into a single CSS file) finished ' +
              time.strftime('%H:%M:%S'))
    elif data['rule'] == 'render_html':
        log.info('HTML precompilation triggered, source file: %s' %
                 change_file_path)
        dao_lib.handle_render_html(data, argv=argv)
        print('HTML precompilation finished ' + time.strftime('%H:%M:%S'))

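# A minimal sketch of the inputs handle_rule dispatches on. Only keys the code
# above actually reads are shown; the values are hypothetical:
#
#   argv = {
#       'change_file_path': '/abs/project/src/app.scss',  # file reported by the watcher
#       'project_path': '/abs/project',                   # project root
#   }
#   data = {
#       'rule': 'css_precompiled',  # or: make_component, merge, minify,
#                                   # css_precompiled_and_merge, render_html
#       'source_path': 'src',
#       'target_path': 'build',
#   }
#   handle_rule(data, argv)
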
def hash_css_and_js_version(html_str, file_path, data, project_path):
    """
    Content-hash static assets
    :param html_str: HTML source
    :param file_path: path of the HTML file being processed
    :param data: rule dict
    :param project_path: project root path
    :return: HTML with asset URLs rewritten to hashed paths
    """
    soup = BeautifulSoup(html_str, "html.parser")
    links = soup.find_all("link")
    for index, _ in enumerate(links):
        if 'href' not in links[index].attrs:
            continue
        link_path = handle_css_of_js_path(links[index]['href'])
        path = get_real_static_path(link_path, project_path, file_path)
        if path and os.path.exists(path):
            with open(path, 'r') as fp:
                # Read once: a second fp.read() would return an empty string
                content = fp.read()
            version = hashlib.md5(content.encode("UTF-8")).hexdigest()
            static_path = data['hash_static_path'] + '/' + str_to_file_path(
                version, 8) + '.min.css'
            tools.output_file(static_path, content)
            links[index]['href'] = data[
                'hash_static_path_prefix'] + '/' + str_to_file_path(
                    version, 8) + '.min.css'
    scripts = soup.find_all("script")
    for index, _ in enumerate(scripts):
        if not scripts[index].get('src', False):
            continue
        link_path = handle_css_of_js_path(scripts[index]['src'])
        path = get_real_static_path(link_path, project_path, file_path)
        if path and os.path.exists(path):
            with open(path, 'r') as fp:
                content = fp.read()
            version = hashlib.md5(content.encode("UTF-8")).hexdigest()
            static_path = data['hash_static_path'] + '/' + str_to_file_path(
                version, 8) + '.min.js'
            tools.output_file(static_path, content)
            scripts[index]['src'] = data[
                'hash_static_path_prefix'] + '/' + str_to_file_path(
                    version, 8) + '.min.js'
    return str(soup)

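# Illustration of the rewrite hash_css_and_js_version performs (the exact
# hashed file name depends on the str_to_file_path helper, not shown here):
#
#   <link rel="stylesheet" href="css/app.css">
#
# copies css/app.css under data['hash_static_path'] and becomes
#
#   <link rel="stylesheet" href="{hash_static_path_prefix}/<hash>.min.css">
#
# so the URL changes whenever the file content changes (cache busting).
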
def separate_style(html_str, data):
    """
    Separate out inline CSS
    :param html_str: HTML source
    :param data: rule dict
    :return: HTML with inline styles replaced by a single <link> tag
    """
    css_index = 0
    css_data = ''
    soup = BeautifulSoup(html_str, "html.parser")
    styles = soup.find_all("style")
    for obj in styles:
        # Guard against empty <style> tags, whose .string is None
        if obj.string:
            css_data += ('\n' + obj.string)
    objs = soup.find_all(True)
    for index, _ in enumerate(objs):
        obj = objs[index]
        style_text = obj.get('style', None)
        if style_text:
            # Lift the style="" attribute into a generated class
            css_data += ('\n' + ".modular_front_css_%d{%s}" %
                         (css_index, style_text))
            if 'class' not in obj.attrs:
                obj.attrs['class'] = []
            obj.attrs['class'].append("modular_front_css_%d" % css_index)
            del obj['style']
            css_index += 1
    # Write the separated CSS to a content-hashed file
    version = hashlib.md5(css_data.encode("UTF-8")).hexdigest()
    file_path = data['tools_obtain_static_path'] + '/' + str_to_file_path(
        version, 8) + '.min.css'
    tools.output_file(file_path, css_data)
    link_str = '<link rel="stylesheet" href="%s" />' % (
        data['tools_obtain_static_path_prefix'] + '/' +
        str_to_file_path(version, 8) + '.min.css')
    soup.find('head').append(BeautifulSoup(link_str, "html.parser"))
    return str(soup)

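# Note: separate_style mirrors separate_javascript above, additionally lifting
# inline style="" attributes into generated .modular_front_css_<n> classes so
# elements keep their styling through class references instead of inline CSS.
# Usage follows the same shape as the separate_javascript sketch, with
# 'tools_obtain_static_path' / 'tools_obtain_static_path_prefix' pointing at
# the CSS output location and URL prefix.
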