def run_report(table_data, figure_base_quality, figure_base_content, figure_base_gc, tpl_docx, out_dir):
    """Render the QC docx report from a table-data file and three figures.

    Parses ``table_data``, fills the docx template ``tpl_docx`` with the
    raw/clean read counts, swaps the three placeholder pictures for the
    supplied figure files and writes ``report.docx`` into ``out_dir``.

    Returns the context dict used for rendering.
    """
    out_dir = mkdir(out_dir)
    parsed = read_table_data(table_data)
    context = {
        "raw_data": parsed[2],
        "clean_data": parsed[4],
        "table_data": parsed,
    }
    template = DocxTemplate(tpl_docx)
    template.render(context)
    # Placeholder picture name in the template -> replacement figure path.
    picture_pairs = [
        ("base_quality.png", figure_base_quality),
        ("base_content.png", figure_base_content),
        ("gc_distribution.png", figure_base_gc),
    ]
    for placeholder, figure in picture_pairs:
        template.replace_pic(placeholder, figure)
    template.save(os.path.join(out_dir, "report.docx"))
    return context
def pdf_profil_student(data):
    """Build a student-profile PDF for the student with matricule `data`.

    Looks the student up in the local sqlite database, fills the
    ``template_etudiant.docx`` template (replacing the placeholder photo
    when the student has one), saves the docx to the temp directory and
    converts it to a PDF on the user's Desktop.

    Args:
        data: the student's matricule (primary key in ETUDIANT).

    Returns:
        Path of the generated PDF.

    Raises:
        IndexError: if no student matches `data` (same as the original).
    """
    connect_to_bdd = sqlite3.connect('donnee.db')
    try:
        cur = connect_to_bdd.cursor()
        # Parameterized query — safe against injection.
        cur.execute(
            '''
            SELECT matricule_etud, nom, prenom, date_naissance, tel,
                   email, cin, adresse, niveau, pdp_name
            FROM ETUDIANT WHERE matricule_etud=?
            ''', (data, ))
        resu = cur.fetchall()
    finally:
        # Fix: the original never closed the connection.
        connect_to_bdd.close()
    row = resu[0]
    # Template keys, in the same order as the SELECT's first nine columns.
    fields = ('num_matricule', 'nom', 'prenom', 'date_naissance', 'tel',
              'email', 'cin', 'adresse', 'niveau')
    info = {
        name: RichText(row[i], font='Arial', bold=False, size=24)
        for i, name in enumerate(fields)
    }
    # Get template
    document = DocxTemplate("template/template_etudiant.docx")
    # Change image in the template when the student has a profile picture
    if row[9] != '':
        document.replace_pic(
            'face0.png', os.getcwd() + f"\\src\\dist\\img\\pdp\\{row[9]}")
    # Fill the template
    document.render(info)
    name = "Profil_" + row[1] + "_" + row[2]
    # Save the generated doc (Windows-style paths, as in the original)
    document.save(f"{gettempdir()}\\{name}.docx")
    # Convert the docx to pdf on the Desktop
    dirpath = os.environ.get('USERPROFILE') + "\\Desktop"
    pdf_got = f"{dirpath}\\{name}.pdf"
    convert(f"{gettempdir()}\\{name}.docx", pdf_got)
    return pdf_got
def save_doc(context, inline_imgs, file_dict, template_path, result_path):
    """Render `template_path` with `context` plus inline images, save to `result_path`.

    Args:
        context: template variables.
        inline_imgs: mapping of template key -> image path; an optional
            sibling key ``<key>_width_percent`` scales that image's width
            (fraction of the 170 mm default).
        file_dict: placeholder picture name -> replacement image path
            (applied with replace_pic before rendering).
        template_path: docx template to load.
        result_path: output docx path.
    """
    tpl = DocxTemplate(template_path)
    for pic_name in file_dict:
        tpl.replace_pic(pic_name, file_dict[pic_name])
    inline_image_context = dict()
    for key in inline_imgs:
        # BUG fix: the '*_width_percent' entries are width metadata, not
        # images; the original loop also wrapped them in InlineImage.
        if key.endswith('_width_percent'):
            continue
        width = 170  # default width in mm
        width_key = key + '_width_percent'
        if width_key in inline_imgs:
            width = inline_imgs[width_key] * width
        inline_image_context[key] = InlineImage(tpl, inline_imgs[key],
                                                width=Mm(width))
    context = {**context, **inline_image_context}
    tpl.render(context)
    tpl.save(result_path)
def gen_docx(infile_path, outfile_path, data):
    """Render the shareholder docx template into `outfile_path`.

    Replaces the template's placeholder picture ``dummy.jpg`` with
    ``<uid>.jpg`` (taken from ``data["uid"]``) and fills the text
    placeholders from ``data``.

    Args:
        infile_path: path of the .docx template to read.
        outfile_path: path of the .docx file to write.
        data: mapping with keys 'uid', 'name', 'shareholder_id', 'n_shares'.

    Raises:
        ValueError: if either path does not end in '.docx'.  ValueError
            subclasses Exception, so callers catching the original broad
            Exception still work.
    """
    # endswith also rejects paths shorter than 5 chars, matching the
    # original slice comparison.
    if not infile_path.endswith('.docx'):
        raise ValueError('input file must be a .docx')
    if not outfile_path.endswith('.docx'):
        raise ValueError('output file must be a .docx')
    doc = DocxTemplate(infile_path)
    doc.replace_pic('dummy.jpg', f'{data["uid"]}.jpg')
    context = {
        'shareholder_name': data['name'],
        'user_id': data['shareholder_id'],
        'n_shares': data['n_shares'],
    }
    doc.render(context)
    doc.save(outfile_path)
def fill_monitoring_card(animal_id, output_path):
    """Fill the animal monitoring-card docx template and save it.

    Reads one joined row of animal attributes (sex/species/color/... decoded
    through their dict_* tables) from the database, renders it into the
    monitoring-card template and writes the result to `output_path`.

    Args:
        animal_id: primary key of the animal; anything int()-coercible.
        output_path: destination .docx path.
    """
    # SECURITY fix: the id used to be interpolated into the SQL string
    # unchecked; coercing to int keeps the query driver-agnostic while
    # making injection impossible.
    animal_id = int(animal_id)
    conn = get_connection()
    try:
        query = f"""select card_number pet_id, age pet_age, weight pet_weight,
                   name pet_name, s.value pet_sex, sp.value pet_type,
                   col.value pet_color, e.value pet_ears, t.value pet_tail,
                   si.value pet_size, f.value pet_fur
            FROM animal a
            INNER JOIN dict_species sp ON sp.id=a.species_id
            INNER JOIN dict_color col ON col.id=a.color_id
            INNER JOIN dict_sex s ON s.id=a.sex_id
            INNER JOIN dict_fur f ON f.id=a.fur_id
            INNER JOIN dict_ear e ON e.id=a.ear_id
            INNER JOIN dict_tail t ON t.id=a.tail_id
            INNER JOIN dict_size si ON si.id=a.size_id
            INNER JOIN file ON file.id=a.file_id
            WHERE a.id = {animal_id}
            """
        data_df = pd.read_sql_query(query, con=conn)
        # Append a row of '__' placeholders (used by the card for blanks).
        data_df.loc[data_df.shape[0]] = ['__'] * len(data_df.columns)
        card_tables = ['tbl_treat_parasites', 'tbl_vaccine', 'tbl_health']
        render_dict = data_df.loc[0].to_dict()
        render_dict = _add_current_date(render_dict)
        render_dict = _fill_empty_tables(card_tables, render_dict)
        # NOTE(review): hard-coded stub rows — looks like leftover test
        # data; confirm before shipping.
        render_dict['tbl_health'] = [{'id': 1}, {'id': 2}]
        tpl = DocxTemplate(_MONITORING_CARD_PATH)
        # NOTE(review): `test_img_path` is a module-level global, presumably
        # a placeholder photo — verify against the rest of the module.
        tpl.replace_pic('pet_photo.png', test_img_path)
        tpl.render(render_dict)
        tpl.save(output_path)
    finally:
        # Fix: the original leaked the connection on any exception.
        conn.close()
# Batch variant of the replace_picture example: generate one output document
# per image found in templates/images/, each with the template's placeholder
# picture swapped for that image.
from docxtpl import DocxTemplate
import os

image_dir = 'templates/images/'
files = [
    f for f in os.listdir(image_dir)
    if os.path.isfile(os.path.join(image_dir, f))
]
for file in files:
    # BUG fix: create a fresh template for every output file.  The original
    # reused a single DocxTemplate across iterations, so after the first
    # replace_pic() the placeholder "fruit_logo.png" no longer existed in
    # the document, and render() was called repeatedly on an already
    # rendered template.
    tpl = DocxTemplate('templates/replace_picture_tpl.docx')
    DEST_FILE = f'output/auto_{file.rsplit(".",1)[0]}.docx'
    context = {}
    old_im = "fruit_logo.png"
    new_im = f"templates/images/{file}"
    tpl.replace_pic(old_im, new_im)
    tpl.render(context)
    tpl.save(DEST_FILE)
def _last_year_timestr(timestr, hour, minute):
    """Return `timestr` ('%Y-%m-%d %H:%M:%S') shifted one year back, at hour:minute."""
    dt = datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
    return datetime(dt.year - 1, dt.month, dt.day, hour,
                    minute).strftime('%Y-%m-%d %H:%M:%S')


def _signed_count(cur, ref):
    """Format cur - ref as '+N' / '-N' / '0'."""
    diff = cur - ref
    if diff > 0:
        return '+%d' % diff
    if diff < 0:
        return '-%d' % abs(diff)
    return '0'


def _signed_percent(cur, ref):
    """Format the relative change from ref to cur as '+X.X%' / '-X.X%' / '0%'.

    Raises ZeroDivisionError when ref == 0, same as the original inline code.
    """
    diff = (cur - ref) / ref * 100
    if diff > 0:
        return '+%.1f%%' % diff
    if diff < 0:
        return '-%.1f%%' % abs(diff)
    return '0%'


def main(searchTime1, searchTime2):
    """Produce the Taihu monthly report: statistics, figures, docx, DB record.

    Compares the current-month window [searchTime1, searchTime2] and the
    March-to-now window against the same windows one year earlier, exports
    the comparison figures, fills the report_month.docx template and records
    the report in the database.

    Args:
        searchTime1/searchTime2: window bounds, '%Y-%m-%d %H:%M:%S' strings.
    """
    # Last year's window for the current-month comparison.
    lastTime1 = _last_year_timestr(searchTime1, 0, 1)
    lastTime2 = _last_year_timestr(searchTime2, 23, 59)
    # This year's current-month stats.
    monthInfo1 = thisYearCurrentMonth(searchTime1, searchTime2)
    if monthInfo1 is None:
        return
    # Last year's current-month stats.
    monthInfo2 = lastYearCurrentMonth(lastTime1, lastTime2)
    if monthInfo2 is None:
        return
    # Output folder, timestamped.
    outputRootDir = os.path.join(globalCfg['output_path'], 'Statistic')
    timeStamp = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
    outputDir = os.path.join(outputRootDir, timeStamp)
    os.makedirs(outputDir)
    # Current-month statistics figures.
    outTif1 = os.path.join(outputDir, 'image1.tif')
    outTif2 = os.path.join(outputDir, 'image2.tif')
    # NOTE(review): the .jpg paths are presumably produced alongside the
    # .tifs by exportImage — confirm, nothing here writes them explicitly.
    outImage1 = os.path.join(outputDir, 'image1.jpg')
    outImage2 = os.path.join(outputDir, 'image2.jpg')
    para1 = paragraph(monthInfo1, monthInfo2, searchTime2)
    exportImage(monthInfo1, monthInfo2, searchTime1, searchTime2, outTif1,
                outTif2)
    # ==================================================================
    # March-to-current-month window (fixed season start).
    MarchTime = '2020-03-01 00:01:00'
    lastTime1 = _last_year_timestr(MarchTime, 0, 1)
    lastTime2 = _last_year_timestr(searchTime2, 23, 59)
    monthInfo3 = thisYearCurrentMonth(MarchTime, searchTime2)
    # NOTE(review): unlike monthInfo1/2, monthInfo3/4 are not None-checked
    # in the original; kept as-is to preserve behavior.
    monthInfo4 = lastYearCurrentMonth(lastTime1, lastTime2)
    outTif3 = os.path.join(outputDir, 'image3.tif')
    outTif4 = os.path.join(outputDir, 'image4.tif')
    outImage3 = os.path.join(outputDir, 'image3.jpg')
    outImage4 = os.path.join(outputDir, 'image4.jpg')
    para2 = paragraph(monthInfo3, monthInfo4, searchTime2)
    exportImage(monthInfo3, monthInfo4, MarchTime, searchTime2, outTif3,
                outTif4)
    # Monthly-report docx output.
    searchDt1 = datetime.strptime(searchTime1, '%Y-%m-%d %H:%M:%S')
    this_year = str(searchDt1.year)
    this_month = str(searchDt1.month)
    last_year = str(datetime.strptime(MarchTime, '%Y-%m-%d %H:%M:%S').year - 1)
    time_range = '3~%s' % this_month
    # monthInfo layout: [count, maxRegionStr, maxArea, maxAreaDateStr,
    #                    meanArea, imageUuidList]
    count1, count2 = monthInfo1[0], monthInfo2[0]
    count3, count4 = monthInfo3[0], monthInfo4[0]
    max_area1, max_area2 = monthInfo1[2], monthInfo2[2]
    max_area3, max_area4 = monthInfo3[2], monthInfo4[2]
    mean_area1, mean_area2 = monthInfo1[4], monthInfo2[4]
    mean_area3, mean_area4 = monthInfo3[4], monthInfo4[4]
    replaceText = {
        'this_year': this_year,
        'this_month': this_month,
        'last_year': last_year,
        'time_range': time_range,
        'paragraph1': para1,
        'paragraph2': para2,
        'count1': count1,
        'count2': count2,
        'count3': count3,
        'count4': count4,
        'max_area1': max_area1,
        'max_area2': max_area2,
        'max_area3': max_area3,
        'max_area4': max_area4,
        'mean_area1': mean_area1,
        'mean_area2': mean_area2,
        'mean_area3': mean_area3,
        'mean_area4': mean_area4,
        'count_cal1': _signed_count(count1, count2),
        'count_cal2': _signed_count(count3, count4),
        'max_area_cal1': _signed_percent(max_area1, max_area2),
        'max_area_cal2': _signed_percent(max_area3, max_area4),
        'mean_area_cal1': _signed_percent(mean_area1, mean_area2),
        'mean_area_cal2': _signed_percent(mean_area3, mean_area4)
    }
    dependDir = globalCfg['depend_path']
    templatePath = os.path.join(dependDir, 'word', 'report_month.docx')
    tpl = DocxTemplate(templatePath)
    tpl.render(replaceText)
    reportName = '%s年%s月太湖蓝藻遥感监测月报' % (this_year, this_month)
    docxName = reportName + '.docx'
    outWordPath = os.path.join(outputDir, docxName)
    replacePic = {
        "template_picture1.jpg": outImage1,
        "template_picture2.jpg": outImage2,
        "template_picture3.jpg": outImage3,
        "template_picture4.jpg": outImage4
    }
    for placeholder, picPath in replacePic.items():
        tpl.replace_pic(placeholder, picPath)
    if os.path.exists(outWordPath):
        os.remove(outWordPath)
    tpl.save(outWordPath)
    # Record the report in the database.
    monitorTime = '%s年%s月' % (this_year, this_month)
    processTime = time.strftime('%Y-%m-%d %H:%M:%S',
                                time.localtime(time.time()))
    db_info = [
        reportName, monitorTime, '月报', processTime, outWordPath, 0,
        '太湖', 'EOS', 'MODIS', '6ce5de13-da13-11ea-871a-0242ac110003'
    ]
    executeSql(db_info)
def run_report(cfg, table_data, table_species, table_findgse, table_scope,
               table_kmer, table_assembly, figure_base_quality,
               figure_base_content, figure_base_gc, figure_species,
               figure_findgse, figure_genomescope, figure_kmer,
               figure_gc_depth, tpl_docx, tpl_html, out_dir):
    """Build the genome-survey docx and html reports.

    Depending on the config flags ``general.assembly`` and
    ``general.homogeneous``, pulls stats from the assembly / FindGSE+GenomeScope
    / Kmerfreq tables, renders the docx template with the placeholder figures
    replaced, then renders the html report (index.html / main.html) alongside
    copied images and static assets.

    Returns the render context dict.
    """
    out_dir = mkdir(out_dir)
    now = datetime.now()
    config = read_config(cfg)
    table_data = read_table_data(table_data)
    table_species = read_tsv(table_species, '\t')
    if config["general"]["assembly"].lower() == "true":
        # BUG fix: the original compared the bound method
        # `table_assembly.lower` (not its result) to "no", which is always
        # False, so this warning could never fire.
        if table_assembly.lower() == "no":
            LOG.debug("Please enter the assembly result statistics file")
        assembly = True
        table_assembly = read_table_assembly(table_assembly)
    else:
        assembly = False
        # Placeholder stats so the indexing below stays valid.
        table_assembly = [[0, 0, 0, 0], [0], [0], [0], [0], [0],
                          [0, 0, 0, 0, 0]]
    if config["general"]["homogeneous"].lower() == "true":
        # BUG fix: same missing-call bug as above (`.lower` vs `.lower()`).
        if table_findgse.lower() == "no" or table_scope.lower() == "no":
            LOG.debug(
                "Please output the genomic prediction results of FindGSE and GenomeScope"
            )
        homogeneous = True
        table_findgse = read_table_findgse(table_findgse)
        table_scope = read_table_scope(table_scope)
        findgse_genome = table_findgse[0]
        scope_genome = table_scope[0]
        max_genome = max(findgse_genome, scope_genome)
        min_genome = min(findgse_genome, scope_genome)
        average_genome = (max_genome + min_genome) * 1.0 / 2
        software = "FindGSE和GenomeScope"
        estimate_size = '{0:,}-{1:,}'.format(min_genome, max_genome)
        heterozygosity = table_scope[1]
    else:
        if table_kmer.lower() == "no":
            LOG.debug(
                "Please output the genomic prediction results of Kmerfreq")
        homogeneous = False
        table_kmer = read_table_kmer(table_kmer)
        software = "Kmerfreq"
        estimate_size = '{:,}'.format(int(table_kmer[4].replace(",", "")))
        heterozygosity = float(table_kmer[5].replace("%", ""))
        findgse_genome = 0
        scope_genome = 0
        max_genome = 0
        min_genome = 0
        average_genome = 0
    r = {
        "project": "",
        "id": "",
        "name": "",
        "species": "",
        "strain": "",
        "sequencer": "Illumina",
        "author": "",
        "reviewer": "",
        "year": now.year,
        "month": now.month,
        "day": now.day,
        "kmer": "",
        "raw_data": table_data[2],
        "clean_data": table_data[4],
        "estimate_size": estimate_size,
        "heterozygosity": '{}%'.format(heterozygosity),
        "software": software,
        "homogeneous": homogeneous,
        "assembly_size": '{:,}'.format(int(table_assembly[6][3])),
        "findgse_genome": '{:,}'.format(findgse_genome),
        "scope_genome": '{:,}'.format(scope_genome),
        "average_genome": '{:,}'.format(average_genome),
        "max_genome": '{:,}'.format(max_genome),
        "min_genome": '{:,}'.format(min_genome),
        "table_data": table_data,
        "table_species": table_species,
        "pollution_description": "",
        "table_findgse": table_findgse,
        "table_scope": table_scope,
        "table_kmer": table_kmer,
        "assembly": assembly,
        "table_assembly": table_assembly,
        "scaffold_length": '{:,}'.format(int(table_assembly[6][3])),
        "scaffold_n50": '{:,}'.format(int(table_assembly[0][3])),
        "scaffold_number": '{:,}'.format(int(table_assembly[6][4])),
        "contig_length": '{:,}'.format(int(table_assembly[6][1])),
        "contig_n50": '{:,}'.format(int(table_assembly[0][1])),
        "contig_number": '{:,}'.format(int(table_assembly[6][2])),
        "depth_description": ""
    }
    r.update(config["general"])
    tpl = DocxTemplate(tpl_docx)
    tpl.render(r)
    # Pick the figure set matching the analysis mode.
    if homogeneous:
        if assembly:
            figure_name = [
                "base_quality.png", "base_content.png", "base_gc.png",
                "top10_species.png", "findgse.png", "genomescope.png",
                "gc_depth.png"
            ]
            figure_variable = [
                figure_base_quality, figure_base_content, figure_base_gc,
                figure_species, figure_findgse, figure_genomescope,
                figure_gc_depth
            ]
        else:
            # BUG fix: the original listed "gc_depth.png" here with no
            # matching figure, so zip() silently dropped it; without an
            # assembly there is no GC-depth figure to substitute.
            figure_name = [
                "base_quality.png", "base_content.png", "base_gc.png",
                "top10_species.png", "findgse.png", "genomescope.png"
            ]
            figure_variable = [
                figure_base_quality, figure_base_content, figure_base_gc,
                figure_species, figure_findgse, figure_genomescope
            ]
    else:
        if assembly:
            figure_name = [
                "base_quality.png", "base_content.png", "base_gc.png",
                "top10_species.png", "heterozygosity.png", "gc_depth.png"
            ]
            figure_variable = [
                figure_base_quality, figure_base_content, figure_base_gc,
                figure_species, figure_kmer, figure_gc_depth
            ]
        else:
            figure_name = [
                "base_quality.png", "base_content.png", "base_gc.png",
                "top10_species.png", "heterozygosity.png"
            ]
            figure_variable = [
                figure_base_quality, figure_base_content, figure_base_gc,
                figure_species, figure_kmer
            ]
    for name, figure in zip(figure_name, figure_variable):
        tpl.replace_pic(name, figure)
    tpl.save(os.path.join(out_dir, "report.docx"))
    # html report: refresh static assets, copy the figures, render the pages.
    for sub in ["images", "static"]:
        temp = os.path.join(out_dir, sub)
        if os.path.exists(temp):
            shutil.rmtree(temp)
        shutil.copytree(os.path.join(tpl_html, sub), temp)
    for figure in figure_variable:
        shutil.copy(figure, os.path.join(out_dir, "images/"))
    for page in ["index.html", "main.html"]:
        # NOTE(review): .decode()/.encode() on the result of read() only
        # work on Python 2 str — drop both if this ever runs on Python 3.
        tpl = Template(open(os.path.join(tpl_html, page)).read().decode("utf-8"))
        with open(os.path.join(out_dir, page), "w") as fh:
            fh.write(tpl.render(r).encode("utf-8"))
    return r
def generate(data):
    """Generate a 'FicheMetier' PDF from the docx template matching the domain.

    `data` is expected to hold 'profil', 'metier', 'etudes' dicts and a
    'parcours' list of (label, value) pairs — schema inferred from the
    accesses below; confirm against the caller.

    Returns the path of the generated PDF in the temp directory, or None
    when the domain has no template colour or on any error (the broad
    except at the bottom prints and swallows everything).
    """
    try:
        domaine = data['metier']['domaine'].strip()
        nbp = len(data['parcours'])
        # Font-size threshold for 'formation' depends on the template
        # variant (5 career steps vs. other).
        if nbp == 5:
            flimit = 288
        else:
            flimit = 403
        # Try to make a space between aspectPositif & contrainte
        if len(data['metier']['aspectPositif']) < 45:
            data['metier']['aspectPositif'] += '\n'
        # Each field shrinks from size 24 to 22 when its text exceeds a
        # per-field length threshold, so it still fits its template box.
        context = {
            'nom': RichText(data['profil']['nom'], font='Arial', bold=True,
                            size=22 if len(data['profil']['nom']) > 50 else 24),
            'poste': data['profil']['poste'],
            'biographie': RichText(data['profil']['biographie'], font='Arial',
                                   size=22 if len(data['profil']['biographie']) > 399 else 24),
            'competenceQualite': RichText(data['metier']['competenceQualite'], font='Arial', color='#000000',
                                          size=22 if len(data['metier']['competenceQualite']) > 185 else 24),
            'accessMetier': RichText(data['metier']['accessMetier'], font='Arial', color='#000000',
                                     size=22 if len(data['metier']['accessMetier']) > 160 else 24),
            'aspectPositif': RichText(data['metier']['aspectPositif'], font='Arial', color='#000000',
                                      size=22 if len(data['metier']['aspectPositif']) > 72 else 24),
            'contrainte': RichText(data['metier']['contrainte'], font='Arial', color='#000000',
                                   size=22 if len(data['metier']['contrainte']) > 72 else 24),
            'domaine': domaine,
            'formation': RichText(data['etudes']['formation'], font='Arial', color='#000000',
                                  size=22 if (len(data['etudes']['formation']) > flimit) else 24),
            'etablissement': RichText(data['etudes']['etablissement'], font='Arial', color='#000000',
                                      size=28 if len(data['etudes']['etablissement']) > 32 else 32),
            'insertionProfessionnel': RichText(data['etudes']['insertionProfessionnel'], font='Arial', color='#000000',
                                               size=22 if len(data['etudes']['insertionProfessionnel']) > 111 else 24)
        }
        # One p{i}/v{i} pair per career step; the last step is rendered white.
        for i in range(nbp):
            if i == nbp - 1:
                color = '#ffffff'
            else:
                color = '#000000'
            context[f'p{i}'] = RichText(data['parcours'][i][0], font='Arial', color=color,
                                        size=22 if len(data['parcours'][i][0]) > 11 else 24)
            context[f'v{i}'] = RichText(data['parcours'][i][1], font='Arial', color=color,
                                        size=22 if len(data['parcours'][i][1]) > 34 else 24)
        # Map the domain to its template colour code.
        col = None
        for kdom, vdom in verifJSON().items():
            if kdom == domaine:
                col = vdom
                break
        # No match found: nothing to generate
        if not col:
            return
        # Get the template
        doc = DocxTemplate(f"template/Trame-vierge{nbp}-{col}.docx")
        # Change image on template if image set
        if data['profil']['profilImage'] != '':
            data['profil']['profilImage'] = forceJPG(
                data['profil']['profilImage'])
            doc.replace_pic('test.jpg', data['profil']['profilImage'])
        # Generate template
        doc.render(context)
        name = "FicheMetier_" + data['profil']['poste'].replace(' ', '_')
        # Save the doc generated
        doc.save(f"{gettempdir()}\\{name}.docx")
        # Convert to pdf
        convert(f"{gettempdir()}\\{name}.docx")
        return f"{gettempdir()}\\{name}.pdf"
    except Exception as err:
        print(err)
        return
# -*- coding: utf-8 -*-
'''
Created : 2017-09-03
@author: Eric Lapouyade
'''
from docxtpl import DocxTemplate

# Demo: swap the embedded picture python_logo.png for another image,
# then render the (empty) context and save the result.
TEMPLATE_PATH = 'templates/replace_picture_tpl.docx'
DEST_FILE = 'output/replace_picture.docx'

tpl = DocxTemplate(TEMPLATE_PATH)
context = {}
tpl.replace_pic('python_logo.png', 'templates/python.png')
tpl.render(context)
tpl.save(DEST_FILE)
def exportWord(jsonPath, productUuid): # 解析当前json中信息 with open(jsonPath, 'r') as f: jsonData = json.load(f) cloud = jsonData['cloud'] # 云量信息 totalArea = jsonData['totalArea'] # 蓝藻总面积 # 生成文字所需信息=============================================================== issue = os.path.basename(jsonPath).split('_')[3] year = int(issue[0:4]) mm = int(issue[4:6]) dd = int(issue[6:8]) hour = int(issue[8:10]) minute = int(issue[10:12]) timeStr = '%d月%d日%d时%d分' % (mm, dd, hour, minute) totalPercent = jsonData['totalPercent'] # 蓝藻总百分比 lakeStat = jsonData['lakeStat'] # 蓝藻面积分布区域 algaeThreshold = jsonData['algaeThreshold'] lakeRegionList = [] for key in lakeStat.keys(): if lakeStat[key] == 1: lakeRegionList.append(LAKE_REGION_NAME[key]) if len(lakeRegionList) == 0: lakeRegionStr = '' elif len(lakeRegionList) == 1: lakeRegionStr = lakeRegionList[0] else: tempList = lakeRegionList[0:-1] lakeRegionStr = '、'.join(tempList) + '和' + lakeRegionList[-1] areaWX = jsonData['adminArea']['wuxi'] areaCZ = jsonData['adminArea']['changzhou'] areaSZ = jsonData['adminArea']['suzhou'] percentWX = jsonData['adminPercent']['wuxi'] percentCZ = jsonData['adminPercent']['changzhou'] percentSZ = jsonData['adminPercent']['suzhou'] areaH = jsonData['highArea'] areaM = jsonData['midArea'] areaL = jsonData['lowArea'] percentH = jsonData['highPercent'] percentM = jsonData['midPercent'] percentL = jsonData['lowPercent'] # 计算期号 nowDatetime = datetime.datetime.strptime(issue[0:8] + '0000', '%Y%m%d%H%M') # 3月以前算上一年期号 if mm < 3: startDatetime = datetime.datetime.strptime(str(year) + '01010000', '%Y%m%d%H%M') else: startDatetime = datetime.datetime.strptime(str(year) + '03010000', '%Y%m%d%H%M') num = (nowDatetime - startDatetime).days + 1 # 期号 label2 = '' label3 = '' # 蓝藻日报文字部分=================================================== # 1.全云 if cloud >= 95: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖全部被云层覆盖,无法判断蓝藻聚集情况。' % timeStr description2 = '%sEOS/MODIS卫星遥感影像显示,太湖全部被云层覆盖,无法判断蓝藻聚集情况。' % timeStr label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' 
% (year, mm, dd) templateID = 1 typeID = 1 # 2.有云无藻 elif 5 < cloud < 95 and totalArea == 0: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内未发现蓝藻聚集现象。' % timeStr description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内未发现蓝藻聚集现象。' % timeStr label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) templateID = 1 typeID = 2 # 3.无云无藻 elif cloud <= 5 and totalArea == 0: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖未发现蓝藻聚集现象。' % timeStr description2 = '%sEOS/MODIS卫星遥感影像显示,太湖未发现蓝藻聚集现象。' % timeStr label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) templateID = 1 typeID = 3 # 4.有云有藻 面积不大于300 elif 5 < cloud < 95 and 0 < totalArea <= 300: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米(图2),' \ '占全湖总面积的%.1f%%,主要分布在%s。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \ '占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米,' \ '占全湖总面积的%.1f%%,主要分布在%s。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \ '占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) label2 = '图2 %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd) templateID = 2 typeID = 4 # 5.无云有藻 面积不大于300 elif cloud <= 5 and 0 < totalArea <= 300: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖发现蓝藻聚集面积约%d平方千米(图2),占全湖总面积的%.1f%%,' \ '主要分布在%s。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,占%d%%;苏州水域%d平方千米,' \ '占%d%%。'\ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) description2 = '%sEOS/MODIS卫星遥感影像显示,太湖发现蓝藻聚集面积约%d平方千米,占全湖总面积的%.1f%%,' \ '主要分布在%s。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,占%d%%;苏州水域%d平方千米,' \ '占%d%%。'\ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) label2 = '图2 %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd) 
templateID = 2 typeID = 5 # 6.无云有藻 面积大于300 有高中低聚集区 elif cloud <= 5 and totalArea > 300 and areaH > 0 and areaM > 0 and areaL > 0: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖发现蓝藻聚集面积约%d平方千米(图2),占全湖总面积的%.1f%%,' \ '主要分布在%s。其中,高、中、低聚集区面积分别约为%d平方千米、%d平方千米和%d平方千米,' \ '占蓝藻总聚集面积的%d%%、%d%%和%d%%(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;' \ '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaH, areaM, areaL, percentH, percentM, percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) description2 = '%sEOS/MODIS卫星遥感影像显示,太湖发现蓝藻聚集面积约%d平方千米,占全湖总面积的%.1f%%,' \ '主要分布在%s。其中,高、中、低聚集区面积分别约为%d平方千米、%d平方千米和%d平方千米,' \ '占蓝藻总聚集面积的%d%%、%d%%和%d%%。按行政边界划分,无锡水域%d平方千米,占%d%%;' \ '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaH, areaM, areaL, percentH, percentM, percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) label2 = '图2 %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd) label3 = '图3 %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd) templateID = 3 typeID = 5 # 7.无云有藻 面积大于300 无高聚集区 有中低聚集区 elif cloud <= 5 and totalArea > 300 and areaH == 0 and areaM > 0 and areaL > 0: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖发现蓝藻聚集面积约%d平方千米(图2),占全湖总面积的%.1f%%,' \ '主要分布在%s。其中,无高聚集区,中、低聚集区面积分别约为%d平方千米和%d平方千米,占蓝藻总聚集面积的%d%%和%d%%' \ '(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,占%d%%;苏州水域%d平方千米,' \ '占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaM, areaL, percentM, percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) description2 = '%sEOS/MODIS卫星遥感影像显示,太湖发现蓝藻聚集面积约%d平方千米,占全湖总面积的%.1f%%,' \ '主要分布在%s。其中,无高聚集区,中、低聚集区面积分别约为%d平方千米和%d平方千米,占蓝藻总聚集面积的%d%%和%d%%' \ '。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,占%d%%;苏州水域%d平方千米,' \ '占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaM, areaL, percentM, percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) label2 = '图2 %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd) 
label3 = '图3 %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd) templateID = 3 typeID = 5 # 8.无云有藻 面积大于300 无高中聚集区 有低聚集区 elif cloud <= 5 and totalArea > 300 and areaH == 0 and areaM == 0 and areaL > 0: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖发现蓝藻聚集面积约%d平方千米(图2),占全湖总面积的%.1f%%,' \ '主要分布在%s,全部为低聚集区(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \ '占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) description2 = '%sEOS/MODIS卫星遥感影像显示,太湖发现蓝藻聚集面积约%d平方千米,占全湖总面积的%.1f%%,' \ '主要分布在%s,全部为低聚集区。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \ '占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) label2 = '图2 %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd) label3 = '图3 %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd) templateID = 3 typeID = 5 # 9.有云有藻 面积大于300 有高中低聚集区 elif 5 < cloud < 95 and totalArea > 300 and areaH > 0 and areaM > 0 and areaL > 0: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米(图2),' \ '占全湖总面积的%.1f%%,主要分布在%s。其中,高、中、低聚集区面积分别约为%d平方千米、%d平方千米和%d平方千米,' \ '占蓝藻总聚集面积的%d%%、%d%%和%d%%(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;' \ '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaH, areaM, areaL, percentH, percentM, percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米,' \ '占全湖总面积的%.1f%%,主要分布在%s。其中,高、中、低聚集区面积分别约为%d平方千米、%d平方千米和%d平方千米,' \ '占蓝藻总聚集面积的%d%%、%d%%和%d%%。按行政边界划分,无锡水域%d平方千米,占%d%%;' \ '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaH, areaM, areaL, percentH, percentM, percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) label2 = '图2 %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd) label3 = '图3 %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd) templateID = 3 typeID = 4 # 
10.有云有藻 面积大于300 无高聚集区 有中低聚集区 elif 5 < cloud < 95 and totalArea > 300 and areaH == 0 and areaM > 0 and areaL > 0: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米(图2),' \ '占全湖总面积的%.1f%%,主要分布在%s。其中,无高聚集区,中、低聚集区面积约分别约为%d平方千米和%d平方千米,' \ '占蓝藻总聚集面积的%d%%和%d%%(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \ '占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaM, areaL, percentM, percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米,' \ '占全湖总面积的%.1f%%,主要分布在%s。其中,无高聚集区,中、低聚集区面积约分别约为%d平方千米和%d平方千米,' \ '占蓝藻总聚集面积的%d%%和%d%%。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \ '占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaM, areaL, percentM, percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) label2 = '图2 %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd) label3 = '图3 %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd) templateID = 3 typeID = 4 # 11.有云有藻 面积大于300 无高中聚集区 有低聚集区 elif 5 < cloud < 95 and totalArea > 300 and areaH == 0 and areaM == 0 and areaL > 0: description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米(图2),' \ '占全湖总面积的%.1f%%,主要分布在%s,全部为低聚集区(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;' \ '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米,' \ '占全湖总面积的%.1f%%,主要分布在%s,全部为低聚集区。按行政边界划分,无锡水域%d平方千米,占%d%%;' \ '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \ % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ) label1 = '图1 %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd) label2 = '图2 %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd) label3 = '图3 %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd) templateID = 3 typeID = 4 else: print('No Match Found!!!') return print(description) # 
生成文件==================================================== # 1.生成日报 replaceText = {'year': year, 'num': num, 'mm': mm, 'dd': dd, 'description': description, 'label1': label1, 'label2': label2, 'label3': label3, 'areaH': areaH, 'areaM': areaM, 'areaL': areaL, 'totalArea': totalArea, 'percentH': percentH, 'percentM': percentM, 'percentL': percentL} dependDir = globalCfg['depend_path'] templateDir = os.path.join(dependDir, 'word') templatePath = os.path.join(templateDir, 'report_daily' + str(templateID) + '.docx') tpl = DocxTemplate(templatePath) tpl.render(replaceText) jsonBaseName = os.path.basename(jsonPath) outputDir = os.path.dirname(jsonPath) outWordName = jsonBaseName.replace('.json', '.docx') outWordPath = os.path.join(outputDir, outWordName) picturePath1 = os.path.join(outputDir, jsonBaseName.replace('.json', '_reportImg1_noPoints.jpg')) picturePath2 = os.path.join(outputDir, jsonBaseName.replace('.json', '_reportImg2.jpg')) picturePath3 = os.path.join(outputDir, jsonBaseName.replace('.json', '_reportImg3.jpg')) if not (os.path.exists(picturePath1) and os.path.exists(picturePath2) and os.path.exists(picturePath2)): print('Cannot Find JPG File!!!') return if templateID == 1: replacePic = {"template_picture1.jpg": picturePath1} elif templateID == 2: replacePic = {"template_picture1.jpg": picturePath1, "template_picture2.jpg": picturePath2} elif templateID == 3: replacePic = {"template_picture1.jpg": picturePath1, "template_picture2.jpg": picturePath2, "template_picture3.jpg": picturePath3} else: replacePic = {} for key in replacePic.keys(): tpl.replace_pic(key, replacePic[key]) if os.path.exists(outWordPath): os.remove(outWordPath) tpl.save(outWordPath) # 2.生成推送所需txt第一段文字,剩余两段文字后续添加 outTxtName = jsonBaseName.replace('.json', '.txt') outTxtPath = os.path.join(outputDir, outTxtName) with open(outTxtPath, 'w') as f: f.write(description2) # 3.生成EXCEL xls_num = num xls_date = str(year) + '/' + str(mm) + '/' + str(dd) xls_time = '%s时%s分' % (str(hour), str(minute)) 
xls_threshold = str(algaeThreshold) xls_ndviMax = str(jsonData['ndviMax']) xls_ndviMin = str(jsonData['ndviMin']) xls_ndviMean = str(jsonData['ndviMean']) xls_boundary = str(jsonData['boundaryThreshold']) xls_area = '' if typeID == 4 or typeID == 5: xls_area = str(totalArea) xls_algae_area = '' if typeID == 2 or typeID == 3: xls_algae_area = '0' elif typeID == 4 or typeID == 5: xls_algae_area = str(totalArea) xls_high = '' if totalArea >= 300 and areaH > 0: xls_high = str(areaH) xls_mid = '' if totalArea >= 300 and areaM > 0: xls_mid = str(areaM) xls_low = '' if totalArea >= 300 and areaL > 0: xls_low = str(areaL) xls_region = lakeRegionStr xls_cloud = str(cloud) if cloud > 50 and totalArea == 0: xls_activate = '0' else: xls_activate = '1' xls_explain = str(typeID) xls_weather = '' if cloud <= 5: xls_cloud_cover = '无覆盖' elif cloud >= 95: xls_cloud_cover = '全部覆盖' else: xls_cloud_cover = '部分覆盖' xls_total_percent = '%.2f%%' % totalPercent xls_intensity_threshold = '' if totalArea >= 300: xls_intensity_threshold = '%.3f-%.3f,%.3f-%.3f,%.3f-%.3f' \ % (jsonData['ndviMin'], jsonData['insThreshold1'], jsonData['insThreshold1'], jsonData['insThreshold2'], jsonData['insThreshold2'], jsonData['ndviMax']) outXlsxName = jsonBaseName.replace('.json', '.xlsx') outXlsxPath = os.path.join(outputDir, outXlsxName) if os.path.exists(outXlsxPath): os.remove(outXlsxPath) workBook = xlsxwriter.Workbook(outXlsxPath) sheet = workBook.add_worksheet() writeTable = {'A1': '报告期数', 'A2': xls_num, 'B1': '日期', 'B2': xls_date, 'C1': '时间', 'C2': xls_time, 'D1': 'NDVI阈值', 'D2': xls_threshold, 'E1': 'NDVI最大值(蓝藻区域)', 'E2': xls_ndviMax, 'F1': 'NDVI最小值(蓝藻区域)', 'F2': xls_ndviMin, 'G1': 'NDVI均值(蓝藻区域)', 'G2': xls_ndviMean, 'H1': '边界缩放', 'H2': xls_boundary, 'I1': '面积(km2)无云无藻不填', 'I2': xls_area, 'J1': '蓝藻面积(无云无藻填0,全云不填,其他按面积填)', 'J2': xls_algae_area, 'K1': '高聚区面积', 'K2': xls_high, 'L1': '中聚区面积', 'L2': xls_mid, 'M1': '低聚区面积', 'M2': xls_low, 'N1': '分布范围(竺山湖、梅梁湖、贡湖、西部沿岸、南部沿岸、东部沿岸和湖心区)', 'N2': xls_region, 'O1': 
'云量', 'O2': xls_cloud, 'P1': '是否为有效监测(云量超过50%并且没有监测到蓝藻算无效,1为有效,0为无效)', 'P2': xls_activate, 'Q1': '说明(1全云;2有云无藻;3无云无藻;4有云有藻;5无云有藻)', 'Q2': xls_explain, 'R1': '天气', 'R2': xls_weather, 'S1': '是否被云覆盖(无覆盖、全部覆盖、部分覆盖)', 'S2': xls_cloud_cover, 'T1': '水华面积百分比', 'T2': xls_total_percent, 'U1': 'NDVI分级阈值', 'U2': xls_intensity_threshold } format1 = workBook.add_format({'align': 'center', 'font_size': 10, 'valign': 'vcenter', 'text_wrap': 1}) for key in writeTable.keys(): sheet.write(key, writeTable[key], format1) sheet.set_row(0, 60) sheet.set_column('A:M', 8.85) sheet.set_column('N:N', 73) sheet.set_column('P:P', 73) sheet.set_column('Q:Q', 60) sheet.set_column('S:S', 44) sheet.set_column('T:T', 15) sheet.set_column('U:U', 40) workBook.close() # 4.生成EXCEL_WX writeTable2 = {'A1': '报告期数', 'A2': xls_num, 'B1': '日期', 'B2': xls_date, 'C1': '时间', 'C2': xls_time, 'D1': 'NDVI阈值', 'D2': xls_threshold, 'E1': '边界缩放', 'E2': xls_boundary, 'F1': '面积(km2)无云无藻不填', 'F2': xls_area, } outXlsxWxName = jsonBaseName.replace('.json', '_wx.xlsx') outXlsxWxPath = os.path.join(outputDir, outXlsxWxName) if os.path.exists(outXlsxWxPath): os.remove(outXlsxWxPath) workBook2 = xlsxwriter.Workbook(outXlsxWxPath) sheet2 = workBook2.add_worksheet() format2 = workBook2.add_format({'align': 'center', 'font_size': 10, 'valign': 'vcenter', 'text_wrap': 1}) for key in writeTable2.keys(): sheet2.write(key, writeTable2[key], format2) sheet.set_row(0, 60) sheet.set_column('A:F', 9) workBook2.close() # 转pdf供前端查看 Windows无法测试=================================================== outPdfDir = os.path.join(globalCfg['taihu_report_remote'], issue[0:8]) if not os.path.exists(outPdfDir): os.makedirs(outPdfDir) cmdStr = 'libreoffice6.3 --headless --convert-to pdf:writer_pdf_Export ' + outWordPath + ' --outdir ' + outPdfDir print(cmdStr) try: os.system(cmdStr) print('Convert PDF Success.') except Exception as e: print(e) # 信息入库========================================================= conn = pymysql.connect( 
db=globalCfg['database'], user=globalCfg['database_user'], password=globalCfg['database_passwd'], host=globalCfg['database_host'], port=globalCfg['database_port'] ) # t_water_report_taihu cursor = conn.cursor() algaeTifName = os.path.basename(jsonPath).replace('.json', '.tif') db_uuid = str(uuid.uuid4()) db_date = issue[0:4] + '-' + issue[4:6] + '-' + issue[6:8] db_number = str(num) db_description = description db_label1 = '' db_label2 = '' db_label3 = '' db_image1 = '' db_image2 = '' db_image3 = '' db_high_area = '' db_mid_area = '' db_low_area = '' db_total_area = '' db_high_percent = '' db_mid_percent = '' db_low_percent = '' db_total_percent = '' db_image = algaeTifName db_title = '太湖蓝藻水华卫星遥感监测日报' db_time_modify = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) if templateID == 1: db_label1 = label1 db_image1 = picturePath1.replace('\\', '/').replace('/mnt/resource/', '') elif templateID == 2: db_label1 = label1 db_image1 = picturePath1.replace('\\', '/').replace('/mnt/resource/', '') db_label2 = label2 db_image2 = picturePath2.replace('\\', '/').replace('/mnt/resource/', '') elif templateID == 3: db_label1 = label1 db_image1 = picturePath1.replace('\\', '/').replace('/mnt/resource/', '') db_label2 = label2 db_image2 = picturePath2.replace('\\', '/').replace('/mnt/resource/', '') db_label3 = label3 db_image3 = picturePath3.replace('\\', '/').replace('/mnt/resource/', '') db_high_area = str(areaH) db_mid_area = str(areaM) db_low_area = str(areaL) db_total_area = str(totalArea) db_total_percent = '100.0' db_high_percent = str(percentH) db_mid_percent = str(percentM) db_low_percent = str(percentL) else: pass # 查找是否已存在 sqlStr = 'SELECT * FROM ' + globalCfg['database_table_report_taihu'] + \ ' WHERE image=%s and is_deleted=0;' cursor.execute(sqlStr, algaeTifName) sqlRes = cursor.fetchall() if len(sqlRes) > 0: # 更新 sqlStr = 'UPDATE ' + globalCfg['database_table_report_taihu'] + \ ' SET 
date=%s,number=%s,description=%s,image1=%s,image2=%s,image3=%s,label1=%s,label2=%s,label3=%s,' \ 'high_area=%s,mid_area=%s,low_area=%s,total_area=%s,high_percent=%s,mid_percent=%s,low_percent=%s,' \ 'total_percent=%s,title=%s,time_modify=%s WHERE image=%s;' sqlData = (db_date, db_number, db_description, db_image1, db_image2, db_image3, db_label1, db_label2, db_label3, db_high_area, db_mid_area, db_low_area, db_total_area, db_high_percent, db_mid_percent, db_low_percent, db_total_percent, db_title, db_time_modify, db_image) cursor.execute(sqlStr, sqlData) conn.commit() else: # 插入 sqlStr = 'INSERT INTO ' + globalCfg['database_table_report_taihu'] + \ ' (uuid,date,number,description,image1,image2,image3,label1,label2,label3,high_area,' \ 'mid_area,low_area,total_area,high_percent,mid_percent,low_percent,total_percent,is_deleted,' \ 'is_default,image,title,time_modify) VALUES ' \ '(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);' sqlData = (db_uuid, db_date, db_number, db_description, db_image1, db_image2, db_image3, db_label1, db_label2, db_label3, db_high_area, db_mid_area, db_low_area, db_total_area, db_high_percent, db_mid_percent, db_low_percent, db_total_percent, 0, 0, db_image, db_title, db_time_modify) cursor.execute(sqlStr, sqlData) conn.commit() # t_water_taihu_modis # 查找是否已存在 sqlStr = 'SELECT * FROM ' + globalCfg['database_table_report_taihu_info'] + \ ' WHERE image_uuid=%s;' cursor.execute(sqlStr, productUuid) sqlRes = cursor.fetchall() db_date = '%s-%s-%s %s:%s' % (issue[0:4], issue[4:6], issue[6:8], issue[8:10], issue[10:12]) regionArea = jsonData['regionArea'] area_zsh = str(regionArea['zhushanhu']) area_mlh = str(regionArea['meilianghu']) area_gh = str(regionArea['gonghu']) area_xbya = str(regionArea['westCoast']) area_nbya = str(regionArea['southCoast']) area_hxq = str(regionArea['centerLake']) area_dbya = str(regionArea['eastCoast']) area_dth = str(regionArea['eastTaihu']) db_region_area = ','.join([area_zsh, area_mlh, area_gh, 
area_xbya, area_nbya, area_hxq, area_dbya, area_dth]) if len(sqlRes) > 0: # 更新 sqlStr = 'UPDATE ' + globalCfg['database_table_report_taihu_info'] + \ ' SET number=%s,date=%s,ndvi_threshold=%s,ndvi_max=%s,ndvi_min=%s,ndvi_mean=%s,boundary=%s,area=%s,' \ 'region_area=%s,high_area=%s,mid_area=%s,low_area=%s,cloud=%s,type=%s,is_activate=%s,ndvi_grade=%s ' \ 'WHERE image_uuid=%s;' sqlData = (xls_num, db_date, xls_threshold, xls_ndviMax, xls_ndviMin, xls_ndviMean, xls_boundary, str(totalArea), db_region_area, str(areaH), str(areaM), str(areaL), xls_cloud, xls_explain, xls_activate, xls_intensity_threshold, productUuid) cursor.execute(sqlStr, sqlData) conn.commit() else: sqlStr = 'INSERT INTO ' + globalCfg['database_table_report_taihu_info'] + \ ' (number,date,ndvi_threshold,ndvi_max,ndvi_min,ndvi_mean,boundary,area,region_area,high_area,' \ 'mid_area,low_area,cloud,type,is_activate,ndvi_grade,image_uuid) ' \ 'VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);' sqlData = (xls_num, db_date, xls_threshold, xls_ndviMax, xls_ndviMin, xls_ndviMean, xls_boundary, str(totalArea), db_region_area, str(areaH), str(areaM), str(areaL), xls_cloud, xls_explain, xls_activate, xls_intensity_threshold, productUuid) cursor.execute(sqlStr, sqlData) conn.commit() # 更新t_export_image信息 sqlStr = 'SELECT * FROM ' + globalCfg['database_table_export_image'] + \ ' WHERE uuid=%s and is_deleted=0;' cursor.execute(sqlStr, productUuid) sqlRes = cursor.fetchall() if len(sqlRes) > 0: sqlStr = 'UPDATE ' + globalCfg['database_table_export_image'] + \ ' SET area=%s,threshold=%s WHERE uuid=%s;' sqlData = (totalArea, algaeThreshold, productUuid) cursor.execute(sqlStr, sqlData) conn.commit() else: pass cursor.close() conn.close() # 更新切片============================================================== tileDict = {} basename = '_'.join(jsonBaseName.split('_')[0:7]) # 1.蓝藻产品切片 algaeTifPath = os.path.join(outputDir, jsonBaseName.replace('.json', '.tif')) algaeTifRender = os.path.join(outputDir, 
jsonBaseName.replace('.json', '_render.tif')) colorTable = {1: (255, 251, 0)} UniqueValues.Render(algaeTifPath, colorTable, returnMode='GEOTIFF', outputPath=algaeTifRender, isAlpha=True) tileDict['taihu_algae_ndvi'] = {'tif': algaeTifRender, 'name': basename + '_taihu_algae_ndvi', 'legendType': '1', 'legendColor': [(255, 251, 0)], 'legendName': ['水华']} # 2.蓝藻强度产品切片 intensityTifPath = os.path.join(outputDir, jsonBaseName.replace('_ndvi.json', '_intensity.tif')) intensityTifRender = os.path.join(outputDir, jsonBaseName.replace('_ndvi.json', '_intensity_render.tif')) colorTable = {1: (0, 255, 102), 2: (255, 255, 0), 3: (255, 153, 0)} UniqueValues.Render(intensityTifPath, colorTable, returnMode='GEOTIFF', outputPath=intensityTifRender, isAlpha=True) tileDict['algaeClassify'] = {'tif': intensityTifRender, 'name': basename + '_classify', 'legendType': '1', 'legendColor': [(0, 255, 102), (255, 255, 0), (255, 153, 0)], 'legendName': ['轻度', '中度', '重度']} # 调用gdal2tiles工具进行切片 pythonPath = globalCfg['python_path'] gdal2tilesPath = globalCfg['gdal2tiles_path'] tileOutRootDir = globalCfg['tile_server_path'] for key in tileDict.keys(): tileTif = tileDict[key]['tif'] tileOutDir = os.path.join(tileOutRootDir, tileDict[key]['name']) if os.path.exists(tileOutDir): shutil.rmtree(tileOutDir) cmd = '%s %s -z %s -w all %s %s' % (pythonPath, gdal2tilesPath, TILE_LEVEL, tileTif, tileOutDir) os.system(cmd) os.remove(tileTif) tileDict[key]['path'] = tileOutDir
class Report(object):
    """Build a structural-variant (SV) analysis report from a docx template.

    Scans ``proj_path`` for the pipeline's statistics/plot files, extracts
    the numbers, replaces the placeholder pictures in the template and
    renders the final report docx into ``output_path``.
    """
    def __init__(self, proj_id, sample_id, customer_institute,
                 bioinfo_engineer, supervisor, proj_path, template_file,
                 output_path):
        """Store report metadata and load the docx template.

        :proj_id: project identifier (cover table + output file name)
        :proj_name: TODO
        :proj_path: directory scanned for statistics and plot files
        """
        self._proj_id = proj_id
        self._sample_id = sample_id
        self._customer_institute = customer_institute
        self._bioinfo_engineer = bioinfo_engineer
        self._supervisor = supervisor
        self._proj_path = proj_path
        self._template_file = template_file
        self._output_path = output_path
        # Loaded once; replace_pic/render mutate this instance in place.
        self.tpl = DocxTemplate(self._template_file)

    def format_cover_table_string(self, string):
        # Formatting hook for cover-table cells; currently a pass-through.
        return "{}".format(string)

    def get_proj_info(self):
        """Collect the cover-page fields for template rendering.

        :returns: dict keyed by the template's cover placeholders
        """
        proj_info = {
            'customer_institute':
            self.format_cover_table_string(self._customer_institute),
            'proj_id': self.format_cover_table_string(self._proj_id),
            'sample_id': self._sample_id,
            'report_date': self.format_cover_table_string(
                datetime.now().strftime("%Y-%m-%d")),
            'bioinfo_engineer':
            self.format_cover_table_string(self._bioinfo_engineer),
            'supervisor': self.format_cover_table_string(self._supervisor)
        }
        return proj_info

    '''
    def get_img(self, file_name, width=110.1, flag=True):
        """TODO: Docstring for proc_img.

        :file_name: TODO
        :returns: TODO
        """
        if 0:
            return InlineImage(self.tpl, file_name, width=Mm(width))
        return file_name
    '''

    def N50(self, file_name):
        # Scan a tab-separated stats file for the row labelled "N50" and
        # return its value (a string); returns 0 (falsy) when absent.
        with open(file_name, 'r') as io:
            for line in io:
                line = line.strip()
                fields = line.split("\t")
                if fields[0] == "N50":
                    return fields[1]
        return 0

    def basic_reads_stats(self, file_name):
        # Parse the reads statistics table: skip blank and '#' comment
        # lines, keep column 2 of each data row, then zip the values onto
        # the fixed row-name list.  NOTE(review): assumes the file's rows
        # appear in exactly this order -- confirm against the producer of
        # reads.stat.xls.
        stats_list = []
        with open(file_name, 'r') as io:
            for line in io:
                line = line.strip()
                if line == '':
                    continue
                if not line[0] == "#":
                    fields = line.split("\t")
                    stats_list.append(fields[1])
        stats_row_name = [
            "file_num", "num_of_reads", "num_bases", "average_length",
            "max_length"
        ]
        stats_dict = dict(i for i in zip(stats_row_name, stats_list))
        return stats_dict

    def get_first_match(self, end_with):
        # Locate exactly one file under proj_path whose name ends with
        # `end_with`; raises RuntimeError for zero or multiple candidates.
        file_list = [
            i for i in os.listdir(self._proj_path) if i.endswith(end_with)
        ]
        num_of_matchs = len(file_list)
        if num_of_matchs > 1:
            raise RuntimeError(
                "Only one {} file is supported now".format(end_with))
        elif num_of_matchs < 1:
            raise RuntimeError(
                "Cannot find {} file in proj_path".format(end_with))
        else:
            first = os.path.join(self._proj_path, file_list[0])
            return first

    def fastq_qc(self):
        # Assemble the fastq QC section: N50, basic read stats, estimated
        # sequencing depth, and the read-length histogram picture.
        read_distribute = self.get_first_match("reads.distribute.xls")
        read_stats = self.get_first_match("reads.stat.xls")
        fastq_qc = {}
        if self.N50(read_distribute):
            fastq_qc["N50"] = self.N50(read_distribute)
        else:
            raise RuntimeError("Cannot find N50 in read_distribute file")
        fastq_qc.update(self.basic_reads_stats(read_stats))
        # depth = num_bases/3000000000
        # (3e9 bp genome size -- presumably human; confirm for other species)
        fastq_qc['depth'] = "{:4.2f}".format(
            int(fastq_qc['num_bases'].replace(',', '')) / 3000000000)
        # length distribut
        read_length_distribute = self.get_first_match(
            "Reads_length_histogram.png")
        self.tpl.replace_pic('sample_id.Reads_length_histogram.png',
                             read_length_distribute)
        # fastq_qc['length_histogram'] = self.get_img(read_length_distribute)
        return fastq_qc

    def basic_bam_stats(self):
        # Parse "SN"-prefixed summary lines from the *.bam.bc stats file
        # (samtools-stats style "SN<TAB>key:<TAB>value" -- TODO confirm).
        bam_stat_file = self.get_first_match("bam.bc")
        stats_row_name = [
            "raw_total_sequences", "reads_mapped", "total_length",
            "bases_mapped", "error_rate"
        ]
        stats_list = []
        with open(bam_stat_file, "r") as io:
            bam_stat_dict = {}
            for line in io:
                line = line.strip()
                if line[0:2] == "SN":
                    fields = line.split("\t")
                    key = fields[1].strip(":").replace(" ", "_")
                    value = fields[2]
                    if key == "error_rate":
                        # NOTE(review): takes the first 6 chars of the rate,
                        # multiplies by 10 and formats as a percent --
                        # confirm this scaling is intended.
                        value = "{0:.2f}%".format(10 * float(value[0:6]))
                    if not key == "error_rate":
                        value = "{:,}".format(int(value))
                    if key in stats_row_name:
                        stats_list.append(value)
                    # All wanted rows collected -- stop reading the file.
                    if len(stats_list) == len(stats_row_name):
                        break
        bam_stat_dict = dict(i for i in zip(stats_row_name, stats_list))
        # Derived mapping rates (reads and bases), rendered as percentages.
        bam_stat_dict["reads_map_rate"] = "{0:.2f}%".format(
            100 * float(bam_stat_dict["reads_mapped"].replace(',', '')) /
            float(bam_stat_dict["raw_total_sequences"].replace(',', '')))
        bam_stat_dict["bases_map_rate"] = "{0:.2f}%".format(
            100 * float(bam_stat_dict["bases_mapped"].replace(',', '')) /
            float(bam_stat_dict["total_length"].replace(',', '')))
        return bam_stat_dict

    def sv_num(self):
        # Read per-type SV counts, add a "Total" row, and swap in the
        # SV-count bar plot picture.
        read_svnum = self.get_first_match("re4.svnum")
        with open(read_svnum, "r") as io:
            sv_num_dict = {}
            for line in io:
                line = line.strip()
                if not line[0] == "#":
                    fields = line.split("\t")
                    sv_num_dict[fields[0]] = "{:,}".format(int(fields[1]))
        total_sv_num = sum(
            [int(sv_num_dict[k].replace(',', '')) for k in sv_num_dict])
        sv_num_dict["Total"] = "{:,}".format(total_sv_num)
        # sv_num bar
        sv_num_barplot = self.get_first_match("re4.svnum.png")
        self.tpl.replace_pic('sample_id.re4.svnum.png', sv_num_barplot)
        return sv_num_dict

    def sv_len(self):
        # Replace the SV length-distribution picture in the template.
        sv_len_densityplot = self.get_first_match("re4.svlen.png")
        self.tpl.replace_pic('sample_id.re4.svlen.png', sv_len_densityplot)

    def create_report(self):
        """Render all sections into the template and save the report docx."""
        # sv len pic
        self.sv_len()
        content = {
            'proj_info': self.get_proj_info(),
            'fastq_qc': self.fastq_qc(),
            'bam_stat': self.basic_bam_stats(),
            'sv_num': self.sv_num()
        }
        self.tpl.render(content)
        output_file = '{proj_id}_{sample_id}_sv_report_{date}.docx'.format(
            proj_id=self._proj_id,
            sample_id=self._sample_id,
            date=time.strftime('%Y-%m-%d',
                               time.localtime(time.time())))
        self.tpl.save(os.path.join(self._output_path, output_file))
# outPicPath = r'C:\Users\Administrator\Desktop\NHS\TH_AD.jpg' # barChartInfo = {'legend': ('2020', '2021'), 'yMax': 2000, 'yLineStep': 200, 'labelOffsetY': 10, 'labelOffsetX': 0, # 'labelSize': 20, 'barWidth': 0.3, 'barGap': 0.1, 'yAxisName': '藻类密度(万个/升)', # 'paperSize': (25, 15), 'adjust': [0.07, 0.15, 0.98, 0.95]} # df = pd.DataFrame( # {'StationName': ['傀儡湖三水厂取水口', '阳澄湖二水厂', '水源地均值'], # '2020': [Decimal('3.9'), Decimal('11.4'), Decimal('7.6')], # '2021': [Decimal('10.0'), Decimal('8.1'), Decimal('9.0')]}) # outPicPath = r'C:\Users\Administrator\Desktop\NHS\YCH_CHLA.jpg' # barChartInfo = {'legend': ('2020', '2021'), 'yMax': 30, 'yLineStep': 5, 'labelOffsetY': 0.5, 'labelOffsetX': 0.03, # 'labelSize': 30, 'barWidth': 0.15, 'barGap': 0.1, 'yAxisName': '叶绿素a浓度(微克/升)', # 'paperSize': (20, 15), 'adjust': [0.07, 0.2, 0.98, 0.95]} # # df = pd.DataFrame( # {'StationName': ['傀儡湖三水厂取水口', '阳澄湖二水厂', '水源地均值'], # '2020': [Decimal('925'), Decimal('751'), Decimal('838')], # '2021': [Decimal('190'), Decimal('262'), Decimal('226')]}) # outPicPath = r'C:\Users\Administrator\Desktop\NHS\YCH_AD.jpg' # barChartInfo = {'legend': ('2020', '2021'), 'yMax': 2000, 'yLineStep': 200, 'labelOffsetY': 30, # 'labelOffsetX': 0.03, 'labelSize': 30, 'barWidth': 0.15, 'barGap': 0.1, # 'yAxisName': '藻类密度(万个/升)', 'paperSize': (20, 15), 'adjust': [0.15, 0.2, 0.98, 0.95]} drawBarChart(df, outPicPath, barChartInfo) tpl = DocxTemplate(r'C:\Users\Administrator\Desktop\NHS\template.docx') tpl.replace_pic('standard.png', outPicPath) outWordPath = r'C:\Users\Administrator\Desktop\NHS\1.docx' tpl.save(outWordPath) print('F')
# Generate a letter-head document by swapping the header/footer images
# into a docx template.  docxtpl's replace_pic substitutes an embedded
# picture by the file name it had when inserted into the template.
from docxtpl import DocxTemplate

# Image and template locations (relative to the working directory).
headerImageUrl = './header.png'
footerImageUrl = './footer.png'
templateUrl = './LetterHeadTemplate.docx'

# Fix: load the template through templateUrl instead of repeating the
# path literal (the variable was previously defined but never used).
docx_template = DocxTemplate(templateUrl)
docx_template.replace_pic('header_sample.png', headerImageUrl)
docx_template.replace_pic('footer_sample.png', footerImageUrl)
docx_template.save("generated_doc.docx")
def exportWord(jsonPath, productUuid):
    """Produce the Taihu algae quick-report and daily-report Word files,
    upsert the report rows into MySQL, and regenerate the map tiles.

    :param jsonPath: path to the product JSON (file name encodes the issue
        timestamp as field 4 of the '_'-separated basename)
    :param productUuid: uuid key of the product row in t_export_image
    """
    # Parse the product JSON.
    with open(jsonPath, 'r') as f:
        jsonData = json.load(f)
    cloud = jsonData['cloud']  # cloud coverage percentage
    totalArea = jsonData['totalArea']  # total algae bloom area
    # Build the report text ===============================================
    # Issue timestamp 'YYYYMMDDhhmm' taken from the file name.
    issue = os.path.basename(jsonPath).split('_')[3]
    year = int(issue[0:4])
    month = int(issue[4:6])
    day = int(issue[6:8])
    hour = int(issue[8:10])
    minute = int(issue[10:12])
    # Morning ('上午') / afternoon ('下午') marker.
    if hour < 12:
        apStr = '上午'
    else:
        apStr = '下午'
    totalPercent = jsonData['totalPercent']  # algae share of the lake area
    lakeStat = jsonData['lakeStat']  # per-region algae presence flags
    algaeThreshold = jsonData['algaeThreshold']
    lakeRegionList = []
    lakeRegionStr = ''
    # Region description 1: affected region names joined with '、'.
    for key in lakeStat.keys():
        if lakeStat[key] == 1:
            lakeRegionList.append(LAKE_REGION_NAME[key])
    if len(lakeStat) > 0:
        lakeRegionStr = '、'.join(lakeRegionList)
    # Region description 2: natural-language join ("A、B和C").
    if len(lakeRegionList) == 0:
        lakeRegionStr2 = ''
    elif len(lakeRegionList) == 1:
        lakeRegionStr2 = lakeRegionList[0]
    else:
        tempList = lakeRegionList[0:-1]
        lakeRegionStr2 = '、'.join(tempList) + '和' + lakeRegionList[-1]
    description = ''  # quick-report (pre-consultation) text
    description2 = ''  # monitoring-center daily-report text
    # Case 1: fully cloud covered.
    if cloud >= 95:
        description = '%d月%d日遥感监测结果显示,太湖%s全部被云层覆盖,无法判断蓝藻聚集情况,未会商。' % (
            month, day, apStr)
        description2 = '%d月%d日%d时%d分EOS/MODIS卫星遥感影像显示,太湖全部被云层覆盖,无法判断蓝藻聚集情况。' \
                       % (month, day, hour, minute)
    # Case 2: partly cloudy, no algae detected.
    elif 5 < cloud < 95 and totalArea == 0:
        description = '%d月%d日遥感监测结果显示,太湖%s部分被云层覆盖,无云区域内未发现蓝藻聚集现象,未会商。' % (
            month, day, apStr)
        description2 = '%d月%d日%d时%d分EOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内未发现蓝藻聚集现象。' \
                       % (month, day, hour, minute)
    # Case 3: clear sky, no algae.
    elif cloud <= 5 and totalArea == 0:
        description = '%d月%d日遥感监测结果显示,太湖%s未发现蓝藻聚集现象,未会商。' % (month, day, apStr)
        description2 = '%d月%d日%d时%d分EOS/MODIS卫星遥感影像显示,太湖未发现蓝藻聚集现象。' % (
            month, day, hour, minute)
    # Case 4: partly cloudy, algae present.
    elif 5 < cloud < 95 and totalArea > 0:
        description = '%d月%d日遥感监测结果显示,太湖%s部分被云层覆盖,在%s发现蓝藻聚集现象,面积约%d平方千米,占太湖总面积的%.1f%%,' \
                      '未会商。' % (month, day, apStr, lakeRegionStr, totalArea, totalPercent)
        description2 = '%d月%d日%d时%d分EOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米,' \
                       '主要分布在%s。' % (month, day, hour, minute, totalArea, lakeRegionStr2)
    # Case 5: clear sky, algae present.
    elif cloud <= 5 and totalArea > 0:
        description = '%d月%d日遥感监测结果显示,太湖%s在%s发现蓝藻聚集现象,面积约%d平方千米,占太湖总面积的%.1f%%,未会商。' \
                      % (month, day, apStr, lakeRegionStr, totalArea, totalPercent)
        description2 = '%d月%d日%d时%d分EOS/MODIS卫星遥感影像显示,太湖发现蓝藻聚集面积约%d平方千米,' \
                       '主要分布%s。' % (month, day, hour, minute, totalArea, lakeRegionStr2)
    else:
        pass
    print(description)
    print(description2)
    # Generate Word documents =============================================
    # Quick-report docx.
    dependDir = globalCfg['depend_path']
    templateDir = os.path.join(dependDir, 'word')
    templatePath = os.path.join(templateDir, 'report_quick.docx')
    tpl = DocxTemplate(templatePath)
    replaceText = {"content": description}
    tpl.render(replaceText)
    outputDir = os.path.dirname(jsonPath)
    jsonBaseName = os.path.basename(jsonPath)
    outWordName = jsonBaseName.replace('.json', '_quick.docx')
    outWordPath = os.path.join(outputDir, outWordName)
    picturePath1 = os.path.join(
        outputDir, jsonBaseName.replace('.json', '_reportImg1_noPoints.jpg'))
    picturePath2 = os.path.join(
        outputDir, jsonBaseName.replace('.json', '_reportImg2.jpg'))
    # Both report images must already exist; otherwise abort early.
    if not (os.path.exists(picturePath1) and os.path.exists(picturePath2)):
        print('Cannot Find JPG File!!!')
        return
    replacePic = {
        "template_picture1.jpg": picturePath1,
        "template_picture2.jpg": picturePath2
    }
    for key in replacePic.keys():
        tpl.replace_pic(key, replacePic[key])
    if os.path.exists(outWordPath):
        os.remove(outWordPath)
    tpl.save(outWordPath)
    # Daily-report docx.
    templatePath2 = os.path.join(templateDir, 'report_jsem.docx')
    tpl2 = DocxTemplate(templatePath2)
    replaceText2 = {
        "content": description2,
        "year": str(year),
        "mm": str(month),
        "dd": str(day)
    }
    tpl2.render(replaceText2)
    outWordName2 = jsonBaseName.replace('.json', '_jsem.docx')
    outWordPath2 = os.path.join(outputDir, outWordName2)
    replacePic2 = {
        "template_picture1.jpg": picturePath1,
    }
    for key in replacePic2.keys():
        tpl2.replace_pic(key,
                         replacePic2[key])
    if os.path.exists(outWordPath2):
        os.remove(outWordPath2)
    tpl2.save(outWordPath2)
    # Store into DB: t_water_report_taihu_quick ===========================
    conn = pymysql.connect(db=globalCfg['database'],
                           user=globalCfg['database_user'],
                           password=globalCfg['database_passwd'],
                           host=globalCfg['database_host'],
                           port=globalCfg['database_port'])
    # Check whether this issue already has a row (keyed by the tif name).
    cursor = conn.cursor()
    sqlStr = 'SELECT * FROM ' + globalCfg['database_table_report_taihu_quick'] + \
             ' WHERE image=%s and is_deleted=0;'
    algaeTifName = os.path.basename(jsonPath).replace('.json', '.tif')
    cursor.execute(sqlStr, algaeTifName)
    sqlRes = cursor.fetchall()
    if len(sqlRes) > 0:
        # Update the existing row.
        db_image = algaeTifName
        db_description = description
        # Convert to web-relative paths (strip the mount prefix).
        db_image1 = picturePath1.replace('\\', '/').replace('/mnt/resource/', '')
        db_image2 = picturePath2.replace('\\', '/').replace('/mnt/resource/', '')
        db_time_modify = time.strftime('%Y-%m-%d %H:%M:%S',
                                       time.localtime(time.time()))
        sqlStr = 'UPDATE ' + globalCfg['database_table_report_taihu_quick'] + \
                 ' SET description=%s,image1=%s,image2=%s,time_modify=%s WHERE image=%s;'
        sqlData = (db_description, db_image1, db_image2, db_time_modify, db_image)
        cursor.execute(sqlStr, sqlData)
        conn.commit()
    else:
        # Insert a new row.
        db_uuid = str(uuid.uuid4())
        db_description = description
        db_image1 = picturePath1.replace('\\', '/').replace('/mnt/resource/', '')
        db_image2 = picturePath2.replace('\\', '/').replace('/mnt/resource/', '')
        db_image = algaeTifName
        sqlStr = 'INSERT INTO ' + globalCfg['database_table_report_taihu_quick'] + \
                 ' (uuid,description,image1,image2,image) VALUES (%s,%s,%s,%s,%s);'
        sqlData = (db_uuid, db_description, db_image1, db_image2, db_image)
        cursor.execute(sqlStr, sqlData)
        conn.commit()
    # Update the t_export_image row with area/threshold.
    sqlStr = 'SELECT * FROM ' + globalCfg['database_table_export_image'] + \
             ' WHERE uuid=%s and is_deleted=0;'
    cursor.execute(sqlStr, productUuid)
    sqlRes = cursor.fetchall()
    if len(sqlRes) > 0:
        sqlStr = 'UPDATE ' + globalCfg['database_table_export_image'] + \
                 ' SET area=%s,threshold=%s WHERE uuid=%s;'
        sqlData = (totalArea, algaeThreshold, productUuid)
        cursor.execute(sqlStr, sqlData)
        conn.commit()
    else:
        pass
    cursor.close()
    conn.close()
    # Regenerate map tiles ================================================
    tileDict = {}
    basename = '_'.join(jsonBaseName.split('_')[0:7])
    # 1. Algae product tiles (single-class yellow render).
    algaeTifPath = os.path.join(outputDir, jsonBaseName.replace('.json', '.tif'))
    algaeTifRender = os.path.join(outputDir,
                                  jsonBaseName.replace('.json', '_render.tif'))
    colorTable = {1: (255, 251, 0)}
    UniqueValues.Render(algaeTifPath,
                        colorTable,
                        returnMode='GEOTIFF',
                        outputPath=algaeTifRender,
                        isAlpha=True)
    tileDict['taihu_algae_ndvi'] = {
        'tif': algaeTifRender,
        'name': basename + '_taihu_algae_ndvi',
        'legendType': '1',
        'legendColor': [(255, 251, 0)],
        'legendName': ['水华']
    }
    # 2. Algae intensity tiles (3-class light/medium/heavy render).
    intensityTifPath = os.path.join(
        outputDir, jsonBaseName.replace('_ndvi.json', '_intensity.tif'))
    intensityTifRender = os.path.join(
        outputDir, jsonBaseName.replace('_ndvi.json', '_intensity_render.tif'))
    colorTable = {1: (0, 255, 102), 2: (255, 255, 0), 3: (255, 153, 0)}
    UniqueValues.Render(intensityTifPath,
                        colorTable,
                        returnMode='GEOTIFF',
                        outputPath=intensityTifRender,
                        isAlpha=True)
    tileDict['algaeClassify'] = {
        'tif': intensityTifRender,
        'name': basename + '_classify',
        'legendType': '1',
        'legendColor': [(0, 255, 102), (255, 255, 0), (255, 153, 0)],
        'legendName': ['轻度', '中度', '重度']
    }
    # Slice each rendered tif with the external gdal2tiles tool, replacing
    # any previous tile directory; the intermediate render tif is deleted.
    pythonPath = globalCfg['python_path']
    gdal2tilesPath = globalCfg['gdal2tiles_path']
    tileOutRootDir = globalCfg['tile_server_path']
    for key in tileDict.keys():
        tileTif = tileDict[key]['tif']
        tileOutDir = os.path.join(tileOutRootDir, tileDict[key]['name'])
        if os.path.exists(tileOutDir):
            shutil.rmtree(tileOutDir)
        cmd = '%s %s -z %s -w all %s %s' % (pythonPath, gdal2tilesPath,
                                            TILE_LEVEL, tileTif, tileOutDir)
        os.system(cmd)
        os.remove(tileTif)
        tileDict[key]['path'] = tileOutDir
# -*- coding: utf-8 -*-
'''
Created : 2017-09-03
@author: Eric Lapouyade
'''
from docxtpl import DocxTemplate

# Demo: swap the template's embedded python_logo.png for another image,
# render with an empty context, and write the result to DEST_FILE.
DEST_FILE = 'test_files/replace_picture.docx'

document = DocxTemplate('test_files/replace_picture_tpl.docx')
document.replace_pic('python_logo.png', 'test_files/python.png')
document.render({})
document.save(DEST_FILE)