class Document(object):
    """Wrapper around a docxtpl ``DocxTemplate`` that accumulates chapters,
    inline images and context variables, then renders them into one report.
    """

    def __init__(self, templ_docx):
        # templ_docx: path to the .docx template consumed by docxtpl.
        self._doc = DocxTemplate(templ_docx)
        self._pic_to_render = {}  # picture tag -> InlineImage
        self._context = {}        # template render context
        self._chapter = {}        # "chapterN" tag -> rendered subdoc
        self._sequence = 0        # monotonically increasing chapter counter

    def sequence(self):
        """Return the next value of the internal chapter counter (1, 2, ...)."""
        self._sequence = self._sequence + 1
        return self._sequence

    def add_context(self, context=None):
        """Replace the render context with a copy of *context*.

        Fix: the original signature used a mutable default (``context={}``),
        a classic Python pitfall; ``None`` sentinel preserves behaviour.
        """
        self._context = dict(context) if context else {}

    def add_heading(self, title, level=1):
        """Append a heading paragraph of the given *level* to the document."""
        docx = self._doc.get_docx()
        docx.add_heading(title, level)

    def add_chapter(self, chapter, title=None, level=2, remove_file=True):
        """Render *chapter*, register it as a subdoc and collect its images.

        A placeholder paragraph ``{{p chapterN}}`` is inserted; render()
        later substitutes the subdoc for it.  The chapter's temporary file
        is removed in ``finally`` unless *remove_file* is False.
        """
        try:
            docx = self._doc.get_docx()
            if title is not None:
                docx.add_heading(title, level)
            # 'tag' instead of 'id': avoid shadowing the builtin.
            tag = "chapter" + str(self.sequence())
            chapter.render()
            docx.add_paragraph("{{p " + tag + "}}")
            self._chapter[tag] = self._doc.new_subdoc(chapter.get_filename())
            pic_to_add = chapter.get_pic_to_add()
            if pic_to_add:
                for pic in pic_to_add:
                    path = pic_to_add[pic]["path"]
                    width = pic_to_add[pic]["width"]
                    self._pic_to_render[pic] = InlineImage(
                        self._doc, path, width=Mm(width))
        finally:
            if remove_file:
                chapter.remove_file()

    def inlineimage(self, image_file, image_width=147):
        """Return an InlineImage for *image_file*, width in millimetres."""
        return InlineImage(self._doc, image_file, width=Mm(image_width))

    def render(self):
        """Render chapters/context first, then the collected images.

        Two passes are needed: the first pass substitutes the chapter
        subdocs (which may themselves contain image tags).
        """
        if self._chapter or self._context:
            self._context.update(self._chapter)
            self._doc.render(self._context)
        if self._pic_to_render:
            self._doc.render(self._pic_to_render)

    def save(self, filename):
        """Write the rendered document to *filename*."""
        self._doc.save(filename)
def excel2word(excel_path, word_path, output_path):
    """Build a Word report from an Excel configuration workbook.

    Every worksheet except "条目配置" (item configuration) is converted into
    a sub-document table exposed to the template as ``table_<sheetname>``;
    the "条目配置" sheet is parsed by ``get_item_config`` and merged into the
    render context directly.  Merged Excel regions are reproduced as merged
    Word table cells.
    """
    tpl = DocxTemplate(word_path)
    wb = load_workbook(filename=excel_path)
    context = {}
    for sheetname in wb.sheetnames:
        ws1 = wb[sheetname]
        if sheetname == "条目配置":
            context.update(get_item_config(ws1))
            continue
        document = tpl.new_subdoc()
        context[f"table_{sheetname}"] = document
        table = document.add_table(rows=ws1.max_row, cols=ws1.max_column)
        table.style = "table"
        for x in range(1, ws1.max_row + 1):
            for y in range(1, ws1.max_column + 1):
                # Read-only access; the original passed value=None which
                # openpyxl treats as "do not assign".
                c = ws1.cell(row=x, column=y)
                # Fix: isinstance instead of type() == — MergedCell
                # placeholders carry no value and must be skipped.
                if not isinstance(c, MergedCell):
                    table.rows[x - 1].cells[y - 1].text = str(c.value)
        # Reproduce the worksheet's merged regions in the Word table.
        for cell_range in ws1.merged_cells:
            cells = list(cell_range.cells)
            c_first = table.rows[cells[0][0] - 1].cells[cells[0][1] - 1]
            c_last = table.rows[cells[-1][0] - 1].cells[cells[-1][1] - 1]
            c_first.merge(c_last)
    tpl.render(context)
    tpl.save(output_path)
def get_page(self, id, doc):
    """Fetch a wagtail ``Page`` by *id* and flatten its stream blocks.

    Returns a dict with a ``'blocks'`` list on success, or a dict with an
    ``'error'`` key on failure.

    Fixes over the original:
    - ``return context.update({...})`` always returned ``None`` because
      ``dict.update`` returns ``None``; the populated dict is now returned.
    - bare ``except:`` narrowed to the exceptions ``int()`` can raise.
    - ``block_type in ('heading')`` was a substring test against the string
      ``'heading'`` (``('heading')`` is not a tuple); equality is used.
    """
    context = {}
    try:
        id = int(id)
    except (TypeError, ValueError):
        context['error'] = 'id value is not int'
        return context
    page = Page.objects.filter(id=id)
    if len(page) <= 0:
        context['error'] = f'Page {id} not found'
        return context
    lpage = page[0]
    type_name = lpage.content_type.name.replace(' ', '')
    if type_name == 'meynstreampage':
        lpage = getattr(lpage, type_name).body
    lblock = []
    for block in lpage:
        if block.block_type == 'heading':
            lblock.append({'type': block.block_type, 'value': block.value})
        elif block.block_type == 'paragraph':
            lblock.append({'type': block.block_type,
                           'value': self.html2sb(str(block.value), doc)})
        elif block.block_type == 'warning':
            lblock.append({
                'type': block.block_type,
                'value': self.html2sb(
                    block.value.bound_blocks['warning'].value.source, doc)})
        else:
            # Unknown block type: log it and emit a placeholder subdoc.
            logging.error(f"type : { block.block_type }")
            subdoc_dir = os.path.join(settings.STATICFILES_DIRS[0],
                                      "template/other.docx")
            other_doc = DocxTemplate(subdoc_dir)  # don't shadow the *doc* arg
            other_doc.new_subdoc().add_paragraph('un paragraphe')
            lblock.append({'type': 'other', 'value': other_doc.build_xml({})})
    context['blocks'] = lblock
    return context
def main():
    """Preprocess the input CSV, then render every entry into output.docx,
    inserting a location header whenever the garden location changes."""
    scratch_csv = 'merge_columnout.csv'
    preprocess_csv(input_csv, scratch_csv)

    template = DocxTemplate(template_docx)
    body = template.new_subdoc()

    # Sentinel guarantees a header is emitted for the very first row,
    # even if its location value happens to be None.
    last_location = object()
    with open(scratch_csv, 'r') as handle:
        for record in csv.DictReader(handle, delimiter=','):
            location = record['Emplacement du jardin']
            if location != last_location:
                _add_localisation_header(location, document=body)
            _create_new_entry_in_doc(record, document=body)
            last_location = location

    template.render({'mysubdoc': body})
    template.save('output.docx')
def nccn():
    """Generate one NCCN-guideline gene .docx per cancer type (workbook sheet).

    Reads nccn.xlsx (one sheet per treatment id), builds a gene/meaning/result
    table whose result column is filled with raw Jinja tags (``{%p if ... %}``)
    to be resolved by a later docxtpl render, and saves each sheet to
    template/4medicaltip/nccn/<treadid>.docx.
    """
    nccnpath = os.path.join(basepath, 'baseinfo', 'nccn.xlsx')
    tplpath = os.path.join(basepath, 'template', 'blank.docx')
    # sheet_name=None -> dict of {sheet name: DataFrame}
    nccnxlsx = pd.read_excel(nccnpath, None)
    for treadid, df in nccnxlsx.items():
        df = df.fillna('NA')
        tpl = DocxTemplate(tplpath)
        sd = tpl.new_subdoc()
        # Heading paragraph; its text is appended later from the
        # 'treatresult' row.  Style names are defined in blank.docx.
        p = sd.add_paragraph('', style='02级标题')
        sd.add_paragraph()
        table = sd.add_table(rows=1, cols=3, style='1summary_common')
        # Header: gene / meaning / result (column captions, in Chinese).
        content = ['检测基因', '检测意义', '检测结果']
        table.cell(0, 0).text = content[0]
        table.cell(0, 1).text = content[1]
        table.cell(0, 2).text = content[2]
        nccndescribe = ''
        for _, rows in df.iterrows():
            gene = rows['检测基因']  # gene name column
            if gene == 'info':
                # Sheet-level description, appended after the table.
                nccndescribe = rows['检测范围']
            elif gene == '微卫星状态':  # microsatellite status row: skipped
                continue
            elif gene == 'treatresult':
                # NOTE(review): assumes the 'treatresult' row exists in
                # every sheet; p stays empty otherwise.
                treatresult = rows['检测范围']
                p.add_run('{}NCCN指南相关基因'.format(treatresult))
            else:
                # One new table row per gene.
                rownum = len(table.rows)
                common.add_tables_self(table, 1)
                common.add_run_self(table.cell(rownum, 0).paragraphs[0], gene,
                                    font_size=8, alignment='left')
                if rows['检测意义'] == 'NA':
                    # Same meaning as the row above: merge vertically.
                    table.cell(rownum, 1).merge(table.cell(rownum - 1, 1))
                else:
                    common.add_run_self(table.cell(rownum, 1).paragraphs[0],
                                        rows['检测意义'],
                                        font_size=8, alignment='other')
                # Five paragraphs of raw Jinja: if gene found -> highlighted
                # result, else -> plain result.  Doubled braces escape the
                # format() call, producing literal {{ nccn.GENE.r }} etc.
                rlist = ['{{%p if nccn.{}.f %}}'.format(gene),
                         '{{{{ nccn.{0}.r }}}}'.format(gene),
                         '{{%p else %}}',
                         '{{{{ nccn.{0}.r }}}}'.format(gene),
                         '{{%p endif %}}']
                for i in range(5):
                    if i > 0:
                        table.cell(rownum, 2).add_paragraph('')
                    if i == 1:
                        # Positive-branch result rendered in orange.
                        common.add_run_self(table.cell(rownum, 2).paragraphs[i],
                                            rlist[i], font_size=8,
                                            font_color=(255, 99, 28),
                                            alignment='left')
                    else:
                        common.add_run_self(table.cell(rownum, 2).paragraphs[i],
                                            rlist[i], font_size=8,
                                            alignment='left')
        width = [2.74, 11, 4.0]
        common.adjust_table_width(table, width)
        sd.add_paragraph(nccndescribe, style='0星号段落')
        word = os.path.join(basepath, 'template', '4medicaltip', 'nccn',
                            f'{treadid}.docx')
        context = {'context': sd}
        tpl.render(context, autoescape=True)
        tpl.save(word)
def space_group_subdoc(tpl_doc: DocxTemplate, cif: CifContainer) -> Subdoc:
    """
    Generates a Subdoc subdocument with the xml code for a math element in MSWord.
    """
    groups = SpaceGroups()
    try:
        mathml = groups.to_mathml(cif.space_group)
    except KeyError:
        # Unknown space group: fall back to a literal question mark.
        mathml = '<math xmlns="http://www.w3.org/1998/Math/MathML">?</math>'
    word_math = math_to_word(mathml)
    # Raw XML can only be attached through a sub-document paragraph element.
    subdoc = tpl_doc.new_subdoc()
    paragraph: Paragraph = subdoc.add_paragraph()
    paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.LEFT
    paragraph._element.append(word_math)
    try:
        paragraph.add_run(' ({})'.format(cif.spgr_number))
    except AttributeError:
        # cif may not expose spgr_number; the number is simply omitted.
        pass
    return subdoc
# -*- coding: utf-8 -*-
'''
Created : 2015-03-12
@author: Eric Lapouyade
'''
from docxtpl import DocxTemplate
from docx.shared import Inches

# Build a sub-document exercising the python-docx API surface that
# docxtpl sub-documents support: runs, headings, styles, pictures, tables.
template = DocxTemplate('test_files/subdoc_tpl.docx')
subdoc = template.new_subdoc()

subdoc.add_paragraph('This is a sub-document inserted into a bigger one')
para = subdoc.add_paragraph('It has been ')
para.add_run('dynamically').style = 'dynamic'
para.add_run(' generated with python by using ')
para.add_run('python-docx').italic = True
para.add_run(' library')

subdoc.add_heading('Heading, level 1', level=1)
subdoc.add_paragraph('This is an Intense quote', style='IntenseQuote')
subdoc.add_paragraph('A picture :')
subdoc.add_picture('python_logo.png', width=Inches(1.25))
subdoc.add_paragraph('A Table :')

table = subdoc.add_table(rows=1, cols=3)
header_cells = table.rows[0].cells
header_cells[0].text = 'Qty'
header_cells[1].text = 'Id'
# -*- coding: utf-8 -*-
'''
Created : 2015-03-12
@author: Eric Lapouyade
'''
from docxtpl import DocxTemplate

# Render subdoc_tpl.docx with a dynamically generated sub-document
# bound to the {{ mysubdoc }} template variable.
template = DocxTemplate('test_files/subdoc_tpl.docx')
subdoc = template.new_subdoc()

subdoc.add_paragraph('This is a sub-document inserted into a bigger one')
para = subdoc.add_paragraph('It has been ')
para.add_run('dynamically').style = 'dynamic'
para.add_run(' generated with python by using ')
para.add_run('python-docx').italic = True
para.add_run(' library')

template.render({'mysubdoc': subdoc})
template.save('test_files/subdoc.docx')
# Run's color data output_run.font.color.rgb = run.font.color.rgb # Run's font data output_run.style.name = run.style.name # Paragraph's alignment data output_para.paragraph_format.alignment = paragraph.paragraph_format.alignment ## make logic that if a line has capital letter each word or just one line no ## punctuation, make it bold? ## look at source code for docxcomposer - maybe i can use this to insert whole ## thing into # %% from docxtpl import DocxTemplate from docx.shared import Inches doc = DocxTemplate("templates/template.docx") sd = doc.new_subdoc(docpath = 'articles/Article1.docx') context = { 'mysubdoc': sd, } doc.render(context) doc.save('generated_docs/subdoc_test.docx') # %%
def genReport():
    """Build the survey metrics Word report, convert it to PDF and mail it.

    Pulls chart data for the requested survey/company from the local backend,
    renders the radar/bar charts with ``cl``, embeds them into sampleWord.docx
    through docxtpl sub-documents, converts the result to PDF and sends it.
    """
    survey = request.args.get('survey')
    company = request.args.get('company')
    curdate = datetime.datetime.now().strftime("%Y-%m-%d")
    host, base, colection, dbuser, pwd = bl.mongoInit('users')

    # The backend responds with the dataframe file name for this survey.
    req = requests.get(
        "http://localhost/backend/chartsAll?survey={}&company={}".format(
            survey, company))
    print(req.text)
    file = req.text

    r = requests.get(
        "http://localhost/backend/getAllRadar?dataframe={}".format(file))
    jsonres = r.json()
    # Interleave Physical/Organizational/Technical scores per index,
    # matching the order the full radar chart expects.
    subscores = []
    for idx in range(4):
        for sector_key in ('Physical', 'Organizational', 'Technical'):
            subscores.append(jsonres[sector_key][idx])
    R0 = cl.genFullRadar(sectors, subsectors, subscores)

    r = requests.get(
        "http://localhost/backend/getAllRadarBySector?dataframe={}".format(
            file))
    jsonres = r.json()
    # One radar per resilience dimension, each with its own colour.
    radar_specs = (('#ff6666', 'Robustness'),
                   ('#ffcc99', 'Redundancy'),
                   ('#99ff99', 'Resourcefulness'),
                   ('#66b3ff', 'Rapidity'))
    radars = [cl.genRadar(entry['labels'], entry['score'], color, title)
              for entry, (color, title) in zip(jsonres, radar_specs)]
    R1, R2, R3, R4 = radars

    r = requests.get(
        "http://localhost/backend/getAllRadarAllSectors?dataframe={}".format(
            file))
    jsonres = r.json()
    values = jsonres['scores']
    print(values)
    labels = pd.Series(jsonres['category'])
    values = pd.Series(values)
    R5 = cl.barh(labels, values)

    r = requests.get(
        "http://localhost/backend/getAllRadar?dataframe={}".format(file))
    jsonres = r.json()
    phy = jsonres['Physical']
    org = jsonres['Organizational']
    tech = jsonres['Technical']
    subsecs = ['Physical', 'Organizational', 'Technical']
    R1_bar = cl.barRadar('R1', [phy[0], org[0], tech[0]], subsecs)
    R2_bar = cl.barRadar('R2', [phy[1], org[1], tech[1]], subsecs)
    R3_bar = cl.barRadar('R3', [phy[2], org[2], tech[2]], subsecs)
    R4_bar = cl.barRadar('R4', [phy[3], org[3], tech[3]], subsecs)

    print([R0, R1, R2, R3, R4, R5, R1_bar, R2_bar, R3_bar, R4_bar])

    doc = DocxTemplate("sampleWord.docx")

    def picture_subdoc(image, inches):
        # One sub-document per embedded chart image.
        sub = doc.new_subdoc()
        sub.add_picture(image, width=Inches(inches))
        return sub

    s1 = picture_subdoc(R1, 7)
    s2 = picture_subdoc(R2, 7)
    s3 = picture_subdoc(R3, 7)
    s4 = picture_subdoc(R4, 7)
    s5 = picture_subdoc(R0, 7)
    s6 = picture_subdoc(R5, 7)
    s7 = picture_subdoc(R1_bar, 5)
    s8 = picture_subdoc(R2_bar, 5)
    s9 = picture_subdoc(R3_bar, 5)
    s10 = picture_subdoc(R4_bar, 5)

    req = requests.get(
        "http://localhost/backend/surveysCount?survey={}&company={}".format(
            survey, company))
    count = req.text

    context = {
        'bar1': s7,
        'bar2': s8,
        'bar3': s9,
        'bar4': s10,
        'chart5': s6,
        'radar1': s1,
        'radar2': s2,
        'radar3': s3,
        'radar4': s4,
        'complete': s5,
        'company': company,
        'survey': survey,
        'date': curdate,
        'number': count,
    }
    doc.render(context)
    filename = "Survey_Metrics_{}.docx".format(bl.getTimeStamp())
    doc.save(filename)
    outputFile = bl.convertToPDF(filename)
    bl.sendReport(survey, company, outputFile)
    return "Sent Report"
# -*- coding: utf-8 -*-
'''
Created : 2021-07-30
@author: Eric Lapouyade
'''
from docxtpl import DocxTemplate

# Merge an existing .docx into the master template as a sub-document.
master = DocxTemplate('templates/merge_docx_master_tpl.docx')
merged_subdoc = master.new_subdoc('templates/merge_docx_subdoc.docx')
master.render({'mysubdoc': merged_subdoc})
master.save('output/merge_docx.docx')
class doc(object):
    """Security-advisory report builder.

    Holds one docxtpl sub-document per vulnerability category (web,
    application, network device, operating system), fills them with
    headings/tables, and renders everything into a dated .docx advisory.
    """

    def __init__(self):
        # Initialise from the user's demo.docx template.
        base_url = getDir() + '/user/user_template/'
        asset_url = base_url + 'demo.docx'
        self.tpl = DocxTemplate(asset_url)
        self.webvul = self.tpl.new_subdoc()     # web application vulnerabilities
        self.appvul = self.tpl.new_subdoc()     # application program vulnerabilities
        self.devicevul = self.tpl.new_subdoc()  # network device vulnerabilities
        self.sysvul = self.tpl.new_subdoc()     # operating system vulnerabilities
        self.time = self.tpl.new_subdoc()
        self.number = 0                         # running count of entries added

    def add_title(self, title, type , level=3):
        """Append a SimSun (宋体) heading to the subdoc selected by *type*."""
        doc = self.check_type(type)
        run = doc.add_heading('', level=level).add_run(title)
        run.font.name = "宋体"
        run.font.size = Pt(14)
        # East-Asian font must be set on the raw rPr element as well.
        r = run.element
        r.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')

    def add_table(self,jsons, type=1):
        '''
        Append one heading + key/value table per entry to the chosen subdoc.

        :param jsons: list of dicts; each dict's 'title' key becomes the
                      heading and the remaining pairs become table rows.
        :param type: 1 = web vuln, 2 = application vuln,
                     3 = network device vuln, 4 = system vuln
        :return: None
        '''
        # Pick the subdoc node to write into.
        doc = self.check_type(type)
        for json in jsons:
            # j walks the dict entries; entry 0 (the title) is skipped,
            # entries 1..n-1 land in table rows 0..n-2.
            j = 0
            title = json['title']
            self.number += 1
            self.add_title(title,type=type)
            # One row per non-title key.
            table = doc.add_table(rows=len(json)-1, cols=2, style="Style1")
            table.autofit = False
            for key, value in json.items():
                if j == 0:
                    # First pair is the title, already rendered as a heading.
                    j +=1
                else:
                    # Column widths re-applied per row (idempotent).
                    table.columns[0].width = Cm(3)
                    table.columns[1].width = Cm(12)
                    table.cell(j - 1, 0).width = Cm(3)
                    table.cell(j - 1, 1).width = Cm(12)
                    keyCell = table.cell(j-1, 0)
                    valueCell = table.cell(j-1, 1)
                    # Key cell: Microsoft YaHei (微软雅黑), 10.5pt,
                    # 1.5 line spacing, right-aligned, vertically centred.
                    key_paragraph = keyCell.paragraphs[0]
                    keyRun = key_paragraph.add_run(key)
                    keyRun.font.name = u'微软雅黑'
                    keyRun._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
                    keyRun.font.size = Pt(10.5)
                    key_paragraph.paragraph_format.line_spacing = WD_LINE_SPACING.ONE_POINT_FIVE
                    key_paragraph.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.RIGHT
                    keyCell.vertical_alignment = WD_CELL_VERTICAL_ALIGNMENT.CENTER
                    # Value cell: same font, left-aligned.
                    val_paragraph = valueCell.paragraphs[0]
                    valueRun = val_paragraph.add_run(value)
                    valueRun.font.name = u'微软雅黑'
                    valueRun.font.size = Pt(10.5)
                    valueRun._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
                    val_paragraph.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
                    val_paragraph.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.LEFT
                    valueCell.vertical_alignment = WD_CELL_VERTICAL_ALIGNMENT.CENTER
                    j = j+1
        # Write the filled subdoc back onto the matching attribute.
        # NOTE(review): check_type already returns these same objects, so the
        # reassignment looks redundant — confirm before removing.
        if type==1:
            self.webvul=doc
        elif type==2:
            self.appvul= doc
        elif type==3:
            self.devicevul = doc
        else:
            self.sysvul = doc

    # Save the assembled docx document.
    def save_doc(self,current_time, start_time):
        """Render the template with all subdocs and save it under a dated name."""
        filename = "上海驭胜信息安全通告(%s至%s).docx" % (str(start_time),str(current_time))
        content = {'subdoc': self.webvul, 'appdoc':self.appvul,
                   'devicedoc': self.devicevul, 'sysdoc': self.sysvul,
                   'time': current_time , 'start_time': start_time,
                   'end_time': current_time}
        self.tpl.render(content)
        self.tpl.save(filename)
        print("file %s success to save!" % filename)

    def check_type(self,num):
        """Map a category number (1-4) to the corresponding subdoc."""
        if num==1:
            return self.webvul
        elif num ==2:
            return self.appvul
        elif num ==3:
            return self.devicevul
        else:
            return self.sysvul

    def add_hyperlink(self,paragraph, text, url):
        """Append a styled hyperlink run to *paragraph* and return the element."""
        # This gets access to the document.xml.rels file and gets a new relation id value
        part = paragraph.part
        r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)
        # Create the w:hyperlink tag and add needed values
        hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')
        hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )
        # Create a w:r element and a new w:rPr element
        new_run = docx.oxml.shared.OxmlElement('w:r')
        rPr = docx.oxml.shared.OxmlElement('w:rPr')
        # Join all the xml elements together add add the required text to the w:r element
        new_run.append(rPr)
        new_run.text = text
        hyperlink.append(new_run)
        # Create a new Run object and add the hyperlink into it
        r = paragraph.add_run ()
        r.font.name = u'微软雅黑'  # font: Microsoft YaHei
        r.font.size = Pt(10.5)    # size: 10.5pt
        r._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
        r._r.append (hyperlink)
        # A workaround for the lack of a hyperlink style (doesn't go purple after using the link)
        # Delete this if using a template that has the hyperlink style in it
        r.font.color.theme_color = MSO_THEME_COLOR_INDEX.HYPERLINK
        r.font.underline = True
        return hyperlink
from docxtpl import DocxTemplate

# Insert sub.docx into the investment-proposal template and render it.
template = DocxTemplate("黄浦投资意见书.docx")
sub_document = template.new_subdoc('sub.docx')
template.render({
    't1': 'tttttttttttttt1',
    't2': 'tttttttttttttt2',
    'sub': sub_document,
})
template.save("generated_doc.docx")
def reportBuild(featurePath, wmPath, gmPath, csfPath, hippoPath,
                originImagePath, tempDir):
    """Render a brain-MRI analysis report (.docx) into *tempDir*.

    featurePath / wmPath (with gmPath, csfPath) / hippoPath may each be
    ``None``; the corresponding section is then left blank.

    Fixes over the original:
    - CSF diagnosis compared ``wmVperData`` against the CSF upper bound;
      it now correctly tests ``csfVperData``.
    - Variables assigned only inside the optional branches (Median, Mean,
      *Mark values, image paths, featureDict, diagnoses) are initialised
      up front, so skipping a branch no longer raises NameError at the
      render-context step.
    """
    # Blank defaults keep the render context fully populated even when a
    # section is skipped.
    Volume = ""
    SurfaceArea = ""
    AreaVolume = ""
    Median = ""
    Mean = ""
    wmVolume = ""
    gmVolume = ""
    csfVolume = ""
    totalVolume = ""
    wmVper = ""
    gmVper = ""
    csfVper = ""
    leftVolume = ""
    rightVolume = ""
    SurfaceAreaMark = AreaVolumeMark = MedianMark = MeanMark = ""
    leftVolumeMark = rightVolumeMark = ""
    brainDignose = ""
    hippoDignose = ""
    featureDict = None
    hippoVolumeImagePath = None
    VolumeDistributionImagePath = None
    hippoImagePngPath = None

    # ---- radiomics features -------------------------------------------
    if featurePath is not None:
        with open(featurePath, 'r') as f:
            featureData = csv.DictReader(f)
            featureDict = [row for row in featureData][0]
        Volume = float(featureDict["original_shape_VoxelVolume"])
        SurfaceArea = round(float(featureDict['original_shape_SurfaceArea']), 3)
        AreaVolume = round(
            float(featureDict["original_shape_SurfaceVolumeRatio"]), 3)
        Median = round(float(featureDict['original_firstorder_Median']), 3)
        Mean = round(float(featureDict['original_firstorder_Mean']), 3)
        # Flag values outside the normal-control reference intervals.
        SurfaceAreaMark = compare(SurfaceArea, 3728.80, 4276.30)
        AreaVolumeMark = compare(AreaVolume, 0.610, 0.689)
        MedianMark = compare(Median, 1.58, 1.75)
        MeanMark = compare(Mean, 1.59, 1.75)
        hippoVolumeImagePath = os.path.join(tempDir, "hippoVolume.png")
        drawDis(u=stdDistribution["NChippoVolumeMean"],
                subject=Volume,
                xlabel="海马体积 / mm3",
                sig=stdDistribution["NChippoVolumeStd"],
                u2=stdDistribution["ADhippoVolumeMean"],
                sig2=stdDistribution["ADhippoVolumeStd"],
                savePath=hippoVolumeImagePath)

    # ---- tissue volumes (WM / GM / CSF) -------------------------------
    if wmPath is not None:
        wmImage = nib.load(wmPath)
        gmImage = nib.load(gmPath)
        csfImage = nib.load(csfPath)
        wmVolume = round(np.sum(wmImage.get_fdata().flatten()), 2)
        gmVolume = round(np.sum(gmImage.get_fdata().flatten()), 2)
        csfVolume = round(np.sum(csfImage.get_fdata().flatten()), 2)
        totalVolume = round(wmVolume + gmVolume + csfVolume, 2)
        # Percentages truncated (not rounded) to one decimal — int()
        # truncation kept intentionally to preserve original output.
        wmVperData = float(int(wmVolume / totalVolume * 1000)) / 10
        gmVperData = float(int(gmVolume / totalVolume * 1000)) / 10
        csfVperData = float(int(csfVolume / totalVolume * 1000)) / 10
        wmVper = str(wmVperData) + "%"
        gmVper = str(gmVperData) + "%"
        csfVper = str(csfVperData) + "%"
        if wmVperData < reference["wmPer"][0]:
            brainDignose += "白质体积占比减少;"
        elif wmVperData > reference["wmPer"][1]:
            brainDignose += "白质体积占比增加;"
        if gmVperData < reference["gmPer"][0]:
            brainDignose += "灰质体积占比减少;"
        elif gmVperData > reference["gmPer"][1]:
            brainDignose += "灰质体积占比增加;"
        if csfVperData < reference["csfPer"][0]:
            brainDignose += "脑脊液体积占比减少;"
        # BUG FIX: original tested wmVperData against the CSF upper bound.
        elif csfVperData > reference["csfPer"][1]:
            brainDignose += "脑脊液体积占比增加;"
        if brainDignose == "":
            brainDignose += "未见明显异常"
        VolumeDistributionImagePath = os.path.join(tempDir, "volume.png")
        drawDis(u=stdDistribution["NCgmMeanPer"],
                subject=gmVperData,
                xlabel="灰质体积占比 / % ",
                sig=stdDistribution["NCgmStdPer"],
                u2=stdDistribution['ADgmMeanPer'],
                sig2=stdDistribution['ADgmStdPer'],
                savePath=VolumeDistributionImagePath)

    # ---- hippocampus --------------------------------------------------
    if hippoPath is not None:
        hippoImage = nib.load(hippoPath)
        # NOTE(review): assumes the first axis spans 182 voxels with the
        # midline at index 91 (MNI space) — confirm for other templates.
        leftHippo = hippoImage.get_fdata()[0:91, :, :]
        rightHippo = hippoImage.get_fdata()[91:, :, :]
        leftVolume = np.sum(leftHippo.flatten())
        rightVolume = np.sum(rightHippo.flatten())
        if leftVolume < reference["leftVolume"][0]:
            leftVolumeMark = "↓"
            hippoDignose += "左侧海马体积减少;"
        elif leftVolume > reference["leftVolume"][1]:
            leftVolumeMark = "↑"
            hippoDignose += "左侧海马体积增加;"
        if rightVolume < reference["rightVolume"][0]:
            rightVolumeMark = "↓"
            hippoDignose += "右侧海马体积减少;"
        elif rightVolume > reference["rightVolume"][1]:
            rightVolumeMark = "↑"
            hippoDignose += "右侧海马体积增加;"
        if hippoDignose == "":
            hippoDignose += "未见明显异常"
        hippoImagePngPath = os.path.join(tempDir, "hippoimage.png")
        drawRoi(originImagePath, hippoPath, hippoImagePngPath)

    # ---- document assembly --------------------------------------------
    path = os.path.dirname(os.path.realpath(__file__))
    doc = DocxTemplate(os.path.join(path, "./Report.docx"))
    sd = doc.new_subdoc()
    rows = 30
    cols = 4
    table = sd.add_table(rows=rows, cols=cols, style="mystyle")
    cells = table.rows[0].cells
    cells[0].text = "指标"
    cells[1].text = "值"
    cells[2].text = "参考范围"
    cells[3].text = "备注"
    if featureDict is not None:
        # Fill up to 29 feature rows with value / NC reference / ↑↓ remark.
        referencePath = os.path.join(path, "feature.csv")
        with open(referencePath, 'r') as f:
            refData = csv.DictReader(f)
            refDicts = [row for row in refData]
        i = 1
        for ref in refDicts:
            if i > 29:
                break
            featureName = ref['feature']
            featureValue = float(featureDict[featureName])
            low = float(ref['NC-mean']) - float(ref['NC-std'])
            high = float(ref['NC-mean']) + float(ref['NC-std'])
            refDataF = str(round(low, 2)) + "~" + str(round(high, 2))
            remark = ""
            if featureValue < low:
                remark = "↓"
            elif featureValue > high:
                remark = "↑"
            table.cell(i, 0).text = featureName
            table.cell(i, 1).text = str(round(featureValue, 2))
            table.cell(i, 2).text = str(refDataF)
            table.cell(i, 3).text = remark
            i += 1

    def _img(image_path):
        # Empty string keeps the template key present when the image was
        # never generated (section skipped).
        if image_path:
            return InlineImage(doc, image_path, width=Mm(130))
        return ""

    context = {
        'SubjectName': "Test Subject",
        "VolumeDistributionImage": _img(VolumeDistributionImagePath),
        "HippoImage": _img(hippoImagePngPath),
        "HippoDistributionImage": _img(hippoVolumeImagePath),
        "Volume": Volume,
        "SurfaceArea": SurfaceArea,
        "AreaVolume": AreaVolume,
        "Median": Median,
        "Mean": Mean,
        "wmVolume": wmVolume,
        "gmVolume": gmVolume,
        "csfVolume": csfVolume,
        "totalVolume": totalVolume,
        "wmVper": wmVper,
        "gmVper": gmVper,
        "csfVper": csfVper,
        "leftVolume": leftVolume,
        "rightVolume": rightVolume,
        'mytable': sd,
        "SurfaceAreaMark": SurfaceAreaMark,
        "AreaVolumeMark": AreaVolumeMark,
        "MedianMark": MedianMark,
        "MeanMark": MeanMark,
        "leftVolumeMark": leftVolumeMark,
        "rightVolumeMark": rightVolumeMark,
        "brainDignose": brainDignose,
        "hippoDignose": hippoDignose,
        "rightVolumeRef": str(reference["rightVolume"][0]) + "~" +
                          str(reference["rightVolume"][1]),
        "leftVolumeRef": str(reference["leftVolume"][0]) + "~" +
                         str(reference["leftVolume"][1]),
    }
    doc.render(context)
    doc.save(os.path.realpath(
        os.path.join(tempDir, os.path.basename(originImagePath) + ".docx")))
def main():
    """PGS report generator entry point.

    Parses the pipeline outputs (patient info sheet, cnv calls, QC stats,
    graphs) and writes three artifacts into --out_dir: a CNV graph .docx,
    a sex-info .xlsx, and the rendered report .docx.

    Fixes over the original: the data_sts file handle was never closed
    (now a ``with`` block), and the report-date zero padding uses a format
    spec instead of manual string concatenation.
    """
    ### MARK1 parse command-line arguments
    parser = argparse.ArgumentParser(
        prog='PGS_report',
        description=
        'Save PGS project\'s graph (.png) file to a (.docx) word file and sex info file to an excel (.xlsx) file and write a report based on word template to a (.docx) file. '
    )
    parser.add_argument(
        '--project_type',
        action="store",
        required=False,
        default="PGS",
        choices=["PGS", "ChromInst", "ONCPGD", "ONPGS", "CPGD", "IBPGS"],
        help="The Project type. [default=PGS]")
    parser.add_argument('--project_dir',
                        action="store",
                        required=True,
                        help='The project output directory')
    parser.add_argument('--bin_size',
                        action="store",
                        required=True,
                        help='The bin size, like 1000K. ')
    parser.add_argument(
        '--cnv_file',
        action="store",
        required=True,
        help=
        'The call cnv file created by pipeline. It is used to create the sex info excel file and PGS test results are extracted from it.'
    )
    parser.add_argument('--patient_info',
                        action="store",
                        required=True,
                        help='The patient info txt file')
    parser.add_argument(
        '--out_dir',
        action="store",
        required=True,
        help='The output graph/info/report files\'s directory ')
    parser.add_argument('--data_sts',
                        action="store",
                        required=True,
                        help="data.sts file")
    args = parser.parse_args()

    project_type = args.project_type
    project_dir = os.path.abspath(args.project_dir)
    bin_size = args.bin_size
    cnv_file = os.path.abspath(args.cnv_file)
    patient_info = os.path.abspath(args.patient_info)
    out_dir = os.path.abspath(args.out_dir)
    data_sts = os.path.abspath(args.data_sts)

    ### MARK2 parse the input files into variables
    logger = create_logging('PGS report logger')

    ## MARK2.1 parse the submission sheet (patient_info)
    dict_patient_info, dict_sample_info, sample_barcodes = parse_patient_info(
        patient_info)
    logger.info("DONE:parse patient info")

    # MARK2.1.1 replace placeholder missing values with empty strings
    for (k, v) in dict_patient_info.items():
        if v in MISSING_VALUES:
            dict_patient_info[k] = ''
    logger.info("DONE:clean patient info")

    # read data.sts (qc description): column 10 holds the QC value,
    # keyed by column 0; blank lines are skipped.
    dict_qc = {}
    # FIX: the original never closed fh_sts; 'with' guarantees closure.
    with open(data_sts) as fh_sts:
        for line in fh_sts:
            if line.split():
                list_sts = line.rstrip().split("\t")
                dict_qc[list_sts[0]] = list_sts[10]
    logger.info("DONE:parse data.sts")

    ## MARK2.2 parse cnv results
    result_barcodes, dict_result, dict_result_gender = parse_cnv_file(
        cnv_file, dict_qc)
    logger.info("DONE:parse cnv file")

    ## MARK2.3 locate the cnv graphs
    dict_red_blue_png_no_XY = parse_graph(project_dir, 'graph',
                                          'with_chrID_no_XY', result_barcodes,
                                          bin_size)
    dict_red_blue_png_XY = parse_graph(project_dir, 'graph',
                                       'with_chrID_with_XY', result_barcodes,
                                       bin_size)
    dict_colorful_png_XY = parse_graph(project_dir, 'graph1',
                                       'with_chrID_with_XY', result_barcodes,
                                       bin_size)
    logger.info("DONE:parse png graph")

    ### MARK3 derived values
    ## MARK (2018/3/1) merge submitted and result barcodes into the
    ## reporting barcode list.
    report_barcodes = gen_report_barcodes(sample_barcodes, result_barcodes)

    ## MARK3.1 project name, hospital name, woman name, logo flag
    sample_sheet_ID = dict_patient_info["ProjectID"]
    if sample_sheet_ID in MISSING_VALUES:
        logger.error("Project ID is missing!")
        exit(1)
    array_sample_sheet_ID = sample_sheet_ID.split('_')
    hospital_ID = array_sample_sheet_ID[3]
    woman_name = dict_patient_info['WomanName']
    if 'Control' in sample_sheet_ID or 'control' in sample_sheet_ID:
        woman_name = 'Control'
    if_logo = dict_patient_info['Template']

    ## MARK3.2 full output base name
    out_name = "Project_" + sample_sheet_ID + u"(" + woman_name + u")"
    if woman_name == '':
        out_name = "Project_" + sample_sheet_ID

    ### MARK4 outputs
    ## MARK4.1 the full CNV graph document
    out_graph = out_dir + '/' + out_name + u"CNV全图.docx"
    document = Document()
    for barcode in report_barcodes:
        png_fullpath = dict_colorful_png_XY[barcode]
        if os.path.exists(png_fullpath):
            document.add_picture(png_fullpath, width=Inches(6.35))
        else:
            log = "save graph file : " + png_fullpath + " does not exist!"
            logger.warning(log)
    document.save(out_graph)
    logger.info("DONE:save graph file")

    ## MARK4.2 the sex-info workbook
    out_info = out_dir + '/' + out_name + "info.xlsx"
    wb = Workbook()
    ws = wb.active
    ws.title = "info"
    excel_row = 1
    for barcode in report_barcodes:
        ws.cell(row=excel_row, column=1, value=barcode)
        ws.cell(row=excel_row, column=2,
                value=dict_sample_info.get(barcode, barcode))
        ws.cell(row=excel_row, column=3, value=dict_result_gender[barcode])
        excel_row += 1
    ws = adjustColumnWidth(ws)
    wb.save(out_info)
    logger.info("DONE:save sex info xlsx file")

    ## MARK4.3 the report
    ## report name (per project type)
    if project_type == 'ONCPGD' or project_type == 'ONPGS':
        project_type_name = "24h-胚胎染色体拷贝数检测报告单"
    elif project_type == 'CPGD':
        project_type_name = "MALBAC-PGD™ 染色体病胚胎植入前遗传学诊断报告单"
    elif project_type == 'PGS' or project_type == 'IBPGS':
        project_type_name = "胚胎植入前遗传学筛查(PGS)检测报告单"
    else:
        project_type_name = "ChromInst 9h-胚胎染色体拷贝数检测报告单"
    out_report = out_dir + '/' + out_name + project_type_name + ".docx"

    ## report template (per project type / logo flag)
    if project_type == 'ONCPGD' or project_type == 'ONPGS':
        report_temp = ONCPGD_REPORT_TEMPLATE
    elif project_type == 'CPGD':
        report_temp = CPGD_REPORT_TEMPLATE
    else:
        if if_logo == "Yes" or if_logo == "yes":
            report_temp = PGS_REPORT_TEMPLATE
        else:
            report_temp = PGS_REPORT_TEMPLATE_WITHOUT_LOGO

    # MARK4.3.1 per-hospital customisations
    # some hospitals require the hospital name to be omitted
    if hospital_ID in HOSPITAL_ID_WITHOUT_HOSPITAL_NAME:
        dict_patient_info["SubmissionOrganization"] = ''
    # for ONCPGD/ONPGS the biopsy date goes into the submission-date slot
    if project_type == 'ONCPGD' or project_type == 'ONPGS':
        dict_patient_info["SubmissionDate"] = dict_patient_info["BiopsyDate"]

    ### MARK4.3.4 copy the template, then render over it
    shutil.copyfile(report_temp, out_report)
    ReportTML = DocxTemplate(out_report)

    ## submission info: report date (zero-padded via format spec)
    now = datetime.datetime.now()
    report_date = u"{}年{:02d}月{:02d}日".format(now.year, now.month, now.day)
    context = dict_patient_info
    context['ReportDate'] = report_date

    ## karyotype results
    result = []
    for barcode in report_barcodes:
        tmp_dict = {
            'sample_id': dict_sample_info.get(barcode, barcode),
            'sample_barcode': barcode,
            'test_result': dict_result[barcode]
        }
        result.append(tmp_dict)
    context['result'] = result

    ## pictures: choose the red/blue graph variant per barcode
    dict_report_png = {}
    for barcode in result_barcodes:
        red_blue_no_XY = dict_red_blue_png_no_XY[barcode]
        red_blue_XY = dict_red_blue_png_XY[barcode]
        if dict_result_gender[barcode] == "XX" or \
                dict_result_gender[barcode] == "XY":
            report_png = red_blue_no_XY
        elif dict_result[barcode] == "N/A":
            report_png = red_blue_no_XY
        else:
            report_png = red_blue_XY
        dict_report_png[barcode] = report_png

    subdoc_picture = ReportTML.new_subdoc()
    for barcode in report_barcodes:
        doc_png = dict_report_png[barcode]
        if os.path.exists(doc_png):
            subdoc_picture.add_picture(doc_png, width=Inches(6.1))
        else:
            log = "save report file: " + doc_png + " does not exist!"
            logger.warning(log)
    context['subdoc_picture'] = subdoc_picture

    # render and save: report done
    ReportTML.render(context)
    ReportTML.save(out_report)
    logger.info("DONE:save report file")