# Interactive generator for a lab-report .docx: prompts the user for the
# report metadata, embeds the contents of a source-code file plus a test
# screenshot, and saves the result as 'test1.docx'.
# Relies on python-docx names (Document, Inches) imported elsewhere.
document = Document()
NomerRabot = input("Nomer work: ")
NomerGroop = input("Nomer group: ")
Prepod = input("Teacher: ")
Student = input("Student: ")
NameWork = input("Name work: ")
Goal = input("Goal: ")
Code = input("Code: ")                      # path to the source-code file to embed
PostanovkaZadachi = input("Zadacha: ")      # read but never written to the doc (kept for compatibility)
Test = input("Test: ")                      # path to the screenshot image
End = input("End: ")
Information = input("Information: ")

# BUG FIX: the original `for line in file: Text = line` re-bound Text on every
# iteration, so only the LAST line of the code file survived (and Text was
# undefined for an empty file). Read the whole file instead.
with open(Code, "r") as file:
    Text = file.read()

paragraph = document.add_paragraph('Номер отчета: ' + NomerRabot)
paragraph = document.add_paragraph('Номер группы: ' + NomerGroop)
paragraph = document.add_paragraph('Преподаватель: ' + Prepod)
paragraph = document.add_paragraph('Студент: ' + Student)
paragraph = document.add_paragraph('Название работы: ' + NameWork)
paragraph = document.add_paragraph('Цель работы: ' + Goal)
paragraph = document.add_paragraph('Код: ' + Text)
paragraph = document.add_paragraph('Тест: ')
document.add_picture(Test, width=Inches(1.0))
paragraph = document.add_paragraph('Вывод: ' + End)
paragraph = document.add_paragraph('Источники: ' + Information)
document.save('test1.docx')
def writedocxwithrealxls(file_path, filename, orders):
    """Render delivery notes to '<file_path><filename>.docx'.

    For each order in *orders* a 16-column layout table is built with two
    mirrored halves (columns 0-6 = original note, columns 9-15 = its copy;
    7-8 act as a gutter), each half holding title, party details, driver
    message, a nested product-detail grid, totals, tax info and signature
    cells. One page break is added per order.

    Parameters
    ----------
    file_path : str — directory prefix for the output file (concatenated
        directly, so it should end with a path separator).
    filename : str — output file name without the '.docx' extension.
    orders : iterable — each item is an indexable record; from the accesses
        below, item[0]..item[6] hold header strings, item[8] a sequence of
        6-field product rows, item[9] totals, item[10] tax text, item[11]
        buyer/seller labels. Exact schema defined by the caller —
        TODO confirm against the producer of `orders`.
    """
    document = Document()
    # Base font for the whole document.
    style = document.styles['Normal']
    font = style.font
    font.name = 'Times New Roman'
    font.size = Pt(9)
    # A4 landscape with half-inch margins.
    for section in document.sections:
        section.orientation = 1  # 1 is LANDSCAPE, 0 is PORTRAIT
        section.page_width = Mm(297)  # for A4 Paper
        section.page_height = Mm(210)
        section.left_margin = Inches(0.5)
        section.right_margin = Inches(0.5)
        section.top_margin = Inches(0.5)
        section.bottom_margin = Inches(0.5)
    for item in orders:
        table = document.add_table(rows=0, cols=16)
        # Column widths; columns 7-8 are a narrow gutter between the two copies.
        table.columns[0].width = Inches(0.45)
        table.columns[1].width = Inches(1.25)
        table.columns[2].width = Inches(1.65)
        table.columns[3].width = Inches(0.50)
        table.columns[4].width = Inches(0.55)
        table.columns[5].width = Inches(0.65)
        table.columns[6].width = Inches(0.75)
        table.columns[7].width = Inches(0.05)
        table.columns[8].width = Inches(0.05)
        table.columns[9].width = Inches(0.45)
        table.columns[10].width = Inches(1.25)
        table.columns[11].width = Inches(1.65)
        table.columns[12].width = Inches(0.50)
        table.columns[13].width = Inches(0.55)
        table.columns[14].width = Inches(0.65)
        table.columns[15].width = Inches(0.75)
        # Delivery Notes Title
        row_one = table.add_row().cells
        row_one[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
        row_one[0].merge(row_one[6])
        row_one[0].paragraphs[0].add_run(item[0][0]).bold = True
        # Copy
        row_one[9].alignment = WD_ALIGN_PARAGRAPH.CENTER
        row_one[9].merge(row_one[15])
        row_one[9].paragraphs[0].add_run(item[0][0]).bold = True
        # Distributor Name, Customer Name, Order Number
        row_two = table.add_row().cells
        row_two[0].merge(row_two[1])
        row_two[0].text = item[1][0]
        row_two[0].paragraphs[0].add_run('\n' + item[2][0])
        row_two[2].text = item[1][1]
        row_two[2].paragraphs[0].add_run('\n' + item[2][1])
        row_two[2].paragraphs[0].add_run('\n' + item[3][0])
        row_two[2].paragraphs[0].add_run('\n' + item[4][0])
        row_two[3].merge(row_two[6])
        row_two[3].text = item[1][2]
        row_two[3].paragraphs[0].add_run('\n' + item[2][2])
        row_two[3].paragraphs[0].add_run('\n' + item[3][1])
        row_two[3].paragraphs[0].add_run('\n' + item[4][1])
        row_two[3].paragraphs[0].add_run('\n' + item[5][0])
        # Copy
        row_two[9].merge(row_two[10])
        row_two[9].text = item[1][0]
        row_two[9].paragraphs[0].add_run('\n' + item[2][0])
        row_two[11].text = item[1][1]
        row_two[11].paragraphs[0].add_run('\n' + item[2][1])
        row_two[11].paragraphs[0].add_run('\n' + item[3][0])
        row_two[11].paragraphs[0].add_run('\n' + item[4][0])
        row_two[12].merge(row_two[15])
        row_two[12].text = item[1][2]
        row_two[12].paragraphs[0].add_run('\n' + item[2][2])
        row_two[12].paragraphs[0].add_run('\n' + item[3][1])
        row_two[12].paragraphs[0].add_run('\n' + item[4][1])
        # NOTE(review): asymmetric with the original half, where item[5][0]
        # goes into row_two[3]; here it lands in row_two[0]. Looks like a
        # copy/paste slip — should this be row_two[12]? Confirm before changing.
        row_two[0].paragraphs[0].add_run('\n' + item[5][0])
        # Driver Message
        row_seven = table.add_row().cells
        row_seven[0].merge(row_seven[6])
        row_seven[0].text = item[6][0]
        # Copy
        row_seven[9].merge(row_seven[15])
        row_seven[9].text = item[6][0]
        # Product Detail Title — nested 6-column grid inside each half.
        row_nine = table.add_row().cells
        row_nine[0].merge(row_nine[6])
        row_table1 = row_nine[0].add_table(rows=0, cols=6)
        row_table1.style = 'TableGrid'
        row_table1.columns[0].width = Inches(0.45)
        row_table1.columns[1].width = Inches(2.0)
        row_table1.columns[2].width = Inches(0.50)
        row_table1.columns[3].width = Inches(0.55)
        row_table1.columns[4].width = Inches(0.65)
        row_table1.columns[5].width = Inches(0.75)
        row_table_cells1 = row_table1.add_row().cells
        row_table_cells1[0].paragraphs[0].add_run('Code').bold = True
        row_table_cells1[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
        row_table_cells1[1].alignment = WD_ALIGN_PARAGRAPH.CENTER
        row_table_cells1[1].paragraphs[0].add_run('Description').bold = True
        row_table_cells1[2].paragraphs[0].add_run('UOM').bold = True
        row_table_cells1[3].paragraphs[0].add_run('QTY').bold = True
        row_table_cells1[4].paragraphs[0].add_run('Price').bold = True
        row_table_cells1[5].paragraphs[0].add_run('Amount').bold = True
        # Copy
        row_nine[9].merge(row_nine[15])
        row_table2 = row_nine[9].add_table(rows=0, cols=6)
        row_table2.style = 'TableGrid'
        row_table2.columns[0].width = Inches(0.45)
        row_table2.columns[1].width = Inches(2.0)
        row_table2.columns[2].width = Inches(0.50)
        row_table2.columns[3].width = Inches(0.55)
        row_table2.columns[4].width = Inches(0.65)
        row_table2.columns[5].width = Inches(0.75)
        row_table_cells2 = row_table2.add_row().cells
        row_table_cells2[0].paragraphs[0].add_run('Code').bold = True
        row_table_cells2[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
        row_table_cells2[1].alignment = WD_ALIGN_PARAGRAPH.CENTER
        row_table_cells2[1].paragraphs[0].add_run('Description').bold = True
        row_table_cells2[2].paragraphs[0].add_run('UOM').bold = True
        row_table_cells2[3].paragraphs[0].add_run('QTY').bold = True
        row_table_cells2[4].paragraphs[0].add_run('Price').bold = True
        row_table_cells2[5].paragraphs[0].add_run('Amount').bold = True
        for products in item[8]:
            # Product Detail — .replace('.0','') strips a trailing .0 left by
            # float-typed spreadsheet cells.
            row_product1 = row_table1.add_row().cells
            row_product1[0].text = str(products[0]).replace('.0', '')
            row_product1[1].text = str(products[1])
            row_product1[2].text = str(products[2])
            row_product1[3].text = str(products[3]).replace('.0', '')
            row_product1[4].text = str(products[4]).replace('.0', '')
            row_product1[5].text = str(products[5]).replace('.0', '')
            # Copy
            row_product2 = row_table2.add_row().cells
            row_product2[0].text = str(products[0]).replace('.0', '')
            row_product2[1].text = str(products[1])
            row_product2[2].text = str(products[2])
            row_product2[3].text = str(products[3]).replace('.0', '')
            row_product2[4].text = str(products[4]).replace('.0', '')
            row_product2[5].text = str(products[5]).replace('.0', '')
        # Total Products
        row_eleven1 = row_table1.add_row().cells
        row_eleven1[0].text = ''
        row_eleven1[1].text = item[9][0]
        row_eleven1[2].text = ''
        row_eleven1[3].text = str(item[9][1]).replace('.0', '')
        row_eleven1[4].text = ''
        row_eleven1[5].text = str(item[9][2]).replace('.0', '')
        # Copy
        row_eleven2 = row_table2.add_row().cells
        row_eleven2[0].text = ''
        row_eleven2[1].text = item[9][0]
        row_eleven2[2].text = ''
        row_eleven2[3].text = str(item[9][1]).replace('.0', '')
        row_eleven2[4].text = ''
        row_eleven2[5].text = str(item[9][2]).replace('.0', '')
        # Tax Information
        row_twelve = table.add_row().cells
        row_twelve[0].merge(row_twelve[6])
        row_twelve[0].text = item[10][0]
        # Copy
        row_twelve[9].merge(row_twelve[15])
        row_twelve[9].text = item[10][0]
        # Three empty spacer rows before the signature row.
        row_thirteen = table.add_row().cells
        row_fourteen = table.add_row().cells
        row_fifteen = table.add_row().cells
        # Buyer, Seller
        row_sixteen = table.add_row().cells
        row_sixteen[0].merge(row_sixteen[2])
        row_sixteen[0].text = item[11][0]
        row_sixteen[4].merge(row_sixteen[6])
        row_sixteen[4].text = item[11][1]
        # Copy
        row_sixteen[9].merge(row_sixteen[11])
        row_sixteen[9].text = item[11][0]
        row_sixteen[13].merge(row_sixteen[15])
        row_sixteen[13].text = item[11][1]
        # One order per page.
        document.add_page_break()
    document.save('%s%s.docx' % (file_path, filename))
# Script chunk: parse a locally saved HTML article with `newspaper`, download
# its top image, and append title + image + body text to an already-open
# python-docx `document`. The names s_file_name, s_absolute_path, document,
# s_docx_absl_path and s_output_filename are defined elsewhere in the file —
# presumably set by an enclosing loop over input files (TODO confirm).
print("Start parsing " + s_file_name + ":")
with open(s_absolute_path) as fp:
    html = fp.read()
# newspaper needs *some* URL even when the HTML is supplied directly.
article = newspaper.Article(url='http://example.com/test-url')
article.set_html(html)
article.parse()
article.nlp()
s_article_title = article.title
# Fetch the article's top image into memory so add_picture can consume it.
# NOTE(review): no timeout and no status check on the HTTP request — a bad
# top_image URL would raise or embed garbage; consider hardening.
response = requests.get(article.top_image, stream=True)
b_image = io.BytesIO(response.content)
s_article_text = article.text
# add article-title and (if available) -subtitle to docx
document.add_heading(s_article_title, level=2)
#document.add_heading(s_article_sub_title, level=2)
document.add_picture(b_image, width=Inches(3.25))
# add text content (paragraph) to docx
paragraph = document.add_paragraph(s_article_text)
document.add_page_break()
#-------------------------------------------------------------------------------------
# save it
print("Save document")
document.save(s_docx_absl_path + "/" + s_output_filename)
def get_detail():
    """Convert pre-scraped pest-and-disease articles into .docx and .html files.

    Reads 'tv_final.1.txt' (one Python-literal record per line: from the
    indexing below, record[0] is a relative URL, record[1] the title,
    record[2] an HTML body — TODO confirm against the producer), writes one
    Word document and one HTML file per record, and downloads any embedded
    <mip-img> images via the sibling helper d_load().

    Side effects: creates output folders on first run; appends problem
    records to 'tv_error.txt' / 'tv_img.txt' (currently only opened/closed —
    the writes live in the commented-out scraping block below).
    """
    f_error = open('tv_error.txt', 'a+', encoding='utf-8')
    f_img = open('tv_img.txt', 'a+', encoding='utf-8')
    # f_final=open('tv_final.txt','a+',encoding='utf-8')
    root = 'http://m.1988.tv'
    # Output folder names (Chinese segment means "pest and disease").
    wordrootname = '1988_病虫害word'
    htmlrootname = '1988_病虫害html'
    imgrootname = '1988_病虫害img'
    if not os.path.exists(wordrootname):
        os.mkdir(wordrootname)
    if not os.path.exists(htmlrootname):
        os.mkdir(htmlrootname)
    if not os.path.exists(imgrootname):
        os.mkdir(imgrootname)
    for line in open('tv_final.1.txt', 'r', encoding='utf-8'):
        # SECURITY NOTE(review): eval() on file content executes arbitrary
        # code if the file is ever tampered with; ast.literal_eval would be
        # safer — flagged, not changed here.
        line = eval(line)
        # Strip characters that are illegal in Windows file names.
        fname = line[1].replace('?', '').replace('|', '').replace(
            '"', '').replace('>', '').replace('<', '').replace('*', '').replace(
                '*', '').replace('\\', '').replace(':', '').replace('/', '')
        print(fname)
        # The URL prefix length differs between the two site sections, hence
        # the different slice offsets used to build a unique file suffix.
        if 'baik' in line[0]:
            path1 = './' + wordrootname + '/' + fname + '_' + line[0][
                7:].replace('.html', '')
            path2 = './' + htmlrootname + '/' + fname + '_' + line[0][
                7:].replace('.html', '')
        else:
            path1 = './' + wordrootname + '/' + fname + '_' + line[0][
                14:].replace('.html', '')
            path2 = './' + htmlrootname + '/' + fname + '_' + line[0][
                14:].replace('.html', '')
        # print(path)
        # if os.path.exists(path1+'.docx'):
        #     print(path1+'已存在')
        #     continue
        # url=root+line[0]
        # url='http://www.tv.com/newshow.asp?id=50163'
        # line[1]='白菜要丰产施肥必须有技法'
        # print(url)
        doc = Document()
        # Legacy live-scraping path, kept disabled (data now comes from the
        # pre-scraped file instead):
        # for i in range(3):
        #     try:
        #         req=requests.get(url,timeout=15)
        #         req.encoding='utf-8'
        #         bsObj=BeautifulSoup(req.text,'html.parser')
        #         content=bsObj.find('div',class_='content')
        #         img_list=content.find_all('img')
        #         break
        #     except:
        #         pass
        # if content==None :
        #     print(url+' no data')
        #     f_error.write(str(line)+'\n')
        #     continue
        # if len(img_list)>0:
        #     print(url+' has images')
        #     f_img.write(str(line)+'\n')
        #     continue
        line[1] = line[1].replace('\r\n', '').strip()
        # '</p' -> '\n</p' keeps paragraph breaks when get_text() flattens.
        content = BeautifulSoup(line[2].replace('</p', '\n</p'), 'html.parser')
        img_list = content.find_all('mip-img')
        # f_final.write(str(line+[str(content)])+'\n')
        # continue
        doc.add_heading(line[1], level=0)
        skin_text = str(content)
        for im in img_list:
            src = im.attrs['src']
            print(src)
            # Download the image and get its local file name.
            newSrc = d_load(src, imgrootname)
            # Rewrite the image reference to the local copy (for the HTML output).
            skin_text = skin_text.replace(src,
                                          '../' + imgrootname + '/' + newSrc)
            # Best-effort embed in the Word doc; unsupported formats are skipped.
            try:
                doc.add_picture('./' + imgrootname + '/' + newSrc,
                                width=Inches(3))
            except:
                pass
        # Write the plain-text body.
        # content_word=remove_control_characters(str(content.get_text()))
        doc.add_paragraph(u'%s' % (content.get_text()))
        doc.save(path1 + '.docx')
        # Write the HTML version.
        fp = open(path2 + '.html', 'w', encoding='utf-8')
        fp.write(
            '<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"></head><body><h1>'
            + str(line[1]) + '</h1>' + str(skin_text) + '</body></html>')
        fp.close()
        # break
    f_error.close()
    f_img.close()
#!usr/bin/env/python3
#-*- coding: utf-8 -*-
__author__ = 'Michael Shen'

# Minimal python-docx demo: one paragraph of text plus one local image.
# Requires the third-party package:  pip install python-docx
from docx import Document
from docx.shared import Inches

string = '文字内容'
images = 'haha.jpg'  # image file stored locally

# Build the document: a single paragraph, then the picture at 2" wide,
# then write everything to disk.
doc = Document()
doc.add_paragraph(string)
doc.add_picture(images, width=Inches(2))
doc.save('word文档.docx')
def browse_folder(self):
    """Ask the user for a save location and write the résumé as a .docx.

    Collects all fields from the Qt form widgets on `self`, renders them
    into a python-docx document, appends a CSV-ish line to the local
    'resumaker_database.rdb' index file, and saves the document at the
    chosen path. Does nothing if the file dialog is cancelled.
    """
    from datetime import date  # local import: only needed for the age fix below

    directory = QtWidgets.QFileDialog.getSaveFileName(
        self, "Сохранение резюме",
        getenv('USERPROFILE') + r"\Desktop",
        'Документ Word (*.docx)')
    if directory[0] != "":
        # Create an empty file first, then reopen it as the working document.
        resume_savefile = Document()
        resume_savefile.save(directory[0])
        resume = Document(docx=directory[0])

        # Pull every field off the form widgets.
        sex = self.sex_comboBox.currentIndex()
        sex_text = self.sex_comboBox.currentText()  # only used by the commented-out export below
        second_name = self.secondname.displayText()
        name = self.name.displayText()
        patronymic = self.patronymic.displayText()
        post = self.post.displayText()
        phone_number = self.phoneNumber.displayText()
        professional_skills = self.professional_skills.toPlainText()
        email = self.email.displayText()
        birth_date = self.birthDate.date().toPyDate()
        personal_qualities = self.personal_qualities.toPlainText()
        edu_level = self.edu_level.currentText()
        experience = self.experience.toPlainText()

        # Reformat ISO 'YYYY-MM-DD' into 'DD.MM.YYYY'.
        birth_year = int(str(birth_date)[0:4])
        birth_month = str(birth_date)[5:7]
        birth_day = str(birth_date)[8:10]
        birth_date = birth_day + "." + birth_month + "." + str(birth_year)
        # BUG FIX: the year 2019 was hardcoded, so the computed age went
        # stale; use the current year instead.
        age = str(date.today().year - birth_year)
        if sex == 1:
            sex = "Мужчина, "
        else:
            sex = "Женщина, "

        # Document body.
        resume.add_heading('Резюме', level=0)
        resume.add_heading(text=str(second_name) + " " + str(name) + " " +
                           str(patronymic), level=1)
        resume.add_heading(sex + age + " лет, дата рождения: " + birth_date,
                           level=1)
        # NOTE(review): photopath is not defined in this method — presumably
        # a module-level value set by a photo-picker; confirm.
        if photopath[0] != "":
            resume.add_picture(photopath[0], width=Inches(1.25))
        p = resume.add_paragraph(text="", style=None)
        p.add_run('Должность: ').bold = True
        p.add_run(post)
        if phone_number != "":
            mobile_phone = resume.add_paragraph(text="", style=None)
            mobile_phone.add_run('Моб. телефон: ').bold = True
            mobile_phone.add_run(phone_number)
        if email != "":
            em = resume.add_paragraph(text="", style=None)
            em.add_run('E-mail: ').bold = True
            em.add_run(email)
        edu = resume.add_paragraph(text="", style=None)
        edu.add_run('Уровень образования: ').bold = True
        edu.add_run(edu_level)
        if personal_qualities != "":
            personal_qualities_title = resume.add_paragraph(text="", style=None)
            personal_qualities_title.add_run(
                'Персональные качества: ').bold = True
            resume.add_paragraph(text=personal_qualities, style=None)
        if professional_skills != "":
            professional_skills_title = resume.add_paragraph(text="", style=None)
            professional_skills_title.add_run(
                'Профессиональные навыки: ').bold = True
            resume.add_paragraph(text=professional_skills, style=None)
        if experience != "":
            experience_title = resume.add_paragraph(text="", style=None)
            experience_title.add_run('Опыт работы:').bold = True
            resume.add_paragraph(text=experience, style=None)

        # database_file = open('resumaker_database.rdb', 'a', encoding="utf-8")
        # write_string = "\n" + name + "\t" + second_name + "\t" + post + "\t" + sex_text + "\t" + age +\
        #                "\t" + edu_level
        # database_file.write(write_string)
        # database_file.close()

        # Append an index line to the local database file.
        # NOTE(review): 'r+' raises FileNotFoundError if the file does not
        # exist yet — presumably it is shipped with the app; confirm.
        # FIX: use a context manager so the handle is closed on any error.
        with open('resumaker_database.rdb', 'r+', encoding="utf-8") as db:
            if db.read() != "":
                db.write("\n")
            db.write(name + "," + second_name + "," + post + "," +
                     age + "," + edu_level + "," + directory[0])

        resume.save(directory[0])
# Demo: colored text runs and an inline picture with python-docx.
from docx.shared import Inches
from docx.shared import RGBColor
import docx

# Create the Word document.
file = docx.Document()

# First run: set the font colour inline on the chained call (dark red).
file.add_paragraph().add_run('这里是字体颜色测试_1').font.color.rgb = RGBColor(
    167, 2, 25)

# Second run: keep a reference to the run and colour it afterwards (green).
font_1 = file.add_paragraph().add_run('这里是字体颜色测试')
font_1.font.color.rgb = RGBColor(0, 255, 0)

# Embed a local picture at 2 inches wide.
file.add_picture('213.jpg', width=Inches(2))

# Write the result to disk.
file.save('mytest2.docx')
fig.suptitle("Analisis grafico de la variable {}".format(i)) axs[0, 0].hist(valor3, bins=conj, density=True) axs[0, 0].set_title("histograma") axs[0, 0].set(ylabel="densidad") axs[0, 1].boxplot(valor3, sym='ko', whis=1.5) axs[0, 1].set_title("Diagrama de cajas") #axs[0,1].set(xlabel="intervalos", ylabel="densidad") axs[1, 0].plot(conj[:len(conj) - 1], his.cumsum(), "o-") axs[1, 0].set_title("Grafico de ojiva") axs[1, 0].set(xlabel="observaciones", ylabel="densidad acumulada") axs[1, 1].step(x, y) axs[1, 1].set_title("distribucion empirica") nombre = "grafico_variable_{}.png".format(i) plt.savefig(nombre) #time.sleep(1) proyecto.add_picture(nombre, width=Inches(6)) """ |parrafo | """ proyecto.add_paragraph( fp.escritura(i, var1=round(stats.skew(valor3), 2), var2=round(stats.kurtosis(valor3), 2), var3=round(np.percentile(valor3, 50), 2), var4=round(np.percentile(valor3, 75), 2), var5=round(np.percentile(valor3, 25), 2))) proyecto.add_page_break() else: proyecto.add_heading(
def find_best_binary_model(
        X,
        y,
        model_search_spaces,
        k_outer_fold=5,
        skip_outer_folds=[],
        k_inner_fold=5,
        skip_inner_folds=[],
        n_initial_points=5,
        n_calls=10,
        calibrated=False,
        ensemble=False,
        loss_metric='average_precision',
        peeking_metrics=[],
        report_level=11,
        size_variance_validation=20,
        skopt_func=gp_minimize,
        verbose=False,
        build_final_model=True):
    """Finds best binary calibrated classification model and optionally
    generate a report doing a nested cross validation. In the inner
    cross validation, doing a Bayesian Search, the best parameters are found.
    In the outer cross validation, the model is validated.
    Finally, the whole procedure is used for the full dataset to return
    the best possible model.

    Parameters
    ----------
    X : np.array
        Feature set.
    y : np.array
        Classification target to predict. For the moment only binary labels
        are allowed, and values are supposed to be {0, 1} or {-1, 1}
    model_search_spaces : Dict[str : List[List[skopt.Space]]
        Dict of models to try inside of the inner loops. For each model, there
        is the corresponding list of space objects to delimit where the
        parameters live, including the pipeline postprocess to make. It admits
        also an option to set undersampling_majority_class method. It admits
        two values, True or False. If True it builds an ensemble model in the
        inner loop by creating many balanced folds by using the minority class
        with a undersampling of the majority class. If using this option, it
        also admits an Int max_k_undersampling, in order to limit the number
        of splits made for this (because if the imbalance ratio is for example
        1000:1, it will create 1000 splits, which can be too much).
    k_outer_fold : int, default=5
        Number of folds for the outer cross-validation.
    skip_outer_folds : list, default=[]
        If set, list of folds to skip during the loop.
    k_inner_fold : int, default=5
        Number of folds for the inner cross-validation.
    skip_inner_folds : list, default=[]
        If set, list of folds to skip during the loop.
    n_initial_points : int, default=5
        Number of initial points to use in Bayesian Optimization.
    n_calls : int, default=10
        Number of additional calls to use in Bayesian Optimization.
    calibrated : bool, default=False
        If True, all models are calibrated using CalibratedClassifierCV
    ensemble : bool, default=False
        If True, an ensemble model is built in the inner training loop.
        Otherwise, a model fitted with the whole inner dataset will be built.
    loss_metric : str, default='average_precision'
        Metric to use in order to find best parameters in Bayesian Search.
        Options:
        - roc_auc
        - average_precision
        - neg_brier_score
        - neg_log_loss
        - histogram_width
    peeking_metrics : List[str], default=[]
        If not empty, in the report there will be a comparison between the
        metric of evaluation on the inner fold and the list of metrics in
        peeking_metrics.
    report_level : int, default=11
        If 00, no report is returned.
        If 01, plots are not included. All peeking-metrics are evaluated on
        the outer fold for each inner-fold model.
        If 10, plots are included. No evaluation of peeking-metrics on the
        outer fold for each inner-fold model.
        If 11, a full report (it can be more time consuming).
    size_variance_validation : int, default=20
        Number of samples to use to check variance of different models.
    skopt_func : callable, default=gp_minimize
        Minimization function of the skopt library to be used.
    verbose : bool, default=False
        If True, you can trace the progress in the terminal.
    build_final_model : bool, default=True
        If False, no final model is built (only the report doc is returned).
        It can be convenient during the experimental phase.

    Returns
    -------
    model : Model trained with the full dataset using the same procedure
        as in the inner cross validation.
    report_doc : Document python-docx if report_level > 0. Otherwise, None
    report_dfs : Dict of dataframes, one key for each model in
        model_search_spaces. each key, a dataframe with all inner models
        built with their params and loss_metric.
    """
    # TODO: Add new parameter information
    # Validation of inputs
    X, y = check_X_y(X, y,
                     accept_sparse=['csc', 'csr', 'coo'],
                     force_all_finite=False,
                     allow_nd=True)
    counter = Counter(y)
    if len(counter) > 2:
        raise NotImplementedError("Multilabel classification is not yet implemented")
    y_values = set(counter.keys())
    if y_values != {-1, 1} and y_values != {0, 1}:
        raise NotImplementedError("Values of target are expected to be in {0, 1} or in {-1, 1}")
    if not _validate_model_search_space(model_search_spaces):
        raise ValueError("model_search_spaces is not well formed")
    if not _validate_folds(k_outer_fold, skip_outer_folds, k_inner_fold, skip_inner_folds):
        raise ValueError("Folds parameters are not well formed")
    if not _validate_bayesian_search(n_initial_points, n_calls, skopt_func):
        raise ValueError("Bayesian search parameters are not well formed")
    if not is_supported(loss_metric):
        raise NotImplementedError(f"Loss metric {loss_metric} is not implemented.")
    if not isinstance(peeking_metrics, list):
        raise ValueError("Peeking metrics must be a list of str")
    for metric in peeking_metrics:
        if not is_supported(metric):
            raise NotImplementedError(f"Metric {metric} is not implemented.")
    if not isinstance(calibrated, bool):
        raise ValueError("calibrated must be a boolean")
    if not isinstance(ensemble, bool):
        raise ValueError("ensemble must be a boolean")
    if not isinstance(verbose, bool):
        raise ValueError("verbose must be a boolean")
    if not isinstance(build_final_model, bool):
        raise ValueError("build_final_model must be a boolean")
    if not isinstance(size_variance_validation, int):
        raise ValueError("size_variance_validation must be an int")
    if size_variance_validation < 0 or size_variance_validation > len(y):
        raise ValueError("size_variance_validation cannot be negative nor bigger than number of instances")
    if size_variance_validation > 0 and size_variance_validation < len(counter):
        raise ValueError("size_variance_validation, if not zero, cannot be less than number of classes")
    if report_level not in [0, 1, 10, 11]:
        raise ValueError("report_level must be either 0, 1, 10 or 11")
    # End of validation of inputs

    # BUG FIX: peeking_metrics defaults to a mutable [] and was mutated below
    # via .append(loss_metric) — the default list accumulated metrics across
    # calls and a caller-supplied list was modified in place. Work on a copy.
    peeking_metrics = list(peeking_metrics)

    # Calibration (CalibratedClassifierCV) requires the ensemble path.
    if calibrated:
        ensemble = True
    if loss_metric not in peeking_metrics:
        peeking_metrics.append(loss_metric)

    if report_level > 0:
        # Two documents: one for outer-fold validation, one for inner trainings;
        # they are merged at the end.
        outer_report_doc = Document()
        section = outer_report_doc.sections[0]
        section.page_width = Inches(reporting_width(report_level, peeking_metrics))
        outer_report_doc.add_heading('Report of training', 0)
        write_intro_doc(
            outer_report_doc, y, model_search_spaces, k_outer_fold,
            skip_outer_folds, k_inner_fold, skip_inner_folds,
            n_initial_points, n_calls, ensemble, calibrated, loss_metric,
            size_variance_validation, skopt_func)
        inner_report_doc = Document()
        section = inner_report_doc.sections[0]
        section.page_width = Inches(reporting_width(report_level, peeking_metrics))
    else:
        outer_report_doc = None
        inner_report_doc = None

    # Optionally hold out a small stratified sample to measure model variance.
    X_val_var = []
    y_val_var = []
    if size_variance_validation > 0:
        X, X_val_var, y, y_val_var = train_test_split(
            X, y, test_size=size_variance_validation,
            random_state=42, stratify=y)

    outer_cv = StratifiedKFold(n_splits=k_outer_fold)
    dict_inner_models = []
    list_report_dfs = []
    outer_Xs = []
    outer_ys = []
    folds_index = []
    if inner_report_doc:
        inner_report_doc.add_heading('Report of inner trainings', level=1)
    for k, (train_index, test_index) in enumerate(outer_cv.split(X, y)):
        if k not in skip_outer_folds:
            folds_index.append(k)
            if inner_report_doc:
                inner_report_doc.add_heading(
                    f'Report of inner training in fold {k} of outer Cross Validation',
                    level=2)
            # Hold-out data is only materialised when peeking-metrics are
            # evaluated on the outer fold (report levels 1 and 11).
            X_hold_out = X[test_index] if report_level in [1, 11] else []
            y_hold_out = y[test_index] if report_level in [1, 11] else []
            inner_model, model_params, model_comments, report_dfs = train_inner_model(
                X=X[train_index], y=y[train_index],
                model_search_spaces=model_search_spaces,
                X_hold_out=X_hold_out, y_hold_out=y_hold_out,
                k_inner_fold=k_inner_fold, skip_inner_folds=skip_inner_folds,
                n_initial_points=n_initial_points, n_calls=n_calls,
                ensemble=ensemble, calibrated=calibrated,
                loss_metric=loss_metric, peeking_metrics=peeking_metrics,
                verbose=verbose, skopt_func=skopt_func,
                report_doc=inner_report_doc)
            dict_inner_models.append({'model': inner_model,
                                      'params': model_params,
                                      'comments': model_comments})
            list_report_dfs.append(report_dfs)
            outer_Xs.append(X[test_index])
            outer_ys.append(y[test_index])

    if outer_report_doc:
        outer_report_doc.add_heading(
            'Report of validation of the models in the outer Cross Validation',
            level=1)
    add_plots = True if report_level > 9 else False
    report_dfs = merge_report_dfs(*list_report_dfs)
    evaluate_models(
        dict_models=dict_inner_models, Xs=outer_Xs, ys=outer_ys,
        X_val_var=X_val_var, y_val_var=y_val_var,
        folds_index=folds_index, report_doc=outer_report_doc,
        loss_metric=loss_metric, peeking_metrics=peeking_metrics,
        add_plots=add_plots, report_dfs=report_dfs)

    # After assessing the procedure, we repeat it on the full dataset:
    final_model = None
    if build_final_model:
        final_model, _, _, _ = train_inner_model(
            X=X, y=y, model_search_spaces=model_search_spaces,
            X_hold_out=[], y_hold_out=[],
            k_inner_fold=k_inner_fold, skip_inner_folds=skip_inner_folds,
            n_initial_points=n_initial_points, n_calls=n_calls,
            ensemble=ensemble, calibrated=calibrated,
            loss_metric=loss_metric, peeking_metrics=[],
            verbose=verbose, skopt_func=skopt_func, report_doc=None)
    return final_model, merge_docs(outer_report_doc, inner_report_doc), report_dfs
def generuj_rozkaz(request):
    """Django view: build the monthly travel-order .docx and return it as a
    file download named after the previous month.

    Builds up to four lettered sections (rail/bus x single-pass/leave), each
    with a table of people, applies document-wide styling and margins, then
    streams the saved file back in the HTTP response.
    """
    # Four sorted querysets: rail (klasa 2) vs bus, single pass vs leave.
    query1 = Dane.objects.filter(
        typ='przepustkę jednorazową',
        transport__contains='kolejowym w klasie 2').order_by(
            '-stopien_id', 'nazwisko')
    query2 = Dane.objects.filter(
        typ='urlop',
        transport__contains='kolejowym w klasie 2').order_by(
            '-stopien_id', 'nazwisko')
    query3 = Dane.objects.filter(
        typ='przepustkę jednorazową',
        transport__contains='autobusowym w').order_by(
            '-stopien_id', 'nazwisko')
    query4 = Dane.objects.filter(
        typ='urlop',
        transport__contains='autobusowym w').order_by(
            '-stopien_id', 'nazwisko')

    THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
    generated_rozkaz = os.path.join(THIS_FOLDER, 'demo.docx')

    document = Document()
    dodaj_naglowek(document)

    # BUG FIX: the original incremented the letter counter only in sections
    # 2-4, so when query1 was empty the first printed section started at
    # letter b). Iterate uniformly instead: bump the counter once per
    # non-empty section, which also removes the duplicated code.
    tables = []
    licznik = 0
    sekcje = [
        (query1, 'na przepustkę jednorazową – środek transportu PKP:'),
        (query2, 'na urlop – środek transportu PKP:'),
        (query3, 'na przepustkę jednorazową – środek transportu PKS:'),
        (query4, 'na urlop – środek transportu PKS:'),
    ]
    for query, naglowek in sekcje:
        if query:
            licznik += 1
            p = document.add_paragraph(switch_litery(licznik) + naglowek)
            # p.paragraph_format.left_indent = Inches (0.25)
            tables.append(dodaj_tabele(document, query))
            p.paragraph_format.space_before = Pt(12)

    # Document-wide base font.
    style = document.styles['Normal']
    font = style.font
    font.name = 'Times New Roman'
    font.size = Pt(12)

    # Fixed row height and per-column widths for every generated table.
    COLUMN_WIDTHS = {
        1: 0.1,    # 1)
        2: 1.25,   # rank
        3: 0.9,    # first name
        4: 1.5,    # surname
        5: 2.4,
        6: 0.6,
        7: 1.2,
    }
    for tbl in tables:
        for row in tbl.rows:
            row.height_rule = WD_ROW_HEIGHT.EXACTLY
            row.height = Inches(0.20)
            for j, cell in enumerate(row.cells, start=1):
                if j in COLUMN_WIDTHS:
                    cell.width = Inches(COLUMN_WIDTHS[j])

    dodaj_stopke(document)

    # Page margins.
    sections = document.sections
    for section in sections:
        section.top_margin = Cm(1.5)
        section.bottom_margin = Cm(1.59)
        section.left_margin = Cm(0.75)
        section.right_margin = Cm(1.32)

    # Save to disk, then stream the file back as a download.
    document.save(generated_rozkaz)
    # FIX: use a context manager — the original leaked the file handle.
    with open(generated_rozkaz, 'rb') as docx_file:
        response = HttpResponse(docx_file.read())
    # NOTE(review): 'text/plain' is not the docx MIME type; consider
    # 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'.
    response['Content-Type'] = 'text/plain'
    # BUG FIX: in January `month - 1` is 0 and calendar.month_name[0] is an
    # empty string; wrap around to December instead.
    previous_month = datetime.datetime.today().month - 1 or 12
    previous_month_name = calendar.month_name[previous_month]
    # Set the attachment file name to the previous month's name.
    response['Content-Disposition'] = 'attachment; filename= rozkaz"{}".docx'.format(previous_month_name)
    return response
# Demo fragment: exercises the main python-docx features — headings,
# styled runs, list/quote paragraph styles, a picture, and a table filled
# from a small record set — finishing with a page break.
document = Document()
document.add_heading('Document Title', 0)

# A paragraph mixing plain, bold, and italic runs.
p = document.add_paragraph('A plain paragraph having some ')
p.add_run('bold').bold = True
p.add_run(' and some ')
p.add_run('italic.').italic = True

document.add_heading('Heading, level 1', level=1)
document.add_paragraph('Intense quote', style='IntenseQuote')
document.add_paragraph('first item in unordered list', style='ListBullet')
document.add_paragraph('first item in ordered list', style='ListNumber')
document.add_picture('butterfly.jpg', width=Inches(1.25))

# Three-column table: header row first, then one row per record.
table = document.add_table(rows=1, cols=3)
for cell, label in zip(table.rows[0].cells, ('Qty', 'Id', 'Desc')):
    cell.text = label
recordset = [[1, "101", "Spam"], [2, "42", "Eggs"],
             [3, "631", "Spam, spam, eggs, and spam"]]
for qty, ident, desc in recordset:
    cells = table.add_row().cells
    cells[0].text = str(qty)
    cells[1].text = str(ident)
    cells[2].text = desc

document.add_page_break()
def clonarTemplateYAgregarFinding(doc, indexNros, language, jsonFinding):
    """Clone the template finding section of *doc* and fill it with one finding.

    Works directly on the underlying lxml elements (`._p` / `._tbl`) and
    inserts every cloned element with `addnext` right after the template
    title paragraph `p` — because each new element lands immediately after
    `p`, the insertions below are performed in REVERSE document order.
    Do not reorder these statements.

    Parameters
    ----------
    doc : python-docx Document containing the template section.
    indexNros : indexable — [0] index of the template title paragraph,
        [1] index of the template table, [2] index of the page-break
        paragraph, [3] flag selecting which run of the title holds the text.
    language : truthy -> English constants, falsy -> Spanish constants.
    jsonFinding : dict with keys TITLE, resourceAf, OBSERVATION,
        IMPLICATION, RECOMMENDATION (TITLE, URLS), consumed below.
    """
    nroParagraph = indexNros[0]
    nroTable = indexNros[1]
    nroPageBreak = indexNros[2]
    urls = jsonFinding['resourceAf']
    refUrls = jsonFinding['RECOMMENDATION']['URLS']
    page_break = doc.paragraphs[nroPageBreak]
    templateParag = deepcopy(doc.paragraphs[nroParagraph])
    # Finding title: the title text lives in run 9 (or 7 for the alternate
    # template variant flagged by indexNros[3]).
    runNro = 9
    if not indexNros[3]:
        runNro = 7
    templateParag.runs[runNro].text = jsonFinding['TITLE']
    # Grab the previous page break and put the new title right below it.
    page_break._p.addnext(templateParag._p)
    # Add all the content.
    new_tbl = deepcopy(doc.tables[nroTable])
    p, pb = templateParag._p, page_break._p
    new_pb = deepcopy(pb)
    p.addnext(new_pb)
    # Clone every paragraph that is part of the finding (observation, table,
    # impact, etc.) — EXCEPT the image paragraph (nroParagraph + 9), because
    # cloning it can break the formatting.
    obsTitle = deepcopy(doc.paragraphs[nroParagraph + 2])
    obsText = deepcopy(doc.paragraphs[nroParagraph + 3])
    urlTitle = deepcopy(doc.paragraphs[nroParagraph + 4])
    obsNote = deepcopy(doc.paragraphs[nroParagraph + 7])
    legendTable = deepcopy(doc.paragraphs[nroParagraph + 8])
    legendFigure = deepcopy(doc.paragraphs[nroParagraph + 10])
    impactTitle = deepcopy(doc.paragraphs[nroParagraph + 11])
    impactDesc = deepcopy(doc.paragraphs[nroParagraph + 12])
    likelihoodTitle = deepcopy(doc.paragraphs[nroParagraph + 13])
    likelihoodDesc = deepcopy(doc.paragraphs[nroParagraph + 14])
    recomendTitle = deepcopy(doc.paragraphs[nroParagraph + 15])
    recomendNote = deepcopy(doc.paragraphs[nroParagraph + 16])
    recomendText = deepcopy(doc.paragraphs[nroParagraph + 17])
    referenceTitle = deepcopy(doc.paragraphs[nroParagraph + 18])
    referenceLinkLegend = deepcopy(doc.paragraphs[nroParagraph + 19])
    # Fill in the content based on what is available.
    # Recommendation reference URLs.
    if len(refUrls) >= 1:
        for url in refUrls:
            urlRef = deepcopy(doc.paragraphs[nroParagraph + 20])
            urlRef.text = url
            p.addnext(urlRef._p)
        if len(refUrls) > 1:
            # Plural legend when there is more than one link.
            referenceLinkLegend.text = constants.enlacesRecomendacion_EN if language else constants.enlacesRecomendacion_ES
            p.addnext(referenceLinkLegend._p)
        else:
            p.addnext(referenceLinkLegend._p)
    p.addnext(referenceTitle._p)
    recomendText.text = jsonFinding['RECOMMENDATION']['TITLE']
    p.addnext(recomendText._p)
    p.addnext(recomendNote._p)
    p.addnext(recomendTitle._p)
    p.addnext(likelihoodDesc._p)
    p.addnext(likelihoodTitle._p)
    impactDesc.text = jsonFinding['IMPLICATION']
    p.addnext(impactDesc._p)
    p.addnext(impactTitle._p)
    p.addnext(legendFigure._p)
    # Add the screenshot, if there is one. The original image paragraph is
    # reused in place (cleared, not cloned) to preserve formatting.
    figureText = doc.paragraphs[nroParagraph + 9]
    figureText.clear()
    figureText.add_run()
    image_path = add_screenshot(jsonFinding)
    if image_path:
        figureText.runs[0].add_picture(image_path, width=Inches(5.33),
                                       height=Inches(4.0))
        delete_screenshot(image_path)
    para = deepcopy(figureText)
    p.addnext(para._p)
    p.addnext(legendTable._p)
    mid_tbl = deepcopy(doc.tables[nroTable + 1])
    p.addnext(mid_tbl._tbl)
    if jsonFinding['OBSERVATION']['NOTE']:
        agregarNota(obsNote, jsonFinding['OBSERVATION']['NOTE'])
    else:
        obsNote.text = ""
    p.addnext(obsNote._p)
    # Affected URLs.
    for url in urls:
        urlExample = deepcopy(doc.paragraphs[nroParagraph + 5])
        urlExample.text = url
        p.addnext(urlExample._p)
    if len(urls) == 1:
        # Singular title when there is exactly one affected URL.
        urlTitle.text = constants.urlAfectada_EN if language else constants.urlAfectada_ES
        p.addnext(urlTitle._p)
    else:
        p.addnext(urlTitle._p)
    obsText.text = jsonFinding['OBSERVATION']['TITLE']
    obsText.text += add_cves(jsonFinding)
    p.addnext(obsText._p)
    p.addnext(obsTitle._p)
    # Empty paragraph to keep the template spacing.
    p.addnext(deepcopy(doc.paragraphs[nroParagraph + 1])._p)
    p.addnext(new_tbl._tbl)
    addFindingInfo(new_tbl, language, urls)
def recover_docx(bounding_boxs, output_name, image):
    """Rebuild a .docx from OCR bounding boxes of text cells and table regions.

    Fix vs. original: the body referenced the undefined name ``outputName``
    while the parameter is ``output_name``, so the function raised NameError
    as soon as it tried to open/save the document.

    Args:
        bounding_boxs: list of boxes where, per the slicing below,
            [0]=text ('' for blank), [1]=x, [2]=y, [3]=w, [4]=h, and
            [5]=0 for a text box / non-zero for a table region.
        output_name: path of the .docx to create (if missing) and append to.
        image: full page image (numpy-style array) used to crop table regions.
    """
    line_tables = []
    i = 0
    # Sort boxes into reading order using the project-supplied comparator.
    bounding_boxs.sort(key=functools.cmp_to_key(compare_table))
    while i < len(bounding_boxs):
        if bounding_boxs[i][5] == 0:
            # Text box: group with following boxes on the same visual line.
            if bounding_boxs[i][0] == '':
                i += 1
                continue
            str_on_line = [bounding_boxs[i][0]]
            j = i + 1
            if j < len(bounding_boxs):
                # Vertical overlap = min(bottoms) - max(tops); same line when
                # the overlap is at least 40 px.
                mi = min(bounding_boxs[i][4] + bounding_boxs[i][2],
                         bounding_boxs[j][4] + bounding_boxs[j][2])
                ma = max(bounding_boxs[i][2], bounding_boxs[j][2])
            while j < len(bounding_boxs) and (mi - ma >= 40):
                if bounding_boxs[j][0] == '':
                    j += 1
                    break
                str_on_line.append(bounding_boxs[j][0])
                j += 1
                if j < len(bounding_boxs):
                    mi = min(bounding_boxs[i][4] + bounding_boxs[i][2],
                             bounding_boxs[j][4] + bounding_boxs[j][2])
                    ma = max(bounding_boxs[i][2], bounding_boxs[j][2])
            i = j
            if str_on_line:
                line_tables.append((str_on_line, 0))
        else:
            # Table region: remember its (x, y, w, h) so it can be cropped later.
            line_tables.append(((bounding_boxs[i][1], bounding_boxs[i][2],
                                 bounding_boxs[i][3], bounding_boxs[i][4]), 1))
            i += 1
    # Create the target document if it does not exist yet.
    if not os.path.exists(output_name):
        dc = Document()
        dc.save(output_name)
    document = Document(output_name)
    for row in line_tables:
        if row[1] == 0:
            # One single-row table per recovered text line, one cell per string.
            table = document.add_table(rows=1, cols=len(row[0]))
            row_cells = table.rows[0].cells
            for k, cell_text in enumerate(row[0]):
                p = row_cells[k].add_paragraph(cell_text)
                # NOTE(review): WD_TABLE_ALIGNMENT.CENTER used as a paragraph
                # alignment; it shares the numeric value of
                # WD_ALIGN_PARAGRAPH.CENTER, behavior kept as-is.
                p.alignment = WD_TABLE_ALIGNMENT.CENTER
        if row[1] == 1:
            # Crop the table region out of the page image and embed it.
            cropped_table = image[row[0][1]:row[0][1] + row[0][3],
                                  row[0][0]:row[0][0] + row[0][2]]
            # cv2.imshow('cropped_table', cropped_table)
            # cv2.waitKey(0)
            cv2.imwrite('cropped_table.jpg', cropped_table)
            table = document.add_table(rows=1, cols=1)
            row_cells = table.add_row().cells
            p = row_cells[0].add_paragraph()
            p.alignment = WD_TABLE_ALIGNMENT.CENTER
            r = p.add_run()
            # NOTE(review): shape[0] is the row count (height) — names kept
            # from the original; the height scaling below relies on them.
            img_width = image.shape[0]
            print(img_width)
            img_height = image.shape[1]
            print(img_height)
            print('row[0][3]', row[0][3])
            print('height', row[0][3] / img_height)
            # r.add_picture('cropped_table.jpg')
            document.add_picture('cropped_table.jpg', width=Inches(6.0),
                                 height=Inches(row[0][3] / img_height * 5))
    document.save(output_name)
#!/usr/bin/env python # -*- coding: UTF-8 -*- from docx import Document from docx.shared import Inches from docx.shared import Pt from random import sample from random import shuffle from random import seed from random import randint document = Document() sections = document.sections for section in sections: section.top_margin = Inches(.5) section.bottom_margin = Inches(.5) section.left_margin = Inches(.5) section.right_margin = Inches(.5) tests = raw_input("How many tests? ") for x in range(int(tests)): paragraph = document.add_paragraph('Name:_____________________________________________________________________________Date:_________________Hour:__________________') table = document.add_table(rows=4, cols=2) a = table.cell(0,0) b = table.cell(0,1) A = a.merge(b) cell = table.cell(0,0) cell.text = "Taya tries to throw a piece of paper into the trash. She fires the paper at " + str(randint(80,100)/10.0)+ "m/s. The trash can is "+ str(randint(31,59)/10.0)+ " meters away.\nAt what angle must Taya throw the paper to hit the trash can?" cell = table.cell(1,0) cell.text = "STEP 1: Write the given values, with variables, for this problem.\n\n\n\n\n\n\n" cell = table.cell(1,1)
class Describe_Cell(object):
    """Unit tests for the python-docx ``_Cell`` proxy.

    The ``it_...`` methods each exercise one behavior. The ``@pytest.fixture``
    functions below build (cell, expected...) tuples from compact CXML
    descriptions via the suite helpers ``element()`` (build lxml element) and
    ``xml()`` (expected serialized XML); the trailing "fixture components"
    provide mocks for collaborator objects.
    """

    def it_knows_what_text_it_contains(self, text_get_fixture):
        cell, expected_text = text_get_fixture
        text = cell.text
        assert text == expected_text

    def it_can_replace_its_content_with_a_string_of_text(
            self, text_set_fixture):
        cell, text, expected_xml = text_set_fixture
        cell.text = text
        assert cell._tc.xml == expected_xml

    def it_knows_its_width_in_EMU(self, width_get_fixture):
        cell, expected_width = width_get_fixture
        assert cell.width == expected_width

    def it_can_change_its_width(self, width_set_fixture):
        cell, value, expected_xml = width_set_fixture
        cell.width = value
        assert cell.width == value
        assert cell._tc.xml == expected_xml

    def it_provides_access_to_the_paragraphs_it_contains(
            self, paragraphs_fixture):
        cell = paragraphs_fixture
        paragraphs = cell.paragraphs
        assert len(paragraphs) == 2
        count = 0
        for idx, paragraph in enumerate(paragraphs):
            assert isinstance(paragraph, Paragraph)
            assert paragraph is paragraphs[idx]
            count += 1
        assert count == 2

    def it_provides_access_to_the_tables_it_contains(self, tables_fixture):
        # test len(), iterable, and indexed access
        cell, expected_count = tables_fixture
        tables = cell.tables
        assert len(tables) == expected_count
        count = 0
        for idx, table in enumerate(tables):
            assert isinstance(table, Table)
            assert tables[idx] is table
            count += 1
        assert count == expected_count

    def it_can_add_a_paragraph(self, add_paragraph_fixture):
        cell, expected_xml = add_paragraph_fixture
        p = cell.add_paragraph()
        assert cell._tc.xml == expected_xml
        assert isinstance(p, Paragraph)

    def it_can_add_a_table(self, add_table_fixture):
        cell, expected_xml = add_table_fixture
        table = cell.add_table(rows=0, cols=0)
        assert cell._tc.xml == expected_xml
        assert isinstance(table, Table)

    def it_can_merge_itself_with_other_cells(self, merge_fixture):
        cell, other_cell, merged_tc_ = merge_fixture
        merged_cell = cell.merge(other_cell)
        cell._tc.merge.assert_called_once_with(other_cell._tc)
        assert isinstance(merged_cell, _Cell)
        assert merged_cell._tc is merged_tc_
        assert merged_cell._parent is cell._parent

    # fixtures -------------------------------------------------------

    @pytest.fixture(params=[
        ('w:tc', 'w:tc/w:p'),
        ('w:tc/w:p', 'w:tc/(w:p, w:p)'),
        ('w:tc/w:tbl', 'w:tc/(w:tbl, w:p)'),
    ])
    def add_paragraph_fixture(self, request):
        tc_cxml, after_tc_cxml = request.param
        cell = _Cell(element(tc_cxml), None)
        expected_xml = xml(after_tc_cxml)
        return cell, expected_xml

    @pytest.fixture(params=[
        ('w:tc', 'w:tc/(w:tbl'),
        ('w:tc/w:p', 'w:tc/(w:p, w:tbl'),
    ])
    def add_table_fixture(self, request):
        tc_cxml, after_tc_cxml = request.param
        # the table has some overhead elements, also a blank para after since
        # it's in a cell.
        after_tc_cxml += (
            '/(w:tblPr/w:tblW{w:type=auto,w:w=0},w:tblGrid),w:p)')
        cell = _Cell(element(tc_cxml), None)
        expected_xml = xml(after_tc_cxml)
        return cell, expected_xml

    @pytest.fixture
    def merge_fixture(self, tc_, tc_2_, parent_, merged_tc_):
        cell, other_cell = _Cell(tc_, parent_), _Cell(tc_2_, parent_)
        tc_.merge.return_value = merged_tc_
        return cell, other_cell, merged_tc_

    @pytest.fixture
    def paragraphs_fixture(self):
        return _Cell(element('w:tc/(w:p, w:p)'), None)

    @pytest.fixture(params=[
        ('w:tc', 0),
        ('w:tc/w:tbl', 1),
        ('w:tc/(w:tbl,w:tbl)', 2),
        ('w:tc/(w:p,w:tbl)', 1),
        ('w:tc/(w:tbl,w:tbl,w:p)', 2),
    ])
    def tables_fixture(self, request):
        cell_cxml, expected_count = request.param
        cell = _Cell(element(cell_cxml), None)
        return cell, expected_count

    @pytest.fixture(params=[
        ('w:tc', ''),
        ('w:tc/w:p/w:r/w:t"foobar"', 'foobar'),
        ('w:tc/(w:p/w:r/w:t"foo",w:p/w:r/w:t"bar")', 'foo\nbar'),
        ('w:tc/(w:tcPr,w:p/w:r/w:t"foobar")', 'foobar'),
        ('w:tc/w:p/w:r/(w:t"fo",w:tab,w:t"ob",w:br,w:t"ar",w:br)',
         'fo\tob\nar\n'),
    ])
    def text_get_fixture(self, request):
        tc_cxml, expected_text = request.param
        cell = _Cell(element(tc_cxml), None)
        return cell, expected_text

    @pytest.fixture(params=[
        ('w:tc/w:p', 'foobar', 'w:tc/w:p/w:r/w:t"foobar"'),
        ('w:tc/w:p', 'fo\tob\rar\n',
         'w:tc/w:p/w:r/(w:t"fo",w:tab,w:t"ob",w:br,w:t"ar",w:br)'),
        ('w:tc/(w:tcPr, w:p, w:tbl, w:p)', 'foobar',
         'w:tc/(w:tcPr, w:p/w:r/w:t"foobar")'),
    ])
    def text_set_fixture(self, request):
        tc_cxml, new_text, expected_cxml = request.param
        cell = _Cell(element(tc_cxml), None)
        expected_xml = xml(expected_cxml)
        return cell, new_text, expected_xml

    @pytest.fixture(params=[
        ('w:tc', None),
        ('w:tc/w:tcPr', None),
        ('w:tc/w:tcPr/w:tcW{w:w=25%,w:type=pct}', None),
        ('w:tc/w:tcPr/w:tcW{w:w=1440,w:type=dxa}', 914400),
    ])
    def width_get_fixture(self, request):
        tc_cxml, expected_width = request.param
        cell = _Cell(element(tc_cxml), None)
        return cell, expected_width

    @pytest.fixture(params=[
        ('w:tc', Inches(1), 'w:tc/w:tcPr/w:tcW{w:w=1440,w:type=dxa}'),
        ('w:tc/w:tcPr/w:tcW{w:w=25%,w:type=pct}', Inches(2),
         'w:tc/w:tcPr/w:tcW{w:w=2880,w:type=dxa}'),
    ])
    def width_set_fixture(self, request):
        tc_cxml, new_value, expected_cxml = request.param
        cell = _Cell(element(tc_cxml), None)
        expected_xml = xml(expected_cxml)
        return cell, new_value, expected_xml

    # fixture components ---------------------------------------------

    @pytest.fixture
    def merged_tc_(self, request):
        return instance_mock(request, CT_Tc)

    @pytest.fixture
    def parent_(self, request):
        return instance_mock(request, Table)

    @pytest.fixture
    def tc_(self, request):
        return instance_mock(request, CT_Tc)

    @pytest.fixture
    def tc_2_(self, request):
        return instance_mock(request, CT_Tc)
def hash(self, RutaEx):
    """SHA-256 every file in the folders listed in the workbook at ``RutaEx``,
    screenshot the per-folder console report, and build a Word report.

    For each folder named in the workbook it: computes SHA-256 of each file
    (via the MetodosHash helper), prints a console report and captures it with
    pyautogui, then appends a title/description and a 3-column table
    (index, digest, filename) plus the folder's .png images to the document.

    Fix vs. original: trailing path components were removed with
    ``str.rstrip(suffix)``, which strips a *character set* rather than a
    suffix (e.g. it keeps eating any trailing chars that appear in the
    suffix), silently corrupting paths. Replaced with exact-length slicing.
    The bare ``except:`` was also narrowed to ``except Exception:`` so
    KeyboardInterrupt is not swallowed.

    Args:
        RutaEx: path of the Excel workbook describing paths, extensions and
            descriptions (read through the project's ``ex.Excel`` helper).

    NOTE: the method name shadows the builtin ``hash``; kept for callers.
    """
    noms = MetodosHash.archYrutas()
    xl = ex.Excel()
    doc = docx.Document()
    bandetitulo = 0          # 0 until the report title has been written once
    cadeTemp = ""
    dirs2 = []
    dirs = []
    DatosFlag = 0            # selector for xl.DatosEx: 0=path, 1=extension, 2=save path
    path = xl.DatosEx(DatosFlag, RutaEx)
    DatosFlag = 1
    extension = xl.DatosEx(DatosFlag, RutaEx)
    fecha_actual = time.strftime("%m/%d/%Y")
    hora_actual = time.strftime("%H:%M:%S")
    cont = 0
    Descripcion = xl.Ex(RutaEx)
    cont_para = 0
    ImaExt = ".png"
    noms.NomCarpe(dirs, path, dirs2)
    dirs2 = []
    dirs2 = xl.NomCarpesRuta(RutaEx)
    os.system("cls")
    for i in dirs2:
        tupla = ()           # accumulates (index, digest, filename) rows
        cont_tupla = 0
        a1 = 0
        a2 = ""
        a3 = ""
        bande = 0
        cont_pixeles = 0     # number of printed entries -> screenshot height
        pixeles = 70
        dire = ""
        DatosFlag = 0
        path2 = xl.DatosEx(DatosFlag, RutaEx)
        path3 = path2
        dire = "\\" + i
        path2 += dire
        path3 += dire
        cadeTemp = noms.NomArch(path2, extension)
        # Folder display name: keep only the part before the first space.
        diraux = dirs2
        dirtemporal = diraux[cont].split(" ")
        diraux[cont] = dirtemporal[0]
        print("---------------" + diraux[cont] + "---------------")
        for x in cadeTemp:
            aux = "\\" + x
            path2 += "\\" + x
            hashh, bande = noms.getsha256file(path2)
            if bande == 0:
                cont_tupla += 1
                a1 = cont_tupla
                a2 = hashh
                a3 = x
                tupla2 = ((a1, a2, a3),)
                tupla += tupla2
                print("Nombre del archivo: " + x)
                print("Hash: " + hashh)
                print("Algoritmo: SHA256")
                print("")
                cont_pixeles += 1
            elif bande >= 1:
                pass
            # FIX: remove exactly the appended "\\" + filename suffix
            # (rstrip(aux) stripped a character set and corrupted the path).
            path2 = path2[:-len(aux)]
        print(f"Fecha actual: {fecha_actual}")
        print(f"Hora actual: {hora_actual}")
        print("--------------------------------------------")
        cont_pixeles += 1
        # Grow the capture region 50 px per printed entry, plus footer room.
        for h in range(cont_pixeles):
            pixeles += 50
        pixeles += 45
        screenshoty = pyautogui.screenshot(region=(1, -0.5, 850, pixeles))
        screenshoty.save(path3 + "\\" + diraux[cont] + ".png")
        cont += 1
        os.system("cls")
        pixeles = 70
        try:
            if bandetitulo == 0:
                # Document title only once, on the first folder.
                doc.add_paragraph(xl.TituloWord(RutaEx)).alignment = WD_ALIGN_PARAGRAPH.CENTER
                bandetitulo = 1
            else:
                pass
            doc.add_paragraph("\n")
            doc.add_paragraph(Descripcion[cont_para]).alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
        except Exception:
            # Missing description for this folder: fall back to a placeholder.
            Descripcion.append("Descripción")
            doc.add_paragraph("\n")
            doc.add_paragraph(Descripcion[cont_para]).alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
        tabla = doc.add_table(rows= 0, cols= 3, style= "Table Grid")
        for c1, c2, c3 in tupla:
            row_cells = tabla.add_row().cells
            row_cells[0].text = str(c1)
            row_cells[1].text = c2
            row_cells[2].text = c3
        cont_para += 1
        # Embed every .png found in the folder (includes the screenshot).
        Imas = noms.NomArch(path2, ImaExt)
        for j in range(len(Imas)):
            auxi = "\\" + Imas[j]
            path3 += auxi
            doc.add_picture(path3, width=Inches(6.56))
            # FIX: exact-suffix removal instead of rstrip (see docstring).
            path3 = path3[:-len(auxi)]
    DatosFlag = 2
    keep = xl.DatosEx(DatosFlag, RutaEx)
    doc.save(keep)
class DescribeSection(object):
    """Unit tests for the python-docx ``Section`` proxy.

    Each ``it_...`` method checks one property (start type, page size,
    orientation, margins) as a get or a set. The ``@pytest.fixture``
    functions below build (section, expected...) tuples from compact CXML
    descriptions via the suite helpers ``element()`` and ``xml()``.
    """

    def it_knows_its_start_type(self, start_type_get_fixture):
        section, expected_start_type = start_type_get_fixture
        assert section.start_type is expected_start_type

    def it_can_change_its_start_type(self, start_type_set_fixture):
        section, new_start_type, expected_xml = start_type_set_fixture
        section.start_type = new_start_type
        assert section._sectPr.xml == expected_xml

    def it_knows_its_page_width(self, page_width_get_fixture):
        section, expected_page_width = page_width_get_fixture
        assert section.page_width == expected_page_width

    def it_can_change_its_page_width(self, page_width_set_fixture):
        section, new_page_width, expected_xml = page_width_set_fixture
        section.page_width = new_page_width
        assert section._sectPr.xml == expected_xml

    def it_knows_its_page_height(self, page_height_get_fixture):
        section, expected_page_height = page_height_get_fixture
        assert section.page_height == expected_page_height

    def it_can_change_its_page_height(self, page_height_set_fixture):
        section, new_page_height, expected_xml = page_height_set_fixture
        section.page_height = new_page_height
        assert section._sectPr.xml == expected_xml

    def it_knows_its_page_orientation(self, orientation_get_fixture):
        section, expected_orientation = orientation_get_fixture
        assert section.orientation is expected_orientation

    def it_can_change_its_orientation(self, orientation_set_fixture):
        section, new_orientation, expected_xml = orientation_set_fixture
        section.orientation = new_orientation
        assert section._sectPr.xml == expected_xml

    def it_knows_its_page_margins(self, margins_get_fixture):
        section, margin_prop_name, expected_value = margins_get_fixture
        value = getattr(section, margin_prop_name)
        assert value == expected_value

    def it_can_change_its_page_margins(self, margins_set_fixture):
        section, margin_prop_name, new_value, expected_xml = (
            margins_set_fixture)
        setattr(section, margin_prop_name, new_value)
        assert section._sectPr.xml == expected_xml

    # fixtures -------------------------------------------------------

    @pytest.fixture(params=[
        ('w:sectPr/w:pgMar{w:left=120}', 'left_margin', 76200),
        ('w:sectPr/w:pgMar{w:right=240}', 'right_margin', 152400),
        ('w:sectPr/w:pgMar{w:top=-360}', 'top_margin', -228600),
        ('w:sectPr/w:pgMar{w:bottom=480}', 'bottom_margin', 304800),
        ('w:sectPr/w:pgMar{w:gutter=600}', 'gutter', 381000),
        ('w:sectPr/w:pgMar{w:header=720}', 'header_distance', 457200),
        ('w:sectPr/w:pgMar{w:footer=840}', 'footer_distance', 533400),
        ('w:sectPr/w:pgMar', 'left_margin', None),
        ('w:sectPr', 'top_margin', None),
    ])
    def margins_get_fixture(self, request):
        sectPr_cxml, margin_prop_name, expected_value = request.param
        section = Section(element(sectPr_cxml))
        return section, margin_prop_name, expected_value

    @pytest.fixture(params=[
        ('w:sectPr', 'left_margin', Inches(1),
         'w:sectPr/w:pgMar{w:left=1440}'),
        ('w:sectPr', 'right_margin', Inches(0.5),
         'w:sectPr/w:pgMar{w:right=720}'),
        ('w:sectPr', 'top_margin', Inches(-0.25),
         'w:sectPr/w:pgMar{w:top=-360}'),
        ('w:sectPr', 'bottom_margin', Inches(0.75),
         'w:sectPr/w:pgMar{w:bottom=1080}'),
        ('w:sectPr', 'gutter', Inches(0.25),
         'w:sectPr/w:pgMar{w:gutter=360}'),
        ('w:sectPr', 'header_distance', Inches(1.25),
         'w:sectPr/w:pgMar{w:header=1800}'),
        ('w:sectPr', 'footer_distance', Inches(1.35),
         'w:sectPr/w:pgMar{w:footer=1944}'),
        ('w:sectPr', 'left_margin', None, 'w:sectPr/w:pgMar'),
        ('w:sectPr/w:pgMar{w:top=-360}', 'top_margin', Inches(0.6),
         'w:sectPr/w:pgMar{w:top=864}'),
    ])
    def margins_set_fixture(self, request):
        sectPr_cxml, property_name, new_value, expected_cxml = request.param
        section = Section(element(sectPr_cxml))
        expected_xml = xml(expected_cxml)
        return section, property_name, new_value, expected_xml

    @pytest.fixture(params=[
        ('w:sectPr/w:pgSz{w:orient=landscape}', WD_ORIENT.LANDSCAPE),
        ('w:sectPr/w:pgSz{w:orient=portrait}', WD_ORIENT.PORTRAIT),
        ('w:sectPr/w:pgSz', WD_ORIENT.PORTRAIT),
        ('w:sectPr', WD_ORIENT.PORTRAIT),
    ])
    def orientation_get_fixture(self, request):
        sectPr_cxml, expected_orientation = request.param
        section = Section(element(sectPr_cxml))
        return section, expected_orientation

    @pytest.fixture(params=[
        (WD_ORIENT.LANDSCAPE, 'w:sectPr/w:pgSz{w:orient=landscape}'),
        (WD_ORIENT.PORTRAIT, 'w:sectPr/w:pgSz'),
        (None, 'w:sectPr/w:pgSz'),
    ])
    def orientation_set_fixture(self, request):
        new_orientation, expected_cxml = request.param
        section = Section(element('w:sectPr'))
        expected_xml = xml(expected_cxml)
        return section, new_orientation, expected_xml

    @pytest.fixture(params=[
        ('w:sectPr/w:pgSz{w:h=2880}', Inches(2)),
        ('w:sectPr/w:pgSz', None),
        ('w:sectPr', None),
    ])
    def page_height_get_fixture(self, request):
        sectPr_cxml, expected_page_height = request.param
        section = Section(element(sectPr_cxml))
        return section, expected_page_height

    @pytest.fixture(params=[
        (None, 'w:sectPr/w:pgSz'),
        (Inches(2), 'w:sectPr/w:pgSz{w:h=2880}'),
    ])
    def page_height_set_fixture(self, request):
        new_page_height, expected_cxml = request.param
        section = Section(element('w:sectPr'))
        expected_xml = xml(expected_cxml)
        return section, new_page_height, expected_xml

    @pytest.fixture(params=[
        ('w:sectPr/w:pgSz{w:w=1440}', Inches(1)),
        ('w:sectPr/w:pgSz', None),
        ('w:sectPr', None),
    ])
    def page_width_get_fixture(self, request):
        sectPr_cxml, expected_page_width = request.param
        section = Section(element(sectPr_cxml))
        return section, expected_page_width

    @pytest.fixture(params=[
        (None, 'w:sectPr/w:pgSz'),
        (Inches(4), 'w:sectPr/w:pgSz{w:w=5760}'),
    ])
    def page_width_set_fixture(self, request):
        new_page_width, expected_cxml = request.param
        section = Section(element('w:sectPr'))
        expected_xml = xml(expected_cxml)
        return section, new_page_width, expected_xml

    @pytest.fixture(params=[
        ('w:sectPr', WD_SECTION.NEW_PAGE),
        ('w:sectPr/w:type', WD_SECTION.NEW_PAGE),
        ('w:sectPr/w:type{w:val=continuous}', WD_SECTION.CONTINUOUS),
        ('w:sectPr/w:type{w:val=nextPage}', WD_SECTION.NEW_PAGE),
        ('w:sectPr/w:type{w:val=oddPage}', WD_SECTION.ODD_PAGE),
        ('w:sectPr/w:type{w:val=evenPage}', WD_SECTION.EVEN_PAGE),
        ('w:sectPr/w:type{w:val=nextColumn}', WD_SECTION.NEW_COLUMN),
    ])
    def start_type_get_fixture(self, request):
        sectPr_cxml, expected_start_type = request.param
        section = Section(element(sectPr_cxml))
        return section, expected_start_type

    @pytest.fixture(params=[
        ('w:sectPr/w:type{w:val=oddPage}', WD_SECTION.EVEN_PAGE,
         'w:sectPr/w:type{w:val=evenPage}'),
        ('w:sectPr/w:type{w:val=nextPage}', None, 'w:sectPr'),
        ('w:sectPr', None, 'w:sectPr'),
        ('w:sectPr/w:type{w:val=continuous}', WD_SECTION.NEW_PAGE,
         'w:sectPr'),
        ('w:sectPr/w:type', WD_SECTION.NEW_PAGE, 'w:sectPr'),
        ('w:sectPr/w:type', WD_SECTION.NEW_COLUMN,
         'w:sectPr/w:type{w:val=nextColumn}'),
    ])
    def start_type_set_fixture(self, request):
        initial_cxml, new_start_type, expected_cxml = request.param
        section = Section(element(initial_cxml))
        expected_xml = xml(expected_cxml)
        return section, new_start_type, expected_xml
def create_ppt(self, ppt_dir_path, sel):
    """Download the scraped page images from `sel` and assemble them into a .pptx.

    Picks one image per content paragraph (the tallest candidate), resizes all
    picked images to the largest size found, sizes the slides to match, places
    one image per blank slide, then deletes the downloaded .jpg files and saves
    ``<title>.pptx`` into ``ppt_dir_path``.

    Args:
        ppt_dir_path: output directory (created if missing).
        sel: scrapy-style selector over the page HTML.
    """
    # Create the folder if it does not exist yet.
    if not os.path.exists(ppt_dir_path):
        os.makedirs(ppt_dir_path)
    SLD_LAYOUT_TITLE_AND_CONTENT = 6  # layout index 6 = blank slide template
    prs = Presentation()  # new pptx presentation
    # # Get the full html
    # sel = self.get_html_data()
    # Extract the document title.
    xpath_title = "//div[@class='doc-title']/text()"
    title = "".join(sel.xpath(xpath_title).extract()).strip()
    # Collect candidate image URLs for each content paragraph.
    xpath_content_p = "//div[@class='content singlePage wk-container']/div/p/img"
    xpath_content_p_list = sel.xpath(xpath_content_p)
    xpath_content_p_url_list = []
    for imgs in xpath_content_p_list:
        xpath_content = "./@data-loading-src|./@data-src|./@src"
        contents_list = imgs.xpath(xpath_content).extract()
        xpath_content_p_url_list.append(contents_list)
    img_path_list = []  # downloaded image paths, for inserting into the ppt and deleting later
    # Download the images into the target directory.
    for index, content_img_p in enumerate(xpath_content_p_url_list):
        p_img_path_list = []
        for index_1, img_one in enumerate(content_img_p):
            one_img_saved_path = os.path.join(ppt_dir_path, "{}_{}.jpg".format(index, index_1))
            self.download_img.download_one_img(img_one, one_img_saved_path)
            p_img_path_list.append(one_img_saved_path)
        p_img_max_shape = 0
        # Keep the tallest candidate image for this paragraph.
        # NOTE(review): if content_img_p is empty, index_max_img is never
        # bound and the append below raises UnboundLocalError — confirm
        # every <p> has at least one image. Also note this inner loop
        # shadows the outer `index` variable.
        for index, p_img_path in enumerate(p_img_path_list):
            img_shape = cv2.imread(p_img_path).shape
            if p_img_max_shape < img_shape[0]:
                p_img_max_shape = img_shape[0]
                index_max_img = index
        img_path_list.append(p_img_path_list[index_max_img])
    print(img_path_list)
    # Find the largest image size among the downloads.
    img_shape_max = [0, 0]
    for img_path_one in img_path_list:
        img_path_one_shape = cv2.imread(img_path_one).shape
        if img_path_one_shape[0] > img_shape_max[0]:
            img_shape_max = img_path_one_shape
    # Rescale every image to that largest size.
    for img_path_one in img_path_list:
        cv2.imwrite(img_path_one, cv2.resize(cv2.imread(img_path_one), (img_shape_max[1], img_shape_max[0])))
    # img_shape_path = img_path_list[0]
    # Get image size
    # img_shape = cv2.imread(img_shape_path).shape
    # Convert pixels to the ppt EMU length unit (assumed dpi 720):
    # 1 cm = 28.346 px = 360000 EMU; 1 px = 12700 EMU.
    prs.slide_width = img_shape_max[1] * 12700
    prs.slide_height = img_shape_max[0] * 12700
    for img_path_one in img_path_list:
        left = Inches(0)
        right = Inches(0)  # NOTE(review): passed as add_picture's `top` argument despite the name
        # width = Inches(1)
        slide_layout = prs.slide_layouts[SLD_LAYOUT_TITLE_AND_CONTENT]
        slide = prs.slides.add_slide(slide_layout)
        pic = slide.shapes.add_picture(img_path_one, left, right, )
        print("insert {} into pptx success!".format(img_path_one))
        # os.remove(img_path_one)
    # Remove all downloaded .jpg files.
    for root, dirs, files in os.walk(ppt_dir_path):
        for file in files:
            if file.endswith(".jpg"):
                img_path = os.path.join(root, file)
                os.remove(img_path)
    prs.save(os.path.join(ppt_dir_path, title + ".pptx"))
    print("download {} success!".format(os.path.join(ppt_dir_path, title + ".pptx")))
def handle_starttag(self, tag, attrs):
    """HTMLParser open-tag hook: map each HTML tag onto docx building state.

    Sets run-level flags (italic/bold/font/size), opens paragraphs with the
    matching paragraph format, downloads <img> sources into the document,
    and tracks list/blockquote/table nesting state on ``self``.

    NOTE(review): reconstructed from collapsed source — the paragraph
    formatting under the P branch is placed inside the ``not list_level``
    guard (the blockquote sub-branch uses the same ``paragraph_format``
    local); confirm against the original file.
    """
    if tag == EM:
        self.italic = True
    elif tag == P:
        if not self.list_level:
            # Regular body paragraph: justified, 1.5 spacing, first-line indent.
            self.paragraph = self.document.add_paragraph()
            paragraph_format = self.paragraph.paragraph_format
            paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.JUSTIFY
            paragraph_format.space_after = Pt(STANDART_PT)
            paragraph_format.first_line_indent = Cm(
                1.5)  # Inches(STANDART_INCHES + 0.3)
            # paragraph_format.left_indent = Inches(STANDART_INCHES)
            paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
            if self.isBlockQuote:
                # it's here because in html <blockquote><p></p></blockquote>
                paragraph_format.first_line_indent = Inches(0)
                paragraph_format.left_indent = Inches(STANDART_INCHES)
                paragraph_format.space_before = Pt(STANDART_PT)
                paragraph_format.space_after = Pt(STANDART_PT)
                paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.LEFT
    elif tag == STRONG:
        self.bold = True
    elif tag in [H1, H2, H3, H4, H5, H6]:
        # Remember heading level (tag is 'h1'..'h6').
        self.h = int(tag[1])
    elif tag == UL:
        self.list_level += 1
    elif tag == LI:
        self.paragraph = self.document.add_paragraph()
        self.need_dot_li = True
        self.font = self.normal_font
        self.size = self.normal_size
    elif tag == IMG:
        # NOTE(review): assumes the image URL is the first attribute — confirm.
        url = attrs[0][1]
        response = requests.get(url)
        picture = response.content
        with open(ABS_PATH.format(PICTURE_NAME), 'wb') as file:
            file.write(picture)
        try:
            # Fixed 4x3 inch picture, centered, then start a fresh paragraph.
            self.document.add_picture(ABS_PATH.format(PICTURE_NAME), width=Inches(4), height=Inches(3))
            last_paragraph = self.document.paragraphs[-1]
            last_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            last_paragraph.space_after = Pt(10)
            last_paragraph.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
            self.paragraph = self.document.add_paragraph()
            # last_paragraph.keep_with_next = False
        except Exception as e:
            print(e)
            print('ERROR WITH IMAGE {}'.format(url))
    elif tag == A:
        self.hyperlink = attrs[0][1]
    elif tag == BLOCKQUOTE:
        self.isBlockQuote = True
    # TABLE SECTION
    elif tag == TABLE:
        self.table_mode = True
    elif tag == THEAD:
        self.table_thead_mode = True
    elif tag == TH:
        pass
    elif tag == TR:
        # Header rows are provided by the table itself; only add body rows.
        if not self.table_thead_mode:
            self.table.add_row()
    elif tag == TD:
        pass
    # END TABLE SECTION
    elif tag == CODE:
        # Code blocks: left-aligned, single-spaced, no indent, code font.
        paragraph_format = self.paragraph.paragraph_format
        paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.LEFT
        paragraph_format.line_spacing_rule = WD_LINE_SPACING.SINGLE
        paragraph_format.first_line_indent = Inches(0)
        self.italic = True
        self.size = self.code_size
        self.font = self.code_font
# Demo: build a small .docx exercising headings, formatted runs, paragraph
# styles, an image, a table and a page break, then save it as demo.docx.
document = Document()
document.add_heading('Document Title', 0)

# A paragraph mixing a plain lead-in with bold and italic runs.
intro = document.add_paragraph('A plain paragraph having some ')
bold_run = intro.add_run('bold')
bold_run.bold = True
intro.add_run(' and some ')
italic_run = intro.add_run('italic.')
italic_run.italic = True

document.add_heading('Heading, level 1', level=1)

# One paragraph per built-in style being demonstrated.
for body_text, style_name in (
    ('Intense quote', 'IntenseQuote'),
    ('first item in unordered list', 'ListBullet'),
    ('first item in ordered list', 'ListNumber'),
):
    document.add_paragraph(body_text, style=style_name)

document.add_picture('test.jpg', width=Inches(1.25))

# Header row only; data rows would be appended per record like so:
# for item in recordset:
#     row_cells = table.add_row().cells
#     row_cells[0].text = str(item.qty)
#     row_cells[1].text = str(item.id)
#     row_cells[2].text = item.desc
table = document.add_table(rows=1, cols=3)
for header_cell, caption in zip(table.rows[0].cells, ('Qty', 'Id', 'Desc')):
    header_cell.text = caption

document.add_page_break()
document.save('demo.docx')
# NOTE(review): this chunk starts mid-script — masterList, mlSplit, list2 and
# output are defined in an earlier portion not visible here.
# Append a "Wind Ensemble" label to each sublist past the split point, one
# student per sublist, until list2 runs out.
for index, subList in enumerate(masterList[(mlSplit + 1):]):
    try:
        student = list2[index]
        app = f'Wind Ensemble: {student}'
        subList.append(app)
    except IndexError:
        # Fewer students than sublists: leave the remaining sublists untouched.
        pass
# NOTE(review): imports placed mid-script; conventionally they belong at the top.
from docx import Document
from docx.shared import Inches
from docx.shared import Pt
# Build a small 6in x 4in page listing each row's seats and occupants.
x = Document()
y = x.sections
outputSection = y[0]
outputSection.page_height = Inches(4)
outputSection.page_width = Inches(6)
for row, nestedList in output.items():
    for index, seat in enumerate(nestedList):
        # Seat header: bold, 16 pt.
        par = x.add_paragraph()
        run = par.add_run(f'{row}, Seat {index}')
        font = run.font
        font.size = Pt(16)
        font.bold = True
        # One 13 pt line per occupant of the seat.
        for name in seat:
            par = x.add_paragraph()
            run = par.add_run(name)
            font = run.font
            font.size = Pt(13)
# Press ⌃R to execute it or replace it with your code. # Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings. from docx import Document from docx.shared import Inches import pyttsx3 def speak(text): pyttsx3.speak(text) document = Document() # profile picture document.add_picture('images.jpeg', width=Inches(2.0)) #name phone number and email details name = input('what is your name ? ') speak('Hello' + name + 'How are you today ?') phone_number = input('what is your phone number ? ') speak('what is your phone number ? ') email = '*****@*****.**' document.add_paragraph(name + '%' + phone_number + '/' + email) # about me document.add_heading('About me') about_me = input('Tell me about yourself') document.add_paragraph(about_me) # work experience
def make_puzzle(self, filename = 'puzzle'):
    """Generate a word-search puzzle document plus a matching answer-key document.

    Steps: place every word into ``self.puzzle_origin`` using the placement
    directions allowed by ``self.diff``; fill the remaining '0' cells with
    filler letters according to ``self.option``; render the grid, a hint
    table (with pictures, dictionary definitions, or plain/initial-consonant
    word lists depending on flags), and save both puzzle and answer files to
    the desktop.

    Args:
        filename: base name (without extension) for the saved files.

    Relevant flags read from self: diff, option, height, width, word_image,
    korean, pic_on, uppercase, chosung_scramable, desktop.
    """
    difficulty = self.diff
    option = self.option
    # Placement expression: which directions a word may be written in.
    # NOTE(review): executed via exec() below; the strings call placement
    # methods (self.col, self.row, ...) that presumably mutate
    # self.puzzle_origin — exec on a built string is fragile, kept as-is.
    if difficulty == 1:
        self.difficulty = 'random.choice([self.col, self.row])(word)'
    elif difficulty == 2:
        self.difficulty = "random.choice([self.col, self.col_rev, self.row, self.row_rev])(word)"
    elif difficulty == 3:
        self.difficulty = "random.choice([self.col, self.row, self.diagup, self.diagdown])(word)"
    elif difficulty == 4:
        self.difficulty = "random.choice([self.col, self.col_rev, self.row, self.row_rev, self.diagup, self.diagup_rev, self.diagdown, self.diagdown_rev])(word)"
    # Empty grid: '0' marks an unused cell.
    self.puzzle_origin = []
    for i in range(self.height):
        self.puzzle_origin.append([])
        for j in range(self.width):
            self.puzzle_origin[i].append('0')
    print("퍼즐 만드는 중")
    words = [word[0] for word in self.word_image]
    for word in words:
        exec(self.difficulty)
    # Most common letters across all words (used by filler options below).
    string_words = ''.join(words)
    from collections import Counter
    count_alpha = Counter(string_words)
    common_alph = ''
    for alph in count_alpha.most_common(5):
        common_alph += alph[0]
    # Korean filler pool: unique Hangul syllables from random_words.txt.
    data = ''
    if self.korean:
        f = open("random_words.txt", 'r')
        data = f.read()
        regex_f = r'[가-힣]+'
        search_target_f = data
        data = ''.join(list(set(re.findall(regex_f, search_target_f))))
    printed_words = ''
    puzzle = copy.deepcopy(self.puzzle_origin)
    for i in range(self.height):
        for j in range(self.width):
            if self.puzzle_origin[i][j] == "0":
                fill_alph = random.choice(string.ascii_lowercase)
                if self.korean:
                    fill_alph = random.choice(data)
                # To keep fillers from matching the words too often, allow one
                # re-roll to dodge the most common letters (option 1).
                if option == 0:
                    puzzle[i][j] = fill_alph
                elif option == 1:
                    if fill_alph in common_alph:
                        fill_alph = random.choice(string.ascii_lowercase)
                        if self.korean:
                            fill_alph = random.choice(data)
                    puzzle[i][j] = fill_alph
                    printed_words += puzzle[i][j]
                # To make letters overlap, pick between a random letter and
                # one of the most frequent letters (option 2).
                elif option == 2:
                    common_alph_list = []
                    puzzle[i][j] = random.choice([fill_alph, random.choice(count_alpha.most_common(7))[0]])
                    printed_words += puzzle[i][j]
    # write to docx file
    # Write to docx to puzzle.docx
    document = Document()
    # changing the page margins
    sections = document.sections
    for section in sections:
        section.top_margin = Cm(1)
        section.bottom_margin = Cm(0.8)
        section.left_margin = Cm(2.3)
        section.right_margin = Cm(2.3)
    heading = 'Word Puzzle'
    if self.korean:
        heading = "낱말 찾기"
    head = document.add_heading(heading, 0)
    head.alignment = WD_ALIGN_PARAGRAPH.CENTER
    # Grade/class/name line; values come from hwp_settings.json when present.
    if os.path.exists('hwp_settings.json'):
        with open('hwp_settings.json') as f:
            data = json.load(f)
        para_belong = document.add_paragraph('{}학년 {}반 이름: _______'.format(data['grade'], data['class']))
    else:
        para_belong = document.add_paragraph('__학년 __반 이름: _______')
    para_belong.alignment = WD_ALIGN_PARAGRAPH.RIGHT
    # The puzzle grid itself.
    puzzle_table = document.add_table(rows=self.height, cols=self.width, style='Table Grid')
    puzzle_table.alignment = WD_TABLE_ALIGNMENT.CENTER
    self.set_height = 7200 / self.height
    for i, row in enumerate(puzzle_table.rows):
        # Set the row height by editing the row's tr XML directly.
        tr = row._tr
        trPr = tr.get_or_add_trPr()
        trHeight = OxmlElement('w:trHeight')
        trHeight.set(qn('w:val'), str(self.set_height))
        trHeight.set(qn('w:hRule'), "atLeast")
        trPr.append(trHeight)
        for j, cell in enumerate(row.cells):
            # Set the cell width.
            cell.width = Inches(5)
            if self.uppercase and not self.korean:
                cell.text = puzzle[i][j].upper()
            else:
                cell.text = puzzle[i][j]
            for paragraph in cell.paragraphs:
                # Center horizontally, bold.
                paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
                paragraph.style.font.bold = True
                # Center vertically via the cell's tcPr vAlign element.
                tc = cell._tc
                tcPr = tc.get_or_add_tcPr()
                tcVAlign = OxmlElement('w:vAlign')
                tcVAlign.set(qn('w:val'), "center")
                tcPr.append(tcVAlign)
    # Build the hint table.
    # Case 1: hints include pictures.
    if self.pic_on:
        # Column count chosen from the number of words.
        word_num = len(words)
        if word_num <= 15:
            size = 5
        elif word_num <= 21:
            size = (word_num+2)//3
        else:
            size = 7
        # Two rows per word row: picture row (even) above word row (odd).
        hint_table = document.add_table(rows=(len(words)+size-1)//size * 2, cols=size, style='Table Grid')
        hint_table.alignment = WD_TABLE_ALIGNMENT.CENTER
        for i, row in enumerate(hint_table.rows):
            # Row height: tall for picture rows, short for word rows.
            if i%2 == 0:
                # accessing row xml and setting tr height
                tr = row._tr
                trPr = tr.get_or_add_trPr()
                trHeight = OxmlElement('w:trHeight')
                trHeight.set(qn('w:val'), '1000')
                trHeight.set(qn('w:hRule'), "atLeast")
                trPr.append(trHeight)
            elif i%2 == 1:
                # accessing row xml and setting tr height
                tr = row._tr
                trPr = tr.get_or_add_trPr()
                trHeight = OxmlElement('w:trHeight')
                trHeight.set(qn('w:val'), '60')
                trHeight.set(qn('w:hRule'), "atLeast")
                trPr.append(trHeight)
            for j, cell in enumerate(row.cells):
                # Flat word index for this cell; only fill up to the word count.
                index = i//2*size + j
                if index < len(words):
                    for paragraph in cell.paragraphs:
                        if i % 2 == 1:
                            # Initial-consonant or scrambled hints enabled.
                            if self.chosung_scramable:
                                word = words[index]
                                if self.korean:
                                    # Korean: reduce each syllable to its
                                    # initial consonant via hgtk.
                                    cho_word = ''
                                    for chr in word:
                                        chosung_scramable = hgtk.letter.decompose(chr)[0]
                                        cho_word += chosung_scramable
                                    run = paragraph.add_run(cho_word)
                                else:
                                    # Pictures + English + scramble: shuffle
                                    # the word's letters.
                                    spelling = [i for i in word]
                                    shuffle(spelling)
                                    scrambled_word = ''.join(spelling)
                                    if self.uppercase:
                                        run = paragraph.add_run(scrambled_word.upper())
                                    else:
                                        run = paragraph.add_run(scrambled_word)
                            else:
                                if self.uppercase and not self.korean:
                                    run = paragraph.add_run(words[index].upper())
                                else:
                                    run = paragraph.add_run(words[index])
                            font = run.font
                            font.name = 'Arial'
                            font.size = Pt(15)
                        elif i % 2 == 0:
                            # Picture row: embed the word's image.
                            try:
                                run = paragraph.add_run()
                                run.add_picture(self.word_image[index][1], width=cell.width *95/100, height=cell.width)
                            except:
                                paragraph.add_run("에러 발생. 다른 사진 선택해주세요.")
                        # Center horizontally, bold.
                        paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        paragraph.style.font.bold = True
                        # Center vertically.
                        tc = cell._tc
                        tcPr = tc.get_or_add_tcPr()
                        tcVAlign = OxmlElement('w:vAlign')
                        tcVAlign.set(qn('w:val'), "center")
                        tcPr.append(tcVAlign)
    # Case 2: hints without pictures.
    else:
        # No pictures, English words: look definitions up in the Naver
        # dictionary and list "word(definition), ..." in one wide cell.
        if not self.korean:
            hint_table = document.add_table(rows=1, cols=1, style='Table Grid')
            hint_table.alignment = WD_TABLE_ALIGNMENT.CENTER
            hint_table_row = hint_table.rows[0]
            hint_tr = hint_table_row._tr
            hint_trPr = hint_tr.get_or_add_trPr()
            hint_trHeight = OxmlElement('w:trHeight')
            hint_trHeight.set(qn('w:val'), '1000')
            hint_trHeight.set(qn('w:hRule'), "atLeast")
            hint_trPr.append(hint_trHeight)
            hint_table_cell = hint_table_row.cells[0]
            hint = ''
            # Strip parenthesized/bracketed notes from dictionary entries.
            parenthesis = re.compile(r'(\s)?\(.*\)(\s)?')
            bracket = re.compile(r'(\s)?\[.*\](\s)?')
            for word in words:
                print("사전에 찾는중... " + word)
                req = requests.get('http://endic.naver.com/small_search.nhn?query=' + word)
                # Korean dictionary would be 'http://ko.dict.naver.com/small_search.nhn?query='
                html = req.text
                soup = BeautifulSoup(html, 'html.parser')
                meanings = soup.select('span.fnt_k05')
                if self.uppercase:
                    word = word.upper()
                if self.chosung_scramable:
                    spelling = [i for i in word]
                    shuffle(spelling)
                    word = ''.join(spelling)
                if meanings:
                    text = meanings[0].text
                    text = re.sub(parenthesis, '', text)
                    text = re.sub(bracket, '', text)
                    print(text)
                    hint += word + "({})".format(text) + ', '
            hint_table_cell.width = Inches(100)
            for paragraph in hint_table_cell.paragraphs:
                paragraph.add_run(hint.strip(', '))
                paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
                tc = hint_table_cell._tc
                tcPr = tc.get_or_add_tcPr()
                tcVAlign = OxmlElement('w:vAlign')
                tcVAlign.set(qn('w:val'), "center")
                tcPr.append(tcVAlign)
        else:
            # No pictures, Korean words.
            if self.chosung_scramable:
                # List each word as its initial consonants only.
                hint_table = document.add_table(rows=1, cols=1, style='Table Grid')
                hint_table.alignment = WD_TABLE_ALIGNMENT.CENTER
                hint_table_row = hint_table.rows[0]
                hint_tr = hint_table_row._tr
                hint_trPr = hint_tr.get_or_add_trPr()
                hint_trHeight = OxmlElement('w:trHeight')
                hint_trHeight.set(qn('w:val'), '1000')
                hint_trHeight.set(qn('w:hRule'), "atLeast")
                hint_trPr.append(hint_trHeight)
                hint_table_cell = hint_table_row.cells[0]
                hint = ''
                for word in words:
                    cho_word = ''
                    for chr in word:
                        chosung_scramable = hgtk.letter.decompose(chr)[0]
                        cho_word += chosung_scramable
                    hint += cho_word + ', '
                hint_table_cell.width = Inches(100)
                for paragraph in hint_table_cell.paragraphs:
                    paragraph.add_run(hint.strip(', '))
                    paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
                    tc = hint_table_cell._tc
                    tcPr = tc.get_or_add_tcPr()
                    tcVAlign = OxmlElement('w:vAlign')
                    tcVAlign.set(qn('w:val'), "center")
                    tcPr.append(tcVAlign)
            else:
                # Plain comma-separated word list.
                hint_table = document.add_table(rows=1, cols=1, style='Table Grid')
                hint_table.alignment = WD_TABLE_ALIGNMENT.CENTER
                hint_table_row = hint_table.rows[0]
                hint_tr = hint_table_row._tr
                hint_trPr = hint_tr.get_or_add_trPr()
                hint_trHeight = OxmlElement('w:trHeight')
                hint_trHeight.set(qn('w:val'), '1000')
                hint_trHeight.set(qn('w:hRule'), "atLeast")
                hint_trPr.append(hint_trHeight)
                hint_table_cell = hint_table_row.cells[0]
                hint = ''
                for word in words:
                    hint += word + ', '
                hint_table_cell.width = Inches(100)
                for paragraph in hint_table_cell.paragraphs:
                    paragraph.add_run(hint.strip(', '))
                    paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY
                    tc = hint_table_cell._tc
                    tcPr = tc.get_or_add_tcPr()
                    tcVAlign = OxmlElement('w:vAlign')
                    tcVAlign.set(qn('w:val'), "center")
                    tcPr.append(tcVAlign)
    # Write the answer-key file: the un-filled grid, answers in red,
    # filler cells ('0') in white.
    answ_doc = Document()
    answer_table = answ_doc.add_table(rows=self.height, cols=self.width, style='Table Grid')
    answer_table.alignment = WD_TABLE_ALIGNMENT.CENTER
    for i, row in enumerate(answer_table.rows):
        # Set the row height via the tr XML, same as the puzzle grid.
        # accessing row xml and setting tr height
        tr = row._tr
        trPr = tr.get_or_add_trPr()
        trHeight = OxmlElement('w:trHeight')
        trHeight.set(qn('w:val'), str(self.set_height))
        trHeight.set(qn('w:hRule'), "atLeast")
        trPr.append(trHeight)
        for j, cell in enumerate(row.cells):
            # Set the cell width.
            cell.width = Inches(8)
            cell.text = self.puzzle_origin[i][j]
            for paragraph in cell.paragraphs:
                # Center horizontally, bold.
                paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
                paragraph.style.font.bold = True
                if cell.text == '0':
                    for run in paragraph.runs:
                        run.font.color.rgb = RGBColor(255, 255, 255)
                else:
                    for run in paragraph.runs:
                        run.font.color.rgb = RGBColor(255, 0, 0)
                # Center vertically.
                tc = cell._tc
                tcPr = tc.get_or_add_tcPr()
                tcVAlign = OxmlElement('w:vAlign')
                tcVAlign.set(qn('w:val'), "center")
                tcPr.append(tcVAlign)
    # NOTE(review): '\{' is not a recognized escape, so the backslash is kept
    # literally (Windows path separator); also .docx content is saved with a
    # .hwp extension here, and the final message mentions .docx — kept as-is.
    answ_doc.save(str(self.desktop) + '\{}_정답.hwp'.format(filename))
    document.save(str(self.desktop) +'\{}.hwp'.format(filename))
    print("바탕화면에 puzzle.docx와 puzzle.hwp 로 저장")
def trans_pdf(file_name, path):
    """Translate a PDF page-by-page (via Google Translate) into a new PDF and a .docx.

    Parameters:
        file_name: original file name; used to derive the 'translated_*' output
            names (``file_name[:-4]`` assumes a 3-character extension, e.g. '.pdf').
        path: filesystem path of the source PDF.

    Returns 'success' on completion, or 'fail' if any exception occurred (the
    partially translated .docx is still saved in that case).
    """
    t0 = time.time()
    cur_pdf = fitz.open(path)   # source PDF to translate
    new_pdf = fitz.open()       # translated PDF being built
    new_docx = Document()       # translated .docx being built
    # Use the SimSun (宋体) font for the generated docx, including East-Asian runs.
    new_docx.styles['Normal'].font.name = u'宋体'
    new_docx.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')
    i = 0            # page counter (page 0 gets special title/author formatting)
    bytes_array = 0  # unused placeholder kept from the original
    try:
        for cur_page in cur_pdf:
            img_list = cur_page.getImageList()  # image objects on this page
            # print(cur_page.getFontList())  # fonts used by the page
            # print(img_list)
            imgcount = 0
            for img in img_list:
                # Rebuild each page image as a Pixmap; img[1] is the smask (alpha) xref.
                pix_temp1 = fitz.Pixmap(cur_pdf, img[0])
                if img[1]:
                    pix_temp2 = fitz.Pixmap(cur_pdf, img[1])
                    pix_temp = fitz.Pixmap(pix_temp1)
                    pix_temp.setAlpha(pix_temp2.samples)
                else:
                    pix_temp = pix_temp1
                imgcount += 1
                new_name = "图片{}.png".format(imgcount)  # numbered temp image name
                pix_temp.writeImage(
                    os.path.join(settings.BASE_DIR, 'trans', 'output_file', new_name))
                # bytes_array = pix_temp.getImageData('png')  # alternative: keep bytes in memory
                # print(pix_temp.getImageData('png'))
                pix_temp = None  # release the pixmap
            print('当前正在翻译翻译第{}页...'.format(int(str(cur_page).split(' ')[1]) + 1))
            # Text blocks of the input page.
            blks = cur_page.getTextBlocks(flags=4)
            # Create a new output page with the same size as the source page.
            new_page = new_pdf.newPage(-1,
                                       width=cur_page.MediaBoxSize[0],
                                       height=cur_page.MediaBoxSize[1])
            img = new_page.newShape()  # prepare /Contents object
            disp = fitz.Rect(cur_page.CropBoxPosition, cur_page.CropBoxPosition)
            croprect = cur_page.rect + disp
            # img.drawRect(croprect)              # (debug) outline the whole page
            # img.finish(color=gray, fill=gray)   # (debug) fill it
            begin = (0, 0, 0, 0)  # bbox of the first block of the current merged run
            end = (0, 0, 0, 0)    # bbox of the last block of the current merged run
            flag = 0              # 1 while accumulating adjacent blocks into `content`
            reference_flag = 0    # 1 once the bibliography section has been reached
            blks.append((1, 2, 3, 6))  # sentinel so the last real block still flushes
            content = ""
            imgcount = 0
            fonts = 9
            for num in range(len(blks)):
                # Sentinel reached: the artificial last element is only for lookahead.
                if num == len(blks) - 1:
                    break
                # Image block: copy the previously extracted image into both outputs.
                if blks[num][-1] == 1:
                    print('图像:::', blks[num][4])
                    imgcount += 1
                    # Rectangle where the picture goes on the page.
                    img_r = blks[num][:4]
                    image_path = os.path.join(settings.BASE_DIR, 'trans', 'output_file',
                                              '图片{}.png'.format(imgcount))
                    # NOTE(review): this file handle is never closed.
                    img = open(image_path, "rb").read()
                    new_page.insertImage(img_r, stream=img, keep_proportion=True)
                    # Fixed 3-inch width for the picture in the docx.
                    new_docx.add_picture(image_path, width=Inches(3))
                    os.remove(image_path)  # temp image no longer needed
                    continue
                # Pick default font size and alignment for this block.
                if i == 0:
                    # First page: title, authors, affiliations are centered.
                    if num == 0 or num == 1:
                        fonts = 15
                        text_pos = fitz.TEXT_ALIGN_CENTER
                    elif num == 2:
                        fonts = 10
                        text_pos = fitz.TEXT_ALIGN_CENTER
                    elif num == 3:
                        fonts = 10
                        text_pos = fitz.TEXT_ALIGN_CENTER
                    else:
                        fonts = 10
                        text_pos = fitz.TEXT_ALIGN_LEFT
                else:
                    fonts = 10
                    text_pos = fitz.TEXT_ALIGN_LEFT
                # Remember the starting block of the page.
                if num == 0:
                    begin = blks[0][:4]
                    content = blks[0][4].replace("\n", " ")
                # Block rectangle: (x0, y0) is top-left, (x1, y1) bottom-right.
                r = fitz.Rect(blks[num][:4])
                if num < len(blks) - 1:
                    # Vertically adjacent blocks (gap <= 1.0) belong to one paragraph.
                    if (abs(blks[num + 1][1] - blks[num][3]) <= 1.0
                            and abs(blks[num + 1][1] - blks[num][3]) >= 0):
                        # Inside the bibliography: translate block-by-block, small font.
                        if reference_flag == 1:
                            trans_pragraph = blks[num][4].replace("\n", " ")
                            res = translate_func.google_translate(
                                trans_pragraph).replace(' ', '')
                            new_page.insertTextbox(
                                r,
                                res,
                                fontname="song",
                                fontfile=os.path.join(
                                    settings.BASE_DIR,
                                    'trans/static/fonts/SimSun.ttf'),
                                fontsize=7,
                                align=text_pos)
                        # Otherwise keep accumulating the paragraph.
                        else:
                            flag = 1
                            # Track the bottom-right corner of the merged run.
                            end = blks[num + 1][:4]
                            content += blks[num + 1][4].replace("\n", " ")
                            # print('content::',content)
                    # The next block is vertically far away: flush the buffered run.
                    else:
                        if flag == 1:
                            # img.drawRect(fitz.Rect(end[0],begin[1],end[2],end[3]))
                            # Translate the merged paragraph (strip spaces between CJK chars).
                            res = translate_func.google_translate(
                                content).replace(' ', '')
                            new_docx.add_paragraph(res)  # mirror into the docx
                            # print('content:',content)
                            # print(res)
                            # NOTE(review): the original comment said "less than" but the
                            # test is '>' — either way the wider x-extent is used below.
                            if begin[2] > end[2]:
                                new_page.insertTextbox(
                                    fitz.Rect(end[0], begin[1], begin[2], end[3]),
                                    res,
                                    fontname="song",
                                    fontfile=os.path.join(
                                        settings.BASE_DIR,
                                        'trans/static/fonts/SimSun.ttf'),
                                    fontsize=fonts,
                                    align=text_pos)
                            else:
                                new_page.insertTextbox(
                                    fitz.Rect(end[0], begin[1], end[2], end[3]),
                                    res,
                                    fontname="song",
                                    fontfile=os.path.join(
                                        settings.BASE_DIR,
                                        'trans/static/fonts/SimSun.ttf'),
                                    fontsize=fonts,
                                    align=text_pos)
                            flag = 0
                        else:
                            # img.drawRect(r)
                            # Stand-alone block: newline -> space before translating.
                            trans_pragraph = blks[num][4].replace("\n", " ")
                            # Figure caption? Render small and centered.
                            if is_figure(trans_pragraph.replace(' ', '')):
                                res = translate_func.google_translate(
                                    trans_pragraph).replace(' ', '')
                                new_page.insertTextbox(
                                    r,
                                    res,
                                    fontname="song",
                                    fontfile=os.path.join(
                                        settings.BASE_DIR,
                                        'trans/static/fonts/SimSun.ttf'),
                                    fontsize=7,
                                    align=fitz.TEXT_ALIGN_CENTER)
                            # Bibliography heading: everything after it is references.
                            elif is_reference(trans_pragraph.replace(' ', '')):
                                reference_flag = 1
                                new_page.insertTextbox(
                                    r,
                                    '参考文献',
                                    fontname="song",
                                    fontfile=os.path.join(
                                        settings.BASE_DIR,
                                        'trans/static/fonts/SimSun.ttf'),
                                    fontsize=fonts,
                                    align=text_pos)
                            else:
                                # Regular paragraph: translate and strip CJK spaces.
                                res = translate_func.google_translate(
                                    trans_pragraph).replace(' ', '')
                                # Mirror into the docx.
                                new_docx.add_paragraph(res)
                                if reference_flag == 1:
                                    new_page.insertTextbox(
                                        r,
                                        res,
                                        fontname="song",
                                        fontfile=os.path.join(
                                            settings.BASE_DIR,
                                            'trans/static/fonts/SimSun.ttf'),
                                        fontsize=7,
                                        align=text_pos)
                                else:
                                    new_page.insertTextbox(
                                        r,
                                        res,
                                        fontname="song",
                                        fontfile=os.path.join(
                                            settings.BASE_DIR,
                                            'trans/static/fonts/SimSun.ttf'),
                                        fontsize=fonts,
                                        align=text_pos)
                        # Start a new merged run at the following block.
                        begin = blks[num + 1][:4]
                        try:
                            content = blks[num + 1][4].replace("\n", " ")
                            # print('content:::',content)
                        except:
                            # The sentinel tuple has no element at index 4.
                            pass
                            # print('记录content失败!')
            # img.finish(width=0.3)
            # img.commit()
            i += 1  # next page
    # On any failure keep what was translated so far and report failure.
    except Exception as error:
        print('翻译过程出现异常如下:\n', error)
        # Path for the translated pdf (not saved on failure):
        # new_pdf_name = os.path.join(settings.BASE_DIR, 'trans', 'output_file', 'translated_' + file_name)
        # Path for the translated docx:
        new_docx_name = os.path.join(settings.BASE_DIR, 'trans', 'output_file',
                                     'translated_' + file_name[:-4] + '.docx')
        new_docx.save(new_docx_name)
        # new_pdf.save(new_pdf_name, garbage=4, deflate=True, clean=True)
        return 'fail'
    # Save both outputs.
    # Path for the translated pdf:
    new_pdf_name = os.path.join(settings.BASE_DIR, 'trans', 'output_file',
                                'translated_' + file_name)
    # Path for the translated docx:
    new_docx_name = os.path.join(settings.BASE_DIR, 'trans', 'output_file',
                                 'translated_' + file_name[:-4] + '.docx')
    new_docx.save(new_docx_name)
    new_pdf.save(new_pdf_name, garbage=4, deflate=True, clean=True)
    t1 = time.time()
    print("Total translation time: %g sec" % (t1 - t0))
    return 'success'
def addPicture(self, picturePath):
    """Append the pre-rendered plot image '<picturePath>_pl4.png' to the document.

    The image is inserted at a fixed width of 3 inches.
    """
    # The renderer writes its output next to picturePath with a '_pl4.png' suffix.
    target_file = picturePath + '_pl4.png'
    self.__document.add_picture(target_file, width=Inches(3))
# Demo script: build a sample .docx exercising headings, styled runs, list
# styles, a picture and a simple table (mirrors the python-docx quick-start).
document = Document()
document.add_heading('Document Title', 0)
p = document.add_paragraph('A plain paragraph having some ')
p.add_run('bold').bold = True
p.add_run(' and some ')
p.add_run('italic.').italic = True
document.add_heading('Heading, level 1', level=1)
document.add_paragraph('Intense quote', style='Intense Quote')
document.add_paragraph('first item in unordered list', style='List Bullet')
document.add_paragraph('first item in ordered list', style='List Number')
# Requires 'Test_gps.jpg' to exist in the working directory.
document.add_picture('Test_gps.jpg', width=Inches(1.25))
# (qty, id, description) rows for the demo table.
records = ((3, '101', 'Spam'), (7, '422', 'Eggs'),
           (4, '631', 'Spam, spam, eggs, and spam'))
# NOTE(review): rgbColor is not used within this span — presumably consumed
# further down the file; confirm before removing.
rgbColor = RGBColor(0, 255, 0)
table = document.add_table(rows=1, cols=3)
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Qty'
hdr_cells[1].text = 'Id'
hdr_cells[2].text = 'Desc'
# One table row per record; qty is an int and must be stringified.
for qty, id, desc in records:
    row_cells = table.add_row().cells
    row_cells[0].text = str(qty)
    row_cells[1].text = id
    row_cells[2].text = desc
# NOTE(review): unused here — likely parsed into an RGBColor later; confirm.
tmp = "0,255,0"
# Strip @mentions from the tweet text before building the word cloud.
# (`text`, `document` and `api` are defined earlier in this script.)
text = re.sub(r'@\S+', '', text, flags=re.MULTILINE)
print(text)
# Extend the stock stop-word list with domain noise words.
stopwords = set(STOPWORDS)
stopwords.update([
    "drink", "now", "wine", "flavor", "flavors", "RT", "Rashmika",
    "Mandanna", "cruz", "know"
])
# mask = np.array(Image.open(requests.get('http://www.clker.com/cliparts/O/i/x/Y/q/P/yellow-house-hi.png', stream=True).raw))
# Shape the cloud with a local mask image and colour the words from the mask.
mask = np.array(Image.open('rash1.png'))
wordcloud = WordCloud(stopwords=stopwords,
                      background_color="white",
                      max_words=100,
                      mask=mask).generate(text)
image_colors = ImageColorGenerator(mask)
plt.figure(figsize=[7, 7])
plt.imshow(wordcloud.recolor(color_func=image_colors),
           interpolation="bilinear")
plt.axis("off")
plt.title('Summarization of Rashmika Tweets.')
plt.savefig('Rush.png')
# Embed the rendered cloud into the Word document at 5 inches wide.
document.add_picture('Rush.png', width=Inches(5))
# NOTE(review): saved without a .docx extension — probably meant 'Report.docx'.
document.save("Report")
# Overwrites the matplotlib figure saved above with the raw cloud image.
wordcloud.to_file("Rush.png")
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
# Tweet the generated image via the pre-configured tweepy API object.
api.update_with_media(filename='Rush.png', status='Rashmika Tweet summary')
plt.show()
def take_picture(file):
    """Capture one webcam frame and append it to the Word document at *file*.

    Side effects: calls the sibling read() helper, shells out to fswebcam to
    capture 'test.jpg' (assumes the working directory is /home/pi — TODO
    confirm), then embeds the image and re-saves *file* in place.
    """
    read()
    # -r sets the resolution; -S skips 8 frames so the camera can auto-adjust.
    os.system('sudo fswebcam -r 640x420 test.jpg -S 8')
    document = Document(file)
    document.add_picture('/home/pi/test.jpg', width=Inches(1.25))
    document.save(file)
def main(param): docx_name, chessboard_cm, width_cm, height_cm = param # 获取参数 chessboard_pixel = chessboard_cm * scale # 棋盘格单元对应像素 width = round(width_cm / chessboard_cm) # 棋盘格角点列数 height = round(height_cm / chessboard_cm) # 棋盘格角点行数 width_pix = round(width_cm * scale) # 棋盘格列像素 height_pix = round(height_cm * scale) # 棋盘格行像素 size = width_cm / height_cm # 棋盘格长宽比例(用于最后的裁切) image = np.zeros((height_pix, width_pix, 3), dtype=np.uint8) # 生成对应像素图片 image.fill(255) # 填充白色 color = (255, 255, 255) fill_color = 0 for j in range(0, height + 1): # 绘制棋盘 y = round(j * chessboard_pixel) for i in range(0, width + 1): x0 = round(i * chessboard_pixel) y0 = y rect_start = (x0, y0) x1 = round(x0 + chessboard_pixel) y1 = round(y0 + chessboard_pixel) rect_end = (x1, y1) cv2.rectangle(image, rect_start, rect_end, color, 1, 0) image[y0:y1, x0:x1] = fill_color if width % 2: if i != width: fill_color = (0 if (fill_color == 255) else 255) else: if i != width + 1: fill_color = (0 if (fill_color == 255) else 255) bottom = round(width_pix / size) # 获取按cm比例对应的行像素 if bottom < height_pix: image = image[0:bottom, :, :] # 裁切 # 创建显示窗口 win_name = "chessboard" # cv.namedWindow(win_name, cv.WINDOW_NORMAL) cv2.imwrite(win_name + ".bmp", image) # cv.imshow(win_name, image) # cv.waitKey() doc = Document() # 以默认模板建立文档对象 distance = Inches(0) sec = doc.sections[0] # 页边距0英寸 sec.left_margin = distance # 以下依次设置左、右、上、下页面边距 sec.right_margin = distance sec.top_margin = distance sec.bottom_margin = distance sec.page_width = Cm(width_cm) # 设置页面宽度 sec.page_height = Cm(height_cm) # 设置页面高度 img_encode = cv2.imencode('.bmp', image)[1] # 对opencv图片进行编码 str_encode = img_encode.tostring() cc = io.BytesIO(str_encode) # img = doc.add_picture(cc, Cm(42.01)) doc.add_picture(cc) # 插入图片至word # doc.add_picture(win_name + ".bmp") doc.save(docx_name) # 保存图像
def Word(inf, lbsz, f, f2, fname, name_vl, NS, var, grop, zpps, per_name,
         D_file, Trig, tablepo):
    """Build the 'grounding mode' report (.docx) for an overhead line (ВЛ).

    Parameters (inferred from usage — confirm against callers):
        inf      - per-branch records; inf[h][1] is a 'left <sep> right' name
                   pair split on Razd_simv, inf[h][0] is a branch type code
                   (1/2/3), inf[h][2]/inf[h][3] are end-support indices,
                   inf[h][5]/inf[h][6] are grounded-at-left/right flags.
        lbsz     - per-branch grounding entries [support_no, kind, value].
        f        - per-branch work sections: f[i][j] = [[op1, op2], forbidden].
        f2       - per-branch lookup: f2[i][support] truthy => work forbidden.
        fname    - output path used when Trig is falsy.
        name_vl  - display name of the line.
        NS, var  - scheme number and optional variant suffix for headings.
        grop     - when truthy, trims/ignores sections adjacent to end supports.
        zpps     - extra 'forbidden at substation' condition flag.
        per_name - per-branch {original support -> renamed support} maps.
        D_file   - existing Document to append to when Trig is truthy.
        Trig     - append mode: return the document instead of saving it.
        tablepo  - which parts to emit (tables and/or diagrams; Russian labels).

    Returns the Document when Trig is truthy; otherwise saves to *fname* and
    returns None.  Uses module-level Razd_simv and python-docx/PIL helpers.
    """
    # Create (or reuse) the document object.
    if Trig:
        # 'template_word/default.docx'
        document = D_file
    else:
        document = Document()
    # Inherit the 'Normal' style and adjust it.
    style = document.styles['Normal']
    f0 = style.font        # handle for tweaking style parameters
    f0.name = 'Arial'      # font family
    f0.size = Pt(12)       # font size
    pf = style.paragraph_format
    pf.line_spacing = Pt(0)   # line spacing
    pf.space_after = Pt(0)    # spacing after a paragraph
    # Page geometry: A4 portrait with asymmetric margins.
    sections = document.sections
    s = sections[0]
    s.left_margin = Mm(30)
    s.right_margin = Mm(15)
    s.top_margin = Mm(20)
    s.bottom_margin = Mm(20)
    s.page_height = Mm(297)
    s.page_width = Mm(210)
    # Which report parts to produce (tb1 = tables, tb2 = voltage diagrams).
    # NOTE(review): tb1/tb2 stay unbound if tablepo matches none of these
    # strings — confirm the caller always passes one of the three values.
    if tablepo == 'Выводить таблицы и эпюры':
        tb1 = True
        tb2 = True
    elif tablepo == 'Выводить таблицы':
        tb1 = True
        tb2 = False
    elif tablepo == 'Выводить эпюры':
        tb1 = False
        tb2 = True

    # Helper: vertical alignment of a table cell via the underlying XML.
    def set_cell_vertical_alignment(cell, align="center"):
        try:
            tc = cell._tc
            tcPr = tc.get_or_add_tcPr()
            tcValign = OxmlElement('w:vAlign')
            tcValign.set(qn('w:val'), align)
            tcPr.append(tcValign)
            return True
        except:
            # traceback.print_exc()
            return False

    p0 = ['', '', '', '']
    # Optional '.variant' suffix appended to the scheme number in headings.
    sv = '.' + var if var != '' else ''
    if tb1:
        # Report header block.
        if not Trig:
            # Removes all paragraphs (something must be added afterwards).
            document._body.clear_content()
        p0[0] = document.add_paragraph()
        p0[0].add_run('РЕЖИМ ЗАЗЕМЛЕНИЯ').bold = True
        pt_f1 = p0[0].paragraph_format
        pt_f1.alignment = WD_ALIGN_PARAGRAPH.CENTER
        pt_f1.space_after = Pt(2)
        p0[1] = document.add_paragraph()
        p0[1].add_run(name_vl).bold = True
        pt_f1 = p0[1].paragraph_format
        pt_f1.alignment = WD_ALIGN_PARAGRAPH.CENTER
        pt_f1.space_after = Pt(2)
        p0[2] = document.add_paragraph()
        p0[2].add_run('Безопасность производства работ на участках ВЛ '\
            +'обеспечивается при следующих Схемах заземления ВЛ:').bold = True
        pt_f1 = p0[2].paragraph_format
        pt_f1.alignment = WD_ALIGN_PARAGRAPH.CENTER
        pt_f1.space_after = Pt(2)
        p0[3] = document.add_paragraph()
        p0[3].add_run('СХЕМА ' + NS + sv + ' ЗАЗЕМЛЕНИЯ').bold = True
        pt_f1 = p0[3].paragraph_format
        pt_f1.alignment = WD_ALIGN_PARAGRAPH.CENTER
        pt_f1.space_after = Pt(2)
    # Grounding information: s1 collects grounded points, s2 ungrounded ones.
    s1 = []
    s2 = []
    re_op_key = list(per_name.keys())  # branch keys for the rename maps
    c1 = False  # True once s1 is non-empty
    c2 = False  # True once s2 is non-empty
    v = []
    inf1 = []   # pretty 'left <sep> right' names, NBSP-protected
    for h in range(len(inf)):
        ss1, ss2 = inf[h][1].split(Razd_simv, 1)
        # Replace spaces with NBSP so names do not wrap mid-word.
        ss1 = (ss1.strip()).replace(" ", "\u00A0")
        ss2 = (ss2.strip()).replace(" ", "\u00A0")
        inf1.append(ss1 + " " + Razd_simv + " " + ss2)
        # Type 1: substations at both ends; 2: only left; 3: only right.
        if inf[h][0] == 1:
            if inf[h][5]:
                s1.append(ss1)
                c1 = True
            else:
                s2.append(ss1)
                c2 = True
            if inf[h][6]:
                s1.append(ss2)
                c1 = True
            else:
                s2.append(ss2)
                c2 = True
        elif inf[h][0] == 2:
            if inf[h][5]:
                s1.append(ss1)
                c1 = True
            else:
                s2.append(ss1)
                c2 = True
        elif inf[h][0] == 3:
            if inf[h][6]:
                s1.append(ss2)
                c1 = True
            else:
                s2.append(ss2)
                c2 = True
        # v.append(i)
        # Collect per-branch tower groundings, applying support renames.
        make_zy_list = []
        for j in range(len(lbsz[h])):
            if lbsz[h][j][0] in per_name[re_op_key[h]]:
                new_op = per_name[re_op_key[h]][lbsz[h][j][0]]
            else:
                new_op = lbsz[h][j][0]
            c1 = True
            # s1.append('Опора №'+str(new_op)+' '+('('+lbsz[h][j][1]+' '+lbsz[h][j][2].replace(",",".")+')').replace(" ","\u00A0")+' на участке '+inf1[h])
            make_zy_list.append([
                new_op,
                ('(' + lbsz[h][j][1] + ' ' +
                 lbsz[h][j][2].replace(",", ".") + ')').replace(" ", "\u00A0"),
                inf1[h]
            ])
        # Group grounding entries by the branch they belong to.
        vetvi_dict = {}
        for j in range(len(make_zy_list)):
            if make_zy_list[j][2] not in vetvi_dict:
                vetvi_dict[make_zy_list[j][2]] = [j]
            else:
                vetvi_dict[make_zy_list[j][2]].append(j)
        # if len(inf)==1:
        for key_name in vetvi_dict:
            for j in vetvi_dict[key_name]:
                s1.append('Опора №' + str(make_zy_list[j][0]) + ' ' +
                          make_zy_list[j][1])
                # Mention the branch only when there is more than one.
                if len(inf) > 1:
                    s1[len(s1) - 1] += ' на участке ' + key_name
    # Sections to ignore (trimmed away next to the end supports).
    ign_f = set()
    for i in range(len(inf)):
        if grop:
            for j in range(len(f[i])):
                # Shrink/merge sections touching the end supports; very short
                # sections adjacent to an end are dropped entirely.
                if inf[i][0] == 1 and abs(inf[i][2] - inf[i][3]) >= 3:
                    if inf[i][2] == f[i][j][0][0]:
                        if f[i][j][0][1] == inf[i][2] + 1 or f[i][j][0][
                                1] == inf[i][2] - 1:
                            ign_f.add((i, j))
                        elif inf[i][2] < inf[i][3]:
                            f[i][j][0][0] = inf[i][2] + 1
                        elif inf[i][2] > inf[i][3]:
                            f[i][j][0][0] = inf[i][2] - 1
                    if inf[i][3] == f[i][j][0][1]:
                        if f[i][j][0][0] == inf[i][3] - 1 or f[i][j][0][
                                0] == inf[i][3] + 1:
                            ign_f.add((i, j))
                        elif inf[i][2] < inf[i][3]:
                            f[i][j][0][1] = inf[i][3] - 1
                        elif inf[i][2] > inf[i][3]:
                            f[i][j][0][1] = inf[i][3] + 1
                if inf[i][0] == 2 and abs(inf[i][2] - inf[i][3]) >= 2:
                    if inf[i][2] == f[i][j][0][0]:
                        if f[i][j][0][1] == inf[i][2] + 1 or f[i][j][0][
                                1] == inf[i][2] - 1:
                            f[i][j] = None
                        elif inf[i][2] < inf[i][3]:
                            f[i][j][0][0] = inf[i][2] + 1
                        elif inf[i][2] > inf[i][3]:
                            f[i][j][0][0] = inf[i][2] - 1
                if inf[i][0] == 3 and abs(inf[i][2] - inf[i][3]) >= 2:
                    if inf[i][3] == f[i][j][0][1]:
                        if f[i][j][0][0] == inf[i][3] - 1 or f[i][j][0][
                                0] == inf[i][3] + 1:
                            f[i][j] = None
                        elif inf[i][2] < inf[i][3]:
                            f[i][j][0][1] = inf[i][3] - 1
                        elif inf[i][2] > inf[i][3]:
                            f[i][j][0][1] = inf[i][3] + 1
    # Tables part: loop over branches.
    p1 = []
    if tb1:
        for i in range(len(inf)):
            p1.append(['', '', '', '', ''])
            # Branch name.
            p1[i][0] = document.add_paragraph()
            p1[i][0].add_run(inf1[i]).bold = True
            pt_f = p1[i][0].paragraph_format
            pt_f.alignment = WD_ALIGN_PARAGRAPH.CENTER
            pt_f.space_after = Pt(2)
            if c1:
                # Grounded points list.
                p1[i][1] = document.add_paragraph()
                pt_f = p1[i][1].paragraph_format
                pt_f.first_line_indent = Mm(12.5)
                pt_f.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY  # justified
                p1[i][1].add_run('ВЛ заземлена: ').bold = True
                for j in range(len(s1)):
                    if j == len(s1) - 1:
                        p1[i][1].add_run(s1[j] + '.')
                    else:
                        p1[i][1].add_run(s1[j] + ', ')
            if c2:
                # Ungrounded points list.
                p1[i][2] = document.add_paragraph()
                pt_f = p1[i][2].paragraph_format
                pt_f.first_line_indent = Mm(12.5)
                pt_f.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY  # justified
                p1[i][2].add_run('ВЛ разземлена: ').bold = True
                for j in range(len(s2)):
                    if j == len(s2) - 1:
                        p1[i][2].add_run(s2[j] + '.')
                    else:
                        p1[i][2].add_run(s2[j] + ', ')
            # Grounding scheme picture.
            p1[i][3] = document.add_picture('result_schemes/' + str(i) + '.jpg',
                                            width=Inches(6.5))
            # last_paragraph = document.paragraphs[-1]
            # last_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
            # Table header.
            p1[i][4] = document.add_table(
                rows=1, cols=2, style='Table Grid')  # sized & styled table
            p_t = p1[i][4].style.paragraph_format
            # Keep the table together with the next paragraph (page flow).
            p_t.keep_with_next = True
            row_1 = p1[i][4].rows[0]  # header row
            row_1.cells[0].text = 'Участки ВЛ'
            r = row_1.cells[0].paragraphs[0].runs[0]  # make it bold below
            row_1.cells[0].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.LEFT
            set_cell_vertical_alignment(
                row_1.cells[0], align="center")  # vertical centering
            r.font.bold = True
            row_1.cells[1].text = 'Разрешение на выполнение работ'
            row_1.cells[1].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.LEFT
            set_cell_vertical_alignment(
                row_1.cells[1], align="center")  # vertical centering
            r = row_1.cells[1].paragraphs[0].runs[0]
            r.font.bold = True
            ss1, ss2 = inf[i][1].split(Razd_simv, 1)
            ss1 = (ss1.strip()).replace(" ", "\u00A0")
            ss2 = (ss2.strip()).replace(" ", "\u00A0")
            # Left substation row (if present).
            if inf[i][0] == 1 or inf[i][0] == 2:
                row_cells = p1[i][4].add_row().cells  # new table row
                row_cells[0].text = ss1
                set_cell_vertical_alignment(
                    row_cells[0], align="center")  # vertical centering
                # NOTE(review): `inf[h]` here uses the leftover loop variable h
                # from the earlier loop — probably intended inf[i]; confirm.
                if f2[i][inf[i][2]] or (zpps == True and inf[i][5] == False
                                        and (inf[h][0] == 1 or inf[h][0] == 2)):
                    row_cells[1].text = 'Запрещено'
                    r = row_cells[1].paragraphs[0].runs[0]
                    r.font.bold = True
                    set_cell_vertical_alignment(
                        row_cells[1], align="center")  # vertical centering
                else:
                    row_cells[1].text = 'Разрешено'
                    set_cell_vertical_alignment(
                        row_cells[1], align="center")  # vertical centering
            # Line sections.
            for j in range(len(f[i])):
                if (i, j) in ign_f:
                    continue
                row_cells = p1[i][4].add_row().cells  # new table row
                if f[i][j][0][0] in per_name[re_op_key[i]]:
                    new_op1 = per_name[re_op_key[i]][f[i][j][0][0]]
                else:
                    new_op1 = f[i][j][0][0]
                if f[i][j][0][1] in per_name[re_op_key[i]]:
                    new_op2 = per_name[re_op_key[i]][f[i][j][0][1]]
                else:
                    new_op2 = f[i][j][0][1]
                row_cells[0].text = 'Опоры № ' + str(new_op1) + '-' + str(
                    new_op2)
                set_cell_vertical_alignment(
                    row_cells[0], align="center")  # vertical centering
                if f[i][j][1]:
                    row_cells[1].text = 'Запрещено'
                    set_cell_vertical_alignment(
                        row_cells[1], align="center")  # vertical centering
                    r = row_cells[1].paragraphs[0].runs[0]
                    r.font.bold = True
                else:
                    row_cells[1].text = 'Разрешено'
                    set_cell_vertical_alignment(
                        row_cells[1], align="center")  # vertical centering
            # Right substation row (if present).
            if inf[i][0] == 1 or inf[i][0] == 3:
                row_cells = p1[i][4].add_row().cells  # new table row
                row_cells[0].text = ss2
                set_cell_vertical_alignment(
                    row_cells[0], align="center")  # vertical centering
                # NOTE(review): same leftover-`h` concern as the left-side row.
                if f2[i][inf[i][3]] or (zpps == True and inf[i][6] == False
                                        and (inf[h][0] == 1 or inf[h][0] == 3)):
                    row_cells[1].text = 'Запрещено'
                    set_cell_vertical_alignment(
                        row_cells[1], align="center")  # vertical centering
                    r = row_cells[1].paragraphs[0].runs[0]
                    r.font.bold = True
                else:
                    row_cells[1].text = 'Разрешено'
                    set_cell_vertical_alignment(
                        row_cells[1], align="center")  # vertical centering
            document.add_paragraph()  # spacer after each branch table
        # NOTE(review): reconstructed indentation — the page break is assumed
        # to run once after all branch tables, before the diagrams part.
        if tb2:
            document.add_page_break()  # page break
    # Diagrams part.
    if tb2:
        # Grounding information, recomputed for the diagram pages.
        s1 = []
        s3 = []
        s2 = []
        c1 = False
        c2 = False
        c3 = False
        for i in range(len(inf)):
            ss1, ss2 = inf[i][1].split(Razd_simv, 1)
            ss1 = (ss1.strip()).replace(" ", "\u00A0")
            ss2 = (ss2.strip()).replace(" ", "\u00A0")
            if inf[i][0] == 1:
                if inf[i][5]:
                    s1.append(ss1)
                    c1 = True
                else:
                    s2.append(ss1)
                    c2 = True
                if inf[i][6]:
                    s1.append(ss2)
                    c1 = True
                else:
                    s2.append(ss2)
                    c2 = True
            elif inf[i][0] == 2:
                if inf[i][5]:
                    s1.append(ss1)
                    c1 = True
                else:
                    s2.append(ss1)
                    c2 = True
            elif inf[i][0] == 3:
                if inf[i][6]:
                    s1.append(ss2)
                    c1 = True
                else:
                    s2.append(ss2)
                    c2 = True
            # Tower groundings (renamed), same scheme as the tables part.
            make_zy_list = []
            for j in range(len(lbsz[i])):
                if lbsz[i][j][0] in per_name[re_op_key[i]]:
                    new_op = per_name[re_op_key[i]][lbsz[i][j][0]]
                else:
                    new_op = lbsz[i][j][0]
                c3 = True
                # s3.append('№'+str(new_op)+' '+('('+lbsz[i][j][1]+' '+lbsz[i][j][2].replace(",",".")+')').replace(" ","\u00A0")+' на участке '+inf1[i])
                make_zy_list.append([
                    new_op,
                    ('(' + lbsz[i][j][1] + ' ' +
                     lbsz[i][j][2].replace(",", ".") + ')').replace(
                         " ", "\u00A0"),
                    inf1[i]
                ])
            vetvi_dict = {}
            for j in range(len(make_zy_list)):
                if make_zy_list[j][2] not in vetvi_dict:
                    vetvi_dict[make_zy_list[j][2]] = [j]
                else:
                    vetvi_dict[make_zy_list[j][2]].append(j)
            # if len(inf)==1:
            for key_name in vetvi_dict:
                for j in vetvi_dict[key_name]:
                    s3.append('№' + str(make_zy_list[j][0]) + ' ' +
                              make_zy_list[j][1])
                    if len(inf) > 1:
                        s3[len(s3) - 1] += ' на участке ' + key_name
        # Loop over branches: one diagram page per branch.
        p = []
        for i in range(len(inf)):
            p.append([
                '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
                '', '', ''
            ])
            # Line name.  NOTE(review): p[i][15] is reassigned to a picture
            # further below — the paragraph reference is lost; confirm intent.
            p[i][15] = document.add_paragraph()
            p[i][15].add_run(name_vl).bold = True
            pt_f = p[i][15].paragraph_format
            pt_f.alignment = WD_ALIGN_PARAGRAPH.CENTER
            pt_f.space_after = Pt(2)
            # Heading.
            p[i][0] = document.add_paragraph()
            if len(inf) == 1:
                p[i][0].add_run('СХЕМА ' + NS + sv + ' ЗАЗЕМЛЕНИЯ').bold = True
            else:
                p[i][0].add_run('СХЕМА ' + NS + sv + ' ЗАЗЕМЛЕНИЯ').bold = True
                # p[i][0].add_run('СХЕМА '+NS+sv+'.'+str(i+1)+' ЗАЗЕМЛЕНИЯ').bold = True
            pt_f = p[i][0].paragraph_format
            pt_f.alignment = WD_ALIGN_PARAGRAPH.CENTER
            pt_f.space_after = Pt(2)
            # Work section header.
            p[i][1] = document.add_paragraph()
            p[i][1].add_run('РАБОЧИЙ УЧАСТОК').bold = True
            pt_f = p[i][1].paragraph_format
            pt_f.alignment = WD_ALIGN_PARAGRAPH.CENTER
            pt_f.space_after = Pt(2)
            # Substations and towers where work is allowed.
            p[i][2] = document.add_paragraph('Опоры № ')
            ss1, ss2 = inf[i][1].split(Razd_simv, 1)
            ss1 = (ss1.strip()).replace(" ", "\u00A0")
            ss2 = (ss2.strip()).replace(" ", "\u00A0")
            # s4/s6: allowed/forbidden sections; s5/s7: allowed/forbidden substations.
            s4 = []
            s5 = []
            s6 = []
            s7 = []
            c4 = False
            c5 = False
            c6 = False
            c7 = False
            # Line sections.
            for j in range(len(f[i])):
                if (i, j) in ign_f:
                    continue
                if f[i][j][0][0] in per_name[re_op_key[i]]:
                    new_op1 = per_name[re_op_key[i]][f[i][j][0][0]]
                else:
                    new_op1 = f[i][j][0][0]
                if f[i][j][0][1] in per_name[re_op_key[i]]:
                    new_op2 = per_name[re_op_key[i]][f[i][j][0][1]]
                else:
                    new_op2 = f[i][j][0][1]
                if not f[i][j][1]:
                    s4.append(str(new_op1) + '-' + str(new_op2))
                    c4 = True
                elif f[i][j][1]:
                    s6.append(str(new_op1) + '-' + str(new_op2))
                    c6 = True
            # Left substation (if present).
            if inf[i][0] == 1 or inf[i][0] == 2:
                if f2[i][inf[i][2]] or (zpps == True and inf[i][5] == False and
                                        (inf[i][0] == 1 or inf[i][0] == 2)):
                    s7.append(ss1)
                    c7 = True
                else:
                    s5.append(ss1)
                    c5 = True
            # Right substation (if present).
            if inf[i][0] == 1 or inf[i][0] == 3:
                if f2[i][inf[i][3]] or (zpps == True and inf[i][6] == False and
                                        (inf[i][0] == 1 or inf[i][0] == 3)):
                    s7.append(ss2)
                    c7 = True
                else:
                    s5.append(ss2)
                    c5 = True
            if c4:
                for j in range(len(s4)):
                    if j == len(s4) - 1 and c5:
                        p[i][2].add_run(s4[j] + ', ')
                    elif j == len(s4) - 1 and not c5:
                        p[i][2].add_run(s4[j])
                    else:
                        p[i][2].add_run(s4[j] + ', ')
            if c5:
                for j in range(len(s5)):
                    if j == len(s5) - 1:
                        p[i][2].add_run(s5[j])
                    else:
                        p[i][2].add_run(s5[j] + ', ')
            pt_f = p[i][2].paragraph_format
            pt_f.alignment = WD_ALIGN_PARAGRAPH.CENTER
            pt_f.space_after = Pt(2)
            # Line grounding scheme heading.
            p[i][3] = document.add_paragraph()
            p[i][3].add_run('СХЕМА ЗАЗЕМЛЕНИЯ ВЛ').bold = True
            pt_f = p[i][3].paragraph_format
            pt_f.alignment = WD_ALIGN_PARAGRAPH.CENTER
            # Grounding scheme picture.
            p[i][4] = document.add_picture('result_schemes/' + str(i) + '.jpg',
                                           width=Inches(6.5))
            # last_paragraph = document.paragraphs[-1]
            # last_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
            # Grounding map.
            p[i][5] = document.add_paragraph()
            p[i][5].add_run('КАРТА ЗАЗЕМЛЕНИЯ').bold = True
            pt_f = p[i][5].paragraph_format
            pt_f.alignment = WD_ALIGN_PARAGRAPH.CENTER
            pt_f.space_after = Pt(2)
            k = 1  # running item number in the grounding map
            # Item 1: grounding info.
            p[i][6] = document.add_paragraph('1. ')
            pt_f = p[i][6].paragraph_format
            pt_f.first_line_indent = Mm(12.5)
            pt_f.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY  # justified
            if c1:
                p[i][6].add_run('Заземлить ВЛ на ПЗ на ')
                for j in range(len(s1)):
                    if j == len(s1) - 1:
                        p[i][6].add_run(s1[j] + '. ')
                    else:
                        p[i][6].add_run(s1[j] + ', ')
                k = 2
            if c3:
                # Singular/plural Russian ending depending on tower count.
                p[i][6].add_run('Заземлить ВЛ на опор%s ' %
                                ('e' if len(s3) == 1 else 'ах'))
                for j in range(len(s3)):
                    if j == len(s3) - 1:
                        p[i][6].add_run(s3[j] + '. ')
                    else:
                        p[i][6].add_run(s3[j] + ', ')
                k = 2
            if c2:
                p[i][6].add_run('Не заземлять ВЛ на ')
                for j in range(len(s2)):
                    if j == len(s2) - 1:
                        p[i][6].add_run(s2[j] + '. ')
                    else:
                        p[i][6].add_run(s2[j] + ', ')
                k = 2
            # Item 2: portable grounding (ЛЗ) at every work place.
            if c4:
                p[i][7] = document.add_paragraph(
                    str(k) +
                    '. Заземление каждого рабочего места на ВЛ - заземление типа ЛЗ.'
                )
                k += 1
                pt_f = p[i][7].paragraph_format
                pt_f.first_line_indent = Mm(12.5)
                pt_f.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY  # justified
            # Item 3: where work is permitted.
            if (c4 and c5) or (c4 or c5):
                p[i][8] = document.add_paragraph(str(k) + '. ')
                pt_f = p[i][8].paragraph_format
                pt_f.first_line_indent = Mm(12.5)
                pt_f.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY  # justified
                if c4:
                    if i == 0:
                        p[i][8].add_run(
                            'Разрешаются работы на участках ВЛ опоры № ')
                    else:
                        p[i][8].add_run(
                            'Разрешаются работы на участках ответвления от ВЛ опоры № '
                        )
                    for j in range(len(s4)):
                        if j == len(s4) - 1:
                            p[i][8].add_run(s4[j] + '. ')
                        else:
                            p[i][8].add_run(s4[j] + ', ')
                if c5:
                    p[i][8].add_run(
                        'Разрешаются работы на линейном оборудовании ')
                    for j in range(len(s5)):
                        if j == len(s5) - 1:
                            p[i][8].add_run(s5[j] + '. ')
                        else:
                            p[i][8].add_run(s5[j] + ', ')
                    p[i][8].add_run(
                        'При работах на линейных разъединителях дополнительно устанавливается заземление типа ДЗ. '
                    )
                k += 1
            # Item 4: safe sub-section rule (when allowed sections exist).
            if c4:
                p[i][9]=document.add_paragraph(str(k)+'. Разрешается работа на участке ВЛ до 2 км'\
                    +' с установкой заземления типа ЛЗ с двух сторон участка,'\
                    +' при условии, что их установка производится в пределах рабочего участка ВЛ.')
                k += 1
                pt_f = p[i][9].paragraph_format
                pt_f.first_line_indent = Mm(12.5)
                pt_f.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY  # justified
            # Item 5: where work is forbidden.
            if (c6 and c7) or (c6 or c7):
                p[i][10] = document.add_paragraph(str(k) + '. ')
                pt_f = p[i][10].paragraph_format
                pt_f.first_line_indent = Mm(12.5)
                pt_f.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY  # justified
                if c6:
                    if i == 0:
                        p[i][10].add_run(
                            'Запрещаются работы на участках ВЛ опоры № ')
                    else:
                        p[i][10].add_run(
                            'Запрещаются работы на участках ответвления от ВЛ опоры № '
                        )
                    for j in range(len(s6)):
                        if j == len(s6) - 1:
                            p[i][10].add_run(s6[j] + '. ')
                        else:
                            p[i][10].add_run(s6[j] + ', ')
                if c7:
                    p[i][10].add_run(
                        'Запрещаются работы на линейном оборудовании ')
                    for j in range(len(s7)):
                        if j == len(s7) - 1:
                            p[i][10].add_run(s7[j] + '. ')
                        else:
                            p[i][10].add_run(s7[j] + ', ')
                k += 1
            # Item 6 (disabled): 25 V limit note when safe sections exist.
            # if c4:
            #     p[i][11]=document.add_paragraph(str(k)+'. При установке заземления типа ЛЗ напряжение на рабочих '\
            #         +'местах не превысит нормируемой величины 25 В.')
            # Voltage distribution along the line: proximity scheme heading.
            p[i][12] = document.add_paragraph()
            p[i][12].add_run('СХЕМА СБЛИЖЕНИЯ').bold = True
            pt_f = p[i][12].paragraph_format
            pt_f.alignment = WD_ALIGN_PARAGRAPH.CENTER
            pt_f.space_after = Pt(2)
            pt_f.space_before = Pt(10)
            # Voltage diagram picture (cropped before insertion).
            im = Image.open('gr_sb/' + str(i) + '.jpg')
            im.crop((80, 0, 1610, 500)).save('gr_sb/obr' + str(i) + '.jpg')
            p[i][13] = document.add_picture('gr_sb/obr' + str(i) + '.jpg',
                                            width=Inches(6.25))
            last_paragraph = document.paragraphs[-1]
            last_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
            p[i][14] = document.add_paragraph()
            p[i][14].add_run(
                'РАСПРЕДЕЛЕНИЕ НАПРЯЖЕНИЯ ПО ДЛИНЕ ВЛ').bold = True
            pt_f = p[i][14].paragraph_format
            pt_f.alignment = WD_ALIGN_PARAGRAPH.CENTER
            pt_f.space_after = Pt(2)
            pt_f.space_before = Pt(10)
            # Voltage diagram picture (cropped before insertion).
            im = Image.open('images_grafik/' + str(i) + '.jpg')
            im.crop((80, 15, 1610, 500)).save(
                'images_grafik/obr' + str(i) + '.jpg', "PNG")
            p[i][15] = document.add_picture('images_grafik/obr' + str(i) +
                                            '.jpg',
                                            width=Inches(6.25))
            last_paragraph = document.paragraphs[-1]
            last_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
            # Page break between branches (not after the last one).
            if i != len(inf) - 1:
                document.add_page_break()  # page break
    # Save or hand back the document.
    if Trig:
        document.add_page_break()  # page break
        return document
    else:
        document.save(fname)
        return None