def tuning_MultinomialNB():
    param_grid = {
        "alpha": [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9]
    }
    result_list = []
    optimized_param = [0, 0, 0, 0, 0]
    for alpha in param_grid["alpha"]:
        # try:
        current_param_and_eval = [alpha]
        clf = MultinomialNB(alpha=alpha)
        f1_scores_dict = load_train_output_crossvalidation(clf)
        f1_scores_dict = round_f1_score(f1_scores_dict)
        f1_train_average = f1_scores_dict["f1_train_average"]
        f1_dev_average = f1_scores_dict["f1_dev_average"]
        f1_train_micro_average = f1_scores_dict["f1_train_micro_average"]
        f1_dev_micro_average = f1_scores_dict["f1_dev_micro_average"]
        current_param_and_eval.append(f1_train_micro_average)
        current_param_and_eval.append(f1_dev_micro_average)
        current_param_and_eval.append(f1_train_average)
        current_param_and_eval.append(f1_dev_average)
        result_list.append(current_param_and_eval)
        if current_param_and_eval[2] > optimized_param[2]:
            optimized_param = current_param_and_eval
        # except:
        #     print("An exception occurs.")

    # Generate data table
    geometry_options = {"margin": "2.54cm", "includeheadfoot": True}
    doc = Document(page_numbers=True, geometry_options=geometry_options)
    with doc.create(LongTable("l l l l l")) as data_table:
        data_table.add_hline()
        data_table.add_row([
            "alpha", "training f1", "valid f1",
            "training f1 for each technique", "valid f1 for each technique"
        ])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()
        for i in range(len(result_list)):
            data_table.add_row(result_list[i][0:5])
        data_table.add_hline()

    with doc.create(LongTable("l l l l l")) as data_table:
        data_table.add_hline()
        data_table.add_row([
            "alpha", "training f1", "valid f1",
            "training f1 for each technique", "valid f1 for each technique"
        ])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()
        data_table.add_row(optimized_param)
        data_table.add_hline()

    print("This is for MultinomialNB.")
    doc.generate_pdf("tuning_MultinomialNB", clean_tex=False)
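# The snippet above relies on two helpers that are not shown here:
# load_train_output_crossvalidation() and round_f1_score(). The following is a
# minimal, hypothetical sketch of round_f1_score() based only on the dictionary
# keys used above; the real implementation may differ (e.g. in the number of
# decimals, or in how per-technique score lists are handled).
def round_f1_score(f1_scores_dict, digits=3):
    def _round(value):
        # Per-technique scores may arrive as lists; round element-wise in that case.
        if isinstance(value, (list, tuple)):
            return [round(float(v), digits) for v in value]
        return round(float(value), digits)
    return {key: _round(value) for key, value in f1_scores_dict.items()}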
def test_table():
    # Tabular
    t = Tabular(table_spec='|c|c|', data=None, pos=None, width=2)
    t.add_hline(start=None, end=None)
    t.add_row((1, 2), escape=False, strict=True, mapper=[bold])
    t.add_row(1, 2, escape=False, strict=True, mapper=[bold])

    # MultiColumn/MultiRow.
    t.add_row((MultiColumn(size=2, align='|c|', data='MultiColumn'),),
              strict=True)

    # One multiRow-cell in that table would not be proper LaTeX,
    # so strict is set to False
    t.add_row((MultiRow(size=2, width='*', data='MultiRow'),), strict=False)
    repr(t)

    # TabularX
    tabularx = Tabularx(table_spec='X X X',
                        width_argument=NoEscape(r"\textwidth"))
    tabularx.add_row(["test1", "test2", "test3"])

    # Long Table
    longtable = LongTable(table_spec='c c c')
    longtable.add_row(["test", "test2", "test3"])
    longtable.end_table_header()

    # Colored Tabu
    coloredtable = Tabu(table_spec='X[c] X[c]')
    coloredtable.add_row(["test", "test2"], color="gray", mapper=bold)

    # Colored Tabu with 'spread'
    coloredtable = Tabu(table_spec='X[c] X[c]', spread="1in")
    coloredtable.add_row(["test", "test2"], color="gray", mapper=bold)

    # Colored Tabu with 'to'
    coloredtable = Tabu(table_spec='X[c] X[c]', to="5in")
    coloredtable.add_row(["test", "test2"], color="gray", mapper=bold)

    # Colored Tabularx
    coloredtable = Tabularx(table_spec='X[c] X[c]')
    coloredtable.add_row(["test", "test2"], color="gray", mapper=bold)

    # Column
    column = ColumnType("R", "X", r"\raggedleft", parameters=2)
    repr(column)
def create_long_table(
    doc,
    parameters,
    skip_parameters=[],
    table_specs=r"|p{0.45\linewidth}|p{0.45\linewidth}|",
    header=[bold("Parameter"), bold("Value")],
):
    """
    Helper function to create long table for parameters

    Arguments:
        doc: document to add table
        parameters: parameters dict
        skip_parameters: list of parameters to skip
        table_specs: latex specific table settings
        header: list with column names
    """
    columns = len(header)
    with doc.create(LongTable(table_spec=table_specs)) as data_table:
        # Table header
        data_table.add_hline()
        data_table.add_row(header)
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_row(
            (MultiColumn(columns, align="r", data="Continued on Next Page"), ))
        data_table.end_table_footer()
        data_table.add_row(
            (MultiColumn(columns, align="r", data="End of Table"), ))
        data_table.end_table_last_footer()
        for item in parameters:
            if item not in skip_parameters:
                data_table.add_row([item, str(parameters[item])])
        data_table.add_hline()
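# Minimal usage sketch for create_long_table() (an illustration, not part of the
# original code): the parameter names, values, and the output file name
# "parameters" are arbitrary assumptions. Requires pylatex and a LaTeX
# distribution on the PATH.
from pylatex import Document

doc = Document(page_numbers=True)
run_parameters = {"batch_size": 32, "learning_rate": 1e-4,
                  "epochs": 10, "notes": "debug run"}
create_long_table(doc, run_parameters, skip_parameters=["notes"])
doc.generate_pdf("parameters", clean_tex=False)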
def basic_table():
    geometry_options = {"margin": "2.54cm", "includeheadfoot": True}
    doc = Document(page_numbers=True, geometry_options=geometry_options)
    batch_size_list = [8, 12, 16, 32]
    learning_rate_list = [1e-5, 2e-5, 5e-5, 1e-4]
    epochs_list = [2, 4, 8, 12]
    rows = []
    for batch_size in batch_size_list:
        for learning_rate in learning_rate_list:
            for epochs in epochs_list:
                row = []
                row.append(batch_size)
                row.append(learning_rate)
                row.append(epochs)
                row.append(0)
                rows.append(row)

    # Generate data table
    with doc.create(LongTable("l l l l")) as data_table:
        data_table.add_hline()
        data_table.add_row(["batch size", "learning rate", "epochs", "F1"])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()
        for i in range(len(rows)):
            data_table.add_row(rows[i])

    doc.generate_pdf("hyper_tune_bert", clean_tex=False)
def genenerate_longtabu():
    geometry_options = {
        "margin": "2.54cm",
        "includeheadfoot": True
    }
    doc = Document(page_numbers=True, geometry_options=geometry_options)

    # Generate data table
    with doc.create(LongTable("l l l")) as data_table:
        data_table.add_hline()
        data_table.add_row(["header 1", "header 2", "header 3"])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()
        data_table.add_row((MultiColumn(3, align='r',
                                        data='Continued on Next Page'),))
        data_table.add_hline()
        data_table.end_table_footer()
        data_table.add_hline()
        data_table.add_row((MultiColumn(3, align='r',
                                        data='Not Continued on Next Page'),))
        data_table.add_hline()
        data_table.end_table_last_footer()
        row = ["Content1", "9", "Longer String"]
        for i in range(150):
            data_table.add_row(row)

    doc.generate_pdf("longtable", clean_tex=False)
def genenerate_longtabu(data):
    if data == data_PU:
        with doc.create(LongTable("l l c c c c c c l")) as data_table:
            doc.append(Command('caption', 'Zadání místností pro výpočet'))
            doc.append(Command('label', 'rozmery'))
            doc.append(Command('\ '))
            data_table.append
            data_table.add_hline()
            data_table.add_row([
                "PU", "Místnost", "Plocha",
                NoEscape('h$_s$'), NoEscape('a$_n$'),
                NoEscape('p$_n$'), NoEscape('p$_s$'),
                "c", "Tab. A.1"
            ])
            data_table.add_hline()
            data_table.end_table_header()
            data_table.add_hline()
            doc.append(Command('endfoot'))
            doc.append(Command('endlastfoot'))
            # data_table.end_table_footer()
            data_table.add_hline()
    if data == data_POP:
        with doc.create(LongTable("l l c c c c", pos=['l'])) as data_table:
            doc.append(Command('caption', 'Zadání okenních otvorů'))
            doc.append(Command('label', 'okna'))
            doc.append(Command('\ '))
            data_table.append
            data_table.add_hline()
            data_table.add_row([
                "PU", "Místnost", "n otvorů",
                "šířka otvorů", "výška otvorů", "Plocha"
            ])
            data_table.add_row(
                [" ", " ", " ", "[m]", "[m]", NoEscape('m$^2$')])
            data_table.add_hline()
            data_table.end_table_header()
            data_table.add_hline()
            data_table.add_hline()
            for i in range(0, len(data)):
                if i % 2 == 0:
                    data_table.add_row(data[i])
                else:
                    data_table.add_row(data[i], color="Hex")
            data_table.add_hline()
def description_regression(full_data_files, description_folder):
    data_list = list()
    for d in full_data_files:
        name = d.split('/')[2].split('.')[0]
        df = pd.read_csv(d, sep=r'\s+', header=None)
        lines, columns = df.shape
        attribute = columns - 1  # Minus one because of target
        data_list.append({'Dataset': name, 'Size': lines,
                          'Attributes': attribute})

    df = pd.DataFrame(data_list)
    df = df.sort_values('Size', ascending=False)
    cols = ['Dataset', 'Size', 'Attributes']
    df = df[cols]
    df_copy = deepcopy(df)
    df_copy.to_csv(os.path.join(description_folder, 'data_description.csv'),
                   sep=',', header=True,
                   columns=['Dataset', 'Size', 'Attributes'], index=False)

    # LaTeX
    df = df.set_index(['Dataset'])

    # Max classes per row
    max_classes = np.inf

    geometry_options = {
        "margin": "1.00cm",
        "includeheadfoot": True
    }
    doc = Document(page_numbers=True, geometry_options=geometry_options)

    # Generate data table
    with doc.create(LongTable("l l l")) as data_table:
        data_table.add_hline()
        header = ["Dataset", "Size", "#Attr."]
        data_table.add_row(header)
        data_table.add_hline()
        data_table.add_hline()
        for index in df.index.values:
            row = [index] + df.loc[index].values.tolist()
            data_table.add_row(row)

    doc.generate_pdf(os.path.join(description_folder, 'data_description'),
                     clean_tex=False)
def k_generator(data_dir, vystup_dir): os.chdir(data_dir) data_PU = read_results('results.csv') data_used = [] for i in range(0, len(data_PU)): data_used.append(data_PU[i][0]) data_used.append(data_PU[i][1]) data_used.append(data_PU[i][2]) data_used.append(data_PU[i][4]) a = 0.15 * ((float(data_PU[i][1]) * float(data_PU[i][2]) * float(data_PU[i][4]))**0.5) # print(float(data_PU[i][1]), float(data_PU[i][2]), float(data_PU[i][4])) data_used.append(str(round(a, 2))) data_used.append('{} PG6 21A/183B'.format(math.ceil(a))) data_used = list(chunks(data_used, 6)) with doc.create(Section('Hasicí přístroje')): with doc.create(Subsection('Přehled hasicích přístrojů')): doc.append( NoEscape(r'Přehled počtu a druhu všech hasicích přístrojů,\ které budou v objektu osazeny, je patrný z\ tabulky \ref{PHP_stanoveni}.')) for i in range(0, len(data_used)): data_used[i][1] = data_used[i][1].replace(".", ",") data_used[i][2] = data_used[i][2].replace(".", ",") data_used[i][3] = data_used[i][3].replace(".", ",") data_used[i][4] = data_used[i][4].replace(".", ",") with doc.create(LongTable("l c c c c l", pos=['htb'])) as data_table: doc.append(Command('caption', 'Počet a druh hasicích přístrojů')) doc.append(Command('label', NoEscape('PHP_stanoveni'))) doc.append(Command('\ ')) data_table.append data_table.add_hline() data_table.add_row([ "Požární úsek", "S", "a", "c", NoEscape('$n_r$'), "Počet PHP - typ" ]) data_table.add_row([" ", NoEscape('[m$^2$]'), "-", "-", "-", " "]) data_table.add_hline() data_table.end_table_header() for i in range(0, len(data_used)): if i % 2 != 0: data_table.add_row(data_used[i], color="Hex") else: data_table.add_row(data_used[i]) data_table.add_hline() # doc.append(NoEscape('\insertTableNotes')) os.chdir(vystup_dir) doc.generate_pdf("K_hasicaky", clean_tex=False)
def _document_aggregate_table(self, tests: List[Dict[str, Any]]) -> None:
    """Document a result table of aggregate tests.

    Args:
        tests: List of corresponding test dictionary to make a table.
    """
    with self.doc.create(LongTable('|c|p{8cm}|p{7.3cm}|',
                                   booktabs=True)) as tabular:
        package = Package('seqsplit')
        if package not in tabular.packages:
            tabular.packages.append(package)

        # add table heading
        tabular.add_row(
            (MultiColumn(size=1, align='|c|', data="Test ID"),
             MultiColumn(size=1, align='c|', data="Test Description"),
             MultiColumn(size=1, align='c|', data="Input Value")))

        # add table header and footer
        tabular.add_hline()
        tabular.end_table_header()
        tabular.add_hline()
        tabular.add_row((MultiColumn(3, align='r',
                                     data='Continued on Next Page'), ))
        tabular.add_hline()
        tabular.end_table_footer()
        tabular.end_table_last_footer()

        for idx, test in enumerate(tests):
            if idx > 0:
                tabular.add_hline()

            inp_data = [
                f"{arg}={self.sanitize_value(value)}"
                for arg, value in test["inputs"].items()
            ]
            inp_data = [WrapText(data=x, threshold=27) for x in inp_data]
            des_data = [
                WrapText(data=x, threshold=27)
                for x in test["description"].split(" ")
            ]
            row_cells = [
                self.test_id,
                IterJoin(data=des_data, token=" "),
                IterJoin(data=inp_data, token=escape_latex(", \n")),
            ]
            tabular.add_row(row_cells)
            self.test_id += 1
def _document_sys_config(self) -> None:
    """Add a system config summary to the traceability document."""
    with self.doc.create(Section("System Config")):
        with self.doc.create(Itemize()) as itemize:
            itemize.add_item(escape_latex(f"FastEstimator {fe.__version__}"))
            itemize.add_item(escape_latex(f"Python {platform.python_version()}"))
            itemize.add_item(escape_latex(f"OS: {sys.platform}"))
            itemize.add_item(f"Number of GPUs: {torch.cuda.device_count()}")
            if fe.fe_deterministic_seed is not None:
                itemize.add_item(
                    escape_latex(
                        f"Deterministic Seed: {fe.fe_deterministic_seed}"))
        with self.doc.create(LongTable('|lr|', pos=['h!'],
                                       booktabs=True)) as tabular:
            tabular.add_row((bold("Module"), bold("Version")))
            tabular.add_hline()
            tabular.end_table_header()
            tabular.add_hline()
            tabular.add_row((MultiColumn(2, align='r',
                                         data='Continued on Next Page'), ))
            tabular.add_hline()
            tabular.end_table_footer()
            tabular.end_table_last_footer()
            color = True
            for name, module in humansorted(sys.modules.items(),
                                            key=lambda x: x[0]):
                if "." in name:
                    continue  # Skip sub-packages
                if name.startswith("_"):
                    continue  # Skip private packages
                if isinstance(module, Base):
                    continue  # Skip fake packages we mocked
                if hasattr(module, '__version__'):
                    tabular.add_row(
                        (escape_latex(name),
                         escape_latex(str(module.__version__))),
                        color='black!5' if color else 'white')
                    color = not color
                elif hasattr(module, 'VERSION'):
                    tabular.add_row(
                        (escape_latex(name),
                         escape_latex(str(module.VERSION))),
                        color='black!5' if color else 'white')
                    color = not color
def periodAndRecur(doc, classes, recursive, mc):
    with doc.create(Subsection('Periodicity and Recurrence')):
        with doc.create(LongTable("l l l")) as data_table:
            data_table.add_hline()
            data_table.add_row(['$s$', '$period(s)$', '$Recurrence$'],
                               escape=False)
            data_table.add_hline()
            data_table.end_table_header()
            data_table.add_hline()
            data_table.end_table_last_footer()
            for c in classes:
                row = [
                    '$' + toLatexState(c) + '$',
                    '$' + str(mc.period(c)) + '$',
                    str(c in recursive)
                ]
                data_table.add_row(row, escape=False)
def edgeList(doc, eL):
    doc.append(NewPage())
    with doc.create(Subsection('Edges')):
        with doc.create(LongTable("l l l l l l l l l l l l l")) as data_table:
            data_table.add_hline()
            data_table.add_row(['$s$', '$s\'$', '$p$', ' ', ' '] * 2 +
                               ['$s$', '$s\'$', '$p$'], escape=False)
            data_table.add_hline()
            data_table.end_table_header()
            data_table.add_hline()
            data_table.end_table_last_footer()
            for i in range(0, len(eL), 3):
                row = []
                triplet = eL[i]
                row += [
                    '$' + toLatexState(triplet[0]) + '$',
                    '$' + toLatexState(triplet[1]) + '$',
                    '$' + toLatexProb(triplet[2]) + '$', ' ', ' '
                ]
                if i + 1 < len(eL):
                    triplet = eL[i + 1]
                    row += [
                        '$' + toLatexState(triplet[0]) + '$',
                        '$' + toLatexState(triplet[1]) + '$',
                        '$' + toLatexProb(triplet[2]) + '$', ' ', ' '
                    ]
                    if i + 2 < len(eL):
                        triplet = eL[i + 2]
                        row += [
                            '$' + toLatexState(triplet[0]) + '$',
                            '$' + toLatexState(triplet[1]) + '$',
                            '$' + toLatexProb(triplet[2]) + '$'
                        ]
                    else:
                        row += [' ', ] * 3
                else:
                    row += [' ', ] * 8
                data_table.add_row(row, escape=False)
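# periodAndRecur() and edgeList() above both rely on toLatexState() and
# toLatexProb(), which are not shown. Hypothetical sketches follow (assumptions
# only; the originals may format states and probabilities differently, e.g. as
# fractions or with different subscripting):
def toLatexState(state):
    # Render a state label as LaTeX math text, e.g. 3 -> 's_{3}'.
    return r's_{' + str(state) + '}'


def toLatexProb(prob):
    # Render a transition probability with a fixed number of decimals.
    return f'{float(prob):.3f}'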
def POP_generator(df): data_used = df.values.tolist() for i in range(0, len(data_used)): data_used[i][1] = str(("%.2f" % data_used[i][1])).replace(".", ",") data_used[i][2] = str(("%.2f" % data_used[i][2])).replace(".", ",") data_used[i][3] = str(("%.2f" % data_used[i][3])).replace(".", ",") with doc.create(Section('Vymezení požárně nebezpečného prostoru')): with doc.create(Subsection('Procenta požárně otevřených ploch')): doc.append(NoEscape(r'Dále je určeno, zda je možné jednotlivé otvory ve fasádě posuzovat samostatně, a to výpočtem procenta požárně otevřených ploch z celkové plochy každé fasády. Způsob odečtení plochy fasády a plochy otevřených ploch je patrný z obrázku \ref{SchemaPOP}.')) with doc.create(Figure(position='htb!')) as POP: POP.add_image('images/POP.jpg', width='200px') POP.add_caption('Odečtení požárně otevřených ploch') POP.append(Command('label', 'SchemaPOP')) doc.append(NoEscape(r'Procentuální výsledky jednotlivých fasád s požárně otevřenými plochami jsou patrné z tabulky \ref{POP}.')) with doc.create(LongTable("lcccc", pos=['htb'])) as data_table: doc.append(Command('caption', 'Odstupové vzdálenosti od objektu')) doc.append(Command('label', 'POP')) doc.append(Command('\ ')) data_table.append data_table.add_hline() data_table.add_row(["Popis", "Celková plocha", "Plocha POP", "Procento POP", "POP jednotlivě"]) data_table.add_row([" ", NoEscape('[m$^2$]'), NoEscape('[m$^2$]'), NoEscape('[\%]'), NoEscape(r'\textless 40 \%')]) data_table.add_hline() data_table.end_table_header() for i in range(0, len(data_used)): if i == 0: # \multicolumn{9}{l}{\textbf{1. nadzemní podlaží - POP společné}}\\ data_table.add_row((MultiColumn(5, align='l', data=utils.bold(df_popis_POP[i])),)) if i > 0 and df_popis_POP[i] != df_popis_POP[i-1]: data_table.add_hline() data_table.add_row((MultiColumn(5, align='l', data=utils.bold(df_popis_POP[i])),)) if (i) % 2 == 0: data_table.add_row(data_used[i], color="Hex") elif (i) % 2 != 0: data_table.add_row(data_used[i]) data_table.add_hline() doc.append(NoEscape(r'Ostatní v tabulce výše nezmíněné otvory na fasádách objektu jsou spočteny jako samostatné 100 \% otevřené požární plochy.'))
def dataframe_to_pdf(df, pdf_path):
    geometry_options = {"margin": "2.54cm", "includeheadfoot": True}
    doc = Document(page_numbers=True, geometry_options=geometry_options)
    num_cols = len(df.columns)

    # Generate data table
    with doc.create(LongTable("l " * num_cols)) as data_table:
        data_table.add_hline()
        data_table.add_row(list(df.columns.values))
        data_table.add_hline()
        for i in range(len(df.index)):
            row_list = df.iloc[i].values.tolist()
            if "SEP" in row_list[0]:
                data_table.add_hline()
            else:
                data_table.add_row(row_list)
        data_table.add_hline()

    doc.generate_pdf(pdf_path, clean_tex=False)
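# Usage sketch for dataframe_to_pdf() (an illustrative assumption, not part of
# the original): rows whose first column contains the literal string "SEP" are
# rendered as horizontal rules instead of data rows. The column names, values,
# and output name "results_table" below are arbitrary; requires pandas, pylatex,
# and a LaTeX distribution.
import pandas as pd

df = pd.DataFrame(
    [["alpha", 0.92, 0.88],
     ["SEP", "", ""],
     ["beta", 0.87, 0.85]],
    columns=["name", "train F1", "valid F1"])
dataframe_to_pdf(df, "results_table")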
def basic_run():
    geometry_options = {"margin": "2.54cm", "includeheadfoot": True}
    doc = Document(page_numbers=True, geometry_options=geometry_options)

    clf_1 = LinearSVC(dual=False)
    clf_2 = LogisticRegression(multi_class="multinomial", solver="newton-cg")
    clf_3 = MultinomialNB()
    clf_4 = RandomForestClassifier()

    LinearSVC_f1_scores_dict = load_train_output_crossvalidation(clf_1)
    LogisticRegression_f1_scores_dict = load_train_output_crossvalidation(clf_2)
    MultinomialNB_f1_scores_dict = load_train_output_crossvalidation(clf_3)
    RandomForest_f1_scores_dict = load_train_output_crossvalidation(clf_4)

    row_1 = basic_generate_row("LinearSVC", LinearSVC_f1_scores_dict)
    row_2 = basic_generate_row("LogisticRegression",
                               LogisticRegression_f1_scores_dict)
    row_3 = basic_generate_row("MultinomialNB", MultinomialNB_f1_scores_dict)
    row_4 = basic_generate_row("RandomForest", RandomForest_f1_scores_dict)

    rows = []
    rows.append(row_1)
    rows.append(row_2)
    rows.append(row_3)
    rows.append(row_4)

    # Generate data table
    with doc.create(LongTable("l l l l l")) as data_table:
        data_table.add_hline()
        data_table.add_row([
            "Model Name", "training f1", "valid f1",
            "training f1 for each technique", "valid f1 for each technique"
        ])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()
        for i in range(len(rows)):
            data_table.add_row(rows[i])

    doc.generate_pdf("basic_run", clean_tex=False)
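# basic_generate_row() is not defined in this snippet. A hypothetical sketch
# follows, inferred from the table header above and from the column order used
# in tuning_MultinomialNB() (micro-averaged scores first, per-technique averages
# second); the real helper may differ.
def basic_generate_row(model_name, f1_scores_dict):
    return [
        model_name,
        f1_scores_dict["f1_train_micro_average"],
        f1_scores_dict["f1_dev_micro_average"],
        f1_scores_dict["f1_train_average"],
        f1_scores_dict["f1_dev_average"],
    ]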
def save_latex(self, uiObj, Design_Check, reportsummary, filename, rel_path, Disp_2d_image, Disp_3d_image, module=''): companyname = str(reportsummary["ProfileSummary"]['CompanyName']) companylogo = str(reportsummary["ProfileSummary"]['CompanyLogo']) groupteamname = str(reportsummary["ProfileSummary"]['Group/TeamName']) designer = str(reportsummary["ProfileSummary"]['Designer']) projecttitle = str(reportsummary['ProjectTitle']) subtitle = str(reportsummary['Subtitle']) jobnumber = str(reportsummary['JobNumber']) client = str(reportsummary['Client']) does_design_exist = reportsummary['does_design_exist'] osdagheader = '/ResourceFiles/images/Osdag_header_report.png' # Add document header geometry_options = { "top": "5cm", "hmargin": "2cm", "headheight": "100pt", "footskip": "100pt", "bottom": "5cm" } doc = Document(geometry_options=geometry_options, indent=False) doc.packages.append(Package('amsmath')) doc.packages.append(Package('graphicx')) doc.packages.append(Package('needspace')) doc.append(pyl.Command('fontsize', arguments=[8, 12])) doc.append(pyl.Command('selectfont')) doc.add_color('OsdagGreen', 'RGB', '153,169,36') doc.add_color('PassColor', 'RGB', '153,169,36') doc.add_color('Red', 'RGB', '255,0,0') doc.add_color('Green', 'RGB', '0,200,0') doc.add_color('FailColor', 'HTML', '933A16') header = PageStyle("header") # Create center header with header.create(Head("C")): with header.create(Tabularx('|l|p{4cm}|l|X|')) as table: table.add_hline() # MultiColumn(4) table.add_row(( MultiColumn( 2, align='|c|', data=('' if companylogo is '' else StandAloneGraphic( image_options="height=0.95cm", filename=companylogo))), MultiColumn(2, align='|c|', data=[ 'Created with', StandAloneGraphic( image_options="width=4.0cm,height=1cm", filename=rel_path + osdagheader) ]), )) table.add_hline() table.add_row(('Company Name', companyname, 'Project Title', projecttitle), color='OsdagGreen') table.add_hline() table.add_row( ('Group/Team Name', groupteamname, 'Subtitle', subtitle), color='OsdagGreen') table.add_hline() table.add_row(('Designer', designer, 'Job Number', jobnumber), color='OsdagGreen') table.add_hline() table.add_row( ('Date', time.strftime("%d /%m /%Y"), 'Client', client), color='OsdagGreen') table.add_hline() # Create right footer with header.create(Foot("R")): header.append(NoEscape(r'Page \thepage')) # # doc.preamble.append(header) # doc.change_document_style("header") # Add Heading # with doc.create(MiniPage(align='c')): doc.preamble.append(header) doc.change_document_style("header") with doc.create(Section('Input Parameters')): with doc.create( LongTable('|p{5cm}|p{2.5cm}|p{1.5cm}|p{3cm}|p{3.5cm}|', row_height=1.2)) as table: table.add_hline() for i in uiObj: # row_cells = ('9', MultiColumn(3, align='|c|', data='Multicolumn not on left')) if i == "Selected Section Details" or i == KEY_DISP_ANGLE_LIST or i == KEY_DISP_TOPANGLE_LIST or i == KEY_DISP_CLEAT_ANGLE_LIST: # if type(uiObj[i]) == list: continue if type(uiObj[i]) == dict: table.add_hline() sectiondetails = uiObj[i] image_name = sectiondetails[KEY_DISP_SEC_PROFILE] Img_path = '/ResourceFiles/images/' + image_name + '.png' if (len(sectiondetails)) % 2 == 0: # merge_rows = int(round_up(len(sectiondetails),2)/2 + 2) merge_rows = int((len(sectiondetails) / 2)) + 2 else: merge_rows = round_up((len(sectiondetails) / 2), 2) if (len(sectiondetails)) % 2 == 0: sectiondetails[''] = '' a = list(sectiondetails.keys()) # index=0 for x in range(1, (merge_rows + 1)): # table.add_row("Col.Det.",i,columndetails[i]) if x == 1: table.add_row(( 
MultiRow( merge_rows, data=StandAloneGraphic( image_options= "width=5cm,height=5cm", filename=rel_path + Img_path)), MultiColumn(2, align='|c|', data=a[x]), MultiColumn(2, align='|c|', data=sectiondetails[a[x]]), )) elif x <= 4: table.add_row(( '', MultiColumn(2, align='|c|', data=NoEscape(a[x])), MultiColumn(2, align='|c|', data=NoEscape( sectiondetails[a[x]])), )) else: table.add_row(( '', NoEscape(a[x]), sectiondetails[a[x]], NoEscape(a[merge_rows + x - 4]), sectiondetails[a[merge_rows + x - 4]], )) table.add_hline(2, 5) elif uiObj[i] == "TITLE": table.add_hline() table.add_row((MultiColumn( 5, align='|c|', data=bold(i), ), )) table.add_hline() elif i == 'Section Size*': table.add_hline() table.add_row(( MultiColumn( 3, align='|c|', data=i, ), MultiColumn(2, align='|c|', data="Ref List of Input Section"), )) table.add_hline() elif len(str(uiObj[i])) > 55 and type( uiObj[i]) != pyl.math.Math: str_len = len(str(uiObj[i])) loop_len = round_up((str_len / 55), 1, 1) for j in range(1, loop_len + 1): b = 55 * j + 1 if j == 1: table.add_row(( MultiColumn(3, align='|c|', data=MultiRow(loop_len, data=i)), MultiColumn(2, align='|c|', data=uiObj[i][0:b]), )) else: table.add_row(( MultiColumn(3, align='|c|', data=MultiRow(loop_len, data="")), MultiColumn(2, align='|c|', data=uiObj[i][b - 55:b]), )) table.add_hline() else: table.add_hline() table.add_row(( MultiColumn(3, align='|c|', data=NoEscape(i)), MultiColumn(2, align='|c|', data=uiObj[i]), )) table.add_hline() for i in uiObj: if i == 'Section Size*' or i == KEY_DISP_ANGLE_LIST or i == KEY_DISP_TOPANGLE_LIST or i == KEY_DISP_CLEAT_ANGLE_LIST: with doc.create(Subsection("List of Input Section")): # with doc.create(LongTable('|p{8cm}|p{8cm}|', row_height=1.2)) as table: with doc.create(Tabularx('|p{4cm}|X|', row_height=1.2)) as table: table.add_hline() table.add_row(( MultiColumn( 1, align='|c|', data=i, ), MultiColumn(1, align='|X|', data=uiObj[i].strip("[]")), )) # str_len = len(uiObj[i]) # loop_len = round_up((str_len/100),1,1) # table.add_hline() # for j in range(1,loop_len+1): # b= 100*j+1 # if j ==1: # table.add_row((MultiColumn(1, align='|c|', data=i, ), # MultiColumn(1, align='|X|', data=uiObj[i][0:b]),)) # else: # table.add_row((MultiColumn(1, align='|c|', data=" ", ), # MultiColumn(1, align='|X|', data=uiObj[i][b-100:b]),)) table.add_hline() doc.append( pyl.Command('Needspace', arguments=NoEscape(r'10\baselineskip'))) doc.append(NewPage()) count = 0 with doc.create(Section('Design Checks')): with doc.create( Tabularx( r'|>{\centering}p{12.5cm}|>{\centering\arraybackslash}X|', row_height=1.2)) as table: table.add_hline() # Fail = TextColor("FailColor", bold("Fail")) # Pass = TextColor("PassColor", bold("Pass")) if does_design_exist != True: table.add_row(bold('Design Status'), color_cell("Red", bold("Fail"))) else: table.add_row(bold('Design Status'), color_cell("OsdagGreen", bold("Pass"))) table.add_hline() for check in Design_Check: if check[0] == 'SubSection': if count >= 1: # doc.append(NewPage()) doc.append( pyl.Command( 'Needspace', arguments=NoEscape(r'10\baselineskip'))) with doc.create(Subsection(check[1])): ######################### # if uiObj== "WELDImage": # table.add_hline() # table.add_row((MultiColumn(5, align='|c|', data=bold(i), ),)) # table.add_hline() # else: ######################### with doc.create(LongTable(check[2], row_height=1.2) ) as table: # todo anjali remove table.add_hline() table.add_row( ('Check', 'Required', 'Provided', 'Remarks'), color='OsdagGreen') table.add_hline() table.end_table_header() 
table.add_hline() count = count + 1 elif check[0] == "Selected": if count >= 1: # doc.append(NewPage()) doc.append( pyl.Command( 'Needspace', arguments=NoEscape(r'10\baselineskip'))) with doc.create(Subsection(check[1])): with doc.create(LongTable(check[2], row_height=1.2)) as table: table.add_hline() for i in uiObj: # row_cells = ('9', MultiColumn(3, align='|c|', data='Multicolumn not on left')) print(i) if type( uiObj[i] ) == dict and i == 'Selected Section Details': table.add_hline() sectiondetails = uiObj[i] image_name = sectiondetails[ KEY_DISP_SEC_PROFILE] Img_path = '/ResourceFiles/images/' + image_name + '.png' if (len(sectiondetails)) % 2 == 0: # merge_rows = int(round_up(len(sectiondetails),2)/2 + 2) merge_rows = int( round_up((len(sectiondetails) / 2), 1, 0) + 2) else: merge_rows = int( round_up((len(sectiondetails) / 2), 1, 0) + 1) print('Hi', len(sectiondetails) / 2, round_up(len(sectiondetails), 2) / 2, merge_rows) if (len(sectiondetails)) % 2 == 0: sectiondetails[''] = '' a = list(sectiondetails.keys()) # index=0 for x in range(1, (merge_rows + 1)): # table.add_row("Col.Det.",i,columndetails[i]) if x == 1: table.add_row(( MultiRow( merge_rows, data=StandAloneGraphic( image_options= "width=5cm,height=5cm", filename=rel_path + Img_path)), MultiColumn(2, align='|c|', data=NoEscape( a[x])), MultiColumn( 2, align='|c|', data=NoEscape( sectiondetails[a[x]])), )) elif x <= 4: table.add_row(( '', MultiColumn(2, align='|c|', data=NoEscape( a[x])), MultiColumn( 2, align='|c|', data=sectiondetails[a[x]]), )) else: table.add_row(( '', NoEscape(a[x]), sectiondetails[a[x]], NoEscape(a[merge_rows + x - 4]), sectiondetails[a[merge_rows + x - 4]], )) table.add_hline(2, 5) table.add_hline() count = count + 1 else: if check[3] == 'Fail': table.add_row((NoEscape(check[0])), check[1], check[2], TextColor("Red", bold(check[3]))) else: table.add_row((NoEscape(check[0])), check[1], check[2], TextColor("OsdagGreen", bold(check[3]))) table.add_hline() # 2D images if len(Disp_2d_image) != 0: if module == KEY_DISP_BCENDPLATE or module == KEY_DISP_BB_EP_SPLICE: if does_design_exist and sys.platform != 'darwin': doc.append(NewPage()) weld_details = rel_path + Disp_2d_image[0] detailing_details = rel_path + Disp_2d_image[1] stiffener_details = rel_path + Disp_2d_image[2] with doc.create(Section('2D Drawings (Typical)')): with doc.create(Figure()) as image: image.add_image(weld_details, width=NoEscape(r'0.7\textwidth'), placement=NoEscape(r'\centering')) image.add_caption( 'Typical Weld Details -- Beam to End Plate Connection' ) # doc.append(NewPage()) with doc.create(Figure()) as image_2: image_2.add_image( detailing_details, width=NoEscape(r'0.7\textwidth'), placement=NoEscape(r'\centering')) image_2.add_caption('Typical Detailing') # doc.append(NewPage()) with doc.create(Figure()) as image_3: image_3.add_image( stiffener_details, width=NoEscape(r'0.9\textwidth'), placement=NoEscape(r'\centering')) image_3.add_caption('Typical Stiffener Details') # doc.append(NewPage()) elif module == KEY_DISP_BASE_PLATE: if does_design_exist and sys.platform != 'darwin': doc.append(NewPage()) bp_sketch = rel_path + Disp_2d_image[0] bp_detailing = rel_path + Disp_2d_image[1] bp_weld = rel_path + Disp_2d_image[2] bp_anchor = rel_path + Disp_2d_image[3] bp_key = rel_path + Disp_2d_image[4] with doc.create(Section('2D Drawings (Typical)')): with doc.create(Figure()) as image_1: image_1.add_image( bp_sketch, width=NoEscape(r'1.0\textwidth'), placement=NoEscape(r'\centering')) image_1.add_caption('Typical Base Plate Details') 
# doc.append(NewPage()) with doc.create(Figure()) as image_2: image_2.add_image( bp_detailing, width=NoEscape(r'1.0\textwidth'), placement=NoEscape(r'\centering')) image_2.add_caption('Typical Base Plate Detailing') # doc.append(NewPage()) with doc.create(Figure()) as image_3: image_3.add_image( bp_weld, width=NoEscape(r'1.0\textwidth'), placement=NoEscape(r'\centering')) image_3.add_caption('Typical Weld Details') # doc.append(NewPage()) with doc.create(Figure()) as image_4: image_4.add_image( bp_anchor, width=NoEscape(r'0.5\textwidth'), placement=NoEscape(r'\centering')) image_4.add_caption('Typical Anchor Bolt Details') # doc.append(NewPage()) if len(Disp_2d_image[-1]) > 0: with doc.create(Figure()) as image_5: image_5.add_image( bp_key, width=NoEscape(r'0.9\textwidth'), placement=NoEscape(r'\centering')) image_5.add_caption( 'Typical Shear Key Details') # doc.append(NewPage()) if does_design_exist and sys.platform != 'darwin': doc.append(NewPage()) Disp_top_image = "/ResourceFiles/images/top.png" Disp_side_image = "/ResourceFiles/images/side.png" Disp_front_image = "/ResourceFiles/images/front.png" view_3dimg_path = rel_path + Disp_3d_image view_topimg_path = rel_path + Disp_top_image view_sideimg_path = rel_path + Disp_side_image view_frontimg_path = rel_path + Disp_front_image with doc.create(Section('3D Views')): with doc.create( Tabularx( r'|>{\centering}X|>{\centering\arraybackslash}X|', row_height=1.2)) as table: view_3dimg_path = rel_path + Disp_3d_image view_topimg_path = rel_path + Disp_top_image view_sideimg_path = rel_path + Disp_side_image view_frontimg_path = rel_path + Disp_front_image table.add_hline() table.add_row([ StandAloneGraphic(image_options="height=4cm", filename=view_3dimg_path), StandAloneGraphic(image_options="height=4cm", filename=view_topimg_path) ]) table.add_row('(a) 3D View', '(b) Top View') table.add_hline() table.add_row([ StandAloneGraphic(image_options="height=4cm", filename=view_sideimg_path), StandAloneGraphic(image_options="height=4cm", filename=view_frontimg_path) ]) table.add_row('(c) Side View', '(d) Front View') table.add_hline() # with doc.create(Figure(position='h!')) as view_3D: # view_3dimg_path = rel_path + Disp_3d_image # # view_3D.add_image(filename=view_3dimg_path, width=NoEscape(r'\linewidth')) # # view_3D.add_image(filename=view_3dimg_path,width=NoEscape(r'\linewidth,height=6.5cm')) # # view_3D.add_caption('3D View') with doc.create(Section('Design Log')): doc.append( pyl.Command('Needspace', arguments=NoEscape(r'10\baselineskip'))) logger_msgs = reportsummary['logger_messages'].split('\n') for msg in logger_msgs: if ('WARNING' in msg): colour = 'blue' elif ('INFO' in msg): colour = 'OsdagGreen' elif ('ERROR' in msg): colour = 'red' else: continue doc.append(TextColor(colour, '\n' + msg)) try: doc.generate_pdf(filename, compiler='pdflatex', clean_tex=False) except: pass
def generatePDF(obj):
    """
    Function that generates the rendered .pdf
    """
    geometry_options = {"margin": "2cm", "includeheadfoot": True}
    doc = Document(page_numbers=True, geometry_options=geometry_options)

    # Disable Page Numbering
    # https://jeltef.github.io/PyLaTeX/latest/examples/basic.html
    # https://tex.stackexchange.com/questions/7355/how-to-suppress-page-number#7357
    doc.preamble.append(Command('pagenumbering', 'gobble'))

    # Include Packages
    doc.preamble.append(Command('usepackage', 'url'))
    doc.preamble.append(Command('usepackage', 'fancyhdr'))
    doc.preamble.append(Command('usepackage', 'graphicx'))

    # Set Pagestyle (fancy is used for page header and footer)
    doc.preamble.append(Command('pagestyle', 'fancy'))

    # Header and Footer
    doc.preamble.append(
        Command(
            'cfoot',
            NoEscape(
                'Aionda Zeiterfassung - \\url{https://github.com/AiondaDotCom/tools}'
            )))
    doc.preamble.append(
        Command('chead',
                (Command('Large', bold('Arbeitnehmer: Check-In-Time')))))

    # with doc.create(Section(NoEscape('Arbeitnehmer:\\\\Check-In-Time'), numbering=False)):

    # Create table with information about the employee
    with doc.create(LongTable("|l|l|l|")) as headerTable:
        headerTable.add_hline()
        headerTable.add_row([
            "Name: {}".format(obj["employee"]),
            "Monat: {:02d}".format(obj["month"]),
            "Jahr: {}".format(obj["year"])
        ])
        headerTable.add_hline()

    # Generate table for time-tracking information
    with doc.create(LongTable("|c|c|r|r|r|")) as data_table:
        data_table.add_hline()
        data_table.add_row([
            bold("Datum"),
            bold("von - bis"),
            bold("Pause"),
            bold("gesamte AZ"),
            bold(NoEscape("davon \\\"Uberstunden"))
        ])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()

        # Insert rows
        for row in obj["zeitAufzeichnungsTable"]:
            data_table.add_row(row)
            data_table.add_hline()
        data_table.add_hline()

        # Last row shows the total amount of hours
        arbeitszeitStr = "{}".format(obj["arbeitszeitKomplettRounded"])
        data_table.add_row(["Summe", '', '', arbeitszeitStr, ''])
        data_table.add_hline()

    # Generate table with remarks
    with doc.create(LongTable("lllll")) as signTable:
        signTable.add_row(
            ['Bemerkungen:', 'Krankheitstage:', 'K', 'Schlechtwetter:', 'WA'])
        signTable.add_row(
            ['', 'Wochenende/Feiertag:', 'WF', 'Urlaubstage:', 'U'])

    # Insert vertical space
    doc.append(Command('vspace', '1cm'))

    # Place for signatures
    with doc.create(LongTable("ll")) as signTable:
        if obj['signatureFile']:
            signTable.add_row([
                Command('includegraphics', obj['signatureFile'], 'width=4cm'),
                Command('vspace', NoEscape('-.1cm'))
            ])
        else:
            signTable.add_row(["", NoEscape("")])
        signTable.add_hline()
        signTable.add_row([
            NoEscape("Unterschrift Arbeitnehmer"),
            NoEscape("\hspace{4cm}Unterschrift Arbeitgeber")
        ])

    # Finally the document is rendered
    doc.generate_pdf(obj['outputFilename'], clean_tex=False)
def appendix(data_dir, vystup_dir): os.chdir(data_dir) data_PU = read_results('raw_data_PU.csv') data_POP = read_results('raw_data_POP.csv') data_mid = read_results('results.csv') PurePath(vystup_dir) geometry_options = {"margin": "0.5cm", "includeheadfoot": True} doc = Document(page_numbers=True, geometry_options=geometry_options) doc.preamble.append(NoEscape(r'\definecolor{Hex}{RGB}{239,239,239}')) doc.documentclass.options = Options('10pt') doc.append(Command('textbf', 'Zadání hodnot pro výpočet: ')) doc.append( NoEscape(r'Tabulka \ref{rozmery} je souhrnem zadaných hodnot \ potřebných k výpočtu požárně výpočtového zatížení.')) def genenerate_longtabu(data): if data == data_PU: with doc.create(LongTable("l l c c c c c c l")) as data_table: doc.append(Command('caption', 'Zadání místností pro výpočet')) doc.append(Command('label', 'rozmery')) doc.append(Command('\ ')) data_table.append data_table.add_hline() data_table.add_row([ "PU", "Místnost", "Plocha", NoEscape('h$_s$'), NoEscape('a$_n$'), NoEscape('p$_n$'), NoEscape('p$_s$'), "c", "Tab. A.1" ]) data_table.add_hline() data_table.end_table_header() data_table.add_hline() doc.append(Command('endfoot')) doc.append(Command('endlastfoot')) # data_table.end_table_footer() data_table.add_hline() if data == data_POP: with doc.create(LongTable("l l c c c c", pos=['l'])) as data_table: doc.append(Command('caption', 'Zadání okenních otvorů')) doc.append(Command('label', 'okna')) doc.append(Command('\ ')) data_table.append data_table.add_hline() data_table.add_row([ "PU", "Místnost", "n otvorů", "šířka otvorů", "výška otvorů", "Plocha" ]) data_table.add_row( [" ", " ", " ", "[m]", "[m]", NoEscape('m$^2$')]) data_table.add_hline() data_table.end_table_header() data_table.add_hline() data_table.add_hline() for i in range(0, len(data)): if i % 2 == 0: data_table.add_row(data[i]) else: data_table.add_row(data[i], color="Hex") data_table.add_hline() for i in range(0, len(data_PU)): data_PU[i][2] = data_PU[i][2].replace(".", ",") data_PU[i][3] = data_PU[i][3].replace(".", ",") data_PU[i][4] = data_PU[i][4].replace(".", ",") data_PU[i][5] = data_PU[i][5].replace(".", ",") data_PU[i][6] = data_PU[i][6].replace(".", ",") data_PU[i][7] = data_PU[i][7].replace(".", ",") genenerate_longtabu(data_PU) doc.append( NoEscape(r'Okenní otvory nutné pro výpočet součinitele b jsou\ uvedeny v následující tabulce \ref{okna}')) for i in range(0, len(data_POP)): data_POP[i][3] = data_POP[i][3].replace(".", ",") data_POP[i][4] = data_POP[i][4].replace(".", ",") data_POP[i][5] = data_POP[i][5].replace(".", ",") genenerate_longtabu(data_POP) doc.append(Command('textbf', 'Mezivýsledky a výsledky: ')) doc.append( NoEscape(r'Mezivýsledky nutné pro stanovení parametru b jsou\ patrné z tabulky \ref{mezivysledky}')) ######################################################################### ''' Mezivýsledky parametr B ''' data_mid_ar = [] for i in range(0, len(data_mid)): data_mid_ar.append(data_mid[i][0]) data_mid_ar.append(data_mid[i][1]) data_mid_ar.append(data_mid[i][8]) data_mid_ar.append(data_mid[i][9]) data_mid_ar.append(data_mid[i][10]) data_mid_ar.append(data_mid[i][14]) data_mid_ar.append(data_mid[i][15]) data_mid_ar = list(chunks(data_mid_ar, 7)) for i in range(0, len(data_mid_ar)): data_mid_ar[i][1] = data_mid_ar[i][1].replace(".", ",") data_mid_ar[i][2] = data_mid_ar[i][2].replace(".", ",") data_mid_ar[i][3] = data_mid_ar[i][3].replace(".", ",") data_mid_ar[i][4] = data_mid_ar[i][4].replace(".", ",") data_mid_ar[i][5] = data_mid_ar[i][5].replace(".", ",") 
data_mid_ar[i][6] = data_mid_ar[i][6].replace(".", ",") with doc.create(LongTable("l c c c c c c", pos=['l'])) as data_table: doc.append(Command('caption', 'Mezivýsledky pro paramter b')) doc.append(Command('label', 'mezivysledky')) doc.append(Command('\ ')) data_table.append data_table.add_hline() data_table.add_row(["PÚ", "S", "S0", "hs", "h0", "n", "k"]) data_table.add_row([ " ", NoEscape('m$^2$'), NoEscape('m$^2$'), "[m]", "[m]", "[-]", "[-]" ]) data_table.add_hline() data_table.end_table_header() data_table.add_hline() data_table.add_hline() for i in range(0, len(data_mid_ar)): if i % 2 == 0: data_table.add_row(data_mid_ar[i]) else: data_table.add_row(data_mid_ar[i], color="Hex") data_table.add_hline() ######################################################################### ''' Vysledky ''' doc.append( NoEscape(r'Přehled výsledků požárních úseků je patrný z tabulky\ \ref{Vysledky}')) data_res = [] for i in range(0, len(data_mid)): data_res.append(data_mid[i][0]) data_res.append(data_mid[i][1]) data_res.append(data_mid[i][2]) data_res.append(data_mid[i][3]) data_res.append(data_mid[i][4]) data_res.append(data_mid[i][13]) # a_n data_res.append(data_mid[i][12]) # p_n data_res.append(data_mid[i][11]) # p_s data_res.append(data_mid[i][7]) # p_only data_res.append(data_mid[i][5]) # p_v data_res = list(chunks(data_res, 10)) for i in range(0, len(data_res)): data_res[i][1] = data_res[i][1].replace(".", ",") data_res[i][2] = data_res[i][2].replace(".", ",") data_res[i][3] = data_res[i][3].replace(".", ",") data_res[i][4] = data_res[i][4].replace(".", ",") data_res[i][5] = data_res[i][5].replace(".", ",") data_res[i][6] = data_res[i][6].replace(".", ",") data_res[i][7] = data_res[i][7].replace(".", ",") data_res[i][8] = data_res[i][8].replace(".", ",") data_res[i][9] = data_res[i][9].replace(".", ",") with doc.create(LongTable("l c c c c c c c c c ", pos=['l'])) as data_table: doc.append( Command( 'caption', 'Přehled požárních úseků a jejich\ výsledky')) doc.append(Command('label', 'Vysledky')) doc.append(Command('\ ')) data_table.append data_table.add_hline() data_table.add_row([ "PÚ", "Plocha", "a", "b", "c", NoEscape('a$_n$'), NoEscape('p$_n$'), NoEscape('p$_s$'), NoEscape('p'), NoEscape('p$_v$') ]) data_table.add_row([ " ", NoEscape('m$^2$'), "[-]", "[-]", "[-]", "[-]", NoEscape('[kg/m$^2$]'), NoEscape('[kg/m$^2$]'), NoEscape('[kg/m$^2$]'), NoEscape('[kg/m$^2$]') ]) data_table.end_table_header() data_table.add_hline() for i in range(0, len(data_res)): if i % 2 == 0: data_table.add_row(data_res[i]) else: data_table.add_row(data_res[i], color="Hex") data_table.add_hline() os.chdir(vystup_dir) doc.generate_pdf("Appendix", clean_tex=False)
def odstup_generator(df): ''' Funkce k vytvoření kapitoly s vypočtenými odstupy Vstupní data musí být DataFrame o specifickém formátu o deseti sloupcích viz níže: df_result['Nazev/fasada', 'vyska', 'sirka', 'Samostatne']] df_result['POP'] = POP_store_list df_result['p_v'] = pv_list df_result['I_avrg'] = I_tok_list df_result['d'] = d_results df_result['d\''] = d_results_kraj df_result['d\'s'] = d_results_za_krajem Funkce tvoří přímo tabulku upravenou na míru pro prezentování odstupových vzdáleností v PBŘ zprávě ''' '''Poznámky k tabulce''' list_avaiable = ['Přístřešky pro auta DP3', 'Pergoly / přístřešky'] '''Práce s daty ''' data_popis = df['Samostatne'].values.tolist() # Data pro přidání popisků df = df.drop(columns=['Samostatne']) data_used = df.values.tolist() # Data pro tvoření tabulky '''Přidání popisků na správné místo''' check = [] if not data_popis: # Zjišťuje zda je nutné přidat popisky pass else: save_var = [] save_id = [] save_name = [] save_id_check = [] for i in range(0, len(data_popis)): if data_popis[i] in list_avaiable: check.append(i) save_var.append(data_popis[i]) save_id_check.append(i) count = list(range(1, len(save_var)+1)) pre = ' $^{' suf = ')}$' for i in range(0, len(save_var)): count[i] = pre + str(count[i]) count[i] = str(count[i]) + suf for n in range(0, len(save_var)): if n == 0: new_dict = dict(zip([save_var[n]], [count[n]])) save_id.append(save_id_check[n]) save_name.append(save_var[n]) # new_dict = dict(zip([save_var[n]], str(count[n]))) if n > 0: if save_var[n] in new_dict: save_id.append(save_id_check[n]) save_name.append(save_var[n]) else: len_dict = len(new_dict)+1 len_dict = pre + str(len_dict) len_dict = str(len_dict) + suf save_id.append(save_id_check[n]) save_name.append(save_var[n]) new_dict[save_var[n]] = len_dict for i in range(0, len(save_id)): data_used[save_id[i]][0] = NoEscape(data_used[save_id[i]][0] + new_dict[save_name[i]]) '''Přeměna čísel s . na čísla s ,''' for i in range(0, len(data_used)): data_used[i][1] = str(("%.2f" % data_used[i][1])).replace(".", ",") data_used[i][2] = str(("%.2f" % data_used[i][2])).replace(".", ",") data_used[i][3] = str(("%.2f" % data_used[i][3])).replace(".", ",") data_used[i][4] = str(("%.2f" % data_used[i][4])).replace(".", ",") data_used[i][5] = str(("%.2f" % data_used[i][5])).replace(".", ",") data_used[i][6] = str(("%.2f" % data_used[i][6])).replace(".", ",") data_used[i][7] = str(("%.2f" % data_used[i][7])).replace(".", ",") data_used[i][8] = str(("%.2f" % data_used[i][8])).replace(".", ",") '''Textová část kapitoly ''' with doc.create(Subsection('Stanovení odstupových vzdáleností')): if kcni_system != 'nehořlavý': doc.append(NoEscape(r'Požární zatížení je v souladu s čl. 10.4.4 normy ČSN 73 0802 vzhledem k zatřídění konstrukčního systému nově navrhované budovy ({}; zatřídění viz tabulka '.format(kcni_system))) doc.append(NoEscape(r'\ref{PTCH}) ')) doc.append(NoEscape(r' dále zvýšeno o {} kg/m$^2$.'.format(add_pv))) doc.append(NoEscape('\par')) doc.append(NoEscape(r'Odstupové vzdálenosti jsou stanoveny určením vzdáleností kritického tepelného toku 18,5 kW/m$^2$ od vnějšího líce požárně otevřené plochy, a to pomocí programu pro výpočet odstupových vzdáleností, který je zmíněn v kapitole \ref{cha-1}). 
Výsledné hodnoty odstupových vzdáleností z tohoto programu jsou názorně zakresleny v následujícím obrázku \ref{SchemaPOP2}.')) with doc.create(Figure(position='htb!')) as Odstup: Odstup.add_image('images/odstup.jpg', width='160px') Odstup.add_caption('Znázornění vypočtených hodnot') Odstup.append(Command('label', 'SchemaPOP2')) doc.append(NoEscape(r'Vypočtené odstupové vzdále\-nosti jsou patrné z tabulky \ref{odstup}.')) '''Ověření zda je možné přidat popisky + přidání popisků''' if len(check) > 0: doc.append(Command('begin', 'ThreePartTable')) doc.append(Command('begin', 'TableNotes')) doc.append(NoEscape('\small')) for key in new_dict.keys(): pre_str = '\item ' if key == 'Pergoly / přístřešky': loc_str = ' Odstupová vzdálenost z domnělých stěn otevřených přístřešků je spočtena pomocí normové křivky pro vnější požár. Maximální teplota požáru je tedy v tomto případě 658$^o$C. Požární zatížení je stanoveno pomocí čl. 10.4.4 normy ČSN 73 0802.' string = new_dict[key] doc.append(NoEscape(pre_str + string + loc_str)) if key == 'Přístřešky pro auta DP3': loc_str = ' Odstupová vzdálenost od otevřeného přístřešku pro auta je stanovena na základě čl. I.3.1 normy ČSN 73 0804. Uvažováno je s ekvivalentní dobou požáru 30 minut (zde označeno jako p$_v$ = 30 kg/m$^2$). s výškou přístřešku 1,5 m a skutečnou délkou přístřešku.' string = new_dict[key] doc.append(NoEscape(pre_str + string + loc_str)) if len(check) > 0: doc.append(Command('end', 'TableNotes')) with doc.create(LongTable("lcccccccc", pos=['htb'])) as data_table: doc.append(Command('caption', 'Odstupové vzdálenosti od objektu')) doc.append(Command('label', 'odstup')) doc.append(Command('\ ')) data_table.append data_table.add_hline() data_table.add_row([" ", "Výška", "Šířka", "POP", NoEscape('$p_v$'), NoEscape('$I_{avrg}$'), "d", "d\'", NoEscape("d\'$_s$")]) data_table.add_row([" ", "[m]", "[m]", NoEscape('[\%]'), NoEscape('[kg/m$^2$]'), NoEscape('[kW/m$^2$]'), "[m]", "[m]", "[m]"]) data_table.add_hline() data_table.end_table_header() for i in range(0, len(data_used)): if i == 0: # \multicolumn{9}{l}{\textbf{1. nadzemní podlaží - POP společné}}\\ data_table.add_row((MultiColumn(9, align='l', data=utils.bold(df_popis[i])),)) if i > 0 and df_popis[i] != df_popis[i-1]: data_table.add_hline() data_table.add_row((MultiColumn(9, align='l', data=utils.bold(df_popis[i])),)) if (i) % 2 == 0: data_table.add_row(data_used[i], color="Hex") elif (i) % 2 != 0: data_table.add_row(data_used[i]) data_table.add_hline() if len(check) > 0: doc.append(NoEscape('\insertTableNotes')) if len(check) > 0: doc.append(Command('end', 'ThreePartTable'))
def pile_report(self, pile, pile2, F, factor=1.25): """添加桩基长度计算内容""" self.soil = pile.soil llist = pl.get_l(self.soil, pile.d, F, factor, pile.h1, pile.rho, pile.t, pile.k2, pile2.type, pile2.completion) ra_1, ra_2 = pl.pile_l(llist, self.soil, pile.d, F, pile.h1, pile.rho, pile.t, pile.k2, pile2.type, pile2.completion) if ra_1.max() > ra_2.max(): ptype = '摩擦桩' ra = ra_1.max() else: ptype = '端承桩' ra = ra_2.max() t1 = f'''桩基直径:$d={pile.d:.2f}\,m$\n 桩基周长:$u={pile.u:.2f}\,m$\n 桩基截面积:$A_p={pile.pd:.2f}\,m^2$\n 桩基密度:$\gamma={pile.rho:.1f}\,kN/m^3$\n 容许承载力随深度的修正系数:$k_2={pile.k2:.1f}$\n 各土层加权平均重度:$\gamma_2={self.soil.rho:.1f}\,kN/m^3$\n 清底系数:$m_0={pile.m0:.1f}$ ''' t2 = '根据规范5.3.3可得,摩擦桩单桩承载力为' m1 = ['[R_a]', '=\\frac{1}{2}u\\sum_{i=1}^nq_{ik}l_i+A_pq_r'] t3 = '根据规范5.3.4可得,端承桩单桩承载力为' m2 = [ '[R_a]=', 'c_1A_pf_{rk}', '+u\\sum_{i=1}^mc_{2i}h_if_{rki}', '+\\frac{1}{2}\\xi_su\sum_{i=1}^nl_iq_{ik}' ] t4 = '考虑桩身自重与置换土重,桩基承载力为' m3 = ['R_a', '=[R_a]-G_p+G_s'] t5 = '代入不同长度桩长,可得摩擦桩与端承桩承载力如下图所示' t6 = '不同桩长具体承载力如下表所示' t7 = f'由上述分析可知,当桩长为{max(llist)}m时,{ptype}承载力为{ra:.0f}kN,安全系数为{ra/F:.2f},桩基承载力可满足规范要求。' with self.doc.create(Section('地基基本情况')): with self.doc.create(LongTabu("p{4cm}XXXXX")) as soil_table: header_row1 = self.soil.prop.columns.to_list()[:-1] soil_table.add_hline() soil_table.add_row(header_row1, mapper=[bold]) soil_table.add_hline() for i in self.soil.prop.index: soil_table.add_row(self.soil.prop.iloc[i].to_list()[:-1]) soil_table.add_hline() with self.doc.create(Section('桩基及其他参数取值')): self.doc.append(NoEscape(t1)) with self.doc.create(Section('桩长计算')): self.doc.append(t2) self.doc.append(Math(data=m1, escape=False)) self.doc.append(NoEscape('\n')) self.doc.append(t3) self.doc.append(Math(data=m2, escape=False)) self.doc.append(NoEscape('\n')) self.doc.append(t4) self.doc.append(Math(data=m3, escape=False)) self.doc.append(NoEscape('\n')) self.doc.append(t5) with self.doc.create(Figure(position='htbp')) as plot: plot.add_plot(width=NoEscape(r'1\textwidth'), dpi=300) self.doc.append(NoEscape('\n')) self.doc.append(t6) with self.doc.create(LongTable('p{1.5cm}|ll|ll')) as pll: pll.add_hline() pll.add_row(['桩长', '摩擦桩承载力', '安全系数', '端承桩承载力', '安全系数']) pll.add_hline() for i, j in enumerate(llist): pll.add_row([ j, f'{ra_1[i]:.0f}', f'{ra_1[i]/F:.2f}', f'{ra_2[i]:.0f}', f'{ra_2[i]/F:.2f}' ]) pll.add_hline() self.doc.append(t7)
direction_table.add_hline() direction_table.add_row((MultiColumn(1, align="|c|", data=FootnoteText(lorem(5))), )) direction_table.add_hline() direction_table.add_row((MultiColumn(1, align="|c|", data=SmallText(lorem(6))), )) direction_table.add_hline() direction_table.add_empty_row() #Info table with doc.create( LongTable( table_spec= "|m{1cm}|m{1.7cm}|m{1.6cm}|m{1.9cm}|m{1.8cm}|m{1.8cm}|m{1.5cm}|m{1.6cm}|m{1.8cm}|m{1.6cm}|m{1.6cm}|m{1.2cm}|m{1.4cm}|m{1.4cm}|" )) as info_table: info_table.add_hline() info_table.add_row(FootnoteText(lorem(1)), FootnoteText(lorem(3)), FootnoteText(lorem(3)), FootnoteText(lorem(4)), FootnoteText(lorem(3)), FootnoteText(lorem(3)), FootnoteText(lorem(3)), FootnoteText(lorem(3)), FootnoteText(lorem(4)), FootnoteText(lorem(4)), FootnoteText(lorem(2)), FootnoteText(lorem(2)), FootnoteText(lorem(2)), FootnoteText(bold(lorem(1)))) info_table.add_hline() for i in range(15): info_table.add_row(i + 1,
def generate_latex_document(trackers: List[Tracker], sequences: List[Sequence], results, storage: Storage, build=False, multipart=True): order_marks = {1: "first", 2: "second", 3: "third"} def format_cell(value, order): cell = format_value(value) if order in order_marks: cell = Command(order_marks[order], cell) return cell logger = logging.getLogger("vot") table_header, table_data, table_order = extract_measures_table( trackers, results) plots = extract_plots(trackers, results) doc = Document(page_numbers=True) doc.preamble.append(Package('pgf')) doc.preamble.append(Package('xcolor')) doc.preamble.append(Package('fullpage')) doc.preamble.append(NoEscape(read_resource("commands.tex"))) doc.preamble.append( UnsafeCommand('newcommand', r'\first', options=1, extra_arguments=r'{\color{red} #1 }')) doc.preamble.append( UnsafeCommand('newcommand', r'\second', options=1, extra_arguments=r'{\color{green} #1 }')) doc.preamble.append( UnsafeCommand('newcommand', r'\third', options=1, extra_arguments=r'{\color{blue} #1 }')) if multipart: container = Chunk() generate_symbols(container, trackers) with storage.write("symbols.tex") as out: container.dump(out) doc.preamble.append(Command("input", "symbols.tex")) else: generate_symbols(doc.preamble, trackers) doc.preamble.append(Command('title', 'VOT report')) doc.preamble.append( Command('author', 'Toolkit version ' + toolkit_version())) doc.preamble.append(Command('date', datetime.datetime.now().isoformat())) doc.append(NoEscape(r'\maketitle')) if len(table_header[2]) == 0: logger.debug("No measures found, skipping table") else: # Generate data table with doc.create(LongTable("l " * (len(table_header[2]) + 1))) as data_table: data_table.add_hline() data_table.add_row([" "] + [ MultiColumn(c[1], data=c[0].identifier) for c in merge_repeats(table_header[0]) ]) data_table.add_hline() data_table.add_row([" "] + [ MultiColumn(c[1], data=c[0].title) for c in merge_repeats(table_header[1]) ]) data_table.add_hline() data_table.add_row( ["Tracker"] + [" " + c.abbreviation + " " for c in table_header[2]]) data_table.add_hline() data_table.end_table_header() data_table.add_hline() for tracker, data in table_data.items(): data_table.add_row([ UnsafeCommand("Tracker", [tracker.reference, TRACKER_GROUP]) ] + [ format_cell(x, order[tracker] if not order is None else None) for x, order in zip(data, table_order) ]) for experiment, experiment_plots in plots.items(): if len(experiment_plots) == 0: continue doc.append(Section("Experiment " + experiment.identifier)) for title, plot in experiment_plots: with doc.create(Figure(position='htbp')) as container: if multipart: plot_name = plot.identifier + ".pdf" with storage.write(plot_name, binary=True) as out: plot.save(out, "PDF") container.add_image(plot_name) else: container.append(insert_figure(plot)) container.add_caption(title) if build: temp = tempfile.mktemp() logger.debug("Generating to tempourary output %s", temp) doc.generate_pdf(temp, clean_tex=True) storage.copy(temp + ".pdf", "report.pdf") else: with storage.write("report.tex") as out: doc.dump(out)
def d_generator(data_dir, vystup_dir): os.chdir(data_dir) data_PU = read_results('results.csv') info_PU = read_results('raw_data_info.csv') # Arrange data to desired shape data_check = [] data_used = [] data_replace_p = [] data_check_p = [] data_replace_a = [] l_names = [] p_stale = [] for i in range(0, len(data_PU)): data_used.append(data_PU[i][0]) data_used.append(data_PU[i][16]) data_used.append(data_PU[i][1]) data_used.append(data_PU[i][2]) data_used.append(data_PU[i][3]) data_used.append(data_PU[i][4]) data_used.append(data_PU[i][5]) data_used.append(data_PU[i][6]) data_check.append(data_PU[i][19]) data_replace_p.append(data_PU[i][12]) data_replace_a.append(data_PU[i][13]) data_check_p.append(data_PU[i][7]) l_names.append(data_PU[i][19]) p_stale.append(data_PU[i][20]) data_used = list(chunks(data_used, 8)) list_avaiable = [ 'B.1 pol.1', 'B.1 pol.2', 'B.1 pol.3', 'B.1 pol.4', 'B.1 pol.5', 'B.1 pol.6', 'B.1 pol.7', 'B.1 pol.8', 'B.1 pol.9', 'B.1 pol.10', 'B.1 pol.11', 'B.1 pol.12', 'B.1 pol.13' ] list_avaiable2 = [ 'AZ1 Ordi.', 'AZ1 Lék.', 'AZ2 Ordi', 'AZ2 vyšet.', 'AZ2 Lék.', 'LZ1', 'LZ2 lůž', 'LZ2 int.péče', 'LZ2 Lék', 'LZ2 biochem', 'peč. Služ', 'soc.péče.ošetř.', 'soc.péče.lůž.', 'soc.péče.byt', 'Jesle' ] check = [] for i in range(0, len(data_check)): if data_check[i] in list_avaiable or\ data_check[i] == 'OB2 byt' or\ data_check[i] == 'OB3' or data_check[i] == 'OB4 ubyt.' or data_check[i] == 'OB4 sklad' or\ data_check[i] == 'CHÚC-A' or data_check[i] == 'CHÚC-B' or\ data_check[i] == 'CHÚC-C' or data_check[i] in list_avaiable2: check.append(i) sys.path.insert(0, "c:/Users/Honza/Google Drive/Work/Generator_zprav/minor/") from stupen import spb_def type_sys = info_PU[0] h_p = float(info_PU[1][0]) podlazi = float(info_PU[2][0]) if len(check) > 0: for item in check: data_used[item][4] = '-' '''data_used[item][4] = '-' if data_check[item] == 'OB2 byt': if (float(data_check_p[item]) - float(data_replace_p[item])) >= 0: data_used[item][6] = '%.2f' % 40 spb_fix = spb_def(h_p, type_sys[0], [40], [1.00], podlazi, str(data_PU[item][0])) data_used[item][7] = spb_fix[0] if (float(data_check_p[item]) - float(data_replace_p[item])) > 5: data_used[item][6] = '%.2f' % 45.00 spb_fix = spb_def(h_p, type_sys[0], [45], [1.00], podlazi, data_PU[item][0]) data_used[item][7] = spb_fix[0] if (float(data_check_p[item]) - float(data_replace_p[item])) >= 15: data_used[item][6] = '%.2f' % 50.00 spb_fix = spb_def(h_p, type_sys[0], [50], [1.00], podlazi, data_PU[item][0]) data_used[item][7] = spb_fix[0] data_used[item][3] = str('%.2f' % 1.00) # pozn = ' $^{1)}$' # data_used[item][0] = NoEscape(data_used[item][0] + pozn) if data_check[item] in list_avaiable: if 0 <= float(p_stale[item]) <= 5: data_used[item][6] = '%.2f' % float(data_replace_p[item]) if float(p_stale[item]) > 5: data_replace_p[item] = ((float(p_stale[item]) - 5) * 1.15)\ + float(data_replace_p[item]) data_used[item][6] = '%.2f' % data_replace_p[item] spb_fix = spb_def(h_p, type_sys[0], [float(data_replace_p[item])], [1.00], podlazi, data_PU[item][0]) data_used[item][7] = spb_fix[0] data_used[item][3] = str('%.2f' % 1.00) if data_check[item] in list_avaiable2: data_used[item][6] = '%.2f' % float(data_replace_p[item]) data_used[item][3] = data_replace_a[item] spb_fix = spb_def(h_p, type_sys[0], [float(data_used[item][6])], [float(data_used[item][3])], podlazi, data_PU[item][0]) data_used[item][7] = spb_fix[0] if data_check[item] == 'OB3' or data_check[i] == 'OB4 ubyt.': data_used[item][6] = '%.2f' % float(data_replace_p[item]) spb_fix = spb_def(h_p, 
type_sys[0], [30], [1.00], podlazi, data_PU[item][0]) data_used[item][7] = spb_fix[0] data_used[item][3] = str('%.2f' % 1.00) if data_check[item] == 'OB4 sklad': data_used[item][6] = '%.2f' % float(data_replace_p[item]) spb_fix = spb_def(h_p, type_sys[0], [60], [1.00], podlazi, data_PU[item][0]) data_used[item][7] = spb_fix[0] data_used[item][3] = str('%.2f' % 1.00)''' if data_check[item] == 'CHÚC-A' or data_check[item] == 'CHÚC-B' or\ data_check[item] == 'CHÚC-C': data_used[item][3] = '-' data_used[item][4] = '-' data_used[item][5] = '-' data_used[item][6] = '-' if h_p < 30: data_used[item][7] = 'II.' if h_p >= 30: data_used[item][7] = 'III.' if h_p >= 45: data_used[item][7] = 'IV.' with doc.create(Section('Posouzení požárních úseků')): with doc.create(Subsection('Vyhodnocení požárních úseků')): doc.append( NoEscape(r'Z tabulky \ref{PU} je patrné požární riziko\ a stupeň požární bezpečnosti všech řešených\ požárních úseků. Není-li v poznámce pod\ tabulkou uvedeno jinak, je požární riziko PÚ\ stanoveno výpočtem (viz příloha \ref{A-1}:\ Výpočet požárního rizika).')) save_var = [] save_id = [] save_name = [] save_id_check = [] for i in range(0, len(data_check)): if data_check[i] == 'B.1 pol.1' or data_check[i] == 'B.1 pol.2' or\ data_check[i] == 'B.1 pol.3' or data_check[i] == 'B.1 pol.4' or\ data_check[i] == 'B.1 pol.5' or data_check[i] == 'B.1 pol.6' or\ data_check[i] == 'B.1 pol.7' or data_check[i] == 'B.1 pol.8' or\ data_check[i] == 'B.1 pol.9' or data_check[i] == 'B.1 pol.10' or\ data_check[i] == 'B.1 pol.11' or data_check[i] == 'B.1 pol.12' or\ data_check[i] == 'B.1 pol.13' or data_check[i] == 'OB2 byt' or\ data_check[i] == 'CHÚC-A' or data_check[i] == 'CHÚC-B' or\ data_check[i] == 'CHÚC-C' or data_check[i] == 'OB3' or\ data_check[i] == 'OB4 ubyt.' or\ data_check[i] == 'OB4 sklad' or\ data_check[i] in list_avaiable2: save_var.append(data_check[i]) save_id_check.append(i) count = list(range(1, len(save_var) + 1)) pre = ' $^{' suf = ')}$' for i in range(0, len(save_var)): count[i] = pre + str(count[i]) count[i] = str(count[i]) + suf for n in range(0, len(save_var)): if n == 0: new_dict = dict(zip([save_var[n]], [count[n]])) save_id.append(save_id_check[n]) save_name.append(save_var[n]) # new_dict = dict(zip([save_var[n]], str(count[n]))) if n > 0: if save_var[n] in new_dict: save_id.append(save_id_check[n]) save_name.append(save_var[n]) else: len_dict = len(new_dict) + 1 len_dict = pre + str(len_dict) len_dict = str(len_dict) + suf save_id.append(save_id_check[n]) save_name.append(save_var[n]) new_dict[save_var[n]] = len_dict for i in range(0, len(save_id)): data_used[save_id[i]][1] = NoEscape(data_used[save_id[i]][1] + new_dict[save_name[i]]) doc.append(Command('begin', 'ThreePartTable')) if len(check) > 0: doc.append(Command('begin', 'TableNotes')) doc.append(NoEscape('\small')) for key in new_dict.keys(): pre_str = '\item' if key == 'OB2 byt': idx = l_names.index(key) suf_str = ' Uvažováno s požárně výpočetním zatížením (p$_v$)\ podle normy ČSN 73 0833 čl. 5.1.2.' string = new_dict[key] if 0 <= (float(data_check_p[idx]) - float(data_replace_p[idx])) < 5: loc_str = pre_str + string + suf_str doc.append(NoEscape(loc_str)) if (float(data_check_p[idx]) - float(data_replace_p[idx])) > 5: suf_str = suf_str + ' Stálé požární\ zatížení v požárním úseku je p$_s$ = 10 kg/m$^2$.\ Je tak přihlédnuto k poznámce stejného článku,\ která stanovuje požární zatížení na hodnotu\ p$_v$ = 45 kg/m$^2$.' 
loc_str = pre_str + string + suf_str doc.append(NoEscape(loc_str)) if key in list_avaiable: string = new_dict[key] idx = l_names.index(key) polozka = str(list_avaiable.index(key) + 1) suf_str_f = ' Hodnota požárně výpočtového zatížení je\ stanovena paušálně z položky ' suf_str_ff = ' tabulky B.1 normy ČSN 73 0802.' suf_str_b = ' Stálé požární zatížení je p$_s$ = ' suf_str_bb = ' kg/m$^2$. Při stanovení požárně výpočtového\ zaížení je tak přihlédnuto k čl. B.1.2.' if 0 <= (float(data_check_p[idx]) - float(data_replace_p[idx])) <= 5: suf_str_f = suf_str_f + polozka + suf_str_ff loc_str = pre_str + string + suf_str_f doc.append(NoEscape(loc_str)) if 5 < (float(data_check_p[idx]) - float(data_replace_p[idx])): suf_str_num = str(p_stale[idx]) suf_str_f = suf_str_f + polozka + suf_str_ff suf_str_b = suf_str_b + suf_str_num + suf_str_bb loc_str = pre_str + string + suf_str_f + suf_str_b doc.append(NoEscape(loc_str)) if key in list_avaiable2: string = new_dict[key] idx = l_names.index(key) suf_str_f = ' Hodnota požárně výpočtového zatížení je\ stanovena paušálně z ' suf_str_ff = ' z normy ČSN 73 0835.' if key == 'AZ1 Ordi.': odkaz = 'čl. 5.3.1' suf_str_b = ' Jedná se o zařízení lékařské péče\ zařazené do skupiny AZ1' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'AZ1 Lék.': odkaz = 'čl. 5.3.1' suf_str_b = ' Jedná se o lékárenské zařízení\ zařazené do skupiny AZ1' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'AZ2 Ordi': odkaz = 'čl. 6.2.1' suf_str_b = ' Jedná se o lékárenské pracoviště\ zařazené do skupiny AZ2' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'AZ2 vyšet.': odkaz = 'čl. 6.2.1' suf_str_b = ' Jedná se o vyšetřovací nebo léčebnou\ část budovy zařazené do skupiny AZ2' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'AZ2 Lék.': odkaz = 'čl. 6.2.1' suf_str_b = ' Jedná se o lékárenské zařízení\ zařazené do skupiny AZ2' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'LZ1': odkaz = 'čl. 7.2.1' suf_str_b = ' Jedná se o požární úsek, který je\ součástí budovy skupiny LZ1' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(No.Escape(loc_str)) if key == 'LZ2 lůž': odkaz = 'čl. 8.2.1' suf_str_b = ' Jedná se o lůžkové jednotky v\ budově skupiny LZ2' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'LZ2 int.péče': odkaz = 'čl. 8.2.1' suf_str_b = ' Jedná se o jednotky intenzivní péče,\ ansteziologicko resustitační oddělení,\ nebo o operační oddělení v budově\ zařazené do skupiny LZ2' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'LZ2 Lék': odkaz = 'čl. 8.2.1' suf_str_b = ' Jedná se o lékárenské zařízení v\ budově skupiny LZ2' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'LZ2 biochem': odkaz = 'čl. 8.2.1' suf_str_b = ' Jedná se o oddělení klinické biochemie\ v budově skupiny LZ2' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'peč. Služ': odkaz = 'čl. 
9.3.1' suf_str_b = ' Jedná se o bytovou jednotku v domě\ s pečovatelskou službou' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'soc.péče.oštř.': odkaz = 'čl. 10.3.1' suf_str_b = ' Jedná se o ošetřovatelské oddělení v\ budově sociální péče' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'soc.péče.lůž.': odkaz = 'čl. 10.3.1' suf_str_b = ' Jedná se o lůžkovou část ústavu\ sociální péče' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'soc.péče.byt.': odkaz = 'čl. 10.3.1' suf_str_b = ' Jedná se o bytové jednotky v budově\ sociální péče.' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'Jesle.': odkaz = 'čl. 10.3.1' suf_str_b = ' Jedná se o zrdavotnické zařízení\ pro děti - jesle' loc_str = pre_str + string + suf_str_f + odkaz +\ suf_str_ff + suf_str_b doc.append(NoEscape(loc_str)) if key == 'OB3': string = new_dict[key] pre_str = '\item' suf_str = ' Požární riziko pro ubytovací jednotku bylo\ stanoveno paušálně pomocí čl. 6.1.1 normy\ ČSN 73 0833.' loc_str = pre_str + string + suf_str doc.append(NoEscape(loc_str)) if key == 'OB4 ubyt.': string = new_dict[key] pre_str = '\item' suf_str = ' Požární riziko pro ubytovací jednotku v budově OB4 bylo\ stanoveno paušálně pomocí čl. 7.1.1 normy\ ČSN 73 0833.' loc_str = pre_str + string + suf_str doc.append(NoEscape(loc_str)) if key == 'OB4 sklad': string = new_dict[key] pre_str = '\item' suf_str = ' Požární riziko pro ubytovací jednotku v budově OB4 bylo\ stanoveno paušálně pomocí čl. 7.1.3 normy\ ČSN 73 0833.' loc_str = pre_str + string + suf_str doc.append(NoEscape(loc_str)) if key == 'CHÚC-A' or key == 'CHÚC-B' or key == 'CHÚC-C': string = new_dict[key] pre_str = '\item' suf_str = ' CHÚC je zatříděna v souladu s čl. 9.3.2 normy\ ČSN 73 0802.' loc_str = pre_str + string + suf_str doc.append(NoEscape(loc_str)) if info_PU[3] == ['ANO']: try: new_dict except NameError: var_exist = False else: var_exist = True if var_exist is False: new_dict = [] pre_str = '\item' doc.append(Command('begin', 'TableNotes')) doc.append(NoEscape('\small')) if info_PU[5] == ['osobní výtahy, malé nákladní výtahy']: if h_p <= 22.5: s_vytah = [ 'Šv', 'Výtahové šachty', '-', '-', '-', '-', '-', 'II.' ] if 22.5 < h_p <= 45.0: s_vytah = [ 'Šv', 'Výtahové šachty', '-', '-', '-', '-', '-', 'III.' ] if h_p > 45.0: s_vytah = [ 'Šv', 'Výtahové šachty', '-', '-', '-', '-', '-', 'IV.' ] len_vytah = len(new_dict) + 1 app_vytah = pre + str(len_vytah) + suf suf_str_v = ' Výtahová šachta odpovídá čl. 8.10.2 a) normy\ ČSN 73 0802. Šachta slouží pro přepravu\ osob, nebo jako malý nákladní výtah.' loc_str_v = pre_str + app_vytah + suf_str_v doc.append(NoEscape(loc_str_v)) if info_PU[5] == ['osobně-nákladní, nákladní výtahy']: if h_p <= 30: s_vytah = [ 'Šv', 'Výtahové šachty', '-', '-', '-', '-', '-', 'III.' ] if h_p > 30: s_vytah = [ 'Šv', 'Výtahové šachty', '-', '-', '-', '-', '-', 'IV.' ] len_vytah = len(new_dict) + 1 app_vytah = pre + str(len_vytah) + suf suf_str_v = ' Výtahová šachta odpovídá čl. 8.10.2 normy\ ČSN 73 0802 bodu b). Šachta slouží jako\ osobo nákladní nebo nákladní výtah.' 
loc_str_v = pre_str + app_vytah + suf_str_v doc.append(NoEscape(loc_str_v)) app_this = pre + str(len_vytah) + suf s_vytah[1] = NoEscape(s_vytah[1] + app_this) if info_PU[4] == ['ANO']: try: new_dict except NameError: var_exist = False else: var_exist = True if var_exist is False: new_dict = [] pre_str = '\item' doc.append(Command('begin', 'TableNotes')) doc.append(NoEscape('\small')) if info_PU[6] == ['rozvody hořlavých látek – max 1000 mm2']: if h_p <= 22.5: s_inst = [ 'Ši', 'Instalační šachty', '-', '-', '-', '-', '-', 'II.' ] if 22.5 < h_p <= 45.0: s_inst = [ 'Ši', 'Instalační šachty', '-', '-', '-', '-', '-', 'III.' ] if h_p > 45.0: s_inst = [ 'Ši', 'Instalační šachty', '-', '-', '-', '-', '-', 'IV.' ] if info_PU[3] == ['ANO']: len_inst = len_vytah + 1 app_vytah = pre + str(len_inst) + suf suf_str_i = ' Instalační šachty jsou zatříděny v souladu s\ čl. 8.12.2 c). Šachty jsou dimenzovány\ pro rozvody hořlavých látek o celkovém\ průřezu 1000 m$^2$' loc_str_i = pre_str + app_vytah + suf_str_i doc.append(NoEscape(loc_str_i)) if info_PU[6] == [ 'rozvody nehořlavých látek – potrubí A1,\ A2' ]: s_inst = [ 'Ši', 'Instalační šachty', '-', '-', '-', '-', '-', 'I.' ] if info_PU[6] == ['rozvody nehořlavých látek – potrubí B-F']: s_inst = [ 'Ši', 'Instalační šachty', '-', '-', '-', '-', '-', 'II.' ] if info_PU[6] == ['rozvody hořlavých látek – 1000-8000 mm2']: if h_p <= 45.0: s_inst = [ 'Ši', 'Instalační šachty', '-', '-', '-', '-', '-', 'IV.' ] if h_p > 45.0: s_inst = [ 'Ši', 'Instalační šachty', '-', '-', '-', '-', '-', 'V.' ] if info_PU[6] == [ 'rozvody hořlavých látek – více než 8000 mm2' ]: s_inst = [ 'Ši', 'Instalační šachty', '-', '-', '-', '-', '-', 'VI.' ] if info_PU[3] == ['ANO']: len_inst = len_vytah + 1 else: len_inst = len(new_dict) + 1 app_this = pre + str(len_inst) + suf s_inst[1] = NoEscape(s_inst[1] + app_this) data_used.append(s_vytah) data_used.append(s_inst) if len(check) > 0 or info_PU[4] == ['ANO' ] or info_PU[3] == ['ANO']: doc.append(Command('end', 'TableNotes')) for i in range(0, len(data_used)): data_used[i][2] = data_used[i][2].replace(".", ",") data_used[i][3] = data_used[i][3].replace(".", ",") data_used[i][4] = data_used[i][4].replace(".", ",") data_used[i][5] = data_used[i][5].replace(".", ",") data_used[i][6] = data_used[i][6].replace(".", ",") with doc.create(LongTable("l l c c c c c c", pos=['htb'])) as data_table: doc.append( Command('caption', 'Přehled požárních úselků a jejich SPB')) doc.append(Command('label', 'PU')) doc.append(Command('\ ')) data_table.append data_table.add_hline() data_table.add_row([ "Číslo", "Popis", "Plocha", MultiColumn(3, data='Součinitelé'), NoEscape('p$_v$'), "SPB" ]) data_table.add_row([ " ", " ", NoEscape('[m$^2$]'), "a", "b", "c", NoEscape('[kg/m$^2$]'), '[-]' ]) data_table.add_hline() data_table.end_table_header() for i in range(0, len(data_used)): if i % 2 != 0: data_table.add_row(data_used[i], color="Hex") else: data_table.add_row(data_used[i]) data_table.add_hline() if len(check) > 0 or info_PU[4] == ['ANO' ] or info_PU[3] == ['ANO']: doc.append(NoEscape('\insertTableNotes')) os.chdir(vystup_dir) doc.append(Command('end', 'ThreePartTable')) doc.generate_pdf("D_PU", clean_tex=False)
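# --- Illustrative sketch (not part of the original generator) ----------------
# d_generator() above wraps its LongTable in a ThreePartTable environment so
# that numbered footnotes ("\item $^{1)}$ ...") can be collected in TableNotes
# and typeset with \insertTableNotes after the table body.  The stripped-down
# sketch below shows only that wrapping, with placeholder rows and note text;
# it assumes the threeparttablex LaTeX package is available and keeps the same
# \insertTableNotes placement as the generator above.
def _sketch_table_with_notes():
    from pylatex import Command, Document, LongTable, NoEscape, Package

    doc = Document()
    doc.packages.append(Package('threeparttablex'))

    doc.append(Command('begin', 'ThreePartTable'))
    doc.append(Command('begin', 'TableNotes'))
    doc.append(NoEscape(r'\small'))
    doc.append(NoEscape(r'\item $^{1)}$ Placeholder note explaining the flagged row.'))
    doc.append(Command('end', 'TableNotes'))

    with doc.create(LongTable("l l")) as table:
        table.add_hline()
        table.add_row(["Fire compartment", "SPB"])
        table.add_hline()
        table.end_table_header()
        table.add_row([NoEscape(r'N01.01 $^{1)}$'), 'II.'])
        table.add_hline()

    doc.append(NoEscape(r'\insertTableNotes'))
    doc.append(Command('end', 'ThreePartTable'))
    doc.generate_pdf("table_notes_sketch", clean_tex=False)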
def s2string(sbmlArgument, file_path=None): ''' Convert sbml to a latex string Args: param1 (string): file name to sbml OR sbml string file_path (string, optional): path for creation of a pdf file, only works with latexmk or pdflatex installed Returns: LaTeX string ''' try: import tesbml as libsbml except: import libsbml try: from libsbml import formulaToL3String, writeMathMLToString, parseFormula, readMathMLFromString except: from tesbml import formulaToL3String, writeMathMLToString, parseFormula, readMathMLFromString import math import pathlib # For extracting file extensions import os def getLaTeXFromAST(tree): # xmlstr = writeMathMLToString(tree) # # Strip out the header # xmlstr = xmlstr.replace ('<?xml version="1.0" encoding="UTF-8"?>', '') # # return mathml2latex_yarosh(xmlstr).strip ('$') from MATH import convertToInfix return convertToInfix(tree) #The zeroes are out of nessessity, I don't know why, but just having a single obj variable does not work #So, predefined all classes that are used later def listfiller(Commands, obj=0, R=0, Sp=0, ass=0, Par=0, tr=0, libsbml=libsbml, tofill=[], twoD=1): ''' Uses a dismal method of evaluating a piece of code from 'Commands' to fit a specific string into 'tofill' takes in a libsbml object as obj if twoD = 0, then does not fill 'tofill' with the templin as one element but returns the compiled templin as 1-D list ''' l = len(Commands) templin = [None] * l for i in range(l): templin[i] = eval(Commands[i]) if twoD == 1: tofill.append(templin) return tofill elif twoD == 0: return templin def round_half_up(n, decimals=0): ''' use this to round numbers that are way to big to put in a table ''' multiplier = 10**decimals return math.floor(n * multiplier + 0.5) / multiplier def lawcutter(prefix): ''' cuts up the string version of the KineticLaw object into something the mathml converter can read ''' lis = prefix.split('\n') i = len(lis) - 1 if (' <listOfParameters>' in lis): i = lis.index(' <listOfParameters>') lis = lis[1:i] for n in range(0, len(lis)): lis[n] = lis[n][ 2:] #so, here we are messing with indentation, not sure if it will be consistent #for all models or even if it is nessessary, but it's here newstr = '\n'.join(lis) return newstr def notecutter(prefix): ''' same as lawcutter but for notes ''' prefix = prefix.replace("\n", "") lis = prefix.split('>') i = len(lis) - 2 lis = lis[1:i] #for n in range(0, len(lis)): # lis[n] =lis[n][1:] newstr = '>'.join(lis) newstr = newstr + '>' return newstr # ---------------------------------------------- # Start of sb2l # ---------------------------------------------- reader = libsbml.SBMLReader() # Check if its a file name if os.path.isfile(sbmlArgument): suff = pathlib.Path(sbmlArgument).suffix if suff == '.xml' or suff == '.sbml': sbmldoc = reader.readSBMLFromFile(sbmlArgument) else: # If it's not a file, assume it's an sbml string sbmldoc = reader.readSBMLFromString( sbmlArgument) # Reading in the model errors = sbmldoc.getNumErrors() numReadErr = 0 numReadWarn = 0 for i in range(errors): severity = sbmldoc.getError(i).getSeverity() if (severity == libsbml.LIBSBML_SEV_ERROR) or ( severity == libsbml.LIBSBML_SEV_FATAL): seriousErrors = True numReadErr += 1 else: numReadWarn += 1 oss = libsbml.ostringstream() sbmldoc.printErrors(oss) errMsgRead = oss.str() raise RuntimeError(errMsgRead) Model_id = sbmldoc.model.getName( ) # This is essentially how each list is filled, using commands from LibSBML if len(Model_id) < 1: Model_id = sbmldoc.model.getId() Model_id = Model_id.replace(r'_', r'\_') 
Compartments = [] Species = [] Parameters = [] Reactions = [] Events = [] Rules = [] FunctionDefinitions = [] FunctionArgList = [] # making a notes list lis = None notes = sbmldoc.model.getNotesString() if len(notes) != 0: lis = notecutter(notes).split('<') lis = lis[2:len(lis)] del notes l = sbmldoc.model.getNumCompartments() if l != 0: ComList = [ 'obj.getId()', 'obj.getSBOTerm()', 'obj.getSpatialDimensions()', 'obj.getSize()', 'obj.getConstant()' ] for x in range(0, l): obj = sbmldoc.model.getCompartment(x) Compartments = listfiller( ComList, obj=obj, tofill=Compartments) # see the function above del (ComList) l = sbmldoc.model.getNumSpecies() if l != 0: SpecList = [ 'obj.getId()', 'obj.getInitialConcentration()', 'obj.getHasOnlySubstanceUnits()', ' obj.getBoundaryCondition()', 'obj.getConstant()' ] for x in range(0, l): obj = sbmldoc.model.getSpecies(x) Species = listfiller(SpecList, obj=obj, tofill=Species) if not math.isnan(Species[x][1]): if (Species[x][1] * 1000 < 1): # need this to round things to fit in the table Species[x][1] = round_half_up(Species[x][1], decimals=6) else: Species[x][1] = round_half_up(Species[x][1], decimals=4) del (SpecList) l = sbmldoc.model.getNumParameters() if l != 0: ParList = ['obj.getId()', 'obj.getValue()', 'obj.getConstant()'] for x in range(0, l): obj = sbmldoc.model.getParameter(x) Parameters = listfiller(ParList, obj=obj, tofill=Parameters) del (ParList) l = sbmldoc.model.getNumReactions() if l != 0: Rlist = ['R.getId()', 'R.getReversible()', 'R.getFast()'] ReProlist = [ 'Sp.getSpecies()', 'Sp.getStoichiometry()', 'Sp.getConstant()' ] Modlist = ['obj.getSpecies()'] for x in range(0, l): R = sbmldoc.model.getReaction(x) RL = listfiller( Rlist, R=R, twoD=0 ) #starting the element of common matrix/list to append at the end #making the list for reactants lRe = R.getNumReactants() ReL = [] for y in range(0, lRe): Sp = R.getReactant(y) ReL = listfiller(ReProlist, Sp=Sp, tofill=ReL) RL.append(ReL) del (lRe, ReL) #Adding reactants list to RL #making the list for products lPro = R.getNumProducts() ProL = [] for y in range(0, lPro): Sp = R.getProduct(y) ProL = listfiller(ReProlist, Sp=Sp, tofill=ProL) RL.append(ProL) del (Sp, ProL, y, lPro) #Adiing products list to RL #making the law thing law = R.getKineticLaw() prefix = law.toSBML() Formula = lawcutter(prefix) # repeating the deleted list for now, so that code works consitstnently ParList = [ 'Par.getId()', 'Par.getValue()', 'Par.getDerivedUnitDefinition()', 'Par.getConstant()' ] lPar = law.getNumParameters() ParL = [] for y in range(0, lPar): Par = law.getParameter(y) ParL = listfiller(ParList, Par=Par, tofill=ParL) KinLaw = [Formula, ParL] RL.append(KinLaw) del (Formula, law) lMod = R.getNumModifiers() if lMod > 0: ModL = [] for y in range(0, lMod): obj = R.getModifier(y) ModL = listfiller(Modlist, obj=obj, tofill=ModL) RL.append(ModL) Reactions.append( RL ) #Appending all info about a given reaction to the common list del (RL, R, Rlist, ReProlist, ParList, lPar, ParL, KinLaw, prefix) l = sbmldoc.model.getNumEvents() if l != 0: TrList = ['tr.getInitialValue()', 'tr.getPersistent()', 'tr.getMath()'] AsList = ['ass.getId()', 'ass.getMath()'] for x in range(0, l): eve = sbmldoc.model.getEvent(x) #get the event tr = eve.getTrigger() TrigL = [0, 0, 0] TrigL = listfiller(TrList, tr=tr, tofill=TrigL, twoD=0) #define trigger things m = eve.getNumEventAssignments() AssL = [] for i in range(0, m): ass = eve.getEventAssignment(i) AssL = listfiller( AsList, ass=ass, tofill=AssL ) #add up all of the ID = Formula in 
a single list del (i, m) Events.append([eve.getId(), eve.getName(), TrigL, AssL]) del (TrList, AsList, eve, tr, TrigL, ass, AssL) l = sbmldoc.model.getNumRules() if l != 0: RuList = ['obj.getVariable()', 'obj.getFormula()'] for x in range(0, l): obj = sbmldoc.model.getRule(x) Rules = listfiller(RuList, obj=obj, tofill=Rules) del (RuList) del (obj) l1 = sbmldoc.model.getNumFunctionDefinitions() if l1 != 0: FuncList = ['obj.getId()', 'obj.getBody()'] for x in range(0, l1): obj = sbmldoc.model.getFunctionDefinition(x) FunctionDefinitions = listfiller(FuncList, obj=obj, tofill=FunctionDefinitions) l2 = obj.getNumArguments() if l2 != 0: for k in range(0, l2): FunctionArgList.append(obj.getArgument(k)) del (libsbml, lawcutter, l, notecutter, listfiller) # The part where everything is compiled into the TeX file from pylatex import Document, Section, Subsection, Subsubsection, Command, Math, Tabular, LongTable from pylatex import Table, LineBreak from pylatex.utils import italic, NoEscape, bold doc = Document() # start a doc doc.packages.append(NoEscape(r'\usepackage{xcolor}')) doc.packages.append(NoEscape(r'\usepackage{titlesec}')) doc.packages.append(NoEscape(r"\usepackage{hyperref}")) doc.packages.append( NoEscape(r"\hypersetup{colorlinks=true,linkcolor=blue,urlcolor=blue}")) doc.packages.append(NoEscape(r"\usepackage{amsmath}")) doc.packages.append(NoEscape(r"\usepackage{breqn}")) doc.preamble.append(NoEscape(r'\definecolor{blue}{cmyk}{.93, .59, 0, 0}')) doc.preamble.append('') doc.preamble.append(NoEscape(r'\titleformat{\chapter}[display]')) doc.preamble.append( NoEscape(r' {\normalfont\sffamily\huge\bfseries\color{blue}}')) doc.preamble.append( NoEscape(r' {\chaptertitlename\ \thechapter}{20pt}{\Huge}')) doc.preamble.append(NoEscape(r'\titleformat{\section}')) doc.preamble.append( NoEscape(r' {\normalfont\sffamily\Large\bfseries\color{blue}}')) doc.preamble.append(NoEscape(r' {\thesection}{1em}{}')) doc.preamble.append(NoEscape(r'\titleformat{\subsection}')) doc.preamble.append( NoEscape(r' {\normalfont\sffamily\large\bfseries\color{blue}}')) doc.preamble.append(NoEscape(r' {\thesubsection}{1em}{}')) doc.preamble.append(NoEscape(r'\titleformat{\subsubsection}')) doc.preamble.append( NoEscape(r' {\normalfont\sffamily\normalsize\bfseries\color{blue}}')) doc.preamble.append(NoEscape(r' {\thesubsubsection}{1em}{}')) doc.append(NoEscape(r'\begin{center}')) doc.append( NoEscape(r'{\normalfont\sffamily\huge\bfseries SBML Model Report}\\')) doc.append(NoEscape(r'\vspace{5mm}')) doc.append( NoEscape( r'{\normalfont\sffamily\LARGE\bfseries\color{blue} Model name: ' + Model_id + r'}\\')) doc.append(NoEscape(r'\vspace{5mm}')) doc.append(NoEscape(r'\large\today')) doc.append(NoEscape(r'\end{center}')) def rxn_eq(Reaction, Command=Command): ''' Stitches up a list to plug into Math function for reaction equations ''' numRe = len( Reaction[3]) # the products info is stored as a list in position 3 numPr = len(Reaction[4]) try: numMod = len(Reaction[6]) except: numMod = 0 arrow = [] plus = ['+'] Re = [] Pr = [] if numRe > 0: for i in range(0, numRe): if (i > 0): Re = Re + plus Re.append(Command( command='text', arguments=Reaction[3][i] [0])) #Appends with IDs of species that are reactannts else: Re.append(Command(command='text', arguments=['None'])) if numPr > 0: for i in range(0, numPr): # Put in the form Math class can interpret if (i > 0): Pr = Pr + plus Pr.append(Command(command='text', arguments=Reaction[4][i][0])) else: Pr.append(Command(command='text', arguments=['None'])) if numMod > 0: arg = [] for i 
in range(0, numMod): arg.append(Reaction[6][i][0]) arg = ", ".join(arg) arrow = [ Command(command='xrightarrow', arguments=Command(command='text', arguments=arg)) ] else: arrow = [Command('longrightarrow')] DaList = Re + arrow + Pr return DaList if lis != None: # NOTES -- made from html string, can recognize: # <a href...>, <b>, <i>,<br/> and treats emphasis as italic or bold # there is a known issue with special characters such as # not being interpreted right # to fix that, follow the structure below leng = len(lis) with doc.create(Section('Notes')): def findOccurrences(s, ch): return [i for i, letter in enumerate(s) if letter == ch] doc.append(Command('raggedright')) doc.append(Command('frenchspacing')) for i in range(0, leng): if (leng < 2): doc.append(lis[i]) continue if ( ''' in lis[i] ): #THIS if statement is being referenced above, ' is the HTML code for #the apostrophe lis[i] = lis[i].replace("'", "'") if ('&' in lis[i]): lis[i] = lis[i].replace("&", "&") if ('$' in lis[i]): lis[i] = lis[i].replace("$", "$") if ('#' in lis[i]): lis[i] = lis[i].replace("#", "#") if ('+' in lis[i]): lis[i] = lis[i].replace("+", "+") if ('!' in lis[i]): lis[i] = lis[i].replace("!", "!") if ('?' in lis[i]): lis[i] = lis[i].replace("?", "?") if ('/' in lis[i] and 'br/>' not in lis[i] and '//' not in lis[i] and len(lis[i].replace(" ", "")) < 4 and 'strong>' not in lis[i]): continue #! trying to skip every instance of </something> assuming the 4 length as cutoff elif ('br/>' in lis[i] and len(lis[i].replace(" ", "")) < 4): doc.append(LineBreak()) elif ('br/>' in lis[i]): doc.append(LineBreak()) doc.append(lis[i].replace("br/>", "")) elif ('p>' in lis[i]): doc.append(Command('par')) doc.append(lis[i][2:len(lis[i])]) elif ('sub>' in lis[i] and '/sub>' not in lis[i]): temp = lis[i].replace("sub>", "") doc.append(NoEscape("$_{\\text{" + temp + "}}$")) elif (('b>' in lis[i] or 'strong>' in lis[i]) and ('/b>' not in lis[i]) and ('/strong>' not in lis[i]) and ('/sub>' not in lis[i])): temp = lis[i].replace("b>", "") temp = temp.replace("strong>", "") doc.append(bold(temp)) elif (('i>' in lis[i] or 'em>' in lis[i]) and ('/i>' not in lis[i]) and ('/em>' not in lis[i])): temp = lis[i].replace("i>", "") temp = temp.replace("em>", "") doc.append(italic(temp)) elif (('/b>' in lis[i]) or ('/strong>' in lis[i]) or ('/i>' in lis[i]) or ('/em>' in lis[i]) or ('/sub>' in lis[i])): temp = lis[i].replace("/i>", "") temp = temp.replace("/em>", "") temp = temp.replace("/b>", "") temp = temp.replace("/strong>", "") temp = temp.replace("/sub>", "") doc.append(temp) elif ('a href=' in lis[i]): t_list = lis[i].split('>') pos = findOccurrences(t_list[0], '\"') link = t_list[0][pos[0] + 1:pos[ 1]] #! Assuming that the first to places with " \" " #will surround the link doc.append( NoEscape("\href{" + link + "}" + "{" + t_list[1] + "}")) #! Assuming that in a hyperlink notation: # i. 
e <a href="http://link.com">text that the author wants to be seen</a> else: pos = findOccurrences(lis[i], '>') doc.append(lis[i][pos[0] + 1:]) del (leng) with doc.create(Section('Contents')): # Summary of contents of sbml model doc.append('The number of components in this model:') doc.append(NoEscape(r'\\[2mm]')) with doc.create(Table(position='htpb')) as table1: doc.append(NoEscape(r'\centering')) tbl_cmnd = '' tbl_cmnd = 'l|c|l|c' with doc.create(Tabular(tbl_cmnd, booktabs=True)) as table: table.add_row('Element', 'Quantity', 'Element', 'Quantity') table.add_hline() table.add_row('Compartment', str(len(Compartments)), 'Species', str(len(Species))) table.add_row('Reactions', str(len(Reactions)), 'Events', str(len(Events))) table.add_row('Global Parameters', str(len(Parameters)), 'Function Definitions', str(len(FunctionDefinitions))) table1.add_caption('Components in this model.') # COMPARTMENTS TABLE listlen = len(Compartments) #number of rows sublistlen = len(Compartments[0]) #number of columns tbl_cmnd = '' tbl_cmnd = tbl_cmnd.join('c|' for i in range(0, sublistlen)) tbl_cmnd = tbl_cmnd[:-1] with doc.create(Section('Compartments')): doc.append('Table of comparments in the model:') with doc.create(LongTable(tbl_cmnd, booktabs=True)) as table: table.add_row(('ID', 'SBO ', 'Spatial ', 'Size', 'Constant')) table.add_row(('', 'Term', 'Dimensions', '', '')) table.add_hline() for i in range(0, listlen): if math.isnan(Compartments[i][1]): Species[i][1] = 'undefined' table.add_row(tuple(Compartments[i])) # SPECIES TABLE # getting info from the list listlen = len(Species) #number of rows sublistlen = len(Species[0]) #number of columns tbl_cmnd = '' #tbl_cmnd.join('X|' for i in range(0, sublistlen)) tbl_cmnd = tbl_cmnd.join('c|' for i in range(0, sublistlen)) tbl_cmnd = tbl_cmnd[:-1] # Remove last character, dont want verical line # making a tble for latex # As the most simple way of doing this, we can convert the lists into tuples and just paste into # the add_row command. 
For something more complicated: some if statements would be useful with doc.create(Section('Species')): doc.append('Table of species in the model:') with doc.create(LongTable(tbl_cmnd, booktabs=True)) as table: table.add_row(('ID', 'Initial ', 'Only ', 'Boundary', 'Constant')) table.add_row( ('', 'Concentration', 'Substance Units', ' Conditions', '')) table.add_hline() for i in range(0, listlen): if math.isnan(Species[i][1]): Species[i][1] = 'undefined' table.add_row(tuple(Species[i])) # GLOBAL PARAMETER TABLE listlen = len(Parameters) #number of rows if (listlen < 1): with doc.create(Section('Parameters')): doc.append( 'The function could not identify any global Parameters in the model' ) else: sublistlen = len(Parameters[0]) #number of columns tbl_cmnd = '' #tbl_cmnd.join('X|' for i in range(0, sublistlen)) tbl_cmnd = tbl_cmnd.join('c|' for i in range(0, sublistlen)) tbl_cmnd = tbl_cmnd[: -1] # Remove last character, dont want verical line with doc.create(Section('Parameters')): doc.append( 'The following table is the list of Parameters in the model.') with doc.create(LongTable(tbl_cmnd, booktabs=True)) as table: table.add_row(('ID', 'Value', 'Constant')) table.add_hline() for i in range(0, listlen): table.add_row(tuple(Parameters[i])) # PROCESS RULES listlen = len(Rules) if (listlen >= 1): with doc.create(Section('Rules')): doc.append('Number of rules in the model: ' + str(listlen)) for i in range(0, listlen): with doc.create( Subsection('Rule ' + str(i + 1) + ': ' + Rules[i][0])): doc.append(Math(data=[Rules[i][0] + '=' + Rules[i][1]])) # PROCESS FUNCTION DEDFINITIONS listlen = len(FunctionDefinitions) if (listlen >= 1): with doc.create(Section('Function Definitions')): doc.append('Number of usr defined functions in the model: ' + str(listlen)) for i in range(0, listlen): latexstr = getLaTeXFromAST(FunctionDefinitions[i][1]) with doc.create(Subsection('Function ' + str(i + 1))): doc.append(Command("begin", "dmath*")) doc.append( NoEscape( '$$' + '\\text{' + FunctionDefinitions[i][0].replace('_', '\\_') + '}\ (')) for j in range(0, len(FunctionArgList)): latexarg = getLaTeXFromAST(FunctionArgList[j]) if j == len(FunctionArgList) - 1: doc.append( NoEscape(str(latexarg.replace('_', '\\_')))) else: doc.append( NoEscape(latexarg.replace('_', '\\_') + ',')) doc.append( NoEscape('): ' + latexstr.replace('_', '\\_') + '$$')) doc.append(Command("end", "dmath*")) # PROCESS EVENTS listlen = len(Events) if (listlen >= 1): with doc.create(Section('Events')): doc.append('Number of events defined in the model: ' + str(listlen)) for i in range(0, listlen): with doc.create( Subsection('Event ' + str(i + 1) + ': ' + Events[i][0])): if (len(Events[i][1]) > 0): with doc.create(Subsubsection('Name', numbering=False)): doc.append(Events[i][1]) with doc.create(Subsubsection('Trigger', numbering=False)): doc.append( NoEscape('$$' + getLaTeXFromAST(Events[i][2][2]) + '$$')) with doc.create( Subsubsection('Assignments', numbering=False)): for j in range(0, len(Events[i][3])): assTree = parseFormula(Events[i][3][j][0]) ass = '$$' + getLaTeXFromAST( assTree) + '=' + getLaTeXFromAST( Events[i][3][j][1]) + '$$' doc.append(NoEscape(ass)) # PROCESS REACTIONS listlen = len(Reactions) # number of rows with doc.create(Section('Reactions')): doc.append('Number of reactions in the model: ' + str(listlen)) for i in range(0, listlen): with doc.create( Subsection('Reaction ' + str(i + 1) + ': ' + Reactions[i][0])): with doc.create( Subsubsection('Reaction equation', numbering=False)): 
doc.append(Math(data=rxn_eq(Reaction=Reactions[i]))) with doc.create(Subsubsection('Kinetic Law', numbering=False)): m = readMathMLFromString(Reactions[i][5][0]) formula = getLaTeXFromAST(m) formula = formula.replace('\mathrm', '\ \mathrm') doc.append(Command("begin", "dmath*")) doc.append( NoEscape('$$v =' + formula.replace('_', '\\_') + '$$')) doc.append(Command("end", "dmath*")) with doc.create(Subsubsection('Local Parameters')): if len(Reactions[i][5][1]) != 0: sublistlen = len(Reactions[i][5][1][0]) tbl_cmnd = '' tbl_cmnd = '||' + tbl_cmnd.join( 'c|' for n in range(0, sublistlen)) + '|' with doc.create(LongTable(tbl_cmnd, booktabs=False)) as table: table.add_hline() table.add_row(('ID', 'Value', 'Units', 'Constant')) table.add_hline() table.add_hline() listleng = len(Reactions[i][5][1]) for j in range(0, listleng): table.add_row(tuple(Reactions[i][5][1][j])) table.add_hline() else: doc.append('No LOCAL Parameters found') del (Command, Document, NoEscape, Section, Subsection, italic) return doc.dumps()
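# --- Illustrative usage sketch (not part of sb2l itself) ---------------------
# s2string() returns the rendered report as a LaTeX string rather than writing
# a file, so a typical caller dumps the string to a .tex file and compiles it
# separately.  The file names below are placeholders; any SBML file path or raw
# SBML string should work the same way.
def _sketch_write_sbml_report(sbml_path='model.xml', out_tex='sbml_report.tex'):
    latex_source = s2string(sbml_path)
    with open(out_tex, 'w') as handle:
        handle.write(latex_source)
    # Compile with e.g. `pdflatex sbml_report.tex` or `latexmk -pdf sbml_report.tex`
    # if a TeX distribution is installed.
    return out_tex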
def _document_instance_table(self, tests: List[Dict[str, Any]], with_id: bool):
    """Document a result table of per-instance tests.

    Args:
        tests: List of corresponding test dictionary to make a table.
        with_id: Whether the test information includes data ID.
    """
    if with_id:
        table_spec = '|c|p{5cm}|c|c|p{5cm}|'
        column_num = 5
    else:
        table_spec = '|c|p{10cm}|c|c|'
        column_num = 4

    with self.doc.create(LongTable(table_spec, pos=['h!'], booktabs=True)) as tabular:
        package = Package('seqsplit')
        if package not in tabular.packages:
            tabular.packages.append(package)

        # add table heading
        row_cells = [
            MultiColumn(size=1, align='|c|', data="Test ID"),
            MultiColumn(size=1, align='c|', data="Test Description"),
            MultiColumn(size=1, align='c|', data="Pass Threshold"),
            MultiColumn(size=1, align='c|', data="Failure Count")
        ]
        if with_id:
            row_cells.append(
                MultiColumn(size=1, align='c|', data="Failure Data Instance ID"))
        tabular.add_row(row_cells)

        # add table header and footer
        tabular.add_hline()
        tabular.end_table_header()
        tabular.add_hline()
        tabular.add_row((MultiColumn(column_num, align='r', data='Continued on Next Page'), ))
        tabular.add_hline()
        tabular.end_table_footer()
        tabular.end_table_last_footer()

        for idx, test in enumerate(tests):
            if idx > 0:
                tabular.add_hline()
            des_data = [
                WrapText(data=x, threshold=27)
                for x in test["description"].split(" ")
            ]
            row_cells = [
                self.test_id,
                IterJoin(data=des_data, token=" "),
                NoEscape(r'$\le $' + str(test["fail_threshold"])),
                test["fail_number"]
            ]
            if with_id:
                id_data = [
                    WrapText(data=x, threshold=27) for x in test["fail_id"]
                ]
                row_cells.append(IterJoin(data=id_data, token=", "))
            tabular.add_row(row_cells)
            self.test_id += 1
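# --- Illustrative sketch (not part of the original report class) -------------
# _document_instance_table() expects each test as a dict with "description",
# "fail_threshold", "fail_number" and, when with_id=True, "fail_id" keys, and
# it relies on an enclosing object that exposes `doc` (a pylatex Document) and
# a running `test_id` counter.  The driver below fakes that object with a
# SimpleNamespace; the test contents are made up for illustration, and the
# WrapText/IterJoin helpers are assumed to be importable as in the original
# module.
def _sketch_instance_table():
    import types
    from pylatex import Document

    report = types.SimpleNamespace(doc=Document(), test_id=1)
    tests = [{
        "description": "No missing labels in the evaluation split",
        "fail_threshold": 0,
        "fail_number": 2,
        "fail_id": ["img_0007", "img_0132"],
    }]
    _document_instance_table(report, tests, with_id=True)
    report.doc.generate_pdf("instance_table_sketch", clean_tex=False)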
def description_classification(full_data_files, description_folder): data_list = list() for d in full_data_files: name = d.split('/')[2].split('.')[0] df = pd.read_csv(d, sep='\s+', header=None) lines, columns = df.shape attribute = columns - 1 # Minus one because of target last_pos = attribute classes = np.unique(df[last_pos]) n_classes = len(classes) distribution_list = [len(df[df[last_pos] == c]) for c in classes] distribution_list.sort(reverse=True) distribution = tuple(distribution_list) data_list.append({'Dataset': name, 'Size': lines, 'Attributes': attribute, 'Classes': n_classes, 'Class distribution': distribution}) df = pd.DataFrame(data_list) df = df.sort_values('Size', ascending=False) cols = ['Dataset', 'Size', 'Attributes', 'Classes', 'Class distribution'] df = df[cols] df_copy = deepcopy(df) df_copy['Class distribution'] = [str(value).replace(', ', ';').replace(')', '').replace('(', '') for value in df_copy['Class distribution']] df_copy.to_csv(os.path.join(description_folder, 'data_description.csv'), sep=',', header=True, columns=['Dataset', 'Size', 'Attributes', 'Classes', 'Class distribution'], index=False) # # LaTeX df = df.set_index(['Dataset']) # Max classes per row max_classes = np.inf geometry_options = { "margin": "1.00cm", "includeheadfoot": True } doc = Document(page_numbers=True, geometry_options=geometry_options) # Generate data table with doc.create(LongTable("l l l l l")) as data_table: data_table.add_hline() header = ["Dataset", "Size", "#Attr.", "#Classes", "Class distribution"] data_table.add_row(header) data_table.add_hline() data_table.add_hline() for index in df.index.values: row = [index] + df.loc[index].values.tolist() if len(row[-1]) > max_classes: max = max_classes finished = False subrow = row.copy() # Select subtuple and removing last parenthesis subrow[-1] = str(subrow[-1][:max]).replace(')', ',') data_table.add_row(subrow) while finished is False: last_element = row[-1][max:max + max_classes] if len(last_element) == 1: # To string last_element = str(last_element) # Remove first and last parenthesis and comma last_element = last_element[1:-2] else: # To string last_element = str(last_element) # Remove first and last parenthesis last_element = last_element[1:-1] max = max + max_classes if max >= len(row[-1]): finished = True last_element += ')' else: # Remove last parenthesis or comma if len is 1 last_element = last_element[:-1] subrow = ['', '', '', '', last_element] data_table.add_row(subrow) else: data_table.add_row(row) doc.generate_pdf(os.path.join(description_folder, 'data_description'), clean_tex=False)
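# --- Illustrative sketch (not part of the original pipeline) -----------------
# The summary row built for every dataset above boils down to: the last column
# is the target, every other column is an attribute, and the class distribution
# is the per-class row count sorted in descending order.  The helper below
# computes that summary for a single whitespace-separated file; the file path
# is a placeholder.
def _sketch_describe_dataset(path='data/full/iris.dat'):
    import numpy as np
    import pandas as pd

    df = pd.read_csv(path, sep=r'\s+', header=None)
    n_rows, n_cols = df.shape
    target = n_cols - 1  # last column holds the class label
    classes = np.unique(df[target])
    distribution = sorted((int((df[target] == c).sum()) for c in classes), reverse=True)
    return {
        'Size': n_rows,
        'Attributes': n_cols - 1,
        'Classes': len(classes),
        'Class distribution': tuple(distribution),
    }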
def tuning_LogisticRegression():
    param_grid = {
        "tol": [1e-3, 1e-4, 1e-5],
        "C": [1e-5, 3e-5, 1e-4, 3e-4, 1e-3],
        "max_iter": [100, 200, 300]
    }
    result_list = []
    # [tol, C, max_iter, train micro F1, dev micro F1, train F1 per technique, dev F1 per technique]
    optimized_param = [0, 0, 0, 0, 0, 0, 0]
    for tol in param_grid["tol"]:
        for C in param_grid["C"]:
            for max_iter in param_grid["max_iter"]:
                # try:
                current_param_and_eval = [tol, C, max_iter]
                clf = LogisticRegression(tol=tol, C=C, max_iter=max_iter,
                                         multi_class="multinomial",
                                         solver="newton-cg")
                f1_scores_dict = load_train_output_crossvalidation(clf)
                f1_scores_dict = round_f1_score(f1_scores_dict)
                f1_train_average = f1_scores_dict["f1_train_average"]
                f1_dev_average = f1_scores_dict["f1_dev_average"]
                f1_train_micro_average = f1_scores_dict["f1_train_micro_average"]
                f1_dev_micro_average = f1_scores_dict["f1_dev_micro_average"]
                current_param_and_eval.append(f1_train_micro_average)
                current_param_and_eval.append(f1_dev_micro_average)
                current_param_and_eval.append(f1_train_average)
                current_param_and_eval.append(f1_dev_average)
                result_list.append(current_param_and_eval)
                # Keep the combination with the best validation micro-averaged F1
                if current_param_and_eval[4] > optimized_param[4]:
                    optimized_param = current_param_and_eval
                # except:
                #     print("An exception occurs.")

    # Generate data table
    geometry_options = {"margin": "2.54cm", "includeheadfoot": True}
    doc = Document(page_numbers=True, geometry_options=geometry_options)
    with doc.create(LongTable("l l l l l l l")) as data_table:
        data_table.add_hline()
        data_table.add_row([
            "tol", "C", "max_iter", "training f1", "valid f1",
            "training f1 for each technique", "valid f1 for each technique"
        ])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()
        for i in range(len(result_list)):
            data_table.add_row(result_list[i][0:7])
        data_table.add_hline()

    # Table with only the best parameter combination
    with doc.create(LongTable("l l l l l l l")) as data_table:
        data_table.add_hline()
        data_table.add_row([
            "tol", "C", "max_iter", "training f1", "valid f1",
            "training f1 for each technique", "valid f1 for each technique"
        ])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()
        data_table.add_row(optimized_param)
        data_table.add_hline()

    print("This is for LogisticRegression.")
    doc.generate_pdf("tuning_LogisticRegression", clean_tex=False)
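# --- Illustrative sketch (not part of the original tuning script) ------------
# The triple nested loop above can be flattened with itertools.product while
# keeping the same bookkeeping: one result row per parameter combination, and
# the best combination tracked by the validation micro-averaged F1 at index 4.
# load_train_output_crossvalidation() and round_f1_score() are the helpers
# already used throughout this module.
def _sketch_grid_search_logreg(param_grid):
    import itertools
    from sklearn.linear_model import LogisticRegression

    result_list, best = [], None
    for tol, C, max_iter in itertools.product(param_grid["tol"],
                                              param_grid["C"],
                                              param_grid["max_iter"]):
        clf = LogisticRegression(tol=tol, C=C, max_iter=max_iter,
                                 multi_class="multinomial", solver="newton-cg")
        scores = round_f1_score(load_train_output_crossvalidation(clf))
        row = [tol, C, max_iter,
               scores["f1_train_micro_average"], scores["f1_dev_micro_average"],
               scores["f1_train_average"], scores["f1_dev_average"]]
        result_list.append(row)
        if best is None or row[4] > best[4]:
            best = row
    return result_list, best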
def latex_report(system, curves, grid, filename): lines = system.lines trafos = system.trafos buses = system.buses geometry_options = {"tmargin": "1cm", "lmargin": "1cm", "rmargin": "1cm", "bmargin": "1cm", "includeheadfoot": True} doc = Document(page_numbers=True, geometry_options=geometry_options) doc.preamble.append(Command('usepackage', 'cmbright')) doc.preamble.append(Command('usepackage', 'tikz')) doc.preamble.append(Command('usepackage', 'amsmath')) doc.preamble.append(Command('usepackage', 'graphicx')) now = datetime.datetime.now() doc.append(f'Report auto-generated by Elegant at ' f'{now.day:02d}/{now.month:02d}/{now.year:02d} ' f'{now.hour:02d}:{now.minute:02d}:{now.second:02d}') wye_comm = UnsafeCommand( 'newcommand', '\\wye', extra_arguments=r'\mathbin{\text{\begin{tikzpicture}' r'[x=1pt, y=1pt, scale=2]' r'\draw ' r'(-0.9, 0) -- (0.9, 0) ' r'(-0.6, -0.5) -- (0.6, -0.5) ' r'(-0.3, -1) -- (0.3, -1) ' r'(0, 0) -- ++(0, 1.5) -- ++(1.2, 0) coordinate (tmp)' r'-- +(0, -2) ' r'(tmp) +(45:2) -- (tmp) -- +(135:2) ;' r'\end{tikzpicture}}}' ) why_comm = UnsafeCommand( 'newcommand', '\\why', extra_arguments=r'\mathbin{\text{\begin{tikzpicture}' r'[x=1pt, y=1pt, scale=2]' r'\draw ' r'(1.2, 1.5) coordinate (tmp)' r'-- +(0, -2) ' r'(tmp) +(45:2) -- (tmp) -- +(135:2) ;' r'\end{tikzpicture}}}' ) doc.append(wye_comm) doc.append(why_comm) doc.add_color(name="lightgray", model="gray", description="0.80") with doc.create(Section('Buses')): with doc.create(Subsection('Load-Flow Solution')): with doc.create(LongTable('c|ccccccc')) as tbl: tbl.add_hline() tbl.add_row(('Bus', NoEscape('$|V|$ (pu)'), NoEscape('$\\delta$ (deg)'), NoEscape('$P_G$ (MW)'), NoEscape('$Q_G$ (Mvar)'), NoEscape('$P_L$ (MW)'), NoEscape('$Q_L$ (Mvar)'), NoEscape('$Z_L$ (pu)'))) tbl.add_hline() tbl.end_table_header() tbl.add_hline() tbl.add_row((MultiColumn(8, align='r', data='Continued on Next Page'),)) tbl.add_hline() tbl.end_table_footer() tbl.add_hline() tbl.end_table_last_footer() for i, b in enumerate(buses): if i % 2 == 0: color = 'lightgray' else: color = None tbl.add_row((b.bus_id + 1, NoEscape('{:.04f}'.format(b.v)), NoEscape('${:.02f}$'.format(b.delta * 180 / np.pi)), NoEscape('{:.02f}'.format(b.pg * 100)), NoEscape('{:.02f}'.format(b.qg * 100)), NoEscape('{:.02f}'.format(b.pl * 100)), NoEscape('{:.02f}'.format(b.ql * 100)), safe_repr(b.Z)), color=color) with doc.create(Subsection('Fault Calculations')): with doc.create(LongTable('c|cccccccccc')) as tbl: tbl.add_hline() tbl.add_row((MultiRow(2, data='Bus'), MultiColumn(2, align='c', data=NoEscape('TPG')), MultiColumn(2, align='c', data=NoEscape('SLG')), MultiColumn(4, align='c', data=NoEscape('DLG')), MultiColumn(2, align='c', data=NoEscape('LL')))) tbl.add_hline(2, 11) tbl.add_row(('', NoEscape('$I_A$ (pu)'), NoEscape('$\\delta_A$ (deg)'), NoEscape('$I_A$ (pu)'), NoEscape('$\\delta_A$ (deg)'), NoEscape('$I_B$ (pu)'), NoEscape('$\\delta_B$ (deg)'), NoEscape('$I_C$ (pu)'), NoEscape('$\\delta_C$ (deg)'), NoEscape('$I_B$ (pu)'), NoEscape('$\\delta_B$ (deg)'))) tbl.add_hline() tbl.end_table_header() tbl.add_hline() tbl.add_row((MultiColumn(11, align='r', data='Continued on Next Page'),)) tbl.add_hline() tbl.end_table_footer() tbl.add_hline() tbl.end_table_last_footer() for i, b in enumerate(buses): if i % 2 == 0: color = 'lightgray' else: color = None tbl.add_row((b.bus_id + 1, safe_repr(np.abs(b.iTPG)), NoEscape('${:.02f}$'.format(np.angle(b.iTPG) * 180 / np.pi)), safe_repr(np.abs(b.iSLG)), NoEscape('${:.02f}$'.format(np.angle(b.iSLG) * 180 / np.pi)), 
safe_repr(np.abs(b.iDLGb)), NoEscape('${:.02f}$'.format(np.angle(b.iDLGb) * 180 / np.pi)), safe_repr(np.abs(b.iDLGc)), NoEscape('${:.02f}$'.format(np.angle(b.iDLGc) * 180 / np.pi)), safe_repr(np.abs(b.iLL)), NoEscape('${:.02f}$'.format(np.angle(b.iLL) * 180 / np.pi))), color=color) with doc.create(Section('Lines')): with doc.create(LongTable('c|cccccccc')) as tbl: tbl.add_hline() tbl.add_row((MultiRow(2, data='Line'), MultiColumn(3, align='c', data='Parametrization'), MultiColumn(2, align='c', data='Loss'), MultiColumn(3, align='c', data='Flow'))) tbl.add_hline(2, 9) tbl.add_row(('', NoEscape('$R$ (\\%pu)'), NoEscape('$X_L$ (\\%pu)'), NoEscape('$B_C$ (\\%pu)'), NoEscape('$P_{loss}$ (MW)'), NoEscape('$Q_{loss}$ (Mvar)'), NoEscape('$P$ (MW)'), NoEscape('$Q$ (Mvar)'), NoEscape('$I/I_{max}$ (\\%)'))) tbl.add_hline() tbl.end_table_header() tbl.add_hline() tbl.add_row((MultiColumn(9, align='r', data='Continued on Next Page'),)) tbl.add_hline() tbl.end_table_footer() tbl.add_hline() tbl.end_table_last_footer() for i, lt in enumerate(lines): if i % 2 == 0: color = 'lightgray' else: color = None tbl.add_row((NoEscape('{} -- {}'.format(lt.orig.bus_id + 1, lt.dest.bus_id + 1)), NoEscape('{:.04f}'.format(lt.Zpu.real * 100)), NoEscape('{:.04f}'.format(lt.Zpu.imag * 100)), NoEscape('{:.04f}'.format(lt.Ypu.imag * 100)), NoEscape('{:.02f}'.format(lt.Sper.real * 100)), NoEscape('{:.02f}'.format(lt.Sper.imag * 100)), NoEscape('{:.02f}'.format(lt.S2.real * 100)), NoEscape('{:.02f}'.format(lt.S2.imag * 100)), NoEscape('{:.02f}'.format(np.abs(lt.Ia) / lt.imax * 100))), color=color) with doc.create(Section('Trafos')): with doc.create(LongTable('c|ccccccc')) as tbl: tbl.add_hline() tbl.add_row((MultiRow(2, data='Trafo'), MultiColumn(3, align='c', data='Parametrization'), MultiColumn(1, align='c', data='Loss'), MultiColumn(3, align='c', data='Flow'))) tbl.add_hline(2, 8) tbl.add_row(('', NoEscape('$x^+$ (\\%pu)'), NoEscape('$x^0$ (\\%pu)'), 'Configuration', NoEscape('$Q_{loss}$ (Mvar)'), NoEscape('$P$ (MW)'), NoEscape('$Q$ (Mvar)'), NoEscape('$S/S_N$ (\\%)'))) tbl.add_hline() tbl.end_table_header() tbl.add_hline() tbl.add_row((MultiColumn(8, align='r', data='Continued on Next Page'),)) tbl.add_hline() tbl.end_table_footer() tbl.add_hline() tbl.end_table_last_footer() for i, tr in enumerate(trafos): if i % 2 == 0: color = 'lightgray' else: color = None tbl.add_row((NoEscape('{} -- {}'.format(tr.orig.bus_id + 1, tr.dest.bus_id + 1)), NoEscape('{:.02f}'.format(tr.Z1.imag * 100)), NoEscape('{:.02f}'.format(tr.Z0.imag * 100)), get_scheme(tr), NoEscape('{:.02f}'.format(tr.Sper.imag * 100)), NoEscape('{:.02f}'.format(tr.S2.real * 100)), NoEscape('{:.02f}'.format(tr.S2.imag * 100)), NoEscape('{:.02f}'.format(np.abs(tr.S2) * 1e8 / tr.snom * 100))), color=color) filepath = filename.strip('.pdf') make_system_schematic(curves, grid, initial_fontsize=9) doc.append(NewPage()) with doc.create(Section('System')): with doc.create(Figure(position='h')) as system_pic: system_pic.add_plot(bbox_inches='tight', width=NoEscape('\\textwidth')) doc.generate_pdf(filepath, clean_tex=True, compiler='latexmk', compiler_args=['-pdf'])
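# --- Illustrative sketch (not part of the original report) -------------------
# The alternating grey shading used in every table of latex_report() comes from
# defining a named colour once on the document and passing it to add_row() for
# even-numbered rows.  The self-contained example below shows only that
# pattern with placeholder bus data; the colour name and value mirror the ones
# used above.
def _sketch_striped_table():
    from pylatex import Document, LongTable

    rows = (('Bus 1', '1.0000'), ('Bus 2', '0.9871'), ('Bus 3', '0.9765'))  # placeholder data

    doc = Document(page_numbers=True)
    doc.add_color(name="lightgray", model="gray", description="0.80")
    with doc.create(LongTable('c|c')) as tbl:
        tbl.add_hline()
        tbl.add_row(('Bus', '|V| (pu)'))
        tbl.add_hline()
        tbl.end_table_header()
        for i, row in enumerate(rows):
            tbl.add_row(row, color='lightgray' if i % 2 == 0 else None)
        tbl.add_hline()
    doc.generate_pdf("striped_table_sketch", clean_tex=True)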
def save_report(model, file_name, detailed_traces=2): print('Saving analytics report to {}.tex and {}.pdf'.format( file_name, file_name)) inference_network = model._inference_network iter_per_sec = inference_network._total_train_iterations / inference_network._total_train_seconds traces_per_sec = inference_network._total_train_traces / inference_network._total_train_seconds traces_per_iter = inference_network._total_train_traces / inference_network._total_train_iterations train_loss_initial = inference_network._history_train_loss[0] train_loss_final = inference_network._history_train_loss[-1] train_loss_change = train_loss_final - train_loss_initial train_loss_change_per_sec = train_loss_change / inference_network._total_train_seconds train_loss_change_per_iter = train_loss_change / inference_network._total_train_iterations train_loss_change_per_trace = train_loss_change / inference_network._total_train_traces valid_loss_initial = inference_network._history_valid_loss[0] valid_loss_final = inference_network._history_valid_loss[-1] valid_loss_change = valid_loss_final - valid_loss_initial valid_loss_change_per_sec = valid_loss_change / inference_network._total_train_seconds valid_loss_change_per_iter = valid_loss_change / inference_network._total_train_iterations valid_loss_change_per_trace = valid_loss_change / inference_network._total_train_traces sys.stdout.write( 'Generating report... \r') sys.stdout.flush() geometry_options = { 'tmargin': '1.5cm', 'lmargin': '1cm', 'rmargin': '1cm', 'bmargin': '1.5cm' } doc = Document('basic', geometry_options=geometry_options) doc.preamble.append(NoEscape(r'\usepackage[none]{hyphenat}')) doc.preamble.append(NoEscape(r'\usepackage{float}')) # doc.preamble.append(NoEscape(r'\renewcommand{\familydefault}{\ttdefault}')) doc.preamble.append(Command('title', 'pyprob analytics: ' + model.name)) doc.preamble.append( Command( 'date', NoEscape(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))) doc.append(NoEscape(r'\maketitle')) # doc.append(NoEscape(r'\small')) print('Analytics: Current system') with doc.create(Section('Current system', numbering=False)): with doc.create(Tabularx('ll')) as table: table.add_row(('pyprob version', __version__)) table.add_row(('PyTorch version', torch.__version__)) # doc.append(NoEscape(r'\newpage')) print('Analytics: Inference network') with doc.create(Section('Inference network', numbering=False)): print('Analytics: Inference network.File') with doc.create(Section('File')): with doc.create(Tabularx('ll')) as table: # table.add_row(('File name', file_name)) # file_size = '{:,}'.format(os.path.getsize(file_name)) # table.add_row(('File size', file_size + ' Bytes')) table.add_row(('Created', inference_network._created)) table.add_row(('Modified', inference_network._modified)) table.add_row(('Updates to file', inference_network._updates)) print('Analytics: Inference network.Training') with doc.create(Section('Training')): with doc.create(Tabularx('ll')) as table: table.add_row( ('pyprob version', inference_network._pyprob_version)) table.add_row( ('PyTorch version', inference_network._torch_version)) table.add_row(('Trained on', inference_network._trained_on)) table.add_row(('Total training time', '{0}'.format( util.days_hours_mins_secs_str( inference_network._total_train_seconds)))) table.add_row( ('Total training traces', '{:,}'.format(inference_network._total_train_traces))) table.add_row(('Traces / s', '{:,.2f}'.format(traces_per_sec))) table.add_row( ('Traces / iteration', '{:,.2f}'.format(traces_per_iter))) 
table.add_row( ('Iterations', '{:,}'.format(inference_network._total_train_iterations))) table.add_row( ('Iterations / s', '{:,.2f}'.format(iter_per_sec))) table.add_row(('Optimizer', inference_network._optimizer_type)) table.add_row(('Validation set size', inference_network._valid_batch.length)) print('Analytics: Inference network.Training loss') with doc.create(Subsection('Training loss')): with doc.create(Tabularx('ll')) as table: table.add_row( ('Initial loss', '{:+.6e}'.format(train_loss_initial))) table.add_row( ('Final loss', '{:+.6e}'.format(train_loss_final))) table.add_row(('Loss change / s', '{:+.6e}'.format(train_loss_change_per_sec))) table.add_row(('Loss change / iteration', '{:+.6e}'.format(train_loss_change_per_iter))) table.add_row(('Loss change / trace', '{:+.6e}'.format(train_loss_change_per_trace))) print('Analytics: Inference network.Validation loss') with doc.create(Subsection('Validation loss')): with doc.create(Tabularx('ll')) as table: table.add_row( ('Initial loss', '{:+.6e}'.format(valid_loss_initial))) table.add_row( ('Final loss', '{:+.6e}'.format(valid_loss_final))) table.add_row(('Loss change / s', '{:+.6e}'.format(valid_loss_change_per_sec))) table.add_row(('Loss change / iteration', '{:+.6e}'.format(valid_loss_change_per_iter))) table.add_row(('Loss change / trace', '{:+.6e}'.format(valid_loss_change_per_trace))) with doc.create(Figure(position='H')) as plot: fig = plt.figure(figsize=(10, 6)) ax = plt.subplot(111) ax.plot(inference_network._history_train_loss_trace, inference_network._history_train_loss, label='Training') ax.plot(inference_network._history_valid_loss_trace, inference_network._history_valid_loss, label='Validation') ax.legend() plt.xlabel('Training traces') plt.ylabel('Loss') plt.grid() fig.tight_layout() plot.add_plot(width=NoEscape(r'\textwidth')) plot.add_caption('Loss plot.') print('Analytics: Inference network.Neural network modules') with doc.create(Section('Neural network modules')): with doc.create(Tabularx('ll')) as table: table.add_row( ('Total trainable parameters', '{:,}'.format(inference_network._history_num_params[-1]))) # table.add_row(('Softmax boost', inference_network.softmax_boost)) # table.add_row(('Dropout', inference_network.dropout)) # table.add_row(('Standardize inputs', inference_network.standardize_observes)) with doc.create(Figure(position='H')) as plot: fig = plt.figure(figsize=(10, 4)) ax = plt.subplot(111) ax.plot(inference_network._history_num_params_trace, inference_network._history_num_params) plt.xlabel('Training traces') plt.ylabel('Number of parameters') plt.grid() fig.tight_layout() plot.add_plot(width=NoEscape(r'\textwidth')) plot.add_caption('Number of parameters.') doc.append(NoEscape(r'\newpage')) print( 'Analytics: Inference network.Neural network modules.All modules' ) with doc.create(Subsection('All modules')): doc.append(str(inference_network)) for m_name, m in inference_network.named_modules(): if (m_name != ''): regex = r'(sample_embedding_layer\(\S*\)._)|(proposal_layer\(\S*\)._)|(_observe_embedding_layer.)|(_lstm)' if len(list(re.finditer(regex, m_name))) > 0: # if ('_observe_embedding_layer.' in m_name) or ('sample_embedding_layer.' in m_name) or ('proposal_layer.' 
in m_name):
    doc.append(NoEscape(r'\newpage'))
    with doc.create(Subsubsection(m_name)):
        doc.append(str(m))
        for p_name, p in m.named_parameters():
            if not 'bias' in p_name:
                with doc.create(Figure(position='H')) as plot:
                    fig = plt.figure(figsize=(10, 10))
                    ax = plt.subplot(111)
                    plt.imshow(np.transpose(util.weights_to_image(p), (1, 2, 0)), interpolation='none')
                    plt.axis('off')
                    plot.add_plot(width=NoEscape(r'\textwidth'))
                    plot.add_caption(m_name + '_' + p_name)

# doc.append(NoEscape(r'\newpage'))
# print('Analytics: Inference network.Neural network modules.Address embeddings')
# with doc.create(Subsection('Address embeddings')):
#     for p_name, p in inference_network.named_parameters():
#         if ('address_embedding' in p_name):
#             with doc.create(Figure(position='H')) as plot:
#                 fig = plt.figure(figsize=(10,10))
#                 ax = plt.subplot(111)
#                 plt.imshow(np.transpose(util.weights_to_image(p),(1,2,0)), interpolation='none')
#                 plt.axis('off')
#                 plot.add_plot(width=NoEscape(r'\textwidth'))
#                 plot.add_caption(FootnoteText(p_name.replace('::', ':: ')))

gc.collect()

doc.append(NoEscape(r'\newpage'))
print('Analytics: Inference network.Traces')
with doc.create(Section('Traces')):
    with doc.create(Tabularx('ll')) as table:
        table.add_row(('Total training traces', '{:,}'.format(inference_network._total_train_traces)))

    print('Analytics: Inference network.Traces.Distributions encountered')
    with doc.create(Subsection('Distributions encountered')):
        with doc.create(Tabularx('ll')) as table:
            # print([v[2] for v in inference_network._address_stats.values()])
            distributions = set([v[2] for v in inference_network._address_stats.values()])
            num_distributions = len(distributions)
            table.add_row(('Number of distributions', num_distributions))
            table.add_empty_row()
            for distribution in distributions:
                table.add_row((distribution, ''))

    print('Analytics: Inference network.Traces.Addresses encountered')
    with doc.create(Subsection('Addresses encountered')):
        with doc.create(Tabularx('lX')) as table:
            num_addresses_all = len(inference_network._address_stats.keys())
            table.add_row(('Number of addresses', num_addresses_all))
            num_addresses_controlled = len([k for k, v in inference_network._address_stats.items() if v[3]])
            num_addresses_replaced = len([k for k, v in inference_network._address_stats.items() if v[3] and v[4]])
            num_addresses_observed = len([k for k, v in inference_network._address_stats.items() if v[5]])
            table.add_row((TextColor('red', 'Number of addresses (controlled)'),
                           TextColor('red', num_addresses_controlled)))
            table.add_row((TextColor('green', 'Number of addresses (replaced)'),
                           TextColor('green', num_addresses_replaced)))
            table.add_row((TextColor('blue', 'Number of addresses (observed)'),
                           TextColor('blue', num_addresses_observed)))
            table.add_row(('Number of addresses (uncontrolled)',
                           num_addresses_all - num_addresses_controlled - num_addresses_observed))
            table.add_empty_row()

        doc.append('\n')
        # One row per address: count, ID, distribution and control/replace/observe flags;
        # the colors match the histograms below.
        with doc.create(LongTable('llllllp{12cm}')) as table:
            # table.add_empty_row()
            table.add_row(FootnoteText('Count'), FootnoteText('ID'), FootnoteText('Distrib.'),
                          FootnoteText('Ctrl.'), FootnoteText('Replace'), FootnoteText('Obs.'),
                          FootnoteText('Address'))
            table.add_hline()
            # address_to_abbrev = {}
            # abbrev_to_address =
            # abbrev_i = 0
            # sorted_addresses = sorted(inference_network.address_histogram.items(), key=lambda x: x[1], reverse=True)
            plt_all_addresses = []
            plt_all_counts = []
            plt_all_colors = []
            plt_controlled_addresses = []
            plt_controlled_counts = []
            plt_controlled_colors = []
            address_id_to_count = {}
            address_id_to_color = {}
            address_id_count_total = 0
            for address, vals in inference_network._address_stats.items():
                address = address.replace('::', ':: ')
                count = vals[0]
                address_id = vals[1]
                distribution = vals[2]
                control = vals[3]
                replace = vals[4]
                observed = vals[5]
                plt_all_addresses.append(address_id)
                plt_all_counts.append(1 if replace else count)
                address_id_to_count[address_id] = count
                address_id_count_total += count
                if control:
                    if replace:
                        color = 'green'
                        plt_controlled_counts.append(1)
                    else:
                        color = 'red'
                        plt_controlled_counts.append(count)
                    plt_controlled_addresses.append(address_id)
                    plt_controlled_colors.append(color)
                elif observed:
                    color = 'blue'
                    plt_controlled_addresses.append(address_id)
                    plt_controlled_colors.append(color)
                    plt_controlled_counts.append(count)
                else:
                    color = 'black'
                table.add_row((TextColor(color, FootnoteText('{:,}'.format(count))),
                               TextColor(color, FootnoteText(address_id)),
                               TextColor(color, FootnoteText(distribution)),
                               TextColor(color, FootnoteText(control)),
                               TextColor(color, FootnoteText(replace)),
                               TextColor(color, FootnoteText(observed)),
                               TextColor(color, FootnoteText(address))))
                plt_all_colors.append(color)
                address_id_to_color[address_id] = color
        gc.collect()

        with doc.create(Figure(position='H')) as plot:
            fig = plt.figure(figsize=(10, 5))
            ax = plt.subplot(111)
            plt_x = range(len(plt_all_addresses))
            ax.bar(plt_x, plt_all_counts, color=plt_all_colors)
            plt.xticks(plt_x, plt_all_addresses)
            plt.xlabel('Address ID')
            plt.ylabel('Count')
            # plt.grid()
            fig.tight_layout()
            plot.add_plot(width=NoEscape(r'\textwidth'))
            plot.add_caption('Histogram of all addresses. Red: controlled, green: replaced, black: uncontrolled, blue: observed.')

        with doc.create(Figure(position='H')) as plot:
            fig = plt.figure(figsize=(10, 5))
            ax = plt.subplot(111)
            plt_x = range(len(plt_controlled_addresses))
            ax.bar(plt_x, plt_controlled_counts, color=plt_controlled_colors)
            plt.xticks(plt_x, plt_controlled_addresses)
            plt.xlabel('Address ID')
            plt.ylabel('Count')
            # plt.grid()
            fig.tight_layout()
            plot.add_plot(width=NoEscape(r'\textwidth'))
            plot.add_caption('Histogram of controlled and observed addresses. Red: controlled, green: replaced, blue: observed.')
        gc.collect()

    print('Analytics: Inference network.Traces.Trace lengths')
    with doc.create(Subsection('Trace lengths')):
        with doc.create(Tabularx('ll')) as table:
            trace_lengths_controlled = [v[3] for v in inference_network._trace_stats.values()]
            trace_lengths_controlled_min = min(trace_lengths_controlled)
            trace_lengths_controlled_max = max(trace_lengths_controlled)
            trace_lengths_all = [v[2] for v in inference_network._trace_stats.values()]
            trace_lengths_all_min = min(trace_lengths_all)
            trace_lengths_all_max = max(trace_lengths_all)
            s_controlled = 0
            s_all = 0
            total_count = 0
            for _, v in inference_network._trace_stats.items():
                trace_length_controlled = v[3]
                trace_length_all = v[2]
                count = v[0]
                s_controlled += trace_length_controlled * count
                total_count += count
                s_all += trace_length_all * count
            trace_length_controlled_mean = s_controlled / total_count
            trace_length_all_mean = s_all / total_count
            table.add_row(('Trace length min', '{:,}'.format(trace_lengths_all_min)))
            table.add_row(('Trace length max', '{:,}'.format(trace_lengths_all_max)))
            table.add_row(('Trace length mean', '{:.2f}'.format(trace_length_all_mean)))
            table.add_row(('Controlled trace length min', '{:,}'.format(trace_lengths_controlled_min)))
            table.add_row(('Controlled trace length max', '{:,}'.format(trace_lengths_controlled_max)))
            table.add_row(('Controlled trace length mean', '{:.2f}'.format(trace_length_controlled_mean)))

        with doc.create(Figure(position='H')) as plot:
            plt_counter = dict(Counter(trace_lengths_all))
            plt_lengths = [i for i in range(0, trace_lengths_all_max + 1)]
            plt_counts = [plt_counter[i] if i in plt_counter else 0 for i in range(0, trace_lengths_all_max + 1)]
            fig = plt.figure(figsize=(10, 5))
            ax = plt.subplot(111)
            ax.bar(plt_lengths, plt_counts, width=trace_lengths_all_max / 500.)
            plt.xlabel('Length')
            plt.ylabel('Count')
            # plt.yscale('log')
            # plt.grid()
            fig.tight_layout()
            plot.add_plot(width=NoEscape(r'\textwidth'))
            plot.add_caption('Histogram of trace lengths.')

        with doc.create(Figure(position='H')) as plot:
            plt_counter = dict(Counter(trace_lengths_controlled))
            plt_lengths = [i for i in range(0, trace_lengths_controlled_max + 1)]
            plt_counts = [plt_counter[i] if i in plt_counter else 0 for i in range(0, trace_lengths_controlled_max + 1)]
            fig = plt.figure(figsize=(10, 5))
            ax = plt.subplot(111)
            ax.bar(plt_lengths, plt_counts)
            plt.xlabel('Length')
            plt.ylabel('Count')
            # plt.yscale('log')
            # plt.grid()
            fig.tight_layout()
            plot.add_plot(width=NoEscape(r'\textwidth'))
            plot.add_caption('Histogram of controlled trace lengths.')
        gc.collect()

    print('Analytics: Inference network.Traces.Unique traces encountered')
    with doc.create(Subsection('Unique traces encountered')):
        detailed_traces = min(len(inference_network._trace_stats), detailed_traces)
        with doc.create(Tabularx('ll')) as table:
            table.add_row(('Unique traces encountered', '{:,}'.format(len(inference_network._trace_stats))))
            table.add_row(('Unique traces rendered in detail', '{:,}'.format(detailed_traces)))

        doc.append('\n')
        with doc.create(LongTable('llllp{15cm}')) as table:
            # table.add_empty_row()
            table.add_row(FootnoteText('Count'), FootnoteText('ID'), FootnoteText('Len.'),
                          FootnoteText('Ctrl. len.'), FootnoteText('Unique trace'))
            table.add_hline()
            plt_traces = []
            plt_counts = []
            for trace_str, vals in inference_network._trace_stats.items():
                count = vals[0]
                trace_id = vals[1]
                length_all = vals[2]
                length_controlled = vals[3]
                addresses_controlled = vals[4]
                addresses_controlled_str = ' '.join(addresses_controlled)
                plt_traces.append(trace_id)
                plt_counts.append(count)
                table.add_row((FootnoteText('{:,}'.format(count)),
                               FootnoteText(trace_id),
                               FootnoteText('{:,}'.format(length_all)),
                               FootnoteText('{:,}'.format(length_controlled)),
                               FootnoteText(addresses_controlled_str)))

        with doc.create(Figure(position='H')) as plot:
            fig = plt.figure(figsize=(10, 5))
            ax = plt.subplot(111)
            plt_x = range(len(plt_traces))
            ax.bar(plt_x, plt_counts)
            plt.xticks(plt_x, plt_traces)
            plt.xlabel('Trace ID')
            plt.ylabel('Count')
            # plt.grid()
            fig.tight_layout()
            plot.add_plot(width=NoEscape(r'\textwidth'))
            plot.add_caption('Histogram of unique traces.')

        # Accumulate controlled-address transition counts over all traces and render
        # them as a graphviz digraph.
        with doc.create(Figure(position='H')) as plot:
            sorted_trace_stats = OrderedDict(sorted(dict(inference_network._trace_stats).items(),
                                                    key=lambda x: x[1], reverse=True))
            master_trace_pairs = {}
            transition_count_total = 0
            for trace_str, vals in sorted_trace_stats.items():
                count = vals[0]
                ta = vals[4]
                for left, right in zip(ta, ta[1:]):
                    if (left, right) in master_trace_pairs:
                        master_trace_pairs[(left, right)] += count
                    else:
                        master_trace_pairs[(left, right)] = count
                    transition_count_total += count
            fig = plt.figure(figsize=(10, 5))
            ax = plt.subplot(111)
            master_graph = pydotplus.graphviz.Dot(graph_type='digraph', rankdir='LR')
            transition_count_max = 0
            for p, count in master_trace_pairs.items():
                if count > transition_count_max:
                    transition_count_max = count
                nodes = master_graph.get_node(p[0])
                if len(nodes) > 0:
                    n0 = nodes[0]
                else:
                    n0 = pydotplus.Node(p[0])
                    master_graph.add_node(n0)
                nodes = master_graph.get_node(p[1])
                if len(nodes) > 0:
                    n1 = nodes[0]
                else:
                    n1 = pydotplus.Node(p[1])
                    master_graph.add_node(n1)
                master_graph.add_edge(pydotplus.Edge(n0, n1, weight=count))
            for node in master_graph.get_nodes():
                node.set_color('gray')
                node.set_fontcolor('gray')
            for edge in master_graph.get_edges():
                edge.set_color('gray')
            master_graph_annotated = pydotplus.graphviz.graph_from_dot_data(master_graph.to_string())
            for node in master_graph_annotated.get_nodes():
                # color = util.rgb_to_hex(util.rgb_blend((1, 1, 1), (1, 0, 0), address_id_to_count[node.obj_dict['name']] / address_id_count_total))
                address_id = node.obj_dict['name']
                node.set_style('filled')
                node.set_fillcolor(address_id_to_color[address_id])
                node.set_color('black')
                node.set_fontcolor('black')
            for edge in master_graph_annotated.get_edges():
                (left, right) = edge.obj_dict['points']
                count = master_trace_pairs[(left, right)]
                edge.set_label(count)
                # color = util.rgb_to_hex((1.5*(count/transition_count_total), 0, 0))
                edge.set_color('black')
                edge.set_penwidth(2.5 * count / transition_count_max)
            png_str = master_graph_annotated.create_png(prog=['dot', '-Gsize=90', '-Gdpi=600'])
            bio = BytesIO()
            bio.write(png_str)
            bio.seek(0)
            img = np.asarray(mpimg.imread(bio))
            plt.imshow(util.crop_image(img), interpolation='bilinear')
            plt.axis('off')
            plot.add_plot(width=NoEscape(r'\textwidth'))
            plot.add_caption('Succession of controlled addresses (accumulated over all traces). Red: controlled, green: replaced, blue: observed.')

        # Render the most frequent unique traces in detail: a heatmap of controlled
        # addresses over trace steps and a transition graph for each trace type.
        for trace_str, vals in OrderedDict(islice(sorted_trace_stats.items(), 0, detailed_traces)).items():
            count = vals[0]
            trace_id = vals[1]
            doc.append(NoEscape(r'\newpage'))
            with doc.create(Subsubsection('Unique trace ' + trace_id)):
                sys.stdout.write('Rendering unique trace {0}... \r'.format(trace_id))
                sys.stdout.flush()
                addresses = len(plt_controlled_addresses)
                trace_addresses = vals[4]

                with doc.create(Tabularx('ll')) as table:
                    table.add_row(FootnoteText('Count'), FootnoteText('{:,}'.format(count)))
                    table.add_row(FootnoteText('Controlled length'),
                                  FootnoteText('{:,}'.format(len(trace_addresses))))
                doc.append('\n')

                im = np.zeros((addresses, len(trace_addresses)))
                for i in range(len(trace_addresses)):
                    address = trace_addresses[i]
                    address_i = plt_controlled_addresses.index(address)
                    im[address_i, i] = 1
                truncate = 100
                for col_start in range(0, len(trace_addresses), truncate):
                    col_end = min(col_start + truncate, len(trace_addresses))
                    with doc.create(Figure(position='H')) as plot:
                        fig = plt.figure(figsize=(20 * ((col_end + 4 - col_start) / truncate), 4))
                        ax = plt.subplot(111)
                        # ax.imshow(im, cmap=plt.get_cmap('Greys'))
                        sns.heatmap(im[:, col_start:col_end], cbar=False, linecolor='lightgray',
                                    linewidths=.5, cmap='Greys',
                                    yticklabels=plt_controlled_addresses,
                                    xticklabels=np.arange(col_start, col_end))
                        plt.yticks(rotation=0)
                        fig.tight_layout()
                        plot.add_plot(width=NoEscape(r'{0}\textwidth'.format((col_end + 4 - col_start) / truncate)),
                                      placement=NoEscape(r'\raggedright'))

                with doc.create(Figure(position='H')) as plot:
                    pairs = {}
                    for left, right in zip(trace_addresses, trace_addresses[1:]):
                        if (left, right) in pairs:
                            pairs[(left, right)] += 1
                        else:
                            pairs[(left, right)] = 1
                    fig = plt.figure(figsize=(10, 5))
                    ax = plt.subplot(111)
                    graph = pydotplus.graphviz.graph_from_dot_data(master_graph.to_string())
                    trace_address_to_count = {}
                    for address in trace_addresses:
                        if address in trace_address_to_count:
                            trace_address_to_count[address] += 1
                        else:
                            trace_address_to_count[address] = 1
                    transition_count_max = 0
                    for p, count in pairs.items():
                        if count > transition_count_max:
                            transition_count_max = count
                        left_node = graph.get_node(p[0])[0]
                        right_node = graph.get_node(p[1])[0]
                        edge = graph.get_edge(p[0], p[1])[0]
                        # color = util.rgb_to_hex(util.rgb_blend((1,1,1), (1,0,0), trace_address_to_count[p[0]] / len(trace_addresses)))
                        left_node.set_style('filled')
                        left_node.set_fillcolor(address_id_to_color[p[0]])
                        left_node.set_color('black')
                        left_node.set_fontcolor('black')
                        # color = util.rgb_to_hex(util.rgb_blend((1,1,1), (1,0,0), trace_address_to_count[p[0]] / len(trace_addresses)))
                        right_node.set_style('filled')
                        right_node.set_fillcolor(address_id_to_color[p[1]])
                        right_node.set_color('black')
                        right_node.set_fontcolor('black')
                        # (left, right) = edge.obj_dict['points']
                        edge.set_label(count)
                        # color = util.rgb_to_hex((1.5*(count/len(trace_addresses)),0,0))
                        edge.set_color('black')
                        edge.set_penwidth(2.5 * count / transition_count_max)
                    png_str = graph.create_png(prog=['dot', '-Gsize=90', '-Gdpi=600'])
                    bio = BytesIO()
                    bio.write(png_str)
                    bio.seek(0)
                    img = np.asarray(mpimg.imread(bio))
                    plt.imshow(util.crop_image(img), interpolation='bilinear')
                    plt.axis('off')
                    plot.add_plot(width=NoEscape(r'\textwidth'))
                    plot.add_caption('Succession of controlled addresses (for one trace of type ' + trace_id + '). Red: controlled, green: replaced, blue: observed.')

                with doc.create(Tabularx('lp{16cm}')) as table:
                    table.add_row(FootnoteText('Trace'), FootnoteText(' '.join(trace_addresses)))

doc.generate_pdf(file_name, clean_tex=False)
sys.stdout.write(' \r')
sys.stdout.flush()
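# A minimal, self-contained sketch of the Figure/add_plot pattern used throughout the
# report code above: draw a matplotlib figure, embed it with add_plot, then caption it.
# The function name, file name and dummy data below are illustrative only, and building
# the PDF assumes a LaTeX toolchain is installed (plus the float package for position='H').
import matplotlib
matplotlib.use('Agg')  # render off-screen; assumes no display is attached
import matplotlib.pyplot as plt
import numpy as np
from pylatex import Document, Figure, NoEscape, Package


def figure_pattern_demo(file_name='figure_pattern_demo'):
    doc = Document()
    doc.packages.append(Package('float'))  # required for the 'H' float placement
    with doc.create(Figure(position='H')) as plot:
        fig = plt.figure(figsize=(10, 5))
        ax = plt.subplot(111)
        ax.bar(range(5), np.arange(1, 6))  # dummy data standing in for the real counts
        plt.xlabel('Address ID')
        plt.ylabel('Count')
        fig.tight_layout()
        plot.add_plot(width=NoEscape(r'\textwidth'))  # embeds the current matplotlib figure
        plot.add_caption('Example histogram embedded via pylatex.')
    doc.generate_pdf(file_name, clean_tex=False)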
def tuning_RandomForest():
    # Exhaustive grid search over random forest hyperparameters, evaluated with the
    # same cross-validation helper used by tuning_MultinomialNB.
    param_grid = {
        "max_features": ["auto", "sqrt", "log2"],
        "n_estimators": [50, 100, 200],
        "min_samples_leaf": [25, 50, 100],
        "max_depth": [None, 10, 20, 40, 80],
    }
    result_list = []
    optimized_param = [0, 0, 0, 0, 0, 0, 0, 0]
    for max_features in param_grid["max_features"]:
        for n_estimators in param_grid["n_estimators"]:
            for min_samples_leaf in param_grid["min_samples_leaf"]:
                for max_depth in param_grid["max_depth"]:
                    # try:
                    current_param_and_eval = [max_features, n_estimators, min_samples_leaf, max_depth]
                    clf = RandomForestClassifier(max_features=max_features,
                                                 n_estimators=n_estimators,
                                                 min_samples_leaf=min_samples_leaf,
                                                 max_depth=max_depth)
                    f1_scores_dict = load_train_output_crossvalidation(clf)
                    f1_scores_dict = round_f1_score(f1_scores_dict)
                    f1_train_average = f1_scores_dict["f1_train_average"]
                    f1_dev_average = f1_scores_dict["f1_dev_average"]
                    f1_train_micro_average = f1_scores_dict["f1_train_micro_average"]
                    f1_dev_micro_average = f1_scores_dict["f1_dev_micro_average"]
                    current_param_and_eval.append(f1_train_micro_average)
                    current_param_and_eval.append(f1_dev_micro_average)
                    current_param_and_eval.append(f1_train_average)
                    current_param_and_eval.append(f1_dev_average)
                    result_list.append(current_param_and_eval)
                    # Keep the combination with the best validation micro-averaged F1 (index 5).
                    if current_param_and_eval[5] > optimized_param[5]:
                        optimized_param = current_param_and_eval
                    # except:
                    #     print("An exception occurs.")

    # Generate data tables: all grid-search results, then the best parameter combination.
    geometry_options = {"margin": "2.54cm", "includeheadfoot": True}
    doc = Document(page_numbers=True, geometry_options=geometry_options)
    with doc.create(LongTable("l l l l l l l l")) as data_table:
        data_table.add_hline()
        data_table.add_row([
            "max_features", "n_estimators", "min_samples_leaf", "max_depth",
            "training f1", "valid f1", "training f1 for each technique",
            "valid f1 for each technique"
        ])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()
        for i in range(len(result_list)):
            data_table.add_row(result_list[i][0:8])
        data_table.add_hline()
    with doc.create(LongTable("l l l l l l l l")) as data_table:
        data_table.add_hline()
        data_table.add_row([
            "max_features", "n_estimators", "min_samples_leaf", "max_depth",
            "training f1", "valid f1", "training f1 for each technique",
            "valid f1 for each technique"
        ])
        data_table.add_hline()
        data_table.end_table_header()
        data_table.add_hline()
        data_table.add_row(optimized_param)
        data_table.add_hline()
    print("This is for RandomForest.")
    doc.generate_pdf("tuning_RandomForest", clean_tex=False)
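# The tuning functions above repeat the same LongTable boilerplate for the full result
# grid and for the best row. A hedged sketch of one way to factor that out follows;
# write_tuning_report and its arguments are hypothetical names, not part of the code above.
from pylatex import Document, LongTable


def write_tuning_report(file_name, header, rows, best_row):
    """Render the full grid-search results and the best row as two LongTables in one PDF."""
    geometry_options = {"margin": "2.54cm", "includeheadfoot": True}
    doc = Document(page_numbers=True, geometry_options=geometry_options)
    spec = " ".join(["l"] * len(header))  # one left-aligned column per header entry

    def add_table(table_rows):
        with doc.create(LongTable(spec)) as data_table:
            data_table.add_hline()
            data_table.add_row(header)
            data_table.add_hline()
            data_table.end_table_header()
            data_table.add_hline()
            for row in table_rows:
                data_table.add_row(row)
            data_table.add_hline()

    add_table(rows)        # every parameter combination evaluated
    add_table([best_row])  # the single best combination
    doc.generate_pdf(file_name, clean_tex=False)


# Example (hypothetical) usage with the variables built in tuning_RandomForest:
# write_tuning_report("tuning_RandomForest", header, result_list, optimized_param)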