# Example #1
    def _document_models(self) -> None:
        """Add model summaries to the traceability document.

        Writes one "Models" section containing, for every tf.keras or
        torch model in ``self.system.network.models``, a text summary, a
        centered cross-reference link (HrefFEID), and — when graph
        rendering succeeds — a PDF diagram of the architecture saved under
        ``self.resource_dir`` and embedded as a labeled figure.
        """
        with self.doc.create(Section("Models")):
            for model in humansorted(self.system.network.models,
                                     key=lambda m: m.model_name):
                # Only TensorFlow and PyTorch models can be summarized here.
                if not isinstance(model, (tf.keras.Model, torch.nn.Module)):
                    continue
                # Keep floats from earlier subsections from drifting past this point.
                self.doc.append(NoEscape(r'\FloatBarrier'))
                with self.doc.create(
                        Subsection(f"{model.model_name.capitalize()}")):
                    if isinstance(model, tf.keras.Model):
                        # Text Summary
                        summary = []
                        model.summary(line_length=92,
                                      print_fn=lambda x: summary.append(x))
                        summary = "\n".join(summary)
                        self.doc.append(Verbatim(summary))
                        with self.doc.create(Center()):
                            self.doc.append(
                                HrefFEID(FEID(id(model)), model.model_name))

                        # Visual Summary
                        # noinspection PyBroadException
                        try:
                            file_path = os.path.join(
                                self.resource_dir,
                                "{}_{}.pdf".format(self.report_name,
                                                   model.model_name))
                            dot = tf.keras.utils.model_to_dot(
                                model, show_shapes=True, expand_nested=True)
                            # LaTeX \maxdim is around 575cm (226 inches), so the image must have max dimension less than
                            # 226 inches. However, the 'size' parameter doesn't account for the whole node height, so
                            # set the limit lower (100 inches) to leave some wiggle room.
                            dot.set('size', '100')
                            dot.write(file_path, format='pdf')
                        except Exception:
                            # Best-effort: a failed render only skips the figure below.
                            file_path = None
                            print(
                                f"FastEstimator-Warn: Model {model.model_name} could not be visualized by Traceability"
                            )
                    elif isinstance(model, torch.nn.Module):
                        if hasattr(model, 'fe_input_spec'):
                            # Text Summary
                            # fe_input_spec supplies a dummy tensor so the summary
                            # can trace a forward pass through the model.
                            # noinspection PyUnresolvedReferences
                            inputs = model.fe_input_spec.get_dummy_input()
                            self.doc.append(
                                Verbatim(
                                    pms.summary(
                                        model.module if
                                        self.system.num_devices > 1 else model,
                                        inputs,
                                        print_summary=False)))
                            with self.doc.create(Center()):
                                self.doc.append(
                                    HrefFEID(FEID(id(model)),
                                             model.model_name))
                            # Visual Summary
                            # Import has to be done while matplotlib is using the Agg backend
                            old_backend = matplotlib.get_backend() or 'Agg'
                            matplotlib.use('Agg')
                            # noinspection PyBroadException
                            try:
                                # Fake the IPython import when user isn't running from Jupyter
                                sys.modules.setdefault('IPython', MagicMock())
                                sys.modules.setdefault('IPython.display',
                                                       MagicMock())
                                import hiddenlayer as hl
                                with Suppressor():
                                    graph = hl.build_graph(
                                        model.module if
                                        self.system.num_devices > 1 else model,
                                        inputs)
                                graph = graph.build_dot()
                                graph.attr(
                                    rankdir='TB'
                                )  # Switch it to Top-to-Bottom instead of Left-to-Right
                                # LaTeX \maxdim is around 575cm (226 inches), so the image must have max dimension less
                                # than 226 inches. However, the 'size' parameter doesn't account for the whole node
                                # height, so set the limit lower (100 inches) to leave some wiggle room.
                                graph.attr(size="100,100")
                                graph.attr(margin='0')
                                file_path = graph.render(
                                    filename="{}_{}".format(
                                        self.report_name, model.model_name),
                                    directory=self.resource_dir,
                                    format='pdf',
                                    cleanup=True)
                            except Exception:
                                file_path = None
                                print(
                                    "FastEstimator-Warn: Model {} could not be visualized by Traceability"
                                    .format(model.model_name))
                            finally:
                                # Always restore the caller's matplotlib backend.
                                matplotlib.use(old_backend)
                        else:
                            file_path = None
                            self.doc.append(
                                "This model was not used by the Network during training."
                            )
                    # file_path is None whenever no diagram was produced above.
                    if file_path:
                        with self.doc.create(Figure(position='ht!')) as fig:
                            # Label this figure so HrefFEID links can target it.
                            fig.append(
                                Label(
                                    Marker(name=str(FEID(id(model))),
                                           prefix="model")))
                            fig.add_image(
                                os.path.relpath(file_path,
                                                start=self.save_dir),
                                width=NoEscape(
                                    r'1.0\textwidth,height=0.95\textheight,keepaspectratio'
                                ))
                            fig.add_caption(
                                NoEscape(
                                    HrefFEID(FEID(id(model)),
                                             model.model_name).dumps()))
# Example #2
# Test for list structures in PyLaTeX.
# More info @ http://en.wikibooks.org/wiki/LaTeX/List_Structures
from pylatex import Document, Section, Itemize, Enumerate, Description, \
    Command, NoEscape

if __name__ == '__main__':
    doc = Document()

    # create a bulleted "itemize" list like the below:
    # \begin{itemize}
    #   \item The first item
    #   \item The second item
    #   \item The third etc \ldots
    # \end{itemize}

    with doc.create(Section('"Itemize" list')):
        with doc.create(Itemize()) as itemize:
            itemize.add_item("the first item")
            itemize.add_item("the second item")
            itemize.add_item("the third etc")
            # you can append to existing items
            itemize.append(Command("ldots"))

    # create a numbered "enumerate" list like the below:
    # \begin{enumerate}[label=\alph*),start=20]
    #   \item The first item
    #   \item The second item
    #   \item The third etc \ldots
    # \end{enumerate}

    with doc.create(Section('"Enumerate" list')):
        # NOTE(review): the snippet is truncated at this point in this view —
        # the Enumerate body is not visible.
# Example #3
def _gif_row_colors(hv_value, spark_value, eff_value):
    """Classify one sector's measurements into report colors.

    Thresholds (unchanged from the original report logic):
      * HV (as int volts): > 567.9 ok, 548.0..567.9 marginal, < 548.0 failing
      * spark rate:        < 6.0 ok, == 6.0 marginal, > 6.0 failing
      * efficiency:        > 80.0 ok, == 80.0 marginal, < 80.0 failing

    Returns:
        (hv_color, spark_color, eff_color, accepted_color, accepted) where
        the colors are LaTeX color names ("black" ok, "orange" marginal,
        "red" failing) and accepted is 1 when nothing is red, else 0.
    """
    hv_int = int(hv_value)
    if hv_int > 567.9:
        hv_color = "black"
    elif 548.0 <= hv_int <= 567.9:
        hv_color = "orange"
    else:
        hv_color = "red"

    if spark_value > 6.0:
        spark_color = "red"
    elif spark_value == 6.0:
        spark_color = "orange"
    else:
        spark_color = "black"

    # Bug fix: the original compared the whole efficiency *list* to 80.0
    # (`efficiency_irradiated == 80.0`), so the "orange" branch never fired
    # and eff_color could be unbound for an exactly-80.0 reading.
    if eff_value < 80.0:
        eff_color = "red"
    elif eff_value > 80.0:
        eff_color = "black"
    else:
        eff_color = "orange"

    if "red" in (hv_color, spark_color, eff_color):
        return hv_color, spark_color, eff_color, "red", 0
    return hv_color, spark_color, eff_color, "black", 1


def _add_pdf_grid(table, pattern):
    """Append the PDF plots matching *pattern* to *table*, four per row.

    Files are ordered by modification time; a trailing incomplete row of
    fewer than four images is dropped (same behavior as the original code).
    """
    pdf_files = glob.glob(pattern)
    pdf_files.sort(key=os.path.getmtime)
    graphics = [StandAloneGraphic(path, image_options="width=120px")
                for path in pdf_files]
    print(len(graphics))  # bug fix: was Python-2 `print len(...)` syntax
    row_images = []
    for graphic in graphics:
        row_images.append(graphic)
        if len(row_images) == 4:
            table.add_row(row_images)
            row_images = []


def generate_unique_gif_ps(sectors_irradiated, hv_irradiated, spark_irradiated, ps_hv, ps_spike):
    """Generate ``complex_report.pdf`` summarizing GIF irradiation results.

    Args:
        sectors_irradiated: per-sector labels (indexed by position, e.g. "L1...").
        hv_irradiated: per-sector HV readings.
        spark_irradiated: per-sector spark rates (sparks/min).
        ps_hv: per-sector power-supply HV readings.
        ps_spike: per-sector power-supply spike readings.

    NOTE(review): this function also reads module-level globals defined
    elsewhere in the file (now, chambername, ID, timeslot_irradiated,
    deltatime_irradiated, efficiency_irradiated, total_efficiency_irradiated,
    layers_efficiency_irradiated, piecart, simple_page_number) — confirm they
    are initialized before calling.  The original's dead ``redcircle`` block
    (which passed a glob *list* as a filename) was removed.
    """
    geometry_options = {
        "head": "40pt",
        "margin": "0.5in",
        "bottom": "1.0in",
        "includeheadfoot": True
    }
    doc = Document(indent=False, geometry_options=geometry_options)

    # --- First-page style: header (logo + title block) and footer (contacts).
    first_page = PageStyle("firstpage")

    with first_page.create(Head("L")) as header_left:
        with header_left.create(MiniPage(width=NoEscape(r"0.49\textwidth"),
                                         pos='c')) as logo_wrapper:
            logo_file = os.path.join(os.path.dirname(__file__),
                                     '../cernlogo.png')
            logo_wrapper.append(StandAloneGraphic(image_options="width=80px",
                                                  filename=logo_file))

    with first_page.create(Head("R")) as right_header:
        with right_header.create(MiniPage(width=NoEscape(r"0.49\textwidth"),
                                          pos='c', align='r')) as title_wrapper:
            title_wrapper.append(LargeText(bold("ATLAS New Small Wheel")))
            title_wrapper.append(LineBreak())
            title_wrapper.append(bold("Integration Tests"))
            title_wrapper.append(LineBreak())
            title_wrapper.append(bold(now.strftime("%d-%m-%Y")))
            title_wrapper.append(LineBreak())
            title_wrapper.append("\n")
            title_wrapper.append(LargeText(bold("Chamber: " + str(chambername))))
            title_wrapper.append(LineBreak())
            title_wrapper.append("ID: " + str(ID))
            title_wrapper.append(LineBreak())

    with first_page.create(Foot("C")) as footer:
        with footer.create(Tabularx(
                "X X X ",
                width_argument=NoEscape(r"\textwidth"))) as footer_table:
            footer_table.add_empty_row()
            footer_table.add_hline(color="blue")

            contacts = []
            for contact_name in ("Lorenzo Pezzotti", "Alan Peyaud", "Ivan Gnesi"):
                contact = MiniPage(width=NoEscape(r"0.25\textwidth"), pos='t')
                contact.append(contact_name)
                contact.append("\n")
                contact.append("*****@*****.**")
                contacts.append(contact)

            # NOTE(review): built but never added to the footer row, exactly
            # as in the original — confirm whether a page number was intended.
            document_details = MiniPage(width=NoEscape(r"0.2\textwidth"),
                                        pos='t', align='r')
            document_details.append(" ")
            document_details.append(LineBreak())
            document_details.append(simple_page_number())

            footer_table.add_row(contacts)

    doc.append(first_page)
    # End first page style.

    # First-page spacer table (the branch-information block was commented out
    # by the original author; an empty row is all that remains).
    with doc.create(Tabu("X[r]")) as first_page_table:
        first_page_table.add_empty_row()

    doc.change_document_style("firstpage")
    doc.add_color(name="lightgray", model="gray", description="0.85")
    doc.add_color(name="lightgray2", model="gray", description="0.6")

    with doc.create(Section('HV irradiated at GIF', numbering=False)):
        doc.append("\n")
        doc.append(timeslot_irradiated)
        doc.append(LineBreak())
        doc.append(str(deltatime_irradiated / 60) + str("_min"))
        doc.append(LineBreak())
        doc.append("Spike_treshold_0.20_uA")
        doc.append(LineBreak())
        with doc.create(LongTabu("|X[l]| X[r]| X[r] |X[r] |X[r]| X[r]| X[r]|",
                                 row_height=1.5)) as data_table_irradiated:
            data_table_irradiated.add_hline()
            data_table_irradiated.add_row(["Sector",
                                           "HV",
                                           "HV PS",
                                           "spark/min",
                                           "spark/min PS",
                                           "Efficiency",
                                           "Flag"],
                                          mapper=bold,
                                          color="lightgray2")
            data_table_irradiated.add_hline()
            data_table_irradiated.end_table_header()
            data_table_irradiated.add_hline()

            acceptedlist = []
            for i in range(len(hv_irradiated)):
                (hvcolor, sparkcolor, effcolor,
                 acceptedcolor, accepted) = _gif_row_colors(
                    hv_irradiated[i], spark_irradiated[i],
                    efficiency_irradiated[i])
                acceptedlist.append(accepted)
                cells = [str(sectors_irradiated[i]),
                         TextColor(hvcolor, str(int(hv_irradiated[i]))),
                         TextColor("blue", str(ps_hv[i])),
                         TextColor(sparkcolor, str(round(spark_irradiated[i], 2))),
                         TextColor("blue", str(ps_spike[i])),
                         TextColor(effcolor, str(round(efficiency_irradiated[i], 1))),
                         TextColor(acceptedcolor, "V")]
                # Alternate row shading: odd rows are light gray.
                if i % 2 == 0:
                    data_table_irradiated.add_row(cells)
                else:
                    data_table_irradiated.add_row(cells, color="lightgray")
                data_table_irradiated.add_hline()

            data_table_irradiated.add_hline()
            data_table_irradiated.add_row(
                "Out of spec",
                str(sum(1 for x in hv_irradiated if x < 548.0)),
                "",
                str(sum(1 for x in spark_irradiated if x > 6.0)),
                "",
                str(sum(1 for x in efficiency_irradiated if x < 80.0)),
                str(acceptedlist.count(0)))
            data_table_irradiated.add_empty_row()
            data_table_irradiated.add_hline()
            data_table_irradiated.add_row("Chamber efficiency",
                                          "", "", "", "", "",
                                          str(round(total_efficiency_irradiated)))
            data_table_irradiated.add_hline()

    doc.append(NewPage())
    with doc.create(Section('Summary irradiated at GIF', numbering=False)):
        # Pie chart of accepted vs rejected sectors.
        piecart.create_pie([acceptedlist.count(1), acceptedlist.count(0)],
                           "newpie.pdf")

        with doc.create(LongTabu("X[c]")) as summary2_table:
            pies = [StandAloneGraphic(path, image_options="width=220px")
                    for path in glob.iglob("newpie.pdf")]
            summary2_table.add_row([pies[0]])

        # Per-channel out-of-spec counts, split into eta and stereo layers.
        SM1channels = ["L1", "R1", "L2", "R2", "L3", "R3", "L4", "R4", "L5", "R5"]
        SM2channels = ["L6", "R6", "L7", "R7", "L8", "R8"]
        badresultsall = []
        badresultseta = []
        badresultsstereo = []

        chamber_type = chambername[0:3]
        if chamber_type in ("SM1", "LM1"):
            channels = SM1channels
        elif chamber_type in ("SM2", "LM2"):
            channels = SM2channels
        else:
            # Robustness fix: the original fell through with `channels`
            # undefined (NameError); fail with a clear message instead.
            raise ValueError("Unknown chamber type: " + str(chambername))

        for channel in channels:
            cntall = sum(1 for x, sector in enumerate(sectors_irradiated)
                         if sector[2:4] == channel and acceptedlist[x] == 1)
            cnteta = sum(1 for x, sector in enumerate(sectors_irradiated)
                         if sector[2:4] == channel
                         and (sector[1:2] == "1" or sector[1:2] == "2")
                         and acceptedlist[x] == 1)
            cntstereo = sum(1 for x, sector in enumerate(sectors_irradiated)
                            if sector[2:4] == channel
                            and (sector[1:2] == "3" or sector[1:2] == "4")
                            and acceptedlist[x] == 1)
            badresultsall.append(4 - int(cntall))
            badresultseta.append(2 - int(cnteta))
            badresultsstereo.append(2 - int(cntstereo))

        with doc.create(LongTabu("X[l] X[r] X[r] X[r]",
                                 row_height=1.5)) as data_table2:
            data_table2.add_row(["Sector overimposed (from eta side)",
                                 "Eta",
                                 "Stereo",
                                 "Eta+Stereo"],
                                mapper=bold,
                                color="lightgray2")
            data_table2.add_empty_row()
            data_table2.add_hline()
            for i in range(len(channels)):
                cells = [str(channels[i]),
                         str(int(badresultseta[i])),
                         str(badresultsstereo[i]),
                         badresultsall[i]]
                # Note: this table shades the EVEN rows (opposite of above).
                if i % 2 == 0:
                    data_table2.add_row(cells, color="lightgray")
                else:
                    data_table2.add_row(cells)

        with doc.create(LongTabu("X[l] X[r]",
                                 row_height=1.5)) as data_table3:
            data_table3.add_row(["Layer",
                                 "Mean Efficiency"],
                                mapper=bold,
                                color="lightgray2")
            data_table3.add_empty_row()
            data_table3.add_hline()
            channelsT3 = ["L1", "L2", "L3", "L4"]
            for i in range(len(layers_efficiency_irradiated)):
                cells = [str(channelsT3[i]),
                         str(round(layers_efficiency_irradiated[i], 1))]
                if i % 2 == 0:
                    data_table3.add_row(cells, color="lightgray")
                else:
                    data_table3.add_row(cells)

    doc.append(NewPage())

    with doc.create(Section('Current under irradiation', numbering=False)):
        with doc.create(LongTabu("X[c] X[c] X[c] X[c]")) as cheque_table:
            _add_pdf_grid(cheque_table, 'GIF-i*.pdf')

    doc.append(NewPage())

    with doc.create(Section('Current vs. flux (GIF)', numbering=False)):
        with doc.create(LongTabu("X[c] X[c] X[c] X[c]")) as cheque_table:
            _add_pdf_grid(cheque_table, 'i*.pdf')

    doc.generate_pdf("complex_report", clean_tex=False, compiler='pdflatex')
def _add_metric_table(doc, metric_name, rows):
    """Append a Subsection containing one |c|c|c| comparison table.

    Args:
        doc: the pylatex Document to append to.
        metric_name: metric label used in the title and header column
            ('BLEU', 'METEOR' or 'TER').
        rows: list of (category_label, baseline_score, flat_score) tuples,
            one per DBpedia category.
    """
    with doc.create(
            Subsection('Table of Evaluation in term of %s \n' % metric_name)):
        with doc.create(Tabular('|c|c|c|')) as table:
            table.add_hline()
            table.add_row(bold("Category Name"), bold("Model"),
                          bold(metric_name))
            table.add_hline()
            for index, (label, baseline_score, flat_score) in enumerate(rows):
                if index:
                    table.add_hline()  # double rule between categories
                table.add_row((MultiRow(3, data=label),
                               "Modify Baseline", baseline_score))
                table.add_hline(2, 3)
                table.add_row(('', "Flat", bold(flat_score)))
                table.add_hline()


def generateTex(filepath=None):
    """Build the 'Category_SeenSetEvaluation' PDF comparing two models.

    Reads the per-category score files (each ordered as [BLEU, METEOR, TER])
    and renders one comparison table per metric for the flat model vs the
    modified baseline.

    Args:
        filepath: accepted for interface compatibility but unused, matching
            the original implementation.
    """
    # (file_stub, table_label): the score files use 'SportsTeam' while the
    # table displays 'SportTeam', exactly as in the original document.
    categories = [
        ('Astronaut', 'Astronaut'),
        ('City', 'City'),
        ('Food', 'Food'),
        ('SportsTeam', 'SportTeam'),
        ('University', 'University'),
    ]
    # scores ordered in each list as: BLEU METEOR TER
    scores = [(label,
               searchScores('base%s_score.txt' % stub),
               searchScores('flat%s_score.txt' % stub))
              for stub, label in categories]

    geometry_options = {"tmargin": "1cm", "lmargin": "5cm"}
    doc = Document(geometry_options=geometry_options)

    with doc.create(
            Section(
                'The evaluation scores for different DBpedia categories of Seen dataset:'
            )):
        doc.append(italic('=============================================\n'))
        doc.append(
            'Comparing the evaluation scores between two model: (flat, modify baseline) according to different DBpedia categories, which are: Astronaut,City,University,Food,SportsTeam.\n'
        )
        doc.append(
            italic(
                'We mean by "flat" one model from the extended system which contains 3 model:flat,str1,str2\n'
            ))
        doc.append(
            italic(
                'Where we evaluate just this model in term of different DBpedia categories, because the result of other models i.e. str1&str2 for the whole dataset seen and unseen not very good comparing by this model\n'
            ))
        doc.append(
            italic(
                'We mean by "Modify baseline" the modified baseline of webnlg challeng \n'
            ))

        # One table per metric; index selects BLEU/METEOR/TER from each
        # score list.  Replaces three near-identical 40-line blocks.
        for metric_index, metric_name in enumerate(('BLEU', 'METEOR', 'TER')):
            _add_metric_table(
                doc, metric_name,
                [(label, base_scores[metric_index], flat_scores[metric_index])
                 for label, base_scores, flat_scores in scores])
    # NOTE(review): the trailing space in the output name is preserved from
    # the original — confirm whether it is intentional.
    doc.generate_pdf('Category_SeenSetEvaluation ', clean_tex=False)
# Example #5
               documentclass='article',
               fontenc='T1',
               inputenc='utf8',
               author='',
               title='',
               date='',
               data=None,
               maketitle=False)

doc.append('Some text.')

doc.generate_tex(filepath='')
doc.generate_pdf(filepath='', clean=True)

# SectionBase
s = Section(title='', numbering=True, data=None)

# Math
m = Math(data=None, inline=False)

# Tabular
t = Tabular(table_spec='|c|c|', data=None, pos=None)

t.add_hline(start=None, end=None)

t.add_row(cells=(1, 2), escape=False)

t.add_multicolumn(size=2,
                  align='|c|',
                  content='Multicol',
                  cells=None,
doc.append(NewPage())

#Summary Chapter 1
# Raw LaTeX \chapter commands are used since no pylatex chapter container
# is employed in this script.
doc.append(NoEscape(r'\chapter{Executive Summary}'))
doc.append(NewPage())

#Chapter 2
doc.append(
    NoEscape(
        r'\chapter{Central Government Debt: Bonds, Issuers and Investors}'))
doc.append(NewPage())

#2.1
# NOTE(review): `doc` is created in a part of this script not visible here —
# presumably a pylatex Document; confirm upstream.
with doc.create(
        Section(
            'By Residency [internal/local/resident; external/foreigner/non-resident]'
        )):
    doc.append(
        NoEscape(
            r"\href{https://www.trounceflow.com/app/dominican-republic/#tab_byresidency}{View the chart }"
        ))
    doc.append(
        'on trounceflow.com and download the data straight from the chart\n')
    doc.append(
        'Gross debt of the central administration (excluding eligible debt restructuring pending):\n'
    )

    doc.append(bold('USD bn\n'))
    # Only the header row is visible here; data rows, if any, are added elsewhere.
    with doc.create(Tabular('l|r|r|r')) as table:
        table.add_row(('Date', 'Domestic', 'External', 'Total'))
        table.add_hline()
    def discover_experiment_data(self, experiment_name, experiment_type, tasks,
                                 task_counts, description):
        """Record one experiment run's per-task performance statistics.

        Registers the experiment's Section (created on first sight) and
        experiment type, renders a latency/throughput table into the
        section, and folds each task's rounded metrics into
        ``self.metric_values`` keyed by metric name, task count, and the
        task's indexed name.
        """
        if experiment_name not in self.sections:
            section = Section(experiment_name)
            section.append(description)
            self.sections[experiment_name] = section
        if experiment_type not in self.experiment_types:
            self.experiment_types.append(experiment_type)

        workloads_results = Subsection('')
        # Results table: one row per task plus a header.
        table = Tabular('|c|c|c|c|c|')
        table.add_hline()
        table.add_row(('name', 'avg latency', 'avg throughput', 'q0.9 latency',
                       'q0.9 throughput'))
        table.add_hline()

        def _rounded(metrics, metric, aggregation):
            # Every displayed value is rounded to three decimal places.
            return round(float(metrics[metric][aggregation]), 3)

        for task in tasks:
            task_results = tasks[task]
            short_name = self._strip_task_name(task)
            count = task_counts[short_name]
            performance = task_results.performance_metrics

            average_latency = _rounded(performance, Metric.TASK_LATENCY, AVG)
            average_throughput = _rounded(performance,
                                          Metric.TASK_THROUGHPUT, AVG)
            q09_latency = _rounded(performance, Metric.TASK_LATENCY, Q09)
            q09_throughput = _rounded(performance,
                                      Metric.TASK_THROUGHPUT, Q09)

            table.add_row((task_results.name.replace('default/', ''),
                           average_latency, average_throughput,
                           q09_latency, q09_throughput))
            table.add_hline()

            indexed_name = self._strip_memory_suffix(
                short_name + '-' + self._get_task_index(task))

            # Fold the four statistics into the nested
            # metric -> task_count -> task_name -> experiment_type mapping.
            for metric_name, metric_value in (
                    (AVG_LATENCY, average_latency),
                    (AVG_THROUGHPUT, average_throughput),
                    (Q09_LATENCY, q09_latency),
                    (Q09_THROUGHPUT, q09_throughput)):
                per_count = self.metric_values[metric_name].setdefault(
                    count, {})
                per_count.setdefault(indexed_name, {})[
                    experiment_type] = metric_value

        workloads_results.append(table)
        self.sections[experiment_name].append(workloads_results)
# Example #8
import numpy as np  # NumPy library (imported but unused in this excerpt)
from pylatex import Document, Section, Subsection, Tabular, Math, TikZ, Axis, Plot, Matrix, Alignat  # Import the required pylatex classes
from pylatex.utils import italic  # Import the italic text helper

if __name__ == '__main__':  # Program entry point
    # tmargin - top margin
    # lmargin - left margin
    geometry_options = {"tmargin": "1cm", "lmargin": "1cm"}

    # Create the Document object (kept in `doc` for later use) with the margin settings above
    doc = Document(geometry_options=geometry_options)

    # Create a section. Heading 1.
    with doc.create(Section('Simple things')):
        # Append plain text
        doc.append('Some basic text, and even ')
        # Append italic text
        doc.append(italic('italic bold. '))
        # Move to the next line and output special characters
        doc.append('\nAnd some symbols: $&#{}')
        # Create a nested subsection, 1.1.
        with doc.create(Subsection('Math that is incorrect: ')):
            # Append a mathematical expression (deliberately incorrect)
            doc.append(Math(data=['2*3', '=', 9]))

        # Create a nested subsection (now 1.2.)
        with doc.create(Subsection('Tables, or kinda:')):
            # 'ccc|c' creates a 4-column table with a vertical rule after column 3
            with doc.create(Tabular('ccc|c')) as table:
                # Add a horizontal rule across the whole table
                table.add_hline()
Exemple #9
0
if __name__ == '__main__':
    # Alternative ways of constructing the function, kept for reference:
    #f2 = Funkcija(mintermi=[0,1,2,3,6,7,8,10,11,13,14,15],n=4)
    #f2 = Funkcija("11001010")
    # f2 = Funkcija(mintermi=[2,3,4,5,9,10,11,12,13],n=4)
    #f2 = from_latex("1 \\lor x_2 \\implies ( x_3 \\lor x_4)",name="g")
    #f2 = from_latex("\\overline{\\overline{x_1} \\overline{x_2}\\lor x_3 } ")
    #f2 = Funkcija(mintermi=[0,1,2,3,6,7,8,10,11,13,14,15], n=4, name="f")
    # Build a 4-variable Boolean function from its minterm list.
    f2 = Funkcija(mintermi=[3, 6, 9, 10, 11, 12, 14, 15],
                  spremenljivke=['x_1', 'x_2', 'x_3', 'q'])
    print("F2", f2)
    # Minimize the function; presumably Quine-McCluskey — the tuple appears
    # to hold (LaTeX output, coverage info, cost, (MDNF tex, MCNF tex)).
    # TODO(review): confirm against quin()'s definition.
    qf2_tex, q2_vseb, q2_cena, (mdno_tex, mkno_tex) = quin(f2)
    print("q2 vseb", q2_vseb)

    # Assemble a LaTeX document showing the normal forms and the truth table.
    d = Document('basic')
    with d.create(Section('Section no. 1')):
        d.append("Funkcija f")
        d.append(NewLine())
        d.append(NewLine())

        d.append(pdno(f2))  # presumably the disjunctive normal form — confirm against pdno()
        d.append(NewLine())
        d.append(NewLine())

        d.append(pkno(f2))  # presumably the conjunctive normal form — confirm against pkno()
        d.append(NewLine())
        d.append(NewLine())

        d.append(tabela(f2))  # truth table of the function
        d.append(NewLine())
                                                     db.data,
                                                     db.target,
                                                     cv=folds,
                                                     n_jobs=-1)
        scores = print_to_latex_accsespf1g(testpredict, testtarget)

        for i, score in enumerate(scores):
            rows[i].append(score)
        print("----------")
        print(str(clf))
        print_scores(testpredict, testtarget)
    for table, row in zip(tables, rows):
        max_v = max(row[1:])
        new_row = []
        for item in row:
            if item == max_v:
                new_row.append(bold(max_v))
            else:
                new_row.append(item)

        table.add_row(new_row)
        table.add_hline()

# Assemble one document section per result table and emit both the .tex
# source and the rendered .pdf into the output directories.
doc = Document("test_ensemble_rating_withvoting")
# `enumerate` was dropped: the index it produced was never used.
for tab, sec in zip(tables, sections):
    # `sec` is the section title, `tab` the matching pylatex table.
    section = Section(sec)
    section.append(tab)
    doc.append(section)
doc.generate_tex(os.path.join(path, 'wyniki/pliki_tex/'))
doc.generate_pdf(os.path.join(path, 'wyniki/pliki_pdf/'))
Exemple #11
0
        if unparsed_text[i] == "@":
            master_list.append(i)
    return master_list


if x.get_solution_style() == True:
    # Split the raw source into chunks delimited by '@' markers; `parts`
    # holds the marker indices. Presumably `z` is the raw TeX text — confirm
    # against part_splitter's definition.
    parts = part_splitter(z)
    solutions = Document()
    solutions.preamble.append(Command('usepackage', 'amsmath'))
    solutions.preamble.append(Command('title', question_type))
    solutions.preamble.append(Command('author', 'Generated Solutions'))
    solutions.preamble.append(Command('date', NoEscape(r'\today')))
    # solutions.append(NoEscape(r'\maketitle')) Use if a title is desired.

    with solutions.create(
            Section('Solutions Generated for ' + question_type,
                    numbering=False)):
        # Each slice between consecutive '@' markers is one solution chunk,
        # inserted with NoEscape so embedded TeX commands are kept intact.
        for i in range(len(parts) - 1):
            x = z[parts[i] + 1:parts[i + 1]]
            solutions.append(NoEscape(x))

    solutions.generate_tex(
        'Solved-TeX'
    )  # Generates .tex file, check in folder for the .tex dump.
    solutions.generate_pdf(
        'Solved')  # Generates .pdf file, check in folder for the .pdf dump.
    print("PDF sucessfully created! Opening now...")
    # Open the generated PDF via a helper shell script.
    os.popen('./pdfopen.sh')

else:
    # No solution styling requested: print the stored answer, dropping the
    # trailing character (presumably a newline — confirm against get_answer()).
    print(x.get_answer()[:-1])
Exemple #12
0
    def delete_file(self):
        """Delete the order file matching the current order number, if present.

        Best-effort: a missing or undeletable file is silently ignored.
        """
        filename = self.order_number.get()
        try:
            # The original code ignored `filename` and used a corrupted
            # placeholder path; the computed name is clearly the intent here.
            os.remove(f"{ORDERS_FOLDER}/{filename}")
        except OSError:
            # Narrowed from a bare `except`: only swallow filesystem errors.
            pass

    def export(self):
        if not (filename := filedialog.asksaveasfilename(
                # initialdir="/",
                initialfile=self.order_number.get(),
                title="Lagre som")):
            return

        doc = self.create_document()
        doc.append(Section("", numbering=False))
        doc.append(self.get_general_info_as_tabularx())
        doc.append(Section("Pakkeliste", numbering=False))
        doc.append(self.get_packaging_table_as_tabularx())
        doc.append(Section("Tegning", numbering=False))
        figures = self.get_canvas_as_figure_and_filename()
        for fig in figures[0]:
            doc.append(fig)

        doc.generate_pdf(filename, compiler="pdflatex")

        # Remove the temporary canvas image(s)
        try:
            for img in figures[1]:
                os.remove(img)
        except:
    def _document_models(self) -> None:
        """Add model summaries to the traceability document.

        Creates a "Models" section with one subsection per model in the
        network. Keras models get a text summary (``model.summary``) and a
        ``plot_model`` PDF; torch models get a text summary and a hiddenlayer
        graph PDF, but only when an ``fe_input_spec`` is available to build a
        dummy input. Any successfully rendered PDF is embedded as a labeled,
        captioned figure at the end of the subsection.
        """
        with self.doc.create(Section("Models")):
            for model in humansorted(self.system.network.models,
                                     key=lambda m: m.model_name):
                if not isinstance(model, (tf.keras.Model, torch.nn.Module)):
                    continue
                self.doc.append(NoEscape(r'\FloatBarrier'))
                # Reset for every model: without this, a torch model lacking
                # an fe_input_spec would either raise NameError (first model)
                # or silently re-use the previous model's rendered image.
                file_path = None
                with self.doc.create(Subsection(f"{model.model_name}")):
                    if isinstance(model, tf.keras.Model):
                        # Text Summary
                        summary = []
                        model.summary(line_length=92,
                                      print_fn=lambda x: summary.append(x))
                        summary = "\n".join(summary)
                        self.doc.append(Verbatim(summary))
                        with self.doc.create(Center()):
                            self.doc.append(
                                HrefFEID(FEID(id(model)), model.model_name))

                        # Visual Summary
                        # noinspection PyBroadException
                        try:
                            file_path = os.path.join(
                                self.figure_dir,
                                f"FE_Model_{model.model_name}.pdf")
                            tf.keras.utils.plot_model(model,
                                                      to_file=file_path,
                                                      show_shapes=True,
                                                      expand_nested=True)
                            # TODO - cap output image size like in the pytorch implementation in case of huge network
                            # TODO - save raw .dot file in case system lacks graphviz
                        except Exception:
                            # Visualization is best-effort (e.g. graphviz may
                            # be missing); fall back to text-only output.
                            file_path = None
                            print(
                                f"FastEstimator-Warn: Model {model.model_name} could not be visualized by Traceability"
                            )
                    elif isinstance(model, torch.nn.Module):
                        if hasattr(model, 'fe_input_spec'):
                            # Text Summary
                            # noinspection PyUnresolvedReferences
                            inputs = model.fe_input_spec.get_dummy_input()
                            self.doc.append(
                                Verbatim(pms.summary(model, inputs)))
                            with self.doc.create(Center()):
                                self.doc.append(
                                    HrefFEID(FEID(id(model)),
                                             model.model_name))

                            # Visual Summary
                            # Import has to be done while matplotlib is using the Agg backend
                            old_backend = matplotlib.get_backend() or 'Agg'
                            matplotlib.use('Agg')
                            # noinspection PyBroadException
                            try:
                                # Fake the IPython import when user isn't running from Jupyter
                                sys.modules.setdefault('IPython', MagicMock())
                                sys.modules.setdefault('IPython.display',
                                                       MagicMock())
                                import hiddenlayer as hl
                                with Suppressor():
                                    graph = hl.build_graph(model, inputs)
                                graph = graph.build_dot()
                                graph.attr(
                                    rankdir='TB'
                                )  # Switch it to Top-to-Bottom instead of Left-to-Right
                                graph.attr(
                                    size="200,200"
                                )  # LaTeX \maxdim is around 575cm (226 inches)
                                graph.attr(margin='0')
                                # TODO - save raw .dot file in case system lacks graphviz
                                file_path = graph.render(
                                    filename=f"FE_Model_{model.model_name}",
                                    directory=self.figure_dir,
                                    format='pdf',
                                    cleanup=True)
                            except Exception:
                                file_path = None
                                print(
                                    "FastEstimator-Warn: Model {} could not be visualized by Traceability"
                                    .format(model.model_name))
                            finally:
                                # Always restore the caller's backend.
                                matplotlib.use(old_backend)
                        else:
                            self.doc.append(
                                "This model was not used by the Network during training."
                            )
                    if file_path:
                        # Embed the rendered diagram with a link target so the
                        # rest of the report can reference this model.
                        with self.doc.create(Figure(position='ht!')) as fig:
                            fig.append(
                                Label(
                                    Marker(name=str(FEID(id(model))),
                                           prefix="model")))
                            fig.add_image(
                                os.path.relpath(file_path,
                                                start=self.save_dir),
                                width=NoEscape(
                                    r'1.0\textwidth,height=0.95\textheight,keepaspectratio'
                                ))
                            fig.add_caption(
                                NoEscape(
                                    HrefFEID(FEID(id(model)),
                                             model.model_name).dumps()))
Exemple #14
0
def generate_latex_report(match_table, predicted_types, study_name):
    """Build a PDF report ('report.pdf') summarizing a new cBioPortal study.

    Args:
        match_table: DataFrame-like table of candidate attribute matches;
            its column labels become the header row, its rows the matches.
        predicted_types: Optional DataFrame-like table of patient/sample
            type predictions; the corresponding subsection is skipped when
            this is None.
        study_name: Name of the new study, echoed in the report text.
    """
    from pylatex import NoEscape, Document, Section, Subsection, Tabular, Math, TikZ, Axis, Plot, Figure, Matrix, Alignat
    from pylatex.utils import italic
    import os

    # Distribution images are expected in the current working directory.
    file_directory = ''

    geometry_options = {
        "tmargin": "1in",
        "lmargin": "1in",
        "bmargin": "1in",
        "rmargin": "1in"
    }
    doc = Document(geometry_options=geometry_options)

    with doc.create(Section('cBioPortal new study report')):
        doc.append('Report for study: ' + study_name)

        with doc.create(Subsection('Attribute matches')):
            doc.append(
                '  Below are the possible matches between attributes from existing data on the cBioPortal and the new study.'
            )
            doc.append(
                '  The metric used to detect each match is denoted by the symbols follwing the attribute name of the match.'
            )
            doc.append(
                '  Additionally, the number of studies in which the matching attribute occurs is given to indicate how popular the attribute is among existing studies.'
            )
            if predicted_types is not None:
                doc.append(
                    '  In the second table, predictions are given as to whether an attribute is a patient or sample attribute.'
                )
                doc.append(
                    '  The sample/patient prediction is based on what is most common for that particular attribute in the existing cBioPortal studies.'
                )
            doc.append(NoEscape(r'\\'))
            doc.append(NoEscape(r'\\'))

            with doc.create(Tabular('|c|c|')) as table:
                table.add_hline()
                table.add_row(list(match_table))  # header row: column labels
                table.add_hline()
                table.add_hline()

                for row in match_table.index:
                    table.add_row(list(match_table.loc[row, :]))
                    table.add_hline()
        doc.append(
            NoEscape(
                r'\string^ represents matches found based on the attribute names\\'
            ))
        doc.append(
            NoEscape(
                r'\string* represents matches found based on clustering of the attribute values\\'
            ))
        doc.append(
            NoEscape(
                r'NOTE: PATIENT_ID and SAMPLE_ID are omitted as they should be present in every study.\\'
            ))

        if predicted_types is not None:
            with doc.create(Subsection('Matching attribute types')):
                with doc.create(Tabular('|c|c|')) as table:
                    table.add_hline()
                    table.add_row(list(predicted_types))
                    table.add_hline()
                    table.add_hline()

                    # Data rows are added inside the Tabular context, matching
                    # the attribute-match table above (they were previously
                    # appended outside the 'with' block).
                    for row in predicted_types.index:
                        table.add_row(list(predicted_types.loc[row, :]))
                        table.add_hline()

        doc.append(NoEscape(r'\newpage'))

        with doc.create(Subsection('Number of attribute distributions')):
            with doc.create(Figure(position='h!')) as n_attributes:
                n_attributes.add_image(file_directory +
                                       'n_attribute_distribution.png',
                                       width=NoEscape(r'0.99\textwidth'))
                n_attributes.add_caption(
                    'Comparison between the number of attributes in the new study and the number of attributes in each existing study on cBioPortal.  The dashed black line indicates the number of attributes in the new study, while the histogram shows the data for existing cBioPortal studies.'
                )

        #with doc.create(Subsection('Number of unique attributes')):
            with doc.create(Figure(position='h!')) as n_unique_attributes:
                n_unique_attributes.add_image(
                    file_directory + 'n_unique_attribute_distribution.png',
                    width=NoEscape(r'0.99\textwidth'))
                n_unique_attributes.add_caption(
                    'Comparison between the number of unique attributes in the new study and the number of unique attributes in each existing study on cBioPortal.  The dashed black line indicates the number of unqiue attributes in the new study, while the histogram shows the data for existing cBioPortal studies.'
                )

        #with doc.create(Subsection('Number of common attributes')):
            with doc.create(Figure(position='h!')) as n_common_attributes:
                n_common_attributes.add_image(
                    file_directory + 'n_common_attribute_distribution.png',
                    width=NoEscape(r'0.99\textwidth'))
                n_common_attributes.add_caption(
                    'Comparison between the number of common attributes in the new study and the number of common attributes in each existing study on cBioPortal.  The dashed black line indicates the number of common attributes in the new study, while the histogram shows the data for existing cBioPortal studies.'
                )

    doc.generate_pdf('report', clean_tex=True)
Exemple #15
0
# begin-doc-include
import numpy as np

from pylatex import Document, Section, Subsection, Tabular, Math, TikZ, Axis, \
    Plot, Figure, Matrix
from pylatex.utils import italic
import os

if __name__ == '__main__':
    # Path to the demo image shipped next to this script.
    image_filename = os.path.join(os.path.dirname(__file__), 'kitten.jpg')

    geometry_options = {"tmargin": "1cm", "lmargin": "10cm"}
    doc = Document(geometry_options=geometry_options)

    with doc.create(Section('The simple stuff')):
        doc.append('Some regular text and some')
        doc.append(italic('italic text. '))
        doc.append('\nAlso some crazy characters: $&#{}')
        with doc.create(Subsection('Math that is incorrect')):
            # Deliberately wrong equation, just to demonstrate Math rendering.
            doc.append(Math(data=['2*3', '=', 9]))

        with doc.create(Subsection('Table of something')):
            # 'rc|cl': four columns with a vertical rule between columns 2 and 3.
            with doc.create(Tabular('rc|cl')) as table:
                table.add_hline()
                table.add_row((1, 2, 3, 4))
                table.add_hline(1, 2)  # partial rule spanning columns 1-2 only
                table.add_empty_row()
                table.add_row((4, 5, 6, 7))

    # Column vector used by the numpy examples that follow (truncated here).
    a = np.array([[100, 10, 20]]).T
Exemple #16
0
__author__ = 'cprudhom'
import numpy as np

from pylatex import Document, Section, Subsection, Math
from pylatex.numpy import Matrix, VectorName

# Column vector used throughout the examples below.
a = np.array([[100, 10, 20]]).T

doc = Document()
section = Section('Numpy tests')
subsection = Subsection('Array')

# Render the vector as a LaTeX matrix: "a = (...)".
vec = Matrix(a)
vec_name = VectorName('a')
math = Math(data=[vec_name, '=', vec])

subsection.append(math)
section.append(subsection)

subsection = Subsection('Matrix')
# np.matrix is deprecated; a plain 2-D ndarray holds the same data.
M = np.array([[2, 3, 4], [0, 0, 1], [0, 0, 2]])
matrix = Matrix(M, mtype='b')
math = Math(data=['M=', matrix])

subsection.append(math)
section.append(subsection)

subsection = Subsection('Product')

# '@' is the matrix-product operator for ndarrays (same result the old
# np.matrix '*' produced for these operands).
math = Math(data=['M', vec_name, '=', Matrix(M @ a)])
subsection.append(math)
Exemple #17
0
from pylatex.package import Package
from pylatex import Document, Section
from pylatex.utils import NoEscape


class AllTT(Environment):
    """A class to wrap LaTeX's alltt environment."""

    # The alltt package must be loaded for the environment to exist.
    packages = [Package('alltt')]
    # Content is inserted verbatim: no LaTeX escaping of special characters.
    escape = False
    # Join appended items with newlines so input line breaks survive in the output.
    content_separator = "\n"


# Create a new document
doc = Document()
with doc.create(Section('Wrapping Latex Environments')):
    doc.append(
        NoEscape(r"""
        The following is a demonstration of a custom \LaTeX{}
        command with a couple of parameters.
        """))

    # Put some data inside the AllTT environment
    with doc.create(AllTT()):
        verbatim = ("This is verbatim, alltt, text.\n\n\n"
                    "Setting \\underline{escape} to \\underline{False} "
                    "ensures that text in the environment is not\n"
                    "subject to escaping...\n\n\n"
                    "Setting \\underline{content_separator} "
                    "ensures that line endings are broken in\n"
                    "the latex just as they are in the input text.\n"
Exemple #18
0
def make_pdf_report():
    """ assume profile available """

    pieces = gd.sessionInfo['Pieces']
    cntrating = [cr for nm, x, cr, tl in pieces]

    # we need a (single) temp dir for intermediates.
    tmpdir = Path(tempfile.gettempdir()) / 'RtcApp'
    if not tmpdir.is_dir():
        tmpdir.mkdir()
    # subdir
    if not reportsDir().is_dir():
        reportsDir().mkdir()

    reportfile = reportsDir() / gd.config['Session']

    crewname = gd.metaData['CrewName']

    geometry_options = {
        "top": "5mm",
        "bottom": "5mm",
        "right": "5mm",
        "left": "5mm"
    }
    doc = Document(documentclass='article',
                   geometry_options=geometry_options,
                   document_options=["12pt"])

    doc.preamble.append(NoEscape(r'\usepackage{graphicx}'))

    # see https://doc.qt.io/qt-5/qml-color.html for colors
    doc.append(NoEscape(r'\definecolor{aquamarine}{HTML}{7fffd4}'))
    doc.append(NoEscape(r'\definecolor{gainsboro}{HTML}{dcdcdc}'))

    #   First page
    with doc.create(
            Section(f'Boat report {gd.metaData["CrewName"]}',
                    numbering=False)):

        r = gd.metaData["Rowers"]
        rwrcnt = gd.sessionInfo['RowerCnt']
        if rwrcnt == 1:
            doc.append('Rowers: ')
            doc.append(f'{r[0][0]} ')
        else:
            doc.append('Rowers from bow: ')
            for i in range(rwrcnt):
                doc.append(f'{r[i][0]}, ')
        doc.append(NewLine())
        doc.append(f'Boattype: {gd.metaData["BoatType"]}\n')
        doc.append(f'Calibration: {gd.metaData["Calibration"]}\n')
        doc.append(f'Misc: {gd.metaData["Misc"]}\n')
        doc.append(f'Powerline: {gd.metaData["PowerLine"]}\n')
        doc.append(f'Venue: {gd.metaData["Venue"]}\n')
        doc.append(VerticalSpace("5pt"))
        doc.append(NewLine())

        # get table from boat report
        rows = gd.boattablemodel.rowCount()
        columns = gd.boattablemodel.columnCount()
        boattab = 'l|' + ''.join(['r' for i in range(columns - 1)]) + '|'
        with doc.create(Tabular(boattab)) as table:
            table.add_hline()
            row = []
            for j in range(columns):
                index = QAbstractTableModel.index(gd.boattablemodel, 0, j)
                row.append(str(gd.boattablemodel.data(index)))
            table.add_row(row, color='aquamarine')
            table.add_hline()

            cnt = 0
            for i in range(rows):
                row = []
                if i == 0:
                    continue
                for j in range(columns):
                    index = QAbstractTableModel.index(gd.boattablemodel, i, j)
                    row.append(str(gd.boattablemodel.data(index)))
                if cnt % 2 == 0:
                    table.add_row(row, color='gainsboro')
                else:
                    table.add_row(row, color='aquamarine')
                cnt += 1
            table.add_hline()
            """

            table.add_empty_row()
            table.add_row((4, 5, 6, 7))
            """
    doc.append(NewPage())

    # for the plots
    fontP = FontProperties()
    fontP.set_size('xx-small')

    # Second page
    with doc.create(
            Section(f'Boat report {gd.metaData["CrewName"]}',
                    numbering=False)):

        av = ''
        filt = ''
        if gd.averaging:
            av = 'averaging'
        if gd.filter:
            filt = 'filtered'
        pcs = ['all'] + gd.p_names + ['average']
        doc.append(f'Using piece "{pcs[gd.boatPiece]}": {av} {filt}\n')
        doc.append(VerticalSpace("5pt"))
        doc.append(NewLine())

        sensors = gd.sessionInfo['Header']
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)

        ax1.set_title('Speed')
        ax1.grid(True)
        ax2.set_title('Acceleration')
        ax2.grid(True)
        ax3.set_title('Pitch')
        ax3.grid(True)
        ax4.set_title('Accel-Tempo per Piece')
        ax4.grid(True)

        piece = gd.boatPiece
        if piece == 0:
            for i in range(len(gd.p_names)):
                ax1.plot(gd.norm_arrays[i, :, sensors.index('Speed')],
                         linewidth=0.6,
                         label=gd.p_names[i])
                ax2.plot(gd.norm_arrays[i, :, sensors.index('Accel')],
                         linewidth=0.6,
                         label=gd.p_names[i])
                ax3.plot(gd.norm_arrays[i, :,
                                        sensors.index('Pitch Angle')],
                         linewidth=0.6,
                         label=gd.p_names[i])
        elif piece == 7:
            speed = np.zeros(gd.norm_arrays[0, :, 1].shape)
            accel = np.zeros(gd.norm_arrays[0, :, 1].shape)
            pitch = np.zeros(gd.norm_arrays[0, :, 1].shape)
            for i in range(len(gd.p_names)):
                speed += gd.norm_arrays[i, :, sensors.index('Speed')]
                accel += gd.norm_arrays[i, :, sensors.index('Accel')]
                pitch += gd.norm_arrays[i, :, sensors.index('Pitch Angle')]
            ax1.plot(speed / 6, linewidth=0.6, label=gd.p_names[i])
            ax2.plot(accel / 6, linewidth=0.6, label=gd.p_names[i])
            ax3.plot(pitch / 6, linewidth=0.6, label=gd.p_names[i])
        else:
            i = piece - 1
            ax1.plot(gd.norm_arrays[i, :, sensors.index('Speed')],
                     linewidth=0.6,
                     label=gd.p_names[i])
            ax2.plot(gd.norm_arrays[i, :, sensors.index('Accel')],
                     linewidth=0.6,
                     label=gd.p_names[i])
            ax3.plot(gd.norm_arrays[i, :, sensors.index('Pitch Angle')],
                     linewidth=0.6,
                     label=gd.p_names[i])

        pa = []
        for i in range(len(gd.p_names)):
            # accel and tempo per piece
            d, a = gd.prof_data[i]
            pa.append((d['Speed'], cntrating[i][1]))
        pa = list(zip(*pa))
        p = [10 * x for x in pa[0]]  # ad hoc scaling
        ax4.scatter(list(range(len(gd.p_names))), p, marker='H', color='green')
        ax4.scatter(list(range(len(gd.p_names))),
                    pa[1],
                    marker='H',
                    color='blue')

        ax1.legend(loc='lower right', prop=fontP)
        plt.tight_layout()

        tmpfig = tmpdir / gd.config['Session']
        plt.savefig(tmpfig)
        tmpfig = re.sub('\\\\', '/', str(tmpfig))  # for windows
        doc.append(
            NoEscape(r'\includegraphics[width=1.0\textwidth]{' + f'{tmpfig}' +
                     r'}'))
        plt.close(fig)

    ##   Third page
    doc.append(NewPage())
    with doc.create(Section('Crew report', numbering=False)):
        pcs = gd.p_names + ['average']
        doc.append(f'Piece "{pcs[gd.crewPiece]}" used.\n')

        fig = plt.figure()
        fig.subplots_adjust(hspace=0.7)
        gs = fig.add_gridspec(5, 2)
        ax1 = fig.add_subplot(gs[0:3, :])
        ax2 = fig.add_subplot(gs[3:, 0])
        ax3 = fig.add_subplot(gs[3:, 1])

        ax1.set_title('Gate Angle - GateForceX/Y')
        ax1.grid(True)
        ax2.set_title('Stretcher ForceX')
        ax2.grid(True)
        ax3.set_title('Power')
        ax3.grid(True)

        rcnt = gd.sessionInfo['RowerCnt']
        piece = gd.crewPiece
        if piece < len(gd.prof_data):
            # a seperate piece, from the tumbler
            cp = gd.crewPiece
            d, aa = gd.prof_data[cp]

            for r in range(rcnt):
                sns = rowersensors(r)
                if gd.sessionInfo['ScullSweep'] == 'sweep':
                    i = sns['GateAngle']
                    j = sns['GateForceX']
                    k = sns['GateForceY']
                else:
                    i = sns['P GateAngle']
                    j = sns['P GateForceX']
                    k = sns['P GateForceY']

                # stretchers not always present!
                # k = sns['Stretcher Z']
                # todo: create switch to control working in this case

                ax1.plot(gd.norm_arrays[cp, :, i],
                         gd.norm_arrays[cp, :, j],
                         linewidth=0.6,
                         label=f'R {r+1}')
                ax1.plot(gd.norm_arrays[cp, :, i],
                         gd.norm_arrays[cp, :, k],
                         linestyle=stippel,
                         linewidth=0.6,
                         label=f'R {r+1}Y')

                #twee = self.ax2.plot(gd.norm_arrays[gd.crewPiece, :, i], linewidth=0.6, label=f'R {r+1}')

                ax3.plot([gd.gmin[gd.crewPiece]], [0], marker='v', color='b')
                ax3.plot([gd.gmax[gd.crewPiece]], [0], marker='^', color='b')

                # reference curve derived from the stroke
                sns = rowersensors(rcnt - 1)
                fmean = d[rcnt - 1]['GFEff']
                if gd.sessionInfo['ScullSweep'] == 'sweep':
                    i = sns['GateAngle']
                    j = sns['GateForceX']
                else:
                    i = sns['P GateAngle']
                    j = sns['P GateForceX']
                minpos = min(gd.norm_arrays[cp, :, i])
                maxpos = max(gd.norm_arrays[cp, :, i])
                minarg = np.argmin(gd.norm_arrays[cp, :, i])
                maxarg = np.argmax(gd.norm_arrays[cp, :, i])
                fmin = gd.norm_arrays[cp, minarg, j]
                fmax = gd.norm_arrays[cp, maxarg, j]
                xstep = (maxpos - minpos) / 20
                ystep = (fmin - fmax) / 20  # assume fmin > fmax

                if gd.sessionInfo['ScullSweep'] == 'sweep':
                    xref = np.array([
                        minpos, minpos + 0.4 * xstep, minpos + 2 * xstep,
                        minpos + 5 * xstep, minpos + 7 * xstep,
                        minpos + 9 * xstep, minpos + 11 * xstep,
                        minpos + 14 * xstep, minpos + 16 * xstep,
                        minpos + 20 * xstep
                    ])
                    yref = np.array([
                        fmin, fmin + 20, 1.1 * fmean, 1.6 * fmean,
                        1.65 * fmean, 1.7 * fmean, 1.6 * fmean, 1.25 * fmean,
                        0.8 * fmean, fmax
                    ])
                else:
                    xref = np.array([
                        minpos, minpos + 0.4 * xstep, minpos + 2 * xstep,
                        minpos + 5 * xstep, minpos + 7 * xstep,
                        minpos + 9 * xstep, minpos + 11 * xstep,
                        minpos + 14 * xstep, minpos + 16 * xstep,
                        minpos + 20 * xstep
                    ])
                    yref = np.array([
                        fmin, fmin + 20, 1.1 * fmean, 1.6 * fmean,
                        1.65 * fmean, 1.7 * fmean, 1.6 * fmean, 1.25 * fmean,
                        0.8 * fmean, fmax
                    ])

                curveref = make_interp_spline(xref, yref, 2)
                xrefnew = np.linspace(min(xref), max(xref),
                                      int(maxpos - minpos))

                ax1.plot(xrefnew,
                         curveref(xrefnew),
                         color='black',
                         linewidth=0.5,
                         linestyle=(0, (3, 6)))
                ax3.plot(aa[0 + r], linewidth=0.6, label=f'R {r+1}')
        else:
            # average
            for r in range(rcnt):
                sns = rowersensors(r)
                if gd.sessionInfo['ScullSweep'] == 'sweep':
                    i = sns['GateAngle']
                    j = sns['GateForceX']
                else:
                    i = sns['P GateAngle']
                    j = sns['P GateForceX']
                # stretchers not always available!
                # k = sns['Stretcher Z']

                # average
                nmbrpieces = len(gd.p_names)
                angle = np.zeros((100, ))
                force = np.zeros((100, ))
                power = np.zeros((100, ))
                for p in range(nmbrpieces):
                    angle += gd.norm_arrays[p, :, i]
                    force += gd.norm_arrays[p, :, j]
                    # stretcherZ = gd.norm_arrays[p, :, k]
                    d, a = gd.prof_data[p]
                    power += aa[0 + r]

                # plot
                #ax1.plot(angle/nmbrpieces, linewidth=0.6, label=f'R {r+1}')
                #ax2.plot(force/nmbrpieces, linewidth=0.6, label=f'R {r+1}')

                ax3.plot(power / nmbrpieces, linewidth=0.6, label=f'R {r+1}')

        ax3.legend(loc='upper right', prop=fontP)
        plt.tight_layout()

        # we keep using the same name
        tmpfig = tmpdir / (gd.config['Session'] + '_crew')
        plt.savefig(tmpfig)
        tmpfig = re.sub('\\\\', '/', str(tmpfig))  # for windows
        doc.append(
            NoEscape(r'\includegraphics[width=1.0\textwidth]{' + f'{tmpfig}' +
                     r'}'))
        plt.close(fig)

    # Rower pages
    doc.append(NewPage())

    rwrcnt = gd.sessionInfo['RowerCnt']
    fig = [None for i in range(rwrcnt)]
    rax1 = [None for i in range(rwrcnt)]
    sax1 = [None for i in range(rwrcnt)]

    for rwr in range(rwrcnt):
        pcs = ['all'] + gd.p_names + ['average']
        with doc.create(
                Section(
                    f'Rower: {gd.metaData["Rowers"][rwr][0]}, using piece "{pcs[gd.rowerPiece[rwr]]}"',
                    numbering=False)):

            rows = gd.rowertablemodel[rwr].rowCount()
            columns = gd.rowertablemodel[rwr].columnCount()
            rowertab = 'l|' + ''.join(['r' for i in range(columns - 1)]) + '|'
            with doc.create(Tabular(rowertab)) as table:
                table.add_hline()
                row = []
                for j in range(columns):
                    index = QAbstractTableModel.index(gd.rowertablemodel[rwr],
                                                      0, j)
                    row.append(str(gd.rowertablemodel[rwr].data(index)))
                table.add_row(row, color='aquamarine')
                table.add_hline()

                cnt = 0
                for i in range(rows):
                    row = []
                    if i == 0:
                        continue
                    for j in range(columns):
                        index = QAbstractTableModel.index(
                            gd.rowertablemodel[rwr], i, j)
                        row.append(str(gd.rowertablemodel[rwr].data(index)))
                    if cnt % 2 == 0:
                        table.add_row(row, color='gainsboro')
                    else:
                        table.add_row(row, color='aquamarine')
                    cnt += 1
                table.add_hline()

            doc.append('\n')

            fig[rwr], ((rax1[rwr])) = plt.subplots(nrows=1, ncols=1)
            rax1[rwr].set_title('GateAngle - GateForceX/Y')
            rax1[rwr].grid(True)

            rsens = rowersensors(rwr)
            piece = gd.rowerPiece[rwr]

            scaleAngle = 10
            if gd.rowerPiece[rwr] == 0:
                # all
                for i in range(len(gd.p_names)):
                    if gd.sessionInfo['ScullSweep'] == 'sweep':
                        # print(f'Make rowerplot for {self.rower}')
                        rax1[rwr].plot(gd.norm_arrays[i, :,
                                                      rsens['GateAngle']],
                                       gd.norm_arrays[i, :,
                                                      rsens['GateForceX']],
                                       linewidth=0.6,
                                       label=f'{gd.p_names[i]}')
                        rax1[rwr].plot(gd.norm_arrays[i, :,
                                                      rsens['GateAngle']],
                                       gd.norm_arrays[i, :,
                                                      rsens['GateForceY']],
                                       linestyle=(0, (7, 10)),
                                       linewidth=0.6,
                                       label=f'{gd.p_names[i]}')
                    else:
                        rax1[rwr].plot(gd.norm_arrays[i, :,
                                                      rsens['P GateAngle']],
                                       gd.norm_arrays[i, :,
                                                      rsens['P GateForceX']],
                                       linewidth=0.6,
                                       label=f'{gd.p_names[i]}')
                        rax1[rwr].plot(gd.norm_arrays[i, :,
                                                      rsens['P GateAngle']],
                                       gd.norm_arrays[i, :,
                                                      rsens['P GateForceY']],
                                       linestyle=(0, (7, 10)),
                                       linewidth=0.6,
                                       label=f'{gd.p_names[i]}')
            elif gd.rowerPiece[rwr] == 7:
                # average
                angle = np.zeros((100, ))
                forceX = np.zeros((100, ))
                if gd.sessionInfo['ScullSweep'] == 'sweep':
                    for i in range(len(gd.p_names)):
                        angle += gd.norm_arrays[i, :, rsens['GateAngle']]
                        forceX += gd.norm_arrays[i, :, rsens['GateForceX']]
                        forceY += gd.norm_arrays[i, :, rsens['GateForceY']]
                    rax1[rwr].plot(angle / 6,
                                   forceX / 6,
                                   linewidth=0.6,
                                   label='FX')
                    rax1[rwr].plot(angle / 6,
                                   forceY / 6,
                                   linestyle=(0, (7, 10)),
                                   linewidth=0.6,
                                   label='FY')
                else:
                    for i in range(len(gd.p_names)):
                        angle += gd.norm_arrays[i, :, rsens['P GateAngle']]
                        forceX += gd.norm_arrays[i, :, rsens['P GateForceX']]
                        forceY += gd.norm_arrays[i, :, rsens['P GateForceY']]
                    rax1[rwr].plot(angle / 6,
                                   forceX / 6,
                                   linewidth=0.6,
                                   label='FX')
                    rax1[rwr].plot(angle / 6,
                                   forceY / 6,
                                   linestyle=(0, (7, 10)),
                                   linewidth=0.6,
                                   label='FY')
            else:
                rp = gd.rowerPiece[rwr] - 1
                sns = rowersensors(rwr)

                # ad hoc angle x 10. Bettet via (max-min). Scale is for force
                # print(f'Create rowerplot for {self.rower}')
                outboat = [d for d, e in gd.prof_data]
                ri = [a[rwr] for a in outboat]  # rower info per piece
                fmean = ri[rp]['GFEff']

                if gd.sessionInfo['ScullSweep'] == 'sweep':
                    i = sns['GateAngle']
                    j = sns['GateForceX']
                    k = sns['GateForceY']
                else:
                    i = sns['P GateAngle']
                    j = sns['P GateForceX']
                    k = sns['P GateForceY']

                # TESTING referentie curve
                # lengte uit tabel? Voorlopig 100, begin goed zetten
                # scale with avarage force
                minpos = min(gd.norm_arrays[rp, :, i])
                maxpos = max(gd.norm_arrays[rp, :, i])
                minarg = np.argmin(gd.norm_arrays[rp, :, i])
                maxarg = np.argmax(gd.norm_arrays[rp, :, i])
                fmin = gd.norm_arrays[rp, minarg, j]
                fmax = gd.norm_arrays[rp, maxarg, j]
                xstep = (maxpos - minpos) / 20
                ystep = (fmin - fmax) / 20  # assume fmin > fmax

                if gd.sessionInfo['ScullSweep'] == 'sweep':
                    xref = np.array([
                        minpos, minpos + 0.4 * xstep, minpos + 2 * xstep,
                        minpos + 5 * xstep, minpos + 7 * xstep,
                        minpos + 9 * xstep, minpos + 11 * xstep,
                        minpos + 14 * xstep, minpos + 16 * xstep,
                        minpos + 20 * xstep
                    ])
                    yref = np.array([
                        fmin, fmin + 20, 1.1 * fmean, 1.6 * fmean,
                        1.65 * fmean, 1.7 * fmean, 1.6 * fmean, 1.25 * fmean,
                        0.8 * fmean, fmax
                    ])
                else:
                    xref = np.array([
                        minpos, minpos + 0.4 * xstep, minpos + 2 * xstep,
                        minpos + 5 * xstep, minpos + 7 * xstep,
                        minpos + 9 * xstep, minpos + 11 * xstep,
                        minpos + 14 * xstep, minpos + 16 * xstep,
                        minpos + 20 * xstep
                    ])
                    yref = np.array([
                        fmin, fmin + 20, 1.1 * fmean, 1.6 * fmean,
                        1.65 * fmean, 1.7 * fmean, 1.6 * fmean, 1.25 * fmean,
                        0.8 * fmean, fmax
                    ])

                curveref = make_interp_spline(xref, yref, 2)
                xrefnew = np.linspace(min(xref), max(xref),
                                      int(maxpos - minpos))

                rax1[rwr].plot(gd.norm_arrays[rp, :, i],
                               gd.norm_arrays[rp, :, j],
                               linewidth=0.6,
                               label=f'{gd.p_names[rp]} FX')
                rax1[rwr].plot(gd.norm_arrays[rp, :, i],
                               gd.norm_arrays[rp, :, k],
                               linestyle=stippel,
                               linewidth=0.6,
                               label=f'{gd.p_names[rp]} FY')
                rax1[rwr].plot(xrefnew,
                               curveref(xrefnew),
                               color='black',
                               linewidth=0.5,
                               linestyle=(0, (3, 6)))

            # rax1[rwr].legend(loc='lower right', prop=fontP, bbox_to_anchor=(1.05, 1))
            rax1[rwr].legend(loc='upper right', prop=fontP)
            plt.tight_layout()

            tmpfig = tmpdir / (gd.config['Session'] + f'_{rwr}')
            plt.savefig(tmpfig)
            tmpfig = re.sub('\\\\', '/', str(tmpfig))  # for windows
            doc.append(
                NoEscape(r'\includegraphics[width=0.9\textwidth]{' +
                         f'{tmpfig}' + r'}'))
            plt.close(fig[rwr])

            if 'StretcherForceX' in sensors:
                doc.append('\n')

                # stretcher plot
                fig[rwr], sax1[rwr] = plt.subplots()
                sax1[rwr].set_title('Stretcher')
                sax1[rwr].grid(True)

                rsens = rowersensors(rwr)
                if gd.rowerPiece[rwr] == 0:
                    # all DOEN WE NIET
                    pass
                elif gd.rowerPiece[rwr] == len(gd.p_names) + 1:
                    # average DOEN WE NIET
                    pass
                else:
                    # a piece (alleen dit)
                    i = gd.rowerPiece[rwr] - 1
                    name, se, nr, sp = pieces[i]
                    sax1[rwr].plot(gd.dataObject[sp[0]:sp[1],
                                                 rsens['StretcherForceX']],
                                   linewidth=0.6,
                                   label='StretcherForceX')
                    sax1[rwr].plot(
                        10 * gd.dataObject[sp[0]:sp[1], rsens['Stretcher RL']],
                        linewidth=0.6,
                        label='Stretcher RL')
                    sax1[rwr].plot(
                        10 * gd.dataObject[sp[0]:sp[1], rsens['Stretcher TB']],
                        linewidth=0.6,
                        label='Stretcher TB')

                    sax1[rwr].legend(loc='lower right', prop=fontP)
                    plt.tight_layout()

            tmpfig = tmpdir / (gd.config['Session'] + f'_{rwr}_s')
            plt.savefig(tmpfig)
            tmpfig = re.sub('\\\\', '/', str(tmpfig))  # for windows
            doc.append(
                NoEscape(r'\includegraphics[width=0.6\textwidth]{' +
                         f'{tmpfig}' + r'}'))

            plt.close(fig[rwr])

            if rwr != rwrcnt - 1:
                doc.append(NewPage())

    # Extra page
    if gd.extraplot:
        doc.append(NewPage())

        fig, extr = plt.subplots()
        s2 = gd.config['Session2']
        if s2 == '':
            extr.set_title('Custom plot')
        else:
            extr.set_title(f'Custom plot (second session: {s2})')
        extr.grid(True)

        # data from update_plot from View piece, can we do this simpler?
        [strt, end, strttime, center, scalex, slist,
         secslist] = gd.extrasettings
        times = list(map(lambda x: x / Hz, list(range(gd.view_tr.shape[0]))))

        for i, name, scaley in slist:
            extr.plot(times,
                      gd.view_tr[:, i] * scaley,
                      linewidth=0.6,
                      label=name)
        for i, name, scale in secslist:
            extr.plot(times,
                      gd.view_tr2[:, i] * scaley,
                      linewidth=0.6,
                      label=name,
                      linestyle=stippel)

        dist = (end - strt)
        xFrom = center - scalex * dist / 2
        xTo = center + scalex * dist / 2

        extr.set_xlim(xFrom, xTo)
        # start at correct beginvalue
        locs = extr.get_xticks()
        ticks = [item + strttime for item in locs]
        extr.set_xticklabels(ticks)
        extr.legend()
        plt.tight_layout()

        # we keep using the same name
        tmpfig = tmpdir / (gd.config['Session'] + '_extra')
        plt.savefig(tmpfig)
        tmpfig = re.sub('\\\\', '/', str(tmpfig))  # for windows
        doc.append(
            NoEscape(r'\includegraphics[width=1.0\textwidth]{' + f'{tmpfig}' +
                     r'}'))
        plt.close(fig)

        doc.append(NewLine())
        doc.append(VerticalSpace("10pt"))
        doc.append(f' Piece: {gd.selPiece}')
        if gd.sd_selPiece != '':
            doc.append(NewLine())
            doc.append(VerticalSpace("5pt"))
            doc.append(f'Secondary piece: {gd.sd_selPiece}')

    # generate report
    doc.generate_pdf(reportfile, clean_tex=True)
Exemple #19
0
def report(context, json_report, json_varreport, rulegraph_img):
    """Build the BALSAMIC LaTeX/PDF delivery report for one sample.

    Parameters
    ----------
    context : object
        Click-style context passed by the CLI wrapper (not read here; kept
        for signature compatibility with the caller).
    json_report : str
        Path to the sample analysis JSON config.
    json_varreport : str
        Path (resolved via ``get_config``) to the variant-report JSON config
        describing the MVL filters.
    rulegraph_img : str
        Path to the pipeline DAG image; when falsy, the path stored in the
        sample config (``analysis.dag``) is used instead.

    Side effects: creates a ``delivery_report`` directory under the analysis
    dir, writes .tex/.genelist artifacts, and shells out to external R
    scripts and pdflatex.
    """

    config = json_report
    # Use context managers so the JSON file handles are closed promptly
    # (previously json.load(open(...)) leaked both descriptors).
    with open(json_report) as fh:
        sample_config = json.load(fh)
    with open(get_config(json_varreport)) as fh:
        var_config = json.load(fh)

    tex_path = os.path.abspath(
        os.path.join(sample_config["analysis"]["analysis_dir"],
                     "delivery_report"))

    if not rulegraph_img:
        rulegraph_img = sample_config['analysis']['dag']

    os.makedirs(tex_path, exist_ok=True)

    # Page geometry: tall header area for the logo + metadata table.
    geometry_options = {
        "head": "40pt",
        "headheight": "130pt",
        "headsep": "1cm",
        "margin": "1.5cm",
        "bottom": "1.5cm",
        "includeheadfoot": True
    }
    doc = Document(geometry_options=geometry_options)

    doc.packages.append(Package('lscape'))
    doc.packages.append(Package('longtable'))
    doc.packages.append(Package('float'))
    doc.packages.append(Package('caption', options='labelfont=bf'))
    doc.append(
        NoEscape(
            r'\captionsetup[table]{labelsep=space, justification=raggedright, singlelinecheck=off}'
        ))

    # Header page style: logo on the left, sample/analysis metadata on the
    # right.
    first_page = PageStyle("header", header_thickness=1)

    with first_page.create(Head("C")) as mid_header:

        with mid_header.create(
                MiniPage(width=NoEscape(r"0.2\textwidth"),
                         pos='c')) as logo_wrapper:
            # NOTE: 'assests' is the directory's actual (misspelled) name in
            # the repo; do not "fix" the path here.
            logo_file = os.path.join(os.path.dirname(__file__), '..',
                                     'assests/cg.png')
            logo_wrapper.append(
                StandAloneGraphic(image_options="width=50px",
                                  filename=logo_file))

        with mid_header.create(
                Tabularx(
                    "p{3cm} p{2cm} X X p{4cm} p{3cm}",
                    width_argument=NoEscape(r"0.8\textwidth"))) as mid_table:
            mid_table.add_row(
                [MultiColumn(6, align='r', data=simple_page_number())])
            mid_table.add_row([
                MultiColumn(6,
                            align='c',
                            data=MediumText("Molecular report on"))
            ])
            mid_table.add_row([
                MultiColumn(6,
                            align='c',
                            data=MediumText(get_sample_name(config)))
            ])
            mid_table.add_empty_row()
            mid_table.add_row([
                'gender', "NA", " ", " ", 'Sample recieved:',
                sample_config['analysis']['date']['sample_received']
            ])
            mid_table.add_row([
                'tumor type', "NA", " ", " ", 'Analysis completion:',
                sample_config['analysis']['date']['analysis_finish']
            ])
            mid_table.add_row([
                'analysis type', "NA", " ", " ", 'PDF Report date:',
                datetime.now().strftime("%Y-%m-%d %H:%M")
            ])
            mid_table.add_row(
                ['sample type', "NA", " ", " ", 'Delivery date', "NA"])
            mid_table.add_row([
                'sample origin', "NA", " ", " ", 'Analysis:',
                r'BALSAMIC v' + sample_config['analysis']['BALSAMIC']
            ])

    doc.preamble.append(first_page)

    #End First page

    #    doc.preamble.append(
    #        Command(
    #            'title',
    #            NoEscape(r'BALSAMIC v' + sample_config["analysis"]["BALSAMIC"] +
    #                     r'\\ \large Developer Report')))
    #    doc.preamble.append(
    #        Command('author', 'Patient ID: ' + get_sample_name(config)))
    #    doc.preamble.append(Command('date', NoEscape(r'\today')))
    #    doc.append(NoEscape(r'\maketitle'))
    doc.change_document_style("header")

    with doc.create(Section(title='Analysis report', numbering=True)):

        with doc.create(
                Subsection('Summary of variants and variant callers',
                           numbering=True)):
            doc.append(
                "Placeholder for text about BAM alignment metrics and variant callers. Here comes the info on reads, "
                +
                "QC metrics, align metrics, and general sample information. preferabily in table format."
            )
            doc.append("\n")

            # One LaTeX summary table per mode, each produced by the R
            # helper script and pasted verbatim into the document.
            summary_tables = ["TMB", "VarClass", "VarCaller", "VarCallerClass"]
            for i in summary_tables:

                shellcmd = [
                    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "..", "..", "scripts/VariantReport.R")
                ]
                shellcmd.extend([
                    "--infile", sample_config["vcf"]["merged"]["SNV"],
                    "--genomeSize", sample_config["bed"]["genome_size"],
                    "--type", "latex", "--mode", i, "--outfile",
                    os.path.join(tex_path,
                                 sample_config['analysis']['sample_id'])
                ])
                print(" ".join(shellcmd))

                outTab = subprocess.check_output(shellcmd)
                doc.append(
                    NoEscape(
                        outTab.decode('utf-8').replace("\\centering",
                                                       "\\small")))
            doc.append(NoEscape(r'\normalsize'))
            doc.append(NewPage())

        with doc.create(Subsection("Summary of MVL report", numbering=True)):
            doc.append(
                "Placeholder for general description of MVL settings. A mention to summary "
                +
                "pipeline, summary of MVL settings. Gene coverage for identified genes should go here. Figures!"
            )

            # Collect per-filter parameters column-wise, then join each list
            # with ';' so the R script receives one packed argument per
            # option.
            cmd_param = defaultdict(list)
            J = defaultdict(list)
            for i in var_config["filters"]:
                cmd_param["TUMOR_DP"].append(
                    var_config["filters"][i]["TUMOR"]["DP"])
                cmd_param["TUMOR_AD"].append(
                    var_config["filters"][i]["TUMOR"]["AD"])
                cmd_param["TUMOR_AFmax"].append(
                    var_config["filters"][i]["TUMOR"]["AF_max"])
                cmd_param["TUMOR_AFmin"].append(
                    var_config["filters"][i]["TUMOR"]["AF_min"])
                cmd_param["TUMOR_inMVL"].append(
                    var_config["filters"][i]["in_mvl"])
                cmd_param["var_type"].append(",".join(
                    ["SNP", "INDEL", "MNP", "OTHER"]))
                cmd_param["varcaller"].append(",".join(
                    var_config["filters"][i]["variantcaller"]))
                cmd_param["ann"].append(
                    ",".join(var_config["filters"][i]["annotation"]["SNV"]) +
                    "," +
                    ",".join(var_config["filters"][i]["annotation"]["INDEL"]))
                # Escape underscores for LaTeX (raw string: avoids the
                # invalid "\_" escape sequence).
                cmd_param["name"].append(i.replace("_", r"\_"))
                cmd_param["outfile_tex"].append(tex_path + "/" + i + ".tex")
                cmd_param["outfile_gene"].append(tex_path + "/" + i +
                                                 ".genelist")

            for i in cmd_param:
                J[i] = ";".join(cmd_param[i])

            shellcmd = [
                os.path.join(os.path.dirname(os.path.abspath(__file__)), "..",
                             "..", "scripts/VariantReport.R")
            ]
            shellcmd.extend([
                "--infile", "'" + sample_config["vcf"]["merged"]["SNV"] + "'",
                "--dp", "'" + J["TUMOR_DP"] + "'", "--tumorad",
                "'" + J["TUMOR_AD"] + "'", "--afmax",
                "'" + J["TUMOR_AFmax"] + "'", "--afmin",
                "'" + J["TUMOR_AFmin"] + "'", "--inMVL",
                "'" + J["TUMOR_inMVL"] + "'", "--exclusiveSets", "TRUE",
                "--vartype", "'" + J["var_type"] + "'", "--varcaller",
                "'" + J["varcaller"] + "'", "--ann", "'" + J["ann"] + "'",
                "--name", "'" + J["name"] + "'", "--type", "latex"
            ])

            # SECURITY NOTE: shell=True with a string-joined command is
            # vulnerable to shell injection if any config value contains
            # shell metacharacters. Kept as-is because the manual single
            # quoting above depends on shell parsing; consider refactoring
            # to a list-based call with shell=False.
            subprocess.check_output(
                " ".join(shellcmd +
                         ["--outfile", "'" + J["outfile_tex"] + "'"]),
                shell=True)

            print(" ".join(shellcmd +
                           ["--outfile", "'" + J["outfile_tex"] + "'"]))
            subprocess.check_output(" ".join(shellcmd + [
                "--outfile", "'" + J["outfile_gene"] + "'", "--exportGene", "T"
            ]),
                                    shell=True)

            for c, i in enumerate(var_config["filters"]):
                with doc.create(
                        Subsubsection(var_config["filters"][i]["name"],
                                      numbering=True)):
                    print(cmd_param["outfile_tex"])
                    fname = cmd_param["outfile_tex"][c]
                    # A (near-)empty .tex file means the filter matched no
                    # variants; 10 bytes is the heuristic threshold.
                    if os.stat(fname).st_size > 10:
                        #get gene list
                        with open(cmd_param["outfile_gene"][c]) as myfile:
                            genes = myfile.read().replace('\n', '')

                        with open(fname, 'r') as myfile:
                            data = myfile.read()  #.replace('\n', '')

                        #doc.append(NoEscape(r'\begin{landscape}'))
                        #longtable instead of tabular makes the table span multiple pages, but the header doesn't span. Occasionally
                        #the alignment also is messed up. There must be a hidden package conflict OR general alignment issues.
                        #doc.append(NoEscape(varreport.replace("{tabular}","{longtable}")))
                        doc.append(
                            NoEscape(
                                data.replace("\\centering", "\\scriptsize")))

                        for s in sample_config["bed"]["exon_cov"]:
                            shellcmd = [
                                os.path.join(
                                    os.path.dirname(os.path.abspath(__file__)),
                                    "..", "scripts/CoverageRep.R")
                            ]
                            shellcmd.extend([
                                "--infile",
                                sample_config["bed"]["exon_cov"][s],
                                "--genename", genes, "--name",
                                s.replace("_", r"\_"), "--type", "latex"
                            ])
                            outCov = subprocess.check_output(shellcmd)

                            doc.append(
                                NoEscape(
                                    outCov.decode('utf-8').replace(
                                        "\\centering", "\\scriptsize")))
                        #doc.append(NoEscape(r'\end{landscape}'))
                    else:
                        doc.append("No variants were found for this filter")

#                doc.append(NoEscape(r'\normalsize'))

            doc.append(NewPage())

        with doc.create(Subsection('Coverage report')):
            for s in sample_config["bed"]["target_cov"]:
                with doc.create(Figure(position='h!')) as cov_img:
                    covplot = ".".join(
                        [os.path.join(tex_path, s), "Coverage.pdf"])
                    shellcmd = [
                        os.path.join(
                            os.path.dirname(os.path.abspath(__file__)), "..",
                            "..", "scripts/CoveragePlot.R")
                    ]
                    shellcmd.extend([
                        "--infile", sample_config["bed"]["target_cov"][s],
                        "--outfile", covplot, "--title",
                        s.replace("_", r"\_")
                    ])
                    subprocess.check_output(shellcmd)
                    cov_img.add_image(covplot, width='450px')
                    cov_img.add_caption('Coverage report for sample ' +
                                        s.replace("_", r"\_"))

            doc.append(NewPage())
        with doc.create(Subsection('Analysis pipeline')):
            with doc.create(Figure(position='h!')) as pipeline_img:
                pipeline_img.add_image(rulegraph_img, width='450px')
                pipeline_img.add_caption('BALSAMIC pipeline')
            doc.append(NewPage())

    with doc.create(Section(title="Appendix", numbering=True)):
        with doc.create(Subsection("MVL settings", numbering=True)):
            # One column per filter plus a leading label column.
            fmt = "p{3cm}" * (len(var_config["filters"]) + 1)
            with doc.create(Tabular(fmt)) as data_table:
                header_row1 = [""]
                for i in var_config["filters"]:
                    header_row1.append(var_config["filters"][i]["name"])
                data_table.add_hline()
                data_table.add_row(header_row1,
                                   mapper=[bold],
                                   color="lightgray")
                data_table.add_hline()
                data_table.add_empty_row()
                column = list(var_config["filters"][next(
                    iter(var_config["filters"]))]["TUMOR"].keys())
                for i in column:
                    row = [i]
                    for j in var_config["filters"]:
                        row.append(var_config["filters"][j]["TUMOR"][i])
                    data_table.add_row(row)

                row = ["MVL"]
                for i in var_config["filters"]:
                    row.append(var_config["filters"][i]["in_mvl"])
                # BUGFIX: this row was previously built but never added, so
                # the "MVL" line was silently missing from the table.
                data_table.add_row(row)

                row = ["Variantcallers"]
                for i in var_config["filters"]:
                    row.append("\n".join(
                        var_config["filters"][i]["variantcaller"]))
                data_table.add_row(row)
                data_table.add_hline()

        with doc.create(
                Subsection("Bioinformatic tool in pipeline", numbering=True)):
            doc.append(
                "The following Bioinformatic tools were used in the analysis:\n\n"
            )
            with doc.create(Tabular("p{4cm}p{4cm}")) as data_table:
                data_table.add_hline()
                conda_env = glob.glob(
                    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 "..", "..", "conda_yaml/*.yaml"))

                pkgs = get_package_split(conda_env)

                data_table.add_row(["Package", "Version"], color="lightgray")
                data_table.add_hline()
                data_table.add_row(
                    ["BALSAMIC", sample_config['analysis']['BALSAMIC']])

                for k, v in pkgs.items():
                    data_table.add_row([k, v])
            doc.append(NewPage())

    print(tex_path)
    doc.generate_tex(os.path.join(tex_path, get_sample_name(config)))
    #    doc.generate_pdf(
    #        os.path.join(tex_path, get_sample_name(config)), clean_tex=False)
    shellcmd = [
        "pdflatex", "-output-directory=" + tex_path,
        os.path.join(tex_path, get_sample_name(config)) + ".tex", "1>",
        "/dev/null"
    ]
    #generate_pdf doesn't run AUX files properly and ends up with incorrect total page numbers. So subprocess for
    #pdflatex is called twice instead. The "1>" redirection relies on
    #shell=True below.

    print(" ".join(shellcmd))
    subprocess.run(" ".join(shellcmd), shell=True)
    subprocess.run(" ".join(shellcmd), shell=True)
 def append_result_drawing(self):
     """Start a new page and append the 3D Mohr-circle drawing section.

     Adds a section titled from the PDF text resources and embeds the
     pre-rendered figure ``figs\\mohrfig`` with its caption.
     """
     doc = self.gen.doc
     labels = self.gen.tpdf
     doc.append(NoEscape(r'\newpage'))
     with doc.create(Section(labels.drawing_circle_3d)):
         with doc.create(Figure(position='H')) as drawing_fig:
             drawing_fig.add_image("figs\\mohrfig", width='500px')
             drawing_fig.add_caption(NoEscape(labels.drawing_circle_3d_label))
Exemple #21
0
def generate_latex_document(trackers: List[Tracker],
                            sequences: List[Sequence],
                            results,
                            storage: Storage,
                            build=False,
                            multipart=True,
                            order=None):
    """Generate a LaTeX report summarizing tracker analysis results.

    Writes either a compiled ``report.pdf`` (when ``build`` is True) or the
    ``report.tex`` sources into ``storage``.

    Args:
        trackers: Trackers covered by the report.
        sequences: Evaluated sequences (kept for interface compatibility;
            not used directly in this function).
        results: Analysis results consumed by ``extract_measures_table`` and
            ``extract_plots``.
        storage: Storage backend receiving all generated files.
        build: Compile the document to PDF when True, otherwise dump the
            ``.tex`` source.
        multipart: Emit tracker symbols and plots as separate files that the
            main document references when True; inline them otherwise.
        order: Optional list of indices giving the tracker display order;
            ``None`` keeps the input order.
    """

    # LaTeX commands (\first, \second, \third) highlighting top-ranked cells.
    order_marks = {1: "first", 2: "second", 3: "third"}

    def format_cell(value, rank):
        # Wrap the formatted value in a highlight command for ranks 1-3.
        cell = format_value(value)
        if rank in order_marks:
            cell = Command(order_marks[rank], cell)
        return cell

    logger = get_logger()

    table_header, table_data, table_order = extract_measures_table(
        trackers, results)

    if order is not None:
        ordered_trackers = [trackers[i] for i in order]
    else:
        ordered_trackers = trackers

    doc = Document(page_numbers=True)

    doc.preamble.append(Package('pgf'))
    doc.preamble.append(Package('xcolor'))
    doc.preamble.append(Package('fullpage'))

    doc.preamble.append(NoEscape(read_resource("commands.tex")))

    # Color-coded rank markers used by format_cell above.
    doc.preamble.append(
        UnsafeCommand('newcommand',
                      r'\first',
                      options=1,
                      extra_arguments=r'{\color{red} #1 }'))
    doc.preamble.append(
        UnsafeCommand('newcommand',
                      r'\second',
                      options=1,
                      extra_arguments=r'{\color{green} #1 }'))
    doc.preamble.append(
        UnsafeCommand('newcommand',
                      r'\third',
                      options=1,
                      extra_arguments=r'{\color{blue} #1 }'))

    if multipart:
        # Tracker symbols go into a standalone file included by the report.
        container = Chunk()
        generate_symbols(container, ordered_trackers)
        with storage.write("symbols.tex") as out:
            container.dump(out)
        doc.preamble.append(Command("input", "symbols.tex"))
    else:
        generate_symbols(doc.preamble, ordered_trackers)

    doc.preamble.append(Command('title', 'VOT report'))
    doc.preamble.append(
        Command('author', 'Toolkit version ' + toolkit_version()))
    doc.preamble.append(Command('date', datetime.datetime.now().isoformat()))
    doc.append(NoEscape(r'\maketitle'))

    if len(table_header[2]) == 0:
        logger.debug("No measures found, skipping table")
    else:

        # Generate data table
        with doc.create(LongTable("l " *
                                  (len(table_header[2]) + 1))) as data_table:
            data_table.add_hline()
            data_table.add_row([" "] + [
                MultiColumn(c[1], data=c[0].identifier)
                for c in merge_repeats(table_header[0])
            ])
            data_table.add_hline()
            data_table.add_row([" "] + [
                MultiColumn(c[1], data=c[0].title)
                for c in merge_repeats(table_header[1])
            ])
            data_table.add_hline()
            data_table.add_row(
                ["Tracker"] +
                [" " + c.abbreviation + " " for c in table_header[2]])
            data_table.add_hline()
            data_table.end_table_header()
            data_table.add_hline()

            for tracker in ordered_trackers:
                data = table_data[tracker]
                # measure_ranks is the per-measure tracker-rank mapping from
                # table_order (the original shadowed the `order` parameter
                # with this loop variable, which was bug-prone).
                data_table.add_row([
                    UnsafeCommand("Tracker",
                                  [tracker.reference, TRACKER_GROUP])
                ] + [
                    format_cell(
                        value, measure_ranks[tracker]
                        if measure_ranks is not None else None)
                    for value, measure_ranks in zip(data, table_order)
                ])

    if order is not None:
        # NOTE(review): assumes order is a permutation of 0..len(order)-1 so
        # that max(order) == len(order) - 1; otherwise this indexing could go
        # out of range -- confirm with callers.
        z_order = [0] * len(order)
        for i, j in enumerate(order):
            z_order[max(order) - i] = j
    else:
        z_order = list(range(len(trackers)))

    plots = extract_plots(trackers, results, z_order)

    for experiment, experiment_plots in plots.items():
        if len(experiment_plots) == 0:
            continue

        doc.append(Section("Experiment " + experiment.identifier))

        for title, plot in experiment_plots:

            with doc.create(Figure(position='htbp')) as container:
                if multipart:
                    # Save each plot as a standalone PDF and reference it.
                    plot_name = plot.identifier + ".pdf"
                    with storage.write(plot_name, binary=True) as out:
                        plot.save(out, "PDF")
                    container.add_image(plot_name)
                else:
                    container.append(insert_figure(plot))
                container.add_caption(title)

    if build:
        temp = tempfile.mktemp()
        logger.debug("Generating to temporary output %s", temp)
        doc.generate_pdf(temp, clean_tex=True)
        storage.copy(temp + ".pdf", "report.pdf")
    else:
        with storage.write("report.tex") as out:
            doc.dump(out)
    .then(lambda words : " ".join(words))\
    .then(lambda tidbit : NoEscape(tidbit))

# Produces pure word or math sentences.
# Draws from WordSentenceProducer with weight 0.8 and MathSentenceProducer
# with weight 0.2; ListProducer yields a one-element sample, hence the
# lst[0] unwrap.  End punctuation is appended only on one branch of
# BoolCoinFlip (presumably a random boolean -- verify), and the result is
# wrapped in NoEscape so LaTeX markup survives rendering.
SentenceProducer = ListProducer([WordSentenceProducer, MathSentenceProducer],
                                [0.8, 0.2])\
    .then(lambda lst : lst[0])\
    .then(lambda sentence : sentence if BoolCoinFlip() else sentence + EndPunctuationProducer())\
    .then(lambda sentence : NoEscape(sentence))

#endregion

#region REGIONS: SECTIONS, SUBSECTIONS, ETC

# Produces unnumbered (label=False) Section headers from short tidbits.
SectionProducer = TidbitProducer\
    .then(lambda tidbit : Section(tidbit, label=False))

#endregion

#region COLLECTIONS: PARAGRAPHS, LISTS, SE

# Produces paragraphs (contain words or math).
# The sentence count is |N(mu=5, sigma=2)| truncated to an int, so a
# paragraph may occasionally contain zero sentences.
ParagraphProducer = NormalProducer(mu=5, sigma=2)\
    .then(lambda x: int(abs(x)))\
    .then(lambda num_sentences : [SentenceProducer() for i in range(num_sentences)])\
    .then(lambda sentences : " ".join(sentences))\
    .then(lambda paragraph : NoEscape(paragraph))

# To produce items for a list
ListItemProducer = ListProducer(
    [EquationProducer, TidbitProducer, SentenceProducer, ParagraphProducer],
Exemple #23
0
    nvertices_3D = text[53:58]
    errors_3D = text[58:63]
    coeff_3D = text[63:67]
    time_3D = text[67:72]

    weak_scaling_3D = text[72:77]
    strong_scaling_2D = text[77:82]

    doc = Document('basic')

    doc.preamble.append(Command('title', 'Continuous Integration'))
    doc.preamble.append(Command('author', 'MSO4SC'))
    doc.preamble.append(Command('date', NoEscape(r'\today')))
    doc.append(NoEscape(r'\maketitle'))

    with doc.create(Section('2D 1 process')):

        with doc.create(Subsection('N Vertices - Convergence rate')):
            plotty(nvertices_2D_5, errors_2D_1, 'estimate', 4)

        with doc.create(Subsection('N Vertices - Errors')):
            plotty(nvertices_2D_5, errors_2D_1, 'errors')

        with doc.create(Subsection('N Vertices - Total time')):
            plotty(nvertices_2D_5, time_2D_1, 'total time')

    with doc.create(Section('2D 5 process')):

        with doc.create(Subsection('N Vertices - Convergence rate')):
            plotty(nvertices_2D_5, coeff_2D_5, 'estimate', 4)
Exemple #24
0
        with doc.create(Subsection('A subsection')):
            doc.append('Also some crazy characters: $&#{}')


if __name__ == '__main__':
    # Basic document
    # Minimal pylatex workflow: fill a plain document and emit both the PDF
    # and the .tex source.
    doc = Document('basic')
    fill_document(doc)

    doc.generate_pdf()
    doc.generate_tex()

    # Document with `\maketitle` command activated
    doc = Document()

    doc.preamble.append(Command('title', 'Awesome Title'))
    doc.preamble.append(Command('author', 'Anonymous author'))
    doc.preamble.append(Command('date', NoEscape(r'\today')))
    doc.append(NoEscape(r'\maketitle'))

    fill_document(doc)

    # clean=False keeps the intermediate LaTeX build files for inspection.
    doc.generate_pdf('basic_maketitle', clean=False)

    # Add stuff to the document
    # The same Document instance can keep growing after a PDF was generated.
    with doc.create(Section('A second section')):
        doc.append('Some text.')

    doc.generate_pdf('basic_maketitle2')
    tex = doc.dumps()  # The document as string in LaTeX syntax
#Generate Document
# Build a per-galaxy parameter summary: one subsection with a two-column
# table for each HII galaxy, using wide margins and huge float-table fonts.
doc = Document(Table_Folder + Table_Name)

doc.packages.append(
    Package(
        'geometry',
        options=['tmargin=2cm', 'lmargin=1cm', 'rmargin=1cm', 'bmargin=2cm']))
doc.packages.append(Package('floatrow'))
# Raw LaTeX must be wrapped in NoEscape -- a plain str appended to the
# preamble gets escaped by pylatex and the commands render as literal text.
doc.preamble.append(NoEscape(r'\DeclareFloatFont{huge}{\huge}'))
doc.preamble.append(NoEscape(r'\floatsetup[table]{font=huge}'))

#Main section manuscript
with doc.create(Section('HII Galaxies properties:')):

    # Iterate the galaxies directly instead of indexing range(len(...)).
    for Galaxy in HII_Galaxy_List:

        with doc.create(Subsection('Galaxy ' + Galaxy)):
            # NOTE(review): in current pylatex, Table is a float environment
            # and the '|c|c|' column spec suggests Tabular was intended --
            # confirm against the pylatex version in use.
            with doc.create(Table('|c|c|')) as table:
                for parameter in parameter_List:
                    Label = Properties_dict[parameter]['label']
                    value = str(Data_Dict[Galaxy][parameter])
                    table.add_hline()
                    table.add_row((Label, value))

            # Raw LaTeX appended to the body also needs NoEscape.
            doc.append(NoEscape(r'\newpage'))
            doc.append(NoEscape(r'\tiny'))
def main():
    try:
        parser = argparse.ArgumentParser(
            description='pyprob ' + pyprob.__version__ + ' (Analytics)',
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument('-v',
                            '--version',
                            help='show version information',
                            action='store_true')
        parser.add_argument(
            '--dir',
            help='directory for loading artifacts and saving logs',
            default='.')
        parser.add_argument('--cuda', help='use CUDA', action='store_true')
        parser.add_argument(
            '--device',
            help=
            'selected CUDA device (-1: all, 0: 1st device, 1: 2nd device, etc.)',
            default=-1,
            type=int)
        parser.add_argument('--seed',
                            help='random seed',
                            default=123,
                            type=int)
        parser.add_argument(
            '--structure',
            help='show extra information about artifact structure',
            action='store_true')
        parser.add_argument('--saveReport',
                            help='save a full analytics report (tex and pdf)',
                            type=str)
        parser.add_argument(
            '--maxTraces',
            help=
            'maximum number of unique traces to plot in the full analytics report',
            default=20,
            type=int)
        parser.add_argument(
            '--saveLoss',
            help='save training and validation loss history (csv)',
            type=str)
        parser.add_argument('--saveAddresses',
                            help='save histogram of addresses (csv)',
                            type=str)
        parser.add_argument('--saveTraceLengths',
                            help='save histogram of trace lengths (csv)',
                            type=str)
        opt = parser.parse_args()

        if opt.version:
            print(pyprob.__version__)
            quit()

        util.set_random_seed(opt.seed)
        util.set_cuda(opt.cuda, opt.device)

        util.logger.reset()
        util.logger.log_config()

        file_name = util.file_starting_with(
            '{0}/{1}'.format(opt.dir, 'pyprob-artifact'), -1)
        util.logger.log(
            colored('Resuming previous artifact: {}'.format(file_name),
                    'blue',
                    attrs=['bold']))
        artifact = util.load_artifact(file_name, util.cuda_enabled,
                                      util.cuda_device)

        util.logger.log(artifact.get_info())
        util.logger.log()

        if opt.structure:
            util.logger.log()
            util.logger.log(
                colored('Artifact structure', 'blue', attrs=['bold']))
            util.logger.log()

            util.logger.log(artifact.get_structure_str())
            util.logger.log(artifact.get_parameter_str())

        if opt.saveLoss:
            util.logger.log(
                'Saving training and validation loss history to file: ' +
                opt.saveLoss)
            with open(opt.saveLoss, 'w') as f:
                data = [
                    artifact.train_history_trace, artifact.train_history_loss,
                    artifact.valid_history_trace, artifact.valid_history_loss
                ]
                writer = csv.writer(f)
                writer.writerow(
                    ['train_trace', 'train_loss', 'valid_trace', 'valid_loss'])
                for values in zip_longest(*data):
                    writer.writerow(values)

        if opt.saveAddresses:
            util.logger.log('Saving address histogram to file: ' +
                            opt.saveAddresses)
            with open(opt.saveAddresses, 'w') as f:
                data_count = []
                data_address = []
                data_abbrev = []
                abbrev_i = 0
                for address, count in sorted(
                        artifact.address_histogram.items(),
                        key=lambda x: x[1],
                        reverse=True):
                    abbrev_i += 1
                    data_abbrev.append('A' + str(abbrev_i))
                    data_address.append(address)
                    data_count.append(count)
                data = [data_count, data_abbrev, data_address]
                writer = csv.writer(f)
                writer.writerow(['count', 'unique_address_id', 'full_address'])
                for values in zip_longest(*data):
                    writer.writerow(values)

        if opt.saveTraceLengths:
            util.logger.log('Saving trace length histogram to file: ' +
                            opt.saveTraceLengths)
            with open(opt.saveTraceLengths, 'w') as f:
                data_trace_length = []
                data_count = []
                for trace_length in artifact.trace_length_histogram:
                    data_trace_length.append(trace_length)
                    data_count.append(
                        artifact.trace_length_histogram[trace_length])
                data = [data_trace_length, data_count]
                writer = csv.writer(f)
                writer.writerow(['trace_length', 'count'])
                for values in zip_longest(*data):
                    writer.writerow(values)

        if opt.saveReport:
            util.logger.log('Saving analytics report to files: ' +
                            opt.saveReport + '.tex and ' + opt.saveReport +
                            '.pdf')

            iter_per_sec = artifact.total_iterations / artifact.total_training_seconds
            traces_per_sec = artifact.total_traces / artifact.total_training_seconds
            traces_per_iter = artifact.total_traces / artifact.total_iterations
            train_loss_initial = artifact.train_history_loss[0]
            train_loss_final = artifact.train_history_loss[-1]
            train_loss_change = train_loss_final - train_loss_initial
            train_loss_change_per_sec = train_loss_change / artifact.total_training_seconds
            train_loss_change_per_iter = train_loss_change / artifact.total_iterations
            train_loss_change_per_trace = train_loss_change / artifact.total_traces
            valid_loss_initial = artifact.valid_history_loss[0]
            valid_loss_final = artifact.valid_history_loss[-1]
            valid_loss_change = valid_loss_final - valid_loss_initial
            valid_loss_change_per_sec = valid_loss_change / artifact.total_training_seconds
            valid_loss_change_per_iter = valid_loss_change / artifact.total_iterations
            valid_loss_change_per_trace = valid_loss_change / artifact.total_traces

            sys.stdout.write(
                'Generating report...                                           \r'
            )
            sys.stdout.flush()

            geometry_options = {
                'tmargin': '1.5cm',
                'lmargin': '1cm',
                'rmargin': '1cm',
                'bmargin': '1.5cm'
            }
            doc = Document('basic', geometry_options=geometry_options)
            doc.preamble.append(NoEscape(r'\usepackage[none]{hyphenat}'))
            doc.preamble.append(NoEscape(r'\usepackage{float}'))
            # doc.preamble.append(NoEscape(r'\renewcommand{\familydefault}{\ttdefault}'))

            doc.preamble.append(
                Command('title', 'Inference Compilation Analytics'))
            doc.preamble.append(
                Command(
                    'date',
                    NoEscape(datetime.datetime.now().strftime(
                        "%Y-%m-%d %H:%M:%S"))))
            doc.append(NoEscape(r'\maketitle'))
            # doc.append(NoEscape(r'\small'))

            with doc.create(Section('Current system')):
                with doc.create(Tabularx('ll')) as table:
                    table.add_row(('pyprob version', pyprob.__version__))
                    table.add_row(('PyTorch version', torch.__version__))

            # doc.append(NoEscape(r'\newpage'))
            with doc.create(Section('Artifact')):
                with doc.create(Subsection('File')):
                    with doc.create(Tabularx('ll')) as table:
                        table.add_row(('File name', file_name))
                        file_size = '{:,}'.format(os.path.getsize(file_name))
                        table.add_row(('File size', file_size + ' Bytes'))
                        table.add_row(('Created', artifact.created))
                        table.add_row(('Modified', artifact.modified))
                        table.add_row(('Updates to file', artifact.updates))
                with doc.create(Subsection('Training system')):
                    with doc.create(Tabularx('ll')) as table:
                        table.add_row(
                            ('pyprob version', artifact.code_version))
                        table.add_row(
                            ('PyTorch version', artifact.pytorch_version))
                        table.add_row(('Trained on', artifact.trained_on))
                with doc.create(Subsection('Neural network')):
                    with doc.create(Tabularx('ll')) as table:
                        table.add_row(('Trainable parameters', '{:,}'.format(
                            artifact.num_params_history_num_params[-1])))
                        table.add_row(
                            ('Softmax boost', artifact.softmax_boost))
                        table.add_row(('Dropout', artifact.dropout))
                        table.add_row(('Standardize inputs',
                                       artifact.standardize_observes))
                    with doc.create(Figure(position='H')) as plot:
                        fig = plt.figure(figsize=(10, 4))
                        ax = plt.subplot(111)
                        ax.plot(artifact.num_params_history_trace,
                                artifact.num_params_history_num_params)
                        plt.xlabel('Training traces')
                        plt.ylabel('Number of parameters')
                        plt.grid()
                        fig.tight_layout()
                        plot.add_plot(width=NoEscape(r'\textwidth'))
                        plot.add_caption('Number of parameters.')

                    for m_name, m in artifact.named_modules():
                        if not ('.' in m_name or m_name == ''):
                            doc.append(NoEscape(r'\newpage'))
                            with doc.create(Subsubsection(m_name)):
                                doc.append(str(m))
                                for p_name, p in m.named_parameters():
                                    if not 'bias' in p_name:
                                        with doc.create(
                                                Figure(position='H')) as plot:
                                            fig = plt.figure(figsize=(10, 10))
                                            ax = plt.subplot(111)
                                            plt.imshow(np.transpose(
                                                util.weights_to_image(p),
                                                (1, 2, 0)),
                                                       interpolation='none')
                                            plt.axis('off')
                                            plot.add_plot(
                                                width=NoEscape(r'\textwidth'))
                                            plot.add_caption(m_name + '_' +
                                                             p_name)

            doc.append(NoEscape(r'\newpage'))
            with doc.create(Section('Training')):
                with doc.create(Tabularx('ll')) as table:
                    table.add_row(('Total training time', '{0}'.format(
                        util.days_hours_mins_secs(
                            artifact.total_training_seconds))))
                    table.add_row(('Total training traces',
                                   '{:,}'.format(artifact.total_traces)))
                    table.add_row(
                        ('Traces / s', '{:,.2f}'.format(traces_per_sec)))
                    table.add_row(('Traces / iteration',
                                   '{:,.2f}'.format(traces_per_iter)))
                    table.add_row(('Iterations',
                                   '{:,}'.format(artifact.total_iterations)))
                    table.add_row(
                        ('Iterations / s', '{:,.2f}'.format(iter_per_sec)))
                    table.add_row(('Optimizer', artifact.optimizer))
                    table.add_row(('Validation set size', artifact.valid_size))

                with doc.create(Subsection('Training loss')):
                    with doc.create(Tabularx('ll')) as table:
                        table.add_row(('Initial loss',
                                       '{:+.6e}'.format(train_loss_initial)))
                        table.add_row(
                            ('Final loss', '{:+.6e}'.format(train_loss_final)))
                        table.add_row(
                            ('Loss change / s',
                             '{:+.6e}'.format(train_loss_change_per_sec)))
                        table.add_row(
                            ('Loss change / iteration',
                             '{:+.6e}'.format(train_loss_change_per_iter)))
                        table.add_row(
                            ('Loss change / trace',
                             '{:+.6e}'.format(train_loss_change_per_trace)))
                with doc.create(Subsection('Validation loss')):
                    with doc.create(Tabularx('ll')) as table:
                        table.add_row(('Initial loss',
                                       '{:+.6e}'.format(valid_loss_initial)))
                        table.add_row(
                            ('Final loss', '{:+.6e}'.format(valid_loss_final)))
                        table.add_row(
                            ('Loss change / s',
                             '{:+.6e}'.format(valid_loss_change_per_sec)))
                        table.add_row(
                            ('Loss change / iteration',
                             '{:+.6e}'.format(valid_loss_change_per_iter)))
                        table.add_row(
                            ('Loss change / trace',
                             '{:+.6e}'.format(valid_loss_change_per_trace)))
                with doc.create(Figure(position='H')) as plot:
                    fig = plt.figure(figsize=(10, 6))
                    ax = plt.subplot(111)
                    ax.plot(artifact.train_history_trace,
                            artifact.train_history_loss,
                            label='Training')
                    ax.plot(artifact.valid_history_trace,
                            artifact.valid_history_loss,
                            label='Validation')
                    ax.legend()
                    plt.xlabel('Training traces')
                    plt.ylabel('Loss')
                    plt.grid()
                    fig.tight_layout()
                    plot.add_plot(width=NoEscape(r'\textwidth'))
                    plot.add_caption('Loss plot.')

            doc.append(NoEscape(r'\newpage'))
            with doc.create(Section('Traces')):
                with doc.create(Tabularx('ll')) as table:
                    table.add_row(('Total training traces',
                                   '{:,}'.format(artifact.total_traces)))
                with doc.create(Subsection('Distributions encountered')):
                    with doc.create(Tabularx('ll')) as table:
                        num_distributions = len(
                            artifact.one_hot_distribution.keys())
                        table.add_row(
                            ('Number of distributions', num_distributions))
                        table.add_empty_row()
                        for distribution in artifact.one_hot_distribution.keys(
                        ):
                            table.add_row((distribution, ''))
                with doc.create(Subsection('Unique addresses encountered')):
                    with doc.create(Tabularx('lX')) as table:
                        num_addresses = len(artifact.one_hot_address.keys())
                        table.add_row(('Number of addresses', num_addresses))
                        address_collisions = max(
                            0, num_addresses - artifact.one_hot_address_dim)
                        table.add_row(
                            ('Address collisions', address_collisions))
                        table.add_empty_row()
                    doc.append('\n')
                    with doc.create(LongTable('llp{16cm}')) as table:
                        # table.add_empty_row()
                        table.add_row('Count', 'ID', 'Unique address')
                        table.add_hline()

                        address_to_abbrev = {}
                        abbrev_to_address = {}
                        abbrev_i = 0
                        sorted_addresses = sorted(
                            artifact.address_histogram.items(),
                            key=lambda x: x[1],
                            reverse=True)
                        plt_addresses = []
                        plt_counts = []
                        address_to_count = {}
                        address_count_total = 0
                        for address, count in sorted_addresses:
                            abbrev_i += 1
                            abbrev = 'A' + str(abbrev_i)
                            address_to_abbrev[address] = abbrev
                            abbrev_to_address[abbrev] = address
                            plt_addresses.append(abbrev)
                            plt_counts.append(count)
                            address_to_count[abbrev] = count
                            address_count_total += count
                            table.add_row(('{:,}'.format(count), abbrev,
                                           FootnoteText(address)))

                    with doc.create(Figure(position='H')) as plot:
                        fig = plt.figure(figsize=(10, 5))
                        ax = plt.subplot(111)
                        plt_x = range(len(plt_addresses))
                        ax.bar(plt_x, plt_counts)
                        plt.xticks(plt_x, plt_addresses)
                        plt.xlabel('Unique address ID')
                        plt.ylabel('Count')
                        plt.grid()
                        fig.tight_layout()
                        plot.add_plot(width=NoEscape(r'\textwidth'))
                        plot.add_caption('Histogram of address hits.')

                with doc.create(Subsection('Lengths')):
                    with doc.create(Tabularx('ll')) as table:
                        table.add_row(
                            ('Min trace length',
                             '{:,}'.format(artifact.trace_length_min)))
                        table.add_row(
                            ('Max trace length',
                             '{:,}'.format(artifact.trace_length_max)))
                        s = 0
                        total_count = 0
                        for trace_length in artifact.trace_length_histogram:
                            count = artifact.trace_length_histogram[
                                trace_length]
                            s += trace_length * count
                            total_count += count
                        trace_length_mean = s / total_count
                        table.add_row(('Mean trace length',
                                       '{:.2f}'.format(trace_length_mean)))
                    with doc.create(Figure(position='H')) as plot:
                        plt_lengths = [
                            i for i in range(0, artifact.trace_length_max + 1)
                        ]
                        plt_counts = [
                            artifact.trace_length_histogram[i]
                            if i in artifact.trace_length_histogram else 0
                            for i in range(0, artifact.trace_length_max + 1)
                        ]
                        fig = plt.figure(figsize=(10, 5))
                        ax = plt.subplot(111)
                        ax.bar(plt_lengths, plt_counts)
                        plt.xlabel('Length')
                        plt.ylabel('Count')
                        # plt.yscale('log')
                        plt.grid()
                        fig.tight_layout()
                        plot.add_plot(width=NoEscape(r'\textwidth'))
                        plot.add_caption(
                            'Histogram of trace lengths (of all traces used during training).'
                        )

                with doc.create(Subsection('Unique traces encountered')):
                    with doc.create(Tabularx('ll')) as table:
                        table.add_row(
                            ('Unique traces encountered', '{:,}'.format(
                                len(artifact.trace_examples_histogram))))
                        table.add_row(
                            ('Unique trace memory capacity',
                             '{:,}'.format(artifact.trace_examples_limit)))
                        table.add_row(
                            ('Unique traces rendered in detail', '{:,}'.format(
                                min(len(artifact.trace_examples_histogram),
                                    opt.maxTraces))))
                    doc.append('\n')
                    with doc.create(LongTable('lllp{16cm}')) as table:
                        # table.add_empty_row()
                        table.add_row('Count', 'ID', 'Len.', 'Unique trace')
                        table.add_hline()

                        trace_to_abbrev = {}
                        abbrev_to_trace = {}
                        abbrev_to_addresses = {}
                        abbrev_i = 0
                        sorted_traces = sorted(
                            artifact.trace_examples_histogram.items(),
                            key=lambda x: x[1],
                            reverse=True)
                        plt_traces = []
                        plt_counts = []
                        trace_to_count = {}
                        trace_count_total = 0
                        for trace, count in sorted_traces:
                            abbrev_i += 1
                            abbrev = 'T' + str(abbrev_i)
                            trace_to_abbrev[trace] = abbrev
                            abbrev_to_trace[abbrev] = trace
                            abbrev_to_addresses[abbrev] = list(
                                map(lambda x: address_to_abbrev[x],
                                    artifact.trace_examples_addresses[trace]))
                            trace_addresses = abbrev_to_addresses[abbrev]
                            trace_addresses_repetitions = util.pack_repetitions(
                                trace_addresses)
                            plt_traces.append(abbrev)
                            plt_counts.append(count)
                            trace_to_count[trace] = count
                            trace_count_total += count
                            length = len(
                                artifact.trace_examples_addresses[trace])
                            table.add_row(
                                ('{:,}'.format(count), abbrev,
                                 '{:,}'.format(length),
                                 FootnoteText('-'.join([
                                     a + 'x' + str(i) if i > 1 else a
                                     for a, i in trace_addresses_repetitions
                                 ]))))

                    with doc.create(Figure(position='H')) as plot:
                        fig = plt.figure(figsize=(10, 5))
                        ax = plt.subplot(111)
                        plt_x = range(len(plt_traces))
                        ax.bar(plt_x, plt_counts)
                        plt.xticks(plt_x, plt_traces)
                        plt.xlabel('Unique trace ID')
                        plt.ylabel('Count')
                        plt.grid()
                        fig.tight_layout()
                        plot.add_plot(width=NoEscape(r'\textwidth'))
                        plot.add_caption('Histogram of unique traces.')

                    with doc.create(Figure(position='H')) as plot:
                        master_trace_pairs = {}
                        transition_count_total = 0
                        for trace, count in sorted_traces:
                            ta = abbrev_to_addresses[trace_to_abbrev[trace]]
                            for left, right in zip(ta, ta[1:]):
                                if (left, right) in master_trace_pairs:
                                    master_trace_pairs[(left, right)] += count
                                else:
                                    master_trace_pairs[(left, right)] = count
                                transition_count_total += count
                        fig = plt.figure(figsize=(10, 5))
                        ax = plt.subplot(111)
                        master_graph = pydotplus.graphviz.Dot(
                            graph_type='digraph', rankdir='LR')
                        for p, w in master_trace_pairs.items():
                            nodes = master_graph.get_node(p[0])
                            if len(nodes) > 0:
                                n0 = nodes[0]
                            else:
                                n0 = pydotplus.Node(p[0])
                                master_graph.add_node(n0)
                            nodes = master_graph.get_node(p[1])
                            if len(nodes) > 0:
                                n1 = nodes[0]
                            else:
                                n1 = pydotplus.Node(p[1])
                                master_graph.add_node(n1)
                            master_graph.add_edge(
                                pydotplus.Edge(n0, n1, weight=w))
                        for node in master_graph.get_nodes():
                            node.set_color('gray')
                            node.set_fontcolor('gray')
                        for edge in master_graph.get_edges():
                            edge.set_color('gray')

                        master_graph_annotated = pydotplus.graphviz.graph_from_dot_data(
                            master_graph.to_string())
                        for node in master_graph_annotated.get_nodes():
                            color = util.rgb_to_hex(
                                util.rgb_blend(
                                    (1, 1, 1), (1, 0, 0),
                                    address_to_count[node.obj_dict['name']] /
                                    address_count_total))
                            node.set_style('filled')
                            node.set_fillcolor(color)
                            node.set_color('black')
                            node.set_fontcolor('black')
                        for edge in master_graph_annotated.get_edges():
                            (left, right) = edge.obj_dict['points']
                            count = master_trace_pairs[(left, right)]
                            edge.set_label(count)
                            color = util.rgb_to_hex(
                                (1.5 * (count / transition_count_total), 0, 0))
                            edge.set_color(color)

                        png_str = master_graph_annotated.create_png(
                            prog=['dot', '-Gsize=15', '-Gdpi=600'])
                        bio = BytesIO()
                        bio.write(png_str)
                        bio.seek(0)
                        img = np.asarray(mpimg.imread(bio))
                        plt.imshow(util.crop_image(img),
                                   interpolation='bilinear')
                        plt.axis('off')
                        plot.add_plot(width=NoEscape(r'\textwidth'))
                        plot.add_caption(
                            'Succession of unique address IDs (accumulated over all traces).'
                        )

                    for trace, count in sorted_traces[:opt.maxTraces]:
                        trace = trace_to_abbrev[trace]
                        doc.append(NoEscape(r'\newpage'))
                        with doc.create(Subsubsection('Unique trace ' +
                                                      trace)):
                            sys.stdout.write(
                                'Rendering unique trace {0}...                                       \r'
                                .format(trace))
                            sys.stdout.flush()

                            addresses = len(address_to_abbrev)
                            trace_addresses = abbrev_to_addresses[trace]

                            with doc.create(Tabularx('ll')) as table:
                                table.add_row(
                                    FootnoteText('Count'),
                                    FootnoteText('{:,}'.format(count)))
                                table.add_row(
                                    FootnoteText('Length'),
                                    FootnoteText('{:,}'.format(
                                        len(trace_addresses))))
                            doc.append('\n')

                            im = np.zeros((addresses, len(trace_addresses)))
                            for i in range(len(trace_addresses)):
                                address = trace_addresses[i]
                                address_i = plt_addresses.index(address)
                                im[address_i, i] = 1
                            truncate = 100
                            for col_start in range(0, len(trace_addresses),
                                                   truncate):
                                col_end = min(col_start + truncate,
                                              len(trace_addresses))
                                with doc.create(Figure(position='H')) as plot:
                                    fig = plt.figure(figsize=(20 * (
                                        (col_end + 4 - col_start) / truncate),
                                                              4))
                                    ax = plt.subplot(111)
                                    # ax.imshow(im,cmap=plt.get_cmap('Greys'))
                                    sns.heatmap(im[:, col_start:col_end],
                                                cbar=False,
                                                linecolor='lightgray',
                                                linewidths=.5,
                                                cmap='Greys',
                                                yticklabels=plt_addresses,
                                                xticklabels=np.arange(
                                                    col_start, col_end))
                                    plt.yticks(rotation=0)
                                    fig.tight_layout()
                                    plot.add_plot(
                                        width=NoEscape(r'{0}\textwidth'.format(
                                            (col_end + 4 - col_start) /
                                            truncate)),
                                        placement=NoEscape(r'\raggedright'))

                            with doc.create(Figure(position='H')) as plot:
                                pairs = {}
                                for left, right in zip(trace_addresses,
                                                       trace_addresses[1:]):
                                    if (left, right) in pairs:
                                        pairs[(left, right)] += 1
                                    else:
                                        pairs[(left, right)] = 1

                                fig = plt.figure(figsize=(10, 5))
                                ax = plt.subplot(111)
                                graph = pydotplus.graphviz.graph_from_dot_data(
                                    master_graph.to_string())

                                trace_address_to_count = {}
                                for address in trace_addresses:
                                    if address in trace_address_to_count:
                                        trace_address_to_count[address] += 1
                                    else:
                                        trace_address_to_count[address] = 1

                                for p, w in pairs.items():
                                    left_node = graph.get_node(p[0])[0]
                                    right_node = graph.get_node(p[1])[0]
                                    edge = graph.get_edge(p[0], p[1])[0]

                                    color = util.rgb_to_hex(
                                        util.rgb_blend(
                                            (1, 1, 1), (1, 0, 0),
                                            trace_address_to_count[p[0]] /
                                            len(trace_addresses)))
                                    left_node.set_style('filled')
                                    left_node.set_fillcolor(color)
                                    left_node.set_color('black')
                                    left_node.set_fontcolor('black')

                                    color = util.rgb_to_hex(
                                        util.rgb_blend(
                                            (1, 1, 1), (1, 0, 0),
                                            trace_address_to_count[p[0]] /
                                            len(trace_addresses)))
                                    right_node.set_style('filled')
                                    right_node.set_fillcolor(color)
                                    right_node.set_color('black')
                                    right_node.set_fontcolor('black')

                                    (left, right) = edge.obj_dict['points']
                                    edge.set_label(w)
                                    color = util.rgb_to_hex(
                                        (1.5 * (w / len(trace_addresses)), 0,
                                         0))
                                    edge.set_color(color)

                                png_str = graph.create_png(
                                    prog=['dot', '-Gsize=30', '-Gdpi=600'])
                                bio = BytesIO()
                                bio.write(png_str)
                                bio.seek(0)
                                img = np.asarray(mpimg.imread(bio))
                                plt.imshow(util.crop_image(img),
                                           interpolation='bilinear')
                                plt.axis('off')
                                plot.add_plot(width=NoEscape(r'\textwidth'))
                                plot.add_caption(
                                    'Succession of unique address IDs (for one trace of type '
                                    + trace + ').')

                            with doc.create(Tabularx('lp{16cm}')) as table:
                                trace_addresses_repetitions = util.pack_repetitions(
                                    trace_addresses)
                                table.add_row(
                                    FootnoteText('Trace'),
                                    FootnoteText('-'.join([
                                        a + 'x' + str(i) if i > 1 else a
                                        for a, i in trace_addresses_repetitions
                                    ])))

            doc.generate_pdf(opt.saveReport, clean_tex=False)
            sys.stdout.write(
                '                                                               \r'
            )
            sys.stdout.flush()

    except KeyboardInterrupt:
        util.logger.log('Stopped')
    except Exception:
        traceback.print_exc(file=sys.stdout)
    sys.exit(0)
Exemple #27
0
# Patient information header, left-aligned on its own minipage.
with doc.create(MiniPage(align='l')):
    doc.append('Name : ' + str(sub_info.Name[0]))
    doc.append('\nAge: ' + str(sub_info.Age[0]))
    doc.append('\nGender: ' + str(sub_info.Gender[0]))
    doc.append('\nUH Id: ' + str(sub_info.UHID[0]))
    doc.append('\nDate: ' + d2)
    doc.append(LineBreak())
    # Full-width horizontal rule separating the header from the title.
    doc.append(NoEscape(r"\noindent\rule{\textwidth}{1pt}"))
    doc.append(NewLine())
    doc.append(LineBreak())

    # Centered report title.
    with doc.create(MiniPage(align='c')):
        doc.append(LargeText(bold('Vowel Triangle Analysis Report')))

# Analysis result and image

with doc.create(Section('Vowel Triangle Analysis')):
    with doc.create(Figure(position='h!')) as img_plot:
        img_plot.add_image(img1, width='300px')
        img_plot.add_caption('Vowel Triangle Plot')  # fixed typo: was 'PLot'
    with doc.create(Subsection('Vowel Triangle')):
        doc.append('Area : ' + str(area))
    doc.append(NoEscape(r"\pagebreak[4]"))

# saving the pdf and tex

doc.generate_pdf(filepath='media/VT.report1', clean_tex=False)
Exemple #28
0
def generateLatex(phrases, key, result, flag):
    """Build and open a step-by-step 3x3 Hill-cipher report as a PDF.

    Args:
        phrases: list of 3-character chunks of the message.
        key: Hill cipher key string (assumed 9 capital letters - TODO confirm
            against createEncryptMatrix).
        result: final ciphertext (flag == "E") or plaintext (flag == "D").
        flag: "E" to document encryption, "D" to document decryption.

    Side effects:
        Writes 'full.tex' and 'full.pdf' in the working directory, then opens
        the PDF with the 'open' command (macOS-specific).
    """
    geometry_options = {"tmargin": "1cm", "lmargin": "3cm", "rmargin": "3cm"}
    doc = Document(geometry_options=geometry_options)

    if flag == "E":
        with doc.create(Section('Encryption Input')):  # fixed typo: 'Encyption'
            doc.append('Text: ' + "".join(phrases) + "\n")
            doc.append('Key: ' + key)

        with doc.create(Section("Matrix Multiplications")):  # fixed typo: 'Mltiplications'
            for phrase in phrases:
                M = createEncryptMatrix(key)
                # Column vector of the chunk's three letters mapped to numbers.
                messageMatrix = np.array([[getCapitalAlphaMod(phrase[0]),
                                           getCapitalAlphaMod(phrase[1]),
                                           getCapitalAlphaMod(phrase[2])]]).astype("float64").T
                doc.append(Math(data=[r'1/' + str(26), Matrix(M),
                                      Matrix(messageMatrix), '=',
                                      Matrix(getModMatrix(M @ messageMatrix))]))
                doc.append("\n")
                doc.append("Encrypted chunk: " +
                           getStringFromMatrix(getModMatrix(M @ messageMatrix)))

        with doc.create(Section('Encryption Result')):  # fixed typo: 'Encyption'
            doc.append('Cipher: ' + result)
    elif flag == "D":
        image_filename = './LookupHill.png'

        with doc.create(Section('Introduction')):
            doc.append('In this project, I implement a 3  ×  3 Hill Cipher Machine in Python. This machine automatically generates LaTex reports to decipher user-input step by step. \n')
            doc.append('We will be deciphering: ' + "".join(phrases) + ' using the key: ' + key + '. \n')

        with doc.create(Section("Encryption Matrix")):  # fixed typo: 'Enryption'
            with doc.create(Figure(position='h!')) as lookup_hill:
                lookup_hill.add_image(image_filename, width='120px')
                lookup_hill.add_caption('Lookup Table of Hill Cipher')

            doc.append('We use the Lookup Table above to create the Encryption Matrix below. \n')
            M = createEncryptMatrix(key)
            doc.append(Math(data=[Matrix(M)]))

        with doc.create(Section("Encryption Matrix Mod 26 Inverse")):
            # Removed a large commented-out manual Gauss-elimination draft that
            # sat here as a dead no-op string; the helper below supersedes it.
            invMat = gaussianInverseMod26(createEncryptMatrixAug(key))
            doc.append(Math(data=[Matrix(invMat)]))
            doc.append("\n")

        with doc.create(Section("Matrix Multiplications")):
            for phrase in phrases:
                M = invMat
                messageMatrix = np.array([[getCapitalAlphaMod(phrase[0]),
                                           getCapitalAlphaMod(phrase[1]),
                                           getCapitalAlphaMod(phrase[2])]]).astype("float64").T
                doc.append(Math(data=[Matrix(M), Matrix(messageMatrix), '=',
                                      Matrix(getModMatrix(M @ messageMatrix))]))
                doc.append("\n")
                doc.append("Decrypted chunk: " +
                           getStringFromMatrix(getModMatrix(M @ messageMatrix)))

        with doc.create(Section('Decryption Result')):
            doc.append('Plaintext: ' + result)

    doc.generate_pdf('full', clean_tex=False)

    # macOS-specific viewer launch; replace for other platforms.
    subprocess.call(['open', 'full.pdf'])
Exemple #29
0
#!/usr/bin/python
"""
This example shows how multirow and multicolumns can be used.

..  :copyright: (c) 2014 by Jelte Fennema.
    :license: MIT, see License for more details.
"""

# begin-doc-include
from pylatex import Document, Section, Subsection, Tabular, MultiColumn,\
    MultiRow

doc = Document("multirow")
section = Section('Multirow Test')

test1 = Subsection('MultiColumn')
test2 = Subsection('MultiRow')
test3 = Subsection('MultiColumn and MultiRow')
test4 = Subsection('Vext01')

# Four-column bordered table: a full-width multicolumn header followed by two
# plain data rows, each row terminated by a horizontal rule.
table1 = Tabular('|c|c|c|c|')
table1.add_hline()
for cells in ((MultiColumn(4, align='|c|', data='Multicolumn'),),
              (1, 2, 3, 4),
              (5, 6, 7, 8)):
    table1.add_row(cells)
    table1.add_hline()
# Row whose multicolumn span does not start in the leftmost column.
row_cells = ('9', MultiColumn(3, align='|c|', data='Multicolumn not on left'))
table1.add_row(row_cells)
table1.add_hline()
Exemple #30
0
 def _document_init_params(self) -> None:
     """Add initialization parameters to the traceability document.
     """
     from fastestimator.estimator import Estimator  # Avoid circular import
     with self.doc.create(Section("Parameters")):
         # IDs of the real model objects, used to cross-link table entries.
         model_ids = {
             FEID(id(mdl))
             for mdl in self.system.network.models
             if isinstance(mdl, (tf.keras.Model, torch.nn.Module))
         }
         # Locate the datasets (including any nested inside Schedulers) so
         # extra details about them can be provided later in the summary.
         datasets = {}
         for mode in ['train', 'eval', 'test']:
             pending = to_list(self.system.pipeline.data.get(mode, None))
             cursor = 0
             # Index-based walk: the list grows while we traverse it.
             while cursor < len(pending):
                 candidate = pending[cursor]
                 if candidate:
                     feid = FEID(id(candidate))
                     if feid in datasets:
                         datasets[feid][0].add(mode)
                     else:
                         datasets[feid] = ({mode}, candidate)
                 if isinstance(candidate, Scheduler):
                     # A Scheduler may wrap several datasets; queue them all.
                     pending.extend(candidate.get_all_values())
                 cursor += 1
         # Render one group of config tables per category, keeping the row
         # numbering continuous by threading `start` through every call.
         categories = [
             ((Estimator, BaseNetwork, Pipeline), "Base Classes"),
             (Scheduler, "Schedulers"),
             (Trace, "Traces"),
             (Op, "Operators"),
             ((Dataset, tf.data.Dataset), "Datasets"),
             ((tf.keras.Model, torch.nn.Module), "Models"),
             (types.FunctionType, "Functions"),
             ((np.ndarray, tf.Tensor, tf.Variable, torch.Tensor), "Tensors"),
             (Any, "Miscellaneous"),
         ]
         start = 0
         for classes, name in categories:
             start = self._loop_tables(start,
                                       classes=classes,
                                       name=name,
                                       model_ids=model_ids,
                                       datasets=datasets)