Example no. 1
    def output_support_mismatch(self, ref_name: str, prof_name: str):
        """Output algorithm support differences section"""

        tags.h4("Differences in algorithm support:",
                style="color:var(--red-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in self.support_mismatch:
            ref = self.support_mismatch[key][0]
            prof = self.support_mismatch[key][1]

            reftext = "Supported" if ref.support else "Unsupported"
            if ref.error:
                reftext = ref.error

            proftext = "Supported" if prof.support else "Unsupported"
            if prof.error:
                proftext = prof.error

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("support_mismatch_div")

        with sm_div:
            tags.p(
                "If an algorithm is supported by the reference card but not "
                "by the profiled card, or vice versa, the cards almost "
                "certainly do not match.")
            table(data, header,
                  green_value="Supported",
                  red_value="Unsupported")
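All of these sections lean on two project helpers that the excerpts do not show: show_hide_div, which returns a collapsible div, and table, which renders the header and rows and colors cells by value. A minimal sketch of what they might look like, assuming the dominate library (tags) used above; the toggle-button behaviour and the green_value/red_value substring-matching rule are inferred from the call sites, not taken from the real implementation:

from dominate import tags


def show_hide_div(divname, hide=False):
    """Sketch only: a div with a button that toggles its visibility.

    Assumption: the real helper also emits the toggle button; the
    default for `hide` is a guess based on the call sites above.
    """
    tags.button(
        "Show / Hide",
        onclick="var e = document.getElementById('" + divname + "');"
                "e.style.display = "
                "(e.style.display === 'none') ? 'block' : 'none';")
    return tags.div(id=divname,
                    style="display:none;" if hide else "display:block;")


def table(data, header, green_value=None, red_value=None):
    """Sketch only: render header + rows, coloring cells whose text
    contains green_value green and red_value red (rule inferred from
    calls such as table(..., green_value="ms", red_value="Failed"))."""
    with tags.table():
        with tags.tr():
            for cell in header:
                tags.th(cell)
        for row in data:
            with tags.tr():
                for cell in row:
                    style = ""
                    if green_value is not None and green_value in str(cell):
                        style = "color:var(--green-color);"
                    elif red_value is not None and red_value in str(cell):
                        style = "color:var(--red-color);"
                    tags.td(cell, style=style)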
Example no. 2
    def output_mismatch(self, ref_name, prof_name):
        """Output mismatch section"""

        tags.h4("List of algorithms with different results:",
                style="color:var(--red-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in self.mismatch:
            ref = self.mismatch[key][0]
            prof = self.mismatch[key][1]

            reftext = "{:.2f}".format(ref.operation_avg()) + " ms"
            proftext = "{:.2f}".format(prof.operation_avg()) + " ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_mismatch_div", hide=False)

        with sm_div:
            tags.p(
                "These are the algorithms on which the cards produced "
                "significantly different results."
            )
            table(data, header,
                  red_value="ms")
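The ref/prof values compared throughout are per-algorithm measurement objects exposing support, error, and operation_avg(). A hypothetical stand-in, modelling only the attributes the sections above actually read; the constructor and field names are assumptions for illustration:

class Measurement:
    """Hypothetical stand-in for the per-algorithm result objects."""

    def __init__(self, operation_times_ms=(), support=True, error=None):
        self.operation_times_ms = list(operation_times_ms)
        self.support = support
        self.error = error

    def operation_avg(self):
        # Average duration of one measured operation in milliseconds.
        return sum(self.operation_times_ms) / len(self.operation_times_ms)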
Example no. 3
    def output_memory_mismatch(self, ref_name: str, prof_name: str):
        """Output memory mismatch section"""

        tags.h4("Differences in memory allocation during tests:",
                style="color:var(--orange-color);display:inline-block")

        sm_div = show_hide_div("support_memory_mismatch_div", hide=False)

        with sm_div:
            tags.p("Differences in bytes of allocated memory above "
                   "certain threshold might be suspicious, as the memory "
                   "allocated during the test of the same algorithm should "
                   "remain similar.")

            for dataset in [self.memory_mismatch,
                            self.reset_mismatch,
                            self.deselect_mismatch]:
                if dataset:
                    self.output_single_memory_mismatch(ref_name, prof_name,
                                                       dataset)
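Example no. 3 delegates the rendering of each dataset to output_single_memory_mismatch, which the excerpt does not include. A hedged sketch of what such a method could look like, reusing the header/table pattern of the other sections; only the signature comes from the call above, the body and the byte formatting are assumptions:

    def output_single_memory_mismatch(self, ref_name, prof_name, dataset):
        """Sketch only: render one memory-mismatch dataset as a table."""
        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in dataset:
            ref, prof = dataset[key]
            # Assumption: the dataset stores allocated bytes per card.
            data.append([key, str(ref) + " B", str(prof) + " B"])

        table(data, header)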
Example no. 4
    def output_missing(self, ref_name, prof_name):
        """Output missing measurements section"""

        tags.h4("Missing measurements in algorithm performance:",
                style="color:var(--yellow-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in self.missing:
            ref = self.missing[key][0]
            prof = self.missing[key][1]

            reftext = "Failed: " + str(ref.error)
            proftext = "Failed: " + str(prof.error)

            if not ref:
                reftext = "Result missing"
            elif not ref.error:
                reftext = "{:.2f}".format(ref.operation_avg()) + " ms"

            if not prof:
                proftext = "Result missing"
            elif not prof.error:
                proftext = "{:.2f}".format(prof.operation_avg()) + " ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_missing_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms whose results are missing for one "
                "of the cards. These should be checked manually."
            )
            table(data, header,
                  green_value="ms",
                  red_value="Failed")
Example no. 5
    def output_erroneous(self, ref_name, prof_name):
        """Output erroneous section"""

        tags.h4("List of algorithms with mismatch in error:",
                style="color:var(--orange-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in self.erroneous:
            ref = self.erroneous[key][0]
            prof = self.erroneous[key][1]

            reftext = "Failed: " + str(ref.error)
            proftext = "Failed: " + str(prof.error)

            if not ref.error:
                reftext = "{:.2f}".format(ref.operation_avg()) + " ms"

            if not prof.error:
                proftext = "{:.2f}".format(prof.operation_avg()) + " ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_erroneous_div", hide=False)

        with sm_div:
            tags.p(
                "These are the algorithms on which the cards failed with "
                "different errors. You should check this table manually. "
                "The errors were probably caused by random exceptions during "
                "performance testing. It is recommended to rerun these "
                "algorithms manually to ascertain that the card is not broken."
            )
            table(data, header,
                  green_value="ms",
                  red_value="Failed")
Example no. 6
    def output_skipped(self, ref_name, prof_name):
        """Output skipped section"""

        tags.h4("List of algorithms not used for verification:",
                style="display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in self.skipped:
            ref = self.skipped[key][0]
            prof = self.skipped[key][1]

            reftext = "Failed: " + str(ref.error)
            proftext = "Failed: " + str(prof.error)

            if not ref.error:
                reftext = "{:.2f}".format(ref.operation_avg()) + " ms"

            if not prof.error:
                proftext = "{:.2f}".format(prof.operation_avg()) + " ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_skipped_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms that run fast overall. Differences "
                "of a few milliseconds can happen due to measurement errors. "
                "These measurements have informational value, but are omitted "
                "from automated mismatch detection."
            )
            table(data, header,
                  green_value="ms",
                  red_value="Failed")
Example no. 7
    def output_matching(self, ref_name, prof_name):
        """Output matching section"""

        tags.h4("List of algorithms with matching results:",
                style="color:var(--green-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in self.matching:
            ref = self.matching[key][0]
            prof = self.matching[key][1]

            reftext = "Supported" if ref.support else "Unsupported"
            if ref.error:
                reftext = ref.error

            proftext = "Supported" if prof.support else "Unsupported"
            if prof.error:
                proftext = prof.error

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("support_matching_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms whose results match between the "
                "cards, with no significant differences in memory allocation."
            )
            table(data, header,
                  green_value="Supported",
                  red_value="Unsupported")
Example no. 8
    def output_missing(self, ref_name, prof_name):
        """Output missing measurements section"""

        tags.h4("Missing measurements in algorithm support:",
                style="color:var(--yellow-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in self.missing:
            ref = self.missing[key][0]
            prof = self.missing[key][1]

            if ref:
                reftext = "Supported" if ref.support else "Unsupported"
            else:
                reftext = "Result missing"
            if prof:
                proftext = "Supported" if prof.support else "Unsupported"
            else:
                proftext = "Result missing"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("support_missing_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms whose results are missing for one "
                "of the cards. These should be checked manually."
            )
            table(data, header,
                  green_value="Supported",
                  red_value="Unsupported")
Example no. 9
    def output_matching(self, ref_name, prof_name):
        """Output matching section"""

        tags.h4("List of algorithms with matching results:",
                style="color:var(--green-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in self.matching:
            ref = self.matching[key][0]
            prof = self.matching[key][1]

            reftext = "Failed: " + str(ref.error)
            proftext = "Failed: " + str(prof.error)

            if not ref.error:
                reftext = "{:.2f}".format(ref.operation_avg()) + " ms"

            if not prof.error:
                proftext = "{:.2f}".format(prof.operation_avg()) + " ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_matching_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms on which the cards performed "
                "similarly, or failed with the same error."
            )
            table(data, header,
                  green_value="ms",
                  red_value="Failed")
Example no. 10
                    worst_contrast_state = contrast_class

                if contrast_class.value >= ContrastState.WARN.value:
                    suspicions += 1

                with tags.span(cls="dot " + contrast_class.name.lower()):
                    tags.span(TOOLTIP_TEXT[contrast_class],
                              cls="tooltiptext " + contrast_class.name.lower())
                with intro_div:
                    with tags.span(cls="dot " + contrast_class.name.lower()):
                        tags.span(TOOLTIP_TEXT[contrast_class],
                                  cls="tooltiptext " +
                                  contrast_class.name.lower())

                tags.h2("Module: " + str(m), style="display: inline-block;")
                module_div = show_hide_div(divname, hide=True)
                with module_div:
                    m.project_html(contrast.ref_name, contrast.prof_name)

                tags.br()
                module_count += 1

        with intro_div:
            tags.br()
            tags.p(RESULT_TEXT[worst_contrast_state](suspicions))

            tags.h3("Quick visibility settings")
            show_all_button()
            hide_all_button()
            default_button()
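The fragment above depends on a ContrastState enum whose numeric values express severity, a TOOLTIP_TEXT mapping for the colored-dot tooltips, and RESULT_TEXT, which maps the worst observed state to a callable that turns the suspicion count into a summary line. A hedged sketch of those pieces: the names come from the excerpt, but the member ordering, wording, and values are invented for illustration.

from enum import Enum


class ContrastState(Enum):
    # Ordered so that .value comparisons express severity, as
    # `contrast_class.value >= ContrastState.WARN.value` implies;
    # the actual members and values are assumptions.
    MATCH = 1
    WARN = 2
    SUSPICIOUS = 3


# Tooltip labels for the colored dots; wording is illustrative only.
TOOLTIP_TEXT = {
    ContrastState.MATCH: "No differences found",
    ContrastState.WARN: "Minor differences found",
    ContrastState.SUSPICIOUS: "Significant differences found",
}

# Summary sentence for the intro section, parametrised by the number of
# suspicious modules, matching the RESULT_TEXT[...](suspicions) call above.
RESULT_TEXT = {
    ContrastState.MATCH:
        lambda n: "The cards seem to match.",
    ContrastState.WARN:
        lambda n: "Found {} suspicion(s); manual review is advised.".format(n),
    ContrastState.SUSPICIOUS:
        lambda n: "Found {} suspicion(s); the cards likely differ.".format(n),
}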