Example #1
    def output_support_mismatch(self, ref_name: str, prof_name: str):
        """Output algorithm support differences section"""

        tags.h4("Differences in algorithm support:",
                style="color:var(--red-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key, (ref, prof) in self.support_mismatch.items():

            reftext = "Supported" if ref.support else "Unsupported"
            if ref.error:
                reftext = ref.error

            proftext = "Supported" if prof.support else "Unsupported"
            if prof.error:
                proftext = prof.error

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("support_mismatch_div")

        with sm_div:
            tags.p(
                "If an algorithm is supported by the reference card but not "
                "by the profiled card, or vice versa, the cards almost "
                "certainly do not match.")
            table(data, header,
                  green_value="Supported",
                  red_value="Unsupported")
Example #2
    def output_single_memory_mismatch(self, ref_name: str, prof_name: str,
                                      dataset: Dict[str, List[SupportResult]]):
        """Output part of memory mismatch section"""

        mismatch = ""
        if dataset is self.memory_mismatch:
            mismatch = "persistent memory allocation"
        elif dataset is self.reset_mismatch:
            mismatch = "memory allocation during reset call"
        elif dataset is self.deselect_mismatch:
            mismatch = "memory allocation during deselect call"
        tags.h4("Differences in " + mismatch + ":",
                style="color:var(--orange-color)")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key in dataset.keys():
            if dataset is self.memory_mismatch:
                ref = str(dataset[key][0].persistent_memory)
                prof = str(dataset[key][1].persistent_memory)
            elif dataset is self.reset_mismatch:
                ref = str(dataset[key][0].ram_reset)
                prof = str(dataset[key][1].ram_reset)
            elif dataset is self.deselect_mismatch:
                ref = str(dataset[key][0].ram_deselect)
                prof = str(dataset[key][1].ram_deselect)
            else:
                raise Exception("Wrong parameter in output_memory_mismatch")
            data.append([key, ref, prof])

        table(data, header)
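
Presumably a wrapper emits the three sub-tables in turn; a hypothetical caller matching the name used in the ValueError message above (everything here is an assumption):

    def output_memory_mismatch(self, ref_name: str, prof_name: str):
        """Hypothetical wrapper: one sub-table per non-empty dataset."""
        for dataset in (self.memory_mismatch,
                        self.reset_mismatch,
                        self.deselect_mismatch):
            if dataset:
                self.output_single_memory_mismatch(ref_name, prof_name,
                                                   dataset)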
Example #3
    def output_mismatch(self, ref_name, prof_name):
        """Output mismatch section"""

        tags.h4("List of algorithms with different results:",
                style="color:var(--red-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key, (ref, prof) in self.mismatch.items():
            reftext = f"{ref.operation_avg():.2f} ms"
            proftext = f"{prof.operation_avg():.2f} ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_mismatch_div", hide=False)

        with sm_div:
            tags.p(
                "These are the algorithms on which the cards produced "
                "different performance results."
            )
            table(data, header,
                  red_value="ms")
Example #4
    def project_html(self, ref_name, prof_name):
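        """Output ATR comparison section"""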

        tags.h3("ATR comparison results")
        tags.p("This module compares ATR of the smart cards "
               "and searches database of known smart cards "
               "for additional information.")

        tags.h4("ATR:")
        table([[ref_name + " (reference)", self.ref_atr],
               [prof_name + " (profiled)", self.prof_atr]],
              header=["Card", "ATR"])

        if self.ref_atr == self.prof_atr:
            tags.p("The ATR of the tested card matches the reference. "
                   "This would suggest the same smart card model.")
        else:
            tags.p("The ATR of the tested card does not match the reference. "
                   "This would suggest different smart card models.")

        tags.h4("Additional info from smart card database")

        tags.p("This information was taken from database of known "
               "smart cards, distributed under GNU GPLv2.")
        tags.p("For complete list, check:")
        tags.a(config.URL.SMARTCARD_LIST, href=config.URL.SMARTCARD_LIST)

        if self.ref_info:
            tags.p("The reference card (" + ref_name +
                   ") was found in the database:")
            with tags.div():
                for i in self.ref_info:
                    tags.p(i)
        else:
            tags.p("The reference card (" + ref_name +
                   ") was not found in the database.")
        if self.prof_info:
            tags.p("The profiled card (" + prof_name +
                   ") was found in the database:")
            with tags.div():
                for i in self.prof_info:
                    tags.p(i)
        else:
            tags.p("The profiled card (" + prof_name +
                   ") was not found in the database.")
Example #5
    def output_table(self, ref_name: str, prof_name: str):
        """Output CPLC comparison table"""

        header = [
            "CPLC Field", ref_name + " (reference)", prof_name + " (profiled)"
        ]

        data = []
        # Take the union of fields so that an entry missing on either card
        # still shows up; sort for a deterministic row order.
        keys = sorted(set(self.reference) | set(self.profiled))

        for key in keys:
            ref = self.reference.get(key, "Missing")
            prof = self.profiled.get(key, "Missing")
            data.append([key, ref, prof])

        def first_token_differs(line):
            # Flag a row red when the leading whitespace-separated token of
            # the two values differs.
            return line[1].split(" ")[0] != line[2].split(" ")[0]

        table(data, header, red_predicate=first_token_differs)
Example #6
    def output_missing(self, ref_name, prof_name):
        """Output missing measurements section"""

        tags.h4("Missing measurements in algorithm performance:",
                style="color:var(--yellow-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key, (ref, prof) in self.missing.items():
            # Check for a missing result before touching .error: accessing
            # attributes of a missing (None) result would raise.
            if not ref:
                reftext = "Result missing"
            elif ref.error:
                reftext = "Failed: " + str(ref.error)
            else:
                reftext = f"{ref.operation_avg():.2f} ms"

            if not prof:
                proftext = "Result missing"
            elif prof.error:
                proftext = "Failed: " + str(prof.error)
            else:
                proftext = f"{prof.operation_avg():.2f} ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_missing_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms whose results were missing on one "
                "of the cards. They should be checked manually."
            )
            table(data, header,
                  green_value="ms",
                  red_value="Failed")
Example #7
    def output_erroneous(self, ref_name, prof_name):
        """Output erroneous section"""

        tags.h4("List of algorithms with mismatch in error:",
                style="color:var(--orange-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key, (ref, prof) in self.erroneous.items():
            if ref.error:
                reftext = "Failed: " + str(ref.error)
            else:
                reftext = f"{ref.operation_avg():.2f} ms"

            if prof.error:
                proftext = "Failed: " + str(prof.error)
            else:
                proftext = f"{prof.operation_avg():.2f} ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_erroneous_div", hide=False)

        with sm_div:
            tags.p(
                "These are the algorithms on which the cards failed with "
                "different errors. You should check this table manually. "
                "The errors were probably caused by random exceptions during "
                "performance testing. It is recommended to rerun these "
                "algorithms manually to ascertain that the card is not broken."
            )
            table(data, header,
                  green_value="ms",
                  red_value="Failed")
Example #8
    def output_skipped(self, ref_name, prof_name):
        """Output skipped section"""

        tags.h4("List of algorithms not used for verification:",
                style="display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key, (ref, prof) in self.skipped.items():
            if ref.error:
                reftext = "Failed: " + str(ref.error)
            else:
                reftext = f"{ref.operation_avg():.2f} ms"

            if prof.error:
                proftext = "Failed: " + str(prof.error)
            else:
                proftext = f"{prof.operation_avg():.2f} ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_skipped_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms that run fast overall. Differences "
                "of a few milliseconds can happen due to measurement errors. "
                "These measurements still have informational value, but they "
                "are omitted from automated mismatch detection."
            )
            table(data, header,
                  green_value="ms",
                  red_value="Failed")
Example #9
    def output_matching(self, ref_name, prof_name):
        """Output matching section"""

        tags.h4("List of algorithms with matching results:",
                style="color:var(--green-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key, (ref, prof) in self.matching.items():

            reftext = "Supported" if ref.support else "Unsupported"
            if ref.error:
                reftext = ref.error

            proftext = "Supported" if prof.support else "Unsupported"
            if prof.error:
                proftext = prof.error

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("support_matching_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms whose results matched between the "
                "cards, with no significant differences in memory allocation."
            )
            table(data, header,
                  green_value="Supported",
                  red_value="Unsupported")
Example #10
    def output_missing(self, ref_name, prof_name):
        """Output missing measurements section"""

        tags.h4("Missing measurements in algorithm support:",
                style="color:var(--yellow-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key, (ref, prof) in self.missing.items():

            if ref:
                ref = "Supported" if ref.support else "Unsupported"
            else:
                ref = "Result missing"
            if prof:
                prof = "Supported" if prof.support else "Unsupported"
            else:
                prof = "Result missing"

            data.append([key, ref, prof])

        sm_div = show_hide_div("support_missing_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms whose results were missing on one "
                "of the cards. They should be checked manually."
            )
            table(data, header,
                  green_value="Supported",
                  red_value="Unsupported")
Example #11
    def output_matching(self, ref_name, prof_name):
        """Output matching section"""

        tags.h4("List of algorithms with matching results:",
                style="color:var(--green-color);display:inline-block")

        header = ["Algorithm",
                  ref_name + " (reference)",
                  prof_name + " (profiled)"]

        data = []
        for key, (ref, prof) in self.matching.items():
            if ref.error:
                reftext = "Failed: " + str(ref.error)
            else:
                reftext = f"{ref.operation_avg():.2f} ms"

            if prof.error:
                proftext = "Failed: " + str(prof.error)
            else:
                proftext = f"{prof.operation_avg():.2f} ms"

            data.append([key, reftext, proftext])

        sm_div = show_hide_div("performance_matching_div", hide=True)

        with sm_div:
            tags.p(
                "These are the algorithms in which the cards performed "
                "similarly, or on which they failed with the same error."
            )
            table(data, header,
                  green_value="ms",
                  red_value="Failed")