Example #1
File: Task.py Project: wngjan/lnst
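# Note: excerpt from lnst's Task.py; PerfRepoRESTAPI, PerfRepoResult and
# TaskError are defined elsewhere in the project, and the original module
# imports are not shown here.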
class PerfRepoAPI(object):
    def __init__(self):
        self._rest_api = None

    def connected(self):
        return self._rest_api is not None

    def connect(self, url, username, password):
        self._rest_api = PerfRepoRESTAPI(url, username, password)

    def new_result(self, testUid, name):
        result = PerfRepoResult(testUid, name)
        return result

    def save_result(self, result):
        if not isinstance(result, PerfRepoResult):
            raise TaskError("Parameter result must be an instance "\
                            "of PerfRepoResult")
        elif self._rest_api is None:
            raise TaskError("Not connected to PerfRepo.")
        else:
            self._rest_api.testExecution_create(result.get_testExecution())

    def get_baseline(self, report_id):
        report = self._rest_api.report_get_by_id(report_id)
        return report.get_baseline()

    def compare_to_baseline(self, result, report_id, metric_name):
        baseline = self.get_baseline(report_id)
        baseline_exec_id = baseline["execId"]
        baseline_testExec = self._rest_api.testExecution_get(baseline_exec_id)
        result_testExec = result.get_testExecution()

        return self.compare_testExecutions(result_testExec,
                                           baseline_testExec,
                                           metric_name)

    def compare_testExecutions(self, first, second, metric_name):
        first_value = first.get_value(metric_name)
        first_min = first.get_value(metric_name + "_min")
        first_max = first.get_value(metric_name + "_max")

        second_value = second.get_value(metric_name)
        second_min = second.get_value(metric_name + "_min")
        second_max = second.get_value(metric_name + "_max")

        # The baseline's comparator decides the direction of the check.
        comp = second_value.get_comparator()
        if comp == "HB":
            # "HB" (higher is better): fail only if the baseline minimum
            # exceeds the result maximum.
            if second_min.get_result() > first_max.get_result():
                return False
            return True
        elif comp == "LB":
            # "LB" (lower is better): fail only if the result minimum
            # exceeds the baseline maximum.
            if first_min.get_result() > second_max.get_result():
                return False
            return True
        else:
            # Unknown comparator: report failure.
            return False
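A minimal usage sketch for this variant, assuming a reachable PerfRepo server; the URL, credentials, test UID, report id, and metric name below are placeholders:

api = PerfRepoAPI()
api.connect("https://perfrepo.example.com", "user", "password")

if api.connected():
    # Build a result for an existing test UID; how measured values are
    # attached to its TestExecution depends on the PerfRepoResult API,
    # which this excerpt does not show.
    result = api.new_result("example_test_uid", "nightly run")
    api.save_result(result)

    # True means the new result did not fall on the wrong side of the
    # baseline, given the metric's "HB"/"LB" comparator.
    passed = api.compare_to_baseline(result, "example_report_id", "throughput")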
Example #2
File: Task.py Project: wngjan/lnst
    def connect(self, url, username, password):
        self._rest_api = PerfRepoRESTAPI(url, username, password)
Example #3
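# Note: a later revision of the same class. PerfRepoMapping, Noop,
# PerfRepoBaseline and the logging module come from the surrounding
# code, which this excerpt omits.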
class PerfRepoAPI(object):
    def __init__(self):
        self._rest_api = None
        self._mapping = None

    def load_mapping(self, file_path):
        try:
            self._mapping = PerfRepoMapping(file_path.resolve())
        except Exception:
            logging.error("Failed to load PerfRepo mapping file '%s'" %\
                          file_path.abs_path())
            self._mapping = None

    def get_mapping(self):
        return self._mapping

    def connected(self):
        return self._rest_api is not None and self._mapping is not None

    def connect(self, url, username, password):
        self._rest_api = PerfRepoRESTAPI(url, username, password)

    def new_result(self, mapping_key, name, hash_ignore=None):
        # Use None instead of a mutable default argument; the list is
        # created fresh on every call.
        if hash_ignore is None:
            hash_ignore = []

        if not self.connected():
            return Noop()

        mapping_id = self._mapping.get_id(mapping_key)
        if mapping_id is None:
            logging.debug("Test key '%s' has no mapping defined!" % mapping_key)
            return Noop()

        logging.debug("Test key '%s' mapped to id '%s'" % (mapping_key,
                                                           mapping_id))

        test = self._rest_api.test_get_by_id(mapping_id, log=False)
        if test is None:
            test = self._rest_api.test_get_by_uid(mapping_id, log=False)

        if test is not None:
            test_url = self._rest_api.get_obj_url(test)
            logging.debug("Found Test with id='%s' and uid='%s'! %s" % \
                            (test.get_id(), test.get_uid(), test_url))
        else:
            logging.debug("No Test with id or uid '%s' found!" % mapping_id)
            return Noop()

        logging.info("Creating a new result object for PerfRepo")
        result = PerfRepoResult(test, name, hash_ignore)
        return result

    def save_result(self, result):
        if self._rest_api is None:
            raise TaskError("Not connected to PerfRepo.")
        elif isinstance(result, Noop):
            return
        elif isinstance(result, PerfRepoResult):
            if len(result.get_testExecution().get_values()) < 1:
                logging.debug("PerfRepoResult with no result data, skipping "\
                              "send to PerfRepo.")
                return
            h = result.generate_hash()
            logging.debug("Adding hash '%s' as tag to result." % h)
            result.add_tag(h)
            logging.info("Sending TestExecution to PerfRepo.")
            self._rest_api.testExecution_create(result.get_testExecution())

            report_id = self._mapping.get_id(h)
            if not report_id and result.get_testExecution().get_id() is not None:
                logging.debug("No mapping defined for hash '%s'" % h)
                logging.debug("If you want to create a new report and set "\
                              "this result as the baseline run this command:")
                cmd = "perfrepo report create"
                cmd += " name REPORTNAME"

                test = result.get_test()
                cmd += " chart CHARTNAME"
                cmd += " testid %s" % test.get_id()
                series_num = 0
                for m in test.get_metrics():
                    cmd += " series NAME%d" % series_num
                    cmd += " metric %s" % m.get_id()
                    cmd += " tags %s" % h
                    series_num += 1
                cmd += " baseline BASELINENAME"
                cmd += " execid %s" % result.get_testExecution().get_id()
                cmd += " metric %s" % test.get_metrics()[0].get_id()
                logging.debug(cmd)
        else:
            raise TaskError("Parameter result must be an instance "\
                            "of PerfRepoResult")

    def get_baseline(self, report_id):
        if report_id is None:
            return Noop()

        report = self._rest_api.report_get_by_id(report_id, log=False)
        if report is None:
            logging.debug("No report with id %s found!" % report_id)
            return Noop()
        logging.debug("Report found: %s" %\
                        self._rest_api.get_obj_url(report))

        baseline = report.get_baseline()

        if baseline is None:
            logging.debug("No baseline set for report %s" %\
                            self._rest_api.get_obj_url(report))
            return Noop()

        baseline_exec_id = baseline["execId"]
        baseline_testExec = self._rest_api.testExecution_get(baseline_exec_id,
                                                             log=False)

        logging.debug("TestExecution of baseline: %s" %\
                        self._rest_api.get_obj_url(baseline_testExec))
        return PerfRepoBaseline(baseline_testExec)

    def get_baseline_of_result(self, result):
        if not isinstance(result, PerfRepoResult):
            return Noop()

        res_hash = result.generate_hash()
        logging.debug("Result hash is: '%s'" % res_hash)

        report_id = self._mapping.get_id(res_hash)
        if report_id is not None:
            logging.debug("Hash '%s' maps to report id '%s'" % (res_hash,
                                                               report_id))
        else:
            logging.debug("Hash '%s' has no mapping defined!" % res_hash)
            return Noop()

        baseline = self.get_baseline(report_id)

        if baseline.get_texec() is None:
            logging.debug("No baseline set for results with hash %s" % res_hash)
        return baseline

    def compare_to_baseline(self, result, report_id, metric_name):
        baseline_testExec = self.get_baseline(report_id)
        result_testExec = result.get_testExecution()

        return self.compare_testExecutions(result_testExec,
                                           baseline_testExec,
                                           metric_name)

    def compare_testExecutions(self, first, second, metric_name):
        first_value = first.get_value(metric_name)
        first_min = first.get_value(metric_name + "_min")
        first_max = first.get_value(metric_name + "_max")

        second_value = second.get_value(metric_name)
        second_min = second.get_value(metric_name + "_min")
        second_max = second.get_value(metric_name + "_max")

        # The baseline's comparator decides the direction of the check.
        comp = second_value.get_comparator()
        if comp == "HB":
            # "HB" (higher is better): fail only if the baseline minimum
            # exceeds the result maximum.
            if second_min.get_result() > first_max.get_result():
                return False
            return True
        elif comp == "LB":
            # "LB" (lower is better): fail only if the result minimum
            # exceeds the baseline maximum.
            if first_min.get_result() > second_max.get_result():
                return False
            return True
        else:
            # Unknown comparator: report failure.
            return False
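A sketch of the mapping-driven workflow in this later variant. Here mapping_file stands in for the lnst path object that load_mapping() expects (it must provide resolve() and abs_path()); the mapping key, result name, URL, and credentials are placeholders:

api = PerfRepoAPI()
api.load_mapping(mapping_file)
api.connect("https://perfrepo.example.com", "user", "password")

# When the key has no mapping, or no matching Test exists on the server,
# new_result() returns a Noop object; save_result() silently skips Noop
# results, so the chain degrades gracefully.
result = api.new_result("tcp_stream", "nightly run")
api.save_result(result)

# Look up the baseline that the result's hash maps to, if any; a Noop
# comes back when no mapping, report, or baseline exists.
baseline = api.get_baseline_of_result(result)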