import datetime
import jinja2
import os

import scenarioResult as sr
from scenarios import config as cf

# manage conf
import utils.reporting_utils as rp_utils

installers = rp_utils.get_config('general.installers')
versions = rp_utils.get_config('general.versions')
PERIOD = rp_utils.get_config('general.period')

# Logger
logger = rp_utils.getLogger("Yardstick-Status")
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

logger.info("*******************************************")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention = %s days              *" % PERIOD)
logger.info("*                                         *")
logger.info("*******************************************")

# For all the versions
for version in versions:
    # For all the installers
    for installer in installers:
        # get scenarios results data
        scenario_results = rp_utils.getScenarioStatus(installer, version)
        if 'colorado' == version:
import os

# manage conf
import utils.reporting_utils as rp_utils

installers = rp_utils.get_config('general.installers')
items = ["tests", "Success rate", "duration"]

CURRENT_DIR = os.getcwd()

PERIOD = rp_utils.get_config('general.period')
criteria_nb_test = 165
criteria_duration = 1800
criteria_success_rate = 90

logger = rp_utils.getLogger("Tempest")

logger.info("************************************************")
logger.info("*   Generating reporting Tempest_smoke_serial  *")
logger.info("*   Data retention = %s days                   *" % PERIOD)
logger.info("*                                              *")
logger.info("************************************************")

logger.info("Success criteria:")
logger.info("nb tests executed > %s " % criteria_nb_test)
logger.info("test duration < %s s " % criteria_duration)
logger.info("success rate > %s %% " % criteria_success_rate)

# For all the versions
for version in rp_utils.get_config('general.versions'):
    for installer in installers:
        # we consider the Tempest results of the last PERIOD days
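# Illustrative sketch (not part of the original script): one way the three
# success criteria logged above could be combined into a single pass/fail
# verdict for a Tempest run. The helper name and the result dict keys
# ('nb_tests', 'duration', 'success_rate') are assumptions for this example.
def meets_tempest_criteria(result,
                           min_tests=criteria_nb_test,
                           max_duration=criteria_duration,
                           min_success_rate=criteria_success_rate):
    return (result['nb_tests'] > min_tests and
            result['duration'] < max_duration and
            result['success_rate'] > min_success_rate)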
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
import os

import jinja2

import utils.reporting_utils as rp_utils
import utils.scenarioResult as sr

installers = rp_utils.get_config('general.installers')
versions = rp_utils.get_config('general.versions')
PERIOD = rp_utils.get_config('general.period')

# Logger
logger = rp_utils.getLogger("Qtip-Status")
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

logger.info("*******************************************")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention = {} days              *".format(PERIOD))
logger.info("*                                         *")
logger.info("*******************************************")


def prepare_profile_file(version):
    profile_dir = './display/{}/qtip'.format(version)
    if not os.path.exists(profile_dir):
        os.makedirs(profile_dir)

    profile_file = "{}/{}/scenario_history.txt".format(profile_dir, version)
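    # Descriptive note (illustrative): with the format strings above this
    # resolves to ./display/<version>/qtip/<version>/scenario_history.txt,
    # since profile_dir already embeds the version.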
#
import datetime
import jinja2
import os

# manage conf
import utils.reporting_utils as rp_utils
import utils.scenarioResult as sr

installers = rp_utils.get_config('general.installers')
versions = rp_utils.get_config('general.versions')
PERIOD = rp_utils.get_config('general.period')

# Logger
logger = rp_utils.getLogger("Storperf-Status")
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

logger.info("*******************************************")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention = %s days              *" % PERIOD)
logger.info("*                                         *")
logger.info("*******************************************")

# retrieve the list of storperf tests
storperf_tests = rp_utils.get_config('storperf.test_list')
logger.info("Storperf tests: %s" % storperf_tests)

# For all the versions
for version in versions:
    # For all the installers
from urllib2 import Request, urlopen, URLError
import json

import jinja2

# manage conf
import utils.reporting_utils as rp_utils

logger = rp_utils.getLogger("vIMS")


def sig_test_format(sig_test):
    nbPassed = 0
    nbFailures = 0
    nbSkipped = 0
    for data_test in sig_test:
        if data_test['result'] == "Passed":
            nbPassed += 1
        elif data_test['result'] == "Failed":
            nbFailures += 1
        elif data_test['result'] == "Skipped":
            nbSkipped += 1
    total_sig_test_result = {}
    total_sig_test_result['passed'] = nbPassed
    total_sig_test_result['failures'] = nbFailures
    total_sig_test_result['skipped'] = nbSkipped
    return total_sig_test_result


period = rp_utils.get_config('general.period')
versions = rp_utils.get_config('general.versions')
url_base = rp_utils.get_config('testapi.url')
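# Usage note (illustrative, not from the original sources): sig_test is
# expected to be a list of dicts carrying a 'result' field, e.g.
#   sig_test_format([{'result': 'Passed'},
#                    {'result': 'Failed'},
#                    {'result': 'Skipped'}])
# returns {'passed': 1, 'failures': 1, 'skipped': 1}.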
#
import datetime
import jinja2
import os
import sys
import time

import testCase as tc
import scenarioResult as sr

# manage conf
import utils.reporting_utils as rp_utils

"""Functest reporting status"""

# Logger
logger = rp_utils.getLogger("Functest-Status")

# Initialization
testValid = []
otherTestCases = []
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

# init just connection_check to get the list of scenarios
# as all the scenarios run connection_check
healthcheck = tc.TestCase("connection_check", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
cf = rp_utils.get_config('functest.test_conf')
period = rp_utils.get_config('general.period')
versions = rp_utils.get_config('general.versions')
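# Illustrative sketch (assumption, not from the original sources): the dotted
# keys passed to rp_utils.get_config() above suggest a YAML configuration
# roughly shaped as follows; the file name, values and exact layout are
# hypothetical.
#
#   general:
#     installers: [apex, compass, fuel, joid]
#     versions: [master, colorado]
#     period: 10
#   functest:
#     test_conf: <URL of the Functest test case configuration>
#   testapi:
#     url: <base URL of the test results API>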