def __init__(self, config, existing_reqs=None, quick_query=True, ):
    """
    Cache the relevant config settings and pre-fetch existing Polarion TestCases.

    :param config: configuration object carrying prefixes, template id, result path,
                   project id and the testcases/requirements queries
    :param existing_reqs: optional pre-fetched Requirement work items (lazy-loaded
                          elsewhere when None)
    :param quick_query: when True, later lookups use the cached requirement list
    """
    self.config = config
    self.project_id = config.project_id
    self.result_path = config.result_path
    self.template_id = config.testrun_template
    self.testrun_prefix = config.testrun_prefix
    self.testrun_suffix = config.testrun_suffix
    self.quick_query = quick_query
    self._existing_requirements = existing_reqs
    self.testcases_query = (config.testcases_query
                            if config.testcases_query is not None else [])

    # Run every configured base query up front so later per-test lookups can
    # hit this cached list instead of querying Polarion repeatedly.
    found = []
    for base in self.testcases_query:
        log.info("Performing Polarion query of {}".format(base))
        found.extend(query_test_case(base))
    self.existing_test_cases = found
def collect(self):
    """
    Parse the TestNG suites and create/refresh the Polarion TestCase for each test.

    Side effects: sets self.tests to {suite_name: [TestNG test objects]} where each
    test object has had its polarion_tc attribute populated.

    :return: None
    """
    testng_suites = self.transformer.parse_suite()
    self.tests = testng_suites
    for k, tests in testng_suites.items():
        not_skipped = tests
        if not self.transformer.config.test_case_skips:
            # Wrap in list() so len()/shuffle below work on py2 and py3 alike
            not_skipped = list(filter(lambda x: x.status != SKIP, tests))
        # TODO: It would be nice to have show which Tests got skipped due to dependency on another
        # test that failed, or because of a BZ blocker
        if TESTING:
            import random
            random.shuffle(not_skipped)
            # BUG FIX: itz.take returns a lazy iterator; materialize it so the
            # len() call below does not raise TypeError
            not_skipped = list(itz.take(5, not_skipped))
        total = len(not_skipped) - 1
        updated = []
        for i, test_case in enumerate(not_skipped, start=0):
            log.info("Getting TestCase: {} out of {}".format(i, total))
            pyl_tc = test_case.create_polarion_tc()
            test_case.polarion_tc = pyl_tc
            updated.append(test_case)
        self.tests[k] = updated
    # Sanity pass: flag any test that still has no Polarion TestCase attached
    for k, tests in self.tests.items():
        for tc in tests:
            if tc.polarion_tc is None:
                log.info("WTF. {} has tc.polarion_tc is None".format(tc.title))
def link_requirements(self, tc_obj):
    """
    Ensure this test's Requirement is linked to the given TestCase exactly once.

    :param tc_obj: pylarion TestCase work item to inspect and (de)link
    :return: None
    """
    linked_items = tc_obj.linked_work_items
    if not self.requirement:
        log.warning("No requirement exists for this test case")
        return

    # Collect every already-linked work item id that matches our requirement
    linked_ids = [li.work_item_id for li in linked_items]
    duplicates = [wid for wid in linked_ids if wid == self.requirement]

    if len(duplicates) == 0:
        log.info("Linking requirement {} to TestCase {}".format(
            self.requirement, tc_obj.work_item_id))
        tc_obj.add_linked_item(self.requirement, "verifies")
    elif len(duplicates) > 1:
        # Linked more than once: strip the extra links, leaving a single one
        warning = "Found duplicate linked Requirements {} for TestCase {}. Cleaning...."
        log.warning(warning.format(itz.first(duplicates), tc_obj.work_item_id))
        for _ in range(len(duplicates) - 1):
            tc_obj.remove_linked_item(self.requirement, "verifies")
    else:
        already = "Requirement {} already linked to TestCase {}"
        log.info(already.format(itz.first(duplicates), tc_obj.work_item_id))
def create_test_record(self, test_run, run_by="stoner"):
    """
    Adds a TestRecord to a TestRun and associates it with the TestCase

    :param test_run: a pylarion TestRun object
    :param run_by: (str) identifies who executed the test
    :return: None (returns early without a record when status converts to "waiting")
    """
    tc_id = self.polarion_tc.work_item_id
    result = self.status
    executed_by = run_by

    def comment_string(i, s):
        # One entry per step: "<index> <status>", plus exception details if present
        base = "{} {}\t".format(i, s.status)
        if s.exception:
            exc_info = s.exception.get("message", "") + "<br>" + \
                s.exception.get("stack_trace", "") + "<br>"
            base += "<br>" + exc_info
        return base

    comment = "<br>".join(
        comment_string(i, step) for i, step in enumerate(self.step_results))

    # Get the first step for our start time, and the last for our finish
    formatter = "%Y-%m-%dT%H:%M:%SZ"
    strptime = datetime.datetime.strptime
    if self.step_results:
        last_step = itz.last(self.step_results)
        dt_start = strptime(self.step_results[0].started, formatter)
        dt_finish = strptime(last_step.started, formatter)
        time_delta = dt_finish - dt_start
        # BUG FIX: timedelta.seconds discards whole days (wraps at 86400);
        # total_seconds() gives the full elapsed time
        duration = time_delta.total_seconds() + float(last_step.duration)
    else:
        # No step results: fall back to the suite-level attributes
        duration = float(self.attributes["duration-ms"])
        dt_start = strptime(self.attributes["started-at"], formatter)

    result = convert_status(result)
    if result == "waiting":
        log.info("Skipping TestRecord for {} due to status of SKIP".format(
            tc_id))
        return

    comment = unicode(comment, encoding='utf-8')
    kwds = {
        "test_comment": comment,
        "test_case_id": tc_id,
        "test_result": result,
        "executed": dt_start,
        "duration": duration,
        "executed_by": executed_by
    }
    log.info("Creating TestRecord for {}".format(self.title))
    self.add_test_record(test_run, **kwds)
def parse_test_methods(self, test, titles=None, tests=None, requirement=None):
    """
    Does the parsing of the <test-method> element

    Walks each <class> under the given <test> element, creating one
    TNGTestMethod wrapper per non-config <test-method>.  A new
    TestNGToPolarion object is created the first time a title is seen;
    repeated titles (data-provider iterations — see the comment below)
    only append their step result to the already-created object.

    :param test: xml <test> element whose <class> children are iterated
    :param titles: set of test-case titles already seen (shared across calls)
    :param tests: accumulator list of TestNGToPolarion objects (shared across calls)
    :param requirement: optional Requirement work item to link test cases against
    :return: (titles, tests) tuple — the (mutated) accumulator collections
    """
    # NOTE: mutable accumulators are deliberately passed in so successive
    # calls (one per <test> element) share the same titles/tests
    if titles is None:
        titles = set()
    if tests is None:
        tests = []
    tc_prefix = self.config.testcase_prefix
    req_work_id = ""
    if requirement:
        req_work_id = requirement.work_item_id
    for klass in test.iter("class"):
        #tc_query = '"{}"'.format(tc_prefix + klass.attrib["name"])
        t_class = TNGTestClass(test, klass.attrib,
                               '"{}"'.format(klass.attrib["name"]), tc_prefix)
        testng = None
        testng_test_name = tc_prefix + test.attrib["name"]
        last_test_method = None
        # iteration counts repeated rows of the same test (data-provider runs)
        iteration = 1
        cached_lookup = self.existing_test_cases
        for test_method in klass:
            # skip TestNG configuration methods (setup/teardown)
            if "is-config" in test_method.attrib and test_method.attrib["is-config"] == "true":
                continue
            tm = TNGTestMethod(test_method, t_class, cached_query=cached_lookup,
                               tc_prefix=tc_prefix)
            # track when the method name changes between consecutive elements
            if last_test_method is None:
                last_test_method = tm.method_name
            elif tm.method_name != last_test_method:
                last_test_method = tm.method_name
            test_case_title = tm.full_name
            # first time we see this title, restart the iteration counter
            if test_case_title not in titles:
                iteration = 1
            template = "\tIteration {}: parsing {} {}"
            log.info(template.format(iteration, test_case_title,
                                     tm.attribs['started-at']))
            iteration += 1
            if test_case_title not in titles:
                testng = tm.make_testngtopolarion(req_work_id, testng_test_name)
                titles.add(test_case_title)
                tests.append(testng)
            else:
                # We only get multiple test_case_title if it was a data-provider test so append results
                testng.step_results.append(tm.result)
    return titles, tests
def create_requirement(project_id, title, description="", reqtype="functional",
                       severity="should_have"):
    """
    Formerly auto-created a Polarion Requirement; creation is now disabled.

    Always logs a warning and returns None regardless of arguments.  The old
    lookup-or-create logic lived behind an unreachable `if True: ... else:`
    guard; the dead branch has been removed (see VCS history to restore it).

    :param project_id: (str) Polarion project id (unused while disabled)
    :param title: (str) Requirement title (unused while disabled)
    :param description: (str) Requirement description (unused while disabled)
    :param reqtype: (str) requirement type (unused while disabled)
    :param severity: (str) requirement severity (unused while disabled)
    :return: None
    """
    log.warning("No longer creating a requirement automatically")
    return None
def create_test_record(self, test_run, run_by="stoner"):
    """
    Adds a TestRecord to a TestRun and associates it with the TestCase

    :param test_run: a pylarion TestRun object
    :param run_by: (str) identifies who executed the test
    :return: None (returns early without a record when status converts to "waiting")
    """
    tc_id = self.polarion_tc.work_item_id
    result = self.status
    executed_by = run_by

    def comment_string(i, s):
        # One entry per step: "<index> <status>", plus exception details if present
        base = "{} {}\t".format(i, s.status)
        if s.exception:
            exc_info = s.exception.get("message", "") + "<br>" + \
                s.exception.get("stack_trace", "") + "<br>"
            base += "<br>" + exc_info
        return base

    comment = "<br>".join(comment_string(i, step)
                          for i, step in enumerate(self.step_results))

    # Get the first step for our start time, and the last for our finish
    formatter = "%Y-%m-%dT%H:%M:%SZ"
    strptime = datetime.datetime.strptime
    if self.step_results:
        last_step = itz.last(self.step_results)
        dt_start = strptime(self.step_results[0].started, formatter)
        dt_finish = strptime(last_step.started, formatter)
        time_delta = dt_finish - dt_start
        # BUG FIX: timedelta.seconds discards whole days (wraps at 86400);
        # total_seconds() gives the full elapsed time
        duration = time_delta.total_seconds() + float(last_step.duration)
    else:
        # No step results: fall back to the suite-level attributes
        duration = float(self.attributes["duration-ms"])
        dt_start = strptime(self.attributes["started-at"], formatter)

    result = convert_status(result)
    if result == "waiting":
        log.info("Skipping TestRecord for {} due to status of SKIP".format(tc_id))
        return

    comment = unicode(comment, encoding='utf-8')
    kwds = {"test_comment": comment,
            "test_case_id": tc_id,
            "test_result": result,
            "executed": dt_start,
            "duration": duration,
            "executed_by": executed_by}
    log.info("Creating TestRecord for {}".format(self.title))
    self.add_test_record(test_run, **kwds)
def parse_tests(self, suite): """ This function dives into the <suite> element, looking for it's <test> children. Once it finds a <test> element, it grabs the name attribute and uses the name in order to generate a title for a Requirement. It then kicks off parsing of the <test-methods> passing in a list of the tests (which will contain all the generated TestNGToPolarion objects) and the titles of all the test methods. :param req_prefix: :param suite: :return: """ log.info("Getting tests from suite {}...".format(suite.attrib["name"])) tests = self.parse_requirements(suite) log.info("End parsing of xml results file") return tests
def parse_suite(self):
    """
    Parse the result file's <suite> elements into per-suite test collections.

    Each <suite> is handed to parse_tests, which builds (or updates) the
    Requirement/TestCase objects for its contents.

    :return: dict mapping suite name -> list of parsed tests
    """
    log.info("Beginning parsing of {}...".format(self.result_path))
    return {suite.attrib["name"]: self.parse_tests(suite)
            for suite in self.parse_by_element(self.result_path, "suite")}
def find_matching_polarion_tc(self):
    """
    Uses the cached lookup to find a matching class.methodname

    Returns the memoized TestCase when one was already resolved; otherwise
    scans the cached matches for a title equal to this method's full name.

    :return: pylarion.work_item.TestCase, or None when nothing matches
    """
    matches = self.parent_class.find_me(self.method_name,
                                        existing_tests=self.cached,
                                        multiple=True)
    if self._p_testcase is not None:
        return self._p_testcase

    from pylarion.work_item import TestCase as PylTestCase
    for candidate in matches:
        # Strip the configured prefix so titles compare against full_name
        if candidate.title.replace(self.tc_prefix, "") == self.full_name:
            log.info("Found existing TestCase in Polarion: {}".format(candidate.title))
            return PylTestCase(uri=candidate.uri)
    return None
def link_requirements(self, tc_obj):
    """
    Ensure this test's Requirement is linked to the given TestCase exactly once.

    :param tc_obj: pylarion TestCase work item whose links are examined/mutated
    :return: None
    """
    linked_items = tc_obj.linked_work_items
    if not self.requirement:
        log.warning("No requirement exists for this test case")
    else:
        # Python 2 filter() returns a list of linked ids equal to our requirement
        duplicates = filter(lambda x: x == self.requirement,
                            [li.work_item_id for li in linked_items])
        num_duplicates = len(duplicates)
        if num_duplicates == 0:
            # Not yet linked: add the "verifies" link
            log.info("Linking requirement {} to TestCase {}".format(self.requirement,
                                                                    tc_obj.work_item_id))
            tc_obj.add_linked_item(self.requirement, "verifies")
        elif num_duplicates > 1:
            # Linked more than once: remove all but one link
            msg = "Found duplicate linked Requirements {} for TestCase {}. Cleaning...."
            log.warning(msg.format(itz.first(duplicates), tc_obj.work_item_id))
            for _ in range(len(duplicates) - 1):
                tc_obj.remove_linked_item(self.requirement, "verifies")
        else:
            msg = "Requirement {} already linked to TestCase {}"
            log.info(msg.format(itz.first(duplicates), tc_obj.work_item_id))
def parse_requirements(self, suite):
    """
    The <test> element contains the logical grouping of what the tests are
    testing. So this is a natural place to autogenerate a Requirement. Since
    <test-methods> are children of a <test>, and the <test> element maps to a
    TestNGToPolarion, we will pass the Requirement to the TestNGToPolarion
    object so we can link the TestCase to the Requirement

    :param suite: xml element of <suite>
    :return: list of TestNGToPolarion objects accumulated across all <test> children
    """
    titles = set()
    requirements_set = set()
    # NOTE: req persists across loop iterations — a <test> whose requirement
    # name was already seen reuses the previously looked-up req
    req = None
    tests = []
    prefix = self.config.requirement_prefix
    for test in suite.iter("test"):
        attributes = test.attrib
        requirement_name = testify_requirement_name(attributes["name"], prefix=prefix)
        base_requirement = attributes["name"]
        if requirement_name not in requirements_set:
            # First, check to see if we've got a requirement with this name, and if not, create one
            # query = title_query(requirement_name)
            if self.quick_query:
                req = preq.is_in_requirements(requirement_name, self.existing_requirements)
            else:
                req = preq.is_requirement_exists(requirement_name)
            if not req:
                # create_requirement is disabled elsewhere and returns None,
                # hence the log message below
                req = preq.create_requirement(self.project_id, requirement_name)
            requirements_set.add(requirement_name)
        # CHANGED: We are no longer auto generating
        if req is None:
            log.info("No requirements were found or created")
        # titles/tests are shared accumulators mutated by parse_test_methods
        _, t = self.parse_test_methods(test, titles=titles, tests=tests,
                                       requirement=req)
    return tests
def create_polarion_tc(self):
    """
    Given the pong.TestCase, convert it to the equivalent pylarion.work_item.TestCase

    Either refreshes the already-known Polarion TestCase (prefixing its title
    and normalizing its TestSteps) or creates a brand new one, then links the
    Requirement and pushes the update to Polarion.

    :return: the pylarion TestCase (also stored on self.polarion_tc)
    """
    # Encode unicode description/title to utf-8 byte strings (Python 2)
    t = lambda x: unicode.encode(x, encoding="utf-8", errors="ignore"
                                 ) if isinstance(x, unicode) else x
    desc, title = [t(x) for x in [self.description, self.title]]

    # Check to see if we already have an existing test case
    tc = None
    if self.polarion_tc:
        log.info("Getting TestCase for {}: {}".format(title, desc))
        tc = self.polarion_tc
        self.validate_test(tc)
        if not self.polarion_tc.title.startswith(self.prefix):
            self.polarion_tc.title = self.prefix + self.polarion_tc.title

        # See if the Polarion Test Case has steps. The TestCase will contain a TestSteps array of size 1
        # The step will have 2 columns (or key-value pairs)
        # | step | expectedResult
        # +======+===============
        # | args | PASS
        test_steps = tc.get_test_steps()
        steps = test_steps.steps
        # If this TestCase has more than 1 TestStep, it's the older workaround where a TestStep was a row
        # of data in the 2d array. Moving to the SR2 2015 release with parameterized testing instead
        if len(steps) > 1:
            tc.set_test_steps()  # Empty the TestSteps
        if len(steps) == 0:
            step = self.make_polarion_test_step()
            tc.set_test_steps([step])
    else:
        log.info("Generating new TestCase for {} : {}".format(title, desc))
        # Workaround for an upstream encoding bug (issue 949, presumably in
        # pylarion — TODO confirm): scrub descriptions that will not round-trip
        # through utf-8, falling back to an empty unicode description
        WORKAROUND_949 = False
        try:
            self.description.decode(encoding="utf-8")
        except UnicodeError:
            raw = self.description.encode("utf-8")
            self.description = unicode(raw, encoding="utf-8", errors="replace")
            try:
                self.description.decode(encoding="utf-8")
            except UnicodeError:
                WORKAROUND_949 = True
        if WORKAROUND_949:
            self.description = unicode("", encoding="utf-8")
        from pylarion.work_item import TestCase as PylTestCase
        tc = PylTestCase.create(self.project, self.title, self.description,
                                **TC_KEYS)

        # Create PylTestSteps if needed and add it
        if self.step_results:
            step = self.make_polarion_test_step()
            tc.set_test_steps([step])

    if not tc:
        raise Exception("Could not create TestCase for {}".format(
            self.title))
    else:
        self.polarion_tc = tc
        self.link_requirements(tc)
        self.polarion_tc.update()
    return tc
def get_default_projectid():
    """
    Log and return the default Polarion project id.

    Previously this "get" function only logged the value and implicitly
    returned None; it now also returns it (backward compatible — callers
    that ignored the return value are unaffected).

    :return: the default project id as reported by get_default_project()
    """
    project_id = get_default_project()
    log.info(project_id)
    return project_id
def query_testcase(query):
    """
    Run a TestCase query and log each match as "<work_item_id> <title>".

    :param query: query string handed to query_test_case
    :return: None (results are only logged)
    """
    for item in query_test_case(query):
        log.info(item.work_item_id + u' ' + item.title)
# Build a JSON map file from (class_meth, TestCase) pairs in `matched`,
# cross-referencing against the `reflected` lookup of reflected class methods.
# NOTE(review): `matched` and `reflected` are defined earlier in the file,
# outside this chunk — presumably {class_name: [method-info dicts]} for
# `reflected`; confirm against the surrounding code.
mapped = []
for class_meth, tc in matched:
    mapping = {}
    # Fully qualify the class: "rhsm.<group>.tests.<ClassName>"
    class_prefix = "rhsm.{}.tests.".format(class_meth[0])
    klass, meth = class_meth[1].split(".")
    klass = class_prefix + klass
    try:
        class_group = reflected[klass]
    except KeyError as ke:
        # Class not present in the reflected data; skip it
        log.warning(ke.message)
        continue
    found = list(filter(lambda m: m['methodName'] == meth, class_group))
    if not found:
        log.info("No Polarion test case was found for {}.{}".format(klass,meth))
        continue
    ptc = toolz.first(found)
    fullname = "{}.{}".format(klass, meth)
    log.info("Found matching test case for {}".format(fullname))
    # Record the TestCase id plus all Requirement ids it is linked to
    reqs = [req.work_item_id for req in tc.linked_work_items]
    mapping[fullname] = {"testcase": tc.work_item_id, "requirements": reqs}
    mapped.append(mapping)
with open("map-file.json", "w") as mapper:
    json.dump(mapped, mapper, sort_keys=True, indent=2, separators=(',', ':'))
# NOTE(review): this loop appears to continue beyond the visible chunk
for clazz, maps in reflected.items():
    for m in maps:
        methname = m['methodName']
def inner(*args, **kwargs):
    """
    Wrapper that logs a FIXME banner before delegating to the wrapped function.

    ``fn`` and ``msg`` are free variables captured from the enclosing
    decorator scope (not visible in this chunk).
    """
    log.info("===================FIXME========================")
    log.info("{} {}".format(fn.__name__, msg))
    log.info("================================================")
    return fn(*args, **kwargs)
def existing_requirements(self):
    """
    Lazily fetch and cache the Requirement work items from Polarion.

    The query configured in config.requirements_query runs at most once;
    subsequent calls return the cached result.

    :return: the cached Requirements query result
    """
    if self._existing_requirements is not None:
        return self._existing_requirements
    log.info("Performing Requirements query: {}".format(self.config.requirements_query))
    self._existing_requirements = query_requirement(self.config.requirements_query)
    return self._existing_requirements
def export(result=None):
    """
    This is the method that actually does the processing of the configuration map
    and does what needs to be done

    Handles the one-shot CLI query flags (exiting afterwards), optionally
    rewrites the project id in .pylarion, then runs the Transformer/Exporter
    pipeline to create or update a TestRun, restoring .pylarion at the end
    when it was modified.

    :param result: optional pre-built config map; when None, kickstart() builds it
    :return: None
    """
    if result is None:
        result = kickstart()

    translate_to_cli = cli_print(result["config"])
    log.info("Calling equivalent: python -m pong.exporter {}".format(translate_to_cli))

    cli_cfg = result["cli_cfg"]
    args = cli_cfg.args
    config = result["config"]

    # Save off our original .pylarion in case the user passes in a project-id that is different
    # If the user selects --set-project-id, changes from -p are permanent
    reset_project_id = False
    using_pylarion_path = config.pylarion_path
    original_project_id = cli_cfg.original_project_id

    # FIXME: Turn these into functions and decorate them
    if args.query_testcase:
        tests = query_test_case(args.query_testcase)
        for test in tests:
            msg = test.work_item_id + " " + test.title
            log.info(msg)
    if args.get_default_project_id:
        log.info(get_default_project())
    if args.set_project:
        reset_project_id = True
        CLIConfigurator.set_project_id(config.pylarion_path, config.set_project)
    if args.get_latest_testrun:
        tr = get_latest_test_run(args.get_latest_testrun)
        for k, v in make_iterable(tr):
            print "{}={}".format(k, v)
    # Any of the pure-query flags means we are done: exit without exporting
    if any([args.query_testcase, args.get_default_project_id,
            args.get_latest_testrun]):
        sys.exit(0)

    # Get the project_id. If the passed in value is different, we need to edit the .pylarion file
    default_project_id = cli_cfg.original_project_id
    if config.project_id != default_project_id:
        CLIConfigurator.set_project_id(using_pylarion_path, config.project_id)
    # NOTE(review): default_queries is assigned but not used in this function
    default_queries = [] if args.testcases_query is None else args.testcases_query
    transformer = Transformer(config)
    suite = Exporter(transformer)

    # Once the suite object has been initialized, generate a test run with associated test records
    if not config.generate_only:
        if config.update_run:
            update_id = config.update_run
            log.info("Updating test run {}".format(update_id))
            tr = Exporter.get_test_run(update_id)
            suite.update_test_run(tr)
        else:
            suite.create_test_run(config.testrun_template)
        log.info("TestRun information completed to Polarion")

    if reset_project_id:
        # Restore the original .pylarion from the .bak copy; fall back to
        # rewriting the project id directly if the move fails
        try:
            import shutil
            backup = using_pylarion_path + ".bak"
            shutil.move(backup, using_pylarion_path)
        except Exception as ex:
            CLIConfigurator.set_project_id(using_pylarion_path, original_project_id)
def create_test_run(self, template_id, test_run_base=None, runner=None):
    """
    Creates a new Polarion TestRun

    :param template_id: id of the template to use for TestRun
    :param test_run_base: a str to look up most recent TestRuns (eg "Jenkins Run"
                          if the full name of TestRuns is "Jenkins Run 200"
    :param runner: str of the user id (eg stoner, not "Sean Toner")
    :return: None
    """
    from pylarion.test_run import TestRun
    runner = self.get_runner(runner)
    tr_temp = self.get_template(template_id)
    log.info(tr_temp.plannedin)
    for s, testngs in self.tests.items():
        if not testngs:
            continue

        if test_run_base is None:
            base_name = self.transformer.generate_base_testrun_id(s)
        else:
            base_name = test_run_base

        # Find our latest run. If it doesn't exist, we'll generate one
        tr = get_latest_test_run(base_name)
        if tr:
            new_id = make_test_run_id_from_latest(tr)
        else:
            base_name = remove_run(base_name)
            new_id = base_name + " Run 1"
        log.info("Creating new Test Run ID: {}".format(new_id))

        # plannedin/assignee come from config, falling back to the template;
        # missing values are hard errors (raised below)
        plannedin = self.transformer.config.testrun_plannedin
        assignee = self.transformer.config.testrun_assignee

        retries = 3
        while retries > 0:
            retries -= 1
            if not plannedin:
                if hasattr(tr_temp, "plannedin") and tr_temp.plannedin:
                    plannedin = tr_temp.plannedin
                else:
                    raise PlannedinException("No plannedin value in template or from config")
            if not assignee:
                if hasattr(tr_temp, "assignee") and tr_temp.assignee:
                    assignee = tr_temp.assignee
                else:
                    raise AssigneeException("No assignee value in template or from config")
            try:
                test_run = TestRun.create(self.project, new_id, template_id,
                                          plannedin=plannedin, assignee=assignee)
                break
            except PlannedinException as pex:
                log.error(pex.message)
                raise pex
            except AssigneeException as aex:
                log.error(aex.message)
                raise aex
            except Exception as ex:
                # Any other failure: retry up to `retries` more times
                log.warning("Retrying {} more times".format(retries))
        else:
            # while/else: loop exhausted without break → TestRun.create never succeeded
            raise Exception("Could not create a new TestRun")

        # Fill in the run metadata, attach a record per test, then finish
        test_run.status = "inprogress"
        test_run.variant = [self.transformer.config.distro.variant.lower()]
        test_run.jenkinsjobs = self.transformer.config.testrun_jenkinsjobs
        test_run.notes = self.transformer.config.testrun_notes
        test_run.arch = [self.transformer.config.distro.arch.replace("_", "")]
        test_run.group_id = self.transformer.config.testrun_group_id
        for tc in testngs:
            tc.create_test_record(test_run, run_by=runner)
        test_run.status = "finished"
        test_run.update()
        log.info("Created test run for {}".format(new_id))
def create_polarion_tc(self):
    """
    Given the pong.TestCase, convert it to the equivalent pylarion.work_item.TestCase

    Either refreshes the already-known Polarion TestCase (prefixing its title
    and normalizing its TestSteps) or creates a brand new one, then links the
    Requirement and pushes the update to Polarion.

    :return: the pylarion TestCase (also stored on self.polarion_tc)
    """
    # Encode unicode description/title to utf-8 byte strings (Python 2)
    t = lambda x: unicode.encode(x, encoding="utf-8", errors="ignore") if isinstance(x, unicode) else x
    desc, title = [t(x) for x in [self.description, self.title]]

    # Check to see if we already have an existing test case
    tc = None
    if self.polarion_tc:
        log.info("Getting TestCase for {}: {}".format(title, desc))
        tc = self.polarion_tc
        self.validate_test(tc)
        if not self.polarion_tc.title.startswith(self.prefix):
            self.polarion_tc.title = self.prefix + self.polarion_tc.title

        # See if the Polarion Test Case has steps. The TestCase will contain a TestSteps array of size 1
        # The step will have 2 columns (or key-value pairs)
        # | step | expectedResult
        # +======+===============
        # | args | PASS
        test_steps = tc.get_test_steps()
        steps = test_steps.steps
        # If this TestCase has more than 1 TestStep, it's the older workaround where a TestStep was a row
        # of data in the 2d array. Moving to the SR2 2015 release with parameterized testing instead
        if len(steps) > 1:
            tc.set_test_steps()  # Empty the TestSteps
        if len(steps) == 0:
            step = self.make_polarion_test_step()
            tc.set_test_steps([step])
    else:
        log.info("Generating new TestCase for {} : {}".format(title, desc))
        # Workaround for an upstream encoding bug (issue 949, presumably in
        # pylarion — TODO confirm): scrub descriptions that will not round-trip
        # through utf-8, falling back to an empty unicode description
        WORKAROUND_949 = False
        try:
            self.description.decode(encoding="utf-8")
        except UnicodeError:
            raw = self.description.encode("utf-8")
            self.description = unicode(raw, encoding="utf-8", errors="replace")
            try:
                self.description.decode(encoding="utf-8")
            except UnicodeError:
                WORKAROUND_949 = True
        if WORKAROUND_949:
            self.description = unicode("", encoding="utf-8")
        from pylarion.work_item import TestCase as PylTestCase
        tc = PylTestCase.create(self.project, self.title, self.description, **TC_KEYS)

        # Create PylTestSteps if needed and add it
        if self.step_results:
            step = self.make_polarion_test_step()
            tc.set_test_steps([step])

    if not tc:
        raise Exception("Could not create TestCase for {}".format(self.title))
    else:
        self.polarion_tc = tc
        self.link_requirements(tc)
        self.polarion_tc.update()
    return tc