def dump_db():
    """Dump all test campaign results from the DB.

    It allows collecting all the results from the DB.

    It could be overriden if the common implementation is not
    suitable.

    The next vars must be set in env:

        * TEST_DB_URL,
        * BUILD_TAG.

    Returns:
        Campaign.EX_OK if results were collected from DB.
        Campaign.EX_DUMP_FROM_DB_ERROR otherwise.
    """
    try:
        url = env.get('TEST_DB_URL')
        req = requests.get(f"{url}?build_tag={env.get('BUILD_TAG')}",
                           headers=testcase.TestCase.headers)
        req.raise_for_status()
        output = req.json()
        Campaign.__logger.debug("data from DB: \n%s", output)
        for i, _ in enumerate(output["results"]):
            for j, _ in enumerate(
                    output["results"][i]["details"]["links"]):
                # Strip the public artifact prefix so the archived links
                # are relative. BUGFIX: the pattern must be an f-string;
                # the previous plain string searched for the literal text
                # "{os.environ['HTTP_DST_URL']}" and never matched.
                output["results"][i]["details"]["links"][j] = re.sub(
                    f"^{os.environ['HTTP_DST_URL']}/*", '',
                    output["results"][i]["details"]["links"][j])
        Campaign.__logger.debug("data to archive: \n%s", output)
        # Archive the (possibly rewritten) results next to the other
        # campaign artifacts, named after the build tag.
        with open(f"{env.get('BUILD_TAG')}.json", "w",
                  encoding='utf-8') as dfile:
            json.dump(output, dfile)
    except Exception:  # pylint: disable=broad-except
        Campaign.__logger.exception(
            "The results cannot be collected from DB")
        return Campaign.EX_DUMP_FROM_DB_ERROR
    return Campaign.EX_OK
def generate_tiers(self):
    """Rebuild self.tier_objects from the parsed test description.

    Loads the yaml description on first use, then recreates the tier
    list from scratch. Each test case is added to its tier when it is
    enabled and all its regex dependencies match the environment;
    otherwise it is flagged as skipped and registered as such.
    """
    if self.dic_tier_array is None:
        self.read_test_yaml()
    del self.tier_objects[:]
    for tier_desc in self.dic_tier_array:
        tier = tier_handler.Tier(
            name=tier_desc['name'],
            order=tier_desc['order'],
            description=tier_desc['description'])
        for case_desc in tier_desc['testcases']:
            testcase = tier_handler.TestCase(
                name=case_desc['case_name'],
                enabled=case_desc.get('enabled', True),
                skipped=False,
                criteria=case_desc['criteria'],
                blocking=case_desc['blocking'],
                description=case_desc['description'],
                project=case_desc['project_name'])
            # A dependency is a one-key mapping {ENV_VAR: regex}; every
            # listed dependency must match for the case to be runnable.
            deps_met = True
            for dependency in case_desc.get('dependencies') or []:
                kenv = next(iter(dependency))
                if not re.search(dependency[kenv], env.get(kenv) or ''):
                    deps_met = False
                    break
            if not deps_met:
                testcase.skipped = True
                tier.skip_test(testcase)
            elif testcase.is_enabled():
                tier.add_test(testcase)
            else:
                testcase.skipped = True
                tier.skip_test(testcase)
        self.tier_objects.append(tier)
def test_get_env2(self):
    # BUILD_TAG is exported by the fixture (not visible here); env.get
    # must return the exported value rather than any declared default.
    observed = env.get('BUILD_TAG')
    self.assertEqual(observed, 'master')
def test_get_unset_env2(self):
    # Once BUILD_TAG is removed from the process environment, env.get
    # must fall back to the default declared in env.INPUTS.
    del os.environ['BUILD_TAG']
    expected = env.INPUTS['BUILD_TAG']
    self.assertEqual(env.get('BUILD_TAG'), expected)
def test_get_env(self):
    # CI_LOOP is exported by the fixture (not visible here); env.get
    # must return the exported value rather than any declared default.
    observed = env.get('CI_LOOP')
    self.assertEqual(observed, 'weekly')
def test_get_unset_env(self):
    # Once CI_LOOP is removed from the process environment, env.get
    # must fall back to the default declared in env.INPUTS.
    del os.environ['CI_LOOP']
    expected = env.INPUTS['CI_LOOP']
    self.assertEqual(env.get('CI_LOOP'), expected)
def test_get_unknown_env(self):
    # FOO has no entry in env.INPUTS, so env.get reads it straight
    # from the process environment.
    observed = env.get('FOO')
    self.assertEqual(observed, 'foo')
    # Reload env afterwards so state polluted by FOO does not leak
    # into the following tests.
    importlib.reload(env)
def test_get_unset_unknown_env(self):
    # FOO has no default in env.INPUTS; with the variable removed from
    # the environment, env.get must yield None.
    del os.environ['FOO']
    observed = env.get('FOO')
    self.assertEqual(observed, None)
def zip_campaign_files():  # pylint: disable=too-many-locals
    """Archive and publish all test campaign data to the S3 repository.

    It allows collecting all the artifacts from the S3 repository.

    It could be overriden if the common implementation is not suitable.

    The credentials must be configured before publishing the artifacts:

        * fill ~/.aws/credentials or ~/.boto,
        * set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in env.

    The next vars must be set in env:

        * S3_ENDPOINT_URL (http://127.0.0.1:9000),
        * S3_DST_URL (s3://xtesting/prefix),
        * HTTP_DST_URL (the public base URL of the published archive).

    Returns:
        Campaign.EX_OK if artifacts were published to repository.
        Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR otherwise.
    """
    try:
        build_tag = env.get('BUILD_TAG')
        # Both dumps must succeed before anything is zipped; a failure
        # falls through to the broad except below.
        assert Campaign.dump_db() == Campaign.EX_OK
        assert Campaign.dump_artifacts() == Campaign.EX_OK
        # Bundle the DB dump ({build_tag}.json) plus every file dumped
        # under the {build_tag}/ directory into a single zip.
        with zipfile.ZipFile(f'{build_tag}.zip', 'w',
                             zipfile.ZIP_DEFLATED) as zfile:
            zfile.write(f"{build_tag}.json")
            for root, _, files in os.walk(build_tag):
                for filename in files:
                    zfile.write(os.path.join(root, filename))
        b3resource = boto3.resource(
            's3', endpoint_url=os.environ["S3_ENDPOINT_URL"])
        dst_s3_url = os.environ["S3_DST_URL"]
        # 5 * 1024**5 is far beyond any real object size, which in
        # practice disables multipart uploads when the endpoint is
        # Google Cloud Storage — presumably because its S3-compatible
        # API does not accept them (TODO confirm). Other endpoints use
        # the boto3 default-style 8 MiB threshold.
        multipart_threshold = 5 * 1024**5 if "google" in os.environ[
            "S3_ENDPOINT_URL"] else 8 * 1024 * 1024
        tconfig = TransferConfig(multipart_threshold=multipart_threshold)
        bucket_name = urllib.parse.urlparse(dst_s3_url).netloc
        mime_type = mimetypes.guess_type(f'{build_tag}.zip')
        # Key prefix inside the bucket, taken from the S3_DST_URL path.
        path = urllib.parse.urlparse(dst_s3_url).path.strip("/")
        # pylint: disable=no-member
        b3resource.Bucket(bucket_name).upload_file(
            f'{build_tag}.zip',
            os.path.join(path, f'{build_tag}.zip'),
            Config=tconfig,
            ExtraArgs={
                'ContentType': mime_type[0] or 'application/octet-stream'})
        # Log the public link where the archive can be fetched.
        dst_http_url = os.environ["HTTP_DST_URL"]
        link = os.path.join(dst_http_url, f'{build_tag}.zip')
        Campaign.__logger.info(
            "All data were successfully published:\n\n%s", link)
        return Campaign.EX_OK
    except KeyError as ex:
        # One of the required os.environ lookups above is missing.
        Campaign.__logger.error("Please check env var: %s", str(ex))
        return Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR
    except botocore.exceptions.NoCredentialsError:
        Campaign.__logger.error(
            "Please fill ~/.aws/credentials, ~/.boto or set "
            "AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in env")
        return Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR
    except Exception:  # pylint: disable=broad-except
        Campaign.__logger.exception("Cannot publish the artifacts")
        return Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR
def test_get_unknown_env(self):
    # FOO has no entry in env.INPUTS, so env.get reads it straight
    # from the process environment.
    observed = env.get('FOO')
    self.assertEqual(observed, 'foo')
    # Reload env afterwards so state polluted by FOO does not leak
    # into the following tests.
    reload_module(env)
def push_to_db(self):
    """Push the results of the test case to the DB.

    It allows publishing the results and checking the status.

    It could be overriden if the common implementation is not
    suitable.

    The following attributes must be set before pushing the results to
    DB:

        * project_name,
        * case_name,
        * result,
        * start_time,
        * stop_time.

    The next vars must be set in env:

        * TEST_DB_URL,
        * INSTALLER_TYPE,
        * DEPLOY_SCENARIO,
        * NODE_NAME,
        * BUILD_TAG.

    Returns:
        TestCase.EX_OK if results were pushed to DB.
        TestCase.EX_PUSH_TO_DB_ERROR otherwise.
    """
    try:
        # Skipped test cases are never published.
        if self.is_skipped:
            return TestCase.EX_PUSH_TO_DB_ERROR
        # The run must have happened: these attributes are set by the
        # test execution; any falsy value aborts via AssertionError.
        assert self.project_name
        assert self.case_name
        assert self.start_time
        assert self.stop_time
        url = env.get('TEST_DB_URL')
        data = {"project_name": self.project_name,
                "case_name": self.case_name,
                "details": self.details}
        data["installer"] = env.get('INSTALLER_TYPE')
        data["scenario"] = env.get('DEPLOY_SCENARIO')
        data["pod_name"] = env.get('NODE_NAME')
        data["build_tag"] = env.get('BUILD_TAG')
        data["criteria"] = 'PASS' if self.is_successful(
            ) == TestCase.EX_OK else 'FAIL'
        data["start_date"] = datetime.fromtimestamp(
            self.start_time).strftime('%Y-%m-%d %H:%M:%S')
        data["stop_date"] = datetime.fromtimestamp(
            self.stop_time).strftime('%Y-%m-%d %H:%M:%S')
        try:
            # Extract the version from the Jenkins-style build tag;
            # anything that does not match falls back to "unknown".
            data["version"] = re.search(
                TestCase._job_name_rule,
                env.get('BUILD_TAG')).group(2)
        except Exception:  # pylint: disable=broad-except
            data["version"] = "unknown"
        req = requests.post(
            url, data=json.dumps(data, sort_keys=True),
            headers=self.headers)
        req.raise_for_status()
        if urllib.parse.urlparse(url).scheme != "file":
            # href must be postprocessed as OPNFV testapi is
            # misconfigured (localhost is returned)
            uid = re.sub(r'^.*/api/v1/results/*', '', req.json()["href"])
            netloc = env.get('TEST_DB_EXT_URL') if env.get(
                'TEST_DB_EXT_URL') else env.get('TEST_DB_URL')
            self.__logger.info(
                "The results were successfully pushed to DB: \n\n%s\n",
                os.path.join(netloc, uid))
    except AssertionError:
        self.__logger.exception(
            "Please run test before publishing the results")
        return TestCase.EX_PUSH_TO_DB_ERROR
    except requests.exceptions.HTTPError:
        self.__logger.exception("The HTTP request raises issues")
        return TestCase.EX_PUSH_TO_DB_ERROR
    except Exception:  # pylint: disable=broad-except
        self.__logger.exception("The results cannot be pushed to DB")
        return TestCase.EX_PUSH_TO_DB_ERROR
    return TestCase.EX_OK