def __init__(self, suite_id, database):
    """Initialize ETOS, dataset, provider registry and splitter.

    :param suite_id: Suite ID to get an environment for
    :type suite_id: str
    :param database: Database class to use.
    :type database: class
    """
    self.suite_id = suite_id
    # Tag every log record produced by this task with the suite ID.
    FORMAT_CONFIG.identifier = suite_id
    self.logger.info("Initializing EnvironmentProvider task.")
    self.etos = ETOS(
        "ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider"
    )
    with self.lock:
        # Since celery workers can share memory between them we need to make the configuration
        # of ETOS library unique as it uses the memory sharing feature with the internal
        # configuration dictionary.
        # The impact of not doing this is that the environment provider would re-use
        # another workers configuration instead of using its own.
        self.etos.config.config = deepcopy(
            self.etos.config.config
        )  # pylint:disable=protected-access
    self.jsontas = JsonTas()
    self.dataset = self.jsontas.dataset
    # Expose helper callables in the JSONTas dataset so provider rulesets
    # can reference them while being resolved.
    self.dataset.add("json_dumps", JsonDumps)
    self.dataset.add("uuid_generate", UuidGenerate)
    self.dataset.add("join", Join)
    self.registry = ProviderRegistry(self.etos, self.jsontas, database)
    self.splitter = Splitter(self.etos, {})
def on_get(request, response):
    """Get an already configured environment based on suite ID.

    Use only to verify that the environment has been configured properly.

    :param request: Falcon request object.
    :type request: :obj:`falcon.request`
    :param response: Falcon response object.
    :type response: :obj:`falcon.response`
    :raises falcon.HTTPBadRequest: If the 'suite_id' parameter is missing.
    """
    suite_id = request.get_param("suite_id")
    # Fail fast: validate the required parameter before constructing the
    # ETOS library, JsonTas and the provider registry, which would
    # otherwise be built only to be discarded on a bad request.
    if suite_id is None:
        raise falcon.HTTPBadRequest("Missing parameters", "'suite_id' is a required parameter.")
    FORMAT_CONFIG.identifier = suite_id
    etos = ETOS("ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider")
    jsontas = JsonTas()
    registry = ProviderRegistry(etos, jsontas)
    response.status = falcon.HTTP_200
    iut_provider = registry.iut_provider(suite_id)
    log_area_provider = registry.log_area_provider(suite_id)
    execution_space_provider = registry.execution_space_provider(suite_id)
    # Unconfigured providers are reported as None rather than raising.
    response.media = {
        "iut_provider": iut_provider.ruleset if iut_provider else None,
        "log_area_provider": log_area_provider.ruleset if log_area_provider else None,
        "execution_space_provider": execution_space_provider.ruleset
        if execution_space_provider
        else None,
        "dataset": registry.dataset(suite_id),
    }
def on_post(self, request, response):
    """Register a new provider.

    :param request: Falcon request object.
    :type request: :obj:`falcon.request`
    :param response: Falcon response object.
    :type response: :obj:`falcon.response`
    :raises falcon.HTTPBadRequest: If no provider was supplied in the request.
    """
    self.request = request
    # At least one provider type must be present in the request body.
    if not any(
        (self.iut_provider, self.log_area_provider, self.execution_space_provider)
    ):
        raise falcon.HTTPBadRequest(
            "Missing parameters",
            "At least one of 'iut_provider', 'log_area_provider' "
            "& 'execution_space_provider' is a required parameter.",
        )
    etos = ETOS("ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider")
    jsontas = JsonTas()
    registry = ProviderRegistry(etos, jsontas)
    # Register whichever providers were supplied, in the same order as before:
    # IUT, execution space, then log area.
    registrations = (
        (self.iut_provider, registry.register_iut_provider),
        (self.execution_space_provider, registry.register_execution_space_provider),
        (self.log_area_provider, registry.register_log_area_provider),
    )
    for ruleset, register_provider in registrations:
        if ruleset:
            register_provider(ruleset)
    response.status = falcon.HTTP_204
def release(self, response, task_id):  # pylint:disable=too-many-locals
    """Release an environment.

    :param response: Response object to edit and return.
    :type response: :obj:`falcon.response`
    :param task_id: Task to release.
    :type task_id: str
    """
    etos = ETOS(
        "ETOS Environment Provider",
        os.getenv("HOSTNAME"),
        "Environment Provider",
    )
    jsontas = JsonTas()
    registry = ProviderRegistry(etos, jsontas, self.database())
    task_result = self.celery_worker.AsyncResult(task_id)
    success, message = release_environment(etos, jsontas, registry, task_result, task_id)
    status = task_result.status if task_result else "PENDING"
    if success:
        response.status = falcon.HTTP_200
        response.media = {"status": status}
        return
    # NOTE(review): on failure response.status is left at Falcon's default —
    # confirm whether an explicit error status code is expected here.
    response.media = {
        "error": "Failed to release environment",
        "details": message,
        "status": status,
    }
def on_post(self, request, response):
    """Register a new provider.

    :param request: Falcon request object.
    :type request: :obj:`falcon.request`
    :param response: Falcon response object.
    :type response: :obj:`falcon.response`
    :raises falcon.HTTPBadRequest: If no provider was supplied in the request.
    """
    etos = ETOS("ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider")
    jsontas = JsonTas()
    provider_registry = ProviderRegistry(etos, jsontas, self.database())
    # Each get_* helper extracts its provider definition from the request;
    # registration itself is delegated to the register() helper.
    success = register(
        provider_registry,
        iut_provider=get_iut_provider(request),
        log_area_provider=get_log_area_provider(request),
        execution_space_provider=get_execution_space_provider(request),
    )
    if success is False:
        raise falcon.HTTPBadRequest(
            "Missing parameters",
            "At least one of 'iut_provider', 'log_area_provider' "
            "& 'execution_space_provider' is a required parameter.",
        )
    response.status = falcon.HTTP_204
def on_post(self, request, response):
    """Verify that all parameters are available and configure the provider registry.

    :param request: Falcon request object.
    :type request: :obj:`falcon.request`
    :param response: Falcon response object.
    :type response: :obj:`falcon.response`
    :raises falcon.HTTPBadRequest: If the configuration could not be applied.
    """
    etos = ETOS("ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider")
    jsontas = JsonTas()
    registry = ProviderRegistry(etos, jsontas, self.database())
    suite_id = get_suite_id(request)
    # Tag all log records for this request with the suite ID.
    FORMAT_CONFIG.identifier = suite_id
    success, message = configure(
        registry,
        get_iut_provider_id(request),
        get_execution_space_provider_id(request),
        get_log_area_provider_id(request),
        get_dataset(request),
        # Reuse the value extracted above instead of parsing the request again.
        suite_id,
    )
    if not success:
        self.logger.error(message)
        raise falcon.HTTPBadRequest("Bad request", message)
    response.status = falcon.HTTP_200
def on_post(self, request, response):
    """Verify that all parameters are available and configure the provider registry.

    :param request: Falcon request object.
    :type request: :obj:`falcon.request`
    :param response: Falcon response object.
    :type response: :obj:`falcon.response`
    :raises falcon.HTTPBadRequest: If any required parameter is missing or unknown.
    """
    self.request = request
    etos = ETOS("ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider")
    jsontas = JsonTas()
    self.registry = ProviderRegistry(etos, jsontas)
    try:
        # NOTE: assert statements are stripped under ``python -O``; they are
        # used here for request validation and translated to HTTP 400 below.
        assert self.suite_id is not None, "Invalid suite ID"
        FORMAT_CONFIG.identifier = self.suite_id
        iut_provider = self.iut_provider
        log_area_provider = self.log_area_provider
        execution_space_provider = self.execution_space_provider
        assert (
            iut_provider is not None
        ), f"No such IUT provider {self.request.media.get('iut_provider')}"
        # Bugfix: the two message fragments previously concatenated without a
        # separating space, producing "...provider<id>" in the error text.
        assert execution_space_provider is not None, (
            "No such execution space provider "
            f"{self.request.media.get('execution_space_provider')}"
        )
        assert (
            log_area_provider is not None
        ), f"No such log area provider {self.request.media.get('log_area_provider')}"
        assert self.dataset is not None, "Invalid dataset."
        response.media = {
            "IUTProvider": iut_provider,
            "ExecutionSpaceProvider": execution_space_provider,
            "LogAreaProvider": log_area_provider,
        }
        self.registry.configure_environment_provider_for_suite(
            self.suite_id,
            iut_provider,
            log_area_provider,
            execution_space_provider,
            self.dataset,
        )
    except AssertionError as exception:
        # Chain the assertion so the root cause is preserved in tracebacks.
        raise falcon.HTTPBadRequest("Invalid provider", str(exception)) from exception
def on_get(self, request, response):
    """Get an already configured environment based on suite ID.

    Use only to verify that the environment has been configured properly.

    :param request: Falcon request object.
    :type request: :obj:`falcon.request`
    :param response: Falcon response object.
    :type response: :obj:`falcon.response`
    :raises falcon.HTTPBadRequest: If the 'suite_id' parameter is missing.
    """
    suite_id = get_suite_id(request)
    # Fail fast: validate the required parameter before constructing the
    # ETOS library, JsonTas and the provider registry.
    if suite_id is None:
        raise falcon.HTTPBadRequest("Missing parameters", "'suite_id' is a required parameter.")
    FORMAT_CONFIG.identifier = suite_id
    etos = ETOS("ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider")
    jsontas = JsonTas()
    registry = ProviderRegistry(etos, jsontas, self.database())
    response.status = falcon.HTTP_200
    response.media = get_configuration(registry, suite_id)
def main(args):
    """Entry point allowing external calls.

    Args:
      args ([str]): command line parameter list
    """
    args = parse_args(args)
    etos = ETOS("ETOS Client", os.getenv("HOSTNAME"), "ETOS Client")
    # The ETOS library reads the API endpoint from this environment variable.
    os.environ["ETOS_TESTER_API"] = args.cluster
    setup_logging(args.loglevel)
    # Spinner factory; honors --no-tty for non-interactive terminals.
    info = generate_spinner(args.no_tty)
    identity = PackageURL.from_string(args.identity)
    # Copy every parsed CLI argument into the ETOS configuration.
    for key, value in args._get_kwargs():  # pylint:disable=protected-access
        etos.config.set(key, value)
    # Overwrite with the parsed purl object and the decoded dataset.
    etos.config.set("identity", identity)
    etos.config.set("dataset", json.loads(args.dataset))
    with info(text="Checking connectivity to ETOS", spinner="dots") as spinner:
        spinner.info("Running in cluster: '{}'".format(etos.debug.etos_api))
        spinner.info("Configuration:")
        spinner.info("{}".format(etos.config.config))
        try:
            check_etos_connectivity(etos.debug.etos_api)
        except Exception as exception:  # pylint:disable=broad-except
            # Connectivity failure is fatal; report and exit non-zero.
            spinner.fail(str(exception))
            sys.exit(1)
        spinner.start()
        spinner.succeed("Connection successful.")
        spinner.succeed("Ready to launch ETOS.")

        # Start execution
        etos_client = ETOSClient(etos)
        spinner.start("Triggering ETOS.")
        success = etos_client.start(spinner)
        if not success:
            # Unix : 0 == Success, 1 == Fail
            # Python: 1 == True , 0 == False
            sys.exit(not success)
        spinner.info("Suite ID: {}".format(etos_client.test_suite_id))
        etos.config.set("suite_id", etos_client.test_suite_id)

        # Wait for test results
        test_result_handler = ETOSTestResultHandler(etos)
        spinner.start("Waiting for ETOS.")
        success, results = test_result_handler.wait_for_test_suite_finished(spinner)
        if not success:
            spinner.fail(results)
        else:
            spinner.succeed(results)

        # Download reports. Prompt interactively unless --download-reports
        # was given on the command line.
        if args.download_reports is None:
            answer = input("Do you want to download all logs for this test execution? (y/n): ")
            while answer.lower() not in ("y", "yes", "no", "n"):
                print("Please answer 'yes' or 'no'")
                answer = input()
        else:
            answer = args.download_reports
        if answer.lower() in ("y", "yes"):
            log_handler = ETOSLogHandler(etos, test_result_handler.events)
            spinner.start("Downloading test logs.")
            logs_downloaded_successfully = log_handler.download_logs(spinner)
            if not logs_downloaded_successfully:
                # sys.exit with a string prints it and exits with status 1.
                sys.exit("ETOS logs did not download successfully.")
class EnvironmentProvider:  # pylint:disable=too-many-instance-attributes
    """Environment provider celery Task."""

    logger = logging.getLogger("EnvironmentProvider")
    # Set by configure(); holds the per-suite Config instance.
    environment_provider_config = None
    # Providers are resolved from the registry in configure().
    iut_provider = None
    log_area_provider = None
    execution_space_provider = None
    # Celery task option — presumably makes the task report a STARTED state;
    # confirm against the celery version in use.
    task_track_started = True
    # Class-level lock shared by all instances in a worker process; guards
    # the deepcopy of the shared ETOS configuration in __init__.
    lock = Lock()

    def __init__(self, suite_id, database):
        """Initialize ETOS, dataset, provider registry and splitter.

        :param suite_id: Suite ID to get an environment for
        :type suite_id: str
        :param database: Database class to use.
        :type database: class
        """
        self.suite_id = suite_id
        # Tag every log record produced by this task with the suite ID.
        FORMAT_CONFIG.identifier = suite_id
        self.logger.info("Initializing EnvironmentProvider task.")
        self.etos = ETOS(
            "ETOS Environment Provider", os.getenv("HOSTNAME"), "Environment Provider"
        )
        with self.lock:
            # Since celery workers can share memory between them we need to make the configuration
            # of ETOS library unique as it uses the memory sharing feature with the internal
            # configuration dictionary.
            # The impact of not doing this is that the environment provider would re-use
            # another workers configuration instead of using its own.
            self.etos.config.config = deepcopy(
                self.etos.config.config
            )  # pylint:disable=protected-access
        self.jsontas = JsonTas()
        self.dataset = self.jsontas.dataset
        # Expose helper callables in the JSONTas dataset so provider rulesets
        # can reference them while being resolved.
        self.dataset.add("json_dumps", JsonDumps)
        self.dataset.add("uuid_generate", UuidGenerate)
        self.dataset.add("join", Join)
        self.registry = ProviderRegistry(self.etos, self.jsontas, database)
        self.splitter = Splitter(self.etos, {})

    def configure(self, suite_id):
        """Configure environment provider and start RabbitMQ publisher.

        :param suite_id: Suite ID for this task.
        :type suite_id: str
        :raises EnvironmentProviderNotConfigured: If no configuration appears
            within the registry's wait window.
        :raises NoEventDataFound: If required event data was not collected.
        """
        self.logger.info("Configure environment provider.")
        if not self.registry.wait_for_configuration(suite_id):
            # TODO: Add link ref to docs that describe how the config is done.
            raise EnvironmentProviderNotConfigured(
                "Please do a proper configuration of "
                "EnvironmentProvider before requesting an "
                "environment."
            )
        self.logger.info("Registry is configured.")
        self.iut_provider = self.registry.iut_provider(suite_id)
        self.log_area_provider = self.registry.log_area_provider(suite_id)
        self.execution_space_provider = self.registry.execution_space_provider(suite_id)
        # Timeouts are read from the environment, defaulting to 10 (seconds,
        # presumably — confirm against the ETOS library).
        self.etos.config.set(
            "EVENT_DATA_TIMEOUT", int(os.getenv("ETOS_EVENT_DATA_TIMEOUT", "10"))
        )
        self.etos.config.set(
            "WAIT_FOR_IUT_TIMEOUT", int(os.getenv("ETOS_WAIT_FOR_IUT_TIMEOUT", "10"))
        )
        self.etos.config.set(
            "WAIT_FOR_EXECUTION_SPACE_TIMEOUT",
            int(os.getenv("ETOS_WAIT_FOR_EXECUTION_SPACE_TIMEOUT", "10")),
        )
        self.etos.config.set(
            "WAIT_FOR_LOG_AREA_TIMEOUT",
            int(os.getenv("ETOS_WAIT_FOR_LOG_AREA_TIMEOUT", "10")),
        )
        self.etos.config.set("SUITE_ID", suite_id)
        self.etos.config.rabbitmq_publisher_from_environment()
        self.etos.start_publisher()
        self.environment_provider_config = Config(self.etos, suite_id)
        if not self.environment_provider_config.generated:
            # Report exactly which pieces of event data are missing.
            missing = [
                name
                for name, value in [
                    ("tercc", self.environment_provider_config.tercc),
                    (
                        "artifact_created",
                        self.environment_provider_config.artifact_created,
                    ),
                    (
                        "activity_triggered",
                        self.environment_provider_config.activity_triggered,
                    ),
                ]
                if value is None
            ]
            raise NoEventDataFound(f"Missing: {', '.join(missing)}")
        # Seed the JSONTas dataset with values that provider rulesets can use.
        self.dataset.add("environment", os.environ)
        self.dataset.add("config", self.etos.config)
        self.dataset.add("identity", self.environment_provider_config.identity)
        self.dataset.add("artifact_id", self.environment_provider_config.artifact_id)
        self.dataset.add("context", self.environment_provider_config.context)
        self.dataset.add("custom_data", self.environment_provider_config.custom_data)
        self.dataset.add("uuid", str(uuid.uuid4()))
        self.dataset.add(
            "artifact_created", self.environment_provider_config.artifact_created
        )
        self.dataset.add(
            "artifact_published", self.environment_provider_config.artifact_published
        )
        self.dataset.add("tercc", self.environment_provider_config.tercc)
        # Merge in the dataset configured for this suite in the registry.
        self.dataset.merge(self.registry.dataset(suite_id))

    def cleanup(self):
        """Clean up by checking in all checked out providers."""
        self.logger.info("Cleanup by checking in all checked out providers.")
        for provider in self.etos.config.get("PROVIDERS"):
            try:
                provider.checkin_all()
            except:  # noqa pylint:disable=bare-except
                # Best effort: a failed checkin must not stop cleanup of the
                # remaining providers.
                pass

    @staticmethod
    def get_constraint(recipe, key):
        """Get a constraint key from an ETOS recipe.

        :param recipe: Recipe to get key from.
        :type recipe: dict
        :param key: Key to get value from, from the constraints.
        :type key: str
        :return: Constraint value, or None if the key is not present.
        :rtype: any
        """
        for constraint in recipe.get("constraints", []):
            if constraint.get("key") == key:
                return constraint.get("value")
        return None

    def create_test_suite_dict(self):
        """Create a test suite dictionary based on test runners.

        I.e. If there is only one test_runner the dictionary would be::

            {
                "test_suite_name": {
                    "MyTestrunner": {
                        "docker": "MyTestrunner",
                        "priority": 1,
                        "unsplit_recipes": [...]
                    }
                }
            }

        Or two::

            {
                "test_suite_name": {
                    "MyTestrunner": {
                        "docker": "MyTestrunner",
                        "priority": 1,
                        "unsplit_recipes": [...]
                    },
                    "MyOtherTestrunner": {
                        "docker": "MyOtherTestrunner",
                        "priority": 1,
                        "unsplit_recipes": [...]
                    }
                }
            }

        etc.

        :return: A test suite dictionary based on test runners.
        :rtype: dict
        """
        self.logger.info("Create new test suite dictionary.")
        test_suites = {}
        for test_suite in self.environment_provider_config.test_suite:
            test_runners = test_suites.setdefault(test_suite.get("name"), {})
            for recipe in test_suite.get("recipes", []):
                # Group recipes by the TEST_RUNNER constraint of each recipe.
                test_runner = self.get_constraint(recipe, "TEST_RUNNER")
                test_runners.setdefault(
                    test_runner,
                    {
                        "docker": test_runner,
                        "priority": test_suite.get("priority"),
                        "unsplit_recipes": [],
                    },
                )
                test_runners[test_runner]["unsplit_recipes"].append(recipe)
        return test_suites

    def set_total_test_count_and_test_runners(self, test_runners):
        """Set total test count and test runners to be used by the splitter algorithm.

        :param test_runners: Dictionary with test_runners as keys.
        :type test_runners: dict
        """
        total_test_count = 0
        for _, data in test_runners.items():
            total_test_count += len(data["unsplit_recipes"])
        self.etos.config.set("TOTAL_TEST_COUNT", total_test_count)
        self.etos.config.set("NUMBER_OF_TESTRUNNERS", len(test_runners.keys()))

    def checkout_and_assign_iuts_to_test_runners(self, test_runners):
        """Checkout IUTs from the IUT provider and assign them to the test_runners dictionary.

        :param test_runners: Dictionary with test_runners as keys.
        :type test_runners: dict
        """
        # At least one IUT per test runner; at most one per test.
        iuts = self.iut_provider.wait_for_and_checkout_iuts(
            minimum_amount=self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
            maximum_amount=self.etos.config.get("TOTAL_TEST_COUNT"),
        )
        self.etos.config.set("NUMBER_OF_IUTS", len(iuts))
        # Any IUT the splitter did not assign is returned to the provider.
        unused_iuts = self.splitter.assign_iuts(test_runners, self.dataset.get("iuts"))
        for iut in unused_iuts:
            self.iut_provider.checkin(iut)

    def checkout_log_area(self):
        """Checkout a log area.

        Called for each executor so only a single log area needs to be checked out.
        """
        return self.log_area_provider.wait_for_and_checkout_log_areas(
            minimum_amount=1, maximum_amount=1
        )

    def checkout_and_assign_executors_to_iuts(self, test_runner, iuts):
        """Checkout and assign executors to each available IUT.

        :param test_runner: Test runner which will be added to dataset
                            in order for JSONTas to get more information when
                            running.
        :type test_runner: dict
        :param iuts: Dictionary of IUTs to assign executors to.
        :type iuts: dict
        """
        self.dataset.add("test_runner", test_runner)
        executors = (
            self.execution_space_provider.wait_for_and_checkout_execution_spaces(
                minimum_amount=len(iuts),
                maximum_amount=len(iuts),
            )
        )
        for iut, suite in iuts.items():
            try:
                suite["executor"] = executors.pop(0)
            except IndexError:
                # Fewer executors than IUTs: remaining IUTs get no executor
                # and are later checked in by checkin_iuts_without_executors.
                break
            # Make the current executor and IUT visible to the JSONTas
            # ruleset used when checking out the log area below.
            self.dataset.add("executor", suite["executor"])
            self.dataset.add("iut", iut)
            # This index will always exist or 'checkout' would raise an exception.
            suite["log_area"] = self.checkout_log_area()[0]

        # Checkin the unassigned executors.
        for executor in executors:
            self.execution_space_provider.checkin(executor)

    def checkin_iuts_without_executors(self, iuts):
        """Find all IUTs without an assigned executor and check them in.

        :param iuts: IUTs to check for executors.
        :type iuts: dict
        :return: IUTs that were removed.
        :rtype: list
        """
        remove = []
        for iut, suite in iuts.items():
            if suite.get("executor") is None:
                self.iut_provider.checkin(iut)
                remove.append(iut)
        return remove

    def verify_json(self, json_data):
        """Verify that JSON data can be serialized properly.

        :param json_data: JSON data to test.
        :type json_data: str or dict
        :raises json.decoder.JSONDecodeError, TypeError: If the data cannot
            be serialized; the offending data is logged before re-raising.
        """
        try:
            if isinstance(json_data, dict):
                json_data = json.dumps(json_data)
            json.loads(json_data)
        except (json.decoder.JSONDecodeError, TypeError):
            self.logger.error(json_data)
            raise

    def run(self):
        """Run the environment provider task.

        :return: Test suite JSON with assigned IUTs, execution spaces and log areas.
        :rtype: dict
        """
        try:
            self.configure(self.suite_id)
            test_suites = self.create_test_suite_dict()
            for test_suite_name, test_runners in test_suites.items():
                self.set_total_test_count_and_test_runners(test_runners)
                self.logger.info(
                    "Total test count : %r", self.etos.config.get("TOTAL_TEST_COUNT")
                )
                self.logger.info(
                    "Total testrunners: %r",
                    self.etos.config.get("NUMBER_OF_TESTRUNNERS"),
                )
                self.checkout_and_assign_iuts_to_test_runners(test_runners)
                for test_runner, values in test_runners.items():
                    self.checkout_and_assign_executors_to_iuts(
                        test_runner, values["iuts"]
                    )
                    # NOTE(review): .remove() implies values["iuts"] is
                    # list-like here, while checkin_iuts_without_executors
                    # iterates it with .items() — verify the container type
                    # produced by the splitter.
                    for iut in self.checkin_iuts_without_executors(values["iuts"]):
                        values["iuts"].remove(iut)
                for sub_suite in test_runners.values():
                    self.splitter.split(sub_suite)
                test_suite = TestSuite(
                    test_suite_name, test_runners, self.environment_provider_config
                )
                # This is where the resulting test suite is generated.
                # The resulting test suite will be a dictionary with test runners, IUTs
                # execution spaces and log areas with tests split up over as many as
                # possible. The resulting test suite definition is further explained in
                # :obj:`environment_provider.lib.test_suite.TestSuite`
                test_suite.generate()
                test_suite_json = test_suite.to_json()
                # Test that the test suite JSON is serializable so that the
                # exception is caught here and not by the webserver.
                # This makes sure that we can cleanup if anything breaks.
                self.verify_json(test_suite_json)
                # TODO: Handle multiple test suites.
                return test_suite_json
        except Exception as exception:  # pylint:disable=broad-except
            # On any failure, check everything back in and return the error
            # as data so the caller can report it.
            self.cleanup()
            traceback.print_exc()
            return {"error": str(exception), "details": traceback.format_exc()}
        finally:
            if self.etos.publisher is not None:
                self.etos.publisher.stop()
def release(response, task_id):
    """Release an environment.

    :param response: Response object to edit and return.
    :type response: :obj:`falcon.response`
    :param task_id: Task to release.
    :type task_id: str
    """
    # Bugfix: initialize ``result`` before the try block. It used to be
    # assigned only inside the try, so a failure in ``APP.AsyncResult`` made
    # the except handler below raise NameError on ``**result``.
    result = {}
    try:
        task_result = APP.AsyncResult(task_id)
        result = {
            "status": task_result.status,
        }
        response.status = falcon.HTTP_200
        if task_result.result:
            etos = ETOS(
                "ETOS Environment Provider",
                os.getenv("HOSTNAME"),
                "Environment Provider",
            )
            jsontas = JsonTas()
            registry = ProviderRegistry(etos, jsontas)
            # Attempt all checkins even if some fail; remember the last
            # failure and raise it after the loop.
            failure = None
            for suite in task_result.result.get("suites", []):
                try:
                    iut = suite.get("iut")
                    ruleset = registry.get_iut_provider_by_id(iut.get("provider_id"))
                    provider = IutProvider(etos, jsontas, ruleset["iut"])
                    provider.checkin(Iut(**iut))
                except Exception as exception:  # pylint:disable=broad-except
                    failure = exception

                try:
                    executor = suite.get("executor")
                    ruleset = registry.get_execution_space_provider_by_id(
                        executor.get("provider_id")
                    )
                    provider = ExecutionSpaceProvider(
                        etos, jsontas, ruleset["execution_space"]
                    )
                    provider.checkin(ExecutionSpace(**executor))
                except Exception as exception:  # pylint:disable=broad-except
                    failure = exception

                try:
                    log_area = suite.get("log_area")
                    ruleset = registry.get_log_area_provider_by_id(
                        log_area.get("provider_id")
                    )
                    provider = LogAreaProvider(etos, jsontas, ruleset["log"])
                    provider.checkin(LogArea(**log_area))
                except Exception as exception:  # pylint:disable=broad-except
                    failure = exception
            # Only forget the task once all checkins have been attempted.
            task_result.forget()
            if failure:
                raise failure
            response.media = {**result}
        else:
            response.media = {
                "warning": f"Nothing to release with task_id '{task_id}'",
                **result,
            }
    except Exception as exception:  # pylint:disable=broad-except
        traceback.print_exc()
        response.media = {
            "error": str(exception),
            "details": traceback.format_exc(),
            **result,
        }