class AnsibleExecutorPlugin(ExecutorPlugin): """ The main executor for Teflo. The AnsibleExecutorPlugin class provides three different types on how you can execute tests. Its intention is to be generic enough where you just need to supply your test commands and it will process them. All tests executed against remote hosts will be run through ansible. """ # keeping the name as runner for backward compatibility __executor_name__ = 'runner' __schema_file_path__ = os.path.abspath( os.path.join(os.path.dirname(__file__), "files/schema.yml")) __schema_ext_path__ = os.path.abspath( os.path.join(os.path.dirname(__file__), "files/extensions.py")) def __init__(self, package): """Constructor. :param package: execute resource :type package: object """ super(AnsibleExecutorPlugin, self).__init__(package) # set required attributes self._desc = getattr(package, 'description') self.all_hosts = getattr(package, 'all_hosts', []) self.playbook = getattr(package, 'playbook', None) self.script = getattr(package, 'script', None) self.shell = getattr(package, 'shell', None) self.git = getattr(package, 'git', None) self.artifacts = getattr(package, 'artifacts') self.options = getattr(package, 'ansible_options', None) self.ignorerc = getattr(package, 'ignore_rc', False) self.validrc = getattr(package, 'valid_rc', None) self.injector = DataInjector(self.all_hosts) self.ans_service = AnsibleService( self.config, self.hosts, self.all_hosts, self.options, concurrency=self.config['TASK_CONCURRENCY']['EXECUTE'].lower()) self.ans_verbosity = get_ans_verbosity(self.config) # attribute defining overall status of test execution. why is this # needed? when a test fails we handle the exception raised and call # the method to archive test artifacts. once fetching artifacts is # finished this status is used to fail teflo (if needed) self.status = 0 def validate(self): """Validate.""" # schema validation for ansible_orchestrate schema schema_validator(schema_data=self.build_profile(self.execute), schema_files=[self.__schema_file_path__], schema_ext_files=[self.__schema_ext_path__]) def __git__(self): self.status = self.ans_service.run_git_playbook(self.git) if self.status != 0: raise TefloExecuteError('Failed to clone git repositories!') def __shell__(self): self.logger.info('Executing shell commands:') for index, shell in enumerate(self.shell): result = self.ans_service.run_shell_playbook(shell) ignorerc = self.ignorerc validrc = self.validrc if "ignore_rc" in shell and shell['ignore_rc']: ignorerc = shell['ignore_rc'] elif "valid_rc" in shell and shell['valid_rc']: validrc = shell['valid_rc'] if ignorerc: self.logger.info("Ignoring the rc for: %s" % shell['command']) elif validrc: if result['rc'] not in validrc: self.status = 1 self.logger.error( 'Shell command %s failed. Host=%s rc=%d Error: %s' % (shell['command'], result['host'], result['rc'], result['err'])) else: if result['rc'] != 0: self.status = 1 self.logger.error( 'Shell command %s failed. Host=%s rc=%d Error: %s' % (shell['command'], result['host'], result['rc'], result['err'])) if self.status == 1: raise ArchiveArtifactsError( 'Script %s failed to run successfully!' % shell['name']) else: self.logger.info('Successfully executed command : %s' % shell['command']) def __script__(self): self.logger.info('Executing scripts:') for index, script in enumerate(self.script): result = self.ans_service.run_script_playbook(script) ignorerc = self.ignorerc validrc = self.validrc if "ignore_rc" in script and script['ignore_rc']: ignorerc = script['ignore_rc'] elif "valid_rc" in script and script['valid_rc']: validrc = script['valid_rc'] if ignorerc: self.logger.info("Ignoring the rc for: %s" % script['name']) elif validrc: if result['rc'] not in validrc: self.status = 1 self.logger.error( 'Script %s failed. Host=%s rc=%d Error: %s' % (script['name'], result['host'], result['rc'], result['err'])) else: if result['rc'] != 0: self.status = 1 self.logger.error( 'Script %s failed. Host=%s rc=%d Error: %s' % (script['name'], result['host'], result['rc'], result['err'])) if self.status == 1: raise ArchiveArtifactsError('Script %s failed to run ' 'successfully!' % script['name']) else: self.logger.info('Successfully executed script : %s' % script['name']) def __playbook__(self): self.logger.info('Executing playbooks:') for index, playbook in enumerate(self.playbook): results = self.ans_service.run_playbook(playbook) ignorerc = self.ignorerc if "ignore_rc" in playbook and playbook['ignore_rc']: ignorerc = playbook['ignore_rc'] if ignorerc: self.logger.info("Ignoring the rc for: %s" % playbook['name']) elif results[0] != 0: self.status = 1 raise ArchiveArtifactsError( 'Playbook %s failed to run successfully!' % playbook['name']) else: self.logger.info('Successfully executed playbook : %s' % playbook['name']) def __artifacts__(self): """Archive artifacts produced by the tests. This method takes a string formatted playbook, writes it to disk, provides the test artifacts details to the playbook and runs it. The result is on the machine where teflo is run, all test artifacts will be archived inside the data folder. Example artifacts archive structure: artifacts/ host_01/ test_01_output.log results/ .. host_02/ test_01_output.log results/ .. """ # local path on disk to save artifacts destination = self.config['ARTIFACT_FOLDER'] self.logger.info('Fetching test artifacts @ %s' % destination) artifact_location = list() # settings required by synchronize module os.environ['ANSIBLE_LOCAL_TEMP'] = '$HOME/.ansible/tmp' os.environ['ANSIBLE_REMOTE_TEMP'] = '$HOME/.ansible/tmp' # setting variable so to no display any skipped tasks os.environ['DISPLAY_SKIPPED_HOSTS'] = 'False' results = self.ans_service.run_artifact_playbook( destination, self.artifacts) if results[0] != 0: self.logger.error(results[1]) raise TefloExecuteError('A failure occurred while trying to copy ' 'test artifacts.') # Get results from file try: with open('sync-results-' + self.ans_service.uid + '.txt') as fp: lines = fp.read().splitlines() except (IOError, OSError) as ex: self.logger.error(ex) raise TefloExecuteError( 'Failed to find the sync-results.txt file ' 'which means there was an uncaught failure running ' 'the synchronization playbook. Please enable verbose Ansible ' 'logging in the teflo.cfg file and try again.') # Build Results sync_results = [] for line in lines: host, artifact, dest, skipped, rc = ast.literal_eval( textwrap.dedent(line).strip()) sync_results.append({ 'host': host, 'artifact': artifact, 'destination': dest, 'skipped': skipped, 'rc': rc }) # remove Sync Results file os.remove('sync-results-' + self.ans_service.uid + '.txt') for r in sync_results: if r['rc'] != 0 and not r['skipped']: # checking if exit on error is set to true in teflo.cfg file if self.config.get('RUNNER_EXIT_ON_ERROR', 'False').lower() == 'true': raise TefloExecuteError( 'Failed to copy the artifact(s), %s, from %s' % (r['artifact'], r['host'])) else: self.logger.error( 'Failed to copy the artifact(s), %s, from %s' % (r['artifact'], r['host'])) if r['rc'] == 0 and not r['skipped']: temp_list = r['artifact'].replace('[', '').replace(']', '').replace( "'", "").split(',') res_folder_parts = self.config['RESULTS_FOLDER'].split('/') dest_path_parts = r['destination'].split('/') if not self.ans_service.ans_extra_vars['localhost']: art_list = [a[11:] for a in temp_list if 'cd+' not in a] path = '/'.join(r['destination'].split('/')[-3:]) else: path = '/'.join( r['destination'].split('/')[len(res_folder_parts):-1]) art_list = [ '/'.join( a.replace('’', "").split('->')[-1].split('/')[( len(dest_path_parts) - 1):]) for a in temp_list ] self.logger.info('Copied the artifact(s), %s, from %s' % (art_list, r['host'])) # Adding the only the artifacts which are not already present for artifact in art_list: art = os.path.join(path, artifact) if art not in artifact_location: artifact_location.append(art) if r['skipped']: self.logger.warning( 'Could not find artifact(s), %s, on %s. Make sure the file exists ' 'and defined properly in the definition file.' % (r['artifact'], r['host'])) # Update the execute resource with the location of artifacts if self.execute.artifact_locations: for item in artifact_location: if item not in self.execute.artifact_locations: self.execute.artifact_locations.append(item) else: self.execute.artifact_locations = artifact_location if self.config.get('RUNNER_TESTRUN_RESULTS') and self.config.get( 'RUNNER_TESTRUN_RESULTS').lower() == 'false': self.execute.testrun_results = {} else: self.execute.testrun_results = create_testrun_results( self.injector.inject_list(self.execute.artifact_locations), self.config) # printing out the testrun results on the console self._print_testrun_results() def _print_testrun_results(self): """ This method prints out the aggregate and individual test results for the xml files in the artifacts collected """ if self.execute.testrun_results and self.execute.testrun_results.get( 'individual_results'): self.logger.info('\n') self.logger.info('-' * 79) self.logger.info('TESTRUN RESULTS SUMMARY'.center(79)) self.logger.info('-' * 79) self.logger.info(' * AGGREGATE RESULTS * '.center(79)) self.logger.info('-' * 79) self.logger.info( ' * Total Tests : %s' % self.execute.testrun_results['aggregate_testrun_results'] ['total_tests']) self.logger.info( ' * Failed Tests : %s' % self.execute.testrun_results['aggregate_testrun_results'] ['failed_tests']) self.logger.info( ' * Skipped Tests : %s' % self.execute.testrun_results['aggregate_testrun_results'] ['skipped_tests']) self.logger.info( ' * Passed Tests : %s' % self.execute.testrun_results['aggregate_testrun_results'] ['passed_tests']) self.logger.info('-' * 79) if len(self.execute.testrun_results.get('individual_results')) > 1: self.logger.info(' * INDIVIDUAL RESULTS * '.center(79)) self.logger.info('-' * 79) for res in self.execute.testrun_results.get( 'individual_results'): for keys, values in res.items(): self.logger.info(' * File Name : %s' % keys) self.logger.info(' * Total Tests : %s' % values['total_tests']) self.logger.info(' * Failed Tests : %s' % values['failed_tests']) self.logger.info(' * Skipped Tests : %s' % values['skipped_tests']) self.logger.info(' * Passed Tests : %s' % values['passed_tests']) self.logger.info('-' * 79) else: self.logger.info( ' No artifacts were collected OR artifacts collected had no xml files' ) self.logger.info('-' * 79 + '\n') def run(self): """Run. The run method is the main entry point for the runner executor. This method will invoke various other methods in order to successfully run the runners execute types given. """ for attr in ['git', 'shell', 'playbook', 'script', 'artifacts']: # skip if the execute resource does not have the attribute defined if not getattr(self, attr): continue # call the method associated to the execute resource attribute try: getattr(self, '__%s__' % attr)() except (ArchiveArtifactsError, TefloExecuteError, AnsibleServiceError) as ex: # test execution failed, test artifacts may still have been # generated. lets go ahead and archive these for debugging # purposes self.logger.error(ex.message) if (attr != 'git' or attr != 'artifacts') and self.artifacts is not None: self.logger.info( 'Test Execution has failed but still fetching any test generated artifacts' ) self.__artifacts__() self.status = 1 if self.status: break finally: self.ans_service.alog_update(folder_name='ansible_executor') return self.status
class AnsibleOrchestratorPlugin(OrchestratorPlugin): """Ansible orchestrator Plugin. This class primary responsibility is for processing teflo actions. These actions for the ansible orchestrator plugin could be in the form of a playbook or module call. """ __plugin_name__ = 'ansible' __schema_file_path__ = os.path.abspath( os.path.join(os.path.dirname(__file__), "files/schema.yml")) __schema_ext_path__ = os.path.abspath( os.path.join(os.path.dirname(__file__), "files/extensions.py")) def __init__(self, package): """Constructor. :param package: action resource :type package: object """ super(AnsibleOrchestratorPlugin, self).__init__(package) self.options = getattr(package, 'ansible_options', None) self.galaxy_options = getattr(package, 'ansible_galaxy_options', None) self.playbook = getattr(package, 'ansible_playbook', None) self.script = getattr(package, 'ansible_script', None) self.shell = getattr(package, 'ansible_shell', None) self.all_hosts = getattr(package, 'all_hosts', []) # calling the method to do a backward compatibility check in case user is defining name field as a path for # script or playbook # TODO delete this if we want to remove backward compatibility for later releases self.backwards_compat_check() # ansible service object self.ans_service = AnsibleService( self.config, self.hosts, self.all_hosts, self.options, self.galaxy_options, concurrency=self.config['TASK_CONCURRENCY']['ORCHESTRATE'].lower()) def backwards_compat_check(self): """ This method does the check if name field is a script/playbook path or name of the orchestrator task by checking is '/' i spresent in the string. If it is a path then it checks if ansible_script field is a boolean and is True . If so a new dictionary is created with key=name and the value= script path. This is then assigned to ansible_script. If the ansible_script is not present then it is understood that the path belongs to a playbook. a new dictionary is created with key=name and the value= playbook path. This is then assigned to ansible_playbook. """ if os.sep in self.action_name: self.logger.warning( 'Using name field to provide ansible_script/ansible_playbook path' ) self.logger.debug( 'Joining current workspace %s to the ansible_script/playbook path %s' % (self.workspace, self.action_name)) new_item = {'name': os.path.join(self.workspace, self.action_name)} self.logger.debug( 'Converting ansible_script/playbook path to dictionary %s' % new_item) if isinstance(self.script, bool) and self.script: self.script = new_item elif not self.script: self.playbook = new_item else: raise TefloOrchestratorError( 'Error in defining the orchestrate name/ansible_script/ansible_playbook' ' fields') def validate(self): """Validate that script/playbook path is valid and exists.""" # schema validation for ansible_orchestrate schema schema_validator(schema_data=self.build_profile(self.action), schema_files=[self.__schema_file_path__], schema_ext_files=[self.__schema_ext_path__]) # verifying when script or playbook is present in the orchestrate task, the name key provides a path that exist if self.script: if os.path.exists(self.script.get('name').split(' ', 1)[0]): self.logger.debug('Found Action resource script %s' % self.script.get('name')) else: raise TefloOrchestratorError( 'Cannot find Action resource script %s' % self.script.get('name')) elif self.playbook: if os.path.exists(self.playbook.get('name').split(' ', 1)[0]): self.logger.debug('Found Action resource playbook %s' % self.playbook.get('name')) else: raise TefloOrchestratorError( 'Cannot find Action resource playbook %s' % self.playbook.get('name')) def __playbook__(self): self.logger.info('Executing playbook:') results = self.ans_service.run_playbook(self.playbook) if results[0] != 0: raise TefloOrchestratorError( 'Playbook %s failed to run successfully!' % self.playbook['name']) else: self.logger.info('Successfully completed playbook : %s' % self.playbook['name']) def __script__(self): self.logger.info('Executing script:') result = self.ans_service.run_script_playbook(self.script) if result['rc'] != 0: raise TefloOrchestratorError( 'Script %s failed. Host=%s rc=%d Error: %s' % (self.script['name'], result['host'], result['rc'], result['err'])) else: self.logger.info('Successfully completed script : %s' % self.script['name']) def __shell__(self): self.logger.info('Executing shell command:') for shell in self.shell: result = self.ans_service.run_shell_playbook(shell) if result['rc'] != 0: raise TefloOrchestratorError( 'Command %s failed. Host=%s rc=%d Error: %s' % (shell['command'], result['host'], result['rc'], result['err'])) else: self.logger.info('Successfully completed command : %s' % shell['command']) def run(self): """Run method for orchestrator. """ # Orchestrate supports only one action_types( playbook, script or shell) per task # if more than one action types are declared then the first action_type found will be executed flag = 0 res = self.action.status for item in ['playbook', 'script', 'shell']: # Orchestrate supports only one action_types( playbook, script or shell) per task # if more than one action types are declared then the first action_type found will be executed if getattr(self, item): flag += 1 # Download ansible roles (if applicable) if flag == 1: try: self.ans_service.download_roles() except (TefloOrchestratorError, AnsibleServiceError): if 'retry' in self.galaxy_options and self.galaxy_options[ 'retry']: self.logger.Info( "Download failed. Sleeping 5 seconds and \ trying again") time.sleep(5) self.ans_service.download_roles() try: getattr(self, '__%s__' % item)() # If every script/playbook/shell command within the each orchestrate has passed, # mark that task as successful self.logger.debug( "Successful completion of orchestrate task %s with return value %s" % (self.action_name, res)) res = 0 except (TefloOrchestratorError, AnsibleServiceError, Exception) as e: res = 1 self.logger.error("Orchestration failed : %s" % e) break finally: # get ansible logs as needed # providing folder_name as 'ansible_orchestrator' so all ansible logs wil be under this folder self.ans_service.alog_update( folder_name='ansible_orchestrator') else: self.logger.warning( 'Found more than one action types (ansible_playbook, ansible_script ,' 'ansible_shell )in the orchestrate task, only the first found' ' action type was executed, the rest are skipped.') break return res
class ResourceChecker(object): def __init__(self, scenario, config): """Constructor. :param scenario: teflo scenario object :type scenario: object :param config: scenario config :type config: object """ self.scenario = scenario self.config = config def validate_resources(self): if getattr(self.scenario, 'resource_check').get('monitored_services', None) and \ self.config['RESOURCE_CHECK_ENDPOINT']: # Verify dependency check components are supported/valid self.__check_service() if getattr(self.scenario, 'resource_check').get('playbook', None) or \ getattr(self.scenario, 'resource_check').get('script', None): self.__check_custom_resource() def __check_custom_resource(self): # method to run playbook or scripts as a part of resource validation on local hosts before the scenario # provisioning is started # creating ansible service class object self.ans_service = AnsibleService(self.config, hosts=['localhost'], all_hosts=[], ansible_options=None) LOG.info("Running validation using user provided playbook/scripts") for type in ['playbook', 'script']: if not getattr(self.scenario, 'resource_check').get(type, None): continue for item in getattr(self.scenario, 'resource_check').get(type): status = 0 self.ans_service.options = item.get('ansible_options', None) self.ans_service.galaxy_options = item.get( 'ansible_galaxy_options', None) error_msg = '' try: if type == 'script': # running the script result = self.ans_service.run_script_playbook(item) if result['rc'] != 0: status = 1 LOG.error( 'Script %s failed with return code %s and error %s' % (item['name'], result['rc'], result['err'])) error_msg = result['err'] else: # running the playbook result = self.ans_service.run_playbook(item) if result[0] != 0: status = 1 error_msg = result[1] if status == 1: raise TefloError( "Failed to run validation playbook/script %s. ERROR: %s" % (item['name'], error_msg)) else: LOG.info( "Successfully completed resource_check validation for playbook/script: %s", item['name']) except TefloError: raise TefloError( "Failed to run resource_check validation for playbook/script. ERROR: %s" % error_msg) def __check_service(self): """ External Component Dependency Check Throws exception if all components are not UP :param scenario: teflo scenario object :param config: teflo config object """ # External Dependency Check # Available components to check ci-rhos, zabbix-sysops, brew, covscan # polarion, rpmdiff, umb, errata, rdo-cloud # gerrit # Verify dependency check components are supported/valid then # Check status (UP/DOWN) # Only check if dependency check endpoint set and components given # Else it is ignored LOG.info('Running external resource validation') if self.config['RESOURCE_CHECK_ENDPOINT']: endpoint = self.config['RESOURCE_CHECK_ENDPOINT'] ext_resources_avail = True # component_names = self.scenario.resource_check['service'] component_names = getattr(self.scenario, 'resource_check').get( 'monitored_services', None) urllib3.disable_warnings() components = cachet.Components(endpoint=endpoint, verify=False) LOG.info(' DEPENDENCY CHECK '.center(64, '-')) for comp in component_names: comp_resource_invalid = False comp_resource_avail = False for attempts in range(1, 6): component_data = components.get(params={'name': comp}) if json.loads(component_data)['data']: comp_status = json.loads( component_data)['data'][0]['status'] if comp_status == 4: comp_resource_avail = False time.sleep(30) continue else: comp_resource_avail = True break else: comp_resource_invalid = True if comp_resource_avail is not True or comp_resource_invalid is True: ext_resources_avail = False if comp_resource_invalid: LOG.info('{:>40} {:<9} - Attempts {}'.format( comp.upper(), ': INVALID', attempts)) else: LOG.info('{:>40} {:<9} - Attempts {}'.format( comp.upper(), ': UP' if comp_resource_avail else ': DOWN', attempts)) warnings.resetwarnings() LOG.info(''.center(64, '-')) if ext_resources_avail is not True: LOG.error( "ERROR: Not all external resources are available or valid. Not running scenario" ) raise TefloError( 'Scenario %s will not be run! Not all external resources are available or valid' % getattr(self.scenario, 'name'))