class TestNode(object):
  """Main loop of an ERP5 testnode.

  Polls the test-distribution master for test suites, keeps local git
  checkouts in sync, writes a buildout profile pointing at those checkouts,
  and delegates execution to a runner (UnitTestRunner / ScalabilityTestRunner).

  NOTE(review): this source is a collapsed dump; the outer ``try:`` opened in
  run() is never closed in the visible text (its except/finally tail is
  missing), so this definition is truncated — restore the tail from upstream
  before relying on this file.  Python 2 only (``has_key``, ``except E, e``).
  """

  def __init__(self, log, config, max_log_time=MAX_LOG_TIME,
               max_temp_time=MAX_TEMP_TIME):
    # Keep the original logger separately: self.log is later swapped to a
    # per-suite logger by registerSuiteLog() and restored from testnode_log
    # at the top of each run() iteration.
    self.testnode_log = log
    self.log = log
    self.config = config or {}
    self.process_manager = ProcessManager(log)
    self.working_directory = config['working_directory']
    # test_suite_reference -> NodeTestSuite cache, see getNodeTestSuite().
    self.node_test_suite_dict = {}
    # logging.FileHandler of the currently active suite log (see
    # _initializeSuiteLog()); None until the first suite log is opened.
    self.file_handler = None
    # Retention thresholds, in days, for _cleanupLog() /
    # _cleanupTemporaryFiles().
    self.max_log_time = max_log_time
    self.max_temp_time = max_temp_time
    self.url_access = "https://[0::0]:0123" # Ipv6 + port of the node

  def checkOldTestSuite(self, test_suite_data):
    """Delete every checkout under the working directory whose reference is
    no longer listed in *test_suite_data* (list of dicts from the master).
    """
    config = self.config
    installed_reference_set = set(os.listdir(self.working_directory))
    wished_reference_set = set(
        [x['test_suite_reference'] for x in test_suite_data])
    to_remove_reference_set = installed_reference_set.difference(
        wished_reference_set)
    for y in to_remove_reference_set:
      fpath = os.path.join(self.working_directory, y)
      # Drop the cached NodeTestSuite first so nothing keeps referencing the
      # directory we are about to delete.
      self.delNodeTestSuite(y)
      self.log("testnode.checkOldTestSuite, DELETING : %r" % (fpath, ))
      if os.path.isdir(fpath):
        shutil.rmtree(fpath)
      else:
        os.remove(fpath)

  def getNodeTestSuite(self, reference):
    """Return the NodeTestSuite for *reference*, creating and caching it on
    first use; log/config/process_manager are refreshed on every call so a
    cached suite always uses the current per-suite logger.
    """
    node_test_suite = self.node_test_suite_dict.get(reference)
    if node_test_suite is None:
      node_test_suite = NodeTestSuite(reference)
      self.node_test_suite_dict[reference] = node_test_suite
    node_test_suite.edit(log=self.log, config=self.config,
                         process_manager=self.process_manager)
    return node_test_suite

  def delNodeTestSuite(self, reference):
    """Forget the cached NodeTestSuite for *reference*, if any."""
    # NOTE(review): dict.has_key is Python-2-only (removed in Python 3);
    # `reference in self.node_test_suite_dict` is the portable spelling.
    if self.node_test_suite_dict.has_key(reference):
      self.node_test_suite_dict.pop(reference)

  def constructProfile(self, node_test_suite, test_type,
                       use_relative_path=False):
    """Write node_test_suite.custom_profile_path: a buildout profile that
    extends the repository profile named by PROFILE_PATH_KEY and adds one
    section per repository — pinned to the local checkout for regular tests,
    or to an obfuscated remote URL + exact revision for ScalabilityTest.

    Raises ValueError unless PROFILE_PATH_KEY is defined exactly once across
    node_test_suite.vcs_repository_list.
    """
    config = self.config
    profile_content = ''
    assert len(node_test_suite.vcs_repository_list
              ), "we must have at least one repository"
    profile_path_count = 0
    profile_content_list = []
    for vcs_repository in node_test_suite.vcs_repository_list:
      url = vcs_repository['url']
      buildout_section_id = vcs_repository.get('buildout_section_id', None)
      repository_path = vcs_repository['repository_path']
      try:
        profile_path = vcs_repository[PROFILE_PATH_KEY]
      except KeyError:
        pass
      else:
        profile_path_count += 1
        if profile_path_count > 1:
          raise ValueError(PROFILE_PATH_KEY + ' defined more than once')
        # Absolute path to relative path
        software_config_path = os.path.join(repository_path, profile_path)
        if use_relative_path:
          from_path = os.path.join(self.working_directory,
                                   node_test_suite.reference)
          software_config_path = os.path.relpath(software_config_path,
                                                 from_path)
        profile_content_list.append("""
[buildout]
extends = %(software_config_path)s
""" % {'software_config_path': software_config_path})
      # Construct sections
      if not (buildout_section_id is None):
        # Absolute path to relative
        if use_relative_path:
          from_path = os.path.join(self.working_directory,
                                   node_test_suite.reference)
          repository_path = os.path.relpath(repository_path, from_path)
        if test_type == "ScalabilityTest":
          # updater = Updater(repository_path, git_binary=self.config['git_binary'],
          #                   branch = vcs_repository.get('branch','master'),
          #                   log=self.log, process_manager=self.process_manager)
          # updater.checkout()
          # revision = updater.getRevision()[1]
          all_revision = node_test_suite.revision
          # from 'sec1=xx-azer,sec2=yy-qwer,..' to [[sec1,azer],[sec2,qwer],..]
          revision_list = [[x.split('=')[0], x.split('=')[1].split('-')[1]]
                           for x in all_revision.split(',')]
          # from [[sec1,azer],[sec2,qwer],..] to {sec1:azer,sec2:qwer,..}
          revision_dict = {branch: revision
                           for branch, revision in revision_list}
          # <obfuscated_url> word is modified by in runner.prepareSlapOSForTestSuite()
          profile_content_list.append("""
[%(buildout_section_id)s]
repository = <obfuscated_url>/%(buildout_section_id)s/%(buildout_section_id)s.git
revision = %(revision)s
ignore-ssl-certificate = true
develop = false
""" % {'buildout_section_id': buildout_section_id,
       'revision': revision_dict[buildout_section_id]})
        else:
          profile_content_list.append("""
[%(buildout_section_id)s]
repository = %(repository_path)s
branch = %(branch)s
develop = false
""" % {'buildout_section_id': buildout_section_id,
       'repository_path': repository_path,
       'branch': vcs_repository.get('branch', 'master')})
    if not profile_path_count:
      raise ValueError(PROFILE_PATH_KEY + ' not defined')
    # Write file
    # NOTE(review): file is not closed on a write error — a ``with`` block
    # would be safer (left untouched in this doc-only pass).
    custom_profile = open(node_test_suite.custom_profile_path, 'w')
    # sort to have buildout section first
    profile_content_list.sort(
        key=lambda x: [x, ''][x.startswith('\n[buildout]')])
    custom_profile.write(''.join(profile_content_list))
    custom_profile.close()
    # NOTE(review): only the *last* repository's path is appended —
    # presumably intentional, but worth confirming against callers.
    sys.path.append(repository_path)

  def getAndUpdateFullRevisionList(self, node_test_suite):
    """Checkout/update every vcs repository of *node_test_suite* and return
    the list of '<repository_id>=<revision>' strings (revision presumably
    '<count>-<hash>' — confirm with Updater.getRevision()).  Also stores the
    comma-joined form on node_test_suite.revision.
    """
    full_revision_list = []
    config = self.config
    log = self.log
    for vcs_repository in node_test_suite.vcs_repository_list:
      repository_path = vcs_repository['repository_path']
      repository_id = vcs_repository['repository_id']
      branch = vcs_repository.get('branch')
      # Make sure we have local repository
      updater = Updater(repository_path, git_binary=config['git_binary'],
                        branch=branch, log=log,
                        process_manager=self.process_manager,
                        working_directory=node_test_suite.working_directory,
                        url=vcs_repository["url"])
      updater.checkout()
      revision = "-".join(updater.getRevision())
      full_revision_list.append('%s=%s' % (repository_id, revision))
    node_test_suite.revision = ','.join(full_revision_list)
    return full_revision_list

  def registerSuiteLog(self, test_result, node_test_suite):
    """Create a log dedicated for the test suite, and register the url to
    master node.  Returns the path of the new suite log; also redirects
    self.log and the process manager's log to it.
    """
    suite_log_path, folder_id = node_test_suite.createSuiteLog()
    self._initializeSuiteLog(suite_log_path)
    # TODO make the path into url
    test_result.reportStatus(
        'LOG url', "%s/%s" % (self.config.get('httpd_url'), folder_id), '')
    self.log("going to switch to log %r" % suite_log_path)
    self.process_manager.log = self.log = self.getSuiteLog()
    return suite_log_path

  def getSuiteLog(self):
    # suite_log is only set by _initializeSuiteLog(); calling this before
    # registerSuiteLog() raises AttributeError.
    return self.suite_log

  def _initializeSuiteLog(self, suite_log_path):
    """Point the 'testsuite' logger at *suite_log_path*, replacing the file
    handler of any previous suite, and expose logger.info as self.suite_log.
    """
    # remove previous handlers
    logger = logging.getLogger('testsuite')
    if self.file_handler is not None:
      logger.removeHandler(self.file_handler)
    # and replace it with new handler
    logger_format = '%(asctime)s %(name)-13s: %(levelname)-8s %(message)s'
    formatter = logging.Formatter(logger_format)
    logging.basicConfig(level=logging.INFO, format=logger_format)
    self.file_handler = logging.FileHandler(filename=suite_log_path)
    self.file_handler.setFormatter(formatter)
    logger.addHandler(self.file_handler)
    logger.info('Activated logfile %r output' % suite_log_path)
    self.suite_log = logger.info

  def checkRevision(self, test_result, node_test_suite):
    """If the master already assigned a different revision to this test
    result, check the local repositories out at that revision so every node
    tests the same code, then record it on node_test_suite.
    """
    config = self.config
    log = self.log
    if log is None:
      # NOTE(review): dead branch — reassigns the same value it tested.
      log = self.log
    if node_test_suite.revision != test_result.revision:
      log('Disagreement on tested revision, checking out: %r' %
          ((node_test_suite.revision, test_result.revision), ))
      for i, repository_revision in enumerate(
          test_result.revision.split(',')):
        vcs_repository = node_test_suite.vcs_repository_list[i]
        repository_path = vcs_repository['repository_path']
        # repository_revision is '<repository_id>=<count>-<hash>'; keep the
        # trailing hash only.
        revision = repository_revision.rsplit('-', 1)[1]
        # other testnodes on other boxes are already ready to test another
        # revision
        log(' %s at %s' % (repository_path, node_test_suite.revision))
        updater = Updater(repository_path, git_binary=config['git_binary'],
                          revision=revision, log=log,
                          process_manager=self.process_manager)
        updater.checkout()
        updater.git_update_server_info()
        updater.git_create_repository_link()
      node_test_suite.revision = test_result.revision

  def _cleanupLog(self):
    """Remove per-suite log folders older than max_log_time days."""
    config = self.config
    log_directory = self.config['log_directory']
    now = time.time()
    for log_folder in os.listdir(log_directory):
      folder_path = os.path.join(log_directory, log_folder)
      if os.path.isdir(folder_path):
        # 86400 s = 1 day; threshold is expressed in days.
        if (now - os.stat(folder_path).st_mtime
           ) / 86400 > self.max_log_time:
          self.log("deleting log directory %r" % (folder_path, ))
          shutil.rmtree(folder_path)

  def _cleanupTemporaryFiles(self):
    """
    buildout seems letting files under /tmp. To avoid regular error of
    missing disk space, remove old logs
    """
    temp_directory = self.config["system_temp_folder"]
    now = time.time()
    user_id = os.geteuid()
    for temp_folder in os.listdir(temp_directory):
      folder_path = os.path.join(temp_directory, temp_folder)
      # Only touch buildout/tmp artifacts, and only those owned by us and
      # older than max_temp_time days.
      if (temp_folder.startswith("tmp") or
          temp_folder.startswith("buildout")):
        try:
          stat = os.stat(folder_path)
          if stat.st_uid == user_id and \
              (now - stat.st_mtime)/86400 > self.max_temp_time:
            self.log("deleting temp directory %r" % (folder_path, ))
            if os.path.isdir(folder_path):
              shutil.rmtree(folder_path)
            else:
              os.remove(folder_path)
        except OSError:
          # Entry may vanish or be unreadable between listdir and stat;
          # best-effort cleanup, so log and continue.
          self.log("_cleanupTemporaryFiles exception",
                   exc_info=sys.exc_info())

  def cleanUp(self, test_result):
    """Kill leftover processes and purge old logs/temp files.

    *test_result* is currently unused — kept for call-site compatibility.
    """
    log = self.log
    log('Testnode.cleanUp')
    self.process_manager.killPreviousRun()
    self._cleanupLog()
    self._cleanupTemporaryFiles()

  def run(self):
    """Endless polling loop: fetch suites from the master, prepare SlapOS,
    run each suite through the appropriate runner, report failures, sleep so
    each iteration lasts at least 120 seconds.
    """
    log = self.log
    config = self.config
    slapgrid = None
    previous_revision_dict = {}
    revision_dict = {}
    test_result = None
    test_node_slapos = SlapOSInstance()
    test_node_slapos.edit(
        working_directory=self.config['slapos_directory'])
    try:
      while True:
        try:
          node_test_suite = None
          # Restore the global testnode logger (a previous iteration may
          # have switched self.log to a per-suite logger).
          self.log = self.process_manager.log = self.testnode_log
          self.cleanUp(None)
          remote_test_result_needs_cleanup = False
          begin = time.time()
          portal_url = config['test_suite_master_url']
          portal = taskdistribution.TaskDistributionTool(
              portal_url, logger=DummyLogger(log))
          self.portal = portal
          self.test_suite_portal = taskdistribution.TaskDistributor(
              portal_url, logger=DummyLogger(log))
          self.test_suite_portal.subscribeNode(
              node_title=config['test_node_title'],
              computer_guid=config['computer_id'])
          test_suite_data = self.test_suite_portal.startTestSuite(
              node_title=config['test_node_title'],
              computer_guid=config['computer_id'])
          if type(test_suite_data) == str:
            # Backward compatibility: older masters return a JSON string.
            test_suite_data = json.loads(test_suite_data)
          test_suite_data = Utils.deunicodeData(test_suite_data)
          log("Got following test suite data from master : %r" % \
              (test_suite_data,))
          try:
            my_test_type = self.test_suite_portal.getTestType()
          except:
            # Bare except kept: log the failure for diagnosis, then re-raise
            # so the outer handler decides what to do.
            log("testnode, error during requesting getTestType() method \
from the distributor.")
            raise
          # Select runner according to the test type
          if my_test_type == 'UnitTest':
            runner = UnitTestRunner(self)
          elif my_test_type == 'ScalabilityTest':
            runner = ScalabilityTestRunner(self)
          else:
            log("testnode, Runner type %s not implemented.", my_test_type)
            raise NotImplementedError
          log("Type of current test is %s" % (my_test_type, ))
          # master testnode gets test_suites, slaves get nothing
          runner.prepareSlapOSForTestNode(test_node_slapos)
          # Clean-up test suites
          self.checkOldTestSuite(test_suite_data)
          for test_suite in test_suite_data:
            remote_test_result_needs_cleanup = False
            node_test_suite = self.getNodeTestSuite(
                test_suite["test_suite_reference"])
            node_test_suite.edit(
                working_directory=self.config['working_directory'],
                log_directory=self.config['log_directory'])
            node_test_suite.edit(**test_suite)
            # Re-create the runner per suite (the loop-level runner above was
            # built with self).
            if my_test_type == 'UnitTest':
              runner = UnitTestRunner(node_test_suite)
            elif my_test_type == 'ScalabilityTest':
              runner = ScalabilityTestRunner(node_test_suite)
            else:
              log("testnode, Runner type %s not implemented.", my_test_type)
              raise NotImplementedError
            # XXX: temporary hack to prevent empty test_suite
            if not hasattr(node_test_suite, 'test_suite'):
              node_test_suite.edit(test_suite='')
            run_software = True
            # kill processes from previous loop if any
            self.process_manager.killPreviousRun()
            self.getAndUpdateFullRevisionList(node_test_suite)
            # Write our own software.cfg to use the local repository
            self.constructProfile(node_test_suite, my_test_type,
                                  runner.getRelativePathUsage())
            # Make sure we have local repository
            test_result = portal.createTestResult(
                node_test_suite.revision, [], config['test_node_title'],
                False, node_test_suite.test_suite_title,
                node_test_suite.project_title)
            remote_test_result_needs_cleanup = True
            log("testnode, test_result : %r" % (test_result, ))
            # test_result is None when the master has nothing for us.
            if test_result is not None:
              self.registerSuiteLog(test_result, node_test_suite)
              self.checkRevision(test_result, node_test_suite)
              node_test_suite.edit(test_result=test_result)
              # Now prepare the installation of SlapOS and create instance
              status_dict = runner.prepareSlapOSForTestSuite(
                  node_test_suite)
              # Give some time so computer partitions may start
              # as partitions can be of any kind we have and likely will never have
              # a reliable way to check if they are up or not ...
              time.sleep(20)
              if my_test_type == 'UnitTest':
                runner.runTestSuite(node_test_suite, portal_url)
              elif my_test_type == 'ScalabilityTest':
                error_message = None
                # A problem appeared during SR installation
                if status_dict['status_code'] == 1:
                  error_message = "Software installation too long or error(s) are present during SR install."
                else:
                  status_dict = runner.runTestSuite(
                      node_test_suite, portal_url)
                  # A problem appeared during runTestSuite
                  if status_dict['status_code'] == 1:
                    error_message = status_dict['error_message']
                # If an error appeared, report it upstream and abort this
                # iteration via the ValueError handler below.
                if error_message:
                  test_result.reportFailure(stdout=error_message)
                  self.log(error_message)
                  raise ValueError(error_message)
              else:
                raise NotImplementedError
            # break the loop to get latest priorities from master
            break
          self.cleanUp(test_result)
        except (SubprocessError, CalledProcessError) as e:
          log("SubprocessError", exc_info=sys.exc_info())
          if remote_test_result_needs_cleanup:
            status_dict = e.status_dict or {}
            test_result.reportFailure(
                command=status_dict.get('command'),
                stdout=status_dict.get('stdout'),
                stderr=status_dict.get('stderr'),
            )
          continue
        except ValueError as e:
          # This could at least happens if runTestSuite is not found
          log("ValueError", exc_info=sys.exc_info())
          if node_test_suite is not None:
            node_test_suite.retry_software_count += 1
        except CancellationError, e:
          # NOTE(review): Python-2-only except syntax; `as e` works on both.
          log("CancellationError", exc_info=sys.exc_info())
          self.process_manager.under_cancellation = False
          node_test_suite.retry = True
          continue
        except:
          ex_type, ex, tb = sys.exc_info()
          traceback.print_tb(tb)
          log("erp5testnode exception", exc_info=sys.exc_info())
          raise
        now = time.time()
        self.cleanUp(test_result)
        # Pace the loop: never poll the master more often than every 120 s.
        if (now - begin) < 120:
          sleep_time = 120 - (now - begin)
          log("End of processing, going to sleep %s" % sleep_time)
          time.sleep(sleep_time)
    # NOTE(review): the except/finally matching the outer ``try:`` above is
    # missing from this source chunk (truncated dump) — restore it from
    # upstream; as written this method is syntactically incomplete.
class TestNode(object):
  """Main loop of an ERP5 testnode (duplicate definition).

  NOTE(review): this is a second, token-identical definition of TestNode in
  the same file — at import time it shadows the earlier one.  The file looks
  like a concatenation of variants; deduplicate before use.  As in the first
  copy, the outer ``try:`` in run() is never closed in the visible text, so
  this definition is also truncated.  Python 2 only.
  """

  def __init__(self, log, config, max_log_time=MAX_LOG_TIME,
               max_temp_time=MAX_TEMP_TIME):
    # testnode_log keeps the original logger; self.log is swapped to a
    # per-suite logger by registerSuiteLog() and restored in run().
    self.testnode_log = log
    self.log = log
    self.config = config or {}
    self.process_manager = ProcessManager(log)
    self.working_directory = config['working_directory']
    # test_suite_reference -> NodeTestSuite cache.
    self.node_test_suite_dict = {}
    # Active suite-log FileHandler; None until the first suite log opens.
    self.file_handler = None
    # Retention thresholds in days for the cleanup helpers.
    self.max_log_time = max_log_time
    self.max_temp_time = max_temp_time
    self.url_access = "https://[0::0]:0123" # Ipv6 + port of the node

  def checkOldTestSuite(self, test_suite_data):
    """Delete working-directory checkouts not listed in *test_suite_data*."""
    config = self.config
    installed_reference_set = set(os.listdir(self.working_directory))
    wished_reference_set = set(
        [x['test_suite_reference'] for x in test_suite_data])
    to_remove_reference_set = installed_reference_set.difference(
        wished_reference_set)
    for y in to_remove_reference_set:
      fpath = os.path.join(self.working_directory, y)
      # Drop the cached suite before removing its directory.
      self.delNodeTestSuite(y)
      self.log("testnode.checkOldTestSuite, DELETING : %r" % (fpath, ))
      if os.path.isdir(fpath):
        shutil.rmtree(fpath)
      else:
        os.remove(fpath)

  def getNodeTestSuite(self, reference):
    """Return (creating/caching if needed) the NodeTestSuite for *reference*;
    its log/config/process_manager are refreshed on every call.
    """
    node_test_suite = self.node_test_suite_dict.get(reference)
    if node_test_suite is None:
      node_test_suite = NodeTestSuite(reference)
      self.node_test_suite_dict[reference] = node_test_suite
    node_test_suite.edit(log=self.log, config=self.config,
                         process_manager=self.process_manager)
    return node_test_suite

  def delNodeTestSuite(self, reference):
    """Forget the cached NodeTestSuite for *reference*, if any."""
    # NOTE(review): has_key is Python-2-only; `in` is the portable form.
    if self.node_test_suite_dict.has_key(reference):
      self.node_test_suite_dict.pop(reference)

  def constructProfile(self, node_test_suite, test_type,
                       use_relative_path=False):
    """Write node_test_suite.custom_profile_path: extends the repository
    profile (PROFILE_PATH_KEY, required exactly once — ValueError otherwise)
    plus one section per repository, pinned to the local checkout, or for
    ScalabilityTest to an obfuscated URL + exact revision.
    """
    config = self.config
    profile_content = ''
    assert len(node_test_suite.vcs_repository_list
              ), "we must have at least one repository"
    profile_path_count = 0
    profile_content_list = []
    for vcs_repository in node_test_suite.vcs_repository_list:
      url = vcs_repository['url']
      buildout_section_id = vcs_repository.get('buildout_section_id', None)
      repository_path = vcs_repository['repository_path']
      try:
        profile_path = vcs_repository[PROFILE_PATH_KEY]
      except KeyError:
        pass
      else:
        profile_path_count += 1
        if profile_path_count > 1:
          raise ValueError(PROFILE_PATH_KEY + ' defined more than once')
        # Absolute path to relative path
        software_config_path = os.path.join(repository_path, profile_path)
        if use_relative_path:
          from_path = os.path.join(self.working_directory,
                                   node_test_suite.reference)
          software_config_path = os.path.relpath(software_config_path,
                                                 from_path)
        profile_content_list.append("""
[buildout]
extends = %(software_config_path)s
""" % {'software_config_path': software_config_path})
      # Construct sections
      if not (buildout_section_id is None):
        # Absolute path to relative
        if use_relative_path:
          from_path = os.path.join(self.working_directory,
                                   node_test_suite.reference)
          repository_path = os.path.relpath(repository_path, from_path)
        if test_type == "ScalabilityTest":
          # updater = Updater(repository_path, git_binary=self.config['git_binary'],
          #                   branch = vcs_repository.get('branch','master'),
          #                   log=self.log, process_manager=self.process_manager)
          # updater.checkout()
          # revision = updater.getRevision()[1]
          all_revision = node_test_suite.revision
          # from 'sec1=xx-azer,sec2=yy-qwer,..' to [[sec1,azer],[sec2,qwer],..]
          revision_list = [[x.split('=')[0], x.split('=')[1].split('-')[1]]
                           for x in all_revision.split(',')]
          # from [[sec1,azer],[sec2,qwer],..] to {sec1:azer,sec2:qwer,..}
          revision_dict = {branch: revision
                           for branch, revision in revision_list}
          # <obfuscated_url> word is modified by in runner.prepareSlapOSForTestSuite()
          profile_content_list.append("""
[%(buildout_section_id)s]
repository = <obfuscated_url>/%(buildout_section_id)s/%(buildout_section_id)s.git
revision = %(revision)s
ignore-ssl-certificate = true
develop = false
""" % {'buildout_section_id': buildout_section_id,
       'revision': revision_dict[buildout_section_id]})
        else:
          profile_content_list.append("""
[%(buildout_section_id)s]
repository = %(repository_path)s
branch = %(branch)s
develop = false
""" % {'buildout_section_id': buildout_section_id,
       'repository_path': repository_path,
       'branch': vcs_repository.get('branch', 'master')})
    if not profile_path_count:
      raise ValueError(PROFILE_PATH_KEY + ' not defined')
    # Write file
    # NOTE(review): not closed on a write error — prefer ``with``.
    custom_profile = open(node_test_suite.custom_profile_path, 'w')
    # sort to have buildout section first
    profile_content_list.sort(
        key=lambda x: [x, ''][x.startswith('\n[buildout]')])
    custom_profile.write(''.join(profile_content_list))
    custom_profile.close()
    # NOTE(review): only the last repository's path is appended — confirm.
    sys.path.append(repository_path)

  def getAndUpdateFullRevisionList(self, node_test_suite):
    """Checkout/update every repository, return the
    '<repository_id>=<revision>' list and store its comma-joined form on
    node_test_suite.revision.
    """
    full_revision_list = []
    config = self.config
    log = self.log
    for vcs_repository in node_test_suite.vcs_repository_list:
      repository_path = vcs_repository['repository_path']
      repository_id = vcs_repository['repository_id']
      branch = vcs_repository.get('branch')
      # Make sure we have local repository
      updater = Updater(repository_path, git_binary=config['git_binary'],
                        branch=branch, log=log,
                        process_manager=self.process_manager,
                        working_directory=node_test_suite.working_directory,
                        url=vcs_repository["url"])
      updater.checkout()
      revision = "-".join(updater.getRevision())
      full_revision_list.append('%s=%s' % (repository_id, revision))
    node_test_suite.revision = ','.join(full_revision_list)
    return full_revision_list

  def registerSuiteLog(self, test_result, node_test_suite):
    """Create a log dedicated for the test suite, and register the url to
    master node.  Returns the suite-log path; also redirects self.log.
    """
    suite_log_path, folder_id = node_test_suite.createSuiteLog()
    self._initializeSuiteLog(suite_log_path)
    # TODO make the path into url
    test_result.reportStatus(
        'LOG url', "%s/%s" % (self.config.get('httpd_url'), folder_id), '')
    self.log("going to switch to log %r" % suite_log_path)
    self.process_manager.log = self.log = self.getSuiteLog()
    return suite_log_path

  def getSuiteLog(self):
    # Set by _initializeSuiteLog(); AttributeError before registerSuiteLog().
    return self.suite_log

  def _initializeSuiteLog(self, suite_log_path):
    """Swap the 'testsuite' logger's file handler to *suite_log_path* and
    expose logger.info as self.suite_log.
    """
    # remove previous handlers
    logger = logging.getLogger('testsuite')
    if self.file_handler is not None:
      logger.removeHandler(self.file_handler)
    # and replace it with new handler
    logger_format = '%(asctime)s %(name)-13s: %(levelname)-8s %(message)s'
    formatter = logging.Formatter(logger_format)
    logging.basicConfig(level=logging.INFO, format=logger_format)
    self.file_handler = logging.FileHandler(filename=suite_log_path)
    self.file_handler.setFormatter(formatter)
    logger.addHandler(self.file_handler)
    logger.info('Activated logfile %r output' % suite_log_path)
    self.suite_log = logger.info

  def checkRevision(self, test_result, node_test_suite):
    """Align local checkouts with the revision the master recorded on
    *test_result*, if it differs from ours.
    """
    config = self.config
    log = self.log
    if log is None:
      # NOTE(review): dead branch — reassigns the same value it tested.
      log = self.log
    if node_test_suite.revision != test_result.revision:
      log('Disagreement on tested revision, checking out: %r' % (
          (node_test_suite.revision, test_result.revision),))
      for i, repository_revision in enumerate(
          test_result.revision.split(',')):
        vcs_repository = node_test_suite.vcs_repository_list[i]
        repository_path = vcs_repository['repository_path']
        # Keep only the trailing hash of '<id>=<count>-<hash>'.
        revision = repository_revision.rsplit('-', 1)[1]
        # other testnodes on other boxes are already ready to test another
        # revision
        log(' %s at %s' % (repository_path, node_test_suite.revision))
        updater = Updater(repository_path, git_binary=config['git_binary'],
                          revision=revision, log=log,
                          process_manager=self.process_manager)
        updater.checkout()
        updater.git_update_server_info()
        updater.git_create_repository_link()
      node_test_suite.revision = test_result.revision

  def _cleanupLog(self):
    """Remove per-suite log folders older than max_log_time days."""
    config = self.config
    log_directory = self.config['log_directory']
    now = time.time()
    for log_folder in os.listdir(log_directory):
      folder_path = os.path.join(log_directory, log_folder)
      if os.path.isdir(folder_path):
        # 86400 s = 1 day.
        if (now - os.stat(folder_path).st_mtime)/86400 > self.max_log_time:
          self.log("deleting log directory %r" % (folder_path,))
          shutil.rmtree(folder_path)

  def _cleanupTemporaryFiles(self):
    """
    buildout seems letting files under /tmp. To avoid regular error of
    missing disk space, remove old logs
    """
    temp_directory = self.config["system_temp_folder"]
    now = time.time()
    user_id = os.geteuid()
    for temp_folder in os.listdir(temp_directory):
      folder_path = os.path.join(temp_directory, temp_folder)
      # Only our own, sufficiently old, buildout/tmp artifacts.
      if (temp_folder.startswith("tmp") or
          temp_folder.startswith("buildout")):
        try:
          stat = os.stat(folder_path)
          if stat.st_uid == user_id and \
              (now - stat.st_mtime)/86400 > self.max_temp_time:
            self.log("deleting temp directory %r" % (folder_path,))
            if os.path.isdir(folder_path):
              shutil.rmtree(folder_path)
            else:
              os.remove(folder_path)
        except OSError:
          # Entries may vanish between listdir and stat; best-effort.
          self.log("_cleanupTemporaryFiles exception",
                   exc_info=sys.exc_info())

  def cleanUp(self, test_result):
    """Kill leftover processes and purge old logs/temp files.
    *test_result* is unused — kept for call-site compatibility.
    """
    log = self.log
    log('Testnode.cleanUp')
    self.process_manager.killPreviousRun()
    self._cleanupLog()
    self._cleanupTemporaryFiles()

  def run(self):
    """Endless polling loop (see the first TestNode definition — identical
    behavior): fetch suites, prepare SlapOS, run them, report, pace to 120 s.
    """
    log = self.log
    config = self.config
    slapgrid = None
    previous_revision_dict = {}
    revision_dict = {}
    test_result = None
    test_node_slapos = SlapOSInstance()
    test_node_slapos.edit(
        working_directory=self.config['slapos_directory'])
    try:
      while True:
        try:
          node_test_suite = None
          # Restore the global logger before anything else.
          self.log = self.process_manager.log = self.testnode_log
          self.cleanUp(None)
          remote_test_result_needs_cleanup = False
          begin = time.time()
          portal_url = config['test_suite_master_url']
          portal = taskdistribution.TaskDistributionTool(
              portal_url, logger=DummyLogger(log))
          self.portal = portal
          self.test_suite_portal = taskdistribution.TaskDistributor(
              portal_url, logger=DummyLogger(log))
          self.test_suite_portal.subscribeNode(
              node_title=config['test_node_title'],
              computer_guid=config['computer_id'])
          test_suite_data = self.test_suite_portal.startTestSuite(
              node_title=config['test_node_title'],
              computer_guid=config['computer_id'])
          if type(test_suite_data) == str:
            # Backward compatibility: older masters return a JSON string.
            test_suite_data = json.loads(test_suite_data)
          test_suite_data = Utils.deunicodeData(test_suite_data)
          log("Got following test suite data from master : %r" % \
              (test_suite_data,))
          try:
            my_test_type = self.test_suite_portal.getTestType()
          except:
            # Log for diagnosis, then re-raise to the outer handler.
            log("testnode, error during requesting getTestType() method \
from the distributor.")
            raise
          # Select runner according to the test type
          if my_test_type == 'UnitTest':
            runner = UnitTestRunner(self)
          elif my_test_type == 'ScalabilityTest':
            runner = ScalabilityTestRunner(self)
          else:
            log("testnode, Runner type %s not implemented.", my_test_type)
            raise NotImplementedError
          log("Type of current test is %s" % (my_test_type,))
          # master testnode gets test_suites, slaves get nothing
          runner.prepareSlapOSForTestNode(test_node_slapos)
          # Clean-up test suites
          self.checkOldTestSuite(test_suite_data)
          for test_suite in test_suite_data:
            remote_test_result_needs_cleanup = False
            node_test_suite = self.getNodeTestSuite(
                test_suite["test_suite_reference"])
            node_test_suite.edit(
                working_directory=self.config['working_directory'],
                log_directory=self.config['log_directory'])
            node_test_suite.edit(**test_suite)
            # Rebuild the runner per suite.
            if my_test_type == 'UnitTest':
              runner = UnitTestRunner(node_test_suite)
            elif my_test_type == 'ScalabilityTest':
              runner = ScalabilityTestRunner(node_test_suite)
            else:
              log("testnode, Runner type %s not implemented.", my_test_type)
              raise NotImplementedError
            # XXX: temporary hack to prevent empty test_suite
            if not hasattr(node_test_suite, 'test_suite'):
              node_test_suite.edit(test_suite='')
            run_software = True
            # kill processes from previous loop if any
            self.process_manager.killPreviousRun()
            self.getAndUpdateFullRevisionList(node_test_suite)
            # Write our own software.cfg to use the local repository
            self.constructProfile(node_test_suite, my_test_type,
                                  runner.getRelativePathUsage())
            # Make sure we have local repository
            test_result = portal.createTestResult(
                node_test_suite.revision, [], config['test_node_title'],
                False, node_test_suite.test_suite_title,
                node_test_suite.project_title)
            remote_test_result_needs_cleanup = True
            log("testnode, test_result : %r" % (test_result, ))
            # None when the master has nothing for this node.
            if test_result is not None:
              self.registerSuiteLog(test_result, node_test_suite)
              self.checkRevision(test_result, node_test_suite)
              node_test_suite.edit(test_result=test_result)
              # Now prepare the installation of SlapOS and create instance
              status_dict = runner.prepareSlapOSForTestSuite(
                  node_test_suite)
              # Give some time so computer partitions may start
              # as partitions can be of any kind we have and likely will never have
              # a reliable way to check if they are up or not ...
              time.sleep(20)
              if my_test_type == 'UnitTest':
                runner.runTestSuite(node_test_suite, portal_url)
              elif my_test_type == 'ScalabilityTest':
                error_message = None
                # A problem appeared during SR installation
                if status_dict['status_code'] == 1:
                  error_message = "Software installation too long or error(s) are present during SR install."
                else:
                  status_dict = runner.runTestSuite(
                      node_test_suite, portal_url)
                  # A problem appeared during runTestSuite
                  if status_dict['status_code'] == 1:
                    error_message = status_dict['error_message']
                # Report upstream and abort via the ValueError handler.
                if error_message:
                  test_result.reportFailure(stdout=error_message)
                  self.log(error_message)
                  raise ValueError(error_message)
              else:
                raise NotImplementedError
            # break the loop to get latest priorities from master
            break
          self.cleanUp(test_result)
        except (SubprocessError, CalledProcessError) as e:
          log("SubprocessError", exc_info=sys.exc_info())
          if remote_test_result_needs_cleanup:
            status_dict = e.status_dict or {}
            test_result.reportFailure(
                command=status_dict.get('command'),
                stdout=status_dict.get('stdout'),
                stderr=status_dict.get('stderr'),
            )
          continue
        except ValueError as e:
          # This could at least happens if runTestSuite is not found
          log("ValueError", exc_info=sys.exc_info())
          if node_test_suite is not None:
            node_test_suite.retry_software_count += 1
        except CancellationError, e:
          # NOTE(review): Python-2-only except syntax.
          log("CancellationError", exc_info=sys.exc_info())
          self.process_manager.under_cancellation = False
          node_test_suite.retry = True
          continue
        except:
          ex_type, ex, tb = sys.exc_info()
          traceback.print_tb(tb)
          log("erp5testnode exception", exc_info=sys.exc_info())
          raise
        now = time.time()
        self.cleanUp(test_result)
        # Pace the loop: at least 120 s per master poll.
        if (now-begin) < 120:
          sleep_time = 120 - (now-begin)
          log("End of processing, going to sleep %s" % sleep_time)
          time.sleep(sleep_time)
    # NOTE(review): the except/finally matching the outer ``try:`` above is
    # missing from this source chunk (truncated dump) — restore from
    # upstream; as written this method is syntactically incomplete.
class TestNode(object):
  """Slave node of an ERP5 test distribution master.

  Polls the master for test suites to run, keeps local git checkouts up
  to date, builds the requested software with SlapOS, then spawns the
  partition's ``runTestSuite`` script which reports results back to the
  master.
  """

  def __init__(self, log, config):
    self.log = log
    self.config = config or {}
    self.process_manager = ProcessManager(log)
    # reference -> NodeTestSuite, reused across main-loop iterations
    self.node_test_suite_dict = {}
    # hack until slapos.cookbook is updated: relocate a working
    # directory that still points at the legacy ".../slapos/" location
    working_directory = self.config.get('working_directory', '')
    if working_directory.endswith("slapos/"):
      self.config['working_directory'] = \
          working_directory[:-len("slapos/")] + "testnode"

  def checkOldTestSuite(self, test_suite_data):
    """Delete local checkouts of suites the master no longer requests."""
    working_directory = self.config['working_directory']
    wished_reference_set = set(
        x['test_suite_reference'] for x in test_suite_data)
    for reference in set(os.listdir(working_directory)) - wished_reference_set:
      fpath = os.path.join(working_directory, reference)
      self.delNodeTestSuite(reference)
      if os.path.isdir(fpath):
        shutil.rmtree(fpath)
      else:
        os.remove(fpath)

  def getNodeTestSuite(self, reference):
    """Return the NodeTestSuite for *reference*, creating it if needed."""
    node_test_suite = self.node_test_suite_dict.get(reference)
    if node_test_suite is None:
      node_test_suite = NodeTestSuite(reference)
      self.node_test_suite_dict[reference] = node_test_suite
    return node_test_suite

  def delNodeTestSuite(self, reference):
    """Forget the NodeTestSuite for *reference* (no-op if unknown)."""
    # pop() with a default replaces the Python-2-only has_key() check
    self.node_test_suite_dict.pop(reference, None)

  def constructProfile(self, node_test_suite):
    """Write a custom buildout profile extending a profile found in one
    of the local repository checkouts.

    Exactly one repository must define PROFILE_PATH_KEY; any repository
    carrying a ``buildout_section_id`` additionally gets a section
    pinning the local repository path and branch.

    :raises ValueError: when zero or several profiles are defined.
    """
    assert len(node_test_suite.vcs_repository_list), \
        "we must have at least one repository"
    profile_path_count = 0
    profile_content_list = []
    for vcs_repository in node_test_suite.vcs_repository_list:
      # mandatory key: keep the lookup so a malformed entry fails early
      url = vcs_repository['url']
      buildout_section_id = vcs_repository.get('buildout_section_id', None)
      repository_path = vcs_repository['repository_path']
      try:
        profile_path = vcs_repository[PROFILE_PATH_KEY]
      except KeyError:
        pass
      else:
        profile_path_count += 1
        if profile_path_count > 1:
          raise ValueError(PROFILE_PATH_KEY + ' defined more than once')
        profile_content_list.append("""
[buildout]
extends = %(software_config_path)s
""" % {'software_config_path':
           os.path.join(repository_path, profile_path)})
      if buildout_section_id is not None:
        profile_content_list.append("""
[%(buildout_section_id)s]
repository = %(repository_path)s
branch = %(branch)s
""" % {'buildout_section_id': buildout_section_id,
       'repository_path': repository_path,
       'branch': vcs_repository.get('branch', 'master')})
    if not profile_path_count:
      raise ValueError(PROFILE_PATH_KEY + ' not defined')
    # sort to have the [buildout] section first
    profile_content_list.sort(
        key=lambda x: [x, ''][x.startswith('\n[buildout]')])
    # context manager: the profile is closed (and flushed) even on error
    with open(node_test_suite.custom_profile_path, 'w') as custom_profile:
      custom_profile.write(''.join(profile_content_list))
    # NOTE(review): appends only the *last* repository path, as in the
    # original code -- presumably intentional; confirm before changing.
    sys.path.append(repository_path)

  def getAndUpdateFullRevisionList(self, node_test_suite):
    """Clone missing repositories, update existing ones, and record the
    combined revision string on *node_test_suite*.

    Returns a list of ``repository_id=revision`` strings; also stores
    their comma-joined form in ``node_test_suite.revision``.
    """
    full_revision_list = []
    config = self.config
    log = self.log
    for vcs_repository in node_test_suite.vcs_repository_list:
      repository_path = vcs_repository['repository_path']
      repository_id = vcs_repository['repository_id']
      if not os.path.exists(repository_path):
        parameter_list = [config['git_binary'], 'clone',
                          vcs_repository['url']]
        if vcs_repository.get('branch') is not None:
          parameter_list.extend(['-b', vcs_repository.get('branch')])
        parameter_list.append(repository_path)
        log(subprocess.check_output(parameter_list,
                                    stderr=subprocess.STDOUT))
      # Make sure we have local repository
      updater = Updater(repository_path, git_binary=config['git_binary'],
                        log=log, process_manager=self.process_manager)
      updater.checkout()
      revision = "-".join(updater.getRevision())
      full_revision_list.append('%s=%s' % (repository_id, revision))
    node_test_suite.revision = ','.join(full_revision_list)
    return full_revision_list

  def addWatcher(self, test_result):
    """Attach the tail of the configured log file to *test_result*.

    Returns the watched file name, or None when no ``log_file`` is
    configured.  The file object is intentionally left open: ownership
    passes to the watcher.
    """
    config = self.config
    if config.get('log_file'):
      log_file_name = config['log_file']
      log_file = open(log_file_name)
      log_file.seek(0, 2)                      # end of file -> tell() is size
      log_file.seek(-min(5000, log_file.tell()), 2)  # last <=5000 bytes
      test_result.addWatch(log_file_name, log_file,
                           max_history_bytes=10000)
      return log_file_name

  def checkRevision(self, test_result, node_test_suite):
    """Align local checkouts on the revision the master wants tested.

    Other testnodes on other boxes may already be testing another
    revision; when the master disagrees with our local revision we check
    out exactly what it asked for.
    """
    config = self.config
    log = self.log
    if node_test_suite.revision != test_result.revision:
      log('Disagreement on tested revision, checking out: %r' % (
          (node_test_suite.revision, test_result.revision),))
      for i, repository_revision in enumerate(
          test_result.revision.split(',')):
        vcs_repository = node_test_suite.vcs_repository_list[i]
        repository_path = vcs_repository['repository_path']
        revision = repository_revision.rsplit('-', 1)[1]
        log(' %s at %s' % (repository_path, node_test_suite.revision))
        updater = Updater(repository_path, git_binary=config['git_binary'],
                          revision=revision, log=log,
                          process_manager=self.process_manager)
        updater.checkout()
      node_test_suite.revision = test_result.revision

  def _prepareSlapOS(self, working_directory, slapos_instance,
                     create_partition=1, software_path_list=None, **kw):
    """Launch slapos to build software and, optionally, partitions.

    Raises SubprocessError when a build step fails; on success resets
    the instance's retry counter and returns the last step's status
    dict.
    """
    slapproxy_log = os.path.join(self.config['log_directory'],
                                 'slapproxy.log')
    self.log('Configured slapproxy log to %r' % slapproxy_log)
    # after too many failed attempts, wipe the software and start over
    reset_software = slapos_instance.retry_software_count > 10
    self.log('testnode, retry_software_count : %r' %
             slapos_instance.retry_software_count)
    self.slapos_controler = SlapOSControler.SlapOSControler(
        working_directory, self.config, self.log)
    self.slapos_controler.initializeSlapOSControler(
        slapproxy_log=slapproxy_log,
        process_manager=self.process_manager,
        reset_software=reset_software,
        software_path_list=software_path_list)
    self.process_manager.supervisord_pid_file = os.path.join(
        self.slapos_controler.instance_root, 'var', 'run',
        'supervisord.pid')
    method_list = ["runSoftwareRelease"]
    if create_partition:
      method_list.append("runComputerPartition")
    for method_name in method_list:
      slapos_method = getattr(self.slapos_controler, method_name)
      status_dict = slapos_method(self.config,
                                  environment=self.config['environment'],
                                  )
      if status_dict['status_code'] != 0:
        slapos_instance.retry = True
        slapos_instance.retry_software_count += 1
        raise SubprocessError(status_dict)
      else:
        slapos_instance.retry_software_count = 0
    return status_dict

  def prepareSlapOSForTestNode(self, test_node_slapos):
    """Build the software needed by the testnode itself (e.g. the
    selenium-runner by default); no partition is created."""
    return self._prepareSlapOS(
        self.config['slapos_directory'], test_node_slapos,
        create_partition=0,
        software_path_list=self.config.get("software_list"))

  def prepareSlapOSForTestSuite(self, node_test_suite):
    """Build the software under test and create its partition."""
    return self._prepareSlapOS(
        node_test_suite.working_directory, node_test_suite,
        software_path_list=[node_test_suite.custom_profile_path])

  def _dealShebang(self, run_test_suite_path):
    """Return the interpreter invocation parsed from the script's
    shebang line, or [] when there is none.

    Used to invoke the interpreter explicitly and so sidestep the
    kernel's shebang length limitation.
    """
    # 'with' closes the handle (the original leaked it)
    with open(run_test_suite_path, 'r') as script:
      line = script.readline()
    return line[2:].split() if line[:2] == '#!' else []

  def runTestSuite(self, node_test_suite, portal_url):
    """Spawn the partition's runTestSuite script for *node_test_suite*.

    From the spawn onwards the test runner is responsible for updating
    the test result on the master; we only clean up when the runner
    itself cannot be started.

    :raises ValueError: when no installed partition provides a
        runTestSuite script.
    """
    config = self.config
    parameter_list = []
    run_test_suite_path_list = glob.glob(
        "%s/*/bin/runTestSuite" % self.slapos_controler.instance_root)
    if not len(run_test_suite_path_list):
      raise ValueError('No runTestSuite provided in installed partitions.')
    run_test_suite_path = run_test_suite_path_list[0]
    # Deal with Shebang size limitation
    invocation_list = self._dealShebang(run_test_suite_path)
    invocation_list.extend(
        [run_test_suite_path,
         '--test_suite', node_test_suite.test_suite,
         '--revision', node_test_suite.revision,
         '--test_suite_title', node_test_suite.test_suite_title,
         '--node_quantity', config['node_quantity'],
         '--master_url', portal_url])
    # optional binaries: advertise them only if the runner supports the
    # corresponding command-line flag
    firefox_bin_list = glob.glob(
        "%s/soft/*/parts/firefox/firefox-slapos" %
        config["slapos_directory"])
    if len(firefox_bin_list):
      parameter_list.append('--firefox_bin')
    xvfb_bin_list = glob.glob(
        "%s/soft/*/parts/xserver/bin/Xvfb" % config["slapos_directory"])
    if len(xvfb_bin_list):
      parameter_list.append('--xvfb_bin')
    supported_parameter_set = self.process_manager.getSupportedParameterSet(
        run_test_suite_path, parameter_list)
    if '--firefox_bin' in supported_parameter_set:
      invocation_list.extend(["--firefox_bin", firefox_bin_list[0]])
    if '--xvfb_bin' in supported_parameter_set:
      invocation_list.extend(["--xvfb_bin", xvfb_bin_list[0]])
    bt5_path_list = config.get("bt5_path")
    if bt5_path_list not in ('', None,):
      invocation_list.extend(["--bt5_path", bt5_path_list])
    SlapOSControler.createFolder(node_test_suite.test_suite_directory,
                                 clean=True)
    self.process_manager.spawn(
        *invocation_list,
        cwd=node_test_suite.test_suite_directory,
        log_prefix='runTestSuite', get_output=False)

  def cleanUp(self, test_result):
    """Kill leftover child processes and detach the log watcher."""
    log = self.log
    log('Testnode.cleanUp')
    self.process_manager.killPreviousRun()
    if test_result is not None:
      try:
        test_result.removeWatch(self.config['log_file'])
      except KeyError:
        log("KeyError, Watcher already deleted or not added correctly")

  def run(self):
    """Main loop: forever fetch test suites from the master, build and
    run them, then sleep so iterations are at least 2 minutes apart."""
    log = self.log
    config = self.config
    test_result = None
    # initialized up-front: the exception handlers below may run before
    # these are assigned inside the loop (was a latent NameError)
    log_file_name = None
    node_test_suite = None
    test_node_slapos = SlapOSInstance()
    test_node_slapos.edit(
        working_directory=config['slapos_directory'])
    try:
      while True:
        try:
          self.cleanUp(None)
          remote_test_result_needs_cleanup = False
          begin = time.time()
          self.prepareSlapOSForTestNode(test_node_slapos)
          portal_url = config['test_suite_master_url']
          portal = taskdistribution.TaskDistributionTool(
              portal_url, logger=DummyLogger(log))
          test_suite_portal = taskdistribution.TaskDistributor(
              portal_url, logger=DummyLogger(log))
          test_suite_json = test_suite_portal.startTestSuite(
              config['test_node_title'])
          test_suite_data = deunicodeData(json.loads(test_suite_json))
          log("Got following test suite data from master : %r" %
              (test_suite_data,))
          # Clean-up test suites
          self.checkOldTestSuite(test_suite_data)
          for test_suite in test_suite_data:
            remote_test_result_needs_cleanup = False
            node_test_suite = self.getNodeTestSuite(
                test_suite["test_suite_reference"])
            node_test_suite.edit(
                working_directory=config['working_directory'])
            node_test_suite.edit(**test_suite)
            # Write our own software.cfg to use the local repository
            self.constructProfile(node_test_suite)
            # kill processes from previous loop if any
            self.process_manager.killPreviousRun()
            # Make sure we have local repository
            self.getAndUpdateFullRevisionList(node_test_suite)
            test_result = portal.createTestResult(
                node_test_suite.revision, [], config['test_node_title'],
                False, node_test_suite.test_suite_title,
                node_test_suite.project_title)
            remote_test_result_needs_cleanup = True
            log("testnode, test_result : %r" % (test_result,))
            if test_result is not None:
              log_file_name = self.addWatcher(test_result)
              self.checkRevision(test_result, node_test_suite)
              # Now prepare the installation of SlapOS and create instance
              status_dict = self.prepareSlapOSForTestSuite(node_test_suite)
              # Give some time so computer partitions may start
              # as partitions can be of any kind we have and likely will
              # never have a reliable way to check if they are up or not
              time.sleep(20)
              self.runTestSuite(node_test_suite, portal_url)
              if log_file_name is not None:
                test_result.removeWatch(log_file_name)
              # break the loop to get latest priorities from master
              break
          self.cleanUp(test_result)
        except (SubprocessError, CalledProcessError) as e:
          log("SubprocessError", exc_info=sys.exc_info())
          # guard both: log_file_name may still be None, and
          # CalledProcessError carries no status_dict attribute
          if test_result is not None and log_file_name is not None:
            test_result.removeWatch(log_file_name)
          if remote_test_result_needs_cleanup:
            status_dict = getattr(e, 'status_dict', None) or {}
            test_result.reportFailure(
                command=status_dict.get('command'),
                stdout=status_dict.get('stdout'),
                stderr=status_dict.get('stderr'),
            )
          continue
        except ValueError:
          # This could at least happen if runTestSuite is not found
          log("ValueError", exc_info=sys.exc_info())
          if node_test_suite is not None:
            node_test_suite.retry_software_count += 1
        except CancellationError:
          log("CancellationError", exc_info=sys.exc_info())
          self.process_manager.under_cancellation = False
          if node_test_suite is not None:
            node_test_suite.retry = True
          continue
        except:
          # deliberately broad: log anything unexpected, then re-raise
          log("erp5testnode exception", exc_info=sys.exc_info())
          raise
        now = time.time()
        self.cleanUp(test_result)
        if (now - begin) < 120:
          # keep iterations at least 2 minutes apart so a misbehaving
          # node does not hammer the master
          sleep_time = 120 - (now - begin)
          log("End of processing, going to sleep %s" % sleep_time)
          time.sleep(sleep_time)
    finally:
      # NOTE(review): the original tail of run() is truncated in this
      # chunk; conservatively make sure child processes are killed and
      # the watcher removed whenever the main loop exits -- confirm
      # against the full file.
      self.cleanUp(test_result)