def _clean_up(self, test_run_id, cluster_id, cleanup):
    """Run the test set's cleanup module for a cluster, then mark the run finished."""
    session = engine.get_session()

    # The cleanup module receives the cluster's deployment tags so it can
    # tailor its work to what was actually deployed.
    deployment_tags = session.query(models.ClusterState.deployment_tags)\
        .filter_by(id=cluster_id)\
        .scalar()

    try:
        cleanup_module = __import__(cleanup, -1)

        # The cleanup code reads its target environment from these variables.
        os.environ['NAILGUN_HOST'] = str(conf.nailgun.host)
        os.environ['NAILGUN_PORT'] = str(conf.nailgun.port)
        os.environ['CLUSTER_ID'] = str(cluster_id)

        cleanup_module.cleanup.cleanup(deployment_tags)
    except Exception:
        LOG.exception('Cleanup error. Test Run ID %s. Cluster ID %s',
                      test_run_id, cluster_id)
    finally:
        # The run must end up 'finished' even if cleanup failed.
        models.TestRun.update_test_run(session, test_run_id,
                                       status='finished')
def _clean_up(self, test_run_id, cluster_id, cleanup):
    """Invoke the cleanup hook for *cluster_id* and finish the test run."""
    session = engine.get_session()

    # Fetch deployment tags so the cleanup hook knows what was deployed
    # on the current cluster.
    tags = (session.query(models.ClusterState.deployment_tags)
            .filter_by(id=cluster_id)
            .scalar())

    try:
        mod = __import__(cleanup, -1)
        # Export the target environment for the cleanup code.
        for name, value in (('NAILGUN_HOST', conf.nailgun.host),
                            ('NAILGUN_PORT', conf.nailgun.port),
                            ('CLUSTER_ID', cluster_id)):
            os.environ[name] = str(value)
        mod.cleanup.cleanup(tags)
    except Exception:
        LOG.exception(
            'Cleanup error. Test Run ID %s. Cluster ID %s',
            test_run_id,
            cluster_id
        )
    finally:
        # Always mark the run as finished, success or not.
        models.TestRun.update_test_run(
            session, test_run_id, status='finished')
def _add_message(self, test, err=None, status=None):
    """Persist one result record per affected test id, in one transaction."""
    data = {
        'status': status,
        'time_taken': self.taken,
        # Traceback persistence is intentionally disabled.
        'traceback': u'',
        'step': None,
        'message': u'',
    }

    if err:
        exc_type, exc_value, exc_traceback = err
        # Only non-'error' outcomes (i.e. failures) carry a parsed
        # step/message extracted from the exception value.
        if status != 'error':
            data['step'], data['message'] = \
                nose_utils.format_failure_message(exc_value)

    session = engine.get_session()
    with session.begin(subtransactions=True):
        for test_id in nose_utils.get_tests_ids_to_update(test):
            models.Test.add_result(session, self.test_run_id, test_id, data)
def _add_message(self, test, err=None, status=None):
    """Store a result for *test*; fan out to members for suite objects."""
    data = {
        'status': status,
        'time_taken': self.taken,
        # Traceback storage is disabled on purpose.
        'traceback': u'',
        'step': None,
        'message': u'',
    }

    if err:
        exc_type, exc_value, exc_traceback = err
        if status != 'error':
            # Failures (not errors) get a parsed step and message.
            data['step'], data['message'] = \
                nose_utils.format_failure_message(exc_value)

    session = engine.get_session()
    with session.begin(subtransactions=True):
        if isinstance(test, ContextSuite):
            # A suite bundles several tests; record the same result for each.
            for member in test._tests:
                models.Test.add_result(
                    session, self.test_run_id, member.id(), data)
        else:
            models.Test.add_result(
                session, self.test_run_id, test.id(), data)
def _add_message(self, test, err=None, status=None):
    """Persist a result (status, timing, description) for *test*.

    For a ContextSuite the same outcome is stored once per sub-test.

    Fixes: the sub-test loop previously called get_description(test) —
    describing the enclosing suite — so every sub-test row received the
    parent's title/description/duration; it now describes each sub_test.
    Also hoists the duplicated step/message/traceback defaults out of
    both branches.
    """
    data = {
        'status': status,
        'time_taken': self.taken,
    }
    data['title'], data['description'], data['duration'] = \
        nose_utils.get_description(test)

    # Defaults; failures (but not errors) get a parsed step/message below.
    data['step'], data['message'] = None, u''
    data['traceback'] = u''
    if err:
        exc_type, exc_value, exc_traceback = err
        if status != 'error':
            data['step'], data['message'] = \
                nose_utils.format_failure_message(exc_value)

    session = engine.get_session()
    with session.begin(subtransactions=True):
        if isinstance(test, ContextSuite):
            for sub_test in test._tests:
                # Describe the sub-test itself, not the enclosing suite.
                data['title'], data['description'], data['duration'] = \
                    nose_utils.get_description(sub_test)
                models.Test.add_result(
                    session, self.test_run_id, sub_test.id(), data)
        else:
            models.Test.add_result(
                session, self.test_run_id, test.id(), data)
def _add_message(self, test, err=None, status=None):
    """Record the outcome of *test* (one row per sub-test for suites).

    Fixes: when iterating a ContextSuite, get_description() was invoked
    on the suite object instead of the current sub-test, so all sub-test
    rows shared the suite's title/description/duration. The defaults for
    step/message/traceback were also duplicated in both branches and are
    now set once.
    """
    data = {'status': status, 'time_taken': self.taken}
    data['title'], data['description'], data['duration'] = \
        nose_utils.get_description(test)

    # Common defaults for every outcome.
    data['step'], data['message'] = None, u''
    data['traceback'] = u''
    if err:
        exc_type, exc_value, exc_traceback = err
        if status != 'error':
            # Failures carry a parsed step/message; errors keep defaults.
            data['step'], data['message'] = \
                nose_utils.format_failure_message(exc_value)

    session = engine.get_session()
    with session.begin(subtransactions=True):
        if isinstance(test, ContextSuite):
            for sub_test in test._tests:
                # Use the sub-test's own description, not the suite's.
                data['title'], data['description'], data['duration'] = \
                    nose_utils.get_description(sub_test)
                models.Test.add_result(session, self.test_run_id,
                                       sub_test.id(), data)
        else:
            models.Test.add_result(session, self.test_run_id,
                                   test.id(), data)
def __init__(self, test_run_id, cluster_id):
    """Bind the storage plugin to one test run on one cluster.

    :param test_run_id: id of the test run whose results are stored
    :param cluster_id: id of the cluster the run executes against
    """
    self.test_run_id = test_run_id
    self.cluster_id = cluster_id
    super(StoragePlugin, self).__init__()
    # Not set here; presumably assigned when the run starts — confirm
    # against the plugin's other methods.
    self._start_time = None
    # Session used by the plugin to persist results.
    self.session = engine.get_session()
def enabled_tests(self):
    """Names of this run's tests, ordered by name, excluding disabled ones."""
    session = engine.get_session()
    query = (session.query(Test)
             .filter_by(test_run_id=self.id)
             .order_by(Test.name))
    # The 'disabled' filter is applied client-side rather than in SQL.
    return [item.name for item in query if item.status != 'disabled']
def get_test_results(cls):
    """Return all test runs with their tests eager-loaded, newest first.

    Fixes: the lazy Query object was returned after session.close(),
    leaving it without a usable session by the time a caller iterates
    it; the rows are now materialized with .all() before closing.
    """
    session = engine.get_session()
    test_runs = session.query(cls). \
        options(joinedload('tests')). \
        order_by(desc(cls.id)). \
        all()
    session.commit()
    session.close()
    return test_runs
def get_test_results(cls):
    """Fetch every test run (tests joined-loaded), ordered by id descending.

    Fixes: previously the unevaluated Query was returned after the
    session was already closed, so iterating the result could not rely
    on a live session. Calling .all() executes the query while the
    session is still open and returns a plain list.
    """
    session = engine.get_session()
    test_runs = session.query(cls)\
        .options(joinedload('tests'))\
        .order_by(desc(cls.id))\
        .all()
    session.commit()
    session.close()
    return test_runs
def setup_app(config=None, session=None):
    """Build the pecan WSGI application wrapped with access control.

    :param config: optional config dict merged into pecan.conf
    :param session: optional DB session; a new one is opened when absent
    """
    setup_config(config or {})
    session = session or engine.get_session(pecan.conf.dbpath)
    hook_chain = [
        hooks.CustomTransactionalHook(session),
        hooks.AddTokenHook(),
    ]
    app = pecan.make_app(
        pecan.conf.app.root,
        debug=pecan.conf.debug,
        force_canonical=True,
        hooks=hook_chain,
    )
    return access_control.setup(app)
def afterImport(self, filename, module):
    """Register the test set declared by a freshly imported module."""
    imported = __import__(module, fromlist=[module])
    LOG.info('Inspecting %s', filename)
    # Only modules declaring __profile__ describe a test set.
    if not hasattr(imported, '__profile__'):
        return
    session = engine.get_session()
    with session.begin(subtransactions=True):
        LOG.info('%s discovered.', imported.__name__)
        test_set = session.merge(models.TestSet(**imported.__profile__))
        session.add(test_set)
        self.test_sets[test_set.id] = test_set
def afterImport(self, filename, module):
    """Merge a module's declared test set into the database, if any."""
    mod = __import__(module, fromlist=[module])
    LOG.info('Inspecting %s', filename)
    if hasattr(mod, '__profile__'):
        # __profile__ holds the TestSet constructor arguments.
        db_session = engine.get_session()
        with db_session.begin(subtransactions=True):
            LOG.info('%s discovered.', mod.__name__)
            discovered = models.TestSet(**mod.__profile__)
            discovered = db_session.merge(discovered)
            db_session.add(discovered)
            self.test_sets[discovered.id] = discovered
def _run_tests(self, test_run_id, cluster_id, argv_add):
    """Run nose with the storage plugin attached, then drop the thread record.

    Fixes: replaces the legacy ``except Exception, e`` comma syntax,
    whose bound name was never used, with a plain ``except Exception:``.
    """
    # NOTE(review): 'session' is unused below; kept in case get_session()
    # has required side effects — TODO confirm and remove if not.
    session = engine.get_session()
    try:
        nose_test_runner.SilentTestProgram(
            addplugins=[nose_storage_plugin.StoragePlugin(
                test_run_id, str(cluster_id))],
            exit=False,
            argv=['ostf_tests'] + argv_add)
        self._named_threads.pop(int(test_run_id), None)
    except Exception:
        LOG.exception('Test run ID: %s', test_run_id)
def _run_tests(self, test_run_id, cluster_id, argv_add):
    """Execute the nose test program for one test run.

    Fixes: the old-style ``except Exception, e`` clause bound an
    exception name that was never used; it is now a bare
    ``except Exception:``.
    """
    # NOTE(review): the session is not referenced afterwards; verify
    # whether engine.get_session() must still be called here.
    session = engine.get_session()
    try:
        nose_test_runner.SilentTestProgram(addplugins=[
            nose_storage_plugin.StoragePlugin(test_run_id, str(cluster_id))
        ], exit=False, argv=['ostf_tests'] + argv_add)
        # Forget the worker entry once the program has completed.
        self._named_threads.pop(int(test_run_id), None)
    except Exception:
        LOG.exception('Test run ID: %s', test_run_id)
def main():
    """Entry point: configure the app, discover tests, and serve the API."""
    cli_args = cli_config.parse_cli()
    # Assemble the pecan configuration from CLI arguments.
    config = {
        'server': {
            'host': cli_args.host,
            'port': cli_args.port
        },
        'dbpath': cli_args.dbpath,
        'debug': cli_args.debug,
        'debug_tests': cli_args.debug_tests,
        'nailgun': {
            'host': cli_args.nailgun_host,
            'port': cli_args.nailgun_port
        }
    }
    logger.setup(log_file=cli_args.log_file)
    log = logging.getLogger(__name__)
    root = app.setup_app(config=config)
    # When invoked as an after-init hook, run it and exit instead of serving.
    if getattr(cli_args, 'after_init_hook'):
        return nailgun_hooks.after_initialization_environment_hook()
    # Performing cleaning of expired data (if any) in db.
    clean_db(engine.get_engine())
    # Discover testsets and their tests; modules come from 'fuel_health'
    # unless a debug tests path is configured.
    CORE_PATH = pecan.conf.debug_tests if \
        pecan.conf.get('debug_tests') else 'fuel_health'
    session = engine.get_session()
    discovery(path=CORE_PATH, session=session)
    # Cache needed data from the test repository.
    cache_data(session)
    host, port = pecan.conf.server.host, pecan.conf.server.port
    srv = pywsgi.WSGIServer((host, int(port)), root)
    log.info('Starting server in PID %s', os.getpid())
    log.info("serving on http://%s:%s", host, port)
    try:
        # Ignoring SIGCHLD lets finished child processes be reaped
        # automatically instead of lingering as zombies.
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        srv.serve_forever()
    except KeyboardInterrupt:
        pass
def main():
    """Entry point: set up configuration, discover tests, start the server."""
    cli_args = cli_config.parse_cli()
    # Build the pecan configuration from the parsed CLI options.
    config = {
        'server': {
            'host': cli_args.host,
            'port': cli_args.port
        },
        'dbpath': cli_args.dbpath,
        'debug': cli_args.debug,
        'debug_tests': cli_args.debug_tests,
        'nailgun': {
            'host': cli_args.nailgun_host,
            'port': cli_args.nailgun_port
        }
    }
    logger.setup(log_file=cli_args.log_file)
    log = logging.getLogger(__name__)
    root = app.setup_app(config=config)
    # after_init_hook mode: run the hook and exit without serving.
    if getattr(cli_args, 'after_init_hook'):
        return nailgun_hooks.after_initialization_environment_hook()
    # Performing cleaning of expired data (if any) in db.
    clean_db()
    # Discover testsets and their tests; 'fuel_health' is the default
    # location unless a debug tests path is configured.
    CORE_PATH = pecan.conf.debug_tests if \
        pecan.conf.get('debug_tests') else 'fuel_health'
    discovery(path=CORE_PATH, session=engine.get_session())
    # Cache needed data from the test repository.
    cache_data()
    host, port = pecan.conf.server.host, pecan.conf.server.port
    srv = pywsgi.WSGIServer((host, int(port)), root)
    log.info('Starting server in PID %s', os.getpid())
    log.info("serving on http://%s:%s", host, port)
    try:
        # SIG_IGN on SIGCHLD avoids zombie child processes.
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        srv.serve_forever()
    except KeyboardInterrupt:
        pass
def setup_app(config=None, session=None):
    """Configure and return the access-controlled pecan application."""
    setup_config(config or {})
    # Fall back to a fresh session bound to the configured DB path.
    db_session = session or engine.get_session(pecan.conf.dbpath)
    application = pecan.make_app(
        pecan.conf.app.root,
        debug=pecan.conf.debug,
        force_canonical=True,
        hooks=[hooks.CustomTransactionalHook(db_session),
               hooks.AddTokenHook()],
    )
    return access_control.setup(application)
def kill(self, test_run_id, cluster_id, cleanup=None):
    """Terminate a tracked test run; optionally launch its cleanup.

    Returns True when a worker was found and terminated, else False.
    """
    session = engine.get_session()
    if test_run_id not in self._named_threads:
        return False
    self._named_threads[test_run_id].terminate()
    self._named_threads.pop(test_run_id, None)
    if cleanup:
        # _clean_up runs in a separate process and marks the run
        # finished itself.
        nose_utils.run_proc(self._clean_up, test_run_id,
                            cluster_id, cleanup)
    else:
        models.TestRun.update_test_run(session, test_run_id,
                                       status='finished')
    return True
def _clean_up(self, test_run_id, cluster_id, cleanup):
    """Import and run the cleanup module, then mark the run finished.

    Fixes: the except block referenced an undefined name ``cluser_id``,
    which raised NameError while handling the original exception; it now
    logs ``cluster_id``. The misspelled 'errer' in the log message is
    also corrected.
    """
    session = engine.get_session()
    try:
        module_obj = __import__(cleanup, -1)
        # The cleanup code reads its target environment from these variables.
        os.environ['NAILGUN_HOST'] = str(conf.nailgun.host)
        os.environ['NAILGUN_PORT'] = str(conf.nailgun.port)
        os.environ['CLUSTER_ID'] = str(cluster_id)
        module_obj.cleanup.cleanup()
    except Exception:
        LOG.exception('Cleanup error. Test Run ID %s. Cluster ID %s',
                      test_run_id, cluster_id)
    finally:
        # Always transition the run to 'finished'.
        models.TestRun.update_test_run(
            session, test_run_id, status='finished')
def discovery_check(cluster):
    """Ensure the stored test sets for *cluster* match its deployment tags.

    Re-runs nose discovery when nothing is stored for the cluster yet,
    or when a stored test set requires tags the cluster no longer has.
    """
    # Get needed information from nailgun via a series of requests to the
    # nailgun API. At this time we need info about deployment type
    # (ha, non-ha), type of network management (nova-network, quantum)
    # and attributes that indicate that savanna/murano is installed.
    cluster_deployment_args = _get_cluster_depl_tags(cluster)
    cluster_data = {
        'cluster_id': cluster,
        'deployment_tags': cluster_deployment_args
    }
    session = engine.get_session()
    with session.begin(subtransactions=True):
        test_sets = session.query(models.TestSet)\
            .filter_by(cluster_id=cluster)\
            .all()
        if not test_sets:
            # Nothing stored for this cluster yet: discover from scratch.
            nose_discovery.discovery(
                path=CORE_PATH,
                deployment_info=cluster_data
            )
        else:
            for testset in test_sets:
                # Treat a missing tag list as empty.
                deployment_tags = testset.deployment_tags
                deployment_tags = deployment_tags if deployment_tags else []
                if not set(deployment_tags).issubset(
                    cluster_data['deployment_tags']
                ):
                    # Perform cascade deletion of the testset and of the
                    # tests and testruns (with their tests) that
                    # correspond to it.
                    session.query(models.TestSet)\
                        .filter_by(id=testset.id)\
                        .filter_by(cluster_id=testset.cluster_id)\
                        .delete()
                    # Perform final discovery for tests.
                    nose_discovery.discovery(
                        path=CORE_PATH,
                        deployment_info=cluster_data
                    )
def kill(self, test_run_id, cluster_id, cleanup=None):
    """Stop the worker for *test_run_id* if one is tracked.

    :returns: True when a worker was terminated, otherwise False.
    """
    session = engine.get_session()
    if test_run_id in self._named_threads:
        # Terminate first, then drop the bookkeeping entry.
        self._named_threads[test_run_id].terminate()
        self._named_threads.pop(test_run_id, None)
        if cleanup:
            # Hand off to the cleanup process, which finalizes the run.
            nose_utils.run_proc(
                self._clean_up, test_run_id, cluster_id, cleanup)
        else:
            models.TestRun.update_test_run(
                session, test_run_id, status='finished')
        return True
    return False
def _clean_up(self, test_run_id, cluster_id, cleanup):
    """Run the named cleanup module for a cluster and finish the test run.

    Fixes: the logging call in the except block used an undefined name
    ``cluser_id`` (a typo for ``cluster_id``), producing a NameError
    inside the exception handler; the 'errer' typo in the log message is
    fixed as well.
    """
    session = engine.get_session()
    try:
        module_obj = __import__(cleanup, -1)
        # Expose the target environment to the cleanup code.
        os.environ['NAILGUN_HOST'] = str(conf.nailgun.host)
        os.environ['NAILGUN_PORT'] = str(conf.nailgun.port)
        os.environ['CLUSTER_ID'] = str(cluster_id)
        module_obj.cleanup.cleanup()
    except Exception:
        LOG.exception('Cleanup error. Test Run ID %s. Cluster ID %s',
                      test_run_id, cluster_id)
    finally:
        # The run must be marked finished whether cleanup succeeded or not.
        models.TestRun.update_test_run(session, test_run_id,
                                       status='finished')
def addSuccess(self, test):
    """Record (or refresh) a discovered test under its matching test set."""
    test_id = test.id()
    for test_set_id in self.test_sets.keys():
        if test_set_id not in test_id:
            continue
        session = engine.get_session()
        with session.begin(subtransactions=True):
            LOG.info('%s added for %s', test_id, test_set_id)
            data = {}
            data['title'], data['description'], data['duration'] = \
                nose_utils.get_description(test)
            # UPDATE first; fall back to INSERT when no row matched.
            updated = session.query(models.Test).filter_by(
                name=test_id,
                test_set_id=test_set_id,
                test_run_id=None,
            ).update(data, synchronize_session=False)
            if not updated:
                data.update(test_set_id=test_set_id, name=test_id)
                session.add(models.Test(**data))
def addSuccess(self, test):
    """Upsert metadata for a discovered test into its test set."""
    test_id = test.id()
    for test_set_id in self.test_sets.keys():
        if test_set_id in test_id:
            session = engine.get_session()
            with session.begin(subtransactions=True):
                LOG.info('%s added for %s', test_id, test_set_id)
                title, description, duration = \
                    nose_utils.get_description(test)
                data = {
                    'title': title,
                    'description': description,
                    'duration': duration,
                }
                # Try updating an existing repository row first.
                matched = session.query(models.Test).filter_by(
                    name=test_id,
                    test_set_id=test_set_id,
                    test_run_id=None).\
                    update(data, synchronize_session=False)
                if not matched:
                    # No row yet: insert a fresh one.
                    data['test_set_id'] = test_set_id
                    data['name'] = test_id
                    session.add(models.Test(**data))
def cache_data():
    """Load the test repository from the DB into the in-memory cache."""
    session = engine.get_session()
    with session.begin(subtransactions=True):
        test_sets = session.query(models.TestSet)\
            .options(joinedload('tests'))\
            .all()
        # Only these test attributes are kept in the cache.
        wanted_attrs = ('name', 'deployment_tags')
        for test_set in test_sets:
            entry = {
                'test_set_id': test_set.id,
                'deployment_tags': test_set.deployment_tags,
                'tests': [dict((attr, getattr(test, attr))
                               for attr in wanted_attrs)
                          for test in test_set.tests],
            }
            simple_cache.TEST_REPOSITORY.append(entry)
def afterImport(self, filename, module):
    """Register a discovered test set if its tags match the deployment."""
    imported = __import__(module, fromlist=[module])
    LOG.info('Inspecting %s', filename)
    if not hasattr(imported, '__profile__'):
        return
    profile = imported.__profile__
    # Tags are normalized to lower case before comparison.
    profile['deployment_tags'] = [
        tag.lower() for tag in profile.get('deployment_tags', [])
    ]
    # Skip sets that require tags absent from the current deployment.
    if not set(profile['deployment_tags']).issubset(
            self.deployment_info['deployment_tags']):
        return
    profile['cluster_id'] = self.deployment_info['cluster_id']
    session = engine.get_session()
    with session.begin(subtransactions=True):
        LOG.info('%s discovered.', imported.__name__)
        test_set = session.merge(models.TestSet(**profile))
        session.add(test_set)
        self.test_sets[test_set.id] = test_set
def addSuccess(self, test):
    """Store a discovered test for its test set, skipping duplicates
    and tests whose deployment tags do not apply to this cluster."""
    test_id = test.id()
    for test_set_id in self.test_sets.keys():
        if test_set_id not in test_id:
            continue
        session = engine.get_session()
        with session.begin(subtransactions=True):
            data = {'cluster_id': self.deployment_info['cluster_id']}
            (data['title'], data['description'],
             data['duration'], data['deployment_tags']) = \
                nose_utils.get_description(test)
            # Only keep tests whose tags apply to this deployment.
            if not set(data['deployment_tags']).issubset(
                    self.deployment_info['deployment_tags']):
                continue
            data.update(test_set_id=test_set_id, name=test_id)
            # merge() doesn't work here, so duplicates are filtered with
            # an explicit existence query (same cluster/test set/name,
            # not bound to any run) to avoid duplicating repository rows.
            existing = session.query(models.Test)\
                .filter_by(cluster_id=self.test_sets[test_set_id].cluster_id)\
                .filter_by(test_set_id=test_set_id)\
                .filter_by(test_run_id=None)\
                .filter_by(name=data['name'])\
                .first()
            if not existing:
                LOG.info('%s added for %s', test_id, test_set_id)
                session.add(models.Test(**data))
def _add_message(self, test, err=None, status=None):
    """Write one result row per affected test id, inside a transaction."""
    data = dict(status=status,
                time_taken=self.taken,
                traceback=u'',  # traceback persistence is disabled
                step=None,
                message=u'')
    if err:
        exc_type, exc_value, exc_traceback = err
        if status != 'error':
            # Failures (not errors) get a parsed step and message.
            data['step'], data['message'] = \
                nose_utils.format_failure_message(exc_value)
    session = engine.get_session()
    with session.begin(subtransactions=True):
        ids = nose_utils.get_tests_ids_to_update(test)
        for test_id in ids:
            models.Test.add_result(session, self.test_run_id,
                                   test_id, data)
def __init__(self):
    """Create the driver and normalize leftover test-run state in the DB."""
    LOG.warning('Initializing Nose Driver')
    # Maps test run ids to their worker processes.
    self._named_threads = {}
    session = engine.get_session()
    with session.begin(subtransactions=True):
        # Update runs that were still marked running (e.g. after a
        # previous process died) — see storage_utils for details.
        storage_utils.update_all_running_test_runs(session)
def __init__(self):
    """Initialize the driver; reconcile any running test runs in storage."""
    LOG.warning('Initializing Nose Driver')
    self._named_threads = {}  # worker-process registry, keyed by run id
    db_session = engine.get_session()
    with db_session.begin(subtransactions=True):
        storage_utils.update_all_running_test_runs(db_session)
def before(self, state):
    # Give the incoming request its own DB session.
    state.request.session = engine.get_session()
def before(self, state):
    # Attach a DB session to the request object for downstream use.
    state.request.session = engine.get_session()