def main():
    # ----- create log file for the worker -----
    logger_path = ConfigAdapter.get_log_path()
    logfile = os.path.join(logger_path, PlatformHelper.get_ip_address(), "worker_logger.log")
    LogHelper.create_logger(logfile,
                            fmt=PlatformHelper.get_ip_address() + " %(asctime)s %(levelname)s worker | %(message)s |")
    LogHelper.info("Worker %s" % PlatformHelper.get_ip_address())
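# Illustration only (not part of main() above): the fmt string is a standard
# logging format, so a worker log line comes out roughly as shown by this
# self-contained sketch. It assumes LogHelper.create_logger is a thin wrapper
# around the stdlib logging module; the IP and timestamp are hypothetical.
import logging

_fmt = "10.0.0.5 %(asctime)s %(levelname)s worker | %(message)s |"
logging.basicConfig(format=_fmt, level=logging.INFO)
logging.info("Worker 10.0.0.5")
# -> 10.0.0.5 2024-01-01 12:00:00,123 INFO worker | Worker 10.0.0.5 |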
def before_scenario(context, scenario):
    testrun = RUNNER_CONFIG.get('TESTRUN') or 'testrun'
    context.log_starttime = strftime("%Y-%m-%dT%H:%M:%SZ", localtime())
    start_time = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
    hostname = PlatformHelper.get_hostname()
    ip = PlatformHelper.get_ip_address()
    product = RUNNER_CONFIG.get("PRODUCT")
    build = RUNNER_CONFIG.get('BUILD')
    context.senddb = RUNNER_CONFIG.get('DATABASE_ENABLED')
    context.env = RUNNER_CONFIG.get('ENVIRONMENT')
    logger_path = os.path.join(ConfigAdapter.get_log_path(), testrun)
    tc_prefix = ConfigAdapter.get_testlink_prefix(product)

    match = filter(lambda x: x.find(tc_prefix) >= 0, scenario.tags)
    if len(match) > 0:
        # differentiate scenarios in a scenario outline by appending the example columns
        if hasattr(context, 'active_outline') and type(context.active_outline) == behave.model.Row:
            suffix = match.pop()
            for example_key in context.active_outline:
                suffix += ".%s" % example_key
            tc_id = testrun + "_" + suffix
        else:
            tc_id = testrun + "_" + match.pop()
    else:
        # no testlink project id found in the tags; fall back to a hash
        tc_id = hashlib.md5((testrun + "_" + scenario.name).encode()).hexdigest()

    if not FileHelper.dir_exist(logger_path):
        FileHelper.create_directory(logger_path)
    logger_filename = "%s.log" % tc_id
    logfile = os.path.join(logger_path, logger_filename)
    client_ip = PlatformHelper.get_ip_address()
    LogHelper.create_logger(logfile,
                            fmt=client_ip + " %(asctime)s %(levelname)s " + product + " | %(message)s |")

    tc = TestCase(testrun=testrun, start_time=start_time, hostname=hostname, product=product,
                  ip=ip, summary=scenario.name, feature=scenario.filename, tags=scenario.tags,
                  logfile=logfile, _id=tc_id, build=build)
    context.tc = tc
    LogHelper.info("test start: " + scenario.name)
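# Illustration only (not part of the hook above): a minimal sketch of how
# before_scenario derives a test case id from the scenario tags. The prefix
# "QA-", the tags, and the testrun name are hypothetical example values; for a
# scenario outline row the example columns are additionally appended to the id.
import hashlib

def _example_tc_id():
    testrun = 'nightly'
    tc_prefix = 'QA-'
    tags = ['smoke', 'QA-1234']
    # mirrors the filter(...) above, written here as a list comprehension
    match = [tag for tag in tags if tag.find(tc_prefix) >= 0]
    if len(match) > 0:
        return testrun + "_" + match.pop()   # -> "nightly_QA-1234"
    # untagged scenarios fall back to a hash of testrun + scenario name
    return hashlib.md5((testrun + "_some scenario").encode()).hexdigest()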
def create_runner_log():
    # ----- create log file for the runner -----
    logger_path = os.path.join(ConfigAdapter.get_log_path(), testrun)
    logfile = os.path.join(logger_path, "runner_logger.log")
    LogHelper.create_logger(logfile,
                            fmt=PlatformHelper.get_ip_address() + " %(asctime)s %(levelname)s " + product + " | %(message)s |")
#! /usr/sbin/python
import os

from lib.filebeathelper import FilebeatHelper
from configuration.global_config_loader import GLOBAL_CONFIG
from configuration.config_adapter import ConfigAdapter
from configuration.runner_config_loader import RUNNER_CONFIG

logstash_url = GLOBAL_CONFIG.get('LOGSTASH').get('URL') or 'localhost'
port = GLOBAL_CONFIG.get('LOGSTASH').get('PORT') or '5044'
product = RUNNER_CONFIG.get('PRODUCT')
log_path = os.path.join(ConfigAdapter.get_log_path(), '*', '*.log')

config = {}
config['filebeat.prospectors'] = [{'input_type': 'log', 'paths': log_path}]
config['output.logstash'] = {
    'hosts': ["{logstash_url}:{port}".format(logstash_url=logstash_url, port=port)]
}

FilebeatHelper.setup_fb(config, product)
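# Illustration only: assuming FilebeatHelper.setup_fb ultimately serializes the
# dict above into filebeat's YAML configuration file, the result would look
# roughly like the output of this sketch. PyYAML is used here purely for the
# demo; the log path and logstash host are hypothetical example values.
import yaml

_example_config = {
    'filebeat.prospectors': [{'input_type': 'log',
                              'paths': '/var/log/automation/*/*.log'}],
    'output.logstash': {'hosts': ['localhost:5044']},
}
print(yaml.safe_dump(_example_config, default_flow_style=False))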
def run():
    tr = prepare_testrun(sendtodb)

    logger_path = ConfigAdapter.get_log_path()
    logfile = os.path.join(logger_path, tr.id, "tasks_logger.log")
    LogHelper.create_logger(logfile,
                            fmt=PlatformHelper.get_ip_address() + " %(asctime)s %(levelname)s " + product + " | %(message)s |")

    # select workers based on the machine parameter
    LogHelper.debug("list all online workers")
    # online_workers = CeleryHelper.get_online_workers()
    # online_workers = CeleryHelper.inspect_active_queues()
    online_workers = CeleryHelper.inspect_active_workers()
    LogHelper.debug(';'.join(online_workers))
    print "online workers \n {0}".format("\n".join(online_workers))

    selected_workers = []
    LogHelper.debug('setup workers')
    worker_setup_queue = 'worker_setup'
    automation_queue = tr.id
    for worker in online_workers:
        CeleryHelper.add_worker(worker_name=worker, queue_name=worker_setup_queue)
        worker_info = CeleryHelper.run_task('worker_setup', worker_name=worker, queue=worker_setup_queue).get()
        result = CeleryHelper.delete_worker(worker_name=worker, queue_name=worker_setup_queue)
        LogHelper.debug('delete worker result %s' % result)
        LogHelper.debug('checking worker {0}'.format(worker_info['WORKER_NAME']))
        worker_raw_info = '{os}_{os_version}_{worker_name}_{hostname}_{ip}'.format(
            os=worker_info.get('OS'),
            os_version=worker_info.get('OS_VERSION'),
            worker_name=worker_info.get('WORKER_NAME'),
            hostname=worker_info.get('HOSTNAME'),
            ip=worker_info.get('IP'))
        LogHelper.debug('worker machine info data is: %s' % worker_raw_info)
        machine_pattern = re.compile(machines, flags=re.IGNORECASE)
        if re.search(machine_pattern, worker_raw_info):
            LogHelper.info("worker: {0} is selected as a test agent".format(worker))
            output = CeleryHelper.add_worker(worker, queue_name=automation_queue)
            LogHelper.debug(output)
            selected_workers.append(worker)

    if len(selected_workers) == 0:
        print "ERROR: no available workers matched!"
        print "Please recheck the machine option argument"
        exit(1)

    LogHelper.debug('selected workers: %s' % selected_workers)
    print "selected workers \n {0}".format("\n".join(selected_workers))

    LogHelper.debug("Prepare tasks")
    LogHelper.debug(str(options))
    features = parse_feature(options.features)
    print "start to run automation: %s" % tr.id
    if len(features) < 1:
        print "no feature file"
        LogHelper.error("There is no test feature to be executed.")
        exit(1)
    else:
        task_obj_list = []
        for feature in features:
            feature_name = feature.split('/').pop().replace('.', '_')
            task_id = '{0}_{1}'.format(tr.id, feature_name)
            task = "python bin/runner.py -p {0} -o {1} -f {2} -r {3} --taskid {4}".format(
                product, options.oem_client, feature, tr.id, task_id)
            if job:
                task += " -j %s" % job
            if build:
                task += " -b %s" % build
            if options.tags:
                task += " --tags %s" % options.tags
            if options.environment:
                task += " -e %s" % options.environment
            if options.database_enabled:
                task += " -d %s" % options.database_enabled
            if dry_run:
                task += " --dry_run %s" % dry_run
            if scheduletype:
                task += " -s %s" % scheduletype
            if options.year:
                task += " -year %s" % year
            if options.month:
                task += " -month %s" % month
            if options.week:
                task += " -week %s" % week
            if options.day:
                task += " -day %s" % day
            if options.dayofweek:
                task += " -dayofweek %s" % dayofweek
            if options.hour:
                task += " -hour %s" % hour
            if options.minute:
                task += " -minute %s" % minute
            if options.second:
                task += " -second %s" % second
            if options.waitweeks:
                task += " -waitweeks %s" % waitweeks
            if options.waitdays:
                task += " -waitdays %s" % waitdays
            if options.waithours:
                task += " -waithours %s" % waithours
            if options.waitminutes:
                task += " -waitminutes %s" % waitminutes
            if options.waitseconds:
                task += " -waitseconds %s" % waitseconds

            LogHelper.info('run automation task %s' % task)
            p = CeleryHelper.run_task('run_automation', args=[task], queue=automation_queue, task_id=task_id)
            print "start to run task %s " % task
            if sendtodb:
                CeleryHelper.update_task_status(p.id, tr.id, result='PENDING')
            task_obj_list.append(p)
        LogHelper.debug("All tasks are sent to coordinator.")