def suite_teardown(self):
    # run generator's output suite result
    module_name = None
    try:
        module_path = CommonUtil.get_value_from_config(config=self.index_config, key='module-path')
        module_name = CommonUtil.get_value_from_config(config=self.index_config, key='module-name')
        if module_path and module_name:
            generator_class = getattr(importlib.import_module(module_path), module_name)
            generator_class.output_suite_result(self.global_config, self.index_config, self.exec_config,
                                                self.suite_result_dp)
    except Exception as e:
        # module_name is pre-initialized above so this message cannot raise a NameError
        self.logger.error('The module {module_name} cannot output suite result. Error: {exp}'.format(
            module_name=module_name, exp=e))

    # clean up output data when upload mode is enabled
    try:
        if CommonUtil.get_value_from_config(config=self.upload_config, key='enable'):
            self.clean_up_output_data()
    except Exception as e:
        self.logger.error('Cannot clean up output data. Error: {exp}'.format(exp=e))

    # clean browser profiles
    try:
        if CommonUtil.get_value_from_config(config=self.exec_config, key='advance'):
            self.logger.debug('Skip removing profile: {}'.format(self._firefox_profile_path))
            self.logger.debug('Skip removing profile: {}'.format(self._chrome_profile_path))
        else:
            if os.path.isdir(self._firefox_profile_path):
                self.firefox_profile_creator.remove_firefox_profile()
            if os.path.isdir(self._chrome_profile_path):
                self.chrome_profile_creator.remove_chrome_profile()
    except Exception as e:
        self.logger.error('Cannot clean browser profile. Error: {exp}'.format(exp=e))

    os.system(DEFAULT_EDITOR_CMD + " end.txt")
def __init__(self, input_cmd_config_fp, input_job_config_fp, input_config_fp):
    # init values
    cmd_config_fp = os.path.abspath(input_cmd_config_fp)
    job_config_fp = os.path.abspath(input_job_config_fp)
    config_fp = os.path.abspath(input_config_fp)

    # load configuration json files
    self.cmd_config = CommonUtil.load_json_file(cmd_config_fp)
    self.job_config = CommonUtil.load_json_file(job_config_fp)
    self.config = CommonUtil.load_json_file(config_fp)

    # init scheduler
    self.scheduler = BackgroundScheduler()
    self.scheduler.add_jobstore('sqlalchemy', url=self.config['job_store_url'])
    self.scheduler.start()

    # init variables
    manager = Manager()
    self.sync_queue = manager.Queue()
    self.async_queue = manager.Queue()
    self.current_job_list = []

    # Slack sending queue
    # TODO: when the Slack bot is disabled, prevent the sending queue from using too much memory.
    self.slack_sending_queue = manager.Queue(50)

    # init logger
    self.set_logging(self.config['log_level'], self.config['log_filter'])
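# Hedged usage sketch (not from the original source): once the BackgroundScheduler above is
# started with its SQLAlchemy job store, jobs can be registered against it. The job function,
# interval, and id below are illustrative assumptions; with a persistent job store the callable
# has to live at module level so APScheduler can serialize a reference to it.
def heartbeat_job():
    logging.info('scheduler heartbeat')

scheduler.add_job(heartbeat_job, 'interval', minutes=5, id='heartbeat', replace_existing=True)  # self.scheduler in the class above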
def validate_default_configs():
    """
    Validate all default configs under the default configs folder.
    @return: True or False.
    """
    final_result = True
    all_config_folders = ConfigValidator.get_all_sub_configs_folder()
    for c_folder in all_config_folders:
        logger.info('Validating Configs under {}'.format(os.path.relpath(c_folder)))
        config_schema_mapping_data = ConfigValidator.get_mapping_config_and_schema(c_folder)
        for config_path, schema_path in config_schema_mapping_data.items():
            if schema_path:
                config_obj = CommonUtil.load_json_file(config_path)
                schema_obj = CommonUtil.load_json_file(schema_path)
                validate_result = ConfigValidator.validate(config_obj, schema_obj)
                if validate_result:
                    logger.info('    Config: {c} ... {r}'.format(c=os.path.basename(config_path), r='Pass'))
                else:
                    logger.error('    Config: {c} ... {r}'.format(c=os.path.relpath(config_path), r='Failed'))
                    msg = 'Config settings {c} do not pass the schema {s} validation.'.format(
                        c=os.path.relpath(config_path), s=os.path.relpath(schema_path))
                    logger.error(msg)
                    final_result = False
            else:
                logger.error('    Config: {c} ... {r}'.format(c=os.path.relpath(config_path), r='Failed'))
                msg = 'Config settings {c} does not have a schema file {s}.'.format(c=config_path, s=schema_path)
                logger.error(msg)
                final_result = False
    return final_result
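# Hedged usage sketch (not from the original source): run the default-config validation as a
# pre-flight check and abort on any schema mismatch.
import sys

if not validate_default_configs():
    sys.exit(1)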
def main():
    """
    Demo of pushing MetaTask to Pulse. It loads the Pulse config from `--config`;
    please create the config json file before running this demo.

    The timestamp file of each job will be stored under "ejenti/pulse_modules/.md5/<JOB_NAME>".
    ex:
    {
        "pulse_username": "******",
        "pulse_password": "******"
    }

    You can also monitor the Pulse Message Queue on the https://pulseguardian.mozilla.org/ website.
    """
    default_log_format = '%(asctime)s %(levelname)s [%(name)s.%(funcName)s] %(message)s'
    default_datefmt = '%Y-%m-%d %H:%M'
    logging.basicConfig(level=logging.INFO, format=default_log_format, datefmt=default_datefmt)

    # load docopt arguments
    arguments = docopt(__doc__)

    # load config
    config_arg = arguments['--config']
    config_file = os.path.abspath(config_arg)
    config = CommonUtil.load_json_file(config_file)
    if not config:
        logging.error('There is no trigger config. (Loaded from {})'.format(config_file))
        exit(1)

    # filter the loggers
    log_filter = LogFilter()
    for disabled_logger in config.get('log_filter', []):
        logging.getLogger(disabled_logger).addFilter(log_filter)

    # load cmd_config
    cmd_config_arg = arguments['--cmd-config']
    cmd_config_file = os.path.abspath(cmd_config_arg)
    command_config = CommonUtil.load_json_file(cmd_config_file)
    if not command_config:
        logging.error('There is no command config. (Loaded from {})'.format(cmd_config_file))
        exit(1)

    clean_flag = arguments['--clean']

    try:
        trigger = TasksTrigger(config=config, cmd_config_obj=command_config, clean_at_begin=clean_flag)
        trigger.run()
        while True:
            time.sleep(10)
    except Exception as e:
        logging.error(e)
        exit(1)
def validate_configs(self):
    """
    Validate all input configs. Raise an exception on failure.
    @return:
    """
    mapping_config_schema = {
        'exec_config': os.path.join('configs', 'exec'),
        'index_config': os.path.join('configs', 'index'),
        'upload_config': os.path.join('configs', 'upload'),
        'global_config': os.path.join('configs', 'global'),
        'firefox_config': os.path.join('configs', 'firefox'),
        'chrome_config': os.path.join('configs', 'chrome')
    }

    for config_name, schema_folder in mapping_config_schema.items():
        config_obj = getattr(self, config_name)
        schema_list = ConfigValidator.get_schema_list(schema_folder)
        for schema_path in schema_list:
            schema_obj = CommonUtil.load_json_file(schema_path)
            validate_result = ConfigValidator.validate(config_obj, schema_obj)
            self.logger.info('Validate {c} by {s} ... {r}'.format(c=config_name,
                                                                  s=os.path.relpath(schema_path),
                                                                  r='Pass' if validate_result else 'Failed'))
            if not validate_result:
                message = 'Config settings {} do not pass the schema {} validation.'.format(
                    config_name, os.path.relpath(schema_path))
                raise Exception(message)
def clean_up_output_data(self):
    # clean the output folder
    temp_artifact_dir = CommonUtil.get_value_from_config(config=self.global_config,
                                                         key='default-case-temp-artifact-store-dn')
    if temp_artifact_dir:
        output_dir = os.path.join(os.getcwd(), temp_artifact_dir)
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)
def main():
    # load docopt arguments
    arguments = docopt(__doc__)

    # load trigger config (the original loaded the same `--config` file twice; once is enough)
    trigger_config_arg = arguments['--config']
    trigger_config_file = os.path.abspath(trigger_config_arg)
    trigger_config = CommonUtil.load_json_file(trigger_config_file)
    if not trigger_config:
        logging.error('There is no trigger config. (Loaded from {})'.format(trigger_config_file))
        exit(1)

    is_query = arguments['--query']
    is_remove = arguments['--remove']
    if is_query or is_remove:
        try:
            jobs = trigger_config.get('jobs')
            for job_name, job_obj in sorted(jobs.items()):
                is_enabled = job_obj.get('enable')
                print('Job [{name}]: {status}'.format(name=job_name,
                                                      status='enabled' if is_enabled else 'disabled'))

                if is_remove and is_enabled:
                    input_value = raw_input('>>> Remove the checking file of Job [{name}] (y/N): '.format(name=job_name))
                    if input_value.lower() in ('y', 'yes'):
                        print('    cleaning checking file ... ', end='')
                        ret = TasksTrigger.clean_md5_by_job_name(job_name)
                        if ret:
                            print(' OK')
                        else:
                            print(' Failed')
        except Exception as e:
            logging.error(e)
            exit(1)
def get_remote_url_list(input_repo_path):
    DEFAULT_GIT_CMD_REMOTE_V = ["git", "remote", "-v"]
    return_result = {}
    return_code, output_string = CommonUtil.subprocess_checkoutput_wrapper(DEFAULT_GIT_CMD_REMOTE_V,
                                                                           cwd=input_repo_path)
    if return_code == 0:
        # each `git remote -v` line looks like "<name>\t<url> (fetch|push)"; keep one URL per remote name
        output_list = [line.split("\t") for line in output_string.splitlines()]
        for tmp_output_list in output_list:
            return_result[tmp_output_list[0]] = tmp_output_list[1].split(" ")[0]
    logging.debug("get remote url list result [%s]" % return_result)
    return return_result
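# Hedged usage note (not from the original source): the returned dict maps each remote name
# to its fetch URL; for a typical clone the result might look something like:
#     get_remote_url_list('/path/to/hasal')
#     => {'origin': 'https://github.com/Mozilla-TWQA/Hasal.git'}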
def validate(dict_obj, schema_obj):
    """
    Validate dict_obj against schema_obj.
    @param dict_obj: the dict object you want to validate, or a path to its JSON file.
    @param schema_obj: the JSON schema, or a path to its JSON file.
    @return: True or False.
    """
    # accept file paths as well as already-loaded objects
    if isinstance(dict_obj, str):
        dict_obj = CommonUtil.load_json_file(dict_obj)
    if isinstance(schema_obj, str):
        schema_obj = CommonUtil.load_json_file(schema_obj)

    try:
        jsonschema.validate(dict_obj, schema_obj)
        return True
    except Exception as e:
        logger.error('\n{line}\n{message}\n{line}'.format(message=e, line='-' * 60))
        return False
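# Hedged usage sketch (not from the original source): validate a small dict against an inline
# JSON schema; both objects here are illustrative only.
example_config = {'max-run': 30}
example_schema = {
    'type': 'object',
    'properties': {'max-run': {'type': 'integer', 'minimum': 1}},
    'required': ['max-run']
}
assert ConfigValidator.validate(example_config, example_schema)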
def git_pull(**kwargs):
    """
    git pull cmd wrapper
    @param kwargs:
        kwargs['cmd_obj']['configs']['GIT_PULL_PARAMETER_REMOTE_URL'] :: remote url parameter
        kwargs['cmd_obj']['configs']['GIT_PULL_PARAMETER_BRANCH_NAME'] :: branch name
    @return:
    """
    DEFAULT_GIT_CMD_PULL = ["git", "pull"]

    # get queue msg, consumer config from kwargs
    queue_msg, consumer_config, task_config = init_task(kwargs)

    # get default repo path
    repo_path = get_hasal_repo_path(task_config)

    # get remote url and branch
    cmd_parameter_list = parse_cmd_parameters(queue_msg)
    if len(cmd_parameter_list) == 2:
        remote_url = cmd_parameter_list[1]
        branch_name = ""
    elif len(cmd_parameter_list) == 3:
        remote_url = cmd_parameter_list[1]
        branch_name = cmd_parameter_list[2]
    else:
        remote_url = task_config.get("GIT_PULL_PARAMETER_REMOTE_URL", "")
        branch_name = task_config.get("GIT_PULL_PARAMETER_BRANCH_NAME", "")

    if remote_url == "" and branch_name == "":
        exec_cmd = DEFAULT_GIT_CMD_PULL
    elif remote_url != "":
        if remote_url.startswith("https://") or remote_url in get_remote_url_list(repo_path):
            if branch_name:
                exec_cmd = DEFAULT_GIT_CMD_PULL + [remote_url, branch_name]
            else:
                exec_cmd = DEFAULT_GIT_CMD_PULL + [remote_url]
        else:
            logging.error("Remote name [%s] cannot be found in your repo" % remote_url)
            return False
    else:
        logging.error("Incorrect usage for git pull, remote_url: [%s], branch_name: [%s]" % (remote_url, branch_name))
        return False

    logging.debug("git pull execute cmd [%s]" % exec_cmd)
    return_code, output = CommonUtil.subprocess_checkoutput_wrapper(exec_cmd, cwd=repo_path)
    if return_code == 0:
        logging.info("git pull command [%s] executed successfully! output [%s]" % (queue_msg['input_cmd_str'], output))
        return True
    else:
        return False
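# Hedged usage sketch (not from the original source): the git task wrappers receive their
# context through kwargs. The exact shape consumed by init_task() is an assumption
# reconstructed from the keys these wrappers read; treat it as illustrative only.
example_kwargs = {
    'queue_msg': {
        'input_cmd_str': 'git_pull origin dev',
        'cmd_obj': {
            'configs': {
                'GIT_PULL_PARAMETER_REMOTE_URL': 'origin',
                'GIT_PULL_PARAMETER_BRANCH_NAME': 'dev'
            }
        }
    },
    'consumer_config': {}
}
git_pull(**example_kwargs)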
def __init__(self, **kwargs):
    self.exec_config = {}
    self.online_config = {}
    self.index_config = {}
    self.global_config = {}
    self.firefox_config = {}

    # load exec-config, firefox-config, index-config, online-config and global-config into self
    self.exec_config_fp = os.path.abspath(kwargs['exec_config'])
    self.firefox_config_fp = os.path.abspath(kwargs['firefox_config'])
    self.index_config_fp = os.path.abspath(kwargs['index_config'])
    self.online_config_fp = os.path.abspath(kwargs['online_config'])
    self.global_config_fp = os.path.abspath(kwargs['global_config'])
    for variable_name in kwargs.keys():
        setattr(self, variable_name, load_json_file(kwargs[variable_name]))

    # init logger
    self.logger = get_logger(__file__, self.exec_config['advance'])
    self.logger.debug('\n###############\n# Important #\n###############\n')
    for v_name in kwargs.keys():
        self.logger.debug('Loading Settings from {}:\n{}\n'.format(
            v_name, json.dumps(getattr(self, v_name), indent=4)))

    # init values
    self.firefox_profile_creator = FirefoxProfileCreator()
    self.settings_prefs = self.firefox_config.get('prefs', {})
    self.cookies_settings = self.firefox_config.get('cookies', {})
    self.extensions_settings = self.firefox_config.get('extensions', {})
    self.suite_result_dp = ''
    self._firefox_profile_path = self.firefox_profile_creator.get_firefox_profile(
        prefs=self.settings_prefs,
        cookies_settings=self.cookies_settings,
        extensions_settings=self.extensions_settings)
    self.default_result_fp = os.path.join(os.getcwd(), self.global_config['default-result-fn'])

    # check the video recording settings; raise an exception if more than one recorder is enabled
    CommonUtil.is_video_recording(self.firefox_config)
def git_fetch(**kwargs):
    """
    git fetch command wrapper
    @param kwargs:
        kwargs['cmd_obj']['configs']['GIT_FETCH_PARAMETER_REMOTE_URL'] :: remote url
    @return:
    """
    DEFAULT_GIT_CMD_FETCH = ["git", "fetch"]

    # get queue msg, consumer config from kwargs
    queue_msg, consumer_config, task_config = init_task(kwargs)

    # get default repo path
    repo_path = get_hasal_repo_path(task_config)

    # get remote url
    cmd_parameter_list = parse_cmd_parameters(queue_msg)
    if len(cmd_parameter_list) == 2:
        remote_url = cmd_parameter_list[1]
    else:
        remote_url = task_config.get("GIT_FETCH_PARAMETER_REMOTE_URL", "")

    if remote_url == "":
        exec_cmd = DEFAULT_GIT_CMD_FETCH
    else:
        if remote_url.startswith("https://") or remote_url in get_remote_url_list(repo_path):
            exec_cmd = DEFAULT_GIT_CMD_FETCH + [remote_url]
        else:
            logging.error("Remote name [%s] cannot be found in your repo" % remote_url)
            return False

    logging.debug("git fetch execute cmd [%s]" % exec_cmd)
    return_code, output = CommonUtil.subprocess_checkoutput_wrapper(exec_cmd, cwd=repo_path)
    if return_code == 0:
        logging.info("git fetch command [%s] executed successfully! output [%s]" % (queue_msg['input_cmd_str'], output))
        return True
    else:
        return False
def git_reset(**kwargs):
    """
    git reset command wrapper
    @param kwargs:
    @return:
    """
    DEFAULT_GIT_CMD_RESET = ["git", "reset", "--hard", "HEAD"]

    # get queue msg, consumer config from kwargs
    queue_msg, consumer_config, task_config = init_task(kwargs)

    # get default repo path
    repo_path = get_hasal_repo_path(task_config)

    logging.debug("git reset execute cmd [%s]" % DEFAULT_GIT_CMD_RESET)
    return_code, output = CommonUtil.subprocess_checkoutput_wrapper(DEFAULT_GIT_CMD_RESET, cwd=repo_path)
    if return_code == 0:
        logging.info("git reset command [%s] executed successfully! output [%s]" % (queue_msg['input_cmd_str'], output))
        return True
    else:
        return False
def git_checkout(**kwargs):
    """
    git checkout command wrapper
    @param kwargs:
        kwargs['cmd_obj']['configs']['GIT_CHECKOUT_PARAMETER_BRANCH_NAME'] :: branch name
    @return:
    """
    DEFAULT_GIT_CMD_CHECKOUT = ["git", "checkout"]

    # get queue msg, consumer config from kwargs
    queue_msg, consumer_config, task_config = init_task(kwargs)

    # get default repo path
    repo_path = get_hasal_repo_path(task_config)

    # get branch name
    cmd_parameter_list = parse_cmd_parameters(queue_msg)
    if len(cmd_parameter_list) == 2:
        branch_name = cmd_parameter_list[1]
    else:
        branch_name = task_config.get("GIT_CHECKOUT_PARAMETER_BRANCH_NAME", "")

    if branch_name:
        exec_cmd = DEFAULT_GIT_CMD_CHECKOUT + [branch_name]
    else:
        logging.error("Please specify the checkout branch via the cmd or the configs")
        return False

    logging.debug("git checkout execute cmd [%s]" % exec_cmd)
    return_code, output = CommonUtil.subprocess_checkoutput_wrapper(exec_cmd, cwd=repo_path)
    if return_code == 0:
        logging.info("git checkout command [%s] executed successfully! output [%s]" % (queue_msg['input_cmd_str'], output))
        return True
    else:
        return False
def calculate(env, global_config, exec_config, index_config, firefox_config, upload_config, suite_upload_dp="",
              crop_data=None):
    """
    @param env: from lib.common.environment.py
    @param crop_data: sample crop data area
    @param suite_upload_dp: folder consolidating all execution results
    @return:
    """
    # validation data assignment
    validator_data = {
        global_config['default-file-exist-validator-name']: {
            'check_fp_list': [env.video_output_fp]
        }
    }
    validator_settings = {
        'modules': {
            global_config['default-file-exist-validator-name']: {
                'path': global_config['default-file-exist-validator-module-path']
            }
        }
    }
    if CommonUtil.is_validate_fps(firefox_config):
        validator_data[global_config['default-fps-validator-name']] = {
            'recording_log_fp': env.recording_log_fp,
            'default_fps': index_config['video-recording-fps']
        }
        validator_settings['modules'][global_config['default-fps-validator-name']] = {
            'path': global_config['default-fps-validator-module-path']
        }
    validator_settings['status_file'] = global_config['default-running-statistics-fn']

    # run the analysis only after validation passes
    validate_result = validate_data(validator_settings, validator_data)

    exec_timestamp_list = get_timestamp_json_data(env.DEFAULT_TIMESTAMP, env.INITIAL_TIMESTAMP_NAME)

    if validate_result['validate_result']:
        if not validate_result.get(global_config['default-fps-validator-name']):
            current_fps_value = index_config['video-recording-fps']
        else:
            current_fps_value = validate_result[global_config['default-fps-validator-name']]['output_result']

        # using a different converter will introduce a slightly different time sequence;
        # the difference ranges from 0.000000000002 to 0.000000000004 ms (cv2 is lower than ffmpeg)
        converter_settings = {
            'modules': {
                index_config['image-converter-name']: {
                    'path': index_config['image-converter-path']
                }
            }
        }
        converter_data = {
            index_config['image-converter-name']: {
                'video_fp': env.video_output_fp,
                'output_img_dp': env.img_output_dp,
                'convert_fmt': index_config['image-converter-format'],
                'current_fps': current_fps_value,
                'exec_timestamp_list': exec_timestamp_list,
                'search_margin': index_config['search-margin']
            }
        }
        converter_result = run_modules(converter_settings, converter_data[index_config['image-converter-name']])

        generator_name = index_config['module-name']
        generator_module_path = index_config['module-path']
        sample_settings = {
            'modules': {
                index_config['sample-converter-name']: {
                    'path': index_config['sample-converter-path']
                }
            }
        }
        sample_data = {
            'sample_dp': env.img_sample_dp,
            'configuration': {
                'generator': {
                    generator_name: {'path': generator_module_path}
                }
            },
            'orig_sample': env.img_output_sample_1_fn,
            'index_config': index_config,
            'exec_config': exec_config,
            'upload_config': upload_config,
            'global_config': global_config,
            'input_env': env
        }
        # sample_result looks like:
        # {1: {'fp': 'xxcxxxx', 'RunTimeDctGenerator': 'dctobj', 'SpeedIndexGenerator': None},
        #  2: {'fp': 'xxxxx', 'SpeedIndexGenerator': None, 'crop_fp': 'xxxxxxx', 'viewport': 'xxxxx'}}
        sample_result = run_modules(sample_settings, sample_data)

        generator_data = {
            'converter_result': converter_result[index_config['image-converter-name']],
            'sample_result': sample_result[index_config['sample-converter-name']],
            'exec_timestamp_list': exec_timestamp_list
        }
        generator_class = getattr(importlib.import_module(generator_module_path), generator_name)
        generator_obj = generator_class(index_config, exec_config, upload_config, global_config, env)

        start_time = time.time()
        generator_result = generator_obj.generate_result(generator_data)
        last_end = time.time()
        elapsed_time = last_end - start_time
        logger.debug(generator_result)
        logger.debug("Generator [%s] Time Elapsed: [%s]" % (generator_name, elapsed_time))

        # record fps_stat
        objStatusRecorder = StatusRecorder(global_config['default-running-statistics-fn'])
        if validate_result.get(global_config['default-fps-validator-name'], {}).get('validate_result', True):
            objStatusRecorder.record_current_status({objStatusRecorder.STATUS_FPS_VALIDATION: 0})
        else:
            objStatusRecorder.record_current_status({objStatusRecorder.STATUS_FPS_VALIDATION: 1})

        # generate case result to json
        generator_obj.output_case_result(suite_upload_dp)
def generate_hasal_config(**kwargs):
    """
    generate hasal config jsons for ejenti; by default this generates the
    agent/chrome/exec/firefox/global/index/online jsons
    @param kwargs: will have two keys: queue_msg, consumer_config
        kwargs['cmd_obj']['configs']['DEFAULT_HASAL_CONFIG_CONTENT_TEMPLATE'] :: default template used for generating config content
        kwargs['cmd_obj']['configs']['DEFAULT_HASAL_RUNTEST_CMD_PARAMETERS_TEMPLATE'] :: default runtest exec parameters template
        kwargs['cmd_obj']['configs']['OVERWIRTE_HASAL_CONFIG_CTNT'] :: the content used to overwrite the current config,
        for example:
        {
            "configs": {
                "exec": {
                    "default.json": {"key1": "value1"}
                },
                "firefox": {
                    "default.json": {"key2": "value2", "key3": "value3"}
                },
                "online": {
                    "abc.json": {"key3": "value3", "key4": "value4"}
                }
            }
        }
    @return:
    """
    DEFAULT_HASAL_CONFIG = {
        "configs": {
            "exec": {"default.json": {}},
            "firefox": {"default.json": {}},
            "chrome": {"default.json": {}},
            "index": {"runtimeDctGenerator.json": {}},
            "upload": {"default.json": {}},
            "global": {"default.json": {}}
        }
    }

    DEFAULT_HASAL_RUNTEST_CONFIGS = {
        "--exec-config": "",
        "--firefox-config": "",
        "--index-config": "",
        "--upload-config": "",
        "--global-config": "",
        "--chrome-config": ""
    }

    # get queue msg, consumer config from kwargs
    queue_msg, consumer_config, task_config = init_task(kwargs)

    # get override config
    cmd_parameter_list = queue_msg.get('input_cmd_str', "").split(" ", 1)
    default_config_settings = task_config.get("DEFAULT_HASAL_CONFIG_CONTENT_TEMPLATE", DEFAULT_HASAL_CONFIG)
    default_runtest_configs = task_config.get("DEFAULT_HASAL_RUNTEST_CMD_PARAMETERS_TEMPLATE",
                                              DEFAULT_HASAL_RUNTEST_CONFIGS)

    # get input config from user interactive mode
    if len(cmd_parameter_list) == 2:
        input_json_str = cmd_parameter_list[1]
        logging.debug("input cmd parameter : [%s]" % input_json_str)
        input_json_obj = CommonUtil.load_json_string(input_json_str)
        logging.debug("load json obj from input cmd: [%s]" % input_json_obj)
    else:
        input_json_obj = task_config.get("OVERWIRTE_HASAL_CONFIG_CTNT", {})
        logging.debug("load json obj from input config: [%s]" % input_json_obj)

    if len(input_json_obj.keys()) == 0:
        logging.info("No input config object [%s] detected, will use the default config setting instead" % input_json_obj)
    else:
        json_path = get_hasal_repo_path(task_config)

        # merge default and input configs
        full_config_obj = merge_user_input_config_with_default_config(input_json_obj, default_config_settings)

        # generate the mapping from each config path to the key-value pairs that need to be modified
        full_config_path_mapping = generate_config_path_json_mapping(json_path, full_config_obj, {})
        full_exec_runtest_config = copy.deepcopy(default_runtest_configs)

        # dump each merged config to an ejenti.json file next to the original
        for config_path in full_config_path_mapping:
            tmp_json_obj = CommonUtil.load_json_file(config_path)
            tmp_json_obj.update(full_config_path_mapping[config_path])
            dir_name = os.path.dirname(config_path)
            new_config_path = os.path.join(dir_name, "ejenti.json")
            parameter_name = "--{}-config".format(dir_name.split(os.sep)[-1])
            full_exec_runtest_config[parameter_name] = new_config_path
            with open(new_config_path, 'w') as fh:
                json.dump(tmp_json_obj, fh)

        logging.debug("exec runtest config [%s]" % full_exec_runtest_config)
        return full_exec_runtest_config
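# Hedged usage sketch (not from the original source): when an override config is supplied,
# generate_hasal_config() returns a mapping from runtest CLI flags to the generated
# ejenti.json paths; flattening it into a command line might look like this. The kwargs
# shape (see the sketch after git_pull) and the runtest.py entry point are assumptions.
runtest_flag_map = generate_hasal_config(**example_kwargs)
runtest_cmd = ['python', 'runtest.py']
for flag, config_path in sorted(runtest_flag_map.items()):
    if config_path:
        runtest_cmd += [flag, config_path]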
def calculate(env, global_config, exec_config, index_config, firefox_config, online_config, suite_upload_dp="",
              crop_data=None):
    """
    @param env: from lib.common.environment.py
    @param crop_data: sample crop data area
    @param suite_upload_dp: folder consolidating all execution results
    @return:
    """
    calculator_result = {}

    # validation data assignment
    validator_data = {
        global_config['default-file-exist-validator-name']: {
            'check_fp_list': [env.video_output_fp]
        }
    }
    validator_settings = {
        'modules': {
            global_config['default-file-exist-validator-name']: {
                'path': global_config['default-file-exist-validator-module-path']
            }
        }
    }
    if CommonUtil.is_validate_fps(firefox_config):
        validator_data[global_config['default-fps-validator-name']] = {
            'recording_log_fp': env.recording_log_fp,
            'default_fps': index_config['video-recording-fps']
        }
        validator_settings['modules'][global_config['default-fps-validator-name']] = {
            'path': global_config['default-fps-validator-module-path']
        }
    validator_settings['status_file'] = env.DEFAULT_STAT_RESULT

    # run the analysis only after validation passes
    validate_result = validate_data(validator_settings, validator_data)

    exec_timestamp_list = get_json_data(env.DEFAULT_TIMESTAMP, env.INITIAL_TIMESTAMP_NAME)

    if validate_result['validate_result']:
        # using a different converter will introduce a slightly different time sequence;
        # the difference ranges from 0.000000000002 to 0.000000000004 ms (cv2 is lower than ffmpeg)
        converter_settings = {
            'modules': {
                index_config['image-converter-name']: {
                    'path': index_config['image-converter-path']
                }
            }
        }
        converter_data = {
            index_config['image-converter-name']: {
                'video_fp': env.video_output_fp,
                'output_img_dp': env.img_output_dp,
                'convert_fmt': index_config['image-converter-format'],
                'current_fps': validate_result[global_config['default-fps-validator-name']]['output_result'],
                'exec_timestamp_list': exec_timestamp_list
            }
        }
        converter_result = run_modules(converter_settings, converter_data[index_config['image-converter-name']])

        sample_settings = {
            'modules': {
                index_config['sample-converter-name']: {
                    'path': index_config['sample-converter-path']
                }
            }
        }
        sample_data = {
            'sample_dp': env.img_sample_dp,
            'configuration': {
                'generator': {
                    index_config['module-name']: {'path': index_config['module-path']}
                }
            }
        }
        # sample_result looks like:
        # {1: {'fp': 'xxcxxxx', 'RunTimeDctGenerator': 'dctobj', 'SpeedIndexGenerator': None},
        #  2: {'fp': 'xxxxx', 'SpeedIndexGenerator': None, 'crop_fp': 'xxxxxxx', 'viewport': 'xxxxx'}}
        sample_result = run_modules(sample_settings, sample_data)

        generator_settings = sample_data['configuration']['generator']
        generator_data = {
            'converter_result': converter_result[index_config['image-converter-name']],
            'sample_result': sample_result[index_config['sample-converter-name']],
            'index_config': index_config,
            'exec_timestamp_list': exec_timestamp_list
        }
        generator_result = run_generators(generator_settings, generator_data)

        # to support the legacy output format, merge all generator results into the running-time result
        for generator_name in sample_data['configuration']['generator']:
            if generator_result[generator_name]:
                calculator_result.update(generator_result[generator_name])

        # output sikuli status to the statistics file
        with open(env.DEFAULT_STAT_RESULT, "r+") as fh:
            stat_data = json.load(fh)
            if validate_result[global_config['default-fps-validator-name']]['validate_result']:
                stat_data['fps_stat'] = 0
            else:
                stat_data['fps_stat'] = 1
            fh.seek(0)
            fh.write(json.dumps(stat_data))

    if calculator_result is not None:
        output_result(env.test_name, calculator_result, env.DEFAULT_TEST_RESULT, env.DEFAULT_STAT_RESULT,
                      env.test_method_doc, exec_config['max-run'], env.video_output_fp, env.web_app_name,
                      online_config['perfherder-revision'], online_config['perfherder-pkg-platform'],
                      env.output_name, index_config['drop-outlier-flag'])

        start_time = time.time()
        output_video(calculator_result, env.converted_video_output_fp, index_config)
        current_time = time.time()
        elapsed_time = current_time - start_time
        logger.debug("Generate Video Elapsed: [%s]" % elapsed_time)

        upload_case_name = "_".join(env.output_name.split("_")[2:-1])
        upload_case_dp = os.path.join(suite_upload_dp, upload_case_name)
        if os.path.exists(upload_case_dp) is False:
            os.mkdir(upload_case_dp)
        if os.path.exists(env.converted_video_output_fp):
            shutil.move(env.converted_video_output_fp, upload_case_dp)
def run_hasal_on_latest_nightly(**kwargs):
    """
    Combination task for the daily nightly-triggered test
    @param kwargs:
        kwargs['cmd_obj']['configs']['GIT_PULL_PARAMETER_REMOTE_URL'] :: git pull remote url (should be origin)
        kwargs['cmd_obj']['configs']['GIT_PULL_PARAMETER_BRANCH_NAME'] :: git pull branch name (currently dev)
        kwargs['cmd_obj']['configs']['GIT_CHECKOUT_PARAMETER_BRANCH_NAME'] :: git checkout branch name
        kwargs['cmd_obj']['configs']['OVERWRITE_HASAL_SUITE_CASE_LIST'] :: the case list used to overwrite the current
        suite file; a new suite file called ejenti.suite will be generated, ex:
        tests.regression.gdoc.test_firefox_gdoc_read_basic_txt_1, tests.regression.gdoc.test_firefox_gdoc_read_basic_txt_2
        kwargs['cmd_obj']['configs']['OVERWIRTE_HASAL_CONFIG_CTNT'] :: the content used to overwrite the current config,
        for example:
        {
            "configs": {
                "exec": {
                    "default.json": {"key1": "value1"}
                },
                "firefox": {
                    "default.json": {"key2": "value2", "key3": "value3"}
                },
                "online": {
                    "abc.json": {"key3": "value3", "key4": "value4"}
                }
            }
        }
    @return:
    """
    # checkout latest code
    checkout_latest_code(**kwargs)

    # download latest nightly build
    pkg_download_info_json = download_latest_nightly_build(**kwargs)

    # deploy fx: specify the downloaded Firefox package path
    kwargs['queue_msg']['cmd_obj']['configs']['INPUT_FX_DL_PKG_PATH'] = pkg_download_info_json['FX-DL-PACKAGE-PATH']
    if deploy_fx_package(**kwargs):
        # generate hasal config: take the config from the upper task and merge it with the nightly json info
        meta_task_input_config = kwargs['queue_msg']['cmd_obj']['configs'].get("OVERWIRTE_HASAL_CONFIG_CTNT", {})
        auto_generate_config = {
            "configs": {
                "upload": {
                    "default.json": {
                        "perfherder-revision": pkg_download_info_json['PERFHERDER-REVISION'],
                        "perfherder-pkg-platform": pkg_download_info_json['PERFHERDER-PKG-PLATFORM']
                    }
                },
                "exec": {
                    "default.json": {
                        "exec-suite-fp": generate_suite_file(**kwargs)
                    }
                }
            }
        }
        merge_input_config = CommonUtil.deep_merge_dict(meta_task_input_config, auto_generate_config)
        kwargs['queue_msg']['cmd_obj']['configs']['OVERWIRTE_HASAL_CONFIG_CTNT'] = merge_input_config
        ejenti_hasal_config = generate_hasal_config(**kwargs)

        # exec hasal runtest
        kwargs['queue_msg']['cmd_obj']['configs']['RUNTEST_CONFIG_PARAMETERS'] = ejenti_hasal_config
        exec_hasal_runtest(**kwargs)
def __init__(self, **kwargs):
    # init variables
    self.exec_config = {}
    self.index_config = {}
    self.upload_config = {}
    self.global_config = {}
    self.firefox_config = {}
    self.chrome_config = {}

    # load exec-config, firefox-config, index-config, upload-config and global-config into self
    self.exec_config_fp = os.path.abspath(kwargs['exec_config'])
    self.index_config_fp = os.path.abspath(kwargs['index_config'])
    self.upload_config_fp = os.path.abspath(kwargs['upload_config'])
    self.global_config_fp = os.path.abspath(kwargs['global_config'])
    self.firefox_config_fp = os.path.abspath(kwargs['firefox_config'])
    self.chrome_config_fp = os.path.abspath(kwargs['chrome_config'])
    for variable_name in kwargs.keys():
        setattr(self, variable_name, CommonUtil.load_json_file(kwargs[variable_name]))

    # init logger
    self.logger = get_logger(__file__, self.exec_config['advance'])
    self.logger.debug('\n###############\n# Important #\n###############\n')
    for v_name in kwargs.keys():
        self.logger.debug(
            'Loading Settings from {}:\n{}\n'.format(v_name, json.dumps(getattr(self, v_name), indent=4)))

    # validate all configs before overwriting platform-dependent settings; raise Exception on failure
    self.validate_configs()

    # overwrite platform-dependent settings in configs
    self.__dict__.update(
        HasalConfigUtil.overwrite_platform_dep_settings_into_configs(self, "firefox_config", self.firefox_config,
                                                                     ["firefox_config"], sys.platform,
                                                                     platform.release()).__dict__)
    self.__dict__.update(
        HasalConfigUtil.overwrite_platform_dep_settings_into_configs(self, "chrome_config", self.chrome_config,
                                                                     ["chrome_config"], sys.platform,
                                                                     platform.release()).__dict__)

    # init values
    self.suite_result_dp = ''
    self.default_result_fp = os.path.join(os.getcwd(), self.global_config['default-result-fn'])

    # firefox profile
    if self.firefox_config.get('enable_create_new_profile', False):
        self.firefox_profile_creator = FirefoxProfileCreator(
            launch_cmd=self.firefox_config.get('launch-browser-cmd-path'))
        settings_prefs = self.firefox_config.get('prefs', {})
        cookies_settings = self.firefox_config.get('cookies', {})
        profile_files_settings = self.firefox_config.get('profile_files', {})
        extensions_settings = self.firefox_config.get('extensions', {})
        self._firefox_profile_path = self.firefox_profile_creator.get_firefox_profile(
            prefs=settings_prefs,
            cookies_settings=cookies_settings,
            profile_files_settings=profile_files_settings,
            extensions_settings=extensions_settings)
    else:
        self._firefox_profile_path = ""

    # chrome profile (the launch command is read from chrome_config here)
    if self.chrome_config.get('enable_create_new_profile', False):
        self.chrome_profile_creator = ChromeProfileCreator(
            launch_cmd=self.chrome_config.get('launch-browser-cmd-path'))
        chrome_cookies_settings = self.chrome_config.get('cookies', {})
        self._chrome_profile_path = self.chrome_profile_creator.get_chrome_profile(
            cookies_settings=chrome_cookies_settings)
    else:
        self._chrome_profile_path = ""

    # check the video recording settings; raise an exception if more than one recorder is enabled
    CommonUtil.is_video_recording(self.firefox_config)

    # check the upload config revision; raise an exception if upload is enabled without a revision
    if self.upload_config.get("perfherder-revision", "") == "" and self.upload_config.get("enable", False):
        raise Exception(
            "Your current upload config is enabled [%s], but the revision is empty [%s]; "
            "please make sure your config is set correctly" % (self.upload_config.get("enable", False),
                                                               self.upload_config.get("perfherder-revision", "")))
def upload_test_result_handler(self):
    # load previously failed upload results
    upload_result_data = CommonUtil.load_json_file(self.global_config['default-upload-result-failed-fn'])

    # init status recorder
    objStatusRecorder = StatusRecorder(self.global_config['default-running-statistics-fn'])

    # get case basic info
    case_time_stamp = objStatusRecorder.get_case_basic_info()[objStatusRecorder.DEFAULT_FIELD_CASE_TIME_STAMP]
    case_name = objStatusRecorder.get_case_basic_info()[objStatusRecorder.DEFAULT_FIELD_CASE_NAME]

    # get test result data by case name
    current_test_result = CommonUtil.load_json_file(self.global_config['default-result-fn']).get(case_name, {})
    if current_test_result:
        # generate the upload-related data
        objGeneratePerfherderData = PerfherderUploadDataGenerator(case_name, current_test_result,
                                                                  self.upload_config, self.index_config)
        upload_result_data[case_time_stamp] = objGeneratePerfherderData.generate_upload_data()
    else:
        self.logger.error("Cannot find the result json file [%s], please check the current environment!" %
                          self.global_config['default-result-fn'])

    server_url = '{protocol}://{host}'.format(protocol=self.upload_config['perfherder-protocol'],
                                              host=self.upload_config['perfherder-host'])
    perfherder_uploader = PerfherderUploader(self.upload_config['perfherder-client-id'],
                                             self.upload_config['perfherder-secret'],
                                             os_name=sys.platform,
                                             platform=self.upload_config['perfherder-pkg-platform'],
                                             machine_arch=self.upload_config['perfherder-pkg-platform'],
                                             build_arch=self.upload_config['perfherder-pkg-platform'],
                                             server_url=server_url,
                                             repo=self.upload_config['perfherder-repo'])

    upload_success_timestamp_list = []
    for current_time_stamp in upload_result_data:
        # upload the video first; on failure, log it and keep uploading the data with a blank video link
        if not upload_result_data[current_time_stamp]['video_link']:
            if upload_result_data[current_time_stamp]['upload_video_fp']:
                upload_result_data[current_time_stamp]['video_link'] = {
                    "adjusted_running_video": VideoUploader.upload_video(
                        upload_result_data[current_time_stamp]['upload_video_fp'])
                }
            else:
                self.logger.error("Cannot find the upload video fp in the result json file!")

        uploader_response = perfherder_uploader.submit(upload_result_data[current_time_stamp]['revision'],
                                                       upload_result_data[current_time_stamp]['browser'],
                                                       upload_result_data[current_time_stamp]['timestamp'],
                                                       upload_result_data[current_time_stamp]['perf_data'],
                                                       upload_result_data[current_time_stamp]['version'],
                                                       upload_result_data[current_time_stamp]['repo_link'],
                                                       upload_result_data[current_time_stamp]['video_link'],
                                                       upload_result_data[current_time_stamp]['extra_info_obj'])
        if uploader_response:
            if uploader_response.status_code == requests.codes.ok:
                upload_success_timestamp_list.append(current_time_stamp)
                self.logger.debug("upload to perfherder succeeded, result ::: %s" %
                                  upload_result_data[current_time_stamp])
                self.logger.info("upload to perfherder succeeded, status code: [%s], json: [%s]" %
                                 (uploader_response.status_code, uploader_response.json()))
            else:
                upload_result_data[current_time_stamp]['upload_status_code'] = uploader_response.status_code
                upload_result_data[current_time_stamp]['upload_status_json'] = uploader_response.json()
                self.logger.info("upload to perfherder failed, status code: [%s], json: [%s]" %
                                 (uploader_response.status_code, uploader_response.json()))
        else:
            self.logger.info("upload to perfherder failed, unknown exception happened while submitting to perfherder")

    # remove successfully uploaded timestamps from the upload result data
    for del_time_stamp in upload_success_timestamp_list:
        upload_result_data.pop(del_time_stamp)

    # dump all remaining failed upload data to the json file
    with open(self.global_config['default-upload-result-failed-fn'], 'w') as write_fh:
        json.dump(upload_result_data, write_fh)