def validate_data(validator_settings, validator_data):
    """

    @param validator_settings: use validator name as key to define validator module and module path in dict type
    @param validator_data: use validator name as key to define validator data in dict type
    @return: validate_result , and validator name as key, include with validate result and output_result
    """
    validator_result = {'validate_result': False}
    for validator_name in validator_settings['modules'].keys():
        validator_class = getattr(
            importlib.import_module(
                validator_settings['modules'][validator_name]['path']),
            validator_name)
        validator_obj = validator_class()
        validate_result = validator_obj.validate(
            validator_data[validator_name])
        if not validate_result:
            logger.warning(
                "Validator[%s] validate data failed, output is [%s]." %
                (validator_name, validator_obj.get_output()))
            return validator_result
        else:
            validator_result[validator_name] = {
                'validate_result': validate_result,
                'output_result': validator_obj.get_output()
            }
    validator_result['validate_result'] = True

    # dump validate result to status file
    objStatusRecorder = StatusRecorder(validator_settings['status_file'])
    objStatusRecorder.record_current_status(
        {objStatusRecorder.STATUS_VALIDATOR_RESULT: validator_result})
    return validator_result
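
For context, a minimal sketch of how validate_data() might be driven. The validator name, module path, and file paths below are hypothetical stand-ins for values normally read from global_config:

# Hypothetical illustration of the dict shapes validate_data() expects.
validator_settings = {
    'modules': {
        'FileExistValidator': {'path': 'lib.validator.fileExistValidator'}  # assumed module path
    },
    'status_file': 'running_statistics.json'
}
validator_data = {
    'FileExistValidator': {'check_fp_list': ['output/videos/sample.mkv']}
}

result = validate_data(validator_settings, validator_data)
if result['validate_result']:
    print(result['FileExistValidator']['output_result'])
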
Code example #2
File: runtest.py Project: askeing/Hasal
    def loop_test(self,
                  test_case_module_name,
                  test_name,
                  test_env,
                  current_run=0,
                  current_retry=0):
        return_result = {"ip": None, "video_path": None, "test_name": None}
        while current_run < self.exec_config['max-run']:
            self.logger.info("The counter is %d and the retry counter is %d" %
                             (current_run, current_retry))
            try:
                self.kill_legacy_process()
                self.run_test(test_case_module_name, test_env)
                current_run, current_retry, return_result = self.run_test_result_analyzer(
                    test_case_module_name, test_name, current_run,
                    current_retry, return_result)
            except Exception as e:
                self.logger.warn('Exception happened while running the test!')
                objStatusRecorder = StatusRecorder(
                    self.global_config['default-running-statistics-fn'])
                objStatusRecorder.record_status(
                    test_case_module_name,
                    StatusRecorder.ERROR_LOOP_TEST_RAISE_EXCEPTION, e)
                current_retry += 1

            if current_retry >= self.exec_config['max-retry']:
                break
        return return_result
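
A condensed sketch of the control flow above, assuming each round either passes or fails: a successfully analyzed round advances current_run, any failure advances current_retry, and the loop stops once either limit is hit. run_one_round is a hypothetical stand-in for the run/analyze pair:

def loop_until_done(run_one_round, max_run=30, max_retry=15):
    # Sketch only: current_run counts accepted rounds, current_retry counts failures.
    current_run, current_retry = 0, 0
    while current_run < max_run:
        try:
            passed = run_one_round()
        except Exception:
            passed = False
        if passed:
            current_run += 1
        else:
            current_retry += 1
        if current_retry >= max_retry:
            break
    return current_run, current_retry
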
Code example #3
File: generatorHelper.py Project: Mozilla-TWQA/Hasal
def validate_data(validator_settings, validator_data):
    """

    @param validator_settings: use validator name as key to define validator module and module path in dict type
    @param validator_data: use validator name as key to define validator data in dict type
    @return: validate_result , and validator name as key, include with validate result and output_result
    """
    validator_result = {'validate_result': False}
    for validator_name in validator_settings['modules'].keys():
        validator_class = getattr(importlib.import_module(validator_settings['modules'][validator_name]['path']),
                                  validator_name)
        validator_obj = validator_class()
        validate_result = validator_obj.validate(validator_data[validator_name])
        if not validate_result:
            logger.warning(
                "Validator[%s] validate data failed, output is [%s]." % (validator_name, validator_obj.get_output()))
            return validator_result
        else:
            validator_result[validator_name] = {'validate_result': validate_result,
                                                'output_result': validator_obj.get_output()}
    validator_result['validate_result'] = True

    # dump validate result to status file
    objStatusRecorder = StatusRecorder(validator_settings['status_file'])
    objStatusRecorder.record_current_status({objStatusRecorder.STATUS_VALIDATOR_RESULT: validator_result})
    return validator_result
Code example #4
File: sikuli.py Project: Mozilla-TWQA/Hasal
class Sikuli():

    KEY_NAME_CURRENT_STATUS = 'current_status'
    KEY_NAME_SIKULI = 'sikuli'
    KEY_NAME_SIKULI_ARGS = 'args'
    KEY_NAME_SIKULI_ADDITIONAL_ARGS_LIST = 'additional_args'

    def __init__(self, run_sikulix_cmd_path, hasal_dir, running_statistics_file_path=''):
        self.run_sikulix_cmd_str = run_sikulix_cmd_path + " -r "
        self.hasal_dir = os.path.abspath(hasal_dir)
        self.running_statistics_file_path = os.path.abspath(running_statistics_file_path)
        self.status_recorder = None
        self._check_status_recorder()

    def set_syspath(self, hasal_dir):
        """
        Get the Sikuli Library folder path.
        @param hasal_dir: the Hasal root folder.
        @return: the `lib/sikuli` folder path under Hasal root folder.
        """
        library_path = os.path.join(hasal_dir, "lib", "sikuli")
        sys.path.append(library_path)
        return library_path

    def _check_status_recorder(self):
        self.status_recorder = StatusRecorder(self.running_statistics_file_path)
        self.status_recorder.record_current_status({})

    def _load_current_status(self):
        if self.status_recorder:
            return self.status_recorder.get_current_status()
        else:
            return {}

    def _load_sikuli_status(self):
        if self.status_recorder:
            current_status = self._load_current_status()
            return current_status.get(self.KEY_NAME_SIKULI, {})
        else:
            return {}

    def set_sikuli_status(self, key, value):
        """
        Set up the key, value pair into status file.
        @param key:
        @param value:
        @return:
        """
        if self.status_recorder:
            current_status = self._load_current_status()
            if self.KEY_NAME_SIKULI in current_status:
                current_status[self.KEY_NAME_SIKULI].update({key: value})
            else:
                current_status[self.KEY_NAME_SIKULI] = {}
                current_status[self.KEY_NAME_SIKULI].update({key: value})
            self.status_recorder.record_current_status(current_status)

    def run_test(self, script_name, case_output_name, test_target="", script_dp=None, args_list=None):
        """
        Launch the given Sikuli test case through SikuliX.

        @param script_name: <SIKULI_CASE_NAME>
        @param case_output_name: <CASE_NAME>_<TIMESTAMP>
        @param test_target: the target URL address.
        @param script_dp: the Sikuli cases' folder path; defaults to the `tests` folder under the Hasal root.
        @param args_list: additional arguments recorded into the status file for the Sikuli script.
        @return: the exit status of the SikuliX command.
        """
        # avoid the mutable-default-argument pitfall
        if args_list is None:
            args_list = []
        script_dp = script_dp or os.path.join(self.hasal_dir, "tests")
        script_dir_path = os.path.join(script_dp, script_name + ".sikuli")

        default_args = {
            'case_output_name': str(case_output_name),
            'hasal_root_folder': self.hasal_dir,
            'stat_file_path': self.running_statistics_file_path
        }
        if test_target != "":
            default_args.update({'test_target': test_target})

        self.set_sikuli_status(self.KEY_NAME_SIKULI_ARGS, default_args)
        self.set_sikuli_status(self.KEY_NAME_SIKULI_ADDITIONAL_ARGS_LIST, args_list)

        args = [self.set_syspath(self.hasal_dir), self.running_statistics_file_path]

        return self.run_sikulix_cmd(script_dir_path, args)

    def run_sikulix_cmd(self, script_dir_path, args_list=None):
        # quote each argument so paths containing spaces survive the shell
        args_str = " ".join(['"{}"'.format(item) for item in (args_list or [])])
        cmd = self.run_sikulix_cmd_str + script_dir_path + " --args " + args_str
        return os.system(cmd)
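
A hedged usage sketch of the class above; the SikuliX launcher path, case name, and target URL are made-up values:

# Assumed paths and names, for illustration only.
sikuli = Sikuli(run_sikulix_cmd_path='/opt/sikulix/runsikulix',
                hasal_dir='.',
                running_statistics_file_path='running_statistics.json')
exit_status = sikuli.run_test(script_name='test_firefox_example',
                              case_output_name='test_firefox_example_1490000000',
                              test_target='https://example.com')
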
Code example #5
File: sikuli.py Project: Mozilla-TWQA/Hasal
 def _check_status_recorder(self):
     self.status_recorder = StatusRecorder(self.running_statistics_file_path)
     self.status_recorder.record_current_status({})
Code example #8
File: runtest.py Project: askeing/Hasal
    def run_test_result_analyzer(self, test_case_module_name, test_name,
                                 current_run, current_retry, video_result):
        run_result = None
        objStatusRecorder = StatusRecorder(
            self.global_config['default-running-statistics-fn'])
        if os.path.exists(
                self.global_config['default-current-running-status-fn']):
            with open(self.global_config['default-current-running-status-fn']
                      ) as stat_fh:
                run_result = json.load(stat_fh)
                round_status = int(run_result.get("round_status", -1))
                fps_stat = int(run_result.get("fps_stat", -1))
                if round_status == 0 and fps_stat == 0:
                    if self.online_config['enable']:
                        # Online mode handling
                        upload_result = self.upload_agent_obj.upload_result(
                            self.default_result_fp)
                        if upload_result:
                            self.logger.info("===== upload success =====")
                            self.logger.info(upload_result)
                            video_result['ip'] = upload_result['ip']
                            video_result['video_path'] = upload_result['video']
                            video_result['test_name'] = test_name
                            self.logger.info("===== upload success =====")
                            if "current_test_times" in upload_result:
                                current_run = upload_result[
                                    "current_test_times"]
                                self.exec_config['max-run'] = upload_result[
                                    'config_test_times']
                            else:
                                current_run += 1
                        else:
                            current_run += 1
                    else:
                        if run_result.get("comparing_image_missing", False):
                            if "time_list_counter" in run_result:
                                current_run = int(
                                    run_result['time_list_counter'])
                            else:
                                current_run += 1
                        else:
                            objStatusRecorder.record_status(
                                test_case_module_name,
                                StatusRecorder.ERROR_COMPARING_IMAGE_FAILED,
                                None)
                            current_retry += 1
                else:
                    if round_status != 0:
                        objStatusRecorder.record_status(
                            test_case_module_name,
                            StatusRecorder.ERROR_ROUND_STAT_ABNORMAL,
                            round_status)
                    if fps_stat != 0:
                        objStatusRecorder.record_status(
                            test_case_module_name,
                            StatusRecorder.ERROR_FPS_STAT_ABNORMAL,
                            round_status)
                    current_retry += 1
        else:
            self.logger.error("test could raise exception during execution!!")
            objStatusRecorder.record_status(
                test_case_module_name,
                StatusRecorder.ERROR_CANT_FIND_STATUS_FILE, None)
            current_retry += 1
        return current_run, current_retry, video_result
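
For reference, a hedged sketch (illustrative values only) of the current-running-status file that the analyzer above reads; the field names mirror the ones used in the code:

status_example = {
    "round_status": 0,                 # 0 means the Sikuli round finished normally
    "fps_stat": 0,                     # 0 means the recording FPS validation passed
    "time_list_counter": "3",          # optional; overrides current_run when present
    "comparing_image_missing": False   # offline-mode flag checked above
}
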
Code example #10
File: generatorHelper.py Project: Mozilla-TWQA/Hasal
def calculate(env, global_config, exec_config, index_config, firefox_config, upload_config, suite_upload_dp="", crop_data=None):
    """

    @param env: from lib.common.environment.py
    @param crop_data: sample crop data area
    @param calc_si: '1' or '0'
    @param waveform: 0~3
    @param revision:  upload to perfherder revision
    @param pkg_platform:  upload to perfherder pkg platform name
    @param suite_upload_dp: folder consolidate all execution result
    @param viewport: browser viewport region
    @return:
    """

    # assign the validation data
    validator_data = {global_config['default-file-exist-validator-name']: {'check_fp_list': [env.video_output_fp]}}
    validator_settings = {'modules': {global_config['default-file-exist-validator-name']: {'path': global_config['default-file-exist-validator-module-path']}}}

    if CommonUtil.is_validate_fps(firefox_config):
        validator_data[global_config['default-fps-validator-name']] = {'recording_log_fp': env.recording_log_fp,
                                                                       'default_fps': index_config['video-recording-fps']}
        validator_settings['modules'][global_config['default-fps-validator-name']] = {'path': global_config['default-fps-validator-module-path']}
    validator_settings['status_file'] = global_config['default-running-statistics-fn']

    # run the analysis only after validation passes
    validate_result = validate_data(validator_settings, validator_data)

    exec_timestamp_list = get_timestamp_json_data(env.DEFAULT_TIMESTAMP, env.INITIAL_TIMESTAMP_NAME)
    if validate_result['validate_result']:
        if not validate_result.get(global_config['default-fps-validator-name']):
            current_fps_value = index_config['video-recording-fps']
        else:
            current_fps_value = validate_result[global_config['default-fps-validator-name']]['output_result']
        # using a different converter introduces a slightly different time sequence;
        # the difference ranges from 0.000000000002 to 0.000000000004 ms (cv2 is lower than ffmpeg)
        converter_settings = {'modules': {index_config['image-converter-name']: {'path': index_config['image-converter-path']}}}
        converter_data = {
            index_config['image-converter-name']: {'video_fp': env.video_output_fp, 'output_img_dp': env.img_output_dp,
                                                   'convert_fmt': index_config['image-converter-format'],
                                                   'current_fps': current_fps_value,
                                                   'exec_timestamp_list': exec_timestamp_list,
                                                   'search_margin': index_config['search-margin']}}
        converter_result = run_modules(converter_settings, converter_data[index_config['image-converter-name']])

        generator_name = index_config['module-name']
        generator_module_path = index_config['module-path']

        sample_settings = {'modules': {index_config['sample-converter-name']: {'path': index_config['sample-converter-path']}}}
        sample_data = {'sample_dp': env.img_sample_dp,
                       'configuration': {'generator': {generator_name: {'path': generator_module_path}}},
                       'orig_sample': env.img_output_sample_1_fn,
                       'index_config': index_config,
                       'exec_config': exec_config,
                       'upload_config': upload_config,
                       'global_config': global_config,
                       'input_env': env}

        # {1:{'fp': 'xxcxxxx', 'RunTimeDctGenerator': 'dctobj', 'SpeedIndexGenerator': None, },
        #  2:{'fp':'xxxxx', 'SpeedIndexGenerator': None, 'crop_fp': 'xxxxxxx', 'viewport':'xxxxx'},
        #  }
        sample_result = run_modules(sample_settings, sample_data)
        generator_data = {'converter_result': converter_result[index_config['image-converter-name']], 'sample_result': sample_result[index_config['sample-converter-name']],
                          'exec_timestamp_list': exec_timestamp_list}

        generator_class = getattr(importlib.import_module(generator_module_path), generator_name)
        generator_obj = generator_class(index_config, exec_config, upload_config, global_config, env)
        start_time = time.time()
        generator_result = generator_obj.generate_result(generator_data)
        last_end = time.time()
        elapsed_time = last_end - start_time
        logger.debug(generator_result)
        logger.debug("Generator [%s] Time Elapsed: [%s]" % (generator_name, elapsed_time))

        # record fps_stat
        objStatusRecorder = StatusRecorder(global_config['default-running-statistics-fn'])
        if validate_result.get(global_config['default-fps-validator-name'], {}).get('validate_result', True):
            objStatusRecorder.record_current_status({objStatusRecorder.STATUS_FPS_VALIDATION: 0})
        else:
            objStatusRecorder.record_current_status({objStatusRecorder.STATUS_FPS_VALIDATION: 1})

        # write the case result to json
        generator_obj.output_case_result(suite_upload_dp)
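
To make the flow above easier to follow, a hedged sketch of the minimal config keys calculate() touches; the key names are taken from the code, while every value is a hypothetical placeholder:

global_config = {
    'default-file-exist-validator-name': 'FileExistValidator',           # assumed class name
    'default-file-exist-validator-module-path': 'lib.validator.fileExistValidator',
    'default-fps-validator-name': 'FPSValidator',                        # assumed class name
    'default-fps-validator-module-path': 'lib.validator.fpsValidator',
    'default-running-statistics-fn': 'running_statistics.json',
}
index_config = {
    'video-recording-fps': 90,
    'image-converter-name': 'Cv2Converter',                              # assumed converter name
    'image-converter-path': 'lib.converter.cv2Converter',
    'image-converter-format': 'bmp',
    'search-margin': 10,
    'sample-converter-name': 'SampleConverter',                          # assumed converter name
    'sample-converter-path': 'lib.converter.sampleConverter',
    'module-name': 'RunTimeDctGenerator',                                # name appears in the comment above
    'module-path': 'lib.generator.runtimeDctGenerator',                  # assumed path
}
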
 def loop_test(self, test_case_module_name, test_name, test_env, current_run=0, current_retry=0):
     objStatusRecorder = StatusRecorder(self.global_config['default-running-statistics-fn'])
     objStatusRecorder.set_case_basic_info(test_name)
     analyze_result = False
     while current_run < self.exec_config['max-run']:
         self.logger.info("The counter is %d and the retry counter is %d" % (current_run, current_retry))
         try:
             objStatusRecorder.clean_legacy_status()
             objStatusRecorder.record_case_exec_time_history(objStatusRecorder.STATUS_DESC_CASE_TOTAL_EXEC_TIME)
             self.kill_legacy_process()
             self.run_test(test_case_module_name, test_env)
             current_run, current_retry, analyze_result = self.run_test_result_analyzer(current_run, current_retry)
             objStatusRecorder.record_case_exec_time_history(objStatusRecorder.STATUS_DESC_CASE_TOTAL_EXEC_TIME)
         except Exception:
             self.logger.warn('Exception happened while running the test!')
             traceback.print_exc()
             objStatusRecorder.record_case_status_history(objStatusRecorder.STATUS_DESC_CASE_RUNNING_STATUS,
                                                          StatusRecorder.ERROR_LOOP_TEST_RAISE_EXCEPTION)
             current_retry += 1
         if current_retry >= self.exec_config['max-retry']:
             self.logger.warn("current retry [%s] exceed the max retry count [%s]" % (current_retry, self.exec_config['max-retry']))
             return False
     return analyze_result
    def run_test_result_analyzer(self, current_run, current_retry):
        status_result = None
        objStatusRecorder = StatusRecorder(self.global_config['default-running-statistics-fn'])
        if os.path.exists(self.global_config['default-running-statistics-fn']):
            status_result = objStatusRecorder.get_current_status()
            round_status = int(status_result.get(objStatusRecorder.STATUS_SIKULI_RUNNING_VALIDATION, -1))
            fps_stat = int(status_result.get(objStatusRecorder.STATUS_FPS_VALIDATION, -1))
            compare_img_result = status_result.get(objStatusRecorder.STATUS_IMG_COMPARE_RESULT, objStatusRecorder.ERROR_MISSING_FIELD_IMG_COMPARE_RESULT)

            if round_status == 0 and fps_stat == 0 and compare_img_result == objStatusRecorder.PASS_IMG_COMPARE_RESULT:
                # check the status_img_compare_result field of the current status in running_statistics.json;
                # only continue when the status equals PASS, otherwise bump the retry count

                if objStatusRecorder.STATUS_TIME_LIST_COUNTER in status_result:
                    current_run = int(status_result[objStatusRecorder.STATUS_TIME_LIST_COUNTER])
                else:
                    current_run += 1
                return current_run, current_retry, True
            else:
                if compare_img_result != objStatusRecorder.PASS_IMG_COMPARE_RESULT:
                    objStatusRecorder.record_case_status_history(compare_img_result, None)
                if round_status != 0:
                    objStatusRecorder.record_case_status_history(StatusRecorder.ERROR_ROUND_STAT_ABNORMAL, round_status)
                if fps_stat != 0:
                    objStatusRecorder.record_case_status_history(StatusRecorder.ERROR_FPS_STAT_ABNORMAL, round_status)
                current_retry += 1
        else:
            self.logger.error("test could raise exception during execution!!")
            objStatusRecorder.record_case_status_history(objStatusRecorder.STATUS_DESC_CASE_RUNNING_STATUS,
                                                         StatusRecorder.ERROR_CANT_FIND_STATUS_FILE)
            current_retry += 1
        return current_run, current_retry, False
    def upload_test_result_handler(self):
        # load results that previously failed to upload
        upload_result_data = CommonUtil.load_json_file(self.global_config['default-upload-result-failed-fn'])

        # init status recorder
        objStatusRecorder = StatusRecorder(self.global_config['default-running-statistics-fn'])

        # get case basic info
        case_time_stamp = objStatusRecorder.get_case_basic_info()[objStatusRecorder.DEFAULT_FIELD_CASE_TIME_STAMP]
        case_name = objStatusRecorder.get_case_basic_info()[objStatusRecorder.DEFAULT_FIELD_CASE_NAME]

        # get test result data by case name
        current_test_result = CommonUtil.load_json_file(self.global_config['default-result-fn']).get(case_name, {})

        if current_test_result:
            # get upload related data
            objGeneratePerfherderData = PerfherderUploadDataGenerator(case_name, current_test_result, self.upload_config, self.index_config)
            upload_result_data[case_time_stamp] = objGeneratePerfherderData.generate_upload_data()
        else:
            self.logger.error("Can't find result json file[%s], please check the current environment!" % self.global_config['default-result-fn'])

        server_url = '{protocol}://{host}'.format(protocol=self.upload_config['perfherder-protocol'],
                                                  host=self.upload_config['perfherder-host'])
        perfherder_uploader = PerfherderUploader(self.upload_config['perfherder-client-id'],
                                                 self.upload_config['perfherder-secret'],
                                                 os_name=sys.platform,
                                                 platform=self.upload_config['perfherder-pkg-platform'],
                                                 machine_arch=self.upload_config['perfherder-pkg-platform'],
                                                 build_arch=self.upload_config['perfherder-pkg-platform'],
                                                 server_url=server_url,
                                                 repo=self.upload_config['perfherder-repo'])

        upload_success_timestamp_list = []
        for current_time_stamp in upload_result_data:
            # upload the video first; if it fails, log the error and leave the video link blank
            if not upload_result_data[current_time_stamp]['video_link']:
                if upload_result_data[current_time_stamp]['upload_video_fp']:
                    upload_result_data[current_time_stamp]['video_link'] = {
                        "adjusted_running_video": VideoUploader.upload_video(upload_result_data[current_time_stamp]['upload_video_fp'])
                    }
                else:
                    self.logger.error("Can't find the upload video fp in result json file!")

            # even if the video upload failed, continue uploading the data and leave the video link blank
            uploader_response = perfherder_uploader.submit(upload_result_data[current_time_stamp]['revision'],
                                                           upload_result_data[current_time_stamp]['browser'],
                                                           upload_result_data[current_time_stamp]['timestamp'],
                                                           upload_result_data[current_time_stamp]['perf_data'],
                                                           upload_result_data[current_time_stamp]['version'],
                                                           upload_result_data[current_time_stamp]['repo_link'],
                                                           upload_result_data[current_time_stamp]['video_link'],
                                                           upload_result_data[current_time_stamp]['extra_info_obj'])

            if uploader_response:
                if uploader_response.status_code == requests.codes.ok:
                    upload_success_timestamp_list.append(current_time_stamp)
                    self.logger.debug("upload to perfherder success, result ::: %s" % upload_result_data[current_time_stamp])
                    self.logger.info("upload to perfherder success, status code: [%s], json: [%s]" % (uploader_response.status_code, uploader_response.json()))
                else:
                    upload_result_data[current_time_stamp]['upload_status_code'] = uploader_response.status_code
                    upload_result_data[current_time_stamp]['upload_status_json'] = uploader_response.json()
                    self.logger.info("upload to perfherder failed, status code: [%s], json: [%s]" % (uploader_response.status_code, uploader_response.json()))
            else:
                self.logger.info("upload to perfherder failed, unknown exception happened in submitting to perfherder")

        # remove successfully uploaded timestamps from the upload result data
        for del_time_stamp in upload_success_timestamp_list:
            upload_result_data.pop(del_time_stamp)

        # dump all remaining failed-upload data back to the json file
        with open(self.global_config['default-upload-result-failed-fn'], 'w') as write_fh:
            json.dump(upload_result_data, write_fh)