    def test_hello_classification_check_unicode_path_support(self, param):
        """
        Check UNICODE characters in paths.
        """
        #  Make temporary dirs, prepare temporary input data and temporary model
        if sys.platform.startswith(
                "win"
        ):  # issue 71298 needs a fix; then add the condition: and param.get('sample_type') == "C"
            pytest.skip("C sample doesn't support unicode paths on Windows")

        tmp_dir_path = Path(
            os.path.join(os.environ.get('WORKSPACE'),
                         f"tmp_dir_for_{self.sample_name}"))
        tmp_image_dir = tmp_dir_path / 'image'
        tmp_model_dir = tmp_dir_path / 'model'

        if tmp_dir_path.exists():
            shutil.rmtree(tmp_dir_path)

        tmp_image_dir.mkdir(parents=True)  # make tmp_dir_path too
        tmp_model_dir.mkdir()

        # Copy files
        shutil.copy(
            Path(Environment.env['test_data']) / Path(param['i']),
            tmp_image_dir)
        shutil.copy(
            Path(Environment.env['models_path']) / 'public' / Path(param['m']),
            tmp_model_dir)
        shutil.copy(
            Path(Environment.env['models_path']) / 'public' /
            Path(param['m'].replace('.xml', '.bin')), tmp_model_dir)

        image_path = tmp_image_dir / Path(param['i']).name
        original_image_name = image_path.name.split(sep='.')[0]

        model_path = tmp_model_dir / Path(param['m']).name
        original_model_name = model_path.name.split(sep='.')[0]
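        # Keep the base names (without extension) so the unicode suffixes below can be appended when the files are renamed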

        # List of encoded words
        # All b'...' lines are disabled by default. If you want to use them, check the 'Testing' block (see below)
        encoded_words = [
            b'\xd1\x80\xd1\x83\xd1\x81\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9',  # russian
            b'\xd7\xa2\xd7\x91\xd7\xa8\xd7\x99\xd7\xaa',  # hebrew
            b'\xc4\x8desky',  # cesky
            b'\xe4\xb8\xad\xe5\x9b\xbd\xe4\xba\xba',  # chinese
            b'\xed\x95\x9c\xea\xb5\xad\xec\x9d\xb8',  # korean
            b'\xe6\x97\xa5\xe6\x9c\xac\xe4\xba\xba',  # japanese
            # all of the above combined
            b'\xd1\x80\xd1\x83\xd1\x81\xd1\x81\xd0\xba\xd0\xb8\xd0\xb9_\xd7\xa2\xd7\x91\xd7\xa8\xd7\x99\xd7\xaa_\xc4\x8desky_\xe4\xb8\xad\xe5\x9b\xbd\xe4\xba\xba_\xed\x95\x9c\xea\xb5\xad\xec\x9d\xb8_\xe6\x97\xa5\xe6\x9c\xac\xe4\xba\xba'
        ]
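        # To exercise each language separately, iterate over encoded_words instead of [encoded_words[-1]]
        # in the 'Testing' block below (each iteration renames the files again)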

        # Reference run
        log.info("Reference run ...")
        self.made_executable_path(os.environ.get('IE_APP_PATH'),
                                  self.sample_name,
                                  sample_type=param.get('sample_type'))
        cmd_line = f"{model_path} {image_path} {param.get('d', 'CPU')}"
        ref_retcode, ref_stdout, ref_stderr = shell(
            [self.executable_path, cmd_line])

        if ref_retcode != 0:
            log.error("Reference run FAILED with error:")
            log.error(ref_stderr)
            raise AssertionError("Sample execution failed!")
        log.info(ref_stdout)

        ref_probs = []
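        # Each matching stdout line is expected to look like "<class_id> <probability>",
        # e.g. "283 0.7561" (illustrative values)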
        for line in ref_stdout.split(sep='\n'):
            if re.match(r"\d+\s+\d+\.\d+", line):
                prob_class = int(line.split()[0])
                prob = float(line.split()[1])
                ref_probs.append((prob_class, prob))

        #  Testing
        errors_list = []
        passed = True

        for image_name in [encoded_words[-1]]:
            for model_name in [encoded_words[-1]]:

                new_image_path = tmp_image_dir / (
                    original_image_name +
                    f"_{image_name.decode('utf-8')}{image_path.suffix}")
                image_path.rename(new_image_path)
                image_path = new_image_path

                new_model_path = tmp_model_dir / (
                    original_model_name + f"_{model_name.decode('utf-8')}.xml")
                model_path.rename(new_model_path)
                Path(str(model_path).replace('.xml', '.bin')).rename(
                    Path(str(new_model_path).replace('.xml', '.bin')))
                model_path = new_model_path

                cmd_line = f"{model_path} {image_path} {param.get('d', 'CPU')}"

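                # On Windows the sample is launched via subprocess with UTF-8 decoding of stdout/stderr
                # so unicode paths in the output are handled correctly; elsewhere the shared shell() helper is used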
                if sys.platform.startswith('win'):
                    subproc = subprocess.Popen(
                        f"{self.executable_path} {cmd_line}",
                        shell=True,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        encoding='utf-8')
                    (stdout, stderr) = subproc.communicate()
                    retcode = subproc.returncode

                    if param['sample_type'] == 'C':
                        print(f"STDOUT:\n"
                              f"{stdout}\n\n"
                              f"STDERR:\n"
                              f"{stderr}\n\n"
                              f"RETCODE:\n"
                              f"{retcode}\n\n")
                else:
                    retcode, stdout, stderr = shell(
                        [self.executable_path, cmd_line])

                if retcode != 0:
                    passed = False
                    errors_list.append({
                        'image_additional_name':
                        image_name.decode('utf-8'),
                        'model_additional_name':
                        model_name.decode('utf-8'),
                        'error':
                        stderr
                    })

                probs = []
                for line in stdout.split(sep='\n'):
                    if re.match(r"^\d+\s+\d+\.\d+", line):
                        prob_class = int(line.split()[0])
                        prob = float(line.split()[1])
                        probs.append((prob_class, prob))

                if ref_probs == probs:
                    log.info('Accuracy passed. \n')
                else:
                    passed = False
                    errors_list.append({
                        'image_additional_name':
                        image_name.decode('utf-8'),
                        'model_additional_name':
                        model_name.decode('utf-8'),
                        'error':
                        "Accuracy failed!"
                    })

        if passed:
            shutil.rmtree(tmp_dir_path)
            log.info(
                "UNICODE check passed. Temporary files and directories have been deleted."
            )
        else:
            log.error(
                "UNICODE check failed. Temporary files and directories have not been deleted."
            )
            raise AssertionError("Sample execution failed!")
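
    # Usage sketch for _test (hypothetical parameter values), e.g. a negative test that checks stderr itself:
    #     retcode, stdout, stderr = self._test({'m': 'model.xml', 'i': 'cat.bmp', 'd': 'CPU'},
    #                                          use_preffix=False, get_shell_result=True)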
    def _test(self,
              param,
              use_preffix=True,
              get_cmd_func=None,
              get_shell_result=False,
              long_hyphen=None):
        """
        :param param: dictionary of test parameters
        :param use_preffix: set it to False when the sample doesn't take keys (i.e. hello_classification <path_to_model> <path_to_image>
        instead of hello_classification -m <path_to_model> -i <path_to_image>)
        :param get_cmd_func: to use a specific cmd concatenation function, e.g. for the hello_request_classification sample
        :param get_shell_result: to return the result of the sample run (retcode, stdout, stderr) directly, \
                                 without failing inside the _test function. Needed for negative test cases \
                                 (e.g. error message validation)
        :param long_hyphen: to concatenate cmd params with '--' instead of '-', example: instance_segmentation_demo --labels
        :return:
        """
        # Copy param to another variable, because the original parameters need to be preserved without changes
        param_cp = dict(param)
        sample_type = param_cp.get('sample_type', "C++")
        if 'python' in sample_type.lower():
            if 'benchmark_app' in self.sample_name:
                assert os.environ.get('IE_APP_PYTHON_TOOL_PATH') is not None, \
                    "IE_APP_PYTHON_TOOL_PATH environment variable is not specified!"
                self.made_executable_path(
                    os.environ.get('IE_APP_PYTHON_TOOL_PATH'),
                    self.sample_name,
                    sample_type=sample_type)
            else:
                assert os.environ.get('IE_APP_PYTHON_PATH') is not None, \
                    "IE_APP_PYTHON_PATH environment variable is not specified!"
                self.made_executable_path(os.environ.get('IE_APP_PYTHON_PATH'),
                                          self.sample_name,
                                          sample_type=sample_type)
        else:
            self.made_executable_path(os.environ.get('IE_APP_PATH'),
                                      self.sample_name,
                                      sample_type=sample_type)

        if not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)

        if 'bitstream' in param_cp:
            del param_cp['bitstream']

        if 'precision' in param_cp:
            del param_cp['precision']

        if get_cmd_func is None:
            get_cmd_func = self.get_cmd_line

        self.join_env_path(param_cp, executable_path=self.executable_path)

        # Update all attributes in the original dictionary (param), because param_cp was changed by join_env_path
        for key in param.keys():
            if key in param_cp:
                param[key] = param_cp[key]

        if 'sample_type' in param_cp:
            del param_cp['sample_type']

        cmd_line = get_cmd_func(param_cp,
                                use_preffix=use_preffix,
                                long_hyphen=long_hyphen)

        log.info("Running command: {} {}".format(self.executable_path,
                                                 cmd_line))
        retcode, stdout, stderr = shell([self.executable_path, cmd_line])

        # Performance run: when Environment.env['performance'] is set and the sample succeeded, re-run it and collect FPS
        if Environment.env['performance'] and retcode == 0:
            perf_iter = int(Environment.env['performance'])
            # Check if the sample is suitable for performance testing: FPS must appear in its output
            is_perf = self.check_is_perf(stdout.split('\n'))
            is_niter = self.check_has_niter(param_cp)
            if not is_perf:
                # Skip all tests for this sample, because none of them are ready for performance.
                # Add the sample name to a global pytest variable, then skip it in the setup method
                if 'list_of_skipped_samples' in Environment.env:
                    Environment.env['list_of_skipped_samples'].append(
                        self.sample_name)
                else:
                    Environment.env.update(
                        {'list_of_skipped_samples': [self.sample_name]})
                pytest.skip(
                    '[INFO] Sample {} not executed for performance'.format(
                        self.executable_path))
            else:
                log.info(
                    'Running performance for {} iterations'.format(perf_iter))
                # perf_iter = 0 when it isn't necessary to add the niter key
                if perf_iter > 0:
                    if is_niter:
                        log.warning(
                            'Changed value of niter param to {}'.format(
                                perf_iter))
                        param_cp['niter'] = perf_iter
                    else:
                        log.warning(
                            'Added key: niter to param with value: {}'.format(
                                perf_iter))
                        param_cp.update({'niter': perf_iter})
                cmd_perf = get_cmd_func(param_cp,
                                        use_preffix=use_preffix,
                                        long_hyphen=long_hyphen)
                retcode_perf, stdout_perf, stderr_perf = shell(
                    [self.executable_path, cmd_perf])
                if (retcode_perf != 0):
                    log.error(stderr_perf)
                assert retcode_perf == 0, "Sample execution for performance failed"
                fps_perf = self.find_fps(stdout_perf)
                self.write_csv(sample_name=self.sample_name,
                               sample_type=sample_type,
                               cmd_perf=cmd_perf,
                               fps_perf=fps_perf)
                log.info('Perf results: {}'.format(fps_perf))
        if get_shell_result:
            return retcode, stdout, stderr
        # Check return code
        if (retcode != 0):
            log.error(stderr)
        assert retcode == 0, "Sample execution failed"
        return stdout