Code example #1
File: test_shortcuts.py  Project: liuyq/squad-client
    def test_malformed_data(self):
        # job_id already exists
        metadata = {"job_id": "12345", "a-metadata-field": "value"}
        tests = {
            "test-malformed": "pass",
            "testb": {
                "result": "pass",
                "log": "the log"
            },
        }
        metrics = {"metrica": 42}

        with self.assertLogs(logger='squad_client.core.models',
                             level=logging.ERROR) as cm:
            success = submit_results(
                group_project_slug="my_group/my_project",
                build_version="my_build",
                env_slug="my_env",
                tests=tests,
                metrics=metrics,
                metadata=metadata,
            )

            self.assertIn(
                'ERROR:squad_client.core.models:Failed to submit results: There is already a test run with job_id 12345',
                cm.output)

        results = self.squad.tests(name="test-malformed")
        self.assertTrue(len(results) == 0)
        self.assertFalse(success)
Code example #2
    def send_to_qa_reports(self):
        if None in (self.qa_reports_server, self.qa_reports_token, self.qa_reports_group, self.qa_reports_project, self.qa_reports_build_version, self.qa_reports_env):
            self.logger.warning("All parameters for qa reports are not set, results will not be pushed to qa reports")
            return

        SquadApi.configure(
            url=self.qa_reports_server, token=self.qa_reports_token
        )
        tests = {}
        metrics = {}
        for metric in self.metrics:
            if metric['measurement'] != "":
                metrics["{}/{}".format(self.test['test_name'], metric['test_case_id'])] = metric['measurement']
            else:
                tests["{}/{}".format(self.test['test_name'], metric['test_case_id'])] = metric['result']

        with open("{}/stdout.log".format(self.test['test_path']), "r") as logfile:
            log = logfile.read()

        metadata = {}
        if not self.qa_reports_disable_metadata:
            if self.qa_reports_metadata:
                metadata.update(self.qa_reports_metadata)
            if self.qa_reports_metadata_file:
                try:
                    with open(self.qa_reports_metadata_file, "r") as metadata_file:
                        loaded_metadata = yaml.load(metadata_file, Loader=yaml.SafeLoader)
                        # check if loaded metadata is key=value and both are strings
                        for key, value in loaded_metadata.items():
                            if type(key) == str and type(value) == str:
                                # only update metadata with simple keys
                                # ignore all other items in the dictionary
                                metadata.update({key: value})
                            else:
                                self.logger.warning("Ignoring key: %s" % key)
                except FileNotFoundError:
                    self.logger.warning("Metadata file not found")
                except PermissionError:
                    self.logger.warning("Insufficient permissions to open metadata file")
        if submit_results(
                group_project_slug="{}/{}".format(self.qa_reports_group, self.qa_reports_project),
                build_version=self.qa_reports_build_version,
                env_slug=self.qa_reports_env,
                tests=tests,
                metrics=metrics,
                log=log,
                metadata=metadata,
                attachments=None):
            self.logger.info("Results pushed to QA Reports")
        else:
            self.logger.warning("Results upload to QA Reports failed!")
Code example #3
    def run(self, args):
        builds = self._load_builds(args.tuxbuild)

        # nothing to submit if the tuxbuild file could not be loaded
        if builds is None:
            return False

        try:
            jsonschema.validate(instance=builds, schema=tuxbuild_schema)
        except jsonschema.exceptions.ValidationError as ve:
            logger.error("Failed to validate tuxbuild data: %s", ve)
            return False

        data = {}
        for build in builds:
            arch = build["target_arch"]
            description = build["git_describe"]
            kconfig = build["kconfig"]
            status = build["build_status"]
            toolchain = build["toolchain"]
            test = self._get_test_name(kconfig, toolchain)

            multi_key = (description, arch)
            if multi_key not in data:
                data[multi_key] = {}

            data[multi_key].update({test: status})

        for key, result in data.items():
            description, arch = key
            submit_results(
                group_project_slug="%s/%s" % (args.group, args.project),
                build_version=description,
                env_slug=arch,
                tests=result,
            )

        return True
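
For illustration, a hypothetical shape of the `data` dictionary built by the aggregation loop above, keyed by (git_describe, target_arch) and mapping each test name to its build status (all values below are made up):

# Hypothetical contents of `data` after the aggregation loop;
# each (description, arch) pair gets one submit_results() call.
data = {
    ("v5.10-rc1", "arm64"): {
        "defconfig-gcc-9": "pass",
        "allmodconfig-clang-10": "fail",
    },
    ("v5.10-rc1", "x86_64"): {
        "defconfig-gcc-9": "pass",
    },
}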
Code example #4
File: submit_tuxbuild.py  Project: liuyq/squad-client
    def run(self, args):
        try:
            builds = load_builds(args.tuxbuild)
        except InvalidBuildJson as ibj:
            logger.error("Failed to load build json: %s", ibj)
            return False
        except OSError as ose:
            logger.error("Failed to load build json: %s", ose)
            return False

        try:
            jsonschema.validate(instance=builds, schema=TUXBUILD_SCHEMA)
        except jsonschema.exceptions.ValidationError as ve:
            logger.error("Failed to validate tuxbuild data: %s", ve)
            return False

        for build in builds:
            arch = build["target_arch"]
            description = build["git_describe"]
            warnings_count = build["warnings_count"]
            test_name = create_name(build)
            test_status = build["build_status"]
            duration = build["duration"]

            tests = {test_name: test_status}
            metrics = {test_name + '-warnings': warnings_count}
            metrics.update({test_name + '-duration': duration})

            submit_results(
                group_project_slug="%s/%s" % (args.group, args.project),
                build_version=description,
                env_slug=arch,
                tests=tests,
                metrics=metrics,
                metadata=create_metadata(build),
            )

        return True
Code example #5
    def run(self, args):
        builds = self._load_builds(args.tuxbuild)

        # nothing to submit if the tuxbuild file could not be loaded
        if builds is None:
            return False

        try:
            jsonschema.validate(instance=builds, schema=tuxbuild_schema)
        except jsonschema.exceptions.ValidationError as ve:
            logger.error("Failed to validate tuxbuild data: %s", ve)
            return False

        for build in builds:
            arch = build["target_arch"]
            description = build["git_describe"]
            kconfig = build["kconfig"]
            toolchain = build["toolchain"]
            warnings_count = build["warnings_count"]
            test_name = self._get_test_name(kconfig, toolchain)
            test_status = build["build_status"]
            duration = build["duration"]

            tests = {test_name: test_status}
            metrics = {test_name + '-warnings': warnings_count}
            metrics.update({test_name + '-duration': duration})

            submit_results(group_project_slug="%s/%s" %
                           (args.group, args.project),
                           build_version=description,
                           env_slug=arch,
                           tests=tests,
                           metrics=metrics,
                           metadata=self._build_metadata(build))

        return True
Code example #6
    def test_basic(self):
        metadata = {"job_id": "12345", "a-metadata-field": "value"}
        tests = {"testa": "pass", "testb": {"result": "pass", "log": "the log"}}
        metrics = {"metrica": 42}
        success = submit_results(
            group_project_slug="my_group/my_project",
            build_version="my_build",
            env_slug="my_env",
            tests=tests,
            metrics=metrics,
            metadata=metadata,
        )

        results = self.squad.tests(name="testa")
        self.assertTrue(len(results) > 0)
        self.assertTrue(success)
Code example #7
    def test_malformed_data(self):
        # job_id already exists
        metadata = {"job_id": "12345", "a-metadata-field": "value"}
        tests = {
            "test-malformed": "pass",
            "testb": {
                "result": "pass",
                "log": "the log"
            },
        }
        metrics = {"metrica": 42}
        success = submit_results(
            group_project_slug="my_group/my_project",
            build_version="my_build",
            env_slug="my_env",
            tests=tests,
            metrics=metrics,
            metadata=metadata,
        )

        results = self.squad.tests(name="test-malformed")
        self.assertTrue(len(results) == 0)
        self.assertFalse(success)
Code example #8
    def run(self, args):
        results_dict = {}
        metrics_dict = {}
        metadata_dict = None
        logs_file = None
        if args.result_name:
            if not args.result_value:
                logger.error("Test result value is required")
                return False
            results_dict = {args.result_name: args.result_value}

        if args.results:
            if args.results_layout == 'tuxbuild_json':
                results_dict = self._load_tuxbuild_json(args.results)
            else:
                results_dict = self.__read_input_file(args.results)

            if results_dict is None:
                return False

        if args.metrics:
            metrics_dict = self.__read_input_file(args.metrics)
            if metrics_dict is None:
                return False

        if args.result_name is None and args.results is None and args.metrics is None:
            logger.error(
                "At least one of --result-name, --results, --metrics is required"
            )
            return False

        if args.metadata:
            metadata_dict = self.__read_input_file(args.metadata)
            if metadata_dict is None:
                return False

        if args.logs:
            if not self.__check_file(args.logs):
                return False
            with open(args.logs, "r") as logs_file_source:
                logs_file = logs_file_source.read()

        for filename in args.attachments:
            if not self.__check_file(filename):
                return False

        if results_dict:
            # check dictionary correctness
            for key, value in iter(results_dict.items()):
                if type(key) is not str:
                    logger.error("Non-string key detected")
                    return False
                if type(value) not in [str, dict]:
                    logger.error("Incompatible results detected")
                    return False

        if metrics_dict:
            # check dictionary correctness
            for key, value in iter(metrics_dict.items()):
                if type(key) is not str:
                    logger.error("Non-string key detected")
                    return False
                if type(value) not in [float, int, list]:
                    logger.error("Incompatible metrics detected")
                    return False

        if metadata_dict:
            # check dictionary correctness
            for key, value in iter(metadata_dict.items()):
                if type(key) is not str:
                    logger.error("Non-string key detected")
                    return False
                if type(value) not in [str, dict]:
                    logger.error("Incompatible results detected")
                    return False

        submit_results(
            group_project_slug="%s/%s" % (args.group, args.project),
            build_version=args.build,
            env_slug=args.environment,
            tests=results_dict,
            metrics=metrics_dict,
            log=logs_file,
            metadata=metadata_dict,
        )

        return True
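
Taken together, the examples above boil down to one pattern: configure the SQUAD API endpoint, then call the submit_results shortcut with a group/project slug, build version, environment slug, and the tests/metrics/metadata dictionaries. Below is a minimal sketch of that pattern; the URL, token, and values are placeholders, and the import paths are assumed from the squad-client layout suggested by the file names above.

# Minimal sketch of the submit_results pattern used throughout the
# examples above (placeholder URL, token, and values).
from squad_client.core.api import SquadApi
from squad_client.shortcuts import submit_results

SquadApi.configure(url="https://qa-reports.example.com", token="secret-token")

success = submit_results(
    group_project_slug="my_group/my_project",  # "<group>/<project>" slug
    build_version="v1.0",                      # build identifier
    env_slug="x86_64",                         # environment slug
    tests={"suite/testa": "pass"},             # name -> result, or dict with "result"/"log"
    metrics={"suite/metrica": 42},             # name -> numeric measurement
    metadata={"job_id": "12345"},              # free-form key/value metadata
)
if not success:
    print("submission failed")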