Example #1
    def test_metastore(self):
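        """Check that an oversized 'extra' entry is written out to a separate
        metadata store file and that the stored TestCase metadata records the
        path to that file."""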
        field = TestCase._meta.get_field('metadata')
        level = '1.3.5.1'
        # artificially inflate results to represent a set of kernel messages
        results = {
            'definition': 'lava',
            'case': 'unit-test',
            # list of numbers, generates a much longer YAML string than just the count
            'extra': range(int(field.max_length / 2)),
            'result': 'pass'
        }
        stub = "%s-%s-%s.yaml" % (results['definition'], results['case'], level)
        job = TestJob.from_yaml_and_user(
            self.factory.make_job_yaml(), self.user)
        meta_filename = os.path.join(job.output_dir, 'metadata', stub)
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (job.output_dir,
                                                         job.id, level.split('.')[0],
                                                         level, results['definition'])

        mkdir(os.path.dirname(filename))
        if os.path.exists(meta_filename):
            # isolate from other unit tests
            os.unlink(meta_filename)
        self.assertEqual(meta_filename, create_metadata_store(results, job, level))
        self.assertTrue(map_scanned_results(results, job, meta_filename))
        self.assertEqual(TestCase.objects.filter(name='unit-test').count(), 1)
        test_data = yaml.load(TestCase.objects.filter(name='unit-test')[0].metadata, Loader=yaml.CLoader)
        self.assertEqual(test_data['extra'], meta_filename)
        self.assertTrue(os.path.exists(meta_filename))
        with open(test_data['extra'], 'r') as extra_file:
            data = yaml.load(extra_file, Loader=yaml.CLoader)
        self.assertIsNotNone(data)
        os.unlink(meta_filename)
        shutil.rmtree(job.output_dir)
Example #2
    def test_metastore(self):
        field = TestCase._meta.get_field("metadata")
        level = "1.3.5.1"
        # artificially inflate results to represent a set of kernel messages
        results = {
            "definition": "lava",
            "case": "unit-test",
            "level": level,
            # list of numbers, generates a much longer YAML string than just the count
            "extra": range(int(field.max_length / 2)),
            "result": "pass",
        }
        stub = "%s-%s-%s.yaml" % (results["definition"], results["case"],
                                  level)
        job = TestJob.from_yaml_and_user(self.factory.make_job_yaml(),
                                         self.user)
        meta_filename = os.path.join(job.output_dir, "metadata", stub)
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (
            job.output_dir,
            job.id,
            level.split(".")[0],
            level,
            results["definition"],
        )

        mkdir(os.path.dirname(filename))
        if os.path.exists(meta_filename):
            # isolate from other unit tests
            os.unlink(meta_filename)
        self.assertEqual(meta_filename, create_metadata_store(results, job))
        ret = map_scanned_results(results, job, {}, meta_filename)
        self.assertIsNotNone(ret)
        ret.save()
        self.assertEqual(TestCase.objects.filter(name="unit-test").count(), 1)
        test_data = yaml.load(  # nosec - unit test
            TestCase.objects.filter(name="unit-test")[0].metadata,
            Loader=yaml.CLoader)
        self.assertEqual(test_data["extra"], meta_filename)
        self.assertTrue(os.path.exists(meta_filename))
        with open(test_data["extra"], "r") as extra_file:
            data = yaml.load(extra_file,
                             Loader=yaml.CLoader)  # nosec - unit test
        self.assertIsNotNone(data)
        os.unlink(meta_filename)
        shutil.rmtree(job.output_dir)
Example #3
    def test_metastore(self):
        field = TestCase._meta.get_field('metadata')
        level = '1.3.5.1'
        # artificially inflate results to represent a set of kernel messages
        results = {
            'definition': 'lava',
            'case': 'unit-test',
            'level': level,
            # list of numbers, generates a much longer YAML string than just the count
            'extra': range(int(field.max_length / 2)),
            'result': 'pass'
        }
        stub = "%s-%s-%s.yaml" % (results['definition'], results['case'],
                                  level)
        job = TestJob.from_yaml_and_user(self.factory.make_job_yaml(),
                                         self.user)
        meta_filename = os.path.join(job.output_dir, 'metadata', stub)
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (
            job.output_dir, job.id, level.split('.')[0], level,
            results['definition'])

        mkdir(os.path.dirname(filename))
        if os.path.exists(meta_filename):
            # isolate from other unit tests
            os.unlink(meta_filename)
        self.assertEqual(meta_filename, create_metadata_store(results, job))
        ret = map_scanned_results(results, job, {}, meta_filename)
        self.assertIsNotNone(ret)
        ret.save()
        self.assertEqual(TestCase.objects.filter(name='unit-test').count(), 1)
        test_data = yaml.load(
            TestCase.objects.filter(name='unit-test')[0].metadata,
            Loader=yaml.CLoader)
        self.assertEqual(test_data['extra'], meta_filename)
        self.assertTrue(os.path.exists(meta_filename))
        with open(test_data['extra'], 'r') as extra_file:
            data = yaml.load(extra_file, Loader=yaml.CLoader)
        self.assertIsNotNone(data)
        os.unlink(meta_filename)
        shutil.rmtree(job.output_dir)
Example #4
    def logging_socket(self, options):
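        """Receive one multipart log message from the pull socket, append it
        to the per-job pipeline log file and, for "results" messages, map the
        scanned results into the database."""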
        msg = self.pull_socket.recv_multipart()
        try:
            (job_id, level, name, message) = msg  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("Failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except KeyError:
            self.logger.error(
                "[%s] Invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Reject level or name values that contain path separators
        if '/' in level or '/' in name:
            self.logger.error(
                "[%s] Wrong level or name received, dropping the message",
                job_id)
            return

        # Find the handler (if available)
        if job_id in self.jobs:
            if level != self.jobs[job_id].current_level:
                # Close the old file handler
                self.jobs[job_id].sub_log.close()
                filename = os.path.join(self.jobs[job_id].output_dir,
                                        "pipeline",
                                        level.split('.')[0],
                                        "%s-%s.yaml" % (level, name))
                mkdir(os.path.dirname(filename))
                self.jobs[job_id].current_level = level
                self.jobs[job_id].sub_log = open(filename, 'a+')
        else:
            # Query the database for the job
            try:
                job = TestJob.objects.get(id=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] Unknown job id", job_id)
                return

            self.logger.info("[%s] Receiving logs from a new job", job_id)
            filename = os.path.join(job.output_dir, "pipeline",
                                    level.split('.')[0],
                                    "%s-%s.yaml" % (level, name))
            # Create the sub directories (if needed)
            mkdir(os.path.dirname(filename))
            self.jobs[job_id] = JobHandler(job, level, filename)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] Unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job, level)
            ret = map_scanned_results(results=message_msg,
                                      job=job,
                                      meta_filename=meta_filename)
            if not ret:
                self.logger.warning("[%s] Unable to map scanned results: %s",
                                    job_id, message)

        # Mark the file handler as used
        self.jobs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        self.jobs[job_id].write(message)
Example #5
    def logging_socket(self):
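        """Handle one log message: write it to the job's log, publish "event"
        messages, record "marker" line positions, and for "results" messages
        create test cases and update the job health once the lava.job result
        arrives."""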
        msg = self.log_socket.recv_multipart()
        try:
            (job_id, message) = (u(m) for m in msg)  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("[POLL] failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except TypeError:
            self.logger.error("[%s] not a dictionary, dropping", job_id)
            return
        except KeyError:
            self.logger.error(
                "[%s] invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Find the handler (if available)
        if job_id not in self.jobs:
            # Query the database for the job
            try:
                job = TestJob.objects.get(id=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return

            self.logger.info("[%s] receiving logs from a new job", job_id)
            # Create the sub directories (if needed)
            mkdir(job.output_dir)
            self.jobs[job_id] = JobHandler(job)

        # For 'event', send an event and log as 'debug'
        if message_lvl == 'event':
            self.logger.debug("[%s] event: %s", job_id, message_msg)
            send_event(".event", "lavaserver", {"message": message_msg, "job": job_id})
            message_lvl = "debug"
        # For 'marker', save in the database and log as 'debug'
        elif message_lvl == 'marker':
            # TODO: save on the file system in case of lava-logs restart
            m_type = message_msg.get("type")
            case = message_msg.get("case")
            if m_type is None or case is None:
                self.logger.error("[%s] invalid marker: %s", job_id, message_msg)
                return
            self.jobs[job_id].markers.setdefault(case, {})[m_type] = self.jobs[job_id].line_count()
            # This is in fact the previous line
            self.jobs[job_id].markers[case][m_type] -= 1
            self.logger.debug("[%s] marker: %s line: %s", job_id, message_msg,
                              self.jobs[job_id].markers[case][m_type])
            return

        # Mark the file handler as used
        self.jobs[job_id].last_usage = time.time()
        # The format is a list of dictionaries
        self.jobs[job_id].write("- %s" % message)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job)
            new_test_case = map_scanned_results(results=message_msg, job=job,
                                                markers=self.jobs[job_id].markers,
                                                meta_filename=meta_filename)

            if new_test_case is None:
                self.logger.warning(
                    "[%s] unable to map scanned results: %s",
                    job_id, message)
            else:
                self.test_cases.append(new_test_case)

            # Look for lava.job result
            if message_msg.get("definition") == "lava" and message_msg.get("case") == "job":
                # Flush cached test cases
                self.flush_test_cases()

                if message_msg.get("result") == "pass":
                    health = TestJob.HEALTH_COMPLETE
                    health_msg = "Complete"
                else:
                    health = TestJob.HEALTH_INCOMPLETE
                    health_msg = "Incomplete"
                self.logger.info("[%s] job status: %s", job_id, health_msg)

                infrastructure_error = (message_msg.get("error_type") in ["Bug",
                                                                          "Configuration",
                                                                          "Infrastructure"])
                if infrastructure_error:
                    self.logger.info("[%s] Infrastructure error", job_id)

                # Update status.
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update() \
                                         .get(id=job_id)
                    job.go_state_finished(health, infrastructure_error)
                    job.save()
Example #6
    def logging_socket(self, options):
        msg = self.pull_socket.recv_multipart()
        try:
            (job_id, level, name, message) = msg  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("Failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except KeyError:
            self.logger.error(
                "[%s] Invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Reject level or name values that contain path separators
        if '/' in level or '/' in name:
            self.logger.error("[%s] Wrong level or name received, dropping the message", job_id)
            return
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (options['output_dir'],
                                                         job_id, level.split('.')[0],
                                                         level, name)

        # Find the handler (if available)
        if job_id in self.logs:
            if filename != self.logs[job_id].filename:
                # Close the old file handler
                self.logs[job_id].close()
                mkdir(os.path.dirname(filename))
                self.logs[job_id] = FileHandler(filename)
        else:
            self.logger.info("[%s] Receiving logs from a new job", job_id)
            mkdir(os.path.dirname(filename))
            self.logs[job_id] = FileHandler(filename)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] Unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job, level)
            ret = map_scanned_results(results=message_msg, job=job, meta_filename=meta_filename)
            if not ret:
                self.logger.warning(
                    "[%s] Unable to map scanned results: %s",
                    job_id, message)

        # Mark the file handler as used
        # TODO: try to use a more pythonic way
        self.logs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        f_handler = self.logs[job_id].fd
        f_handler.write(message)
        f_handler.write('\n')
        f_handler.flush()

        # TODO: keep the file handler to avoid calling open for each line
        filename = os.path.join(options['output_dir'],
                                "job-%s" % job_id,
                                'output.yaml')
        with open(filename, 'a+') as f_out:
            f_out.write(message)
            f_out.write('\n')
Example #7
    def logging_socket(self):
        msg = self.log_socket.recv_multipart()
        try:
            (job_id, message) = (u(m) for m in msg)  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("[POLL] failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except TypeError:
            self.logger.error("[%s] not a dictionary, dropping", job_id)
            return
        except KeyError:
            self.logger.error(
                "[%s] invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Find the handler (if available)
        if job_id not in self.jobs:
            # Query the database for the job
            try:
                job = TestJob.objects.get(id=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return

            self.logger.info("[%s] receiving logs from a new job", job_id)
            # Create the sub directories (if needed)
            mkdir(job.output_dir)
            self.jobs[job_id] = JobHandler(job)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job)
            new_test_case = map_scanned_results(results=message_msg, job=job,
                                                meta_filename=meta_filename)
            if new_test_case is None:
                self.logger.warning(
                    "[%s] unable to map scanned results: %s",
                    job_id, message)
            else:
                self.test_cases.append(new_test_case)

            # Look for lava.job result
            if message_msg.get("definition") == "lava" and message_msg.get("case") == "job":
                # Flush cached test cases
                self.flush_test_cases()

                if message_msg.get("result") == "pass":
                    health = TestJob.HEALTH_COMPLETE
                    health_msg = "Complete"
                else:
                    health = TestJob.HEALTH_INCOMPLETE
                    health_msg = "Incomplete"
                self.logger.info("[%s] job status: %s", job_id, health_msg)

                infrastructure_error = (message_msg.get("error_type") in ["Bug",
                                                                          "Configuration",
                                                                          "Infrastructure"])
                if infrastructure_error:
                    self.logger.info("[%s] Infrastructure error", job_id)

                # Update status.
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update() \
                                         .get(id=job_id)
                    job.go_state_finished(health, infrastructure_error)
                    job.save()

        # Mark the file handler as used
        self.jobs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        self.jobs[job_id].write(message)
Example #8
    def logging_socket(self, options):
        msg = self.pull_socket.recv_multipart()
        try:
            (job_id, level, name, message) = msg  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("Failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except KeyError:
            self.logger.error(
                "[%s] Invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Reject level or name values that contain path separators
        if '/' in level or '/' in name:
            self.logger.error(
                "[%s] Wrong level or name received, dropping the message",
                job_id)
            return
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (
            options['output_dir'], job_id, level.split('.')[0], level, name)

        # Find the handler (if available)
        if job_id in self.logs:
            if filename != self.logs[job_id].filename:
                # Close the old file handler
                self.logs[job_id].close()
                mkdir(os.path.dirname(filename))
                self.logs[job_id] = FileHandler(filename)
        else:
            self.logger.info("[%s] Receiving logs from a new job", job_id)
            mkdir(os.path.dirname(filename))
            self.logs[job_id] = FileHandler(filename)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] Unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job, level)
            ret = map_scanned_results(results=message_msg,
                                      job=job,
                                      meta_filename=meta_filename)
            if not ret:
                self.logger.warning("[%s] Unable to map scanned results: %s",
                                    job_id, message)

        # Mark the file handler as used
        # TODO: try to use a more pythonic way
        self.logs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        f_handler = self.logs[job_id].fd
        f_handler.write(message)
        f_handler.write('\n')
        f_handler.flush()

        # TODO: keep the file handler to avoid calling open for each line
        filename = os.path.join(options['output_dir'], "job-%s" % job_id,
                                'output.yaml')
        with open(filename, 'a+') as f_out:
            f_out.write(message)
            f_out.write('\n')