Example #1
    def test_metastore(self):
        field = TestCase._meta.get_field('metadata')
        level = '1.3.5.1'
        # artificially inflate results to represent a set of kernel messages
        results = {
            'definition': 'lava',
            'case': 'unit-test',
            # list of numbers, generates a much longer YAML string than just the count
            'extra': list(range(int(field.max_length / 2))),
            'result': 'pass'
        }
        stub = "%s-%s-%s.yaml" % (results['definition'], results['case'], level)
        job = TestJob.from_yaml_and_user(
            self.factory.make_job_yaml(), self.user)
        meta_filename = os.path.join(job.output_dir, 'metadata', stub)
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (job.output_dir,
                                                         job.id, level.split('.')[0],
                                                         level, results['definition'])

        mkdir(os.path.dirname(filename))
        if os.path.exists(meta_filename):
            # isolate from other unit tests
            os.unlink(meta_filename)
        self.assertEqual(meta_filename, create_metadata_store(results, job, level))
        self.assertTrue(map_scanned_results(results, job, meta_filename))
        self.assertEqual(TestCase.objects.filter(name='unit-test').count(), 1)
        test_data = yaml.load(TestCase.objects.filter(name='unit-test')[0].metadata, Loader=yaml.CLoader)
        self.assertEqual(test_data['extra'], meta_filename)
        self.assertTrue(os.path.exists(meta_filename))
        with open(test_data['extra'], 'r') as extra_file:
            data = yaml.load(extra_file, Loader=yaml.CLoader)
        self.assertIsNotNone(data)
        os.unlink(meta_filename)
        shutil.rmtree(job.output_dir)
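
Note: every example in this listing calls a shared mkdir() helper rather than os.makedirs() directly. A minimal sketch of what such a helper likely looks like (an assumption; the real LAVA utility may differ):

    import errno
    import os

    def mkdir(path):
        # create the directory tree; ignore "already exists" errors
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise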
Example #2
    def _handle_end(self, hostname, action, msg):  # pylint: disable=unused-argument
        try:
            job_id = int(msg[2])
            error_msg = msg[3]
            compressed_description = msg[4]
        except (IndexError, ValueError):
            self.logger.error("Invalid message from <%s> '%s'", hostname, msg)
            return

        try:
            job = TestJob.objects.get(id=job_id)
        except TestJob.DoesNotExist:
            self.logger.error("[%d] Unknown job", job_id)
            # ACK even if the job is unknown to let the dispatcher
            # forget about it
            send_multipart_u(self.controler, [hostname, 'END_OK', str(job_id)])
            return

        filename = os.path.join(job.output_dir, 'description.yaml')
        # If description.yaml already exists, an END was already received
        if os.path.exists(filename):
            self.logger.info("[%d] %s => END (duplicated), skipping", job_id,
                             hostname)
        else:
            if compressed_description:
                self.logger.info("[%d] %s => END", job_id, hostname)
            else:
                self.logger.info(
                    "[%d] %s => END (lava-run crashed, mark job as INCOMPLETE)",
                    job_id, hostname)
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update() \
                                         .get(id=job_id)

                    job.go_state_finished(TestJob.HEALTH_INCOMPLETE)
                    if error_msg:
                        self.logger.error("[%d] Error: %s", job_id, error_msg)
                        job.failure_comment = error_msg
                    job.save()

            # Create description.yaml even if it is empty so that
            # duplicated END messages can be detected
            try:
                # Create the directory if it was not already created
                mkdir(os.path.dirname(filename))
                # TODO: check that compressed_description is not ""
                description = lzma.decompress(compressed_description)
                with open(filename, 'w') as f_description:
                    f_description.write(description.decode("utf-8"))
                if description:
                    parse_job_description(job)
            except (OSError, lzma.LZMAError) as exc:
                self.logger.error("[%d] Unable to dump 'description.yaml'",
                                  job_id)
                self.logger.exception("[%d] %s", job_id, exc)

        # ACK the job and mark the dispatcher as alive
        send_multipart_u(self.controler, [hostname, 'END_OK', str(job_id)])
        self.dispatcher_alive(hostname)
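
Note: in _handle_end() the job description travels inside the END message as LZMA-compressed bytes. A minimal round-trip sketch using only the standard-library lzma module (the description content here is hypothetical):

    import lzma

    description = "device_type: qemu\njob_name: example\n"  # hypothetical content
    compressed_description = lzma.compress(description.encode("utf-8"))
    # receiving side, mirroring the handler above
    assert lzma.decompress(compressed_description).decode("utf-8") == description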
Example #3
    def _handle_end(self, hostname, action, msg):  # pylint: disable=unused-argument
        try:
            job_id = int(msg[2])
            error_msg = msg[3]
            compressed_description = msg[4]
        except (IndexError, ValueError):
            self.logger.error("Invalid message from <%s> '%s'", hostname, msg)
            return

        try:
            job = TestJob.objects.get(id=job_id)
        except TestJob.DoesNotExist:
            self.logger.error("[%d] Unknown job", job_id)
            # ACK even if the job is unknown to let the dispatcher
            # forget about it
            send_multipart_u(self.controler, [hostname, 'END_OK', str(job_id)])
            return

        filename = os.path.join(job.output_dir, 'description.yaml')
        # If description.yaml already exists, an END was already received
        if os.path.exists(filename):
            self.logger.info("[%d] %s => END (duplicated), skipping", job_id, hostname)
        else:
            if compressed_description:
                self.logger.info("[%d] %s => END", job_id, hostname)
            else:
                self.logger.info("[%d] %s => END (lava-run crashed, mark job as INCOMPLETE)",
                                 job_id, hostname)
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update() \
                                         .get(id=job_id)

                    job.go_state_finished(TestJob.HEALTH_INCOMPLETE)
                    if error_msg:
                        self.logger.error("[%d] Error: %s", job_id, error_msg)
                        job.failure_comment = error_msg
                    job.save()

            # Create description.yaml even if it is empty so that
            # duplicated END messages can be detected
            try:
                # Create the directory if it was not already created
                mkdir(os.path.dirname(filename))
                # TODO: check that compressed_description is not ""
                description = lzma.decompress(compressed_description)
                with open(filename, 'w') as f_description:
                    f_description.write(description.decode("utf-8"))
                if description:
                    parse_job_description(job)
            except (IOError, lzma.LZMAError) as exc:
                self.logger.error("[%d] Unable to dump 'description.yaml'",
                                  job_id)
                self.logger.exception("[%d] %s", job_id, exc)

        # ACK the job and mark the dispatcher as alive
        send_multipart_u(self.controler, [hostname, 'END_OK', str(job_id)])
        self.dispatcher_alive(hostname)
Example #4
    def save_job_config(self, job, worker, device_cfg, options):
        output_dir = job.output_dir
        mkdir(output_dir)
        with open(os.path.join(output_dir, "job.yaml"), "w") as f_out:
            f_out.write(self.export_definition(job))
        with contextlib.suppress(IOError):
            shutil.copy(options["env"], os.path.join(output_dir, "env.yaml"))
        with contextlib.suppress(IOError):
            shutil.copy(options["env_dut"], os.path.join(output_dir, "env.dut.yaml"))
        with contextlib.suppress(IOError):
            shutil.copy(os.path.join(options["dispatchers_config"], "%s.yaml" % worker.hostname),
                        os.path.join(output_dir, "dispatcher.yaml"))
        with open(os.path.join(output_dir, "device.yaml"), "w") as f_out:
            yaml.dump(device_cfg, f_out)
Example #5
    def save_job_config(self, job, device_cfg, env_str, env_dut_str, dispatcher_cfg):
        output_dir = job.output_dir
        mkdir(output_dir)
        with open(os.path.join(output_dir, "job.yaml"), "w") as f_out:
            f_out.write(self.export_definition(job))
        with open(os.path.join(output_dir, "device.yaml"), "w") as f_out:
            yaml.dump(device_cfg, f_out)
        if env_str:
            with open(os.path.join(output_dir, "env.yaml"), "w") as f_out:
                f_out.write(env_str)
        if env_dut_str:
            with open(os.path.join(output_dir, "env.dut.yaml"), "w") as f_out:
                f_out.write(env_dut_str)
        if dispatcher_cfg:
            with open(os.path.join(output_dir, "dispatcher.yaml"), "w") as f_out:
                f_out.write(dispatcher_cfg)
Example #6
    def handle(self, *_, **options):
        base_dir = "/var/lib/lava-server/default/media/job-output/"
        len_base_dir = len(base_dir)
        jobs = TestJob.objects.all().order_by("id")

        self.stdout.write("Browsing all jobs")
        for job in jobs:
            self.stdout.write(
                "* %d {%s => %s}" %
                (job.id, "job-%d" % job.id, job.output_dir[len_base_dir:]))
            mkdir(os.path.dirname(job.output_dir))
            old_dir = base_dir + "job-%d" % job.id
            if not os.path.exists(old_dir):
                self.stdout.write("  -> no output directory")
                continue
            os.rename(old_dir, job.output_dir)
Example #7
    def save_job_config(self, job, worker, device_cfg, options):
        output_dir = job.output_dir
        mkdir(output_dir)
        with open(os.path.join(output_dir, "job.yaml"), "w") as f_out:
            f_out.write(self.export_definition(job))
        with contextlib.suppress(IOError):
            shutil.copy(options["env"], os.path.join(output_dir, "env.yaml"))
        with contextlib.suppress(IOError):
            shutil.copy(options["env_dut"],
                        os.path.join(output_dir, "env.dut.yaml"))
        with contextlib.suppress(IOError):
            shutil.copy(
                os.path.join(options["dispatchers_config"],
                             "%s.yaml" % worker.hostname),
                os.path.join(output_dir, "dispatcher.yaml"))
        with open(os.path.join(output_dir, "device.yaml"), "w") as f_out:
            yaml.dump(device_cfg, f_out)
Example #8
    def test_metastore(self):
        field = TestCase._meta.get_field("metadata")
        level = "1.3.5.1"
        # artificially inflate results to represent a set of kernel messages
        results = {
            "definition": "lava",
            "case": "unit-test",
            "level": level,
            # list of numbers, generates a much longer YAML string than just the count
            "extra": range(int(field.max_length / 2)),
            "result": "pass",
        }
        stub = "%s-%s-%s.yaml" % (results["definition"], results["case"],
                                  level)
        job = TestJob.from_yaml_and_user(self.factory.make_job_yaml(),
                                         self.user)
        meta_filename = os.path.join(job.output_dir, "metadata", stub)
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (
            job.output_dir,
            job.id,
            level.split(".")[0],
            level,
            results["definition"],
        )

        mkdir(os.path.dirname(filename))
        if os.path.exists(meta_filename):
            # isolate from other unit tests
            os.unlink(meta_filename)
        self.assertEqual(meta_filename, create_metadata_store(results, job))
        ret = map_scanned_results(results, job, {}, meta_filename)
        self.assertIsNotNone(ret)
        ret.save()
        self.assertEqual(TestCase.objects.filter(name="unit-test").count(), 1)
        test_data = yaml.load(  # nosec - unit test
            TestCase.objects.filter(name="unit-test")[0].metadata,
            Loader=yaml.CLoader)
        self.assertEqual(test_data["extra"], meta_filename)
        self.assertTrue(os.path.exists(meta_filename))
        with open(test_data["extra"], "r") as extra_file:
            data = yaml.load(extra_file,
                             Loader=yaml.CLoader)  # nosec - unit test
        self.assertIsNotNone(data)
        os.unlink(meta_filename)
        shutil.rmtree(job.output_dir)
Example #9
    def handle(self, *_, **options):
        base_dir = "/var/lib/lava-server/default/media/job-output/"
        len_base_dir = len(base_dir)
        jobs = TestJob.objects.all().order_by("id")

        self.stdout.write("Browsing all jobs")
        start = 0
        while True:
            count = 0
            for job in jobs[start:start + 100]:
                count += 1
                old_path = os.path.join(settings.MEDIA_ROOT, "job-output",
                                        "job-%s" % job.id)
                date_path = os.path.join(
                    settings.MEDIA_ROOT,
                    "job-output",
                    "%02d" % job.submit_time.year,
                    "%02d" % job.submit_time.month,
                    "%02d" % job.submit_time.day,
                    str(job.id),
                )
                if not os.path.exists(old_path):
                    self.stdout.write("* %d skip" % job.id)
                    continue

                self.stdout.write("* %d {%s => %s}" %
                                  (job.id, old_path[len_base_dir:],
                                   date_path[len_base_dir:]))
                if not options["dry_run"]:
                    mkdir(os.path.dirname(date_path))
                    if not os.path.exists(old_path):
                        self.stdout.write("  -> no output directory")
                        continue
                    os.rename(old_path, date_path)
            start += count
            if count == 0:
                break
            if options["slow"]:
                self.stdout.write("sleeping 2s...")
                time.sleep(2)
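
Note: examples #9, #11 and #12 migrate job output from flat "job-<id>" directories to date-based paths. A minimal sketch of the path computation, assuming a job submitted on 2018-03-07 (the MEDIA_ROOT value and job id are hypothetical):

    import os
    from datetime import datetime

    MEDIA_ROOT = "/var/lib/lava-server/default/media"  # hypothetical value
    submit_time = datetime(2018, 3, 7)
    job_id = 1234
    date_path = os.path.join(MEDIA_ROOT, "job-output",
                             "%02d" % submit_time.year,
                             "%02d" % submit_time.month,
                             "%02d" % submit_time.day,
                             str(job_id))
    # -> /var/lib/lava-server/default/media/job-output/2018/03/07/1234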
Example #10
    def test_metastore(self):
        field = TestCase._meta.get_field('metadata')
        level = '1.3.5.1'
        # artificially inflate results to represent a set of kernel messages
        results = {
            'definition': 'lava',
            'case': 'unit-test',
            'level': level,
            # list of numbers, generates a much longer YAML string than just the count
            'extra': list(range(int(field.max_length / 2))),
            'result': 'pass'
        }
        stub = "%s-%s-%s.yaml" % (results['definition'], results['case'],
                                  level)
        job = TestJob.from_yaml_and_user(self.factory.make_job_yaml(),
                                         self.user)
        meta_filename = os.path.join(job.output_dir, 'metadata', stub)
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (
            job.output_dir, job.id, level.split('.')[0], level,
            results['definition'])

        mkdir(os.path.dirname(filename))
        if os.path.exists(meta_filename):
            # isolate from other unit tests
            os.unlink(meta_filename)
        self.assertEqual(meta_filename, create_metadata_store(results, job))
        ret = map_scanned_results(results, job, {}, meta_filename)
        self.assertIsNotNone(ret)
        ret.save()
        self.assertEqual(TestCase.objects.filter(name='unit-test').count(), 1)
        test_data = yaml.load(
            TestCase.objects.filter(name='unit-test')[0].metadata,
            Loader=yaml.CLoader)
        self.assertEqual(test_data['extra'], meta_filename)
        self.assertTrue(os.path.exists(meta_filename))
        with open(test_data['extra'], 'r') as extra_file:
            data = yaml.load(extra_file, Loader=yaml.CLoader)
        self.assertIsNotNone(data)
        os.unlink(meta_filename)
        shutil.rmtree(job.output_dir)
Example #11
    def handle(self, *_, **options):
        base_dir = "/var/lib/lava-server/default/media/job-output/"
        len_base_dir = len(base_dir)
        jobs = TestJob.objects.all().order_by("id")

        self.stdout.write("Browsing all jobs")
        start = 0
        while True:
            count = 0
            for job in jobs[start:start + 100]:
                count += 1
                old_path = os.path.join(settings.MEDIA_ROOT, 'job-output', 'job-%s' % job.id)
                date_path = os.path.join(settings.MEDIA_ROOT, 'job-output',
                                         "%02d" % job.submit_time.year,
                                         "%02d" % job.submit_time.month,
                                         "%02d" % job.submit_time.day,
                                         str(job.id))
                if not os.path.exists(old_path):
                    self.stdout.write("* %d skip" % job.id)
                    continue

                self.stdout.write("* %d {%s => %s}" % (job.id,
                                                       old_path[len_base_dir:],
                                                       date_path[len_base_dir:]))
                if not options["dry_run"]:
                    mkdir(os.path.dirname(date_path))
                    if not os.path.exists(old_path):
                        self.stdout.write("  -> no output directory")
                        continue
                    os.rename(old_path, date_path)
            start += count
            if count == 0:
                break
            if options["slow"]:
                self.stdout.write("sleeping 2s...")
                time.sleep(2)
Example #12
    def handle(self, *_, **options):
        base_dir = "/var/lib/lava-server/default/media/job-output/"
        len_base_dir = len(base_dir)
        jobs = TestJob.objects.all().order_by("id")

        self.stdout.write("Browsing all jobs")
        for job in jobs:
            old_path = os.path.join(settings.MEDIA_ROOT, 'job-output', 'job-%s' % job.id)
            date_path = os.path.join(settings.MEDIA_ROOT, 'job-output',
                                     "%02d" % job.submit_time.year,
                                     "%02d" % job.submit_time.month,
                                     "%02d" % job.submit_time.day,
                                     str(job.id))
            if not os.path.exists(old_path):
                continue

            self.stdout.write("* %d {%s => %s}" % (job.id,
                                                   old_path[len_base_dir:],
                                                   date_path[len_base_dir:]))
            mkdir(os.path.dirname(date_path))
            if not os.path.exists(old_path):
                self.stdout.write("  -> no output directory")
                continue
            os.rename(old_path, date_path)
Example #13
    def logging_socket(self):
        msg = self.log_socket.recv_multipart()
        try:
            (job_id, message) = (u(m) for m in msg)  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("[POLL] failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except TypeError:
            self.logger.error("[%s] not a dictionary, dropping", job_id)
            return
        except KeyError:
            self.logger.error(
                "[%s] invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Find the handler (if available)
        if job_id not in self.jobs:
            # Query the database for the job
            try:
                job = TestJob.objects.get(id=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return

            self.logger.info("[%s] receiving logs from a new job", job_id)
            # Create the sub directories (if needed)
            mkdir(job.output_dir)
            self.jobs[job_id] = JobHandler(job)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job)
            new_test_case = map_scanned_results(results=message_msg, job=job,
                                                meta_filename=meta_filename)
            if new_test_case is None:
                self.logger.warning(
                    "[%s] unable to map scanned results: %s",
                    job_id, message)
            else:
                self.test_cases.append(new_test_case)

            # Look for lava.job result
            if message_msg.get("definition") == "lava" and message_msg.get("case") == "job":
                # Flush cached test cases
                self.flush_test_cases()

                if message_msg.get("result") == "pass":
                    health = TestJob.HEALTH_COMPLETE
                    health_msg = "Complete"
                else:
                    health = TestJob.HEALTH_INCOMPLETE
                    health_msg = "Incomplete"
                self.logger.info("[%s] job status: %s", job_id, health_msg)

                infrastructure_error = (message_msg.get("error_type") in ["Bug",
                                                                          "Configuration",
                                                                          "Infrastructure"])
                if infrastructure_error:
                    self.logger.info("[%s] Infrastructure error", job_id)

                # Update status.
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update() \
                                         .get(id=job_id)
                    job.go_state_finished(health, infrastructure_error)
                    job.save()

        # Mark the file handler as used
        self.jobs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        self.jobs[job_id].write(message)
Example #14
    def logging_socket(self, options):
        msg = self.pull_socket.recv_multipart()
        try:
            (job_id, level, name, message) = msg  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("Failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except KeyError:
            self.logger.error(
                "[%s] Invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Sanitize the filename components
        if '/' in level or '/' in name:
            self.logger.error(
                "[%s] Wrong level or name received, dropping the message",
                job_id)
            return

        # Find the handler (if available)
        if job_id in self.jobs:
            if level != self.jobs[job_id].current_level:
                # Close the old file handler
                self.jobs[job_id].sub_log.close()
                filename = os.path.join(self.jobs[job_id].output_dir,
                                        "pipeline",
                                        level.split('.')[0],
                                        "%s-%s.yaml" % (level, name))
                mkdir(os.path.dirname(filename))
                self.jobs[job_id].current_level = level
                self.jobs[job_id].sub_log = open(filename, 'a+')
        else:
            # Query the database for the job
            try:
                job = TestJob.objects.get(id=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] Unknown job id", job_id)
                return

            self.logger.info("[%s] Receiving logs from a new job", job_id)
            filename = os.path.join(job.output_dir, "pipeline",
                                    level.split('.')[0],
                                    "%s-%s.yaml" % (level, name))
            # Create the sub directories (if needed)
            mkdir(os.path.dirname(filename))
            self.jobs[job_id] = JobHandler(job, level, filename)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] Unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job, level)
            ret = map_scanned_results(results=message_msg,
                                      job=job,
                                      meta_filename=meta_filename)
            if not ret:
                self.logger.warning("[%s] Unable to map scanned results: %s",
                                    job_id, message)

        # Mark the file handler as used
        self.jobs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        self.jobs[job_id].write(message)
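
Note: example #14 writes one log file per pipeline level, named "pipeline/<top level>/<level>-<name>.yaml". A minimal sketch of that naming scheme (the output_dir, level and name values are hypothetical):

    import os

    output_dir = "/tmp/job-42"                    # hypothetical
    level, name = "1.3.5.1", "auto-login-action"  # hypothetical
    filename = os.path.join(output_dir, "pipeline", level.split('.')[0],
                            "%s-%s.yaml" % (level, name))
    # -> /tmp/job-42/pipeline/1/1.3.5.1-auto-login-action.yaml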
Example #15
    def logging_socket(self):
        msg = self.log_socket.recv_multipart()
        try:
            (job_id, message) = (u(m) for m in msg)  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("[POLL] failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except TypeError:
            self.logger.error("[%s] not a dictionary, dropping", job_id)
            return
        except KeyError:
            self.logger.error(
                "[%s] invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Find the handler (if available)
        if job_id not in self.jobs:
            # Query the database for the job
            try:
                job = TestJob.objects.get(id=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return

            self.logger.info("[%s] receiving logs from a new job", job_id)
            # Create the sub directories (if needed)
            mkdir(job.output_dir)
            self.jobs[job_id] = JobHandler(job)

        # For 'event', send an event and log as 'debug'
        if message_lvl == 'event':
            self.logger.debug("[%s] event: %s", job_id, message_msg)
            send_event(".event", "lavaserver", {"message": message_msg, "job": job_id})
            message_lvl = "debug"
        # For 'marker', save in the database and log as 'debug'
        elif message_lvl == 'marker':
            # TODO: save on the file system in case of lava-logs restart
            m_type = message_msg.get("type")
            case = message_msg.get("case")
            if m_type is None or case is None:
                self.logger.error("[%s] invalid marker: %s", job_id, message_msg)
                return
            self.jobs[job_id].markers.setdefault(case, {})[m_type] = self.jobs[job_id].line_count()
            # This is in fact the previous line
            self.jobs[job_id].markers[case][m_type] -= 1
            self.logger.debug("[%s] marker: %s line: %s", job_id, message_msg, self.jobs[job_id].markers[case][m_type])
            return

        # Mark the file handler as used
        self.jobs[job_id].last_usage = time.time()
        # The format is a list of dictionaries
        self.jobs[job_id].write("- %s" % message)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job)
            new_test_case = map_scanned_results(results=message_msg, job=job,
                                                markers=self.jobs[job_id].markers,
                                                meta_filename=meta_filename)

            if new_test_case is None:
                self.logger.warning(
                    "[%s] unable to map scanned results: %s",
                    job_id, message)
            else:
                self.test_cases.append(new_test_case)

            # Look for lava.job result
            if message_msg.get("definition") == "lava" and message_msg.get("case") == "job":
                # Flush cached test cases
                self.flush_test_cases()

                if message_msg.get("result") == "pass":
                    health = TestJob.HEALTH_COMPLETE
                    health_msg = "Complete"
                else:
                    health = TestJob.HEALTH_INCOMPLETE
                    health_msg = "Incomplete"
                self.logger.info("[%s] job status: %s", job_id, health_msg)

                infrastructure_error = (message_msg.get("error_type") in ["Bug",
                                                                          "Configuration",
                                                                          "Infrastructure"])
                if infrastructure_error:
                    self.logger.info("[%s] Infrastructure error", job_id)

                # Update status.
                with transaction.atomic():
                    # TODO: find a way to lock actual_device
                    job = TestJob.objects.select_for_update() \
                                         .get(id=job_id)
                    job.go_state_finished(health, infrastructure_error)
                    job.save()
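
Note: the logging_socket() handlers parse each log line as a YAML mapping with "lvl" and "msg" keys. A minimal sketch of a "results" line and how it is scanned (the payload values are hypothetical):

    import yaml

    message = '{"lvl": "results", "msg": {"definition": "lava", "case": "job", "result": "pass"}}'
    scanned = yaml.safe_load(message)  # JSON is a subset of YAML
    assert scanned["lvl"] == "results"
    assert scanned["msg"]["case"] == "job"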
Example #16
    def logging_socket(self, options):
        msg = self.pull_socket.recv_multipart()
        try:
            (job_id, level, name, message) = msg  # pylint: disable=unbalanced-tuple-unpacking
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("Failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except KeyError:
            self.logger.error(
                "[%s] Invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        # Sanitize the filename components
        if '/' in level or '/' in name:
            self.logger.error("[%s] Wrong level or name received, dropping the message", job_id)
            return
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (options['output_dir'],
                                                         job_id, level.split('.')[0],
                                                         level, name)

        # Find the handler (if available)
        if job_id in self.logs:
            if filename != self.logs[job_id].filename:
                # Close the old file handler
                self.logs[job_id].close()
                mkdir(os.path.dirname(filename))
                self.logs[job_id] = FileHandler(filename)
        else:
            self.logger.info("[%s] Receiving logs from a new job", job_id)
            mkdir(os.path.dirname(filename))
            self.logs[job_id] = FileHandler(filename)

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] Unknown job id", job_id)
                return
            meta_filename = create_metadata_store(message_msg, job, level)
            ret = map_scanned_results(results=message_msg, job=job, meta_filename=meta_filename)
            if not ret:
                self.logger.warning(
                    "[%s] Unable to map scanned results: %s",
                    job_id, message)

        # Mark the file handler as used
        # TODO: try to use a more pythonic way
        self.logs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        f_handler = self.logs[job_id].fd
        f_handler.write(message)
        f_handler.write('\n')
        f_handler.flush()

        # TODO: keep the file handler to avoid calling open for each line
        filename = os.path.join(options['output_dir'],
                                "job-%s" % job_id,
                                'output.yaml')
        with open(filename, 'a+') as f_out:
            f_out.write(message)
            f_out.write('\n')
Example #17
    def logging_socket(self, options):
        msg = self.pull_socket.recv_multipart()
        try:
            (job_id, level, name, message) = msg
        except ValueError:
            # do not let a bad message stop the master.
            self.logger.error("Failed to parse log message, skipping: %s", msg)
            return

        try:
            scanned = yaml.load(message, Loader=yaml.CLoader)
        except yaml.YAMLError:
            self.logger.error("[%s] data are not valid YAML, dropping", job_id)
            return

        # Look for "results" level
        try:
            message_lvl = scanned["lvl"]
            message_msg = scanned["msg"]
        except KeyError:
            self.logger.error(
                "[%s] Invalid log line, missing \"lvl\" or \"msg\" keys: %s",
                job_id, message)
            return

        if message_lvl == "results":
            try:
                job = TestJob.objects.get(pk=job_id)
            except TestJob.DoesNotExist:
                self.logger.error("[%s] Unknown job id", job_id)
                return
            ret = map_scanned_results(results=message_msg, job=job)
            if not ret:
                self.logger.warning("[%s] Unable to map scanned results: %s",
                                    job_id, message)

        # Sanitize the filename components
        if '/' in level or '/' in name:
            self.logger.error(
                "[%s] Wrong level or name received, dropping the message",
                job_id)
            return
        filename = "%s/job-%s/pipeline/%s/%s-%s.yaml" % (
            options['output_dir'], job_id, level.split('.')[0], level, name)

        # Find the handler (if available)
        if job_id in self.logs:
            if filename != self.logs[job_id].filename:
                # Close the old file handler
                self.logs[job_id].close()
                mkdir(os.path.dirname(filename))
                self.logs[job_id] = FileHandler(filename)
        else:
            self.logger.info("[%s] Receiving logs from a new job", job_id)
            mkdir(os.path.dirname(filename))
            self.logs[job_id] = FileHandler(filename)

        # Mark the file handler as used
        # TODO: try to use a more pythonic way
        self.logs[job_id].last_usage = time.time()

        # n.b. logging here would produce a log entry for every message in every job.
        # The format is a list of dictionaries
        message = "- %s" % message

        # Write data
        f_handler = self.logs[job_id].fd
        f_handler.write(message)
        f_handler.write('\n')
        f_handler.flush()

        # TODO: keep the file handler to avoid calling open for each line
        filename = os.path.join(options['output_dir'], "job-%s" % job_id,
                                'output.yaml')
        with open(filename, 'a+') as f_out:
            f_out.write(message)
            f_out.write('\n')
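
Note: examples #16 and #17 keep one FileHandler per job so the log file is not re-opened on every line. A minimal sketch of such a handler (an assumption; the real class may differ):

    import time

    class FileHandler:
        def __init__(self, filename):
            self.filename = filename
            self.fd = open(filename, 'a+')
            self.last_usage = time.time()

        def close(self):
            self.fd.close()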