def create_metadata_store(results, job):
    """
    Uses the OrderedDict import to correctly handle the yaml.load

    Persist the "extra" results data into a per-test-case YAML file under
    the job output directory, merging with any existing store for the same
    definition/case/level.

    :param results: results dictionary logged by the slave; must contain
        "extra" and "level" (plus "definition" and "case") to be stored.
    :param job: the current TestJob, providing ``output_dir`` and ``id``.
    :return: the metadata store filename on success, None otherwise.
    """
    if "extra" not in results:
        return None
    level = results.get("level")
    if level is None:
        return None
    logger = logging.getLogger("lava-master")
    stub = "%s-%s-%s.yaml" % (results["definition"], results["case"], level)
    meta_filename = os.path.join(job.output_dir, "metadata", stub)
    os.makedirs(os.path.dirname(meta_filename), mode=0o755, exist_ok=True)
    if os.path.exists(meta_filename):
        with open(meta_filename, "r") as existing_store:
            # An empty or whitespace-only YAML file loads as None; fall back
            # to an empty dict so update() below cannot raise AttributeError.
            data = yaml_load(existing_store) or {}
        data.update(results["extra"])
    else:
        data = results["extra"]
    try:
        with open(meta_filename, "w") as extra_store:
            yaml_dump(data, extra_store)
    except OSError as exc:  # LAVA-847
        msg = "[%d] Unable to create metadata store: %s" % (job.id, exc)
        logger.error(msg)
        append_failure_comment(job, msg)
        return None
    return meta_filename
def log_message(self, level, level_name, message, *args, **kwargs):
    """Serialize one log record as a single-line YAML string and emit it."""
    # Assemble the record: UTC timestamp, level name, then the message.
    record = {
        "dt": datetime.datetime.utcnow().isoformat(),
        "lvl": level_name,
    }
    if args and isinstance(message, str):
        record["msg"] = message % args
    else:
        record["msg"] = message

    # Set width to a really large value in order to always get one line.
    # But keep this reasonable because the logs will be loaded by CLoader
    # that is limited to around 10**7 chars
    def serialize():
        return yaml_dump(
            record, default_flow_style=True, default_style='"', width=10**6
        )[:-1]

    line = serialize()
    # Test the limit and skip if the line is too long
    if len(line) >= 10**6:
        if isinstance(message, str):
            record["msg"] = "<line way too long ...>"
        else:
            record["msg"] = {"skip": "line way too long ..."}
        line = serialize()
    self._log(level, line, ())
def testcase_yaml(request, pk):
    """Serve a single TestCase (with bug links) as a YAML attachment."""
    testcase = get_object_or_404(TestCase, pk=pk)
    check_request_auth(request, testcase.suite.job)
    response = HttpResponse(content_type="text/yaml")
    response["Content-Disposition"] = 'attachment; filename="%s"' % (
        "lava_%s.yaml" % testcase.name
    )
    yaml_dump(export_testcase(testcase, with_buglinks=True), response)
    return response
def testjob_yaml_summary(request, job):
    """Serve a per-suite summary of a TestJob as a YAML attachment."""
    job = get_object_or_404(TestJob, pk=job)
    check_request_auth(request, job)
    response = HttpResponse(content_type="text/yaml")
    filename = "lava_%s_summary.yaml" % job.id
    response["Content-Disposition"] = 'attachment; filename="%s"' % filename
    # One exported entry per suite belonging to this job.
    summaries = [export_testsuite(suite) for suite in job.testsuite_set.all()]
    yaml_dump(summaries, response)
    return response
def test_parameter_support(self):
    """Test-definition parameters must be exported into action metadata."""
    data = self.factory.make_job_data()
    # Pick the first "test" action block and attach two parameters to its
    # first definition.
    test_block = [block for block in data["actions"] if "test" in block][0]
    smoke = test_block["test"]["definitions"][0]
    smoke["parameters"] = {
        "VARIABLE_NAME_1": "first variable value",
        "VARIABLE_NAME_2": "second value",
    }
    job = TestJob.from_yaml_and_user(yaml_dump(data), self.user)
    job_def = yaml_safe_load(job.definition)
    job_ctx = job_def.get("context", {})
    job_ctx.update(
        {"no_kvm": True}
    )  # override to allow unit tests on all types of systems
    device = Device.objects.get(hostname="fakeqemu1")
    device_config = device.load_configuration(job_ctx)  # raw dict
    parser = JobParser()
    obj = PipelineDevice(device_config)
    pipeline_job = parser.parse(job.definition, obj, job.id, None, "")
    allow_missing_path(
        pipeline_job.pipeline.validate_actions, self, "qemu-system-x86_64"
    )
    pipeline = pipeline_job.describe()
    testdata, _ = TestData.objects.get_or_create(testjob=job)
    retval = _get_action_metadata(pipeline["job"]["actions"])
    # Both parameters must appear under the
    # test.<index>.common.definition.parameters namespace with their values.
    self.assertIn("test.0.common.definition.parameters.VARIABLE_NAME_2", retval)
    self.assertIn("test.0.common.definition.parameters.VARIABLE_NAME_1", retval)
    self.assertEqual(
        retval["test.0.common.definition.parameters.VARIABLE_NAME_1"],
        "first variable value",
    )
    self.assertEqual(
        retval["test.0.common.definition.parameters.VARIABLE_NAME_2"],
        "second value",
    )
def suite_yaml(request, job, pk):
    """Serve the test cases of one suite of a job as a YAML attachment."""
    job = get_object_or_404(TestJob, pk=job)
    check_request_auth(request, job)
    test_suite = get_object_or_404(TestSuite, name=pk, job=job)
    # Optional pagination parameters; 0 means "no limit/offset".
    params = request.GET
    offset = int(params.get("offset", default=0))
    limit = int(params.get("limit", default=0))
    response = HttpResponse(content_type="text/yaml")
    response["Content-Disposition"] = 'attachment; filename="%s"' % (
        "lava_%s.yaml" % test_suite.name
    )
    exported = [
        export_testcase(test_case)
        for test_case in get_testcases_with_limit(test_suite, limit, offset)
    ]
    yaml_dump(exported, response)
    return response
def test_multinode_description(self):
    """Every action description of a multinode job must round-trip as YAML."""
    self.assertIsNotNone(self.client_job)
    allow_missing_path(self.client_job.validate, self, "qemu-system-x86_64")
    # check that the description can be re-loaded as valid YAML
    for action in self.client_job.pipeline.actions:
        dumped = yaml_dump(action.explode())
        yaml_load(dumped)  # nosec not suitable for safe_load
def test_job(self):
    """map_metadata() must create MetaType/ActionData rows for a simple job."""
    # Start from a clean slate so the counts below are deterministic.
    MetaType.objects.all().delete()
    TestJob.objects.all().delete()
    job = TestJob.from_yaml_and_user(self.factory.make_job_yaml(), self.user)
    job_def = yaml_safe_load(job.definition)
    job_ctx = job_def.get("context", {})
    job_ctx.update(
        {"no_kvm": True}
    )  # override to allow unit tests on all types of systems
    device = Device.objects.get(hostname="fakeqemu1")
    device_config = device.load_configuration(job_ctx)  # raw dict
    parser = JobParser()
    obj = PipelineDevice(device_config)
    pipeline_job = parser.parse(job.definition, obj, job.id, None, "")
    allow_missing_path(
        pipeline_job.pipeline.validate_actions, self, "qemu-system-x86_64"
    )
    pipeline = pipeline_job.describe()
    map_metadata(yaml_dump(pipeline), job)
    # Exactly one deploy metatype and one boot metatype for this job.
    self.assertEqual(
        MetaType.objects.filter(metatype=MetaType.DEPLOY_TYPE).count(), 1
    )
    self.assertEqual(
        MetaType.objects.filter(metatype=MetaType.BOOT_TYPE).count(), 1
    )
    count = ActionData.objects.all().count()
    self.assertEqual(TestData.objects.all().count(), 1)
    testdata = TestData.objects.all()[0]
    self.assertEqual(testdata.testjob, job)
    # Every ActionData row must be attached to the single TestData object,
    # and the job's actionlevels must account for all of them.
    for actionlevel in ActionData.objects.all():
        self.assertEqual(actionlevel.testdata, testdata)
    action_levels = []
    action_levels.extend(job.testdata.actionlevels.all())
    self.assertEqual(count, len(action_levels))
    count = ActionData.objects.filter(
        meta_type__metatype=MetaType.DEPLOY_TYPE
    ).count()
    self.assertNotEqual(
        ActionData.objects.filter(meta_type__metatype=MetaType.BOOT_TYPE).count(), 0
    )
    # No action may be left with the UNKNOWN metatype.
    self.assertEqual(
        ActionData.objects.filter(
            meta_type__metatype=MetaType.UNKNOWN_TYPE
        ).count(),
        0,
    )
    for actionlevel in ActionData.objects.filter(
        meta_type__metatype=MetaType.BOOT_TYPE
    ):
        self.assertEqual(actionlevel.testdata.testjob.id, job.id)
    self.assertEqual(
        ActionData.objects.filter(
            meta_type__metatype=MetaType.DEPLOY_TYPE, testdata__testjob=job
        ).count(),
        count,
    )
def dump(data: Dict) -> str:
    """Serialize *data* to a single-line YAML string (no trailing newline)."""
    # Set width to a really large value in order to always get one line.
    # But keep this reasonable because the logs will be loaded by CLoader
    # that is limited to around 10**7 chars
    def one_line(doc):
        return yaml_dump(
            doc, default_flow_style=True, default_style='"', width=10**6
        )[:-1]

    data_str = one_line(data)
    # Test the limit and skip if the line is too long
    if len(data_str) >= 10**6:
        if isinstance(data["msg"], str):
            data["msg"] = "<line way too long ...>"
        else:
            data["msg"] = {"skip": "line way too long ..."}
        data_str = one_line(data)
    return data_str
def metadata_export(request, job):
    """
    Dispatcher adds some metadata, Job submitter can add more.
    CSV is not supported as the user-supplied metadata can include
    nested dicts or lists.
    """
    job = get_object_or_404(TestJob, pk=job)
    check_request_auth(request, job)
    # testdata from job & export
    if not hasattr(job, "testdata"):
        raise Http404("No TestData present in test job.")
    response = HttpResponse(content_type="text/yaml")
    response["Content-Disposition"] = 'attachment; filename="%s"' % (
        "lava_metadata_%s.yaml" % job.id
    )
    # hide internal python objects
    attributes = {
        str(item.name): str(item.value)
        for item in job.testdata.attributes.all()
    }
    yaml_dump(attributes, response)
    return response
def get_testsuite_results_yaml(self, job_id, suite_name, limit=None, offset=None):
    """
    Name
    ----
    `get_testsuite_results_yaml` (`job_id`, `suite_name`, `limit=None`, `offset=None`)

    Description
    -----------
    Get the suite results of given job id and suite name in YAML format.

    Arguments
    ---------
    `job_id`: string
        Job id for which the results are required.
    `suite_name`: string
        Name of the suite for which the results are required.
    `limit`: int
        Limit the number of test cases fetched.
    `offset`: int
        Start fetching test cases from a specific point.

    Return value
    ------------
    This function returns an XML-RPC structures of suite results in YAML
    format, provided the user is authenticated with an username and token.
    """
    self._authenticate()
    if not job_id:
        raise xmlrpc.client.Fault(
            400, "Bad request: TestJob id was not specified.")
    try:
        job = TestJob.get_by_job_number(job_id)
        if not job.can_view(self.user):
            raise xmlrpc.client.Fault(
                401, "Permission denied for user to job %s" % job_id)
        test_suite = job.testsuite_set.get(name=suite_name)
        yaml_list = [
            export_testcase(case)
            for case in get_testcases_with_limit(test_suite, limit, offset)
        ]
    except TestJob.DoesNotExist:
        raise xmlrpc.client.Fault(404, "Specified job not found.")
    except TestSuite.DoesNotExist:
        raise xmlrpc.client.Fault(404, "Specified test suite not found.")
    return yaml_dump(yaml_list)
def yaml(self, request, **kwargs):
    """Return this suite's test cases as a YAML attachment."""
    limit = request.query_params.get("limit", None)
    offset = request.query_params.get("offset", None)
    suite = self.get_object()
    cases = [
        export_testcase(case)
        for case in get_testcases_with_limit(suite, limit, offset)
    ]
    response = HttpResponse(yaml_dump(cases), content_type="application/yaml")
    response["Content-Disposition"] = (
        "attachment; filename=suite_%s.yaml" % suite.name
    )
    return response
def test_inline(self):
    """
    Test inline can be parsed without run steps
    """
    data = self.factory.make_job_data()
    test_block = [block for block in data["actions"] if "test" in block][0]
    # Single inline definition with an "install" section but no "run" steps.
    smoke = [
        {
            "path": "inline/smoke-tests-basic.yaml",
            "from": "inline",
            "name": "smoke-tests-inline",
            "repository": {
                "install": {"steps": ["apt"]},
                "metadata": {
                    "description": "Basic system test command for Linaro Ubuntu images",
                    "format": "Lava-Test Test Definition 1.0",
                    "name": "smoke-tests-basic",
                },
            },
        }
    ]
    test_block["test"]["definitions"] = smoke
    job = TestJob.from_yaml_and_user(yaml_dump(data), self.user)
    job_def = yaml_safe_load(job.definition)
    job_ctx = job_def.get("context", {})
    job_ctx.update(
        {"no_kvm": True}
    )  # override to allow unit tests on all types of systems
    device = Device.objects.get(hostname="fakeqemu1")
    device_config = device.load_configuration(job_ctx)  # raw dict
    parser = JobParser()
    obj = PipelineDevice(device_config)
    pipeline_job = parser.parse(job.definition, obj, job.id, None, "")
    allow_missing_path(
        pipeline_job.pipeline.validate_actions, self, "qemu-system-x86_64"
    )
    pipeline = pipeline_job.describe()
    # Metadata mapping must succeed for the inline definition.
    map_metadata(yaml_dump(pipeline), job)
def test_mongo_logs(mocker):
    """Exercise LogsMongo write/read/size/open against mocked pymongo."""
    # Prevent any real MongoDB interaction during construction.
    mocker.patch("pymongo.database.Database.command")
    mocker.patch("pymongo.collection.Collection.create_index")
    logs_mongo = LogsMongo()
    job = mocker.Mock()
    job.id = 1
    insert_one = mocker.MagicMock()
    find = mocker.MagicMock()
    # Canned documents returned by Collection.find for read/size/open.
    find_ret_val = [
        {
            "dt": "2020-03-25T19:44:36.209548",
            "lvl": "info",
            "msg": "first message"
        },
        {
            "dt": "2020-03-26T19:44:36.209548",
            "lvl": "info",
            "msg": "second message"
        },
    ]
    find.return_value = find_ret_val
    mocker.patch("pymongo.collection.Collection.find", find)
    mocker.patch("pymongo.collection.Collection.insert_one", insert_one)

    # write() must parse the YAML line and insert one document with job_id.
    logs_mongo.write(
        job,
        '- {"dt": "2020-03-25T19:44:36.209548", "lvl": "info", "msg": "lava-dispatcher, installed at version: 2020.02"}',
    )
    insert_one.assert_called_with({
        "job_id": 1,
        "dt": "2020-03-25T19:44:36.209548",
        "lvl": "info",
        "msg": "lava-dispatcher, installed at version: 2020.02",
    })  # nosec
    result = yaml_load(logs_mongo.read(job))
    assert len(result) == 2  # nosec
    assert result == find_ret_val  # nosec
    # size of find_ret_val in bytes
    assert logs_mongo.size(job) == 137  # nosec
    # open() must return a byte stream with the same YAML dump.
    assert logs_mongo.open(job).read() == yaml_dump(find_ret_val).encode(
        "utf-8")
def yaml(self, request, **kwargs):
    """Return every test case of this job (across all suites) as YAML."""
    # NOTE(review): limit/offset are parsed but never applied below —
    # confirm whether pagination was intended here.
    limit = request.query_params.get("limit", None)
    offset = request.query_params.get("offset", None)
    job = self.get_object()
    cases = [
        export_testcase(case)
        for suite in job.testsuite_set.all()
        for case in suite.testcase_set.all()
    ]
    response = HttpResponse(yaml_dump(cases), content_type="application/yaml")
    response["Content-Disposition"] = "attachment; filename=job_%d.yaml" % job.id
    return response
def get_testcase_results_yaml(self, job_id, suite_name, case_name):
    """
    Name
    ----
    `get_testcase_results_yaml` (`job_id`, `suite_name`, `case_name`)

    Description
    -----------
    Get the test case results of given job id, suite and test case name
    in YAML format.

    Arguments
    ---------
    `job_id`: string
        Job id for which the results are required.
    `suite_name`: string
        Name of the suite for which the results are required.
    `case_name`: string
        Name of the test case for which the results are required.

    Return value
    ------------
    This function returns an XML-RPC structures of test case results in
    YAML format, provided the user is authenticated with an username and
    token.
    """
    self._authenticate()
    if not job_id:
        raise xmlrpc.client.Fault(
            400, "Bad request: TestJob id was not specified.")
    try:
        job = TestJob.get_by_job_number(job_id)
        if not job.can_view(self.user):
            raise xmlrpc.client.Fault(
                401, "Permission denied for user to job %s" % job_id)
        test_suite = job.testsuite_set.get(name=suite_name)
        matching_cases = test_suite.testcase_set.filter(name=case_name)
        yaml_list = [export_testcase(case) for case in matching_cases]
    except TestJob.DoesNotExist:
        raise xmlrpc.client.Fault(404, "Specified job not found.")
    except TestSuite.DoesNotExist:
        raise xmlrpc.client.Fault(404, "Specified test suite not found.")
    except TestCase.DoesNotExist:
        # NOTE(review): .filter() never raises DoesNotExist, so this branch
        # appears unreachable — kept to preserve behaviour exactly.
        raise xmlrpc.client.Fault(404, "Specified test case not found.")
    return yaml_dump(yaml_list)
def render_failure_comment(self, record):
    """Render the failure-comment column for *record* (a TestJob row)."""
    # Prefer an explicit failure comment stored on the job itself.
    if record.failure_comment:
        return record.failure_comment
    try:
        failure = TestCase.objects.get(
            suite__job=record,
            result=TestCase.RESULT_FAIL,
            suite__name="lava",
            name="job",
        )
    except TestCase.DoesNotExist:
        return ""
    metadata = failure.action_metadata
    if metadata is None or "error_msg" not in metadata:
        return ""
    return yaml_dump(metadata["error_msg"])
def add_device_container_mapping(job_id, device_info, container, container_type="lxc"):
    """Record a device_info -> container mapping for *job_id* on disk."""
    validate_device_info(device_info)
    entry = {
        "device_info": device_info,
        "container": container,
        "container_type": container_type,
        "job_id": job_id,
    }
    mapping_path = get_mapping_path(job_id)
    existing = load_mapping_data(mapping_path)
    # remove old mappings for the same device_info
    mappings = [old for old in existing if old["device_info"] != entry["device_info"]]
    mappings.append(entry)
    os.makedirs(os.path.dirname(mapping_path), exist_ok=True)
    with open(mapping_path, "w") as f:
        f.write(yaml_dump(mappings))
def add_device_container_mapping(job_id, device_info, container,
                                 container_type="lxc", logging_info=None):
    """
    Persist a device-to-container mapping for a job.

    :param job_id: job identifier used to locate the mapping file.
    :param device_info: dict describing the device being mapped.
    :param container: name of the container the device is assigned to.
    :param container_type: container technology (default "lxc").
    :param logging_info: optional dict of logging metadata; treated as {}
        when omitted.
    """
    # Avoid the shared-mutable-default pitfall: the previous `logging_info={}`
    # default was a single dict object shared across all calls, so a mutation
    # by one caller would leak into later calls. None-then-replace keeps the
    # observable behaviour ({} stored when the argument is omitted).
    if logging_info is None:
        logging_info = {}
    data = {
        "device_info": device_info,
        "container": container,
        "container_type": container_type,
        "logging_info": logging_info,
    }
    logger = logging.getLogger("dispatcher")
    mapping_path = get_mapping_path(job_id)
    os.makedirs(os.path.dirname(mapping_path), exist_ok=True)
    with open(mapping_path, "w") as f:
        f.write(yaml_dump(data))
    logger.info(
        "Added mapping for {device_info} to {container_type} container {container}"
        .format(**data))
def get_testjob_suites_list_yaml(self, job_id):
    """
    Name
    ----
    `get_testjob_suites_list_yaml` (`job_id`)

    Description
    -----------
    Get the test suites list from job results of given job id in YAML format.

    Arguments
    ---------
    `job_id`: string
        Job id for which the test suites are required.

    Return value
    ------------
    This function returns an XML-RPC structures of test suites list from
    job results in YAML format, provided the user is authenticated with
    an username and token.
    """
    self._authenticate()
    if not job_id:
        raise xmlrpc.client.Fault(
            400, "Bad request: TestJob id was not specified.")
    try:
        job = TestJob.get_by_job_number(job_id)
        if not job.can_view(self.user):
            raise xmlrpc.client.Fault(
                401, "Permission denied for user to job %s" % job_id)
        yaml_list = [
            export_testsuite(suite) for suite in job.testsuite_set.all()
        ]
    except TestJob.DoesNotExist:
        raise xmlrpc.client.Fault(404, "Specified job not found.")
    return yaml_dump(yaml_list)
def test_job_multi(self):
    """Metadata mapping must work for a multi-definition test job."""
    MetaType.objects.all().delete()
    # Load the job definition shipped alongside this test module.
    multi_test_file = os.path.join(os.path.dirname(__file__), "multi-test.yaml")
    self.assertTrue(os.path.exists(multi_test_file))
    with open(multi_test_file, "r") as test_support:
        data = test_support.read()
    job = TestJob.from_yaml_and_user(data, self.user)
    job_def = yaml_safe_load(job.definition)
    job_ctx = job_def.get("context", {})
    job_ctx.update(
        {"no_kvm": True}
    )  # override to allow unit tests on all types of systems
    device = Device.objects.get(hostname="fakeqemu1")
    device_config = device.load_configuration(job_ctx)  # raw dict
    parser = JobParser()
    obj = PipelineDevice(device_config)
    pipeline_job = parser.parse(job.definition, obj, job.id, None, "")
    allow_missing_path(
        pipeline_job.pipeline.validate_actions, self, "qemu-system-x86_64"
    )
    pipeline = pipeline_job.describe()
    # Must not raise for a job with multiple test definitions.
    map_metadata(yaml_dump(pipeline), job)
def add_device_container_mapping(job_id, device_info, container, container_type="lxc"):
    """Record a device_info -> container mapping for *job_id* and log it."""
    validate_device_info(device_info)
    entry = {
        "device_info": device_info,
        "container": container,
        "container_type": container_type,
        "job_id": job_id,
    }
    logger = logging.getLogger("dispatcher")
    mapping_path = get_mapping_path(job_id)
    existing = load_mapping_data(mapping_path)
    # remove old mappings for the same device_info
    mappings = [old for old in existing if old["device_info"] != entry["device_info"]]
    mappings.append(entry)
    os.makedirs(os.path.dirname(mapping_path), exist_ok=True)
    with open(mapping_path, "w") as f:
        f.write(yaml_dump(mappings))
    logger.info(
        "Added mapping for {device_info} to {container_type} container {container}".format(
            **entry
        )
    )
def map_scanned_results(results, job, starttc, endtc, meta_filename):
    """
    Sanity checker on the logged results dictionary

    :param results: results logged via the slave
    :param job: the current test job
    :param starttc: start log line number for the test case
    :param endtc: end log line number for the test case
    :param meta_filename: YAML store for results metadata
    :return: the TestCase object that should be saved to the database.
             None on error.
    """
    logger = logging.getLogger("lava-master")
    # The payload must be a dict with at least definition/case/result keys.
    if not isinstance(results, dict):
        append_failure_comment(
            job, "[%d] %s is not a dictionary" % (job.id, results))
        return None
    if not {"definition", "case", "result"}.issubset(set(results.keys())):
        append_failure_comment(
            job,
            'Missing some keys ("definition", "case" or "result") in %s'
            % results)
        return None
    # "extra" data lives in the separate metadata store; only the filename
    # is kept in the result metadata.
    if "extra" in results:
        results["extra"] = meta_filename
    metadata = yaml_dump(results)
    if len(metadata) > 4096:  # bug 2471 - test_length unit test
        msg = "[%d] Result metadata is too long. %s" % (job.id, metadata)
        logger.warning(msg)
        append_failure_comment(job, msg)
        # Manually strip the results to keep some data
        stripped_results = {
            "case": results["case"],
            "definition": results["definition"],
            "result": results["result"],
        }
        if "error_type" in results:
            stripped_results["error_type"] = results["error_type"]
        metadata = yaml_dump(stripped_results)
        # Still too long even stripped: store no metadata at all.
        if len(metadata) > 4096:
            metadata = ""
    suite, _ = TestSuite.objects.get_or_create(name=results["definition"], job=job)
    testset = _check_for_testset(results, suite)
    name = results["case"].strip()
    test_case = None
    if suite.name == "lava":
        # Internal "lava" suite: results must map via RESULT_MAP and may
        # carry a duration measured in seconds.
        try:
            result_val = TestCase.RESULT_MAP[results["result"]]
        except KeyError:
            logger.error(
                '[%d] Unable to MAP result "%s"', job.id, results["result"])
            return None
        measurement = None
        units = ""
        if "duration" in results:
            measurement = results["duration"]
            units = "seconds"
        test_case = TestCase(
            name=name,
            suite=suite,
            test_set=testset,
            metadata=metadata,
            measurement=measurement,
            units=units,
            result=result_val,
        )
    else:
        # User test suites: optional measurement/units, plus log line range.
        result = results["result"]
        measurement = None
        units = ""
        if testset:
            logger.debug("%s/%s/%s %s", suite, testset, name, result)
        else:
            logger.debug("%s/%s %s", suite, name, result)
        if "measurement" in results:
            measurement = results["measurement"]
        if "units" in results:
            units = results["units"]
            logger.debug("%s/%s %s%s", suite, name, measurement, units)
        if result not in TestCase.RESULT_MAP:
            logger.warning(
                "[%d] Unrecognised result: '%s' for test case '%s'",
                job.id,
                result,
                name,
            )
            return None
        try:
            test_case = TestCase(
                name=name,
                suite=suite,
                test_set=testset,
                result=TestCase.RESULT_MAP[result],
                metadata=metadata,
                measurement=measurement,
                units=units,
                start_log_line=starttc,
                end_log_line=endtc,
            )
        except decimal.InvalidOperation:
            # Non-numeric measurement: log and fall through returning None.
            logger.exception("[%d] Unable to create test case %s", job.id, name)
    return test_case
def open(self, job):
    """Return a binary stream over the YAML dump of this job's log docs."""
    payload = yaml_dump(self._get_docs(job)).encode("utf-8")
    stream = io.BytesIO(payload)
    stream.seek(0)
    return stream
def handle(self, *args, **options):
    """Entry point of the lava-master command: set up logging, privileges,
    worker state and ZMQ sockets, then run the main loop until exit."""
    # Initialize logging.
    self.setup_logging("lava-master", options["level"], options["log_file"], FORMAT)

    self.logger.info("[INIT] Starting lava-master")
    self.logger.info("[INIT] Version %s", __version__)
    self.logger.info("[INIT] Using protocol version %d", PROTOCOL_VERSION)

    self.logger.info("[INIT] Dropping privileges")
    if not self.drop_privileges(options["user"], options["group"]):
        self.logger.error("[INIT] Unable to drop privileges")
        return

    # Dump the effective configuration for debugging purposes.
    filename = os.path.join(settings.MEDIA_ROOT, "lava-master-config.yaml")
    self.logger.debug("[INIT] Dumping config to %s", filename)
    with open(filename, "w") as output:
        yaml_dump(options, output)

    self.logger.info("[INIT] Marking all workers as offline")
    with transaction.atomic():
        for worker in Worker.objects.select_for_update().all():
            worker.go_state_offline()
            worker.save()

    # Create the sockets
    context = zmq.Context()
    self.controler = context.socket(zmq.ROUTER)
    self.event_socket = context.socket(zmq.SUB)

    if options["ipv6"]:
        self.logger.info("[INIT] Enabling IPv6")
        self.controler.setsockopt(zmq.IPV6, 1)
        self.event_socket.setsockopt(zmq.IPV6, 1)

    if options["encrypt"]:
        self.logger.info("[INIT] Starting encryption")
        try:
            self.auth = ThreadAuthenticator(context)
            self.auth.start()
            self.logger.debug("[INIT] Opening master certificate: %s",
                              options["master_cert"])
            master_public, master_secret = zmq.auth.load_certificate(
                options["master_cert"])
            self.logger.debug("[INIT] Using slaves certificates from: %s",
                              options["slaves_certs"])
            self.auth.configure_curve(domain="*", location=options["slaves_certs"])
        except OSError as err:
            self.logger.error(err)
            self.auth.stop()
            return
        self.controler.curve_publickey = master_public
        self.controler.curve_secretkey = master_secret
        self.controler.curve_server = True

        # Watch the certificates directory so new slave certs are picked up.
        self.logger.debug("[INIT] Watching %s", options["slaves_certs"])
        self.inotify_fd = watch_directory(options["slaves_certs"])
        if self.inotify_fd is None:
            self.logger.error("[INIT] Unable to start inotify")

    self.controler.setsockopt(zmq.IDENTITY, b"master")
    # From http://api.zeromq.org/4-2:zmq-setsockopt#toc42
    # "If two clients use the same identity when connecting to a ROUTER
    # [...] the ROUTER socket shall hand-over the connection to the new
    # client and disconnect the existing one."
    self.controler.setsockopt(zmq.ROUTER_HANDOVER, 1)
    self.controler.bind(options["master_socket"])

    # Set the topic and connect
    self.event_socket.setsockopt(zmq.SUBSCRIBE, b(settings.EVENT_TOPIC))
    self.event_socket.connect(options["event_url"])

    # Poll on the sockets. This allow to have a
    # nice timeout along with polling.
    self.poller = zmq.Poller()
    self.poller.register(self.controler, zmq.POLLIN)
    self.poller.register(self.event_socket, zmq.POLLIN)
    if self.inotify_fd is not None:
        self.poller.register(os.fdopen(self.inotify_fd), zmq.POLLIN)

    # Translate signals into zmq messages
    (self.pipe_r, _) = self.setup_zmq_signal_handler()
    self.poller.register(self.pipe_r, zmq.POLLIN)

    self.logger.info("[INIT] Starting main loop")

    try:
        self.main_loop(options)
    except BaseException as exc:
        self.logger.error("[CLOSE] Unknown exception raised, leaving!")
        self.logger.exception(exc)
    finally:
        # Drop controler socket: the protocol does handle lost messages
        self.logger.info(
            "[CLOSE] Closing the controler socket and dropping messages")
        self.controler.close(linger=0)
        self.event_socket.close(linger=0)
        if options["encrypt"]:
            self.auth.stop()
        context.term()
def read(self, job, start=0, end=None):
    """Return the requested slice of the job log as YAML ("" when empty)."""
    docs = self._get_docs(job, start, end)
    return yaml_dump(docs) if docs else ""
def handle(self, *args, **options):
    """Entry point of the lava-logs command: set up logging, privileges and
    ZMQ sockets, run the main loop, then drain the log queue on exit."""
    # Initialize logging.
    self.setup_logging("lava-logs", options["level"], options["log_file"], FORMAT)

    self.logger.info("[INIT] Starting lava-logs")
    self.logger.info("[INIT] Version %s", __version__)

    self.logger.info("[INIT] Dropping privileges")
    if not self.drop_privileges(options["user"], options["group"]):
        self.logger.error("[INIT] Unable to drop privileges")
        return

    # Dump the effective configuration for debugging purposes.
    filename = os.path.join(settings.MEDIA_ROOT, "lava-logs-config.yaml")
    self.logger.debug("[INIT] Dumping config to %s", filename)
    with open(filename, "w") as output:
        yaml_dump(options, output)

    # Create the sockets
    context = zmq.Context()
    self.log_socket = context.socket(zmq.PULL)
    self.controler = context.socket(zmq.ROUTER)
    self.controler.setsockopt(zmq.IDENTITY, b"lava-logs")
    # Limit the number of messages in the queue
    self.controler.setsockopt(zmq.SNDHWM, 2)
    # From http://api.zeromq.org/4-2:zmq-setsockopt#toc5
    # "Immediately readies that connection for data transfer with the master"
    self.controler.setsockopt(zmq.CONNECT_RID, b"master")

    if options["ipv6"]:
        self.logger.info("[INIT] Enabling IPv6")
        self.log_socket.setsockopt(zmq.IPV6, 1)
        self.controler.setsockopt(zmq.IPV6, 1)

    if options["encrypt"]:
        self.logger.info("[INIT] Starting encryption")
        try:
            self.auth = ThreadAuthenticator(context)
            self.auth.start()
            self.logger.debug("[INIT] Opening master certificate: %s",
                              options["master_cert"])
            master_public, master_secret = zmq.auth.load_certificate(
                options["master_cert"])
            self.logger.debug("[INIT] Using slaves certificates from: %s",
                              options["slaves_certs"])
            self.auth.configure_curve(domain="*", location=options["slaves_certs"])
        except OSError as err:
            self.logger.error("[INIT] %s", err)
            self.auth.stop()
            return
        self.log_socket.curve_publickey = master_public
        self.log_socket.curve_secretkey = master_secret
        self.log_socket.curve_server = True
        self.controler.curve_publickey = master_public
        self.controler.curve_secretkey = master_secret
        self.controler.curve_serverkey = master_public

        # Watch the certificates directory so new slave certs are picked up.
        self.logger.debug("[INIT] Watching %s", options["slaves_certs"])
        self.cert_dir_path = options["slaves_certs"]
        self.inotify_fd = watch_directory(options["slaves_certs"])
        if self.inotify_fd is None:
            self.logger.error("[INIT] Unable to start inotify")

    self.log_socket.bind(options["socket"])
    self.controler.connect(options["master_socket"])

    # Poll on the sockets. This allow to have a
    # nice timeout along with polling.
    self.poller = zmq.Poller()
    self.poller.register(self.log_socket, zmq.POLLIN)
    self.poller.register(self.controler, zmq.POLLIN)
    if self.inotify_fd is not None:
        self.poller.register(os.fdopen(self.inotify_fd), zmq.POLLIN)

    # Translate signals into zmq messages
    (self.pipe_r, _) = self.setup_zmq_signal_handler()
    self.poller.register(self.pipe_r, zmq.POLLIN)

    self.logger.info("[INIT] listening for logs")
    # PING right now: the master is waiting for this message to start
    # scheduling.
    self.controler.send_multipart([b"master", b"PING"])

    try:
        self.main_loop()
    except BaseException as exc:
        self.logger.error("[EXIT] Unknown exception raised, leaving!")
        self.logger.exception(exc)

    # Close the controler socket
    self.controler.close(linger=0)
    self.poller.unregister(self.controler)

    # Carefully close the logging socket as we don't want to lose messages
    self.logger.info(
        "[EXIT] Disconnect logging socket and process messages")
    endpoint = u(self.log_socket.getsockopt(zmq.LAST_ENDPOINT))
    self.logger.debug("[EXIT] unbinding from '%s'", endpoint)
    self.log_socket.unbind(endpoint)

    # Empty the queue
    try:
        while self.wait_for_messages(True):
            # Flush test cases cache for every iteration because we might
            # get killed soon.
            self.flush_test_cases()
    except BaseException as exc:
        self.logger.error("[EXIT] Unknown exception raised, leaving!")
        self.logger.exception(exc)
    finally:
        # Last flush
        self.flush_test_cases()
        self.logger.info(
            "[EXIT] Closing the logging socket: the queue is empty")
        self.log_socket.close()
        if options["encrypt"]:
            self.auth.stop()
        context.term()
def size(self, job, start=0, end=None):
    """Return the UTF-8 byte size of the YAML dump of the requested slice."""
    dumped = yaml_dump(self._get_docs(job, start, end))
    return len(dumped.encode("utf-8"))
def test_elasticsearch_logs(mocker, logs_elasticsearch):
    """Exercise the Elasticsearch log backend against mocked requests."""
    job = mocker.Mock()
    job.id = 1
    post = mocker.MagicMock()
    get = mocker.MagicMock()
    get_ret_val = mocker.Mock()
    # Test with empty object first.
    get_ret_val.text = "{}"
    get.return_value = get_ret_val
    mocker.patch("requests.get", get)
    result = logs_elasticsearch.read(job)
    assert result == ""
    # Normal test.
    # Canned Elasticsearch search response with two hits (epoch-ms "dt").
    get_ret_val.text = '{"hits":{"hits":[{"_source":{"dt": 1585165476209, "lvl": "info", "msg": "first message"}}, {"_source":{"dt": 1585165476210, "lvl": "info", "msg": "second message"}}]}}'
    get.return_value = get_ret_val
    mocker.patch("requests.get", get)
    mocker.patch("requests.post", post)
    line = '- {"dt": "2020-03-25T19:44:36.209", "lvl": "info", "msg": "lava-dispatcher, installed at version: 2020.02"}'
    # write() must convert the timestamp to epoch-ms, append job_id and POST
    # the document to the configured index.
    logs_elasticsearch.write(job, line)
    post.assert_called_with(
        "%s%s/_doc/" % (settings.ELASTICSEARCH_URI, settings.ELASTICSEARCH_INDEX),
        data=
        '{"dt": 1585165476209, "lvl": "info", "msg": "lava-dispatcher, installed at version: 2020.02", "job_id": 1}',
        headers={"Content-type": "application/json"},
    )  # nosec
    # read() must convert epoch-ms timestamps back to ISO format.
    result = yaml_load(logs_elasticsearch.read(job))
    assert len(result) == 2  # nosec
    assert result == [
        {
            "dt": "2020-03-25T19:44:36.209000",
            "lvl": "info",
            "msg": "first message"
        },
        {
            "dt": "2020-03-25T19:44:36.210000",
            "lvl": "info",
            "msg": "second message"
        },
    ]  # nosec
    # size of get_ret_val in bytes
    assert logs_elasticsearch.size(job) == 137  # nosec
    # open() must return a byte stream with the same YAML dump.
    assert (logs_elasticsearch.open(job).read() == yaml_dump([
        {
            "dt": "2020-03-25T19:44:36.209000",
            "lvl": "info",
            "msg": "first message",
        },
        {
            "dt": "2020-03-25T19:44:36.210000",
            "lvl": "info",
            "msg": "second message",
        },
    ]).encode("utf-8"))
def test_case_stream():
    """Yield one YAML document (a single-item list) per test case in `suites`."""
    for test_suite in suites:
        yield from (
            yaml_dump([export_testcase(case)])
            for case in test_suite.testcase_set.all()
        )