def attach(self):
    """Attach the current terminal to the tracked container via ``docker attach``.

    No-op when no container is being tracked. The trailing sleep gives the
    attach subprocess time to connect before the caller proceeds.
    """
    if self._container is not None:
        logger.info("Attaching in subprocess to container")
        # Argument list + implicit shell=False: no shell parsing of the
        # container id, per subprocess best practice.
        cmd = ["docker", "attach", self._container.id]
        logger.info(" ".join(cmd))
        subprocess.Popen(cmd)
        time.sleep(5)
def _check_existing_container(self):
    """Scan running containers for one already serving our image on our port.

    Returns:
        (container, True) when a matching container is found,
        (None, False) otherwise.
    """
    port_key = "{}/tcp".format(self.service_port)
    wanted_host_port = "{}".format(self.host_port)
    for candidate in ServiceManager.CLIENT.containers.list():
        if self.docker_image not in candidate.image.tags:
            continue
        # candidate.ports looks like:
        # {'8080/tcp': [{'HostIp': '', 'HostPort': '4000'}]}
        for binding in candidate.ports.get(port_key, []):
            if binding.get("HostPort", "") == wanted_host_port:
                logger.info("Already running container found {}".format(candidate.id))
                return candidate, True
    return None, False
def wait_done(self, max_tries=20):
    """Poll ``self.is_done`` every 2 seconds until the job completes or we give up.

    Args:
        max_tries: maximum number of 2-second polling rounds.

    Returns:
        True when the job completed, False on timeout.
    """
    num_tries = 0
    while (num_tries < max_tries) and (not self.is_done):
        logger.info("Waiting for job to complete")
        num_tries += 1
        time.sleep(2)
    # BUG FIX: decide on is_done itself, not on the try counter. The job may
    # finish exactly on the final allowed try, in which case
    # num_tries == max_tries and the old check wrongly reported a timeout.
    if self.is_done:
        logger.info("Job complete")
        return True
    logger.error("Timeout reached")
    return False
def validate_process(self, payload: dict, expected_response: Optional[dict]):
    """Run one payload through the endpoint and validate both schemas and output.

    Args:
        payload: request body to validate against the input schema and process.
        expected_response: reference output to diff against, or None.

    Returns:
        (response, validation_result) — response is "NO RESPONSE" when the
        payload fails input-schema validation; validation_result maps check
        names to their outcome.
    """
    validation_result = dict()
    logger.info("Validating payload against input schema")
    try:
        jsonschema.validate(payload, self.endpoint_manager.input_schema)
    except (jsonschema.ValidationError, jsonschema.SchemaError) as e:
        logger.error(e)  # was print(); keep validation failures in the log
        validation_result["input_schema"] = False
        return "NO RESPONSE", validation_result
    logger.info("Validation OK")
    validation_result["input_schema"] = True
    logger.info("Computing response")
    response = self.endpoint_manager.process(payload)
    logger.info("Validating response against output schema")
    try:
        # BUG FIX: validate the *response* against the *output* schema; the
        # original re-validated the payload against the input schema.
        # NOTE(review): assumes endpoint_manager exposes output_schema, as the
        # log message implies — confirm against EndpointManager.
        jsonschema.validate(response, self.endpoint_manager.output_schema)
    except (jsonschema.ValidationError, jsonschema.SchemaError) as e:
        logger.error(e)
        validation_result["output_schema"] = False
    else:
        # BUG FIX: only report success when no exception was raised; the
        # original unconditionally overwrote the False set in the handler.
        logger.info("Validation OK")
        validation_result["output_schema"] = True
    expected_response = expected_response or dict()
    # BUG FIX: merge the diff into the schema results instead of discarding
    # them by rebinding validation_result.
    differences = compare_dicts(expected_response, response) or {"NoDifference": True}
    validation_result.update(differences)
    return response, validation_result
def pull(self):
    """Make sure the service image exists locally, pulling it when missing."""
    image = self.docker_image
    try:
        # EAFP: asking the daemon for the image is the cheapest existence check.
        self.CLIENT.images.get(image)
    except docker.errors.ImageNotFound:
        logger.info("Pulling {}".format(image))
        self.CLIENT.images.pull(image)
        logger.info("Pulled image")
    except docker.errors.APIError:
        msg = "Can't pull image {} you should have it in local other it will not work !"
        logger.info(msg.format(image))
def run(self):
    """Start the service container (unless one is tracked) and wait for health.

    Returns:
        The container id, or None when an unrelated service already answers
        at ``self.server_url`` (we refuse to touch a container we don't own).
    """
    if self._container is None and self.is_alive:
        # Something answers on our address but we did not start it — bail out.
        # FIX: logger.warn is a deprecated alias of logger.warning.
        logger.warning("There is a container running at {} but it does not match {}.\n"
                       "We assume that you know what you are doing".format(
                           self.server_url, self.docker_image))
        return None
    if self._container is None:
        self.pull()
        logger.info("Starting container with {} on port {}".format(self.docker_image, self.host_port))
        self._container = self.CLIENT.containers.run(
            self.docker_image,
            ports={self.service_port: self.host_port},
            detach=True,
            remove=True,  # container is cleaned up automatically on stop
            runtime="nvidia" if self.nvidia else None,
            volumes={self.image_volume_path: {
                "bind": self.host_volume_path,
                "mode": "rw",
            }},
        )
        time.sleep(2)
        logger.info("Container {} started, available at {}".format(self._container.id, self.server_url))
    # Healthcheck: poll api/v1/health until the service responds or we give up.
    num_tries = 0
    max_tries = 10
    logger.info("Trying api/v1/health for {}st time".format(num_tries + 1))
    while (num_tries < max_tries) and (not self.is_alive):
        logger.info("Server not yet alive")
        num_tries += 1
        time.sleep(2)
        logger.info("Trying api/v1/health for {}th time".format(num_tries + 1))
    if num_tries < max_tries:
        logger.info("Server alive")
    else:
        logger.error("Timeout reached")
    # Optionally attach the terminal to the freshly started container.
    if self.attach_when_running:
        self.attach()
    return self._container.id
def stop(self):
    """Stop the tracked container; no-op when none is tracked."""
    if self._container is None:
        return
    logger.info("Stopping container {}".format(self._container.id))
    self._container.stop()
def run_all(self, test_resources_path: Path):
    """Run the full test suite against the dockerized service.

    Prepares the test resources, (re)starts the service container, validates
    its describe endpoint, then runs every test case through
    ``validate_process``. All intermediate artifacts (describes, responses,
    results) are dumped to ``self._tmp_test_resources`` and finally copied to
    ``self._test_output``.

    Args:
        test_resources_path: directory containing the test resources.

    Returns:
        dict mapping 'describe' and each test directory stem to its
        validation results.
    """
    tests, expected_describe = self.prepare_resources(test_resources_path)
    if self.nvidia:
        logger.info("Running with nvidia runtime")
    with ServiceManager(
            docker_image=self.docker_image_name,
            nvidia=self.nvidia,
            attach_when_running=True,
    ) as service:
        # Force-restart a pre-existing container so the freshly prepared
        # resources are properly mounted.
        if service._existing_container:
            logger.debug(
                "Force restarting service to ensure resources properly mounted"
            )
            service.stop()
            time.sleep(10)
            service.run()
            time.sleep(5)
        service_tester = ServiceTester(server_url=service.server_url)
        all_results = dict()
        # Save both describes to disk for post-mortem debugging.
        with open(self._tmp_test_resources / "expected_describe.json", "w") as f:
            json.dump(expected_describe, f, indent=2)
        with open(self._tmp_test_resources / "actual_describe.json", "w") as f:
            actual_describe = service_tester.endpoint_manager.describe
            json.dump(actual_describe, f, indent=2)
        # Validate the describe endpoint against the expected describe.
        describe_differences = service_tester.validate_describe(
            expected_describe)
        all_results['describe'] = describe_differences
        # Run every test case: build payload/expected from the test dir,
        # process it and collect the validation results.
        for test_dir in tests:
            payload = PayloadGenerator(images_as_base64=False).generate(
                str(test_dir / "input"))
            expected = PayloadGenerator(images_as_base64=False).generate(
                str(test_dir / "output"))
            response, validation_results = service_tester.validate_process(
                payload, expected)
            all_results[test_dir.stem] = validation_results
            # Serialize the raw response next to the test case for debugging.
            response_path = test_dir / "response"
            with open(test_dir / "response.json", "w") as f:
                json.dump(response, f, indent=2)
            self._serialize_response(response_path, response)
        with open(self._tmp_test_resources / "results.json", "w") as f:
            json.dump(all_results, f, indent=2)
        logger.info("--- Tests Results ---")
        logger.info(json.dumps(all_results, indent=2))
        # NOTE(review): this message reports the *source* directory; the copy
        # destination below is self._test_output — consider fixing the text.
        logger.info("--- Copying tests outputs to {}".format(
            self._tmp_test_resources))
        shutil.rmtree(self._test_output, ignore_errors=True)
        shutil.copytree(self._tmp_test_resources, self._test_output)
        return all_results