def handle_connection(self, cl_socket):
    """
    Handle socket connection.

    :param cl_socket:
    :return:
    """
    service = None
    while True:
        cmd, msg = retrieve_msg(cl_socket)
        if cmd == b'I':
            resp = service.predict(msg)
            cl_socket.send(resp)
        elif cmd == b'L':
            service, result, code = self.load_model(msg)
            resp = bytearray()
            resp += create_load_model_response(code, result)
            cl_socket.send(resp)
            if code != 200:
                raise RuntimeError("{} - {}".format(code, result))
        else:
            raise ValueError("Received unknown command: {}".format(cmd))

        if service is not None and service.context is not None and service.context.metrics is not None:
            emit_metrics(service.context.metrics.store)

def handle_connection(self, cl_socket):
    """
    Handle socket connection.

    :param cl_socket:
    :return:
    """
    logging.basicConfig(stream=sys.stdout, format="%(message)s", level=logging.INFO)
    cl_socket.setblocking(True)
    while True:
        cmd, msg = retrieve_msg(cl_socket)
        if cmd == b'I':
            resp = self.service.predict(msg)
            cl_socket.send(resp)
        elif cmd == b'L':
            result, code = self.load_model(msg)
            resp = bytearray()
            resp += create_load_model_response(code, result)
            cl_socket.send(resp)
            self._remap_io()
            if code != 200:
                raise RuntimeError("{} - {}".format(code, result))
        else:
            raise ValueError("Received unknown command: {}".format(cmd))

        if self.service is not None and self.service.context is not None \
                and self.service.context.metrics is not None:
            emit_metrics(self.service.context.metrics.store)

def load(self, model_name, model_dir, handler, gpu_id, batch_size):
    """
    Load MMS 1.0 model from file.

    :param model_name:
    :param model_dir:
    :param handler:
    :param gpu_id:
    :param batch_size:
    :return:
    """
    logging.debug("Loading model - working dir: %s", os.getcwd())
    # TODO: Request ID is not given. UUID is a temp UUID.
    metrics = MetricsStore(uuid.uuid4(), model_name)
    manifest_file = os.path.join(model_dir, "MAR-INF/MANIFEST.json")
    manifest = None
    if os.path.exists(manifest_file):
        with open(manifest_file) as f:
            manifest = json.load(f)

    temp = handler.split(":", 1)
    module_name = temp[0]
    function_name = None if len(temp) == 1 else temp[1]
    if module_name.endswith(".py"):
        module_name = module_name[:-3]
    module = importlib.import_module(module_name)
    if module is None:
        raise ValueError("Unable to load module {}, make sure it is added to python path".format(module_name))
    if function_name is None:
        function_name = "handle"
    if hasattr(module, function_name):
        entry_point = getattr(module, function_name)
        service = Service(model_name, model_dir, manifest, entry_point, gpu_id, batch_size)

        service.context.metrics = metrics
        # initialize model at load time
        entry_point(None, service.context)
    else:
        model_class_definitions = ModelLoader.list_model_services(module)
        if len(model_class_definitions) != 1:
            raise ValueError("Expected only one class in custom service code or a function entry point")

        model_class = model_class_definitions[0]
        model_service = model_class()
        handle = getattr(model_service, "handle")
        if handle is None:
            raise ValueError("Expect handle method in class {}".format(str(model_class)))

        service = Service(model_name, model_dir, manifest, model_service.handle, gpu_id, batch_size)
        initialize = getattr(model_service, "initialize")
        if initialize is not None:
            # noinspection PyBroadException
            try:
                model_service.initialize(service.context)
            # pylint: disable=broad-except
            except Exception:
                sys.exc_clear()

    emit_metrics(metrics.store)
    return service

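# A minimal sketch of a custom handler module that the load() method above could
# import, shown for illustration only. The module name and the method bodies are
# assumptions; the loader itself only requires either a module-level "handle"
# function or a single service class exposing handle(data, context) and,
# optionally, initialize(context). In practice the two variants below would live
# in separate handler modules, since the loader prefers the module-level
# function when both are present.

# Variant 1: function entry point. The loader calls handle(None, context) once
# at load time, so the function must tolerate data=None.
def handle(data, context):
    if data is None:
        # load-time initialization call
        return None
    return data


# Variant 2: class-based service. The loader instantiates the single class it
# finds in the module, calls initialize(context) once, and registers its
# handle method as the entry point for inference requests.
class MyModelService(object):
    def __init__(self):
        self.initialized = False

    def initialize(self, context):
        # one-time setup at load time, e.g. reading model artifacts
        self.initialized = True

    def handle(self, data, context):
        if not self.initialized:
            self.initialize(context)
        return data
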
def test_emit_metrics(self, caplog):
    caplog.set_level(logging.INFO)
    metrics = {'test_emit_metrics': True}
    emit_metrics(metrics)
    assert "[METRICS]" in caplog.text

def test_metrics(caplog):
    """
    Test if metric classes methods behave as expected
    Also checks global metric service methods
    """
    caplog.set_level(logging.INFO)
    # Create a batch of request ids
    request_ids = {0: 'abcd', 1: "xyz", 2: "qwerty", 3: "hjshfj"}
    all_req_ids = ','.join(request_ids.values())
    model_name = "dummy model"

    # Create a metrics objects
    metrics = MetricsStore(request_ids, model_name)

    # Counter tests
    metrics.add_counter('CorrectCounter', 1, 1)
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count', 'xyz', model_name)]
    assert 'CorrectCounter' == test_metric.name
    metrics.add_counter('CorrectCounter', 1, 1)
    metrics.add_counter('CorrectCounter', 1, 3)
    metrics.add_counter('CorrectCounter', 1)
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count', all_req_ids, model_name)]
    assert 'CorrectCounter' == test_metric.name
    metrics.add_counter('CorrectCounter', 3)
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count', 'xyz', model_name)]
    assert test_metric.value == 2
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count', 'hjshfj', model_name)]
    assert test_metric.value == 1
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count', all_req_ids, model_name)]
    assert test_metric.value == 4

    # Check what is emitted is correct
    emit_metrics(metrics.store)
    assert "hjshfj" in caplog.text
    assert "ModelName:dummy model" in caplog.text

    # Adding other types of metrics
    # Check for time metric
    with pytest.raises(Exception) as e_info:
        metrics.add_time('WrongTime', 20, 1, 'ns')
    assert "the unit for a timed metric should be one of ['ms', 's']" == e_info.value.args[0]

    metrics.add_time('CorrectTime', 20, 2, 's')
    metrics.add_time('CorrectTime', 20, 0)
    test_metric = metrics.cache[get_model_key('CorrectTime', 'ms', 'abcd', model_name)]
    assert test_metric.value == 20
    assert test_metric.unit == 'Milliseconds'
    test_metric = metrics.cache[get_model_key('CorrectTime', 's', 'qwerty', model_name)]
    assert test_metric.value == 20
    assert test_metric.unit == 'Seconds'

    # Size based metrics
    with pytest.raises(Exception) as e_info:
        metrics.add_size('WrongSize', 20, 1, 'TB')
    assert "The unit for size based metric is one of ['MB','kB', 'GB', 'B']" == e_info.value.args[0]

    metrics.add_size('CorrectSize', 200, 0, 'GB')
    metrics.add_size('CorrectSize', 10, 2)
    test_metric = metrics.cache[get_model_key('CorrectSize', 'GB', 'abcd', model_name)]
    assert test_metric.value == 200
    assert test_metric.unit == 'Gigabytes'
    test_metric = metrics.cache[get_model_key('CorrectSize', 'MB', 'qwerty', model_name)]
    assert test_metric.value == 10
    assert test_metric.unit == 'Megabytes'

    # Check a percentage metric
    metrics.add_percent('CorrectPercent', 20.0, 3)
    test_metric = metrics.cache[get_model_key('CorrectPercent', 'percent', 'hjshfj', model_name)]
    assert test_metric.value == 20.0
    assert test_metric.unit == 'Percent'

    # Check an error metric
    metrics.add_error('CorrectError', 'Wrong values')
    test_metric = metrics.cache[get_error_key('CorrectError', '')]
    assert test_metric.value == 'Wrong values'

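# A short sketch of recording metrics with the same MetricsStore API that the
# test above exercises. The helper name, metric names, and values are
# illustrative assumptions; "metrics" is assumed to be a MetricsStore instance
# created the same way as in test_metrics.
def record_example_metrics(metrics):
    metrics.add_counter('InferenceRequests', 1)       # counter applied across the whole batch
    metrics.add_time('PreprocessTime', 12, 0, 'ms')   # timed metric for request index 0
    metrics.add_size('PayloadSize', 2, 1, 'MB')       # size metric for request index 1
    metrics.add_percent('CacheHitRate', 75.0, 2)      # percentage metric for request index 2
    metrics.add_error('BadInput', 'Invalid payload')  # error metric, not tied to a request
    emit_metrics(metrics.store)                       # log everything recorded so far
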
def test_emit_metrics(self, caplog):
    metrics = {'test_emit_metrics': True}
    emit_metrics(metrics)
    assert "[METRICS]" in caplog.text