def handle_connection(self, cl_socket):
    """
    Handle the socket connection and dispatch incoming commands.

    :param cl_socket: socket on which commands and their payloads arrive
    :return: None
    """
    service = None
    while True:
        if BENCHMARK:
            pr.disable()
            pr.dump_stats('/tmp/tsPythonProfile.prof')
        cmd, msg = retrieve_msg(cl_socket)
        if BENCHMARK:
            pr.enable()
        if cmd == b'I':
            # Inference request; assumes a model was loaded by a prior b'L' command.
            resp = service.predict(msg)
            cl_socket.sendall(resp)
        elif cmd == b'L':
            # Load-model request; send the load status back before failing hard.
            service, result, code = self.load_model(msg)
            resp = bytearray()
            resp += create_load_model_response(code, result)
            cl_socket.sendall(resp)
            if code != 200:
                raise RuntimeError("{} - {}".format(code, result))
        else:
            raise ValueError("Received unknown command: {}".format(cmd))

        if service is not None and service.context is not None and service.context.metrics is not None:
            emit_metrics(service.context.metrics.store)
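
A minimal sketch of how the unknown-command branch above could be exercised in isolation. The module name `worker` and class name `Worker` are placeholders for wherever `handle_connection` actually lives; `retrieve_msg` is stubbed so no real socket traffic is needed.

import pytest
from unittest import mock

import worker  # placeholder: the module assumed to define Worker and retrieve_msg

def test_handle_connection_rejects_unknown_command():
    fake_socket = mock.Mock()  # sendall is never reached on this code path
    # Feed the dispatch loop a command byte that is neither b'I' nor b'L'.
    with mock.patch.object(worker, 'retrieve_msg', return_value=(b'X', b'')):
        with pytest.raises(ValueError):
            worker.Worker.handle_connection(mock.Mock(), fake_socket)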
def test_emit_metrics(self, caplog):
    caplog.set_level(logging.INFO)
    metrics = {'test_emit_metrics': True}
    emit_metrics(metrics)
    assert "[METRICS]" in caplog.text
Example #3
def test_metrics(caplog):
    """
    Test that metric class methods behave as expected.
    Also checks global metric service methods.
    """
    # Create a batch of request ids
    request_ids = {0: 'abcd', 1: 'xyz', 2: 'qwerty', 3: 'hjshfj'}
    all_req_ids = ','.join(request_ids.values())
    model_name = "dummy model"

    # Create a metrics object
    metrics = MetricsStore(request_ids, model_name)

    # Counter tests
    metrics.add_counter('CorrectCounter', 1, 1)
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count', 'xyz',
                                              model_name)]
    assert 'CorrectCounter' == test_metric.name
    metrics.add_counter('CorrectCounter', 1, 1)
    metrics.add_counter('CorrectCounter', 1, 3)
    metrics.add_counter('CorrectCounter', 1)
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count',
                                              all_req_ids, model_name)]
    assert 'CorrectCounter' == test_metric.name
    metrics.add_counter('CorrectCounter', 3)
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count', 'xyz',
                                              model_name)]
    assert test_metric.value == 2
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count',
                                              'hjshfj', model_name)]
    assert test_metric.value == 1
    test_metric = metrics.cache[get_model_key('CorrectCounter', 'count',
                                              all_req_ids, model_name)]
    assert test_metric.value == 4
    # Check what is emitted is correct
    emit_metrics(metrics.store)

    assert "hjshfj" in caplog.text
    assert "ModelName:dummy model" in caplog.text

    # Adding other types of metrics
    # Check for time metric
    with pytest.raises(Exception) as e_info:
        metrics.add_time('WrongTime', 20, 1, 'ns')
    assert "the unit for a timed metric should be one of ['ms', 's']" == e_info.value.args[
        0]

    metrics.add_time('CorrectTime', 20, 2, 's')
    metrics.add_time('CorrectTime', 20, 0)
    test_metric = metrics.cache[get_model_key('CorrectTime', 'ms', 'abcd',
                                              model_name)]
    assert test_metric.value == 20
    assert test_metric.unit == 'Milliseconds'
    test_metric = metrics.cache[get_model_key('CorrectTime', 's', 'qwerty',
                                              model_name)]
    assert test_metric.value == 20
    assert test_metric.unit == 'Seconds'
    # Size based metrics
    with pytest.raises(Exception) as e_info:
        metrics.add_size('WrongSize', 20, 1, 'TB')
    assert "The unit for size based metric is one of ['MB','kB', 'GB', 'B']" == e_info.value.args[
        0]

    metrics.add_size('CorrectSize', 200, 0, 'GB')
    metrics.add_size('CorrectSize', 10, 2)
    test_metric = metrics.cache[get_model_key('CorrectSize', 'GB', 'abcd',
                                              model_name)]
    assert test_metric.value == 200
    assert test_metric.unit == 'Gigabytes'
    test_metric = metrics.cache[get_model_key('CorrectSize', 'MB', 'qwerty',
                                              model_name)]
    assert test_metric.value == 10
    assert test_metric.unit == 'Megabytes'

    # Check a percentage metric
    metrics.add_percent('CorrectPercent', 20.0, 3)
    test_metric = metrics.cache[get_model_key('CorrectPercent', 'percent',
                                              'hjshfj', model_name)]
    assert test_metric.value == 20.0
    assert test_metric.unit == 'Percent'

    # Check an error metric
    metrics.add_error('CorrectError', 'Wrong values')
    test_metric = metrics.cache[get_error_key('CorrectError', '')]
    assert test_metric.value == 'Wrong values'
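
The cache lookups in this test go through get_model_key and get_error_key. A plausible sketch of those helpers is below; the field order and the underscore separator are assumptions inferred from how the test indexes the cache, not the real implementation.

def get_model_key(name, unit, req_id, model_name):
    # Hypothetical layout: one cache entry per (metric, unit, request id, model).
    return '{}_{}_{}_{}'.format(name, unit, req_id, model_name)

def get_error_key(name, req_id):
    # Error metrics are keyed by metric name and a (possibly empty) request id.
    return '{}_{}'.format(name, req_id)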