def test_contains():
    histo = histogram.Histogram()
    histo.add(10)
    histo.add(20)

    assert 10 in histo
    assert 20 in histo
    assert 30 not in histo
def test_percentile():
    histo = histogram.Histogram()
    for i in range(101, 201):
        histo.add(i)

    assert histo.percentile(100) == 200
    assert histo.percentile(101) == 200
    assert histo.percentile(99) == 199
    assert histo.percentile(1) == 101
def __init__(self, client, subscription, flow_control=types.FlowControl(), scheduler=None):
    self._client = client
    self._subscription = subscription
    self._flow_control = flow_control
    self._ack_histogram = histogram.Histogram()
    self._last_histogram_size = 0
    self._ack_deadline = 10
    self._rpc = None
    self._callback = None
    self._closing = threading.Lock()
    self._closed = False
    self._close_callbacks = []

    if scheduler is None:
        self._scheduler = (
            google.cloud.pubsub_v1.subscriber.scheduler.ThreadScheduler())
    else:
        self._scheduler = scheduler

    # The threads created in ``.open()``.
    self._dispatcher = None
    self._leaser = None
    self._consumer = None
    self._heartbeater = None
def test_percentile():
    histo = histogram.Histogram()

    # Default when the histogram is empty.
    assert histo.percentile(42) == histogram.MIN_ACK_DEADLINE

    for i in range(101, 201):
        histo.add(i)

    assert histo.percentile(100) == 200
    assert histo.percentile(101) == 200
    assert histo.percentile(99) == 199
    assert histo.percentile(1) == 101
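# The assertions above pin down the percentile semantics: an empty histogram
# falls back to the minimum ack deadline, and otherwise the requested
# percentile is resolved against the recorded buckets (percentile(1) == 101,
# percentile(100) == 200 for samples 101..200). The following is a minimal
# sketch of a percentile() consistent with those expectations, assuming
# ``self._data`` maps values to counts and ``len(self)`` reports the total
# number of recorded samples; it is an illustration, not necessarily the
# library's exact implementation.
def percentile(self, percent):
    # Anything at or above 100 is treated as the 100th percentile.
    if percent >= 100:
        percent = 100
    # Budget of samples that are allowed to lie above the answer.
    target = len(self) - len(self) * (percent / 100)
    # Walk the buckets from the largest value downward until that budget
    # is exhausted; the bucket that exhausts it is the percentile value.
    for value in sorted(self._data.keys(), reverse=True):
        target -= self._data[value]
        if target < 0:
            return value
    # No samples recorded: fall back to the minimum ack deadline.
    return MIN_ACK_DEADLINE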
def test_min():
    histo = histogram.Histogram()
    assert histo.min == histogram.MIN_ACK_DEADLINE

    histo.add(60)
    assert histo.min == 60
    histo.add(30)
    assert histo.min == 30
    histo.add(120)
    assert histo.min == 30
def test_max():
    histo = histogram.Histogram()
    assert histo.max == 600

    histo.add(120)
    assert histo.max == 120
    histo.add(150)
    assert histo.max == 150
    histo.add(20)
    assert histo.max == 150
def test_min():
    histo = histogram.Histogram()
    assert histo.min == 10

    histo.add(60)
    assert histo.min == 60
    histo.add(30)
    assert histo.min == 30
    histo.add(120)
    assert histo.min == 30
def create_manager(flow_control=types.FlowControl()):
    manager = mock.create_autospec(
        streaming_pull_manager.StreamingPullManager, instance=True)
    manager.dispatcher = mock.create_autospec(dispatcher.Dispatcher, instance=True)
    manager.is_active = True
    manager.flow_control = flow_control
    manager.ack_histogram = histogram.Histogram()
    return manager
def test_max():
    histo = histogram.Histogram()
    assert histo.max == histogram.MAX_ACK_DEADLINE

    histo.add(120)
    assert histo.max == 120
    histo.add(150)
    assert histo.max == 150
    histo.add(20)
    assert histo.max == 150
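# Both test_min and test_max expect an empty histogram to report the extreme
# allowed deadlines (MIN_ACK_DEADLINE / MAX_ACK_DEADLINE) rather than raising.
# A hedged sketch of properties with that behaviour, assuming the same
# ``self._data`` value-to-count mapping used in the tests:
@property
def min(self):
    # Smallest recorded value, or the minimum ack deadline when empty.
    if not self._data:
        return MIN_ACK_DEADLINE
    return min(self._data)

@property
def max(self):
    # Largest recorded value, or the maximum ack deadline when empty.
    if not self._data:
        return MAX_ACK_DEADLINE
    return max(self._data)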
def __init__(
    self,
    client,
    subscription,
    flow_control=types.FlowControl(),
    scheduler=None,
    use_legacy_flow_control=False,
    await_callbacks_on_shutdown=False,
):
    self._client = client
    self._subscription = subscription
    self._flow_control = flow_control
    self._use_legacy_flow_control = use_legacy_flow_control
    self._await_callbacks_on_shutdown = await_callbacks_on_shutdown
    self._ack_histogram = histogram.Histogram()
    self._last_histogram_size = 0
    self._ack_deadline = 10
    self._rpc = None
    self._callback = None
    self._closing = threading.Lock()
    self._closed = False
    self._close_callbacks = []
    self._regular_shutdown_thread = None  # Created on intentional shutdown.

    # Generate a random client id tied to this object. All streaming pull
    # connections (initial and re-connects) will then use the same client
    # id. Doing so lets the server establish affinity even across stream
    # disconnections.
    self._client_id = str(uuid.uuid4())

    if scheduler is None:
        self._scheduler = (
            google.cloud.pubsub_v1.subscriber.scheduler.ThreadScheduler())
    else:
        self._scheduler = scheduler

    # A collection for the messages that have been received from the server,
    # but not yet sent to the user callback.
    self._messages_on_hold = messages_on_hold.MessagesOnHold()

    # The total number of bytes consumed by the messages currently on hold.
    self._on_hold_bytes = 0

    # A lock ensuring that pausing / resuming the consumer are both atomic
    # operations that cannot be executed concurrently. Needed for properly
    # syncing these operations with the current leaser load. Additionally,
    # the lock is used to protect modifications of internal data that
    # affects the load computation, i.e. the count and size of the messages
    # currently on hold.
    self._pause_resume_lock = threading.Lock()

    # The threads created in ``.open()``.
    self._dispatcher = None
    self._leaser = None
    self._consumer = None
    self._heartbeater = None
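# For illustration only: constructing the manager above directly with explicit
# flow control settings. The subscriber client and subscription path here are
# assumed placeholders, not values taken from this module; in normal use the
# manager is created for you by the subscriber client's subscribe() call.
from google.cloud.pubsub_v1 import types

flow_control = types.FlowControl(max_messages=100, max_bytes=10 * 1024 * 1024)
manager = StreamingPullManager(
    subscriber_client,
    "projects/my-project/subscriptions/my-subscription",
    flow_control=flow_control,
    await_callbacks_on_shutdown=True,
)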
def __init__(self, client, subscription, flow_control=types.FlowControl(), scheduler=None):
    self._client = client
    self._subscription = subscription
    self._flow_control = flow_control
    self._ack_histogram = histogram.Histogram()
    self._last_histogram_size = 0
    self._ack_deadline = 10
    self._rpc = None
    self._callback = None
    self._closing = threading.Lock()
    self._closed = False
    self._close_callbacks = []

    if scheduler is None:
        self._scheduler = (
            google.cloud.pubsub_v1.subscriber.scheduler.ThreadScheduler())
    else:
        self._scheduler = scheduler

    # A FIFO queue for the messages that have been received from the server,
    # but not yet added to the lease management (and not sent to user callback),
    # because the FlowControl limits have been hit.
    self._messages_on_hold = queue.Queue()

    # The total number of bytes consumed by the messages currently on hold.
    self._on_hold_bytes = 0

    # A lock ensuring that pausing / resuming the consumer are both atomic
    # operations that cannot be executed concurrently. Needed for properly
    # syncing these operations with the current leaser load. Additionally,
    # the lock is used to protect modifications of internal data that
    # affects the load computation, i.e. the count and size of the messages
    # currently on hold.
    self._pause_resume_lock = threading.Lock()

    # The threads created in ``.open()``.
    self._dispatcher = None
    self._leaser = None
    self._consumer = None
    self._heartbeater = None
def __init__(self, client, subscription, flow_control=types.FlowControl(), histogram_data=None):
    self._client = client
    self._subscription = subscription
    self._consumer = _consumer.Consumer()
    self._ack_deadline = 10
    self._last_histogram_size = 0
    self._future = None
    self.flow_control = flow_control

    self.histogram = histogram.Histogram(data=histogram_data)
    """.Histogram: the histogram tracking ack latency."""

    self.leased_messages = {}
    """dict[str, float]: A mapping of ack IDs to the local time when the
    ack ID was initially leased in seconds since the epoch."""

    # These are for internal flow control tracking.
    # They should not need to be used by subclasses.
    self._bytes = 0
    self._ack_on_resume = set()
def test_add_lower_limit():
    histo = histogram.Histogram()
    low_value = histogram.MIN_ACK_DEADLINE - 1
    histo.add(low_value)

    assert low_value not in histo
    assert histogram.MIN_ACK_DEADLINE in histo
def test_add_upper_limit():
    histo = histogram.Histogram()
    histo.add(12000)

    assert 12000 not in histo
    assert 600 in histo
def test_add_lower_limit():
    histo = histogram.Histogram()
    histo.add(5)

    assert 5 not in histo
    assert 10 in histo
def test_add():
    histo = histogram.Histogram()
    histo.add(60)
    assert histo._data[60] == 1
    histo.add(60)
    assert histo._data[60] == 2
def test_add_upper_limit():
    histo = histogram.Histogram()
    high_value = histogram.MAX_ACK_DEADLINE + 1
    histo.add(high_value)

    assert high_value not in histo
    assert histogram.MAX_ACK_DEADLINE in histo
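# The add_lower_limit / add_upper_limit tests encode the clamping behaviour:
# out-of-range values are folded into the nearest allowed ack deadline instead
# of being stored verbatim, and test_add expects a simple per-value count in
# ``_data``. A minimal add() consistent with that (an illustrative sketch, not
# necessarily the library's exact implementation):
def add(self, value):
    # Clamp the sample into the allowed ack deadline range.
    if value < MIN_ACK_DEADLINE:
        value = MIN_ACK_DEADLINE
    elif value > MAX_ACK_DEADLINE:
        value = MAX_ACK_DEADLINE
    # Bucket the (clamped) value by incrementing its count.
    self._data.setdefault(value, 0)
    self._data[value] += 1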
def test_init():
    data = {}
    histo = histogram.Histogram(data=data)

    assert histo._data is data
    assert len(histo) == 0
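# test_init expects the histogram to adopt a caller-supplied dict as its
# backing store, the ``in`` checks above treat membership as a lookup against
# the recorded buckets, and test_min/test_max fix the deadline bounds at 10
# and 600 seconds. A small module-level sketch that satisfies those
# assertions, assuming len() reports the total number of samples:
MIN_ACK_DEADLINE = 10
MAX_ACK_DEADLINE = 600


class Histogram:
    def __init__(self, data=None):
        # Use the provided mapping as-is so callers can share or persist it.
        if data is None:
            data = {}
        self._data = data

    def __len__(self):
        # Total number of samples across all buckets.
        return sum(self._data.values())

    def __contains__(self, needle):
        # Membership means "a sample with this (clamped) value was recorded".
        return needle in self._data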