Example #1
0
 def reader_release(self):
     """Drop one reader hold; wake a waiting writer when the last reader leaves."""
     with self._lock:
         # There must be at least one active reader and no active writer.
         ASSERT.greater(self._num_readers, 0)
         ASSERT.equal(self._num_writers, 0)
         self._num_readers -= 1
         if not self._num_readers:
             # Last reader out: a blocked writer may now proceed.
             self._writer_cond.notify()
Example #2
0
 def __init__(self, token_rate, bucket_size, raise_when_empty):
     """Initialize a token bucket that starts out empty.

     Args:
         token_rate: tokens added per second; must be positive.
         bucket_size: maximum number of tokens; must be positive.
         raise_when_empty: presumably, raise instead of waiting when no
             token is available -- TODO confirm against the consuming
             method, which is not visible here.
     """
     self._raise_when_empty = raise_when_empty
     self._token_rate = ASSERT.greater(token_rate, 0)
     # Seconds between two consecutive token arrivals.
     self._token_period = 1 / self._token_rate
     self._bucket_size = ASSERT.greater(bucket_size, 0)
     # NOTE(review): this bucket starts empty; a sibling variant starts full.
     self._num_tokens = 0
     self._last_added = time.monotonic()
Example #3
0
def lease_dissociate(tables, *, lease, key_id):
    """Build a DELETE statement removing the (lease, key_id) association.

    Args:
        tables: object exposing the ``leases_key_ids`` table.
        lease: lease id; must be positive.
        key_id: key id to dissociate from the lease.

    Returns:
        The DELETE statement (not executed here).
    """
    ASSERT.greater(lease, 0)
    # The backslash continuation was redundant inside the parentheses
    # (implicit continuation applies) and error-prone; removed per PEP 8.
    return (
        tables.leases_key_ids.delete()
        .where(tables.leases_key_ids.c.lease == lease)
        .where(tables.leases_key_ids.c.key_id == key_id)
    )
Example #4
0
def lease_grant(tables, *, lease, expiration):
    """Build an upsert statement granting ``lease`` until ``expiration``.

    ``lease`` must be positive; ``expiration`` must be non-negative.
    """
    ASSERT.greater(lease, 0)
    ASSERT.greater_or_equal(expiration, 0)
    statement = sqlite.upsert(tables.leases)
    return statement.values(lease=lease, expiration=expiration)
Example #5
0
 def __init__(
     self,
     *,
     num_buckets=512,
     token_rate,
     bucket_size,
     get_bucket_key=default_get_bucket_key,
     get_num_needed=lambda _: 1,
 ):
     """Initialize a per-key token-bucket rate limiter.

     Buckets live in an LRU cache holding at most ``num_buckets``
     entries.  ``token_rate`` and ``bucket_size`` must be positive.
     """
     # Validate the numeric configuration before allocating anything.
     self._token_rate = ASSERT.greater(token_rate, 0)
     self._bucket_size = ASSERT.greater(bucket_size, 0)
     self._buckets = g1_collections.LruCache(num_buckets)
     self._get_bucket_key = get_bucket_key
     self._get_num_needed = get_num_needed
Example #6
0
 def __init__(
     self,
     *,
     key,
     failure_threshold,
     failure_period,
     failure_timeout,
     success_threshold,
 ):
     """Initialize the circuit breaker in the GREEN (healthy) state.

     Args:
         key: identifier for this breaker instance.
         failure_threshold: failures within ``failure_period`` that trip
             the breaker; must be positive.
         failure_period: window (presumably seconds -- TODO confirm
             units against _EventLog usage) over which failures count.
         failure_timeout: how long the breaker stays tripped; must be
             positive.
         success_threshold: successes required to fully recover; must
             be positive.
     """
     self._key = key
     self._failure_threshold = ASSERT.greater(failure_threshold, 0)
     self._failure_period = ASSERT.greater(failure_period, 0)
     self._failure_timeout = ASSERT.greater(failure_timeout, 0)
     self._success_threshold = ASSERT.greater(success_threshold, 0)
     self._state = _States.GREEN
     # The meaning of _event_log depends on the current state:
     #   GREEN  - it records failures;
     #   YELLOW - it records successes;
     #   RED    - it records when the state was changed to RED.
     # Sized to the larger threshold so it serves either role.
     self._event_log = _EventLog(max(failure_threshold, success_threshold))
     self._num_concurrent_requests = 0
Example #7
0
    def __init__(
            self,
            cache_dir_path,
            capacity,
            *,
            post_eviction_size=None,
            executor=None,  # Set this to evict in the background.
    ):
        """Initialize an LRU file cache rooted at ``cache_dir_path``.

        ``capacity`` must be positive.  ``post_eviction_size`` defaults
        to a fixed ratio of the capacity and must satisfy
        0 <= post_eviction_size <= capacity.
        """
        self._lock = threading.Lock()

        # The cache directory must already exist.
        self._cache_dir_path = ASSERT.predicate(cache_dir_path, Path.is_dir)

        self._capacity = ASSERT.greater(capacity, 0)
        if post_eviction_size is None:
            post_eviction_size = int(self._capacity * POST_EVICTION_SIZE_RATIO)
        self._post_eviction_size = post_eviction_size
        ASSERT(
            0 <= self._post_eviction_size <= self._capacity,
            'expect 0 <= post_eviction_size <= {}, not {}',
            self._capacity,
            self._post_eviction_size,
        )

        self._executor = executor

        # If cache cold start ever becomes an issue, this table could be
        # stored to and loaded from a file.
        self._access_log = collections.OrderedDict()

        # getting_path may "lease" paths to the user; leased paths must
        # not be evicted.
        self._active_paths = g1_collections.Multiset()

        self._num_hits = 0
        self._num_misses = 0

        # Only from this point on is it safe to call these methods.
        self._eviction_countdown = self._estimate_eviction_countdown()
        self._maybe_evict()
Example #8
0
async def recvfile(response, file):
    """Receive response body into a file.

    The caller must set ``stream`` to true when making the request.

    DANGER! This breaks the multiple levels of encapsulation, from
    requests.Response all the way down to http.client.HTTPResponse.
    As a result, the response object is most likely unusable after a
    recvfile call, and you should probably close it immediately.
    """
    # requests sets _content to False initially; these checks ensure the
    # body has not been read (or started to be read) yet.
    ASSERT.is_(response._content, False)
    ASSERT.false(response._content_consumed)

    urllib3_response = ASSERT.not_none(response.raw)
    chunked = urllib3_response.chunked

    # Dig down to the stdlib response object so we can take over its
    # underlying file/socket pair.
    httplib_response = ASSERT.isinstance(
        urllib3_response._fp, http.client.HTTPResponse
    )
    ASSERT.false(httplib_response.closed)
    sock = ASSERT.isinstance(httplib_response.fp.raw._sock, socket.socket)

    output = DecoderChain(file)

    if chunked:
        # Chunked transfer encoding: EOF is signalled by the decoder.
        chunk_decoder = ChunkDecoder()
        output.add(chunk_decoder)
        num_to_read = 0
        eof = lambda: chunk_decoder.eof
    else:
        # Content-Length must be present and positive in this case.
        num_to_read = ASSERT.greater(
            ASSERT.not_none(httplib_response.length), 0
        )
        # NOTE: the lambda reads num_to_read late-bound, so it tracks the
        # decrements in the loop below.
        eof = lambda: num_to_read <= 0

    # Use urllib3's decoder code (gzip/deflate content decoding).
    urllib3_response._init_decoder()
    if urllib3_response._decoder is not None:
        output.add(ContentDecoder(urllib3_response._decoder))

    with contextlib.ExitStack() as stack:
        src = adapters.FileAdapter(httplib_response.fp)
        # disown (not close): the underlying file must outlive the adapter.
        stack.callback(src.disown)

        # Switch to non-blocking for async reads; restore on exit.
        sock.setblocking(False)
        stack.callback(sock.setblocking, True)

        buffer = memoryview(stack.enter_context(_BUFFER_POOL.using()))
        while not eof():
            if chunked:
                # TODO: If server sends more data at the end, like
                # response of the next request, for now recvfile might
                # read them, and then err out.  Maybe recvfile should
                # check this, and not read more than it should instead?
                num_read = await src.readinto1(buffer)
            else:
                # Never read past the declared content length.
                num_read = await src.readinto1(
                    buffer[:min(num_to_read, _CHUNK_SIZE)]
                )
            if num_read == 0:
                break
            output.write(buffer[:num_read])
            num_to_read -= num_read

        output.flush()

    # Sanity check: with Content-Length we must have consumed it exactly.
    if not chunked:
        ASSERT.equal(num_to_read, 0)

    # Trick requests to release the connection back to the connection
    # pool, rather than closing/discarding it.
    response._content_consumed = True
    # http.client.HTTPConnection tracks the last response; so you have
    # to close it to make the connection object useable again.
    httplib_response.close()

    # Close the response for the caller since response is not useable
    # after recvfile.
    response.close()

    loggings.ONCE_PER(
        1000, LOG.info, 'buffer pool stats: %r', _BUFFER_POOL.get_stats()
    )
Example #9
0
 def add(self, t):
     """Record event time ``t``; recorded times must be strictly increasing."""
     log = self._log
     if log:
         # Enforce monotonicity against the most recent entry.
         ASSERT.greater(t, log[-1])
     log.append(t)
Example #10
0
 def __init__(self, max_retries, backoff_base):
     """Initialize retry policy.

     Args:
         max_retries: maximum number of retries; must be positive.
         backoff_base: base for backoff computation; must be positive.
     """
     self._max_retries = ASSERT.greater(max_retries, 0)
     self._backoff_base = ASSERT.greater(backoff_base, 0)
Example #11
0
 def __init__(self, token_rate, bucket_size):
     """Initialize a token bucket that starts out full.

     Args:
         token_rate: tokens added per second; must be positive.
         bucket_size: maximum number of tokens; must be positive.
     """
     self._token_rate = ASSERT.greater(token_rate, 0)
     self._bucket_size = ASSERT.greater(bucket_size, 0)
     self._num_tokens = self._bucket_size  # Bucket is full initially.
     self._last_added = time.monotonic()
Example #12
0
def lease_revoke(tables, *, lease):
    """Build DELETE statements revoking ``lease`` from all lease tables.

    ``lease`` must be positive.  Returns a list of statements (not
    executed here).
    """
    ASSERT.greater(lease, 0)
    statements = []
    for table in (tables.leases, tables.leases_key_ids):
        statements.append(table.delete().where(table.c.lease == lease))
    return statements
Example #13
0
def lease_associate(tables, *, lease, key_id):
    """Build an upsert statement associating ``lease`` with ``key_id``.

    ``lease`` must be positive.
    """
    ASSERT.greater(lease, 0)
    row = dict(lease=lease, key_id=key_id)
    return sqlite.upsert(tables.leases_key_ids).values(**row)
Example #14
0
 def __post_init__(self):
     """Validate fields: ``revision`` must be positive and ``key`` truthy."""
     ASSERT.greater(self.revision, 0)
     ASSERT.true(self.key)
Example #15
0
 def __post_init__(self):
     """Validate fields: ``lease`` positive, ``expiration`` non-negative."""
     ASSERT.greater(self.lease, 0)
     ASSERT.greater_or_equal(self.expiration, 0)