def open(readonly):
    """Handle a new client connection; return the per-connection handle."""
    nbdkit.debug("open: readonly=%d" % readonly)
    # Any non-NULL Python object may be returned here; nbdkit hands the
    # same object back as the first argument to the other callbacks in
    # the client-connected phase.  A plain integer suffices here.
    return 1
def after_fork():
    """Called by nbdkit after forking; probe the server and build the pool.

    Opens a short-lived connection to query the imageio server features,
    stores them in the module-global ``options``, then creates the
    persistent connection pool used by the data-serving callbacks.
    """
    global options, pool

    # Probe connection is temporary; the try/finally guarantees it is
    # closed even when get_options() raises (the original leaked it on
    # error).
    http = create_http(url)
    try:
        options = get_options(http, url)
    finally:
        http.close()

    nbdkit.debug("imageio features: flush=%(can_flush)r "
                 "zero=%(can_zero)r unix_socket=%(unix_socket)r "
                 "max_readers=%(max_readers)r max_writers=%(max_writers)r"
                 % options)

    pool = create_http_pool(url, options)
def create_http(url, unix_socket=None):
    """
    Create http connection for transfer url.

    Returns HTTPConnection.
    """
    if unix_socket:
        nbdkit.debug("creating unix http connection socket=%r" % unix_socket)
        try:
            return UnixHTTPConnection(unix_socket)
        except Exception as e:
            # Very unlikely, but we can recover by using https.
            nbdkit.debug("cannot create unix socket connection: %s" % e)

    scheme = url.scheme
    if scheme == "https":
        context = ssl.create_default_context(
            purpose=ssl.Purpose.SERVER_AUTH, cafile=cafile)
        if insecure:
            # Caller explicitly asked for no certificate verification.
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        nbdkit.debug("creating https connection host=%s port=%s"
                     % (url.hostname, url.port))
        return HTTPSConnection(url.hostname, url.port, context=context)

    if scheme == "http":
        nbdkit.debug("creating http connection host=%s port=%s"
                     % (url.hostname, url.port))
        return HTTPConnection(url.hostname, url.port)

    raise RuntimeError("unknown URL scheme (%s)" % scheme)
def create_http_pool(url, options):
    """Build a queue of pre-opened HTTP connections to the imageio server."""
    # Never open more connections than the server allows for readers or
    # writers, capped by our own MAX_CONNECTIONS limit.
    count = min(options["max_readers"],
                options["max_writers"],
                MAX_CONNECTIONS)

    nbdkit.debug("creating http pool connections=%d" % count)

    # The unix socket shortcut is only usable when we run on the oVirt
    # host that serves the image.
    unix_socket = options["unix_socket"] if is_ovirt_host else None

    pool = queue.Queue(count)
    for _ in range(count):
        conn = create_http(url, unix_socket=unix_socket)
        pool.put(PoolItem(conn))

    return pool
def close_http_pool(pool):
    """
    Wait until all inflight requests are done, close all connections and
    remove them from the pool.

    No request can be served by the pool after this call.
    """
    nbdkit.debug("closing http pool")

    # Taking maxsize items blocks until every in-flight request has put
    # its connection back, so nothing is closed while still in use.
    drained = [pool.get() for _ in range(pool.maxsize)]

    for item in drained:
        item.http.close()
def after_fork():
    """Called by nbdkit after forking; set up the pool and keeper thread.

    Probes the imageio server for its features, creates the persistent
    connection pool, and starts the daemon thread that keeps idle
    connections alive.
    """
    global options, pool

    # Probe connection is temporary; the try/finally guarantees it is
    # closed even when get_options() raises (the original leaked it on
    # error).
    http = create_http(url)
    try:
        options = get_options(http, url)
    finally:
        http.close()

    nbdkit.debug("imageio features: flush=%(can_flush)r "
                 "zero=%(can_zero)r unix_socket=%(unix_socket)r "
                 "max_readers=%(max_readers)r max_writers=%(max_writers)r"
                 % options)

    pool = create_http_pool(url, options)

    # Daemon thread so it can never block nbdkit shutdown.
    t = threading.Thread(target=pool_keeper, name="poolkeeper")
    t.daemon = True
    t.start()
def request_failed(r, msg):
    """Log a failed imageio response in full and raise a short RuntimeError."""
    status, reason = r.status, r.reason
    try:
        body = r.read()
    except EnvironmentError as e:
        body = "(Unable to read response body: %s)" % e

    # Log the full error if we're verbose.
    for line in ("unexpected response from imageio server:",
                 msg,
                 "%d: %s" % (status, reason),
                 body):
        nbdkit.debug(line)

    # Only a short error is included in the exception.
    raise RuntimeError("%s: %d %s: %r" % (msg, status, reason, body[:200]))
def pool_keeper():
    """
    Thread flushing idle connections, keeping them alive.

    If a connection sends no request for 60 seconds, the imageio server
    closes it.  Recovering from a closed connection is hard and unsafe,
    so this thread ensures that connections never become idle by sending
    a flush request on any connection that has been idle for too long.

    In normal conditions all connections are busy most of the time, so
    the keeper finds no idle connections.  If nbdcopy stalls briefly,
    the keeper finds a few idle connections but quickly returns them to
    the pool.  In the pathological case where nbdcopy is blocked for
    minutes (e.g. on vddk input), the keeper flushes every connection
    roughly every ~30 seconds until nbdcopy resumes.
    """
    global pool_error

    nbdkit.debug("poolkeeper: started")

    # done.wait() doubles as both the sleep and the shutdown signal.
    while not done.wait(IDLE_TIMEOUT / 2):
        # Grab every connection currently sitting unused in the pool.
        grabbed = []
        while True:
            try:
                grabbed.append(pool.get_nowait())
            except queue.Empty:
                break

        if grabbed:
            now = time.monotonic()
            for item in grabbed:
                if item.last_used and now - item.last_used > IDLE_TIMEOUT:
                    nbdkit.debug("poolkeeper: flushing idle connection")
                    try:
                        send_flush(item.http)
                        item.last_used = now
                    except Exception as e:
                        # We will report this error on the next request.
                        pool_error = e
                        item.last_used = None
                # Always return the item, even when the flush failed.
                pool.put(item)

    nbdkit.debug("poolkeeper: stopped")
def open(readonly):
    """Handle a new client connection; return the handle dict."""
    # The export name is a Unicode string; encode it to bytes before
    # returning it to the client.
    name = nbdkit.export_name()
    nbdkit.debug("export name = '%s'" % name)
    return {'name': name.encode()}
def cleanup():
    """Stop the keeper thread and close every pooled connection."""
    nbdkit.debug("cleaning up")
    # Signal pool_keeper to exit before tearing down the connections it
    # might otherwise touch.
    done.set()
    close_http_pool(pool)
def pread(h, buf, offset, flags):
    """Read len(buf) bytes at `offset` of the split-file image into `buf`.

    The image is stored as several consecutive files under h['path'];
    h['fd_list'] holds the sorted starting offset of each file and
    h['filenames'] the matching file names.  Data is copied file by file
    until buf is full, or until the request runs past the stored data
    (then it returns early, leaving the rest of buf untouched — this
    preserves the original best-effort behavior; presumably the caller
    tolerates short reads — TODO confirm).
    """
    nbdkit.debug("pread start ---")
    path = h['path']
    fd_list = h['fd_list']
    filenames = h['filenames']

    current_offset = offset
    buf_offset = 0
    remain_len = len(buf)
    total_read = 0

    # Find the file whose start offset is the greatest one <= offset.
    # bisect_right is required here: with bisect_left, an offset landing
    # exactly on a block boundary selected the PREVIOUS file and fell
    # into the "no data" early return, silently dropping the read.
    index = max(bisect.bisect_right(fd_list, offset) - 1, 0)

    nbdkit.debug(f"offset: {offset}")

    while remain_len > 0:
        block_start = fd_list[index]
        fd = os.open(os.path.join(path, filenames[index]), os.O_RDONLY)
        try:
            # fstat on the open fd; the file cannot change identity
            # between open and stat.
            size = os.fstat(fd).st_size
            block_offset = current_offset - block_start

            if block_offset >= size:
                # Request starts past the end of the stored data.
                nbdkit.debug(f"no data in {offset}")
                return

            # Clamp the read to what this file actually contains.
            read_len = min(remain_len, size - block_offset)

            nbdkit.debug(f"block_start: {block_start}")
            nbdkit.debug(f"block_offset: {block_offset}")
            nbdkit.debug(f"remain_len: {remain_len}")
            nbdkit.debug(f"read_len: {read_len}")
            nbdkit.debug(f"buf_offset: {buf_offset}")
            nbdkit.debug(f"current_offset: {current_offset}")
            nbdkit.debug(f"")

            tmp_buf = os.pread(fd, read_len, block_offset)
            buf[buf_offset:buf_offset + read_len] = tmp_buf
        finally:
            # The original leaked the fd on the early return above and on
            # any exception; try/finally closes it on every path.
            os.close(fd)

        remain_len -= read_len
        total_read += read_len
        buf_offset += read_len
        current_offset += read_len
        index += 1

    nbdkit.debug(f"pread end -----")
    nbdkit.debug(f"")
def config(key, value):
    """Accept any config parameter, log it, and otherwise ignore it."""
    nbdkit.debug("ignored parameter %s=%s" % (key, value))