def serve_conn(self, sock: BufferedSocket) -> None:
    """Stream length-prefixed pickled log records from *sock* and dispatch them.

    Runs until ``self.is_running`` goes false or the peer disconnects.
    Each wire message is a 4-byte big-endian length followed by a pickled
    ``logging.LogRecord``; records are handed to every handler whose
    ``level`` they meet.
    """
    while self.is_running:
        # --- read the 4-byte length prefix ---
        try:
            prefix = sock.read_exactly(4)
        except OSError as err:
            self.logger.debug("%s: closing client connection: %s", self, err)
            return
        except Exception:
            self.logger.exception("Error reading serialized record length data")
            return

        payload_size = int.from_bytes(prefix, 'big')

        # --- read the pickled record payload ---
        try:
            payload = sock.read_exactly(payload_size)
        except OSError as err:
            self.logger.debug("%s: closing client connection: %s", self, err)
            return
        except Exception:
            self.logger.exception("Error reading serialized log record data")
            return

        # NOTE(review): pickle.loads on socket data is only safe if this IPC
        # socket is local and trusted -- confirm no untrusted peer can reach it.
        record = pickle.loads(payload)
        for handler in self.handlers:
            if record.levelno >= handler.level:
                handler.handle(record)
class IPCHandler(logging.Handler):
    """A ``logging.Handler`` that ships log records over a unix socket.

    Records are pickled and written as ``<4-byte big-endian length><payload>``
    so the receiving end can re-frame the byte stream.
    """
    logger = logging.getLogger('trinity._utils.logging.IPCHandler')

    def __init__(self, sock: socket.socket):
        self._socket = BufferedSocket(sock)
        super().__init__()

    @classmethod
    def connect(cls: Type[THandler], path: Path) -> THandler:
        """Block until the IPC socket at *path* appears, then connect to it."""
        wait_for_ipc(path)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(str(path))
        # BUG FIX: log *after* connecting -- previously the "Opened
        # connection" message was emitted before connect() was attempted,
        # which was misleading when the connect failed.
        cls.logger.debug("Opened connection to %s: %s", path, s)
        return cls(s)

    def prepare(self, record: logging.LogRecord) -> logging.LogRecord:
        """Return a copy of *record* that is safe to pickle.

        ``args`` and ``exc_info`` may hold unpicklable objects, so the
        message is rendered eagerly and those fields are cleared on the
        copy; the original record is left untouched.
        """
        msg = self.format(record)
        new_record = copy.copy(record)
        new_record.message = msg
        new_record.msg = msg
        new_record.args = None
        new_record.exc_info = None
        new_record.exc_text = None
        return new_record

    def emit(self, record: logging.LogRecord) -> None:
        """Pickle *record* and send it, length-prefixed, over the socket."""
        try:
            msg_data = pickle.dumps(self.prepare(record))
            msg_length_data = len(msg_data).to_bytes(4, 'big')
            self._socket.sendall(msg_length_data + msg_data)
        except Exception:
            # Delegate to the stdlib error hook rather than crashing the app.
            self.handleError(record)
def handle_SET(self, sock: BufferedSocket) -> None:
    """Service a SET: read key/value sizes, then the payload, store, ack."""
    sizes_data = sock.read_exactly(DOUBLE_LEN_BYTES)
    key_size, value_size = struct.unpack('<II', sizes_data)
    # Key and value arrive back-to-back in a single read.
    payload = sock.read_exactly(key_size + value_size)
    self.db[payload[:key_size]] = payload[key_size:]
    sock.sendall(SUCCESS_BYTE)
def handle_ATOMIC_BATCH(self, sock: BufferedSocket) -> None:
    """Service an ATOMIC_BATCH: apply a group of sets and deletes atomically.

    Wire format: two little-endian uint32 counts (kv-pairs, deletes),
    then one uint32 size per key, value and delete-key, then the raw
    key/value/delete-key bytes in that same order.
    """
    counts_data = sock.read_exactly(DOUBLE_LEN_BYTES)
    kv_pair_count, delete_count = struct.unpack('<II', counts_data)
    size_slots = 2 * kv_pair_count

    if not (kv_pair_count or delete_count):
        # Empty batch: nothing to read or write, but still acknowledge.
        sock.sendall(SUCCESS_BYTE)
        return

    sizes_data = sock.read_exactly(
        DOUBLE_LEN_BYTES * kv_pair_count + LEN_BYTES * delete_count)
    fmt = '<' + 'I' * (size_slots + delete_count)
    all_sizes = struct.unpack(fmt, sizes_data)
    kv_sizes = all_sizes[:size_slots]
    delete_sizes = all_sizes[size_slots:size_slots + delete_count]

    with self.db.atomic_batch() as batch:
        # kv_sizes interleaves key and value sizes; pair them back up.
        for key_size, value_size in zip(kv_sizes[::2], kv_sizes[1::2]):
            blob = sock.read_exactly(key_size + value_size)
            batch[blob[:key_size]] = blob[key_size:]
        for key_size in delete_sizes:
            del batch[sock.read_exactly(key_size)]
    sock.sendall(SUCCESS_BYTE)
def handle_EXISTS(self, sock: BufferedSocket) -> None:
    """Service an EXISTS: reply SUCCESS if the key is present, FAIL otherwise."""
    key_size = int.from_bytes(sock.read_exactly(LEN_BYTES), 'little')
    key = sock.read_exactly(key_size)
    sock.sendall(SUCCESS_BYTE if key in self.db else FAIL_BYTE)
def handle_DELETE(self, sock: BufferedSocket) -> None:
    """Service a DELETE: remove the key, replying FAIL if it was absent."""
    key_size = int.from_bytes(sock.read_exactly(LEN_BYTES), 'little')
    key = sock.read_exactly(key_size)
    # EAFP: attempt the delete and translate a miss into a FAIL reply.
    try:
        del self.db[key]
    except KeyError:
        result = FAIL_BYTE
    else:
        result = SUCCESS_BYTE
    sock.sendall(result)
def handle_GET(self, sock: BufferedSocket) -> None:
    """Service a GET: reply FAIL on a miss, else SUCCESS + size-prefixed value."""
    key_size = int.from_bytes(sock.read_exactly(LEN_BYTES), 'little')
    key = sock.read_exactly(key_size)
    try:
        value = self.db[key]
    except KeyError:
        sock.sendall(FAIL_BYTE)
        return
    sock.sendall(SUCCESS_BYTE + len(value).to_bytes(LEN_BYTES, 'little') + value)
def serve_conn(self, sock: BufferedSocket) -> None:
    """Serve database operations from a single client connection.

    Reads one operation byte at a time and dispatches to the matching
    ``handle_*`` method until the server stops or the peer disconnects.
    Unhandled errors during an operation are logged and re-raised so the
    caller can tear down the connection.
    """
    while self.is_running:
        try:
            operation_byte = sock.read_exactly(1)
        except OSError as err:
            self.logger.debug("%s: closing client connection: %s", self, err)
            break
        except Exception:
            self.logger.exception("Error reading operation flag")
            break

        try:
            operation = Operation(operation_byte)
        except (TypeError, ValueError):
            # BUG FIX: an Enum lookup with an unrecognized value raises
            # ValueError, not TypeError, so the original `except TypeError`
            # let unknown operation bytes crash the connection handler
            # instead of logging and closing gracefully.
            self.logger.error("Unrecognized database operation: %s", operation_byte.hex())
            break

        try:
            if operation is GET:
                self.handle_GET(sock)
            elif operation is SET:
                self.handle_SET(sock)
            elif operation is DELETE:
                self.handle_DELETE(sock)
            elif operation is EXISTS:
                self.handle_EXISTS(sock)
            elif operation is ATOMIC_BATCH:
                self.handle_ATOMIC_BATCH(sock)
            else:
                self.logger.error("Got unhandled operation %s", operation)
        except Exception as err:
            self.logger.exception(
                "Unhandled error during operation %s: %s", operation, err)
            raise
def __init__(self, sock: socket.socket):
    """Wrap the raw *sock* in a ``BufferedSocket``, then run base-class init."""
    self._socket = BufferedSocket(sock)
    super().__init__()
def __init__(self, sock: socket.socket):
    """Buffer *sock* and create the lock that serializes socket exchanges."""
    self._lock = threading.Lock()
    self._socket = BufferedSocket(sock)
class DBClient(BaseAtomicDB):
    """Client side of the IPC database protocol.

    Each operation sends a one-byte opcode followed by little-endian
    length-prefixed payloads, then reads a one-byte result.  A lock guards
    every full request/response round-trip so concurrent callers cannot
    interleave their wire traffic.
    """
    logger = logging.getLogger('trinity.db.client.DBClient')

    def __init__(self, sock: socket.socket):
        self._socket = BufferedSocket(sock)
        # Serializes whole request/response exchanges on the shared socket.
        self._lock = threading.Lock()

    def __enter__(self) -> None:
        self._socket.__enter__()

    def __exit__(self,
                 exc_type: Type[BaseException],
                 exc_value: BaseException,
                 exc_tb: TracebackType) -> None:
        self._socket.__exit__(exc_type, exc_value, exc_tb)

    def __getitem__(self, key: bytes) -> bytes:
        """Return the value stored under *key*; raise ``KeyError`` on a miss."""
        with self._lock:
            self._socket.sendall(GET.value + len(key).to_bytes(LEN_BYTES, 'little') + key)
            result_byte = self._socket.read_exactly(1)
            if result_byte == SUCCESS_BYTE:
                value_size_data = self._socket.read_exactly(LEN_BYTES)
                value = self._socket.read_exactly(
                    int.from_bytes(value_size_data, 'little'))
                return value
            elif result_byte == FAIL_BYTE:
                raise KeyError(key)
            else:
                # BUG FIX: was `result_byte.hex` (no call), which rendered
                # the bound-method repr instead of the byte value.
                raise Exception(f"Unknown result byte: {result_byte.hex()}")

    def __setitem__(self, key: bytes, value: bytes) -> None:
        """Store *value* under *key*."""
        with self._lock:
            self._socket.sendall(SET.value + struct.pack('<II', len(key), len(value)) + key + value)
            # Result(...) raises if the server returns an unknown byte.
            Result(self._socket.read_exactly(1))

    def __delitem__(self, key: bytes) -> None:
        """Delete *key*; raise ``KeyError`` if the server reports it absent."""
        with self._lock:
            # Consistency fix: use LEN_BYTES (as __getitem__ does) rather
            # than a hard-coded 4 for the key-length prefix.
            self._socket.sendall(DELETE.value + len(key).to_bytes(LEN_BYTES, 'little') + key)
            result_byte = self._socket.read_exactly(1)
            if result_byte == SUCCESS_BYTE:
                return
            elif result_byte == FAIL_BYTE:
                raise KeyError(key)
            else:
                # BUG FIX: `.hex` -> `.hex()` as above.
                raise Exception(f"Unknown result byte: {result_byte.hex()}")

    def _exists(self, key: bytes) -> bool:
        """Return whether *key* is present on the server."""
        with self._lock:
            # Consistency fix: LEN_BYTES instead of a hard-coded 4.
            self._socket.sendall(EXISTS.value + len(key).to_bytes(LEN_BYTES, 'little') + key)
            result_byte = self._socket.read_exactly(1)
            if result_byte == SUCCESS_BYTE:
                return True
            elif result_byte == FAIL_BYTE:
                return False
            else:
                # BUG FIX: `.hex` -> `.hex()` as above.
                raise Exception(f"Unknown result byte: {result_byte.hex()}")

    @contextlib.contextmanager
    def atomic_batch(self) -> Iterator['AtomicBatch']:
        """Collect writes and deletes in a batch and ship them in one message.

        The batch is only transmitted if the ``with`` body completes without
        raising; an exception aborts the batch client-side (nothing is sent).
        """
        batch = AtomicBatch(self)
        yield batch
        diff = batch.finalize()
        pending_deletes = diff.deleted_keys()
        pending_kv_pairs = diff.pending_items()
        kv_pair_count = len(pending_kv_pairs)
        delete_count = len(pending_deletes)
        kv_sizes = tuple(
            len(item) for item in itertools.chain(*pending_kv_pairs))
        delete_sizes = tuple(len(key) for key in pending_deletes)
        # We encode all of the *sizes* in one shot using `struct.pack` and this
        # dynamically constructed format string.
        fmt_str = '<II' + 'I' * (len(kv_sizes) + len(pending_deletes))
        kv_pair_count_and_size_data = struct.pack(
            fmt_str,
            kv_pair_count,
            delete_count,
            *kv_sizes,
            *delete_sizes,
        )
        kv_and_delete_data = b''.join(
            itertools.chain(*pending_kv_pairs, pending_deletes))
        with self._lock:
            self._socket.sendall(ATOMIC_BATCH.value + kv_pair_count_and_size_data + kv_and_delete_data)
            Result(self._socket.read_exactly(1))

    def close(self) -> None:
        """Shut down the write side and close the underlying socket."""
        try:
            self._socket.shutdown(socket.SHUT_WR)
        except OSError as e:
            # on mac OS this can result in the following error:
            # OSError: [Errno 57] Socket is not connected
            if e.errno != errno.ENOTCONN:
                raise
        self._socket.close()

    @classmethod
    def connect(cls, path: pathlib.Path, timeout: int = 5) -> "DBClient":
        """Wait up to *timeout* seconds for the IPC socket at *path*, then connect."""
        wait_for_ipc(path, timeout)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(str(path))
        # BUG FIX: log after connecting so the message is accurate on failure.
        cls.logger.debug("Opened connection to %s: %s", path, s)
        return cls(s)