def shrink(self, connection):
    """(connection:Connection)
    Try to reduce the size of self.objects.

    Pops oids off a serial-ordered heap, ghosting saved objects until
    the cache is back within self.size.
    """
    initial = len(self.objects)
    if initial <= self.size:
        # No excess.
        log(10, '[%s] cache size %s recent %s',
            getpid(), initial, len(self.recent_objects))
        return
    started = time()
    heap = self._build_heap(connection.get_transaction_serial())
    ghosted = 0
    while heap and len(self.objects) > self.size:
        serial, oid = heappop(heap)
        obj = self.objects.get(oid)
        if obj is None:
            continue
        if obj._p_is_saved():
            # Only saved objects can be safely turned into ghosts.
            obj._p_set_status_ghost()
            ghosted += 1
        self.recent_objects.discard(obj)
    log(10, '[%s] shrink %fs removed %s ghosted %s size %s recent %s',
        getpid(), time() - started, initial - len(self.objects),
        ghosted, len(self.objects), len(self.recent_objects))
def handle_quit(self, client):  # Q
    """Shut the server down: close every open storage, then stop the
    scheduler so the serve loop exits."""
    log(20, 'Quit')
    for open_storage in self.storages.values():
        if open_storage is None:
            continue
        open_storage.close()
    self.scheduler.stop()
def serve_to_client(self, client_socket): client = ConnectedClient(client_socket) # Initialize per-storage state for the new client. client.invalid = dict( (db_name, set()) for db_name in self.storages) client.unused_oids = dict( (db_name, set()) for db_name in self.storages) self.clients.add(client) while not client.closed: try: command = yield client.read(1) except (ConnectionClosed, SocketError): break else: if command in self.handlers: handler_name = self.handlers[command] handler = getattr(self, handler_name) yield handler(client) elif command in self.db_handlers: handler_name = self.db_handlers[command] handler = getattr(self, handler_name) # Get database name. name_length = str_to_int4((yield client.read(4))) db_name = yield client.read(name_length) yield handler(client, db_name) yield client.flush() log(20, 'Connection closed.') self.clients.remove(client)
def handle_C(self, s):  # commit
    """Handle a commit ('C') request on socket *s*.

    Wire protocol: first send the client its accumulated invalidations,
    then read a 4-byte transaction length; zero means the client chose
    not to commit.  Otherwise read the transaction body, store every
    record, and reply STATUS_OKAY.
    """
    client = self._find_client(s)
    # Deliver pending invalidations before accepting the transaction.
    s.sendall(p32(len(client.invalid)) + ''.join(client.invalid))
    client.invalid.clear()
    tlen = u32(recv(s, 4))
    if tlen == 0:
        return  # client decided not to commit (e.g. conflict)
    tdata = recv(s, tlen)
    logging_debug = is_logging(10)
    logging_debug and log(10, 'Committing %s bytes', tlen)
    self.storage.begin()
    i = 0
    oids = []
    while i < len(tdata):
        # Record layout: 4-byte length, 8-byte oid, then record data.
        rlen = u32(tdata[i:i+4])
        i += 4
        oid = tdata[i:i+8]
        record = tdata[i+8:i+rlen]
        i += rlen
        if logging_debug:
            class_name = extract_class_name(record)
            log(10, ' oid=%-6s rlen=%-6s %s', u64(oid), rlen, class_name)
        self.storage.store(oid, record)
        oids.append(oid)
    # Every byte of the transaction must have been consumed.
    assert i == len(tdata)
    self.storage.end()
    log(20, 'Committed %3s objects %s bytes at %s',
        len(oids), tlen, datetime.now())
    s.sendall(STATUS_OKAY)
    # Other clients must invalidate their cached copies of these oids.
    for c in self.clients:
        if c is not client:
            c.invalid.update(oids)
def _report_load_record(self):
    """Log the per-class load counts accumulated since the last report,
    then reset them.  Skipped unless level-5 logging is enabled."""
    if self.load_record and is_logging(5):
        lines = ["%8s: %s" % (count, class_name)
                 for class_name, count in sorted(self.load_record.items())]
        log(5, '[%s]\n' % getpid() + '\n'.join(lines))
        self.load_record.clear()
def start_durus(host, port, logfilename, dbfilename):
    """Open the log and database files, then serve the storage at
    host:port.  Blocks forever in the server loop."""
    direct_output(open(logfilename, 'a+'))
    logger.setLevel(9)
    storage = FileStorage(dbfilename, repair=False, readonly=False)
    log(20, 'Storage file=%s host=%s port=%s',
        storage.get_filename(), host, port)
    StorageServer(storage, host=host, port=port).serve()
def handle_S(self, s):  # sync
    """Handle a sync ('S'): send the client its pending invalidations
    and clear them."""
    client = self._find_client(s)
    log(8, 'Sync %s', len(client.invalid))
    invalid = self.storage.sync()
    # We have exclusive access, so the storage itself can never report
    # invalidations here.
    assert not invalid
    s.sendall(p32(len(client.invalid)) + ''.join(client.invalid))
    client.invalid.clear()
def handle_V(self, s):  # Verify protocol version match.
    """Exchange protocol versions with the client; reject mismatches
    after answering so the client can see our version too."""
    peer_version = read(s, 4)
    log(10, 'Client Protocol: %s', str_to_int4(peer_version))
    assert len(self.protocol) == 4
    write(s, self.protocol)  # always reply with our own version first
    if peer_version == self.protocol:
        return
    raise ClientError("Protocol not supported.")
def handle_new_oids(self, client, db_name):  # M
    """Allocate a batch of fresh oids for *client* in *db_name* and
    send them back as one concatenated byte string."""
    log(20, 'New OIDs %s' % db_name)
    storage = self.storages[db_name]
    how_many = ord((yield client.read(1)))
    log(10, 'oids: %s', how_many)
    fresh = self._new_oids(client, db_name, storage, how_many)
    yield client.write(join_bytes(fresh))
def handle_S(self, s):  # sync
    """Handle a sync ('S'): flush load statistics, sync the storage,
    then hand the client its pending invalidations."""
    client = self._find_client(s)
    self._report_load_record()
    self._sync_storage()
    pending = client.invalid
    log(8, 'Sync %s', len(pending))
    write_all(s, int4_to_str(len(pending)), join_bytes(pending))
    pending.clear()
def handle_destroy(self, client, db_name):  # D
    """Delete the database file for *db_name* — but only when the
    database is not currently open."""
    log(20, 'Destroy %s' % db_name)
    if db_name not in self.storages:
        os.unlink(self._db_path(db_name))
    # else: still in use; silently leave the file alone.
def handle_bulk_read(self, client, db_name):  # B
    """Stream load responses for a whole batch of oids at once."""
    log(20, 'Bulk read %s' % db_name)
    storage = self.storages[db_name]
    count = str_to_int4((yield client.read(4)))
    packed_oids = yield client.read(8 * count)  # oids are 8 bytes each
    for oid in split_oids(packed_oids):
        yield self._send_load_response(client, db_name, storage, oid)
def handle_close(self, client, db_name):  # X
    """Close and forget the named storage, dropping the per-client
    bookkeeping that referenced it."""
    log(20, 'Close %s' % db_name)
    storage = self.storages.pop(db_name, None)
    if storage is not None:
        storage.close()
        # Remove per-storage state for each client.
        for c in self.clients:
            del c.invalid[db_name]
            del c.unused_oids[db_name]
def handle_sync(self, client, db_name):  # S
    """Sync the named storage and send *client* its pending
    invalidations for that database."""
    log(20, 'Sync %s' % db_name)
    storage = self.storages[db_name]
    self._report_load_record(storage)
    self._sync_storage(db_name, storage)
    pending = client.invalid[db_name]
    log(8, 'Sync %s', len(pending))
    yield client.write(int4_to_str(len(pending)))
    yield client.write(join_bytes(pending))
    pending.clear()
def end(self, handle_invalidations=None):
    """Finish the current transaction: persist pending records, log the
    resulting file position, feed the pack queue, and begin a fresh
    transaction.  *handle_invalidations* is accepted for interface
    compatibility and not used here."""
    self.shelf.store(iteritems(self.pending_records))
    if is_logging(20):
        shelf_file = self.shelf.get_file()
        shelf_file.seek_end()
        log(20, "Transaction at [%s] end=%s" % (
            datetime.now(), shelf_file.tell()))
    if self.pack_extra is not None:
        self.pack_extra.update(self.pending_records)
    # These oids are now in use, so they are no longer "unused".
    self.allocated_unused_oids -= set(self.pending_records)
    self.begin()
def end(self, handle_invalidations=None):
    """Finish the current transaction: persist pending records, log the
    resulting file position at debug level, feed the pack queue, and
    begin a fresh transaction.  *handle_invalidations* is accepted for
    interface compatibility and not used here."""
    self.shelf.store(iteritems(self.pending_records))
    if is_logging(10):
        shelf_file = self.shelf.get_file()
        shelf_file.seek_end()
        log(10, "Transaction at [%s] end=%s" % (
            datetime.now(), shelf_file.tell()))
    if self.pack_extra is not None:
        self.pack_extra.update(self.pending_records)
    # These oids are now in use, so they are no longer "unused".
    self.allocated_unused_oids -= set(self.pending_records)
    self.begin()
def startDurus(host, port, logfilename, dbfilename):
    """Start and initialize the Durus server component.

    Also opens a (size-rotated) log file via the logfile module.
    """
    direct_output(logfile.open(logfilename, 50000))
    logger.setLevel(9)
    storage = FileStorage(dbfilename, repair=False, readonly=False)
    log(20, 'Storage file=%s host=%s port=%s',
        storage.get_filename(), host, port)
    StorageServer(storage, host=host, port=port).serve()
def start_durus(host, port, logfilename, dbfilename):
    """Open the log and database files, then serve the storage at
    host:port.  Blocks forever in the server loop."""
    logfile = open(logfilename, 'a+')
    direct_output(logfile)
    logger.setLevel(9)
    storage = FileStorage(dbfilename, repair=False, readonly=False)
    # Durus 2.6 exposed storage.fp.name, but Durus 2.7 removed it.
    # get_filename() exists in both 2.6 and 2.7, so use that.
    # (Previously this note lived in a no-op string literal holding
    # commented-out code; it is now a plain comment.)
    log(20, 'Storage file=%s host=%s port=%s',
        storage.get_filename(), host, port)
    StorageServer(storage, host=host, port=port).serve()
def start_durus(logfile, logginglevel, address, storage, gcbytes):
    """Configure logging (stderr when *logfile* is None), then serve
    *storage* at *address* forever with the given gc threshold."""
    stream = sys.stderr if logfile is None else open(logfile, 'a+')
    direct_output(stream)
    logger.setLevel(logginglevel)
    socket_address = SocketAddress.new(address)
    if hasattr(storage, 'get_filename'):
        # Not every storage class is file-backed.
        log(20, 'Storage file=%s address=%s',
            storage.get_filename(), socket_address)
    StorageServer(storage, address=socket_address,
                  gcbytes=gcbytes).serve()
def dispatch(self):
    """Accept-loop coroutine: listen on (host, port) and hand each new
    connection to serve_to_client via the scheduler."""
    listener = Socket()
    address = (self.host, self.port)
    listener.bind(address)
    listener.listen(16)
    log(20, 'Listening on %s:%i' % address)
    while True:
        client_socket, client_address = yield listener.accept()
        log(20, 'Connection from %s:%s' % client_address)
        self.scheduler.add(self.serve_to_client,
                           args=(client_socket,))
def handle_pack(self, client, db_name):  # P
    """Start packing the named storage ('P'), incrementally when the
    storage supports it, otherwise in one blocking pass."""
    log(20, 'Pack %s' % db_name)
    storage = self.storages[db_name]
    if storage.d_packer is not None:
        log(20, 'Pack already in progress at %s' % datetime.now())
    else:
        log(20, 'Pack started at %s' % datetime.now())
        storage.d_packer = storage.get_packer()
        if storage.d_packer is None:
            # No incremental packer available; pack synchronously.
            log(20, 'Cannot iteratively pack, performing full pack.')
            storage.pack()
            log(20, 'Pack completed at %s' % datetime.now())
    yield client.write(STATUS_OKAY)
def handle_open(self, client, db_name):  # O
    """Open the named database ('O'), attaching runtime bookkeeping
    fields to the storage.  A second open is a no-op."""
    log(20, 'Open %s' % db_name)
    if db_name in self.storages:
        return
    storage = self.storage_class(self._db_path(db_name))
    storage.d_bytes_since_pack = 0
    storage.d_load_record = {}
    storage.d_packer = None
    self.storages[db_name] = storage
    # Initialize per-storage state for each client.
    for c in self.clients:
        c.invalid[db_name] = set()
        c.unused_oids[db_name] = set()
def start_durus(logfile, logginglevel, file, repair, readonly, host, port):
    """Configure logging (stderr when *logfile* is None), open the
    storage (a temp-file storage when *file* is None), and serve it at
    host:port forever."""
    if logfile is None:
        logfile = sys.stderr
    else:
        logfile = open(logfile, 'a+')
    direct_output(logfile)
    logger.setLevel(logginglevel)
    if file is None:
        storage = TempFileStorage()
    else:
        storage = FileStorage(file, repair=repair, readonly=readonly)
    # BUG FIX: storage.fp.name is not available in newer Durus versions
    # (and not on TempFileStorage); get_filename() is the portable
    # accessor.
    log(20, 'Storage file=%s host=%s port=%s',
        storage.get_filename(), host, port)
    StorageServer(storage, host=host, port=port).serve()
def handle_commit(self, client, db_name):  # C
    """Handle a commit ('C') request for *db_name* from *client*.

    Protocol: send the client its pending invalidations, then read a
    4-byte transaction length (zero means the client aborted) and the
    transaction body.  Store every record, validate oid ownership, and
    finish with STATUS_OKAY, or STATUS_INVALID on a write conflict.

    Raises ClientError if the commit uses an oid reserved by another
    client.
    """
    log(20, 'Commit %s' % db_name)
    storage = self.storages[db_name]
    self._sync_storage(db_name, storage)
    # Deliver pending invalidations before accepting the transaction.
    invalid = client.invalid[db_name]
    yield client.write(int4_to_str(len(invalid)))
    yield client.write(join_bytes(invalid))
    yield client.flush()
    invalid.clear()
    tdata_len = str_to_int4((yield client.read(4)))
    if tdata_len == 0:
        # Client decided not to commit (e.g. conflict)
        return
    tdata = yield client.read(tdata_len)
    logging_debug = is_logging(10)
    logging_debug and log(10, 'Committing %s bytes', tdata_len)
    storage.begin()
    i = 0
    oids = []
    while i < tdata_len:
        # Record layout: 4-byte length, 8-byte oid, then record data.
        rlen = str_to_int4(tdata[i:i+4])
        i += 4
        oid = tdata[i:i+8]
        record = tdata[i+8:i+rlen]
        i += rlen
        if logging_debug:
            class_name = extract_class_name(record)
            log(10, ' oid=%-6s rlen=%-6s %s',
                str_to_int8(oid), rlen, class_name)
        storage.store(oid, record)
        oids.append(oid)
    assert i == tdata_len
    oid_set = set(oids)
    for c in self.clients:
        if c is not client:
            # A commit may not touch oids reserved by another client.
            stolen = oid_set.intersection(c.unused_oids[db_name])
            if stolen:
                # BUG FIX: report an oid actually in the conflicting
                # intersection (previously this reported whichever oid
                # happened to be last in the record loop).
                raise ClientError('invalid oid: %r' % sorted(stolen)[0])
    try:
        handle_invalidations = (
            lambda oids: self._handle_invalidations(db_name, oids))
        storage.end(handle_invalidations=handle_invalidations)
    except ConflictError:
        log(20, 'Conflict during commit')
        yield client.write(STATUS_INVALID)
    else:
        self._report_load_record(storage)
        log(20, 'Committed %3s objects %s bytes at %s',
            len(oids), tdata_len, datetime.now())
        yield client.write(STATUS_OKAY)
        client.unused_oids[db_name] -= oid_set
        # Other clients must invalidate their cached copies.
        for c in self.clients:
            if c is not client:
                c.invalid[db_name].update(oids)
        storage.d_bytes_since_pack += tdata_len + 8
def serve(self):
    """Main select() loop of the storage server.

    Accepts connections (from a systemd-inherited socket when one is
    available), dispatches client commands, and interleaves incremental
    pack (gc) steps whenever the loop is otherwise idle.
    """
    sock = get_systemd_socket()
    if sock is None:
        sock = self.address.get_listening_socket()
    else:
        # Wrap the inherited fd so close/option handling stays uniform.
        self.address = InheritedSocket(sock)
    log(20, 'Ready on %s', self.address)
    self.sockets.append(sock)
    try:
        while 1:
            if self.packer is not None:
                # Poll without blocking so gc can make progress.
                timeout = 0.0
            else:
                timeout = None
            r, w, e = select.select(self.sockets, [], [], timeout)
            for s in r:
                if s is sock:
                    # new connection
                    conn, addr = s.accept()
                    self.address.set_connection_options(conn)
                    self.clients.append(_Client(conn, addr))
                    self.sockets.append(conn)
                else:
                    # command from client
                    try:
                        self.handle(s)
                    except (ClientError, socket.error,
                            socket.timeout, IOError):
                        # Drop the misbehaving/disconnected client.
                        exc = sys.exc_info()[1]
                        log(10, '%s', ''.join(map(str, exc.args)))
                        self.sockets.remove(s)
                        self.clients.remove(self._find_client(s))
                        s.close()
            if (self.packer is None and
                0 < self.gcbytes <= self.bytes_since_pack):
                # Enough new bytes written: start an incremental pack.
                self.packer = self.storage.get_packer()
                if self.packer is not None:
                    log(20, 'gc started at %s' % datetime.now())
            if not r and self.packer is not None:
                # Idle iteration: advance the pack by one step.
                try:
                    pack_step = next(self.packer)
                    if isinstance(pack_step, str):
                        log(15, 'gc ' + pack_step)
                except StopIteration:
                    log(20, 'gc at %s' % datetime.now())
                    self.packer = None  # done packing
                    self.bytes_since_pack = 0  # reset
    finally:
        self.address.close(sock)
def handle_L(self, s):  # load
    """Load one object record by oid and send it to the client, or a
    status code when the oid is invalid/missing."""
    oid = recv(s, 8)
    if oid in self._find_client(s).invalid:
        s.sendall(STATUS_INVALID)
        return
    try:
        record = self.storage.load(oid)
    except KeyError:
        log(10, 'KeyError %s', u64(oid))
        s.sendall(STATUS_KEYERROR)
    else:
        if is_logging(5):
            log(5, 'Load %-7s %s', u64(oid),
                extract_class_name(record))
        s.sendall(STATUS_OKAY + p32(len(record)) + record)
def stop_durus(address):
    """Ask the Durus server at *address* to quit.

    Returns True when a server was reachable (and the quit message was
    sent), False when nothing was listening.
    """
    socket_address = SocketAddress.new(address)
    sock = socket_address.get_connected_socket()
    if sock is None:
        log(20, "Durus server %s doesn't seem to be running." %
            str(address))
        return False
    write(sock, 'Q')  # graceful exit message
    sock.close()
    # Try to wait until the address is free.
    for _ in range(20):
        sleep(0.5)
        sock = socket_address.get_connected_socket()
        if sock is None:
            break
        sock.close()
    return True
def shrink(self, loaded_oids):
    """Clock-style cache eviction over the weak-reference map
    self.objects.

    Scans a window of the cache starting at self.finger: drops dead
    weak references, "ages" recently-touched objects, and ghosts saved
    untouched ones.  *loaded_oids* is mutated to stay consistent with
    the cache.
    """
    if 0:
        # debugging code, ensure loaded_oids is sane
        for oid, r in self.objects.iteritems():
            obj = r()
            if obj is not None and obj._p_is_saved():
                # every SAVED object must be in loaded_oids
                assert oid in loaded_oids, obj._p_format_oid()
        for oid in loaded_oids:
            # every oid in loaded_oids must have an entry in the cache
            assert oid in self.objects
    size = len(self.objects)
    assert len(loaded_oids) <= size
    extra = size - self.size
    if extra < 0:
        # Under budget; nothing to evict.
        log(10, '[%s] cache size %s loaded %s',
            getpid(), size, len(loaded_oids))
        return
    start_time = time()
    aged = 0
    removed = Set()
    ghosts = Set()
    # The finger wraps around, so chain two passes over the dict to
    # make the scan window contiguous.
    start = self.finger % size
    # Look at no more than 1/4th and no less than 1/64th of objects
    stop = start + max(min(size >> 2, extra), size >> 6)
    for oid in islice(chain(self.objects, self.objects), start, stop):
        weak_reference = self.objects[oid]
        obj = weak_reference()
        if obj is None:
            # Referent already collected; drop the stale entry.
            removed.add(oid)
        elif obj._p_touched:
            # Recently used: give it a second chance.
            obj._p_touched = 0
            aged += 1
        elif obj._p_is_saved():
            # Untouched and saved: safe to ghost.
            obj._p_set_status_ghost()
            ghosts.add(oid)
    for oid in removed:
        del self.objects[oid]
    loaded_oids -= removed
    loaded_oids -= ghosts
    # Advance the clock hand, compensating for deleted entries.
    self.finger = stop - len(removed)
    log(10, '[%s] shrink %fs aged %s removed %s ghosted %s'
        ' loaded %s size %s',
        getpid(), time() - start_time, aged, len(removed),
        len(ghosts), len(loaded_oids), len(self.objects))
def serve(self):
    """Main select() loop of the storage server.

    Accepts connections, dispatches client commands, and interleaves
    incremental pack (gc) steps whenever the loop is otherwise idle.
    """
    sock = self.address.get_listening_socket()
    log(20, 'Ready on %s', self.address)
    self.sockets.append(sock)
    try:
        while 1:
            if self.packer is not None:
                # Poll without blocking so gc can make progress.
                timeout = 0.0
            else:
                timeout = None
            r, w, e = select.select(self.sockets, [], [], timeout)
            for s in r:
                if s is sock:
                    # new connection
                    conn, addr = s.accept()
                    self.address.set_connection_options(conn)
                    self.clients.append(_Client(conn, addr))
                    self.sockets.append(conn)
                else:
                    # command from client
                    try:
                        self.handle(s)
                    except (ClientError, socket.error,
                            socket.timeout, IOError):
                        # Drop the misbehaving/disconnected client.
                        exc = sys.exc_info()[1]
                        log(10, '%s', ''.join(map(str, exc.args)))
                        self.sockets.remove(s)
                        self.clients.remove(self._find_client(s))
                        s.close()
            if (self.packer is None and
                0 < self.gcbytes <= self.bytes_since_pack):
                # Enough new bytes written: start an incremental pack.
                self.packer = self.storage.get_packer()
                if self.packer is not None:
                    log(20, 'gc started at %s' % datetime.now())
            if not r and self.packer is not None:
                # Idle iteration: advance the pack by one step.
                try:
                    pack_step = next(self.packer)
                    if isinstance(pack_step, str):
                        log(15, 'gc ' + pack_step)
                except StopIteration:
                    log(20, 'gc at %s' % datetime.now())
                    self.packer = None  # done packing
                    self.bytes_since_pack = 0  # reset
    finally:
        self.address.close(sock)
def handle_P(self, s):  # pack
    """Begin packing ('P'), incrementally when the storage supports
    it, otherwise in one blocking pass."""
    if self.packer is not None:
        log(20, 'Pack already in progress at %s' % datetime.now())
    else:
        log(20, 'Pack started at %s' % datetime.now())
        self.packer = self.storage.get_packer()
        if self.packer is None:
            # No incremental packer available; pack synchronously.
            self.storage.pack()
            log(20, 'Pack completed at %s' % datetime.now())
    write(s, STATUS_OKAY)
def handle_C(self, s):  # commit
    """Handle a commit ('C') request on socket *s*.

    Sends the client its pending invalidations, reads the transaction
    body (empty means the client aborted), stores every record,
    validates oid ownership, and replies STATUS_OKAY or, on a write
    conflict, STATUS_INVALID.

    Raises ClientError if the commit uses an oid reserved by another
    client.
    """
    self._sync_storage()
    client = self._find_client(s)
    write_all(s, int4_to_str(len(client.invalid)),
              join_bytes(client.invalid))
    client.invalid.clear()
    tdata = read_int4_str(s)
    if len(tdata) == 0:
        return  # client decided not to commit (e.g. conflict)
    logging_debug = is_logging(10)
    logging_debug and log(10, 'Committing %s bytes', len(tdata))
    self.storage.begin()
    i = 0
    oids = []
    while i < len(tdata):
        # Record layout: 4-byte length, 8-byte oid, then record data.
        rlen = str_to_int4(tdata[i:i + 4])
        i += 4
        oid = tdata[i:i + 8]
        record = tdata[i + 8:i + rlen]
        i += rlen
        if logging_debug:
            class_name = extract_class_name(record)
            log(10, ' oid=%-6s rlen=%-6s %s',
                str_to_int8(oid), rlen, class_name)
        self.storage.store(oid, record)
        oids.append(oid)
    assert i == len(tdata)
    oid_set = set(oids)
    for other_client in self.clients:
        if other_client is not client:
            # A commit may not touch oids reserved by another client.
            stolen = oid_set.intersection(other_client.unused_oids)
            if stolen:
                # BUG FIX: report an oid actually in the conflicting
                # intersection (previously this reported whichever oid
                # happened to be last in the record loop).
                raise ClientError("invalid oid: %r" % sorted(stolen)[0])
    try:
        self.storage.end(handle_invalidations=self._handle_invalidations)
    except ConflictError:
        log(20, 'Conflict during commit')
        write(s, STATUS_INVALID)
    else:
        self._report_load_record()
        log(20, 'Committed %3s objects %s bytes at %s',
            len(oids), len(tdata), datetime.now())
        write(s, STATUS_OKAY)
        client.unused_oids -= oid_set
        # Other clients must invalidate their cached copies.
        for c in self.clients:
            if c is not client:
                c.invalid.update(oids)
        self.bytes_since_pack += len(tdata) + 8
def handle_C(self, s):  # commit
    """Handle a commit ('C') request on socket *s*.

    Sends the client its pending invalidations, reads the transaction
    body (empty means the client aborted), stores every record,
    validates oid ownership, and replies STATUS_OKAY or, on a write
    conflict, STATUS_INVALID.

    Raises ClientError if the commit uses an oid reserved by another
    client.
    """
    self._sync_storage()
    client = self._find_client(s)
    write_all(s, int4_to_str(len(client.invalid)),
              join_bytes(client.invalid))
    client.invalid.clear()
    tdata = read_int4_str(s)
    if len(tdata) == 0:
        return  # client decided not to commit (e.g. conflict)
    logging_debug = is_logging(10)
    logging_debug and log(10, 'Committing %s bytes', len(tdata))
    self.storage.begin()
    i = 0
    oids = []
    while i < len(tdata):
        # Record layout: 4-byte length, 8-byte oid, then record data.
        rlen = str_to_int4(tdata[i:i+4])
        i += 4
        oid = tdata[i:i+8]
        record = tdata[i+8:i+rlen]
        i += rlen
        if logging_debug:
            class_name = extract_class_name(record)
            log(10, ' oid=%-6s rlen=%-6s %s',
                str_to_int8(oid), rlen, class_name)
        self.storage.store(oid, record)
        oids.append(oid)
    assert i == len(tdata)
    oid_set = set(oids)
    for other_client in self.clients:
        if other_client is not client:
            # A commit may not touch oids reserved by another client.
            stolen = oid_set.intersection(other_client.unused_oids)
            if stolen:
                # BUG FIX: report an oid actually in the conflicting
                # intersection (previously this reported whichever oid
                # happened to be last in the record loop).
                raise ClientError("invalid oid: %r" % sorted(stolen)[0])
    try:
        self.storage.end(handle_invalidations=self._handle_invalidations)
    except ConflictError:
        log(20, 'Conflict during commit')
        write(s, STATUS_INVALID)
    else:
        self._report_load_record()
        log(20, 'Committed %3s objects %s bytes at %s',
            len(oids), len(tdata), datetime.now())
        write(s, STATUS_OKAY)
        client.unused_oids -= oid_set
        # Other clients must invalidate their cached copies.
        for c in self.clients:
            if c is not client:
                c.invalid.update(oids)
        self.bytes_since_pack += len(tdata) + 8
def _send_load_response(self, s, oid):
    """Send the record for *oid* to the client, or the appropriate
    error status (invalid / missing / read conflict)."""
    if oid in self._find_client(s).invalid:
        write(s, STATUS_INVALID)
        return
    try:
        record = self.storage.load(oid)
    except KeyError:
        log(10, 'KeyError %s', str_to_int8(oid))
        write(s, STATUS_KEYERROR)
    except ReadConflictError:
        log(10, 'ReadConflictError %s', str_to_int8(oid))
        write(s, STATUS_INVALID)
    else:
        if is_logging(5):
            # Count loads per class for the periodic load report.
            class_name = extract_class_name(record)
            self.load_record[class_name] = (
                self.load_record.get(class_name, 0) + 1)
            log(4, 'Load %-7s %s', str_to_int8(oid), class_name)
        write(s, STATUS_OKAY)
        write_int4_str(s, record)
def _send_load_response(self, client, db_name, storage, oid):
    """Coroutine: send the record for *oid* from *storage*, or the
    appropriate error status (invalid / missing / read conflict)."""
    if oid in client.invalid[db_name]:
        yield client.write(STATUS_INVALID)
        return
    try:
        record = storage.load(oid)
    except KeyError:
        log(10, 'KeyError %s', str_to_int8(oid))
        yield client.write(STATUS_KEYERROR)
    except ReadConflictError:
        log(10, 'ReadConflictError %s', str_to_int8(oid))
        yield client.write(STATUS_INVALID)
    else:
        if is_logging(5):
            # Count loads per class for the periodic load report.
            class_name = extract_class_name(record)
            storage.d_load_record[class_name] = (
                storage.d_load_record.get(class_name, 0) + 1)
            log(4, 'Load %-7s %s', str_to_int8(oid), class_name)
        yield client.write(STATUS_OKAY)
        yield client.write(int4_to_str(len(record)))
        yield client.write(record)
def handle_M(self, s):  # new OIDs
    """Allocate a batch of fresh oids ('M') and send them back as one
    concatenated byte string."""
    how_many = ord(read(s, 1))
    log(10, "oids: %s", how_many)
    write(s, join_bytes(self._new_oids(s, how_many)))
def handle_Q(self, s):  # graceful quit
    """Stop serving ('Q') by unwinding the server loop."""
    log(20, 'Quit')
    raise SystemExit
def handle_Q(self, s):  # graceful quit
    """Stop serving ('Q'): close the storage first, then unwind the
    server loop."""
    log(20, 'Quit')
    self.storage.close()
    raise SystemExit