def getLoopbackConnection(self):
    """Yield a client connection looped back onto a fresh master application.

    The application is always closed when the generator is finalized,
    even if the consumer raises while using the connection.
    """
    master_app = MasterApplication(address=BIND, getSSL=NEOCluster.SSL,
                                   getReplicas=0, getPartitions=1)
    try:
        event_handler = EventHandler(master_app)
        master_app.listening_conn = ListeningConnection(
            master_app, event_handler, master_app.server)
        # Register a master node pointing back at our own listening address,
        # then hand out a client connection to it.
        peer = master_app.nm.createMaster(
            address=master_app.listening_conn.getAddress(),
            uuid=master_app.uuid)
        yield ClientConnection(master_app, event_handler, peer)
    finally:
        master_app.close()
class HandlerTests(NeoUnitTestBase):
    """Check how EventHandler.dispatch reacts to exceptions raised by
    handler methods."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.handler = EventHandler(Mock())

    def setFakeMethod(self, method):
        # dispatch() resolves the packet's handler_method_name attribute
        # ('fake_method') on the handler, so plug *method* there.
        self.handler.fake_method = method

    def getFakePacket(self):
        packet = Mock({'__repr__': 'Fake Packet'})
        packet._args = ()
        packet.handler_method_name = 'fake_method'
        return packet

    def test_dispatch(self):
        conn = self.getFakeConnection()
        packet = self.getFakePacket()
        # Nominal case: the handler method runs without raising.
        self.setFakeMethod(lambda c: None)
        self.handler.dispatch(conn, packet)
        # Each of these protocol-level errors must be answered with an
        # error packet and must abort the connection.
        for error in (UnexpectedPacketError('fake packet'),
                      NotReadyError(),
                      ProtocolError()):
            conn.mockCalledMethods = {}
            def fake(c, _error=error):
                raise _error
            self.setFakeMethod(fake)
            self.handler.dispatch(conn, packet)
            self.checkErrorPacket(conn)
            self.checkAborted(conn)
def shutdown(self):
    """Close all connections and exit"""
    self.changeClusterState(ClusterStates.STOPPING)
    # Marking a fictional storage node as starting operation blocks any
    # request to start a new transaction. Do this way has 2 advantages:
    # - It's simpler than changing the handler of all clients,
    #   which is anyway not supported by EventQueue.
    # - Returning an error code would cause activity on client side for
    #   nothing.
    # What's important is to not abort during the second phase of commits
    # and for this, clients must even be able to reconnect, in case of
    # failure during tpc_finish.
    # We're rarely involved in vote, so we have to trust clients that they
    # abort any transaction that is still in the first phase.
    self.storage_starting_set.add(None)
    try:
        # wait for all transaction to be finished
        while self.tm.hasPending():
            self.em.poll(1)
    except StoppedOperation:
        # Operation already stopped; fall through to the shutdown sequence.
        logging.critical('No longer operational')
    logging.info("asking remaining nodes to shutdown")
    # Stop accepting new connections before tearing down existing ones.
    self.listening_conn.close()
    # Generic handler: from now on, incoming packets need no special
    # processing while we drain and close the remaining connections.
    handler = EventHandler(self)
    for node in self.nm.getList():
        if not node.isConnected(True):
            continue
        conn = node.getConnection()
        conn.setHandler(handler)
        if not conn.connecting:
            if node.isStorage():
                # Tell each storage node it is DOWN so it shuts down too.
                conn.send(Packets.NotifyNodeInformation(monotonic_time(),
                    ((node.getType(), node.getAddress(), node.getUUID(),
                      NodeStates.DOWN, None),)))
            if conn.pending():
                # Let queued outgoing data flush before closing.
                conn.abort()
                continue
        conn.close()
    # Wait until every connection is actually closed.
    while self.em.connection_dict:
        self.em.poll(1)
    # then shutdown
    sys.exit()
def getLoopbackConnection(self):
    """Return a resettable client connection to a fresh loopback master.

    The returned connection exposes a ``reset`` attribute that wipes the
    instance state and re-runs ``__init__`` with the same application,
    handler and node, restoring the ``reset`` attribute afterwards.
    """
    master_app = MasterApplication(getSSL=NEOCluster.SSL,
                                   getReplicas=0, getPartitions=1)
    event_handler = EventHandler(master_app)
    master_app.listening_conn = ListeningConnection(
        master_app, event_handler, master_app.server)
    master_node = master_app.nm.createMaster(
        address=master_app.listening_conn.getAddress(),
        uuid=master_app.uuid)
    # Allocate without running __init__ so that the very first
    # initialization goes through reset(), exactly like later ones.
    loopback_conn = ClientConnection.__new__(ClientConnection)
    def reset():
        loopback_conn.__dict__.clear()
        loopback_conn.__init__(master_app, event_handler, master_node)
        # __dict__.clear() dropped this attribute; put it back.
        loopback_conn.reset = reset
    reset()
    return loopback_conn
def shutdown(self):
    """Close all connections and exit"""
    self.changeClusterState(ClusterStates.STOPPING)
    # Stop accepting new connections first.
    self.listening_conn.close()
    # Drop any peer that never completed identification.
    for conn in self.em.getConnectionList():
        node = self.nm.getByUUID(conn.getUUID())
        if node is None or not node.isIdentified():
            conn.close()
    # No need to change handlers in order to reject RequestIdentification
    # & AskBeginTransaction packets because they won't be any:
    # the only remaining connected peers are identified non-clients
    # and we don't accept new connections anymore.
    try:
        # wait for all transaction to be finished
        while self.tm.hasPending():
            self.em.poll(1)
    except StoppedOperation:
        # Already non-operational; proceed with the shutdown anyway.
        logging.critical('No longer operational')
    logging.info("asking remaining nodes to shutdown")
    # Generic handler for the remaining traffic while draining.
    handler = EventHandler(self)
    for node in self.nm.getConnectedList():
        conn = node.getConnection()
        if node.isStorage():
            conn.setHandler(handler)
            # Tell each storage node it is going down so it exits too.
            conn.notify(Packets.NotifyNodeInformation(((node.getType(),
                node.getAddress(), node.getUUID(),
                NodeStates.TEMPORARILY_DOWN),)))
            conn.abort()
        elif conn.pending():
            # Flush queued outgoing data before closing.
            conn.abort()
        else:
            conn.close()
    # Wait until every connection is actually closed.
    while self.em.connection_dict:
        self.em.poll(1)
    # then shutdown
    sys.exit()
def provideService(self):
    """Run the backup main loop until asked to stop backing up.

    Repeatedly connects to the upstream (primary) master as a client,
    switches the cluster to BACKINGUP, and replicates until either the
    upstream master fails (reconnect) or a STOPPING_BACKUP state change
    is requested, in which case the backup is finalized and the last
    backed-up tid is returned to the caller.
    """
    logging.info('provide backup')
    poll = self.em.poll
    app = self.app
    pt = app.pt
    while True:
        app.changeClusterState(ClusterStates.STARTING_BACKUP)
        bootstrap = BootstrapManager(self, NodeTypes.CLIENT,
                                     backup=app.name)
        # {offset -> node}
        self.primary_partition_dict = {}
        # [[tid]]
        self.tid_list = tuple([] for _ in xrange(pt.getPartitions()))
        try:
            # Wait until every readable cell belongs to a ready storage.
            while True:
                for node in pt.getNodeSet(readable=True):
                    if not app.isStorageReady(node.getUUID()):
                        break
                else:
                    break
                poll(1)
            node, conn = bootstrap.getPrimaryConnection()
            try:
                app.changeClusterState(ClusterStates.BACKINGUP)
                del bootstrap, node
                self.ignore_invalidations = True
                conn.setHandler(BackupHandler(self))
                conn.ask(Packets.AskLastTransaction())
                # debug variable to log how big 'tid_list' can be.
                self.debug_tid_count = 0
                # Event-driven from here: BackupHandler does the work.
                while True:
                    poll(1)
            except PrimaryFailure, msg:
                # Upstream master lost: clean up and reconnect (outer loop).
                logging.error('upstream master is down: %s', msg)
            finally:
                app.backup_tid = pt.getBackupTid()
                try:
                    conn.close()
                except PrimaryFailure:
                    pass
                try:
                    del self.pt
                except AttributeError:
                    pass
                for node in app.nm.getClientList(True):
                    node.getConnection().close()
        except StateChangedException, e:
            # Only STOPPING_BACKUP is handled here; anything else bubbles up.
            if e.args[0] != ClusterStates.STOPPING_BACKUP:
                raise
            app.changeClusterState(*e.args)
            tid = app.backup_tid
            # Wait for non-primary partitions to catch up,
            # so that all UP_TO_DATE cells are really UP_TO_DATE.
            # XXX: Another possibility could be to outdate such cells, and
            #      they would be quickly updated at the beginning of the
            #      RUNNING phase. This may simplify code.
            # Any unfinished replication from upstream will be truncated.
            while pt.getBackupTid(min) < tid:
                poll(1)
            last_tid = app.getLastTransaction()
            handler = EventHandler(app)
            if tid < last_tid:
                assert tid != ZERO_TID
                logging.warning("Truncating at %s (last_tid was %s)",
                    dump(app.backup_tid), dump(last_tid))
            else:
                # We will do a dummy truncation, just to leave backup mode,
                # so it's fine to start automatically if there's any
                # missing storage.
                # XXX: Consider using another method to leave backup mode,
                #      at least when there's nothing to truncate. Because
                #      in case of StoppedOperation during VERIFYING state,
                #      this flag will be wrongly set to False.
                app._startup_allowed = True
            # If any error happened before reaching this line, we'd go back
            # to backup mode, which is the right mode to recover.
            del app.backup_tid
            # Now back to RECOVERY...
            return tid
def setUp(self):
    """Prepare each test with an EventHandler bound to a mock application."""
    NeoUnitTestBase.setUp(self)
    self.handler = EventHandler(Mock())
class HandlerTests(NeoUnitTestBase):
    """Check how EventHandler.dispatch reacts to exceptions raised by
    handler methods."""

    def setUp(self):
        NeoUnitTestBase.setUp(self)
        self.handler = EventHandler(Mock())

    def setFakeMethod(self, method):
        # dispatch() resolves the packet's handler_method_name attribute
        # ('fake_method') on the handler, so plug *method* there.
        self.handler.fake_method = method

    def getFakePacket(self):
        packet = Mock({"decode": (), "__repr__": "Fake Packet"})
        packet.handler_method_name = "fake_method"
        return packet

    def test_dispatch(self):
        conn = self.getFakeConnection()
        packet = self.getFakePacket()
        # Nominal case: the handler method runs without raising.
        self.setFakeMethod(lambda c: None)
        self.handler.dispatch(conn, packet)

        def dispatch_raising(error):
            # Re-dispatch with a handler method that raises *error*,
            # clearing the recorded mock calls first.
            conn.mockCalledMethods = {}
            def fake(c):
                raise error
            self.setFakeMethod(fake)
            self.handler.dispatch(conn, packet)

        # An unexpected packet is answered with an error and aborts.
        dispatch_raising(UnexpectedPacketError("fake packet"))
        self.checkErrorPacket(conn)
        self.checkAborted(conn)
        # A malformed packet closes the connection outright.
        dispatch_raising(PacketMalformedError("message"))
        self.checkClosed(conn)
        # The remaining errors all answer with an error packet and abort.
        for error in (BrokenNodeDisallowedError(),
                      NotReadyError(),
                      ProtocolError()):
            dispatch_raising(error)
            self.checkErrorPacket(conn)
            self.checkAborted(conn)
def testTimeout(self):
    """Check request-timeout bookkeeping of a client connection.

    Simulated time is injected by monkey-patching connection.time; the
    connection's close() is replaced to record *when* a timeout closed it.
    """
    # NOTE: This method uses ping/pong packets only because MT connections
    #       don't accept any other packet without specifying a queue.
    self.handler = EventHandler(self.app)
    conn = self._makeClientConnection()
    # Each use case is (events, expected):
    #   events:   alternating (time, packet_id) pairs; packet_id None
    #             means "send another Ping", otherwise "answer that id".
    #   expected: alternating (timeout time, packet_id to answer) pairs.
    use_case_list = (
        # (a) For a single packet sent at T,
        #     the limit time for the answer is T + (1 * CRITICAL_TIMEOUT)
        ((), (1., 0)),
        # (b) Same as (a), even if send another packet at (T + CT/2).
        #     But receiving a packet (at T + CT - ε) resets the timeout
        #     (which means the limit for the 2nd one is T + 2*CT)
        ((.5, None), (1., 0, 2., 1)),
        # (c) Same as (b) with a first answer at well before the limit
        #     (T' = T + CT/2). The limit for the second one is T' + CT.
        ((.1, None, .5, 1), (1.5, 0)),
    )
    def set_time(t):
        # Fake clock: t is expressed in units of CRITICAL_TIMEOUT.
        connection.time = lambda: int(CRITICAL_TIMEOUT * (1000 + t))
    closed = []
    # Record the simulated time at which the connection gets closed.
    conn.close = lambda: closed.append(connection.time())
    def answer(packet_id):
        # Feed an encoded Pong with the given id into the connection.
        p = Packets.Pong()
        p.setId(packet_id)
        conn.connector.receive = lambda read_buf: \
            read_buf.append(''.join(p.encode()))
        conn.readable()
        checkTimeout()
        conn.process()
    def checkTimeout():
        # Fire onTimeout() iff the deadline has passed at the fake time.
        timeout = conn.getTimeout()
        if timeout and timeout <= connection.time():
            conn.onTimeout()
    try:
        for use_case, expected in use_case_list:
            i = iter(use_case)
            conn.cur_id = 0
            set_time(0)
            # No timeout when no pending request
            self.assertEqual(conn._handlers.getNextTimeout(), None)
            conn.ask(Packets.Ping())
            # Replay the scripted events (extra pings / early answers).
            for t in i:
                set_time(t)
                checkTimeout()
                packet_id = i.next()
                if packet_id is None:
                    conn.ask(Packets.Ping())
                else:
                    answer(packet_id)
            # Verify each expected timeout: nothing just before the
            # deadline, a close exactly at it.
            i = iter(expected)
            for t in i:
                set_time(t - .1)
                checkTimeout()
                set_time(t)
                # this test method relies on the fact that only
                # conn.close is called in case of a timeout
                checkTimeout()
                self.assertEqual(closed.pop(), connection.time())
                answer(i.next())
            self.assertFalse(conn.isPending())
            self.assertFalse(closed)
    finally:
        # Restore the real clock even if an assertion failed.
        connection.time = time