Example 1
    async def startup(self, another_node: int):
        trace_log("SuccRouter.startup addr:", self.addr, "Running Startup!")
        if another_node is None:
            self.first_node = True
            self.predecessor = None
            self.successor = self.addr
        else:
            self.first_node = False
            # Find our successor
            request_successor = self.message_sender.create_message(
                MessageType.FIND_NODE_RESPONSIBLE_FOR_ADDR, another_node,
                {'addr': self.addr})
            node_who_used_to_be_responsible_reply = await self.send_message_and_await(
                request_successor,
                lambda msg: msg.has_type(MessageType.FIND_NODE_RESPONSIBLE_FOR_ADDR_REPLY)
                and msg.src == another_node)

            assert ('addr' in node_who_used_to_be_responsible_reply.args
                    and 'node' in node_who_used_to_be_responsible_reply.args)
            node_who_used_to_be_responsible = node_who_used_to_be_responsible_reply.args['node']
            self.successor = node_who_used_to_be_responsible
            trace_log("SuccRouter.startup addr:", self.addr,
                      "Found Successor! ", self.successor)

        await self.post_startup()
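
The helper send_message_and_await used above is not among these excerpts. A minimal sketch of what it might look like, assuming `import asyncio` at module level, that register_handler accepts a one-shot Handler, and that MessageSender exposes a raw send_message method (all three are assumptions):

    async def send_message_and_await(self, msg: Message, predicate):
        # Hypothetical sketch: send `msg`, then suspend until a message
        # matching `predicate` arrives. A real implementation would also
        # deregister the handler once it fires; that detail is omitted here.
        future = asyncio.get_running_loop().create_future()

        async def resolve(reply: Message):
            if not future.done():
                future.set_result(reply)

        self.async_scheduler.register_handler(Handler(predicate, resolve))
        self.message_sender.send_message(msg)  # assumed raw-send API
        return await future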
Example 2
    async def find_node_responsible_for_key(self, key: str):
        hashed_key = hash_key(key)
        node_responsible = await self.find_node_responsible_for_addr(hashed_key)
        trace_log('SuccRouter: Found the node responsible for key ', key,
                  ' : ', node_responsible)
        return node_responsible
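
hash_key is not shown in these excerpts. A common choice for a Chord-style ring is to hash the key and reduce it modulo the ring size; a minimal sketch, where RING_SIZE = 2**16 is an invented placeholder that must match the ring's real address space:

import hashlib

RING_SIZE = 2**16  # invented placeholder for the ring's address-space size


def hash_key(key: str) -> int:
    """Map a string key onto the ring's integer address space."""
    digest = hashlib.sha1(key.encode('utf-8')).digest()
    return int.from_bytes(digest, 'big') % RING_SIZE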
Example 3
    def __init__(self, addr: int, async_scheduler: AsyncScheduler,
                 message_sender: MessageSender, key_transferer: KeyTransferer):
        trace_log("Starting SuccRouter!")
        self.addr = addr
        self.async_scheduler = async_scheduler
        self.message_sender = message_sender
        self.key_transferer = key_transferer
        key_transferer.register_router(self)

        self.predecessor = None  # set in startup
        self.successor = None  # set in startup

        self.async_scheduler.register_handler(
            Handler(lambda msg: msg.has_type(MessageType.FIND_SUCCESSOR),
                    self.handle_find_successor))
        self.async_scheduler.register_handler(
            Handler(lambda msg: msg.has_type(MessageType.FIND_PREDECESSOR),
                    self.handle_find_predecessor))
        self.async_scheduler.register_handler(
            Handler(lambda msg: msg.has_type(MessageType.FIND_NODE_RESPONSIBLE_FOR_ADDR),
                    self.handle_find_node_responsible_for_addr))
        self.async_scheduler.register_handler(
            Handler(lambda msg: msg.has_type(MessageType.I_MIGHT_BE_YOUR_PREDECESSOR),
                    self.handle_i_might_be_your_predecessor))
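
Handler itself is not defined in these excerpts; it evidently pairs a match predicate with the coroutine to run on a match. A minimal sketch (the field names are invented):

from dataclasses import dataclass
from typing import Awaitable, Callable


@dataclass
class Handler:
    """A message predicate together with the coroutine to run on a match."""
    matches: Callable[['Message'], bool]
    callback: Callable[['Message'], Awaitable[None]]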
Example 4
    async def copy_key_values_to(self, to_node: int, interval: Interval,
                                 dict_to_transfer: Dict[str, str]):
        """
        Copy key_values in the `interval` from persistent_storage to `to_node`
        """
        msg_to_transfer = self.message_sender.create_message(
            MessageType.TRANSFER_KEYS, to_node, {
                'interval': interval.to_string(),
                'data_dict': dict_to_transfer,
                'copy_node': self.chord_addr,
                'receiving_node': to_node
            })
        received_msg = await self.send_message_and_await_response(
            msg_to_transfer, MessageType.TRANSFER_KEYS_CONFIRMED)

        keys_transferred_successfully, interval_transferred_successfully = received_msg.get_args(
            ['keys_transfered', 'interval'])
        assert (set(keys_transferred_successfully) == set(dict_to_transfer.keys()))
        trace_log(colorama.Fore.MAGENTA + "KeyTransferer", self.chord_addr,
                  ": Copy successfully completed from", self.chord_addr, "to",
                  to_node, "of interval", interval.to_string())
        if debug_log_key_transfers:
            debug_log("KeyTransferer", self.chord_addr,
                      ": Copy successfully completed from", self.chord_addr,
                      "to", to_node, "of interval", interval.to_string())
Example 5
    def __init__(self, 
                addr: int, 
                another_node: int, 
                persistent_storage: PersistantKeyValueStore, 
                async_scheduler: AsyncScheduler, 
                message_sender: MessageSender, 
                key_transferer: KeyTransferer):
        self.alive = True
        trace_log('ChordNode.__init__: Starting Node addr: ', addr, ' with relay-node ', another_node)
        
        self.chord_addr = addr

        self.router = FingerTableRouter(addr, async_scheduler, message_sender, key_transferer, persistent_storage)
        self.persistent_storage = persistent_storage
        self.async_scheduler = async_scheduler
        self.message_sender = message_sender
        self.another_node = another_node
        self.key_transferer = key_transferer

        # Register the handlers
        self.handlers = [
            async_scheduler.register_handler(
                Handler(lambda msg: msg.message_type == MessageType.GET.value,
                        self.try_twice_get)),
            async_scheduler.register_handler(
                Handler(lambda msg: msg.message_type == MessageType.SET.value,
                        self.try_twice_set)),
            async_scheduler.register_handler(
                Handler(lambda msg: msg.message_type == MessageType.GET_VALUE.value,
                        self.handle_get_value)),
            async_scheduler.register_handler(
                Handler(lambda msg: msg.message_type == MessageType.SET_VALUE.value,
                        self.handle_set_value)),
        ]
        trace_log('ChordNode.__init__: Done Starting Node addr: ', addr, ' with relay-node ', another_node)
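
try_twice_get and try_twice_set are registered above but not excerpted. Judging only from their names, they presumably retry the underlying handler once on failure; a hypothetical sketch (the asyncio.TimeoutError failure mode and single retry are guesses):

    async def try_twice_get(self, msg: Message):
        # Hypothetical retry wrapper: run handle_get, and try once more if
        # the first attempt times out.
        try:
            await self.handle_get(msg)
        except asyncio.TimeoutError:
            trace_log('ChordNode: get attempt timed out, retrying once')
            await self.handle_get(msg)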
Example 6
    async def handle_get(self, msg: Message):
        """
        Handle a client request to get the value of a key among the chord network.
        """
        if not self.alive:
            return
        trace_log('ChordNode: Running get handler')
        assert ('key' in msg.args and 'id' in msg.args)
        req_id = msg.args['id']
        key = msg.args['key']

        node_responsible_addr = await self.router.find_node_responsible_for_key(key) 
        debug_test("Chord", self.chord_addr, ": found node responsible for",
                   key, node_responsible_addr)

        if node_responsible_addr == self.chord_addr:
            value = await self.persistent_storage.get(key)
        else:
            get_value_msg = self.message_sender.create_message(
                MessageType.GET_VALUE, node_responsible_addr, {'key': key})
            get_value_reply = await self.keep_resending_until_response(
                get_value_msg,
                lambda msg: msg.message_type == MessageType.GET_VALUE_REPLY.value
                and msg.src == node_responsible_addr)
            assert ('key' in get_value_reply.args and 'value' in get_value_reply.args)
            assert (get_value_reply.args['key'] == key)
            value = get_value_reply.args['value']
        self.message_sender.create_and_send_message(
            MessageType.GET_RESPONSE_TO_BROKER, None,
            {'id': req_id, 'key': str(key), 'value': value})
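
handle_get_value, the owner-side counterpart registered in the ChordNode constructor, is not shown. A minimal sketch consistent with the reply fields asserted above:

    async def handle_get_value(self, msg: Message):
        # Hypothetical owner-side handler: look the key up locally and echo
        # it back with the value in a GET_VALUE_REPLY.
        key = msg.args['key']
        value = await self.persistent_storage.get(key)
        self.message_sender.create_and_send_message(
            MessageType.GET_VALUE_REPLY, msg.src, {'key': key, 'value': value})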
        
Example 7
    def startup(self):
        """
        Start the monitor jobs that stabilize the successor list.
        """
        trace_log('SuccessorList.Startup', self.addr, 'starting up.')
        self.async_scheduler.schedule_monitor_job(
            self.stabalize_successor_list, config.MONITOR_TIME)
        self.async_scheduler.schedule_monitor_job(self.stabalize_predecessor,
                                                  config.MONITOR_TIME)
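
schedule_monitor_job belongs to the AsyncScheduler, which is not excerpted. A plausible minimal implementation, assuming it simply reruns the coroutine forever on a fixed period:

import asyncio


class AsyncScheduler:
    # Only the monitor-job half is sketched; the real class presumably also
    # owns the handler registry used in the other excerpts.
    def schedule_monitor_job(self, job, period_seconds: float):
        async def run_periodically():
            while True:
                await asyncio.sleep(period_seconds)
                await job()

        return asyncio.create_task(run_periodically())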
Example 8
    def __init__(self, addr: str, peers: List[int],
                 async_scheduler: AsyncScheduler,
                 message_sender: MessageSender, on_become_leader,
                 on_step_down):
        trace_log(colorama.Fore.LIGHTRED_EX + "RaftPersistentKeyValueStore",
                  addr, "starting up")
        self.addr = addr
        self.persistent_storage = PersistantKeyValueStore()
        self.raft_node = RaftNode(addr, peers, self.persistent_storage,
                                  async_scheduler, message_sender,
                                  on_become_leader, on_step_down)
Example 9
    async def copy_to(self, to_node: int, interval: Interval):
        """
        Copy the keys of an interval from our persistent storage to `to_node`.
        """
        trace_log("KeyTransferer ", self.chord_addr,
                  " : Starting copy of keys from myself to ", to_node)
        all_keys = await self.persistant_storage.get_keys()
        keys_to_transfer = [
            key for key in all_keys if interval.in_interval(hash_key(key))
        ]
        dict_to_transfer = {}
        for key in keys_to_transfer:
            dict_to_transfer[key] = await self.persistant_storage.get(key)
        await self.copy_key_values_to(to_node, interval, dict_to_transfer)
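
Interval is constructed elsewhere as Interval(lo, lo_closed, hi, hi_closed) and queried via in_interval(x) and to_string(). A minimal sketch that handles ring wraparound (field names invented, RING_SIZE the same placeholder as in the hash_key sketch):

RING_SIZE = 2**16  # invented placeholder, as in the hash_key sketch


class Interval:
    """A possibly wrapping arc of the ring between two addresses."""

    def __init__(self, lo: int, lo_closed: bool, hi: int, hi_closed: bool):
        self.lo, self.lo_closed = lo, lo_closed
        self.hi, self.hi_closed = hi, hi_closed

    def in_interval(self, x: int) -> bool:
        # Walk clockwise from lo: x is inside if it sits no further than hi.
        # Degenerate lo == hi intervals cover at most the single point lo.
        span = (self.hi - self.lo) % RING_SIZE
        offset = (x - self.lo) % RING_SIZE
        if offset == 0:
            return self.lo_closed
        if offset == span:
            return self.hi_closed
        return offset < span

    def to_string(self) -> str:
        left = '[' if self.lo_closed else '('
        right = ']' if self.hi_closed else ')'
        return f'{left}{self.lo}, {self.hi}{right}'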
Example 10
    async def stabilize(self):
        trace_log("SuccRouter.stabilize: addr ", self.addr, " Stabilizing...")
        if self.successor == self.addr:
            # We were the first node to start and no one else has joined yet.
            return

        # 1. Find the predecessor of our successor and wait for the response.
        successor_predecessor = await self.get_predecessor(self.successor)
        trace_log("SuccRouter.stabilize: addr ", self.addr,
                  "found predecessor ", successor_predecessor)

        # Update the ring pointers.
        # 2. If our successor has no predecessor, we may be its predecessor.
        if successor_predecessor is None:
            self.notify_i_might_be_your_predecessor(self.successor)
            return

        # 3. If self.addr < successor's predecessor < successor, then our
        #    successor's predecessor is likely adjacent to us on the ring,
        #    so it becomes our new successor.
        if in_interval(self.addr, self.successor, successor_predecessor):
            self.successor = successor_predecessor

        # 4. Otherwise (successor's predecessor < self.addr < successor),
        #    we are likely our successor's predecessor, so notify it.
        if successor_predecessor != self.addr:
            self.notify_i_might_be_your_predecessor(self.successor)

        debug_log("SuccRouter.stabilize Complete: addr: ", self.addr,
                  "predecessor: ", self.predecessor, " successor: ",
                  self.successor)
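
The free function in_interval(start, end, x) used here (and in the join handler in Example 11) is not excerpted. Its call sites suggest it tests whether x lies strictly between start and end going clockwise around the ring; a minimal sketch:

RING_SIZE = 2**16  # invented placeholder, as in the sketches above


def in_interval(start: int, end: int, x: int) -> bool:
    """True iff x lies strictly inside the clockwise arc (start, end)."""
    return 0 < (x - start) % RING_SIZE < (end - start) % RING_SIZE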
Example 11
    async def handle_i_might_be_your_predecessor(self, msg: Message):
        assert ('addr' in msg.args)
        candidate_predecessor = msg.args['addr']

        # If we are the first node.
        if self.successor == self.addr:
            self.successor = candidate_predecessor
            trace_log("SuccRouter addr:", self.addr, "Setting new successor ",
                      candidate_predecessor)
            # We are no longer responsible for (self.addr, candidate_predecessor],
            # so that half of the address space is handed off below.

        # We know there are at least two nodes in the system.
        if self.predecessor is None:
            # From our point of view, we used to be responsible for (self.successor, self.addr].
            # Now candidate_predecessor is taking (self.successor, candidate_predecessor] from us,
            # so we transfer that interval to it.
            if self.successor != candidate_predecessor:
                debug_log(self.addr,
                          "Transfer because self.predecessor == None, pred",
                          self.predecessor, 'suc', self.successor)
                await self.key_transferer.copy_to(
                    candidate_predecessor,
                    Interval(self.successor, False, candidate_predecessor,
                             True))
            else:
                # There is only one other node, so we transfer it the other
                # half of the address space instead.
                debug_log(self.addr,
                          "Transfer because self.predecessor == None, pred",
                          self.predecessor, 'suc', self.successor)
                await self.key_transferer.copy_to(
                    candidate_predecessor,
                    Interval(self.addr, False, candidate_predecessor, True))

            # Normally this is not needed, but it fixes an edge case involving
            # starting up with one node and two concurrent joins.

            self.predecessor = candidate_predecessor
            trace_log("SuccRouter addr:", self.addr,
                      "No current predecessor. Setting predecessor ",
                      candidate_predecessor)
        elif in_interval(self.predecessor, self.addr, candidate_predecessor):
            # From our point of view, we are responsible for (self.predecessor, self.addr].
            # Now we are only responsible for (candidate_predecessor, self.addr],
            # so we transfer out the difference: (self.predecessor, candidate_predecessor].
            debug_log(
                self.addr,
                "Transfer because candidate_predecessor in (self.predecessor, self.addr), pred",
                self.predecessor, 'suc', self.successor)
            await self.key_transferer.copy_to(
                candidate_predecessor,
                Interval(self.predecessor, False, candidate_predecessor, True))
            self.predecessor = candidate_predecessor
            trace_log("SuccRouter addr:", self.addr,
                      "Setting new predecessor ", candidate_predecessor)
Example 12
    async def stabalize_successor_list(self):
        """
        Maintains the successor_list
        """
        # Prune dead entries from the start of the list.
        trace_log('SuccessorList.stabilize', self.addr,
                  ': starting to stabilize.')
        await self.prune_start_of_successor_list()
        old_successors = set(self.successor_list)
        # If no successor is alive, we are our own successor.
        if self.successor_list[0] is None:
            self.router.successor = self.addr
        else:
            # refresh the list.
            await self.refresh_successor_list()
            self.router.successor = self.successor_list[0]

        # We must always reach this point; blocking before here would keep
        # the successor pointers from eventually becoming correct.

        # Copy data to all new successors
        if self.replicate_data_to_successors:
            await self.replicate_our_data_to(
                set(self.successor_list).difference(old_successors))
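
refresh_successor_list and replicate_our_data_to are not among these excerpts. A hypothetical sketch of the latter, assuming the node pushes its whole responsible arc (router.predecessor, addr] to each newly observed successor and that this class holds a key_transferer reference (both assumptions):

    async def replicate_our_data_to(self, new_successors):
        # Hypothetical: copy our responsible interval to every successor we
        # have not replicated to before, skipping placeholders and ourselves.
        if self.router.predecessor is None:
            return
        for node in new_successors:
            if node is None or node == self.addr:
                continue
            await self.key_transferer.copy_to(
                node, Interval(self.router.predecessor, False, self.addr, True))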
Example 13
    async def find_node_responsible_for_addr(self, addr: int):
        trace_log(
            'SuccRouter.find_node_responsible : Finding the node responsible for addr',
            addr)

        # No other node has joined yet.
        if self.addr == self.successor:
            return self.addr

        next_node_id = self.successor
        iteration_num = 1

        # A key k is assigned to the first node whose identifier is equal to
        # or follows (the identifier of) k in the identifier space.
        while addr_later(self.addr, addr, next_node_id):
            trace_log(
                'SuccRouter.find_node_responsible: Running Successor iteration ',
                iteration_num)

            msg_to_send = self.message_sender.create_message(
                MessageType.FIND_SUCCESSOR, next_node_id)
            find_successor_message: Message = await self.send_message_and_await(
                msg_to_send,
                lambda msg: msg.message_type == MessageType.FIND_SUCCESSOR_REPLY.value
                and msg.src == next_node_id)

            assert ('successor' in find_successor_message.args)

            trace_log(
                'SuccRouter.find_node_responsible: Finished successor iteration ',
                iteration_num, '. Found successor of ', next_node_id,
                ' which is ', find_successor_message.args['successor'])
            next_node_id = int(find_successor_message.args['successor'])
            iteration_num += 1

            if next_node_id == self.addr:
                # We went all the way around the ring, so we are the responsible node.
                break

        node_responsible = next_node_id
        trace_log('SuccRouter: Found the node responsible for addr ', addr,
                  ' : ', node_responsible)
        return node_responsible
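
addr_later is not excerpted. From its use above, addr_later(start, a, b) should hold while the target a still lies past b when walking clockwise from start, i.e. while b is not yet responsible for a; a minimal sketch:

RING_SIZE = 2**16  # invented placeholder, as in the sketches above


def addr_later(start: int, a: int, b: int) -> bool:
    """True iff a comes strictly after b when walking clockwise from start."""
    return (a - start) % RING_SIZE > (b - start) % RING_SIZE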
Example 14
    async def prune_start_of_successor_list(self):
        """
        Prunes the start of the successor list for nodes that have died.
        """
        trace_log('SuccessorList.prune_start_of_successor_list', self.addr,
                  ': starting to prune.')
        i = 0
        while i < config.NUM_SUCCESSORS_IN_SUCCESSOR_LIST:
            # Run at most NUM_SUCCESSORS_IN_SUCCESSOR_LIST times.
            successor = self.successor_list[0]
            # If there are no successors left in the list, we are done.
            if successor is None:
                return
            trace_log('SuccessorList.prune_start_of_successor_list', self.addr,
                      ': checking if', successor, 'is alive')
            is_alive = await self.is_alive(successor)
            trace_log('SuccessorList.prune_start_of_successor_list', self.addr,
                      ': checking if', successor, 'is alive', is_alive)
            # If the head of the list is alive, pruning is finished.
            if is_alive:
                return
            # The node is dead, so shift the successor list over by one.
            self.successor_list = self.successor_list[1:] + [None]
            i += 1
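
is_alive is not shown in these excerpts. A hypothetical probe, assuming a PING/PING_REPLY message pair (invented names), the send_message_and_await helper from the other excerpts, and asyncio.wait_for for the timeout:

    async def is_alive(self, node: int) -> bool:
        # Hypothetical liveness check: ping the node and treat a timeout as
        # death. The one-second budget is arbitrary.
        ping = self.message_sender.create_message(MessageType.PING, node)
        try:
            await asyncio.wait_for(
                self.send_message_and_await(
                    ping, lambda msg: msg.has_type(MessageType.PING_REPLY)
                    and msg.src == node),
                timeout=1.0)
            return True
        except asyncio.TimeoutError:
            return False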
Example 15
    async def set(self, key, value):
        """
        Set the value for a key.
        """
        trace_log("PersistantStorage: setting key ", key, " to value ", value)
        self.dict[key] = value
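
For completeness, the matching get on this store presumably just reads the dict; a minimal sketch in the same style (returning None on a miss is an assumption):

    async def get(self, key):
        """
        Get the value for a key, or None if it was never set.
        """
        trace_log("PersistantStorage: getting key ", key)
        return self.dict.get(key)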