Example #1
    def __init__(self, ksize=20, alpha=3, id=None, storage=None,
                 talos_vc=None, rebub_delay=3600, tls_port=-1):
        """
        Create a server instance.
        Args:
            ksize (int): The k parameter from the paper
            alpha (int): The alpha parameter from the paper
            id: The id for this node on the network.
            storage: An instance that implements :interface:`~kademlia.storage.IStorage`
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = storage or TalosLevelDBDHTStorage("./leveldb")
        self.node = Node(id or digest(random.getrandbits(255)))

        def start_looping_call(num_seconds):
            self.refreshLoop = LoopingCall(self.refreshTable).start(num_seconds)

        self.delay = rebub_delay
        task.deferLater(reactor, rebub_delay, start_looping_call, rebub_delay)
        self.talos_vc = talos_vc or AsyncPolicyApiClient()
        self.protocol = TalosKademliaProtocol(self.node, self.storage, ksize, talos_vc=self.talos_vc)
        self.httpprotocol_client = None
        self.tls_port = tls_port
Example #2
    def __init__(self, ksize=20, alpha=3, priv_key=None, storage=None,
                 talos_vc=None, rebub_delay=3600, c1bits=1, tls_port=-1):
        """
        Create a server instance.
        Args:
            ksize (int): The k parameter from the paper
            alpha (int): The alpha parameter from the paper
            priv_key: The ECDSA private key for this node; if None, a key pair
                      and node id are generated with a crypto puzzle (c1bits)
            storage: An instance that implements :interface:`~kademlia.storage.IStorage`
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = storage or TalosLevelDBDHTStorage("./leveldb")
        self.c1bits = c1bits

        if priv_key is None:
            self.priv_key, node_id = generate_keys_with_crypto_puzzle(c1bits)
        else:
            self.priv_key = priv_key
            node_id = pub_to_node_id(self.priv_key.public_key())

        self.node = Node(node_id)

        def start_looping_call(num_seconds):
            self.refreshLoop = LoopingCall(self.refreshTable).start(num_seconds)

        self.delay = rebub_delay
        task.deferLater(reactor, rebub_delay, start_looping_call, rebub_delay)

        self.talos_vc = talos_vc or AsyncPolicyApiClient()
        self.protocol = TalosSKademliaProtocol(self.priv_key, self.node,
                                               self.storage, ksize, talos_vc=self.talos_vc, cbits=c1bits)
        self.httpprotocol_client = None
        self.tls_port = tls_port
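The node id above comes from generate_keys_with_crypto_puzzle(c1bits). For orientation, here is an illustrative sketch of the S/Kademlia-style static crypto puzzle such a helper presumably solves (keep generating key pairs until the hash of the node id has c1bits leading zero bits); the ecdsa/hashlib choices and the function names are assumptions, not this project's code.

import hashlib

from ecdsa import NIST256p, SigningKey  # assumption: any ECDSA library would do


def leading_zero_bits(digest_bytes):
    """Count the number of leading zero bits in a byte string."""
    bits = 0
    for byte in bytearray(digest_bytes):
        if byte == 0:
            bits += 8
            continue
        for shift in range(7, -1, -1):
            if byte >> shift:
                break
            bits += 1
        break
    return bits


def generate_puzzle_keys(c1bits):
    """Sketch: node_id = H(pubkey); accept once H(node_id) starts with c1bits zero bits."""
    while True:
        priv = SigningKey.generate(curve=NIST256p)
        node_id = hashlib.sha1(priv.get_verifying_key().to_string()).digest()
        if leading_zero_bits(hashlib.sha1(node_id).digest()) >= c1bits:
            return priv, node_id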
Example #3
 def __init__(self, ttl=STORAGE_TTL):
     """
     By default, max age is three years.
     """
     self.data = OrderedDict()
     self.ttl = ttl
     self.log = Logger(system=self)
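For orientation, a sketch of how the data/ttl pair set up above is commonly used in a ForgetfulStorage-style store (entries timestamped on insert, culled once older than ttl seconds); this is illustrative only, not the project's implementation.

import time
from collections import OrderedDict


class TTLDictStorage(object):
    """Illustrative TTL store, not taken from the project above."""

    def __init__(self, ttl=3600):
        self.data = OrderedDict()
        self.ttl = ttl

    def __setitem__(self, key, value):
        # remember when the value arrived so it can be expired later
        self.data[key] = (time.time(), value)
        self.cull()

    def cull(self):
        # the oldest entries sit at the front of the OrderedDict
        now = time.time()
        while self.data:
            _, (birthday, _) = next(iter(self.data.items()))
            if now - birthday <= self.ttl:
                break
            self.data.popitem(last=False)

    def get(self, key, default=None):
        self.cull()
        if key in self.data:
            return self.data[key][1]
        return default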
Example #4
 def __init__(self, sourceNode, storage, ksize):
     RPCProtocol.__init__(self)
     self.router = RoutingTable(self, ksize, sourceNode)
     self.storage = storage
     self.sourceNode = sourceNode
     self.log = Logger(system=self)
     self.messages = []
Example #5
 def __init__(self,
              storage,
              talos_vc=TalosVCRestClient(),
              max_nonce_cache=1000,
              nonce_ttl=10):
     Resource.__init__(self)
     self.storage = storage
     self.log = Logger(system=self)
     self.talos_vc = talos_vc
     self.nonce_cache = TTLCache(max_nonce_cache, nonce_ttl)
     self.refreshLoop = LoopingCall(self.nonce_cache.expire).start(3600)
     self.sem = Semaphore(1)
Example #6
 def __init__(self,
              sourceNode,
              storage,
              ksize,
              talos_vc=TalosVCRestClient()):
     TalosRPCProtocol.__init__(self)
     self.router = TalosKademliaRoutingTable(self, ksize, sourceNode)
     self.storage = storage
     self.sourceNode = sourceNode
     self.log = Logger(system=self)
     self.talos_vc = talos_vc
     self.http_client = None
Example #7
class SpiderCrawl(object):
    """
    Crawl the network and look for given 160-bit keys.
    """
    def __init__(self, protocol, node, peers, ksize, alpha):
        """
        Create a new C{SpiderCrawl}er.

        Args:
            protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
            node: A :class:`~kademlia.node.Node` representing the key we're looking for
            peers: A list of :class:`~kademlia.node.Node` instances that provide the entry point for the network
            ksize: The value for k based on the paper
            alpha: The value for alpha based on the paper
        """
        self.protocol = protocol
        self.ksize = ksize
        self.alpha = alpha
        self.node = node
        self.nearest = NodeHeap(self.node, self.ksize)
        self.lastIDsCrawled = []
        self.log = Logger(system=self)
        self.log.info("creating spider with peers: %s" % peers)
        self.nearest.push(peers)

    def onError(self, err):
        self.log.error(repr(err))
        return err

    def _find(self, rpcmethod):
        """
        Get either a value or list of nodes.

        Args:
            rpcmethod: The protocol's callfindValue or callFindNode.

        The process:
          1. call find_* on the current alpha nearest nodes that have not yet been
             queried, adding the results to the current nearest list of k nodes.
          2. the current nearest list keeps track of who has already been queried;
             sort by distance and keep ksize entries.
          3. if the list is the same as last time, the next call goes to every
             node not yet queried.
          4. repeat until every node in the nearest list has been queried.
        """
        self.log.info("crawling with nearest: %s" % str(tuple(self.nearest)))
        count = self.alpha
        if self.nearest.getIDs() == self.lastIDsCrawled:
            self.log.info("last iteration same as current - checking all in list now")
            count = len(self.nearest)
        self.lastIDsCrawled = self.nearest.getIDs()

        ds = {}
        for peer in self.nearest.getUncontacted()[:count]:
            ds[peer.id] = rpcmethod(peer, self.node)
            self.nearest.markContacted(peer)
        d = deferredDict(ds)
        d.addCallback(self._nodesFound)
        d.addErrback(self.onError)
        return d
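The loop described in the _find docstring is driven by subclasses. Below is a sketch modeled on the companion NodeSpiderCrawl subclass in bmuller's kademlia; the RPCFindResponse wrapper and the NodeHeap helpers remove() and allBeenContacted() are assumed to exist as in that library.

class NodeSpiderCrawl(SpiderCrawl):
    """Illustrative subclass: crawl until the k nearest nodes have all been contacted."""

    def find(self):
        return self._find(self.protocol.callFindNode)

    def _nodesFound(self, responses):
        toremove = []
        for peerid, response in responses.items():
            response = RPCFindResponse(response)  # assumed helper wrapper
            if not response.happened():
                # the peer never answered; drop it from the heap
                toremove.append(peerid)
            else:
                self.nearest.push(response.getNodeList())
        self.nearest.remove(toremove)

        if self.nearest.allBeenContacted():
            return list(self.nearest)
        return self.find()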
Example #8
    def __init__(self, ksize=20, alpha=3, id=None, storage=None):
        """
        Create a server instance.  Call listen() to start listening on a port.

        @param ksize: The k parameter from the paper
        @param alpha: The alpha parameter from the paper
        @param id: The id for this node on the network
        @param storage: An instance that implements the storage interface
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = storage or ForgetfulStorage()
        self.node = Node(id or digest(random.getrandbits(255)))
        self.protocol = KademliaProtocol(self.node, self.storage, ksize)
        self.refreshLoop = LoopingCall(self.refreshTable).start(3600)
Example #9
 def __init__(self, sourceNode, storage, ksize):
     RPCProtocol.__init__(self)
     self.router = RoutingTable(self, ksize, sourceNode)
     self.storage = storage
     self.sourceNode = sourceNode
     self.ksize = ksize
     self.log = Logger(system=self)
Example #10
 def __init__(self, ttl=STORAGE_TTL):
     """
     By default, max age is three years.
     """
     self.data = OrderedDict()
     self.ttl = ttl
     self.log = Logger(system=self)
Example #11
    def __init__(self, ksize=20, alpha=3, id=None, storage=None):
        """
        Create a server instance.  Call listen() to start listening on a port.

        Args:
            ksize (int): The k parameter from the paper
            alpha (int): The alpha parameter from the paper
            id: The id for this node on the network.
            storage: An instance that implements :interface:`~kademlia.storage.IStorage`
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = storage or ForgetfulStorage()
        self.node = Node(id or digest(random.getrandbits(255)))
        self.protocol = KademliaProtocol(self.node, self.storage, ksize)
        self.refreshLoop = LoopingCall(self.refreshTable).start(3600)
Example #12
 def __init__(self,
              waitTimeout=10,
              max_packet_size=MAX_UDP_SIZE_PCK,
              noisy=False):
     self.max_packet_size = max_packet_size
     self.noisy = noisy
     self._waitTimeout = waitTimeout
     self._outstanding = {}
     self.log = Logger(system=self)
Example #13
class GetChunkLoaction(Resource):
    allowedMethods = ('GET', )

    def __init__(self, dhtstorage):
        Resource.__init__(self)
        self.dhtstorage = dhtstorage
        self.log = Logger(system=self)

    def getChild(self, path, request):
        return self

    def render_GET(self, request):
        time_keeper = TimeKeeper()
        time_id = time_keeper.start_clock_unique()

        def respond(result):
            time_keeper.stop_clock_unique(ENTRY_TOTAL_QUERY_CHUNK, time_id)
            self.log.debug(
                "%s %s %s" %
                (BENCH_TAG, TYPE_QUERY_CHUNK_ADDR, time_keeper.get_summary()))

            if result is None:
                request.setResponseCode(400)
                request.write("No Result found")
            else:
                request.setResponseCode(200)
                request.write(result)
            request.finish()

        if len(request.prepath) < 2:
            request.setResponseCode(400)
            return json.dumps({'error': "Illegal URL"})
        try:
            chunk_key = binascii.unhexlify(request.prepath[1])
            self.dhtstorage.get_addr_chunk(
                chunk_key, time_keeper=time_keeper).addCallback(respond)
            return NOT_DONE_YET
        except TalosVCRestClientError:
            request.setResponseCode(400)
            return "ERROR: No Policy found"
        except:
            request.setResponseCode(400)
            return "ERROR"
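A hypothetical client call against the resource above. Only the hex chunk key path segment is implied by request.prepath[1]; the mount path "chunk_address", host, port, and the use of the requests library are assumptions.

import binascii

import requests  # assumption: any HTTP client would do

chunk_key = b"\x00" * 20  # a 160-bit chunk key, purely illustrative
url = "http://127.0.0.1:14000/chunk_address/%s" % binascii.hexlify(chunk_key).decode()
resp = requests.get(url)
print(resp.status_code, resp.text)  # 200 with the chunk address, or 400 with an error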
Example #14
 def __init__(self,
              ecdsa_privkey,
              sourceNode,
              storage,
              ksize,
              talos_vc=TalosVCRestClient(),
              cbits=10,
              bench_mode=True):
     TalosWeakSignedRPCProtocol.__init__(self,
                                         ecdsa_privkey,
                                         sourceNode.id,
                                         cbits=cbits)
     self.router = TalosKademliaRoutingTable(self, ksize, sourceNode)
     self.storage = storage
     self.sourceNode = sourceNode
     self.log = Logger(system=self)
     self.talos_vc = talos_vc
     self.http_client = None
     self.bench_mode = bench_mode
Example #15
    def __init__(self, protocol, node, peers, ksize, alpha):
        """
        Create a new C{SpiderCrawl}er.

        Args:
            protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
            node: A :class:`~kademlia.node.Node` representing the key we're looking for
            peers: A list of :class:`~kademlia.node.Node` instances that provide the entry point for the network
            ksize: The value for k based on the paper
            alpha: The value for alpha based on the paper
        """
        self.protocol = protocol
        self.ksize = ksize
        self.alpha = alpha
        self.node = node
        self.nearest = NodeHeap(self.node, self.ksize)
        self.lastIDsCrawled = []
        self.log = Logger(system=self)
        self.log.info("creating spider with peers: %s" % peers)
        self.nearest.push(peers)
Example #16
class AddChunk(Resource):
    allowedMethods = ('POST', )

    def __init__(self, dhtstorage):
        Resource.__init__(self)
        self.dhtstorage = dhtstorage
        self.log = Logger(system=self)

    def render_POST(self, request):
        time_keeper = TimeKeeper()
        time_id = time_keeper.start_clock_unique()

        def respond(result):
            time_keeper.stop_clock_unique(ENTRY_TOTAL_ADD_CHUNK, time_id)
            self.log.debug(
                "%s %s %s" %
                (BENCH_TAG, TYPE_ADD_CHUNK, time_keeper.get_summary()))
            if result is not None:
                request.setResponseCode(200)
                request.write("OK")
            else:
                request.setResponseCode(400)
                request.write("ERROR")
            request.finish()

        encoded_chunk = request.content.read()
        try:

            chunk = CloudChunk.decode(encoded_chunk)
            self.dhtstorage.store_chunk(
                chunk, time_keeper=time_keeper).addCallback(respond)
            return NOT_DONE_YET
        except InvalidChunkError:
            request.setResponseCode(400)
            return "ERROR: Invalid Chunk"
        except TalosVCRestClientError:
            request.setResponseCode(400)
            return "ERROR: No Policy found"
        except:
            request.setResponseCode(400)
            return "ERROR"
Example #17
    def __init__(self, appDeployer, uuid):
        self._connected = False
        self._app_deployer = appDeployer

        # optional...
        self._number_of_nodes = 0
        self._list_of_nodes = []
        
        # logging capabilities
        self._log = Logger(system=self)

        # HERE--> Implementation specific node instantiation
        from kademlia.network import Server
        self._node = Server()
        self._node.log.level = 4
Example #18
    def __init__(self, ksize=20, alpha=3, id=None):
        """
        Create a server instance.  Call listen() to start listening on a port.

        @param ksize: The k parameter from the paper
        @param alpha: The alpha parameter from the paper
        @param id: The id for this node on the network
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = ForgetfulStorage()
        self.node = Node(id or digest(random.getrandbits(255)))
        self.protocol = KademliaProtocol(self.node, self.storage, ksize)
        self.refreshLoop = LoopingCall(self.refreshTable).start(3600)
Example #19
class SpiderCrawl(object):
    """
    Crawl the network and look for given 160-bit keys.
    """
    def __init__(self, protocol, node, peers, ksize, alpha):
        """
        Create a new C{SpiderCrawl}er.

        Args:
            protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
            node: A :class:`~kademlia.node.Node` representing the key we're looking for
            peers: A list of :class:`~kademlia.node.Node` instances that provide the entry point for the network
            ksize: The value for k based on the paper
            alpha: The value for alpha based on the paper
        """
        self.protocol = protocol
        self.ksize = ksize
        self.alpha = alpha
        self.node = node
        self.nearest = NodeHeap(self.node, self.ksize)
        self.lastIDsCrawled = []
        self.log = Logger(system=self)
        self.log.info("creating spider with peers: %s" % peers)
        self.nearest.push(peers)

    def _find(self, rpcmethod):
        """
        Get either a value or list of nodes.

        Args:
            rpcmethod: The protocol's callfindValue or callFindNode.

        The process:
          1. call find_* on the current alpha nearest nodes that have not yet been
             queried, adding the results to the current nearest list of k nodes.
          2. the current nearest list keeps track of who has already been queried;
             sort by distance and keep ksize entries.
          3. if the list is the same as last time, the next call goes to every
             node not yet queried.
          4. repeat until every node in the nearest list has been queried.
        """
        self.log.info("crawling with nearest: %s" % str(tuple(self.nearest)))
        count = self.alpha
        if self.nearest.getIDs() == self.lastIDsCrawled:
            self.log.info(
                "last iteration same as current - checking all in list now")
            count = len(self.nearest)
        self.lastIDsCrawled = self.nearest.getIDs()

        ds = {}
        for peer in self.nearest.getUncontacted()[:count]:
            ds[peer.id] = rpcmethod(peer, self.node)
            self.nearest.markContacted(peer)
        return deferredDict(ds).addCallback(self._nodesFound)
Example #20
    def __init__(self, ksize=20, alpha=3, id=None, storage=None):
        """
        Create a server instance.  Call listen() to start listening on a port.

        Args:
            ksize (int): The k parameter from the paper
            alpha (int): The alpha parameter from the paper
            id: The id for this node on the network.
            storage: An instance that implements :interface:`~kademlia.storage.IStorage`
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = storage or ForgetfulStorage()
        self.node = Node(id or digest(random.getrandbits(255)))
        self.protocol = KademliaProtocol(self.node, self.storage, ksize)
        self.refreshLoop = LoopingCall(self.refreshTable).start(3600)
Example #21
    def __init__(self, protocol, node, peers, ksize, alpha):
        """
        Create a new C{SpiderCrawl}er.

        @param protocol: a C{KademliaProtocol} instance.
        @param node: A C{Node} representing the key we're looking for
        @param peers: A list of C{Node}s that provide the entry point for the network
        @param ksize: The value for k based on the paper
        @param alpha: The value for alpha based on the paper
        """
        self.protocol = protocol
        self.ksize = ksize
        self.alpha = alpha
        self.node = node
        self.nearest = NodeHeap(self.node, self.ksize)
        self.lastIDsCrawled = []
        self.log = Logger(system=self)
        self.log.info("creating spider with peers: %s" % peers)
        self.nearest.push(peers)
Example #22
    def __init__(self, appDeployer, uuid):
        self._connected = False
        self._app_deployer = appDeployer

        # optional...
        self._number_of_nodes = 0
        self._list_of_nodes = []
        
        # logging capabilities
        self._log = Logger(system=self)

        # HERE--> Implementation specific node instantiation
        from kademlia.network import Server
        import kademlia
        import os
        path = os.path.dirname(kademlia.__file__)
        print "PATH TO KADEMLIA: --> ", path
        self._node = Server()
        self._node.log.level = 4
Example #23
    def __init__(self, protocol, node, peers, ksize, alpha):
        """
        Create a new C{SpiderCrawl}er.

        Args:
            protocol: A :class:`~kademlia.protocol.KademliaProtocol` instance.
            node: A :class:`~kademlia.node.Node` representing the key we're looking for
            peers: A list of :class:`~kademlia.node.Node` instances that provide the entry point for the network
            ksize: The value for k based on the paper
            alpha: The value for alpha based on the paper
        """
        self.protocol = protocol
        self.ksize = ksize
        self.alpha = alpha
        self.node = node
        self.nearest = NodeHeap(self.node, self.ksize)
        self.lastIDsCrawled = []
        self.log = Logger(system=self)
        self.log.info("creating spider with peers: %s" % peers)
        self.nearest.push(peers)
Example #24
class TalosKademliaProtocol(TalosRPCProtocol):
    """
    Kademlia protocol for the Talos storage layer, based on bmuller's implementation
    """
    def __init__(self,
                 sourceNode,
                 storage,
                 ksize,
                 talos_vc=TalosVCRestClient()):
        TalosRPCProtocol.__init__(self)
        self.router = TalosKademliaRoutingTable(self, ksize, sourceNode)
        self.storage = storage
        self.sourceNode = sourceNode
        self.log = Logger(system=self)
        self.talos_vc = talos_vc
        self.http_client = None

    def getRefreshIDs(self):
        """
        Get ids to search for to keep old buckets up to date.
        """
        ids = []
        for bucket in self.router.getLonelyBuckets():
            ids.append(random.randint(*bucket.range))
        return ids

    def rpc_stun(self, sender):
        return sender

    def rpc_ping(self, sender, nodeid):
        source = Node(nodeid, sender[0], sender[1])
        self.welcomeIfNewNode(source)
        return self.sourceNode.id

    def rpc_store(self, sender, nodeid, key, value):
        source = Node(nodeid, sender[0], sender[1])
        time_keeper = TimeKeeper()
        total_time_id = time_keeper.start_clock_unique()

        time_keeper.start_clock()
        self.welcomeIfNewNode(source)
        time_keeper.stop_clock(ENTRY_TIME_WELCOME_NODE)

        self.log.debug("got a store request from %s, storing value" %
                       str(sender))
        try:

            chunk = CloudChunk.decode(value)

            if digest(chunk.key) != key:
                return {'error': 'key mismatch'}

            def handle_policy(policy):
                time_keeper.stop_clock(ENTRY_FETCH_POLICY)

                # Hack no chunk id given -> no key checks, key is in the encoded chunk
                id = time_keeper.start_clock_unique()
                self.storage.store_check_chunk(chunk,
                                               None,
                                               policy,
                                               time_keeper=time_keeper)
                time_keeper.stop_clock_unique(ENTRY_STORE_CHECK, id)

                time_keeper.stop_clock_unique(ENTRY_TOTAL_STORE_LOCAL,
                                              total_time_id)
                self.log.debug("%s %s %s" % (BENCH_TAG, TYPE_STORE_CHUNK_LOCAL,
                                             time_keeper.get_summary()))
                return {'value': 'ok'}

            time_keeper.start_clock()
            return self.talos_vc.get_policy_with_txid(
                chunk.get_tag_hex()).addCallback(handle_policy)
        except InvalidChunkError as e:
            return {'error': e.value}
        except TalosVCRestClientError:
            return {'error': "No policy found"}

    def rpc_find_node(self, sender, nodeid, key):
        self.log.info("finding neighbors of %i in local table" %
                      long(nodeid.encode('hex'), 16))
        source = Node(nodeid, sender[0], sender[1])
        self.welcomeIfNewNode(source)
        node = Node(key)
        return map(tuple, self.router.findNeighbors(node, exclude=source))

    def rpc_find_value(self, sender, nodeid, key, chunk_key):
        source = Node(nodeid, sender[0], sender[1])
        self.welcomeIfNewNode(source)
        if self.storage.has_value(chunk_key):
            try:
                myaddress = self.transport.getHost()
                return {'value': "%s:%d" % (myaddress.host, myaddress.port)}
            except InvalidQueryToken as e:
                self.log.info("Invalid query token received %s" % (e.value, ))
                return {'error': e.value}
        else:
            return self.rpc_find_node(sender, nodeid, key)

    def callFindNode(self, nodeToAsk, nodeToFind):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_node(address, self.sourceNode.id, nodeToFind.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callFindValue(self, nodeToAsk, nodeToFind, chunk_key):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_value(address, self.sourceNode.id, nodeToFind.id,
                            chunk_key)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callPing(self, nodeToAsk):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.ping(address, self.sourceNode.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callStore(self, nodeToAsk, key, value):
        address = (nodeToAsk.ip, nodeToAsk.port)
        time_keeper = TimeKeeper()
        id = time_keeper.start_clock_unique()
        if len(value) < MAX_UDP_SIZE:
            d = self.store(address, self.sourceNode.id, key, value)
        else:
            d = self.http_client.call_store_large_chunk(nodeToAsk, key, value)
        return d.addCallback(self.handleTimedCallResponse, nodeToAsk,
                             time_keeper, id, ENTRY_STORE_ONE_NODE)

    def welcomeIfNewNode(self, node):
        """
        Given a new node, send it all the keys/values it should be storing,
        then add it to the routing table.

        @param node: A new node that just joined (or that we just found out
        about).

        Process:
        For each key in storage, get k closest nodes.  If newnode is closer
        than the furthest in that list, and the node for this server
        is closer than the closest in that list, then store the key/value
        on the new node (per section 2.5 of the paper)
        """
        def perform_stores():
            ds = []
            for key, value in self.storage.iteritems():
                keynode = Node(digest(key))
                neighbors = self.router.findNeighbors(keynode)
                if len(neighbors) > 0:
                    newNodeClose = node.distanceTo(
                        keynode) < neighbors[-1].distanceTo(keynode)
                    thisNodeClosest = self.sourceNode.distanceTo(
                        keynode) < neighbors[0].distanceTo(keynode)
                if len(neighbors) == 0 or (newNodeClose and thisNodeClosest):
                    ds.append(self.callStore(node, digest(key), value))

        if self.router.isNewNode(node):
            self.log.info("Welcoming new node %s" % node)
            # hand the key/value transfer off to a background thread
            d = threads.deferToThread(perform_stores)
            self.router.addContact(node)
            return d

    def handleCallResponse(self, result, node):
        """
        If we get a response, add the node to the routing table.  If
        we get no response, make sure it's removed from the routing table.
        """
        if result[0]:
            self.log.info("got response from %s, adding to router" % node)
            self.welcomeIfNewNode(node)
        else:
            self.log.debug("no response from %s, removing from router" % node)
            self.router.removeContact(node)
        return result

    def handleTimedCallResponse(self, result, node, time_keeper, id, name):
        """
        If we get a response, add the node to the routing table.  If
        we get no response, make sure it's removed from the routing table.
        """
        time_keeper.stop_clock_unique(name, id)
        self.log.debug(
            "%s %s %s " %
            (BENCH_TAG, TYPE_STORE_CHUNK_REMOTE, time_keeper.get_summary()))

        if result[0]:
            self.log.info("got response from %s, adding to router" % node)
            self.welcomeIfNewNode(node)
        else:
            self.log.debug("no response from %s, removing from router" % node)
            self.router.removeContact(node)
        return result
Example #25
class Server(object):
    """
    High level view of a node instance.  This is the object that should be created
    to start listening as an active node on the network.
    """

    def __init__(self, ksize=20, alpha=3, id=None):
        """
        Create a server instance.  Call listen() to start listening on a port.

        @param ksize: The k parameter from the paper
        @param alpha: The alpha parameter from the paper
        @param id: The id for this node on the network
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = ForgetfulStorage()
        self.node = Node(id or digest(random.getrandbits(255)))
        self.protocol = KademliaProtocol(self.node, self.storage, ksize)
        self.refreshLoop = LoopingCall(self.refreshTable).start(3600)

    def listen(self, port):
        """
        Start listening on the given port.

        This is the same as calling:
        C{reactor.listenUDP(port, server.protocol)}
        """
        return reactor.listenUDP(port, self.protocol)

    def refreshTable(self):
        """
        Refresh buckets that haven't had any lookups in the last hour
        (per section 2.3 of the paper).
        """
        ds = []
        for id in self.protocol.getRefreshIDs():
            node = Node(id)
            nearest = self.protocol.router.findNeighbors(node, self.alpha)
            spider = NodeSpiderCrawl(self.protocol, node, nearest)
            ds.append(spider.find())

        def republishKeys(_):
            ds = []
            # Republish keys older than one hour
            for key, value in self.storage.iteritemsOlderThan(3600):
                ds.append(self.set(key, value))
            return defer.gatherResults(ds)

        return defer.gatherResults(ds).addCallback(republishKeys)

    def bootstrappableNeighbors(self):
        """
        Get a C{list} of (ip, port) C{tuple}s suitable for use as an argument
        to the bootstrap method.

        The server should have been bootstrapped
        already - this is just a utility for getting some neighbors and then
        storing them if this server is going down for a while.  When it comes
        back up, the list of nodes can be used to bootstrap.
        """
        neighbors = self.protocol.router.findNeighbors(self.node)
        return [ tuple(n)[-2:] for n in neighbors ]

    def bootstrap(self, addrs):
        """
        Bootstrap the server by connecting to other known nodes in the network.

        @param addrs: A C{list} of (ip, port) C{tuple}s.  Note that only IP addresses
        are acceptable - hostnames will cause an error.
        """
        # if the transport hasn't been initialized yet, wait a second
        if self.protocol.transport is None:
            return task.deferLater(reactor, 1, self.bootstrap, addrs)

        def initTable(results):
            nodes = []
            for addr, result in results.items():
                if result[0]:
                    nodes.append(Node(result[1], addr[0], addr[1]))
            spider = NodeSpiderCrawl(self.protocol, self.node, nodes, self.ksize, self.alpha)
            return spider.find()

        ds = {}
        for addr in addrs:
            ds[addr] = self.protocol.ping(addr, self.node.id)
        return deferredDict(ds).addCallback(initTable)

    def inetVisibleIP(self):
        """
        Get the internet visible IP's of this node as other nodes see it.

        @return: An C{list} of IP's.  If no one can be contacted, then the
        C{list} will be empty.
        """
        def handle(results):
            ips = [ result[1][0] for result in results if result[0] ]
            self.log.debug("other nodes think our ip is %s" % str(ips))
            return ips

        ds = []
        for neighbor in self.bootstrappableNeighbors():
            ds.append(self.protocol.stun(neighbor))
        return defer.gatherResults(ds).addCallback(handle)

    def get(self, key):
        """
        Get a key if the network has it.

        @return: C{None} if not found, the value otherwise.
        """
        node = Node(digest(key))
        nearest = self.protocol.router.findNeighbors(node)
        if len(nearest) == 0:
            self.log.warning("There are no known neighbors to get key %s" % key)
            return defer.succeed(None)
        spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
        return spider.find()

    def set(self, key, value):
        """
        Set the given key to the given value in the network.
        """
        self.log.debug("setting '%s' = '%s' on network" % (key, value))
        dkey = digest(key)

        def store(nodes):
            self.log.info("setting '%s' on %s" % (key, map(str, nodes)))
            ds = [self.protocol.callStore(node, dkey, value) for node in nodes]
            return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)

        node = Node(dkey)
        nearest = self.protocol.router.findNeighbors(node)
        if len(nearest) == 0:
            self.log.warning("There are no known neighbors to set key %s" % key)
            return defer.succeed(False)
        spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
        return spider.find().addCallback(store)

    def _anyRespondSuccess(self, responses):
        """
        Given the result of a DeferredList of calls to peers, ensure that at least
        one of them was contacted and responded with a Truthy result.
        """
        for deferSuccess, result in responses:
            peerReached, peerResponse = result
            if deferSuccess and peerReached and peerResponse:
                return True
        return False

    def saveState(self, fname):
        """
        Save the state of this node (the alpha/ksize/id/immediate neighbors)
        to a cache file with the given fname.
        """
        data = { 'ksize': self.ksize,
                 'alpha': self.alpha,
                 'id': self.node.id,
                 'neighbors': self.bootstrappableNeighbors() }
        with open(fname, 'w') as f:
            pickle.dump(data, f)

    @classmethod
    def loadState(self, fname):
        """
        Load the state of this node (the alpha/ksize/id/immediate neighbors)
        from a cache file with the given fname.
        """        
        with open(fname, 'r') as f:
            data = pickle.load(f)
        s = Server(data['ksize'], data['alpha'], data['id'])
        if len(data['neighbors']) > 0:
            s.bootstrap(data['neighbors'])
        return s

    def saveStateRegularly(self, fname, frequency=600):
        """
        Save the state of node with a given regularity to the given
        filename.
        
        @param fname: File to save regularly to
        @param frequency: Frequency in seconds that the state
        should be saved.  By default, 10 minutes.
        """
        loop = LoopingCall(self.saveState, fname)
        loop.start(frequency)
        return loop
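A minimal usage sketch for the Server above, assuming the class lives at kademlia.network.Server as in the other examples on this page: listen, bootstrap against a known peer, then set and get a value. The bootstrap address and UDP port are placeholders.

from twisted.internet import reactor
from kademlia.network import Server


def on_get(result):
    print("value from the network: %s" % result)
    reactor.stop()


def on_set(_):
    server.get("a key").addCallback(on_get)


def on_bootstrap(_):
    server.set("a key", "a value").addCallback(on_set)


server = Server()
server.listen(8468)  # UDP port is arbitrary here
server.bootstrap([("1.2.3.4", 8468)]).addCallback(on_bootstrap)
reactor.run()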
Example #26
class StoreLargeChunk(Resource):
    allowedMethods = ('POST', )

    def __init__(self, storage, rpc_protocol, talos_vc=TalosVCRestClient()):
        Resource.__init__(self)
        self.storage = storage
        self.log = Logger(system=self)
        self.talos_vc = talos_vc
        self.rpc_protocol = rpc_protocol

    def getChild(self, path, request):
        return self

    def render_POST(self, request):
        if len(request.prepath) < 4:
            request.setResponseCode(400)
            return json.dumps({'error': "Illegal URL"})
        try:
            time_keeper = TimeKeeper()
            total_time_id = time_keeper.start_clock_unique()

            nodeid = unhexlify(request.prepath[1])
            source_ip = request.client.host
            source_port = int(request.prepath[2])
            kad_key = unhexlify(request.prepath[3])

            source = Node(nodeid, source_ip, source_port)

            time_keeper.start_clock()
            self.rpc_protocol.welcomeIfNewNode(source)
            time_keeper.stop_clock(ENTRY_TIME_WELCOME_NODE)

            encoded_chunk = request.content.read()

            chunk = CloudChunk.decode(encoded_chunk)

            if digest(chunk.key) != kad_key:
                request.setResponseCode(400)
                return json.dumps({'error': "key mismatch"})

            def handle_policy(policy):
                time_keeper.stop_clock(ENTRY_FETCH_POLICY)

                id = time_keeper.start_clock_unique()
                self.storage.store_check_chunk(chunk,
                                               None,
                                               policy,
                                               time_keeper=time_keeper)
                time_keeper.stop_clock_unique(ENTRY_STORE_CHECK, id)

                time_keeper.stop_clock_unique(ENTRY_TOTAL_STORE_LOCAL,
                                              total_time_id)
                self.log.debug("%s %s %s" % (BENCH_TAG, TYPE_STORE_CHUNK_LOCAL,
                                             time_keeper.get_summary()))
                request.write(json.dumps({'value': "ok"}))
                request.finish()

            time_keeper.start_clock()
            self.talos_vc.get_policy_with_txid(
                chunk.get_tag_hex()).addCallback(handle_policy)
            return NOT_DONE_YET
        except InvalidChunkError as e:
            request.setResponseCode(400)
            return json.dumps({'error': e.value})
        except TalosVCRestClientError:
            request.setResponseCode(400)
            return "ERROR: No policy found"
        except:
            request.setResponseCode(400)
            return json.dumps({'error': "Error occurred"})
Example #27
class KademliaProtocol(RPCProtocol):
    def __init__(self, sourceNode, storage, ksize):
        RPCProtocol.__init__(self)
        self.router = RoutingTable(self, ksize, sourceNode)
        self.storage = storage
        self.sourceNode = sourceNode
        self.log = Logger(system=self)

    def getRefreshIDs(self):
        """
        Get ids to search for to keep old buckets up to date.
        """
        ids = []
        for bucket in self.router.getLonelyBuckets():
            ids.append(random.randint(*bucket.range))
        return ids

    def rpc_stun(self, sender):
        return sender

    def rpc_ping(self, sender, nodeid):
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        return self.sourceNode.id

    def rpc_store(self, sender, nodeid, key, value):
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        self.log.debug("got a store request from %s, storing value" %
                       str(sender))
        self.storage[key] = value
        return True

    def rpc_find_node(self, sender, nodeid, key):
        self.log.info("finding neighbors of %i in local table" %
                      long(nodeid.encode('hex'), 16))
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        node = Node(key)
        return map(tuple, self.router.findNeighbors(node, exclude=source))

    def rpc_find_value(self, sender, nodeid, key):
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        value = self.storage.get(key, None)
        if value is None:
            return self.rpc_find_node(sender, nodeid, key)
        return {'value': value}

    def callFindNode(self, nodeToAsk, nodeToFind):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_node(address, self.sourceNode.id, nodeToFind.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callFindValue(self, nodeToAsk, nodeToFind):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_value(address, self.sourceNode.id, nodeToFind.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callPing(self, nodeToAsk):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.ping(address, self.sourceNode.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callStore(self, nodeToAsk, key, value):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.store(address, self.sourceNode.id, key, value)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def transferKeyValues(self, node):
        """
        Given a new node, send it all the keys/values it should be storing.

        @param node: A new node that just joined (or that we just found out
        about).

        Process:
        For each key in storage, get k closest nodes.  If newnode is closer
        than the furthest in that list, and the node for this server
        is closer than the closest in that list, then store the key/value
        on the new node (per section 2.5 of the paper)
        """
        ds = []
        for key, value in self.storage.iteritems():
            keynode = Node(digest(key))
            neighbors = self.router.findNeighbors(keynode)
            if len(neighbors) > 0:
                newNodeClose = node.distanceTo(
                    keynode) < neighbors[-1].distanceTo(keynode)
                thisNodeClosest = self.sourceNode.distanceTo(
                    keynode) < neighbors[0].distanceTo(keynode)
            if len(neighbors) == 0 or (newNodeClose and thisNodeClosest):
                ds.append(self.callStore(node, key, value))
        return defer.gatherResults(ds)

    def handleCallResponse(self, result, node):
        """
        If we get a response, add the node to the routing table.  If
        we get no response, make sure it's removed from the routing table.
        """
        if result[0]:
            self.log.info("got response from %s, adding to router" % node)
            self.router.addContact(node)
            if self.router.isNewNode(node):
                self.transferKeyValues(node)
        else:
            self.log.debug("no response from %s, removing from router" % node)
            self.router.removeContact(node)
        return result
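These protocols build on the rpcudp convention: methods prefixed with rpc_ are callable by remote peers, while calling an otherwise undefined method such as self.ping(address, ...) sends the corresponding RPC and returns a Deferred firing with a (responded, value) pair. A minimal sketch of that pattern, with purely illustrative echo names:

from rpcudp.protocol import RPCProtocol


class EchoProtocol(RPCProtocol):
    def rpc_echo(self, sender, message):
        # runs on the node that receives the RPC
        return message

    def callEcho(self, address, message):
        # self.echo(...) is synthesized by RPCProtocol and sends the request;
        # the Deferred fires with (True, echoed_message) or (False, None) on timeout
        return self.echo(address, message)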
Example #28
 def __init__(self, dht_server=None):
     self.dht_server = dht_server
     self.log = Logger(system=self)
Example #29
class DHTMirrorRPC(jsonrpc.JSONRPC):
    """ A DHT Mirror with faster get/set."""
    def _get_hash(self, value):

        if type(value) is not dict:
            try:
                #self.log.info("WARNING: converting to json")
                value = json.loads(value)
            except:
                self.log.info("WARNING: not valid json")

        return hex_hash160(json.dumps(value, sort_keys=True))

    def __init__(self, dht_server=None):
        self.dht_server = dht_server
        self.log = Logger(system=self)

    def jsonrpc_ping(self):

        reply = {}
        reply['status'] = "alive"
        return reply

    def jsonrpc_stats(self):
        stats = {}
        stats['entries'] = dht_mirror.count()
        return stats

    def jsonrpc_get(self, key):

        resp = {}
        resp['key'] = key

        self.log.info("Get request for key: %s" % key)

        entry = dht_mirror.find_one({"key": key})

        if entry is not None:
            resp['value'] = entry['value']
        else:
            # if not in mirror/cache get from DHT
            return self.jsonrpc_dht_get(key)

        return resp

    def jsonrpc_set(self, key, value):

        self.log.info("Set request for key: %s" % key)

        resp = {}

        test_hash = self._get_hash(value)

        if test_hash != key:
            resp['error'] = "hash(value) doesn't match key"
            return resp

        write_to_cache(key, value)

        # perform the dht set/refresh in the background
        self.jsonrpc_dht_set(key, value)

        resp['status'] = 'success'

        return resp

    def jsonrpc_dht_get(self, key):

        self.log.info("DHT get request for key: %s" % key)

        resp = {}

        try:
            resp = self.dht_server.get(key)
            value = resp[0]
            write_to_cache(key, value)

        except Exception as e:
            resp['error'] = e

        return resp

    def jsonrpc_dht_set(self, key, value):

        self.log.info("DHT set request for key: %s" % key)

        resp = {}

        try:
            resp = self.dht_server.set(key, value)
        except Exception as e:
            resp['error'] = e

        return resp
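A hypothetical wiring sketch for exposing the JSON-RPC resource above over HTTP with twisted.web; the port and the dht placeholder are assumptions, and the snippet leans on jsonrpc.JSONRPC being a twisted.web Resource.

from twisted.internet import reactor
from twisted.web import server

dht = None  # placeholder: a running kademlia Server instance would go here
rpc = DHTMirrorRPC(dht_server=dht)
reactor.listenTCP(6266, server.Site(rpc))  # port is arbitrary here
reactor.run()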
Example #30
class Server(object):
    """
    High level view of a node instance.  This is the object that should be created
    to start listening as an active node on the network.
    """
    def __init__(self, ksize=20, alpha=3, id=None, storage=None):
        """
        Create a server instance.  Call listen() to start listening on a port.

        Args:
            ksize (int): The k parameter from the paper
            alpha (int): The alpha parameter from the paper
            id: The id for this node on the network.
            storage: An instance that implements :interface:`~kademlia.storage.IStorage`
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = storage or ForgetfulStorage()
        self.node = Node(id or digest(random.getrandbits(255)))
        print(random.getrandbits(255))
        self.protocol = KademliaProtocol(self.node, self.storage, ksize)
        self.refreshLoop = LoopingCall(self.refreshTable).start(3600)

    def listen(self, port, interface=""):
        """
        Start listening on the given port.

        This is the same as calling::

            reactor.listenUDP(port, server.protocol)

        Provide interface="::" to accept ipv6 address
        """
        return reactor.listenUDP(port, self.protocol, interface)

    def refreshTable(self):
        """
        Refresh buckets that haven't had any lookups in the last hour
        (per section 2.3 of the paper).
        """
        ds = []
        for id in self.protocol.getRefreshIDs():
            node = Node(id)
            nearest = self.protocol.router.findNeighbors(node, self.alpha)
            spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize,
                                     self.alpha)
            ds.append(spider.find())

        def republishKeys(_):
            ds = []
            # Republish keys older than one hour
            for dkey, value in self.storage.iteritemsOlderThan(3600):
                ds.append(self.digest_set(dkey, value))
            return defer.gatherResults(ds)

        return defer.gatherResults(ds).addCallback(republishKeys)

    def bootstrappableNeighbors(self):
        """
        Get a :class:`list` of (ip, port) :class:`tuple` pairs suitable for use as an argument
        to the bootstrap method.

        The server should have been bootstrapped
        already - this is just a utility for getting some neighbors and then
        storing them if this server is going down for a while.  When it comes
        back up, the list of nodes can be used to bootstrap.
        """
        neighbors = self.protocol.router.findNeighbors(self.node)
        return [tuple(n)[-2:] for n in neighbors]

    def bootstrap(self, addrs):
        """
        Bootstrap the server by connecting to other known nodes in the network.

        Args:
            addrs: A `list` of (ip, port) `tuple` pairs.  Note that only IP addresses
                   are acceptable - hostnames will cause an error.
        """
        # if the transport hasn't been initialized yet, wait a second
        if self.protocol.transport is None:
            return task.deferLater(reactor, 1, self.bootstrap, addrs)

        def initTable(results):
            nodes = []
            for addr, result in results.items():
                if result[0]:
                    nodes.append(Node(result[1], addr[0], addr[1]))
            spider = NodeSpiderCrawl(self.protocol, self.node, nodes,
                                     self.ksize, self.alpha)
            return spider.find()

        ds = {}
        for addr in addrs:
            ds[addr] = self.protocol.ping(addr, self.node.id)
        return deferredDict(ds).addCallback(initTable)

    def inetVisibleIP(self):
        """
        Get the internet visible IP's of this node as other nodes see it.

        Returns:
            A `list` of IP's.  If no one can be contacted, then the `list` will be empty.
        """
        def handle(results):
            ips = [result[1][0] for result in results if result[0]]
            self.log.debug("other nodes think our ip is %s" % str(ips))
            return ips

        ds = []
        for neighbor in self.bootstrappableNeighbors():
            ds.append(self.protocol.stun(neighbor))
        return defer.gatherResults(ds).addCallback(handle)

    def get(self, key):
        """
        Get a key if the network has it.

        Returns:
            :class:`None` if not found, the value otherwise.
        """
        dkey = digest(key)
        # if this node has it, return it
        if self.storage.get(dkey) is not None:
            return defer.succeed(self.storage.get(dkey))
        node = Node(dkey)
        nearest = self.protocol.router.findNeighbors(node)
        if len(nearest) == 0:
            self.log.warning("There are no known neighbors to get key %s" %
                             key)
            return defer.succeed(None)
        spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize,
                                  self.alpha)
        return spider.find()

    def set(self, key, value):
        """
        Set the given key to the given value in the network.
        """
        self.log.debug("setting '%s' = '%s' on network" % (key, value))
        dkey = digest(key)
        return self.digest_set(dkey, value)

    def digest_set(self, dkey, value):
        """
        Set the given SHA1 digest key to the given value in the network.
        """
        node = Node(dkey)
        # this is useful for debugging messages
        hkey = binascii.hexlify(dkey)

        def store(nodes):
            self.log.info("setting '%s' on %s" % (hkey, map(str, nodes)))
            # if this node is close too, then store here as well
            if self.node.distanceTo(node) < max(
                [n.distanceTo(node) for n in nodes]):
                self.storage[dkey] = value
            ds = [self.protocol.callStore(n, dkey, value) for n in nodes]
            return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)

        nearest = self.protocol.router.findNeighbors(node)
        if len(nearest) == 0:
            self.log.warning("There are no known neighbors to set key %s" %
                             hkey)
            return defer.succeed(False)
        spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize,
                                 self.alpha)
        return spider.find().addCallback(store)

    def _anyRespondSuccess(self, responses):
        """
        Given the result of a DeferredList of calls to peers, ensure that at least
        one of them was contacted and responded with a Truthy result.
        """
        for deferSuccess, result in responses:
            peerReached, peerResponse = result
            if deferSuccess and peerReached and peerResponse:
                return True
        return False

    def saveState(self, fname):
        """
        Save the state of this node (the alpha/ksize/id/immediate neighbors)
        to a cache file with the given fname.
        """
        data = {
            'ksize': self.ksize,
            'alpha': self.alpha,
            'id': self.node.id,
            'neighbors': self.bootstrappableNeighbors()
        }
        if len(data['neighbors']) == 0:
            self.log.warning("No known neighbors, so not writing to cache.")
            return
        with open(fname, 'w') as f:
            pickle.dump(data, f)

    @classmethod
    def loadState(self, fname):
        """
        Load the state of this node (the alpha/ksize/id/immediate neighbors)
        from a cache file with the given fname.
        """
        with open(fname, 'r') as f:
            data = pickle.load(f)
        s = Server(data['ksize'], data['alpha'], data['id'])
        if len(data['neighbors']) > 0:
            s.bootstrap(data['neighbors'])
        return s

    def saveStateRegularly(self, fname, frequency=600):
        """
        Save the state of node with a given regularity to the given
        filename.

        Args:
            fname: File name to save regularly to
            frequency: Frequency in seconds that the state should be saved.
                        By default, 10 minutes.
        """
        loop = LoopingCall(self.saveState, fname)
        loop.start(frequency)
        return loop
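A sketch of how the state helpers above might be combined across restarts, assuming the class is importable as kademlia.network.Server like the other examples here; the cache filename and port are arbitrary.

import os

from twisted.internet import reactor
from kademlia.network import Server

CACHE_FILE = "kademlia-state.pickle"  # arbitrary path

if os.path.isfile(CACHE_FILE):
    # resume with the saved ksize/alpha/id and re-bootstrap from cached neighbors
    server = Server.loadState(CACHE_FILE)
else:
    server = Server()

server.listen(8468)  # port is arbitrary here
server.saveStateRegularly(CACHE_FILE, frequency=600)  # persist every 10 minutes
reactor.run()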
Example #31
class QueryChunk(Resource):
    allowedMethods = ('GET', 'POST')

    def __init__(self,
                 storage,
                 talos_vc=TalosVCRestClient(),
                 max_nonce_cache=1000,
                 nonce_ttl=10):
        Resource.__init__(self)
        self.storage = storage
        self.log = Logger(system=self)
        self.talos_vc = talos_vc
        self.nonce_cache = TTLCache(max_nonce_cache, nonce_ttl)
        self.refreshLoop = LoopingCall(self.nonce_cache.expire).start(3600)
        self.sem = Semaphore(1)

    def render_GET(self, request):
        nonce = os.urandom(16)
        self.nonce_cache[nonce] = True
        return nonce

    def _check_cache(self, nonce):
        self.sem.acquire(True)
        try:
            ok = self.nonce_cache[nonce]
            if ok:
                self.nonce_cache[nonce] = False
            return ok
        except KeyError:
            return False
        finally:
            self.sem.release()

    def render_POST(self, request):
        msg = json.loads(request.content.read())
        timekeeper = TimeKeeper()
        total_time_id = timekeeper.start_clock_unique()
        try:
            timekeeper.start_clock()
            token = get_and_check_query_token(msg)
            check_query_token_valid(token)
            timekeeper.stop_clock(ENTRY_CHECK_TOKEN_VALID)

            # Check nonce ok
            if not self._check_cache(token.nonce):
                raise InvalidQueryToken("Nonce not valid")

            def handle_policy(policy):
                timekeeper.stop_clock(ENTRY_FETCH_POLICY)
                if policy is None:
                    request.setResponseCode(400)
                    request.write("No Policy Found")
                    request.finish()
                    return
                # check policy for correctness
                id = timekeeper.start_clock_unique()
                chunk = self.storage.get_check_chunk(token.chunk_key,
                                                     token.pubkey,
                                                     policy,
                                                     time_keeper=timekeeper)
                timekeeper.stop_clock_unique(ENTRY_GET_AND_CHECK, id)
                timekeeper.stop_clock_unique(ENTRY_TOTAL_LOCAL_QUERY,
                                             total_time_id)

                self.log.debug("%s %s %s" % (BENCH_TAG, TYPE_QUERY_CHUNK_LOCAL,
                                             timekeeper.get_summary()))
                request.write(chunk.encode())
                request.finish()

            timekeeper.start_clock()
            self.talos_vc.get_policy(token.owner,
                                     token.streamid).addCallback(handle_policy)
            return NOT_DONE_YET
        except InvalidQueryToken:
            request.setResponseCode(400)
            return "ERROR: token verification failure"
        except TalosVCRestClientError:
            request.setResponseCode(400)
            return "ERROR: No policy found"
        except:
            request.setResponseCode(400)
            return "ERROR: error occurred"
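A minimal wiring sketch for serving this resource with twisted.web (GET hands out a one-time nonce, POST answers a token-authenticated chunk query); the mount path, port, and my_storage placeholder are assumptions.

from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site

my_storage = None  # placeholder: the node's chunk storage instance

root = Resource()
root.putChild("storage", QueryChunk(my_storage))  # mount path is an assumption
reactor.listenTCP(14000, Site(root))  # port is arbitrary here
reactor.run()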
Example #32
class NetworkInterface (object):

    # Create a NetworkInterface object to accomplish all network related tasks
    def __init__(self, appDeployer, uuid):
        self._connected = False
        self._app_deployer = appDeployer
        self._uuid = uuid  # referenced by set() below

        # optional...
        self._number_of_nodes = 0
        self._list_of_nodes = []
        
        # logging capabilities
        self._log = Logger(system=self)

        # HERE--> Implementation specific node instantiation
        from kademlia.network import Server
        self._node = Server()
        self._node.log.level = 4
        # END OF SECTION 

    def bootStrapDone(self, server):
        #contacts = self._node.inetVisibleIP()
        print "BOOOOTTTT STAPPP IT"

    def retrieveContacts(self):
        """
        NEED TO FIND A WAY TO RETRIEVE THE LIST OF NEIGHBORS !!!
        
        """
        # !!! DOES EXACTLY THE SAME AS bootstrappableNeighbors !!!
        for bucket in self._node.protocol.router.buckets:
            print bucket.getNodes()
        
        # !!! bootstrappableNeighbors returns only the list of neighbors that you provided as !!!
        # !!! a bootstrap list, that are also online !!!
        neighbors =  self._node.bootstrappableNeighbors()
        
        print neighbors
        return neighbors

    def connect(self,fromPort,toPort,ip='127.0.0.1'):
        self._log.debug('Connecting...')
        #print "in connect ... "  
        #print "now listening on port: ",fromPort
        self._node.listen(fromPort)
        return self._node.bootstrap([(ip,toPort)]).addCallback(self.bootStrapDone)
            
    # Callback fired once a DHT set completes
    def setDone(self, result):
        print result
        print "set is done"
        return result

    def set(self, key, value):

        def _processKey(result, key, values):
            print result, key, values
            # upon recovering the current value of the key
            if result is None:
                # key not present yet, store the new values directly
                return self._node.set(key, values)
            for value in values:
                if value not in result:
                    # append the new value, then republish the merged list
                    result.append(value)
                else:
                    self._log.info("Value is already in the corresponding key.")
            return self._node.set(key, result)

        # Only application deployers are allowed to write to the DHT.
        if self._app_deployer != False:
            # Two keys may be written to: the template key and the deployer's own application key
            if ('template' == key or self._uuid == key) and key is not None:
                # HERE --> Implementation Specific Code
                print " :::  ", self, " ::: ", key, " ::: ", value, " <----------------------------"
                # if writing to the template, retrieve the current value first and merge into it
                if key == 'template':
                    deferred = self._node.get(key)
                    deferred.addCallback(_processKey, key, value)
                    return deferred
                # otherwise write the application key directly
                return self._node.set(key, value)
                # END OF SECTION

        # Not allowed to write to the DHT.
        else:
            self._log.info("Only application deployers are allowed to write values into the DHT!")
            

    def done(self,result):
        print "self: ", self
        print "Key result:", result

    def get(self,result, key):
        # HERE --> Implementation Specific Code
        print result, " :::  ", self,  " ::: ", key, " <----------------------------" 
        deferred = self._node.get(key)
        deferred.addCallback(self.done)
        return deferred
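# --- Hedged usage sketch (not part of the original class): wiring the NetworkInterface
# --- above into a Twisted reactor. Ports and the uuid value are placeholders.
from twisted.internet import reactor

iface = NetworkInterface(appDeployer=True, uuid='app-0001')

def after_connect(_):
    # only 'template' or the deployer's own uuid may be written to
    d = iface.set('template', ['service-a'])
    d.addCallback(lambda _result: iface.get(None, 'template'))
    return d

iface.connect(fromPort=4000, toPort=4001).addCallback(after_connect)
reactor.run()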
Example #33
0
 def __init__(self, dht_server=None):
     self.dht_server = dht_server
     self.log = Logger(system=self)
Example #34
0
class KademliaProtocol(RPCProtocol):
    def __init__(self, sourceNode, storage, ksize):
        RPCProtocol.__init__(self)
        self.router = RoutingTable(self, ksize, sourceNode)
        self.storage = storage
        self.sourceNode = sourceNode
        self.log = Logger(system=self)
        self.messages = []

    def getRefreshIDs(self):
        """
        Get ids to search for to keep old buckets up to date.
        """
        ids = []
        for bucket in self.router.getLonelyBuckets():
            ids.append(random.randint(*bucket.range))
        return ids

    def getMessages(self):
        if len(self.messages) == 0:
            return None

        newList = []
        while len(self.messages) > 0:
            newList.append(self.messages.pop(0))
        return newList

    def rpc_stun(self, sender):
        return sender

    def rpc_ping(self, sender, nodeid):
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        return self.sourceNode.id

    def rpc_store(self, sender, nodeid, key, value):
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        # Only replace an existing value if the new value carries a larger timestamp.
        newTimestamp = decodeTimestamp(value[1], value[2])
        existingValue = self.storage.get(key, None)
        if existingValue:
            if existingValue[4] < value[4]:
                existingTimestamp = decodeTimestamp(existingValue[1], value[2])
            else:
                self.log.debug("Local val unencrypted is too small")
                return True
        if (not existingValue) or (existingTimestamp < newTimestamp):
            self.log.debug("got a store request from %s, storing value" %
                           str(sender))
            self.storage[key] = value
            return True
        else:
            self.log.debug(
                "IGNORING a store request from %s, existing timestamp %s is larger than new %s"
                % (str(sender), str(existingTimestamp), str(newTimestamp)))
            return True

    def rpc_send(self, sender, message):
        self.log.info("Received message: \"" + message.strip("\n") +
                      "\" from address " + str(sender))
        self.messages.append(message)
        return True

    def rpc_find_node(self, sender, nodeid, key):
        self.log.info("finding neighbors of %i in local table" %
                      long(nodeid.encode('hex'), 16))
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        node = Node(key)
        return map(tuple, self.router.findNeighbors(node, exclude=source))

    def rpc_find_value(self, sender, nodeid, key):
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        value = self.storage.get(key, None)
        if value is None:
            return self.rpc_find_node(sender, nodeid, key)
        return {'value': value}

    def callFindNode(self, nodeToAsk, nodeToFind):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_node(address, self.sourceNode.id, nodeToFind.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callFindValue(self, nodeToAsk, nodeToFind):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_value(address, self.sourceNode.id, nodeToFind.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callPing(self, nodeToAsk):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.ping(address, self.sourceNode.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callStore(self, nodeToAsk, key, value):
        self.log.debug("Storing on %s" % str(nodeToAsk))
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.store(address, self.sourceNode.id, key, value)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callSend(self, message, addr, port):
        address = (addr, port)
        self.log.info("Sending message: \"" + message.strip("\n") +
                      "\" to address " + str(address))
        self.send(address, message)

    def transferKeyValues(self, node):
        """
        Given a new node, send it all the keys/values it should be storing.

        @param node: A new node that just joined (or that we just found out
        about).

        Process:
        For each key in storage, get k closest nodes.  If the new node is closer
        than the furthest in that list, and the node for this server
        is closer than the closest in that list, then store the key/value
        on the new node (per section 2.5 of the paper)
        """
        ds = []
        for key, value in self.storage.iteritems():
            keynode = Node(digest(key))
            neighbors = self.router.findNeighbors(keynode)
            if len(neighbors) > 0:
                newNodeClose = node.distanceTo(
                    keynode) < neighbors[-1].distanceTo(keynode)
                thisNodeClosest = self.sourceNode.distanceTo(
                    keynode) < neighbors[0].distanceTo(keynode)
            if len(neighbors) == 0 or (newNodeClose and thisNodeClosest):
                ds.append(self.callStore(node, key, value))
        return defer.gatherResults(ds)

    def handleCallResponse(self, result, node):
        """
        If we get a response, add the node to the routing table.  If
        we get no response, make sure it's removed from the routing table.
        """
        if result[0]:
            self.log.debug("Result is %s" % str(result))
            self.log.info("got response from %s, adding to router" % node)
            self.router.addContact(node)
            if self.router.isNewNode(node):
                self.transferKeyValues(node)
        else:
            self.log.debug("no response from %s, removing from router" % node)
            self.router.removeContact(node)
        return result
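# --- Worked sketch (not part of the original code) of the section-2.5 replication test
# --- performed in transferKeyValues() above, with plain integers standing in for node ids.
def should_replicate(new_node_id, own_id, neighbor_ids, key_id):
    """True if the key/value should be pushed to the newly discovered node."""
    if not neighbor_ids:
        return True
    dist = lambda a, b: a ^ b                                    # Kademlia XOR distance
    neighbors = sorted(neighbor_ids, key=lambda n: dist(n, key_id))
    new_node_close = dist(new_node_id, key_id) < dist(neighbors[-1], key_id)
    this_node_closest = dist(own_id, key_id) < dist(neighbors[0], key_id)
    return new_node_close and this_node_closest

# the newcomer (0b1000) beats the furthest known neighbor and we are the closest known node
print(should_replicate(0b1000, 0b1011, [0b0011, 0b0001], 0b1010))  # -> True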
Example #35
0
 def __init__(self, dhtstorage):
     Resource.__init__(self)
     self.dhtstorage = dhtstorage
     self.log = Logger(system=self)
Example #36
0
class BlockStorage(object):
    """ BlockStorage has following properties:
        a) is content-addressable (all keys must be hash(value))
        b) high TTL (effectively the keys don't expire)
        c) stores only valid JSON values
    """
    implements(IStorage)
    def __init__(self, ttl=STORAGE_TTL):
        """
        By default, max age is three years.
        """
        self.data = OrderedDict()
        self.ttl = ttl
        self.log = Logger(system=self)

    def __setitem__(self, key, value):
        # only valid JSON values are stored
        try:
            json.loads(value)
        except ValueError:
            self.log.info("value not JSON, not storing")
            return

        hash = coinkit.hex_hash160(value)
        test_key = digest(hash)

        if key != test_key:
            self.log.info("hash(value) doesn't match, not storing")
            return

        if key in self.data:
            del self.data[key]

        self.data[key] = (time.time(), value)
        self.cull()

    def cull(self):
        # evict expired entries from the front of the insertion-ordered dict
        for k, v in self.iteritemsOlderThan(self.ttl):
            self.data.popitem(last=False)

    def get(self, key, default=None):
        self.cull()
        if key in self.data:
            value = self[key]
            hash = coinkit.hex_hash160(value)

            test_key = digest(hash)

            if key != test_key:
                self.log.info("hash(value) doesn't match, ignoring value")
                return default

            return self[key]

        return default

    def __getitem__(self, key):
        self.cull()
        return self.data[key][1]

    def __iter__(self):
        self.cull()
        return iter(self.data)

    def __repr__(self):
        self.cull()
        return repr(self.data)

    def iteritemsOlderThan(self, secondsOld):
        minBirthday = time.time() - secondsOld
        zipped = self._tripleIterable()
        matches = takewhile(lambda r: minBirthday >= r[1], zipped)
        return imap(operator.itemgetter(0, 2), matches)

    def _tripleIterable(self):
        ikeys = self.data.iterkeys()
        ibirthday = imap(operator.itemgetter(0), self.data.itervalues())
        ivalues = imap(operator.itemgetter(1), self.data.itervalues())
        return izip(ikeys, ibirthday, ivalues)

    def iteritems(self):
        self.cull()
        ikeys = self.data.iterkeys()
        ivalues = imap(operator.itemgetter(1), self.data.itervalues())
        return izip(ikeys, ivalues)
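# --- Hedged usage sketch (not part of the original code): the content-addressing contract
# --- enforced by __setitem__ above. digest() and coinkit are assumed to be the same helpers
# --- imported by the example's module.
import json

value = json.dumps({"name": "example.id"})
key = digest(coinkit.hex_hash160(value))    # keys must be the hash of the stored value

store = BlockStorage()
store[key] = value                          # accepted: key == digest(hash(value))
store[digest("bogus")] = value              # rejected and logged: hashes do not match
assert store.get(key) == value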
Example #37
0
class BlockStorage(object):
    """ BlockStorage has following properties:
        a) is content-addressable (all keys must be hash(value))
        b) high TTL (effectively the keys don't expire)
        c) stores only valid JSON values
    """
    implements(IStorage)

    def __init__(self, ttl=STORAGE_TTL):
        """
        By default, max age is three years.
        """
        self.data = OrderedDict()
        self.ttl = ttl
        self.log = Logger(system=self)

    def __setitem__(self, key, value):
        # only valid JSON values are stored
        try:
            json.loads(value)
        except ValueError:
            self.log.info("value not JSON, not storing")
            return

        hash = pybitcoin.hash.hex_hash160(value)
        test_key = digest(hash)

        if key != test_key:
            self.log.info("hash(value) doesn't match, not storing")
            return

        if key in self.data:
            del self.data[key]

        self.data[key] = (time.time(), value)
        self.cull()

    def cull(self):
        # evict expired entries from the front of the insertion-ordered dict
        for k, v in self.iteritemsOlderThan(self.ttl):
            self.data.popitem(last=False)

    def get(self, key, default=None):
        self.cull()
        if key in self.data:
            value = self[key]
            hash = pybitcoin.hash.hex_hash160(value)

            test_key = digest(hash)

            if key != test_key:
                self.log.info("hash(value) doesn't match, ignoring value")
                return default

            return self[key]

        return default

    def __getitem__(self, key):
        self.cull()
        return self.data[key][1]

    def __iter__(self):
        self.cull()
        return iter(self.data)

    def __repr__(self):
        self.cull()
        return repr(self.data)

    def iteritemsOlderThan(self, secondsOld):
        minBirthday = time.time() - secondsOld
        zipped = self._tripleIterable()
        matches = takewhile(lambda r: minBirthday >= r[1], zipped)
        return imap(operator.itemgetter(0, 2), matches)

    def _tripleIterable(self):
        ikeys = self.data.iterkeys()
        ibirthday = imap(operator.itemgetter(0), self.data.itervalues())
        ivalues = imap(operator.itemgetter(1), self.data.itervalues())
        return izip(ikeys, ibirthday, ivalues)

    def iteritems(self):
        self.cull()
        ikeys = self.data.iterkeys()
        ivalues = imap(operator.itemgetter(1), self.data.itervalues())
        return izip(ikeys, ivalues)
Example #38
0
class TalosSecureDHTServer(TalosDHTServer):
    def __init__(self, ksize=20, alpha=3, priv_key=None, storage=None,
                 talos_vc=None, rebub_delay=3600, c1bits=1, tls_port=-1):
        """
        Create a server instance.  This will start listening on the given port.
        Args:
            ksize (int): The k parameter from the paper
            alpha (int): The alpha parameter from the paper
            id: The id for this node on the network.
            storage: An instance that implements :interface:`~kademlia.storage.IStorage`
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = storage or TalosLevelDBDHTStorage("./leveldb")
        self.c1bits = c1bits

        if priv_key is None:
            self.priv_key, node_id = generate_keys_with_crypto_puzzle(c1bits)
        else:
            self.priv_key = priv_key
            node_id = pub_to_node_id(self.priv_key.public_key())

        self.node = Node(node_id)

        def start_looping_call(num_seconds):
            self.refreshLoop = LoopingCall(self.refreshTable).start(num_seconds)

        self.delay = rebub_delay
        task.deferLater(reactor, rebub_delay, start_looping_call, rebub_delay)

        self.talos_vc = talos_vc or AsyncPolicyApiClient()
        self.protocol = TalosSKademliaProtocol(self.priv_key, self.node,
                                               self.storage, ksize, talos_vc=self.talos_vc, cbits=c1bits)
        self.httpprotocol_client = None
        self.tls_port = tls_port

    def saveState(self, fname):
        """
        Save the state of this node (the alpha/ksize/id/immediate neighbors)
        to a cache file with the given fname.
        """
        self.log.info("Save state to file %s" % fname)
        data = {'ksize': self.ksize,
                'alpha': self.alpha,
                'priv_key': serialize_priv_key(self.priv_key),
                'c1bits': self.c1bits,
                'neighbors': self.bootstrappableNeighbors()}
        if len(data['neighbors']) == 0:
            self.log.warning("No known neighbors, so not writing to cache.")
            return
        with open(fname, 'w') as f:
            pickle.dump(data, f)

    @classmethod
    def loadState(cls, fname, storage=None, talos_vc=None):
        """
        Load the state of this node (the alpha/ksize/id/immediate neighbors)
        from a cache file with the given fname.
        """
        with open(fname, 'r') as f:
            data = pickle.load(f)
        s = cls(data['ksize'], data['alpha'], deserialize_priv_key(data['priv_key']),
                storage=storage, talos_vc=talos_vc, c1bits=data['c1bits'])
        if len(data['neighbors']) > 0:
            s.bootstrap(data['neighbors'])
        return s
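# --- Hedged sketch (not part of the original code): persisting and restoring node state
# --- with the methods above. The file name is a placeholder, and saveState() only writes
# --- once at least one neighbor is known.
server = TalosSecureDHTServer(ksize=20, alpha=3, c1bits=1)
# ... listen() and bootstrap() as with TalosDHTServer ...
server.saveState('dht_state.pickle')

# On a later start the private key, c1bits and neighbors are recovered:
restored = TalosSecureDHTServer.loadState('dht_state.pickle')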
Example #39
0
 def __init__(self, storage, rpc_protocol, talos_vc=TalosVCRestClient()):
     Resource.__init__(self)
     self.storage = storage
     self.log = Logger(system=self)
     self.talos_vc = talos_vc
     self.rpc_protocol = rpc_protocol
Example #40
0
class TalosDHTServer(object):
    """
    Modified implementation of bmullers DHT for talos
    High level view of a node instance.  This is the object that should be created
    to start listening as an active node on the network.
    
    We assume public ip addresses! No NAT etc
    """

    def __init__(self, ksize=20, alpha=3, id=None, storage=None,
                 talos_vc=None, rebub_delay=3600, tls_port=-1):
        """
        Create a server instance.  This will start listening on the given port.
        Args:
            ksize (int): The k parameter from the paper
            alpha (int): The alpha parameter from the paper
            id: The id for this node on the network.
            storage: An instance that implements :interface:`~kademlia.storage.IStorage`
        """
        self.ksize = ksize
        self.alpha = alpha
        self.log = Logger(system=self)
        self.storage = storage or TalosLevelDBDHTStorage("./leveldb")
        self.node = Node(id or digest(random.getrandbits(255)))

        def start_looping_call(num_seconds):
            self.refreshLoop = LoopingCall(self.refreshTable).start(num_seconds)

        self.delay = rebub_delay
        task.deferLater(reactor, rebub_delay, start_looping_call, rebub_delay)
        self.talos_vc = talos_vc or AsyncPolicyApiClient()
        self.protocol = TalosKademliaProtocol(self.node, self.storage, ksize, talos_vc=self.talos_vc)
        self.httpprotocol_client = None
        self.tls_port = tls_port

    def listen(self, port, interface="127.0.0.1"):
        """
        Init tcp/udp protocol on the given port
        Start listening on the given port.
        """
        if self.tls_port != -1:
            root1 = Resource()
            root2 = Resource()
            root1.putChild("get_chunk", QueryChunk(self.storage, talos_vc=self.talos_vc))
            root2.putChild("storelargechunk", StoreLargeChunk(self.storage, self.protocol, talos_vc=self.talos_vc))
            factory1 = Site(root1)
            factory2 = Site(root2)

            certData = getModule(__name__).filePath.sibling('server.pem').getContent()
            certificate = ssl.PrivateCertificate.loadPEM(certData)

            self.httpprotocol_client = TalosHTTPClient(self.protocol, port)
            self.protocol.http_client = self.httpprotocol_client
            reactor.listenTCP(port, factory1, interface=interface)
            reactor.listenSSL(self.tls_port, factory2, certificate.options(), interface=interface)
            return reactor.listenUDP(port, self.protocol, interface, maxPacketSize=65535)
        else:
            root = Resource()
            root.putChild("get_chunk", QueryChunk(self.storage, talos_vc=self.talos_vc))
            root.putChild("storelargechunk", StoreLargeChunk(self.storage, self.protocol, talos_vc=self.talos_vc))
            factory = Site(root)

            self.httpprotocol_client = TalosHTTPClient(self.protocol, port)
            self.protocol.http_client = self.httpprotocol_client
            reactor.listenTCP(port, factory, interface=interface)
            return reactor.listenUDP(port, self.protocol, interface, maxPacketSize=65535)

    def refreshTable(self):
        """
        Refresh buckets that haven't had any lookups in the last hour
        (per section 2.3 of the paper).
        """
        self.log.info("Refreshing table")
        ds = []
        for id in self.protocol.getRefreshIDs():
            node = Node(id)
            nearest = self.protocol.router.findNeighbors(node, self.alpha)
            spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
            ds.append(spider.find())

        def republishKeys(_):
            ds = []
            # Republish keys older than one hour
            for dkey, value in self.storage.iteritemsOlderThan(self.delay):
                ds.append(self.digest_set(digest(dkey), value))
            return defer.gatherResults(ds)

        return defer.gatherResults(ds).addCallback(republishKeys)

    def bootstrappableNeighbors(self):
        """
        Get a :class:`list` of (ip, port) :class:`tuple` pairs suitable for use as an argument
        to the bootstrap method.
        The server should have been bootstrapped
        already - this is just a utility for getting some neighbors and then
        storing them if this server is going down for a while.  When it comes
        back up, the list of nodes can be used to bootstrap.
        """
        neighbors = self.protocol.router.findNeighbors(self.node)
        return [tuple(n)[-2:] for n in neighbors]

    def bootstrap(self, addrs):
        """
        Bootstrap the server by connecting to other known nodes in the network.
        Args:
            addrs: A `list` of (ip, port) `tuple` pairs.  Note that only IP addresses
                   are acceptable - hostnames will cause an error.
        """
        # if the transport hasn't been initialized yet, wait a second
        if self.protocol.transport is None:
            return task.deferLater(reactor, 1, self.bootstrap, addrs)

        def initTable(results):
            nodes = []
            for addr, result in results.items():
                if result[0]:
                    nodes.append(Node(result[1], addr[0], addr[1]))
            spider = NodeSpiderCrawl(self.protocol, self.node, nodes, self.ksize, self.alpha)
            return spider.find()

        ds = {}
        for addr in addrs:
            ds[addr] = self.protocol.ping(addr, self.node.id)
        return deferredDict(ds).addCallback(initTable)

    def inetVisibleIP(self):
        """
        Get the internet-visible IPs of this node as other nodes see them.
        Returns:
            A `list` of IPs.  If no one can be contacted, the `list` will be empty.
        """

        def handle(results):
            ips = [result[1][0] for result in results if result[0]]
            self.log.debug("other nodes think our ip is %s" % str(ips))
            return ips

        ds = []
        for neighbor in self.bootstrappableNeighbors():
            ds.append(self.protocol.stun(neighbor))
        return defer.gatherResults(ds).addCallback(handle)

    def store_chunk(self, chunk, policy=None, time_keeper=TimeKeeper()):
        dkey = digest(chunk.key)
        self.log.debug("Storing chunk with key %s" % (binascii.hexlify(dkey),))
        result = self.digest_set(dkey, chunk.encode(), policy_in=policy, time_keeper=time_keeper)
        return result

    def get_addr_chunk(self, chunk_key, policy_in=None, time_keeper=TimeKeeper()):
        # if this node has it, return it
        if self.storage.has_value(chunk_key):
            addr = self.protocol.get_address()
            return defer.succeed("%s:%d" % (addr[0], addr[1]))
        dkey = digest(chunk_key)
        node = Node(dkey)
        nearest = self.protocol.router.findNeighbors(node)
        self.log.debug("Crawling for key %s" % (binascii.hexlify(dkey),))
        if len(nearest) == 0:
            self.log.warning("There are no known neighbors to get key %s" % binascii.hexlify(dkey))
            return defer.succeed(None)
        spider = TalosChunkSpiderCrawl(self.protocol, self.httpprotocol_client, node, chunk_key, nearest, self.ksize,
                                       self.alpha, time_keeper=time_keeper)
        return spider.find()

    def digest_set(self, dkey, value, policy_in=None, time_keeper=TimeKeeper()):
        """
        Set the given SHA1 digest key to the given value in the network.
        """
        node = Node(dkey)
        # this is useful for debugging messages
        hkey = binascii.hexlify(dkey)

        def _anyRespondSuccess(responses, time_keeper, id, name):
            """
            Given the result of a DeferredList of calls to peers, ensure that at least
            one of them was contacted and responded with a Truthy result.
            """
            time_keeper.stop_clock_unique(name, id)

            for deferSuccess, result in responses:
                peerReached, peerResponse = result
                if deferSuccess and peerReached and peerResponse:
                    return True
            return False

        def store(nodes):
            self.log.info("setting '%s' on %s" % (hkey, map(str, nodes)))
            # if this node is close too, then store here as well
            if self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):
                chunk = CloudChunk.decode(value)
                if digest(chunk.key) != dkey:
                    return {'error': 'key mismatch'}

                def handle_policy(policy):
                    time_keeper.stop_clock(ENTRY_FETCH_POLICY)
                    # Hack no chunk id given -> no key checks, key is in the encoded chunk
                    id = time_keeper.start_clock_unique()
                    self.storage.store_check_chunk(chunk, None, policy, time_keeper=time_keeper)
                    time_keeper.stop_clock_unique(ENTRY_STORE_CHECK, id)

                    id = time_keeper.start_clock_unique()
                    ds = [self.protocol.callStore(n, dkey, value) for n in nodes]
                    return defer.DeferredList(ds).addCallback(_anyRespondSuccess, time_keeper, id,
                                                              ENTRY_STORE_TO_ALL_NODES)

                if policy_in is not None:
                    return handle_policy(policy_in)
                time_keeper.start_clock()
                return self.talos_vc.get_policy_with_txid(chunk.get_tag_hex()).addCallback(handle_policy)

            id = time_keeper.start_clock_unique()
            ds = [self.protocol.callStore(n, dkey, value) for n in nodes]
            return defer.DeferredList(ds).addCallback(_anyRespondSuccess, time_keeper, id, ENTRY_STORE_TO_ALL_NODES)

        nearest = self.protocol.router.findNeighbors(node)
        if len(nearest) == 0:
            self.log.warning("There are no known neighbors to set key %s" % hkey)
            return defer.succeed(False)
        spider = TimedNodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha, time_keeper=time_keeper)
        return spider.find().addCallback(store)

    def saveState(self, fname):
        """
        Save the state of this node (the alpha/ksize/id/immediate neighbors)
        to a cache file with the given fname.
        """
        self.log.info("Save state to file %s" % fname)
        data = {'ksize': self.ksize,
                'alpha': self.alpha,
                'id': self.node.id,
                'neighbors': self.bootstrappableNeighbors()}
        if len(data['neighbors']) == 0:
            self.log.warning("No known neighbors, so not writing to cache.")
            return
        with open(fname, 'w') as f:
            pickle.dump(data, f)

    @classmethod
    def loadState(cls, fname, storage=None, talos_vc=None):
        """
        Load the state of this node (the alpha/ksize/id/immediate neighbors)
        from a cache file with the given fname.
        """
        with open(fname, 'r') as f:
            data = pickle.load(f)
        s = cls(data['ksize'], data['alpha'], data['id'], storage=storage, talos_vc=talos_vc)
        if len(data['neighbors']) > 0:
            s.bootstrap(data['neighbors'])
        return s

    def saveStateRegularly(self, fname, frequency=600):
        """
        Save the state of node with a given regularity to the given
        filename.
        Args:
            fname: File name to save retularly to
            frequencey: Frequency in seconds that the state should be saved.
                        By default, 10 minutes.
        """

        def run_looping_call(freq):
            loop = LoopingCall(self.saveState, fname).start(freq)
            return loop

        return task.deferLater(reactor, frequency, run_looping_call, frequency)
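# --- Hedged usage sketch (not part of the original code): starting a node with the class
# --- above. The bootstrap address, port and file name are placeholders.
from twisted.internet import reactor

server = TalosDHTServer(ksize=20, alpha=3)
server.listen(8468)                              # UDP + plain HTTP as implemented in listen()
server.saveStateRegularly('dht_state.pickle')

def on_bootstrap(_):
    print(server.bootstrappableNeighbors())      # (ip, port) pairs reusable on restart

server.bootstrap([("192.0.2.10", 8468)]).addCallback(on_bootstrap)
reactor.run()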
Example #41
0
class DHTMirrorRPC(jsonrpc.JSONRPC):
    """ A DHT Mirror with faster get/set."""

    def _get_hash(self, value):

        if type(value) is not dict:
            try:
                #self.log.info("WARNING: converting to json")
                value = json.loads(value)
            except:
                self.log.info("WARNING: not valid json")

        return hex_hash160(json.dumps(value, sort_keys=True))

    def __init__(self, dht_server=None):
        self.dht_server = dht_server
        self.log = Logger(system=self)

    def jsonrpc_ping(self):

        reply = {}
        reply['status'] = "alive"
        return reply

    def jsonrpc_stats(self):
        stats = {}
        stats['entries'] = dht_mirror.count()
        return stats

    def jsonrpc_get(self, key):

        resp = {}
        resp['key'] = key

        self.log.info("Get request for key: %s" % key)

        entry = dht_mirror.find_one({"key": key})

        if entry is not None:
            resp['value'] = entry['value']
        else:
            # if not in mirror/cache get from DHT
            return self.jsonrpc_dht_get(key)

        return resp

    def jsonrpc_set(self, key, value):

        self.log.info("Set request for key: %s" % key)

        resp = {}

        test_hash = self._get_hash(value)

        if test_hash != key:
            resp['error'] = "hash(value) doesn't match key"
            return resp

        write_to_cache(key, value)

        # perform the dht set/refresh in the background
        self.jsonrpc_dht_set(key, value)

        resp['status'] = 'success'

        return resp

    def jsonrpc_dht_get(self, key):

        self.log.info("DHT get request for key: %s" % key)

        resp = {}

        try:
            resp = self.dht_server.get(key)
            value = resp[0]
            write_to_cache(key, value)

        except Exception as e:
            resp['error'] = str(e)

        return resp

    def jsonrpc_dht_set(self, key, value):

        self.log.info("DHT set request for key: %s" % key)

        resp = {}

        try:
            resp = self.dht_server.set(key, value)
        except Exception as e:
            resp['error'] = str(e)

        return resp
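# --- Hedged sketch (not part of the original code): exposing the JSON-RPC resource above
# --- over HTTP, assuming the jsonrpc.JSONRPC base class is a twisted.web resource (as in
# --- txjsonrpc). The port is a placeholder and a real DHT client would be passed in.
from twisted.web import server
from twisted.internet import reactor

rpc = DHTMirrorRPC(dht_server=None)     # replace None with an actual DHT client object
reactor.listenTCP(6265, server.Site(rpc))
reactor.run()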
Example #42
0
class KademliaProtocol(RPCProtocol):
    def __init__(self, sourceNode, storage, ksize):
        RPCProtocol.__init__(self)
        self.router = RoutingTable(self, ksize, sourceNode)
        self.storage = storage
        self.sourceNode = sourceNode
        self.log = Logger(system=self)

    def getRefreshIDs(self):
        """
        Get ids to search for to keep old buckets up to date.
        """
        ids = []
        for bucket in self.router.getLonelyBuckets():
            ids.append(random.randint(*bucket.range))
        return ids

    def rpc_stun(self, sender):
        return sender

    def rpc_ping(self, sender, nodeid):
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        return self.sourceNode.id

    def rpc_store(self, sender, nodeid, key, value):
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        self.log.debug("got a store request from %s, storing value" % str(sender))
        self.storage[key] = value
        return True

    def rpc_find_node(self, sender, nodeid, key):
        self.log.info("finding neighbors of %i in local table" % long(nodeid.encode('hex'), 16))
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        node = Node(key)
        return map(tuple, self.router.findNeighbors(node, exclude=source))

    def rpc_find_value(self, sender, nodeid, key):
        source = Node(nodeid, sender[0], sender[1])
        self.router.addContact(source)
        value = self.storage.get(key, None)
        if value is None:
            return self.rpc_find_node(sender, nodeid, key)
        return { 'value': value }

    def callFindNode(self, nodeToAsk, nodeToFind):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_node(address, self.sourceNode.id, nodeToFind.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callFindValue(self, nodeToAsk, nodeToFind):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.find_value(address, self.sourceNode.id, nodeToFind.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callPing(self, nodeToAsk):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.ping(address, self.sourceNode.id)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def callStore(self, nodeToAsk, key, value):
        address = (nodeToAsk.ip, nodeToAsk.port)
        d = self.store(address, self.sourceNode.id, key, value)
        return d.addCallback(self.handleCallResponse, nodeToAsk)

    def transferKeyValues(self, node):
        """
        Given a new node, send it all the keys/values it should be storing.

        @param node: A new node that just joined (or that we just found out
        about).

        Process:
        For each key in storage, get k closest nodes.  If the new node is closer
        than the furthest in that list, and the node for this server
        is closer than the closest in that list, then store the key/value
        on the new node (per section 2.5 of the paper)
        """
        ds = []
        for key, value in self.storage.iteritems():
            keynode = Node(digest(key))
            neighbors = self.router.findNeighbors(keynode)
            if len(neighbors) > 0:
                newNodeClose = node.distanceTo(keynode) < neighbors[-1].distanceTo(keynode)
                thisNodeClosest = self.sourceNode.distanceTo(keynode) < neighbors[0].distanceTo(keynode)
            if len(neighbors) == 0 or (newNodeClose and thisNodeClosest):
                ds.append(self.callStore(node, key, value))
        return defer.gatherResults(ds)

    def handleCallResponse(self, result, node):
        """
        If we get a response, add the node to the routing table.  If
        we get no response, make sure it's removed from the routing table.
        """
        if result[0]:
            self.log.info("got response from %s, adding to router" % node)
            self.router.addContact(node)
            if self.router.isNewNode(node):
                self.transferKeyValues(node)
        else:
            self.log.debug("no response from %s, removing from router" % node)
            self.router.removeContact(node)
        return result
Example #43
0
class NetworkInterface (object):

    # Create a NetworkInterface object to accomplish all network related tasks
    def __init__(self, appDeployer=False):
        self._connected = False
        self._app_deployer = appDeployer

        # optional...
        self._number_of_nodes = 0
        self._list_of_nodes = []
        
        # logging capabilities
        self._log = Logger(system=self)

        # HERE--> Implementation specific node instantiation
        from kademlia.network import Server
        self._node = Server()
        self._node.log.level = 4
        # END OF SECTION 

    def bootStrapDone(self, server):
        #contacts = self._node.inetVisibleIP()
        print "bootstrap done"

    def retrieveContacts(self):
        """
        Retrieve the list of currently known neighbors.
        """
        # Walking the routing-table buckets yields the same nodes as bootstrappableNeighbors()
        for bucket in self._node.protocol.router.buckets:
            print bucket.getNodes()

        # Note: bootstrappableNeighbors() only returns the neighbors that were provided
        # in the bootstrap list and are still online.
        neighbors = self._node.bootstrappableNeighbors()

        print neighbors
        return neighbors

    def connect(self,fromPort,toPort,ip='127.0.0.1'):
        self._log.debug('Connecting...')
        #print "in connect ... "  
        #print "now listening on port: ",fromPort
        self._node.listen(fromPort)
        return self._node.bootstrap([(ip,toPort)]).addCallback(self.bootStrapDone)
            
    # This function is used to set a value in the DHT
    def setDone(self,result):
        print result
        print "set is done"
        
    def set(self,result, key, value):
        # HERE --> Implementation Specific Code
        print result, " :::  ", self,  " ::: ", key, " ::: ", value, " <----------------------------" 
        self._node.set(key, value).addCallback(self.setDone)
        # END OF SECTION

    def done(self,result):
        print "self: ", self
        print "Key result:", result

    def get(self,result, key):
        # HERE --> Implementation Specific Code
        print result, " :::  ", self,  " ::: ", key, " <----------------------------" 
        self._node.get(key).addCallback(self.done)