Exemplo n.º 1
0
def merkle_tree(transactions):
    """Build a Merkle tree over the UTF-8 encoding of each transaction and
    return the root hash as a hex string."""
    tree = MerkleTree()
    for txn in transactions:
        tree.add(txn.encode('utf-8'))
    root = tree.build()
    return codecs.encode(root, 'hex-codec').decode('utf-8')
Exemplo n.º 2
0
def scan_over():
    """Drain the global ``testblocks`` list into per-block Merkle roots and
    build (and return) the top-level Merkle tree over them."""
    leaves = []
    while testblocks:
        leaves.append(block_to_merkle(testblocks.pop(0)))
    top = MerkleTree(leaves=leaves)
    top.build()
    return top
Exemplo n.º 3
0
def block_to_merkle(block_outkeys):
    '''Build a Merkle Tree from outkeys that all belong to the same block
    (identified by block hash; height would work too).  The finished tree is
    registered in the server-side merkle_forest, and the (hex root, index)
    pair is returned for the client-side block_root_hash dictionary.

    Consumes ``block_outkeys`` (entries are popped off the front).
    '''
    block_hash = block_outkeys[0][0]
    assert all(bh == block_hash for bh, _, _, _ in block_outkeys)

    # Collapse each run of consecutive entries sharing a tx hash into a
    # per-transaction Merkle root leaf.
    leaves = []
    while block_outkeys:
        tx_hash = block_outkeys[0][1]
        tx_group = []
        while block_outkeys and block_outkeys[0][1] == tx_hash:
            tx_group.append(block_outkeys.pop(0))
        leaves.append(tx_to_merkle(tx_group))

    tree = MerkleTree(leaves=leaves)
    tree.build()

    root_hex = codecs.encode(tree.root.val, 'hex_codec')
    merkle_forest[root_hex] = tree
    return (root_hex, tree.root.idx)
Exemplo n.º 4
0
 def compute_hash(self):
     '''
     Chunk self.data into split_n-character pieces and return the Merkle
     root address computed over those pieces.
     '''
     chunks = [self.data[start:start + split_n]
               for start in range(0, len(self.data), split_n)]
     return MerkleTree(chunks).address()
Exemplo n.º 5
0
def main():
    """End-to-end test (Python 2): build 200 small Merkle trees ('blocks') of
    5 leaves each, then run 100 random lookups, verifying each returned leaf
    against its Merkle proof and the stored block header."""
    for i in range(0, 1000, 5):
        # generate a block of 5 (outkey, index) leaves covering indices i..i+4

        outputs = []
        for j in range(0, 5):
            outkey = id_generator()
            output = (outkey, i + j)
            outputs.append(output)

        newtree = MerkleTree(leaves=outputs)
        newtree.build()
        k = newtree.root
        root = (codecs.encode(k.val, 'hex_codec'), k.idx)
        # Both the tree and its header are keyed by the root's idx --
        # presumably the max leaf index in the block; confirm in MerkleTree.
        blocks[k.idx] = newtree
        block_headers[k.idx] = root

    block_headers_keys = block_headers.keys()
    blocks_keys = blocks.keys()

    #Run a test 100 times
    correct = incorrect = 0
    for _ in range(0, 100):

        c = randint(0, 999)
        #The client will make a request using c, and collect the ground truth

        d = find_nearest_above(block_headers_keys, c)
        truth = block_headers[d]

        e = find_nearest_above(blocks_keys, c)
        tree = blocks[e]

        # Locate the leaf whose index equals the block key and get its proof.
        avail = [leaf.idx for leaf in tree.leaves]
        chosen = avail.index(e)

        proof = tree.get_proof(chosen)

        data = tree.leaves[chosen].data
        print "Chosen index: " + str(c) + ', Returned data: ' + str(data)

        #check if the data returned is hashed to the first element in the proof
        if hash_function(data).hexdigest() == proof[0][0][0]:

            #check the Merkle proof
            if check_proof(proof) == truth[0]:
                correct += 1
            else:
                incorrect += 1
        else:
            incorrect += 1

    total = correct + incorrect
    print "Number Correct: " + str(correct) + '/' + str(total)
    print "Number Incorrect: " + str(incorrect) + '/' + str(total)
Exemplo n.º 6
0
 def verify(self, data_hash, position, proof):
     '''
     Check a storage proof against our own copy of the data.
     Proof will have the format of dict(height: hash).
     '''
     data = self.data[data_hash].data
     chunks = [data[start:start + split_n]
               for start in range(0, len(data), split_n)]
     tree = MerkleTree(chunks)
     # TODO(review): should the position itself be validated as well?
     expected = tree.get(position)
     if proof[-1][0] == expected or proof[0][0] == expected:
         return tree.contains(proof)
     return None
Exemplo n.º 7
0
 def respond(self, data_hash, position):
     '''
     Respond to a challenge by producing a Merkle proof for *position*
     over the split_n-sized chunks of the data stored under *data_hash*.

     Returns None when we do not hold the requested data.
     '''
     try:
         data = self.data[data_hash].data
     except KeyError:
         # Narrowed from a bare `except:`, which also swallowed
         # KeyboardInterrupt/SystemExit and hid real bugs.
         # You don't have this data. Can implement code to cheat?
         return None
     values = [data[i:i+split_n] for i in range(0, len(data), split_n)]
     merkle = MerkleTree(values)
     return merkle.prove(position)
Exemplo n.º 8
0
def tx_to_merkle(tx_outkeys):
    '''Build a Merkle Tree from outkeys that all share one transaction hash.
    Registers the tree in the server-side merkle_forest (keyed by hex root)
    and returns the (hex root, index) pair used by the client-side
    tx_root_hash dictionary.'''
    tx_hash = tx_outkeys[0][1]
    assert all(th == tx_hash for _, th, _, _ in tx_outkeys)

    leaves = [(outkey, idx) for _, _, outkey, idx in tx_outkeys]
    tree = MerkleTree(leaves=leaves)
    tree.build()

    root_hex = codecs.encode(tree.root.val, 'hex_codec')
    merkle_forest[root_hex] = tree
    return (root_hex, tree.root.idx)
Exemplo n.º 9
0
def test():
    """
    Exercise Merkle one-time signing end to end.
    (otssign and orspublic are already covered by the OTS test.)
    """
    tree = MerkleTree(progressbar=False)

    for _ in range(tree.n_keys):
        message = getrandom(32)
        signature = tree.signature(message)

        # A genuine signature must verify.
        assert MerkleTree.verify(message, signature, tree.public_key)

        # Tampered signatures must be rejected.
        tampered = modify_sign1(signature, tree.n_keys)
        assert not MerkleTree.verify(message, tampered, tree.public_key)

        tampered = modify_sign4(signature)
        assert not MerkleTree.verify(message, tampered, tree.public_key)

        # Verification must not have mutated the original signature.
        assert MerkleTree.verify(message, signature, tree.public_key)

    # All one-time keys are now spent; one more signature must fail.
    with pytest.raises(AssertionError) as excinfo:
        message = getrandom(32)
        signature = tree.signature(message)

    assert excinfo.value.args == ('All keys have been used', )
Exemplo n.º 10
0
def part2():
    """STARK 101 part 2: build the rational constraint quotients, combine
    them (with channel-supplied random coefficients) into the composition
    polynomial CP, and commit to CP's evaluations over the domain."""
    t, g, points, h_gen, h, domain, p, ev, mt, ch = part1()
    # Constraint 0: p(x) - 1 vanishes at x = 1 (the first trace element).
    numer0 = p - Polynomial([FieldElement(1)])
    denom0 = Polynomial.gen_linear_term(FieldElement(1))
    q0, r0 = numer0.qdiv(denom0)
    # Constraint 1: p(x) - 2338775057 vanishes at x = g^1022 (last element).
    numer1 = p - Polynomial([FieldElement(2338775057)])
    denom1 = Polynomial.gen_linear_term(points[1022])
    q1, r1 = numer1.qdiv(denom1)
    # Constraint 2 (FibonacciSq step): p(g^2 x) - p(g x)^2 - p(x)^2 = 0.
    inner_poly0 = Polynomial([FieldElement(0), points[2]])
    final0 = p.compose(inner_poly0)
    inner_poly1 = Polynomial([FieldElement(0), points[1]])
    composition = p.compose(inner_poly1)
    final1 = composition * composition
    final2 = p * p
    numer2 = final0 - final1 - final2
    # Presumably +/-(x^1024 - 1), which vanishes on the whole subgroup --
    # confirm Polynomial's coefficient ordering.
    coef = [FieldElement(1)] + [FieldElement(0)] * 1023 + [FieldElement(-1)]
    numerator_of_denom2 = Polynomial(coef)
    # Divide out the three points where the recurrence need not hold.
    factor0 = Polynomial.gen_linear_term(points[1021])
    factor1 = Polynomial.gen_linear_term(points[1022])
    factor2 = Polynomial.gen_linear_term(points[1023])
    denom_of_denom2 = factor0 * factor1 * factor2
    denom2, r_denom2 = numerator_of_denom2.qdiv(denom_of_denom2)
    q2, r2 = numer2.qdiv(denom2)
    # Random linear combination of the three quotients forms CP.
    cp0 = q0.scalar_mul(ch.receive_random_field_element())
    cp1 = q1.scalar_mul(ch.receive_random_field_element())
    cp2 = q2.scalar_mul(ch.receive_random_field_element())
    cp = cp0 + cp1 + cp2
    # Evaluate CP over the domain and commit via a Merkle root.
    cp_ev = [cp.eval(d) for d in domain]
    cp_mt = MerkleTree(cp_ev)
    ch.send(cp_mt.root)
    return cp, cp_ev, cp_mt, ch, domain
Exemplo n.º 11
0
 def __init__(self):
     """Create a Dynamo node, add it to the shared ring, and start the
     failed-node retry timer."""
     super(DynamoNode, self).__init__()
     self.local_store = MerkleTree()  # key => (value, metadata)
     self.pending_put_rsp = {}  # seqno => set of nodes that have stored
     self.pending_put_msg = {}  # seqno => original client message
     self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
     self.pending_get_msg = {}  # seqno => original client message
     # seqno => set of requests sent to other nodes, for each message class
     self.pending_req = {PutReq: {}, GetReq: {}}
     self.failed_nodes = []
     self.pending_handoffs = {}
     # Rebuild the consistent hash table so it includes this new node
     DynamoNode.nodelist.append(self)
     DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
     # Run a timer to retry failed nodes
     self.retry_failed_node("retry")
Exemplo n.º 12
0
 def __init__(self, addr, config_file='server_config'):
     """Create a node listening on *addr*, loading peer addresses (one per
     line) from *config_file*, then rebuild the consistent hash ring over
     the full node list."""
     super(DynamoNode, self).__init__()
     self.local_store = MerkleTree()  # key => (value, metadata)
     self.pending_put_rsp = {}  # seqno => set of nodes that have stored
     self.pending_put_msg = {}  # seqno => original client message
     self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
     self.pending_get_msg = {}  # seqno => original client message
     # seqno => set of requests sent to other nodes, for each message class
     self.pending_req = {PutReq: {}, GetReq: {}}
     self.failed_nodes = []
     self.pending_handoffs = {}
     self.addr = addr
     self.servers = []
     self.db = leveldb.LevelDB('./' + addr + '_db')
     # Fix: close the config file deterministically (the original open()
     # handle was never closed).
     with open(config_file, 'r') as f:
         for line in f:
             self.servers.append(line.rstrip())
     for server in self.servers:
         DynamoNode.nodelist.append(server)
     # Rebuild the consistent hash table
     DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
     # Run a timer to retry failed nodes
     #self.pool = gevent.pool.Group()
     #self.pool.spawn(self.retry_failed_node)

     Framework.setNodes(DynamoNode.nodelist)
Exemplo n.º 13
0
    def __init__(self, addr, config_file='server_config'):
        """Create a map-reduce-capable Dynamo node at *addr* and announce it
        (best-effort) to the master at self.m_addr."""
        super(DynamoNode, self).__init__()
        self.framework = Framework()
        self.m_addr = "127.0.0.1:29009"
        self.local_store = MerkleTree()  # key => (value, metadata)
        self.pending_put_rsp = {}  # seqno => set of nodes that have stored
        self.pending_put_msg = {}  # seqno => original client message
        self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
        self.pending_get_msg = {}  # seqno => original client message
        # seqno => set of requests sent to other nodes, for each message class
        self.pending_req = {PutReq: {}, GetReq: {}}
        self.failed_nodes = []
        self.pending_handoffs = {}
        self.result = {}
        self.reduceResult = {}
        self.MapReduceDB = leveldb.LevelDB('./' + addr + '_mrdb')
        self.mapDict = {}
        self.addr = addr
        self.servers = []
        self.db = leveldb.LevelDB('./' + addr + '_db')
        # Best-effort registration with the master: tolerate a dead master,
        # but (fix) don't swallow KeyboardInterrupt/SystemExit like the old
        # bare `except:` did, and (fix) always close the client socket --
        # previously it leaked whenever add_node() raised.
        c = zerorpc.Client(timeout=3)
        c.connect('tcp://' + self.m_addr)
        try:
            c.add_node(self.addr)
        except Exception:
            pass
        finally:
            c.close()

        self.pool = gevent.pool.Group()
        self.pool.spawn(self.retry_failed_node)
        self.framework.setDynamo(self)
Exemplo n.º 14
0
 def __init__(self, block_header, transactions):
     """Assemble a block and set its Merkle-root hash from the JSON
     serialization of its transactions."""
     self._magic_no = self.MAGIC_NO
     self._block_header = block_header
     self._transactions = transactions
     serialized = [json.dumps(tx.serialize()) for tx in self._transactions]
     self.set_hash_merkle_root_hash(MerkleTree(serialized).root_hash)
Exemplo n.º 15
0
def test_merkle_get_authentication_path():
    """Authentication paths verify for the right leaf and fail when the path
    or the content is tampered with."""
    for _ in range(10):
        # Fix: lower bound must be 1 -- data_length == 0 made the later
        # randint(0, -1) raise ValueError (and built a tree over no data).
        # NOTE(review): a 1-leaf tree may yield an empty decommitment, which
        # would also break randint(0, len(altered) - 1) -- confirm MerkleTree
        # behavior for single-leaf trees.
        data_length = randint(1, 2000)
        data = [FieldElement.random_element() for _ in range(data_length)]
        m = MerkleTree(data)
        leaf_id = randint(0, data_length - 1)
        decommitment = m.get_authentication_path(leaf_id)
        # Check a correct decommitment.
        content = data[leaf_id]
        assert verify_decommitment(leaf_id, content, decommitment, m.root)
        # Check that altering the decommitment causes verification to fail.
        altered = decommitment[:]
        random_index = randint(0, len(altered) - 1)
        altered[random_index] = sha256(
            altered[random_index].encode()).hexdigest()
        assert not verify_decommitment(leaf_id, content, altered, m.root)
        # Check that altering the content causes verification to fail
        # (unless the randomly chosen other content happens to be equal).
        other_content = data[randint(0, data_length - 1)]
        assert not verify_decommitment(leaf_id, other_content, decommitment,
                                       m.root) or other_content == content
Exemplo n.º 16
0
def block_to_merkle(blk):
    """Build the block-level Merkle tree for *blk*: one leaf per transaction
    root.  Updates tx_dict / tx_root_hash for every transaction and
    blocks / block_root_hash for the block, then returns
    (block hash, root index).  Raises TypeError for non-Block input."""
    if not isinstance(blk, Block):
        raise TypeError("Input must be a block!")

    tx_leaves = []
    for tx in blk.transactions:
        tx_tree = tx_to_merkle(tx)
        tx_root = tx_tree.root
        tx_dict[tx.tx_hash] = tx_tree
        tx_root_hash[tx.tx_hash] = codecs.encode(tx_root.val, 'hex_codec')
        tx_leaves.append((tx.tx_hash, tx_root.idx))

    block_tree = MerkleTree(leaves=tx_leaves)
    block_tree.build()
    root = block_tree.root
    blocks[blk.block_hash] = block_tree
    block_root_hash[blk.block_hash] = codecs.encode(root.val, 'hex_codec')

    return (blk.block_hash, root.idx)
Exemplo n.º 17
0
def scan_over_new_blocks(new_blocks):
    '''Scan over the utxos, grouping consecutive entries by block hash.
    Each group becomes a block-level Merkle root leaf; the top Merkle Tree
    is then built over those leaves.  Updates the client-side top_root and
    the server-side top_merkle / merkle_forest ADS.

    Consumes ``new_blocks`` (entries are popped off the front).'''
    leaves = []
    while new_blocks:
        bhash = new_blocks[0][0]
        group = []
        while new_blocks and new_blocks[0][0] == bhash:
            group.append(new_blocks.pop(0))
        leaves.append(block_to_merkle(group))

    global top_merkle
    global top_root
    top_merkle = MerkleTree(leaves=leaves)
    top_merkle.build()

    root_hex = codecs.encode(top_merkle.root.val, 'hex_codec')
    top_root = (root_hex, top_merkle.root.idx)
    merkle_forest[root_hex] = top_merkle
Exemplo n.º 18
0
    def __init__(self, addr, config_file):
        """Create a zerorpc-backed Dynamo node at *addr* (Python 2), reading
        the peer list from *config_file* (one address per line) and opening
        zerorpc connections to every peer."""
        super(DynamoNode, self).__init__()
        self.local_store = MerkleTree()  # key => (value, metadata)
        self.pending_put_rsp = {}  # seqno => set of nodes that have stored
        self.pending_put_msg = {}  # seqno => original client message
        self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
        self.pending_get_msg = {}  # seqno => original client message
        # seqno => set of requests sent to other nodes, for each message class
        self.pending_req = {PutReq: {}, GetReq: {}}
        self.failed_nodes = []
        self.pending_handoffs = {}

        #modifed
        self.servers = []
        self.num = 3
        self.addr = addr

        if not os.path.exists('./' + addr):
            os.mkdir('./' + addr)
        self.db = leveldb.LevelDB('./' + addr + '/db')
        # NOTE(review): the config file handle is never closed -- worth
        # switching to a `with` block.
        f = open(config_file, 'r')
        for line in f.readlines():
            line = line.rstrip()
            self.servers.append(line)
        print 'My addr: %s' % (self.addr)
        print 'Server list: %s' % (str(self.servers))

        self.connections = []

        # Connect to every peer; this node itself is represented by `self`
        # in the connection list so local calls bypass the network.
        for i, server in enumerate(self.servers):
            DynamoNode.nodelist.append(server)
            if server == self.addr:
                self.i = i
                self.connections.append(self)
            else:
                c = zerorpc.Client(timeout=10)
                c.connect('tcp://' + server)
                self.connections.append(c)

        if not os.path.exists(addr):
            os.mkdir(addr)
        ###################################

        # Rebuild the consistent hash table
        #modified
#        DynamoNode.nodelist.append(self)
#        print "append node: ", self,  len(DynamoNode.nodelist)
###################################
        DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
        # Run a timer to retry failed nodes
        #modified
        self.pool = gevent.pool.Group()
        self.check_servers_greenlet = self.pool.spawn(self.retry_failed_node)
Exemplo n.º 19
0
def test_merkle():
    """Smoke-test Merkle proof generation and containment over the lines of
    a data file."""
    filename = 'merkle.txt'
    data = read_data(filename)
    size = check_filesize(filename)

    # One Merkle leaf per line of the file.
    leaves = data.splitlines()

    # The root address should be deterministic for the same input.
    merkle = MerkleTree(leaves)
    # print(merkle.address())

    # Prove membership of leaf 0, then check the tree accepts the proof.
    proof = merkle.prove(0)
    print(merkle.contains(proof))
    return
Exemplo n.º 20
0
def FriCommit(cp, domain, cp_eval, cp_merkle, channel):
    """FRI commitment phase: repeatedly fold the polynomial with a
    channel-supplied beta until it is constant, committing each new layer's
    Merkle root (and finally the constant's free term) to the channel.

    Returns the lists of polynomials, domains, layers, and Merkle trees."""
    fri_polys, fri_domains = [cp], [domain]
    fri_layers, fri_merkles = [cp_eval], [cp_merkle]
    while fri_polys[-1].degree() > 0:
        beta = channel.receive_random_field_element()
        folded_poly, folded_domain, folded_layer = next_fri_layer(
            fri_polys[-1], fri_domains[-1], beta)
        fri_polys.append(folded_poly)
        fri_domains.append(folded_domain)
        fri_layers.append(folded_layer)
        layer_tree = MerkleTree(folded_layer)
        fri_merkles.append(layer_tree)
        channel.send(layer_tree.root)
    # The last polynomial is constant: its free term is the commitment.
    channel.send(str(fri_polys[-1].poly[0]))
    return fri_polys, fri_domains, fri_layers, fri_merkles
Exemplo n.º 21
0
def part1():
    """STARK 101 part 1: build the FibonacciSq trace, interpolate it over a
    1024-point subgroup, evaluate on an 8192-point coset, and commit to the
    evaluations through the channel."""
    # Trace: a(0)=1, a(1)=3141592, a(n) = a(n-2)^2 + a(n-1)^2, 1023 entries.
    trace = [FieldElement(1), FieldElement(3141592)]
    while len(trace) < 1023:
        trace.append(trace[-2] * trace[-2] + trace[-1] * trace[-1])
    gen = FieldElement.generator() ** (3 * 2 ** 20)
    points = [gen ** i for i in range(1024)]
    h_gen = FieldElement.generator() ** ((2 ** 30 * 3) // 8192)
    h = [h_gen ** i for i in range(8192)]
    # Shift the subgroup by the field generator to get the coset domain.
    domain = [FieldElement.generator() * x for x in h]
    poly = interpolate_poly(points[:-1], trace)
    evaluations = [poly.eval(d) for d in domain]
    commitment = MerkleTree(evaluations)
    channel = Channel()
    channel.send(commitment.root)
    return (trace, gen, points, h_gen, h, domain, poly, evaluations,
            commitment, channel)
Exemplo n.º 22
0
 def __init__(self):
     """Create a Dynamo node, add it to the shared ring, and start the
     failed-node retry timer."""
     super(DynamoNode, self).__init__()
     self.local_store = MerkleTree()  # key => (value, metadata)
     self.pending_put_rsp = {}  # seqno => set of nodes that have stored
     self.pending_put_msg = {}  # seqno => original client message
     self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
     self.pending_get_msg = {}  # seqno => original client message
     # seqno => set of requests sent to other nodes, for each message class
     self.pending_req = {PutReq: {}, GetReq: {}}
     self.failed_nodes = []
     self.pending_handoffs = {}
     # Rebuild the consistent hash table so it includes this new node
     DynamoNode.nodelist.append(self)
     DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
     # Run a timer to retry failed nodes
     self.retry_failed_node("retry")
Exemplo n.º 23
0
def part3():
    """STARK 101 part 3: FRI-fold the composition polynomial layer by layer
    until it is constant, committing each layer's root to the channel."""
    cp, cp_ev, cp_mt, ch, domain = part2()
    # FRI commitment loop (same shape as FriCommit).
    fri_polys, fri_doms = [cp], [domain]
    fri_layers, merkles = [cp_ev], [cp_mt]
    while fri_polys[-1].degree() > 0:
        alpha = ch.receive_random_field_element()
        folded_poly, folded_dom, folded_layer = next_fri_layer(
            fri_polys[-1], fri_doms[-1], alpha)
        fri_polys.append(folded_poly)
        fri_doms.append(folded_dom)
        fri_layers.append(folded_layer)
        layer_tree = MerkleTree(folded_layer)
        merkles.append(layer_tree)
        ch.send(layer_tree.root)
    # The final constant polynomial is committed via its free term.
    ch.send(str(fri_polys[-1].poly[0]))
    return fri_polys, fri_doms, fri_layers, merkles, ch
Exemplo n.º 24
0
class DynamoNode(Node):
    """Simulated Dynamo storage node: N-way replication over a consistent
    hash ring, W-write / R-read quorums, and hinted handoff for failed
    nodes."""
    timer_priority = 20
    T = 10  # Number of repeats for nodes in consistent hash table
    N = 3  # Number of nodes to replicate at
    W = 2  # Number of nodes that need to reply to a write operation
    R = 2  # Number of nodes that need to reply to a read operation
    nodelist = []
    chash = ConsistentHashTable(nodelist, T)

    def __init__(self):
        """Register this node in the shared ring and start the retry timer."""
        super(DynamoNode, self).__init__()
        self.local_store = MerkleTree()  # key => (value, metadata)
        self.pending_put_rsp = {}  # seqno => set of nodes that have stored
        self.pending_put_msg = {}  # seqno => original client message
        self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
        self.pending_get_msg = {}  # seqno => original client message
        # seqno => set of requests sent to other nodes, for each message class
        self.pending_req = {PutReq: {}, GetReq: {}}
        self.failed_nodes = []
        self.pending_handoffs = {}
        # Rebuild the consistent hash table
        DynamoNode.nodelist.append(self)
        DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
        # Run a timer to retry failed nodes
        self.retry_failed_node("retry")

# PART reset
    @classmethod
    def reset(cls):
        """Forget all nodes and rebuild an empty hash ring (test helper)."""
        cls.nodelist = []
        cls.chash = ConsistentHashTable(cls.nodelist, cls.T)

# PART storage
    def store(self, key, value, metadata):
        """Store (value, metadata) for *key* in the local store."""
        self.local_store[key] = (value, metadata)

    def retrieve(self, key):
        """Return (value, metadata) for *key*, or (None, None) if absent."""
        if key in self.local_store:
            return self.local_store[key]
        else:
            return (None, None)

# PART retry_failed_node
    def retry_failed_node(self, _):  # Permanently repeating timer
        """Ping the oldest failed node (if any), then re-arm this timer."""
        if self.failed_nodes:
            node = self.failed_nodes.pop(0)
            # Send a test message to the oldest failed node
            pingmsg = PingReq(self, node)
            Framework.send_message(pingmsg)
        # Restart the timer
        TimerManager.start_timer(self, reason="retry", priority=15, callback=self.retry_failed_node)

    def rcv_pingreq(self, pingmsg):
        """Always reply to a ping so the sender knows we are alive."""
        pingrsp = PingRsp(pingmsg)
        Framework.send_message(pingrsp)

    def rcv_pingrsp(self, pingmsg):
        """A failed node answered: un-fail it and replay any pending
        hinted handoffs for keys it should hold."""
        # Remove all instances of recovered node from failed node list
        recovered_node = pingmsg.from_node
        while recovered_node in self.failed_nodes:
            self.failed_nodes.remove(recovered_node)
        if recovered_node in self.pending_handoffs:
            for key in self.pending_handoffs[recovered_node]:
                # Send our latest value for this key
                (value, metadata) = self.retrieve(key)
                putmsg = PutReq(self, recovered_node, key, value, metadata)
                Framework.send_message(putmsg)
            del self.pending_handoffs[recovered_node]

# PART rsp_timer_pop
    def rsp_timer_pop(self, reqmsg):
        """A request timed out: mark its destination failed and re-route
        every outstanding request to that node."""
        # no response to this request; treat the destination node as failed
        _logger.info("Node %s now treating node %s as failed", self, reqmsg.to_node)
        self.failed_nodes.append(reqmsg.to_node)
        failed_requests = Framework.cancel_timers_to(reqmsg.to_node)
        failed_requests.append(reqmsg)
        for failedmsg in failed_requests:
            self.retry_request(failedmsg)

    def retry_request(self, reqmsg):
        """Re-send a failed Dynamo request to a fresh node from the
        regenerated preference list (skipping nodes already tried)."""
        if not isinstance(reqmsg, DynamoRequestMessage):
            return
        # Send the request to an additional node by regenerating the preference list
        preference_list = DynamoNode.chash.find_nodes(reqmsg.key, DynamoNode.N, self.failed_nodes)[0]
        kls = reqmsg.__class__
        # Check the pending-request list for this type of request message
        if kls in self.pending_req and reqmsg.msg_id in self.pending_req[kls]:
            for node in preference_list:
                if node not in [req.to_node for req in self.pending_req[kls][reqmsg.msg_id]]:
                    # Found a node on the new preference list that hasn't been sent the request.
                    # Send it a copy
                    newreqmsg = copy.copy(reqmsg)
                    newreqmsg.to_node = node
                    self.pending_req[kls][reqmsg.msg_id].add(newreqmsg)
                    Framework.send_message(newreqmsg)

# PART rcv_clientput
    def rcv_clientput(self, msg):
        """Handle a client put: forward to the coordinator if we are not on
        the preference list, otherwise fan the write out to N nodes (with
        handoff hints for replicas standing in for failed nodes)."""
        preference_list, avoided = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)
        non_extra_count = DynamoNode.N - len(avoided)
        # Determine if we are in the list
        if self not in preference_list:
            # Forward to the coordinator for this key
            _logger.info("put(%s=%s) maps to %s", msg.key, msg.value, preference_list)
            coordinator = preference_list[0]
            Framework.forward_message(msg, coordinator)
        else:
            # Use an incrementing local sequence number to distinguish
            # multiple requests for the same key
            seqno = self.generate_sequence_number()
            _logger.info("%s, %d: put %s=%s", self, seqno, msg.key, msg.value)
            metadata = (self.name, seqno)  # For now, metadata is just sequence number at coordinator
            # Send out to preference list, and keep track of who has replied
            self.pending_req[PutReq][seqno] = set()
            self.pending_put_rsp[seqno] = set()
            self.pending_put_msg[seqno] = msg
            reqcount = 0
            for ii, node in enumerate(preference_list):
                if ii >= non_extra_count:
                    # This is an extra node that's only included because of a failed node
                    handoff = avoided
                else:
                    handoff = None
                # Send message to get node in preference list to store
                putmsg = PutReq(self, node, msg.key, msg.value, metadata, msg_id=seqno, handoff=handoff)
                self.pending_req[PutReq][seqno].add(putmsg)
                Framework.send_message(putmsg)
                reqcount = reqcount + 1
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break

# PART rcv_clientget
    def rcv_clientget(self, msg):
        """Handle a client get: forward to the coordinator if we are not on
        the preference list, otherwise fan the read out to N nodes."""
        preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
        # Determine if we are in the list
        if self not in preference_list:
            # Forward to the coordinator for this key
            _logger.info("get(%s=?) maps to %s", msg.key, preference_list)
            coordinator = preference_list[0]
            Framework.forward_message(msg, coordinator)
        else:
            seqno = self.generate_sequence_number()
            self.pending_req[GetReq][seqno] = set()
            self.pending_get_rsp[seqno] = set()
            self.pending_get_msg[seqno] = msg
            reqcount = 0
            for node in preference_list:
                getmsg = GetReq(self, node, msg.key, msg_id=seqno)
                self.pending_req[GetReq][seqno].add(getmsg)
                Framework.send_message(getmsg)
                reqcount = reqcount + 1
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break

# PART rcv_put
    def rcv_put(self, putmsg):
        """Store a replicated value; record any handoff hints so the value
        is replayed to the failed node once it recovers."""
        _logger.info("%s: store %s=%s", self, putmsg.key, putmsg.value)
        self.store(putmsg.key, putmsg.value, putmsg.metadata)
        if putmsg.handoff is not None:
            for failed_node in putmsg.handoff:
                self.failed_nodes.append(failed_node)
                if failed_node not in self.pending_handoffs:
                    self.pending_handoffs[failed_node] = set()
                self.pending_handoffs[failed_node].add(putmsg.key)
        putrsp = PutRsp(putmsg)
        Framework.send_message(putrsp)

# PART rcv_putrsp
    def rcv_putrsp(self, putrsp):
        """Count write acknowledgements; once W replicas have stored the
        value, answer the original client and drop the tracking state."""
        seqno = putrsp.msg_id
        if seqno in self.pending_put_rsp:
            self.pending_put_rsp[seqno].add(putrsp.from_node)
            if len(self.pending_put_rsp[seqno]) >= DynamoNode.W:
                _logger.info("%s: written %d copies of %s=%s so done", self, DynamoNode.W, putrsp.key, putrsp.value)
                _logger.debug("  copies at %s", [node.name for node in self.pending_put_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_put_msg[seqno]
                del self.pending_req[PutReq][seqno]
                del self.pending_put_rsp[seqno]
                del self.pending_put_msg[seqno]
                # Reply to the original client
                client_putrsp = ClientPutRsp(original_msg)
                Framework.send_message(client_putrsp)
        else:
            pass  # Superfluous reply

# PART rcv_get
    def rcv_get(self, getmsg):
        """Answer a replica read with our local value (or (None, None))."""
        _logger.info("%s: retrieve %s=?", self, getmsg.key)
        (value, metadata) = self.retrieve(getmsg.key)
        getrsp = GetRsp(getmsg, value, metadata)
        Framework.send_message(getrsp)

# PART rcv_getrsp
    def rcv_getrsp(self, getrsp):
        """Count read responses; once R replicas have answered, send the
        client every distinct (value, metadata) pair seen."""
        seqno = getrsp.msg_id
        if seqno in self.pending_get_rsp:
            self.pending_get_rsp[seqno].add((getrsp.from_node, getrsp.value, getrsp.metadata))
            if len(self.pending_get_rsp[seqno]) >= DynamoNode.R:
                _logger.info("%s: read %d copies of %s=? so done", self, DynamoNode.R, getrsp.key)
                _logger.debug("  copies at %s", [(node.name, value) for (node, value, _) in self.pending_get_rsp[seqno]])
                # Build up all the distinct values/metadata values for the response to the original request
                results = set([(value, metadata) for (node, value, metadata) in self.pending_get_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_get_msg[seqno]
                del self.pending_req[GetReq][seqno]
                del self.pending_get_rsp[seqno]
                del self.pending_get_msg[seqno]
                # Reply to the original client, including all received values
                client_getrsp = ClientGetRsp(original_msg,
                                             [value for (value, metadata) in results],
                                             [metadata for (value, metadata) in results])
                Framework.send_message(client_getrsp)
        else:
            pass  # Superfluous reply

# PART rcvmsg
    def rcvmsg(self, msg):
        """Dispatch an incoming message to the matching rcv_* handler."""
        if isinstance(msg, ClientPut):
            self.rcv_clientput(msg)
        elif isinstance(msg, PutReq):
            self.rcv_put(msg)
        elif isinstance(msg, PutRsp):
            self.rcv_putrsp(msg)
        elif isinstance(msg, ClientGet):
            self.rcv_clientget(msg)
        elif isinstance(msg, GetReq):
            self.rcv_get(msg)
        elif isinstance(msg, GetRsp):
            self.rcv_getrsp(msg)
        elif isinstance(msg, PingReq):
            self.rcv_pingreq(msg)
        elif isinstance(msg, PingRsp):
            self.rcv_pingrsp(msg)
        else:
            # Fix: the message must be %-formatted; TypeError does not do
            # lazy %-interpolation like the logging module does.
            raise TypeError("Unexpected message type %s" % msg.__class__)

# PART get_contents
    def get_contents(self):
        """Return a list of 'key:value' strings for everything stored here."""
        results = []
        for key, value in self.local_store.items():
            results.append("%s:%s" % (key, value[0]))
        return results
Exemplo n.º 25
0
# Demo fixture: three coin-creation transactions from one creator address,
# committed to a Merkle tree.  (The hard-coded hashes were originally
# produced by get_hash("Creator") / get_hash("Vova") / get_hash("Alice") /
# get_hash("Bob"); see the commented history in version control.)

_CREATOR = '14d6f42ada24c3c1a6b839a574fa1dc0c2629011fc732635635e6c6b78192fd1'
_TIMESTAMP = 1569761836.1613867

trx1 = Transaction(
    _CREATOR,
    '225e2708d54a4e8e0bfe2393dc2c28a32f2b3dd355706afb49363de7ddbf4c58',
    _TIMESTAMP, 150)
trx2 = Transaction(
    _CREATOR,
    '150653f51df5e2aa0bda45b6f68ab4fd2c9c5620042baf6124bf5af302780115',
    _TIMESTAMP, 150)
trx3 = Transaction(
    _CREATOR,
    'ec4006a60556d0f521847f487c4e0c57ee1a84982aa64a4fac2837fc71356d51',
    _TIMESTAMP, 150)
trx_list = [trx1, trx2, trx3]
tree = MerkleTree(trx_list)
num = tree.getRootHash()
Exemplo n.º 26
0
class DynamoNode(Node):
    """A node in a simulated Dynamo-style replicated key/value store.

    Each node keeps a local store and participates in quorum-based puts and
    gets: the coordinator fans a request out to the first N live nodes on the
    consistent-hash preference list and replies to the client once W (write)
    or R (read) responses arrive.  Failed peers are pinged periodically and
    removed from the failed list when they answer.
    """
    timer_priority = 20
    T = 10  # Number of repeats for nodes in consistent hash table
    N = 3  # Number of nodes to replicate at
    W = 2  # Number of nodes that need to reply to a write operation
    R = 2  # Number of nodes that need to reply to a read operation
    nodelist = []
    chash = ConsistentHashTable(nodelist, T)

    def __init__(self):
        """Register this node in the class-wide ring and start the retry timer."""
        super(DynamoNode, self).__init__()
        self.local_store = MerkleTree()  # key => (value, metadata)
        self.pending_put_rsp = {}  # seqno => set of nodes that have stored
        self.pending_put_msg = {}  # seqno => original client message
        self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
        self.pending_get_msg = {}  # seqno => original client message
        # seqno => set of requests sent to other nodes, for each message class
        self.pending_req = {PutReq: {}, GetReq: {}}
        self.failed_nodes = []
        self.pending_handoffs = {}
        # Rebuild the consistent hash table
        DynamoNode.nodelist.append(self)
        DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
        # Run a timer to retry failed nodes
        self.retry_failed_node("retry")

# PART reset
    @classmethod
    def reset(cls):
        """Forget all known nodes and rebuild an empty hash ring (test helper)."""
        cls.nodelist = []
        cls.chash = ConsistentHashTable(cls.nodelist, cls.T)

# PART storage
    def store(self, key, value, metadata):
        """Store (value, metadata) for key in this node's local store."""
        self.local_store[key] = (value, metadata)

    def retrieve(self, key):
        """Return the locally stored (value, metadata) for key, or (None, None)."""
        if key in self.local_store:
            return self.local_store[key]
        else:
            return (None, None)

# PART retry_failed_node
    def retry_failed_node(self, _):  # Permanently repeating timer
        """Ping the oldest failed node (if any), then re-arm the retry timer."""
        if self.failed_nodes:
            node = self.failed_nodes.pop(0)
            # Send a test message to the oldest failed node
            pingmsg = PingReq(self, node)
            Framework.send_message(pingmsg)
        # Restart the timer
        TimerManager.start_timer(self, reason="retry", priority=15, callback=self.retry_failed_node)

    def rcv_pingreq(self, pingmsg):
        # Always reply to a test message
        pingrsp = PingRsp(pingmsg)
        Framework.send_message(pingrsp)

    def rcv_pingrsp(self, pingmsg):
        # Remove all instances of recovered node from failed node list
        recovered_node = pingmsg.from_node
        while recovered_node in self.failed_nodes:
            self.failed_nodes.remove(recovered_node)

# PART rsp_timer_pop
    def rsp_timer_pop(self, reqmsg):
        """Handle a response timeout: mark the peer failed and retry its requests."""
        # no response to this request; treat the destination node as failed
        _logger.info("Node %s now treating node %s as failed", self, reqmsg.to_node)
        self.failed_nodes.append(reqmsg.to_node)
        failed_requests = Framework.cancel_timers_to(reqmsg.to_node)
        failed_requests.append(reqmsg)
        for failedmsg in failed_requests:
            self.retry_request(failedmsg)

    def retry_request(self, reqmsg):
        """Resend reqmsg to a node on the regenerated preference list not yet tried."""
        if not isinstance(reqmsg, DynamoRequestMessage):
            return
        # Send the request to an additional node by regenerating the preference list
        preference_list = DynamoNode.chash.find_nodes(reqmsg.key, DynamoNode.N, self.failed_nodes)[0]
        kls = reqmsg.__class__
        # Check the pending-request list for this type of request message
        if kls in self.pending_req and reqmsg.msg_id in self.pending_req[kls]:
            for node in preference_list:
                if node not in [req.to_node for req in self.pending_req[kls][reqmsg.msg_id]]:
                    # Found a node on the new preference list that hasn't been sent the request.
                    # Send it a copy
                    newreqmsg = copy.copy(reqmsg)
                    newreqmsg.to_node = node
                    self.pending_req[kls][reqmsg.msg_id].add(newreqmsg)
                    Framework.send_message(newreqmsg)

# PART rcv_clientput
    def rcv_clientput(self, msg):
        """Coordinate a client put: forward if not responsible, else fan out PutReqs."""
        preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
        # Determine if we are in the list
        if self not in preference_list:
            # Forward to the coordinator for this key
            _logger.info("put(%s=%s) maps to %s", msg.key, msg.value, preference_list)
            coordinator = preference_list[0]
            Framework.forward_message(msg, coordinator)
        else:
            # Use an incrementing local sequence number to distinguish
            # multiple requests for the same key
            seqno = self.generate_sequence_number()
            _logger.info("%s, %d: put %s=%s", self, seqno, msg.key, msg.value)
            metadata = (self.name, seqno)  # For now, metadata is just sequence number at coordinator
            # Send out to preference list, and keep track of who has replied
            self.pending_req[PutReq][seqno] = set()
            self.pending_put_rsp[seqno] = set()
            self.pending_put_msg[seqno] = msg
            reqcount = 0
            for node in preference_list:
                # Send message to get node in preference list to store
                putmsg = PutReq(self, node, msg.key, msg.value, metadata, msg_id=seqno)
                self.pending_req[PutReq][seqno].add(putmsg)
                Framework.send_message(putmsg)
                reqcount = reqcount + 1
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break

# PART rcv_clientget
    def rcv_clientget(self, msg):
        """Coordinate a client get: forward if not responsible, else fan out GetReqs."""
        preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
        # Determine if we are in the list
        if self not in preference_list:
            # Forward to the coordinator for this key
            _logger.info("get(%s=?) maps to %s", msg.key, preference_list)
            coordinator = preference_list[0]
            Framework.forward_message(msg, coordinator)
        else:
            seqno = self.generate_sequence_number()
            self.pending_req[GetReq][seqno] = set()
            self.pending_get_rsp[seqno] = set()
            self.pending_get_msg[seqno] = msg
            reqcount = 0
            for node in preference_list:
                getmsg = GetReq(self, node, msg.key, msg_id=seqno)
                self.pending_req[GetReq][seqno].add(getmsg)
                Framework.send_message(getmsg)
                reqcount = reqcount + 1
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break

# PART rcv_put
    def rcv_put(self, putmsg):
        """Store a replicated value locally and acknowledge with a PutRsp."""
        _logger.info("%s: store %s=%s", self, putmsg.key, putmsg.value)
        self.store(putmsg.key, putmsg.value, putmsg.metadata)
        putrsp = PutRsp(putmsg)
        Framework.send_message(putrsp)

# PART rcv_putrsp
    def rcv_putrsp(self, putrsp):
        """Collect put acknowledgements; reply to the client once W have arrived."""
        seqno = putrsp.msg_id
        if seqno in self.pending_put_rsp:
            self.pending_put_rsp[seqno].add(putrsp.from_node)
            if len(self.pending_put_rsp[seqno]) >= DynamoNode.W:
                _logger.info("%s: written %d copies of %s=%s so done", self, DynamoNode.W, putrsp.key, putrsp.value)
                _logger.debug("  copies at %s", [node.name for node in self.pending_put_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_put_msg[seqno]
                del self.pending_req[PutReq][seqno]
                del self.pending_put_rsp[seqno]
                del self.pending_put_msg[seqno]
                # Reply to the original client
                client_putrsp = ClientPutRsp(original_msg)
                Framework.send_message(client_putrsp)
        else:
            pass  # Superfluous reply

# PART rcv_get
    def rcv_get(self, getmsg):
        """Look up a key locally and reply with a GetRsp carrying value+metadata."""
        _logger.info("%s: retrieve %s=?", self, getmsg.key)
        (value, metadata) = self.retrieve(getmsg.key)
        getrsp = GetRsp(getmsg, value, metadata)
        Framework.send_message(getrsp)

# PART rcv_getrsp
    def rcv_getrsp(self, getrsp):
        """Collect get responses; reply to the client once R have arrived."""
        seqno = getrsp.msg_id
        if seqno in self.pending_get_rsp:
            self.pending_get_rsp[seqno].add((getrsp.from_node, getrsp.value, getrsp.metadata))
            if len(self.pending_get_rsp[seqno]) >= DynamoNode.R:
                _logger.info("%s: read %d copies of %s=? so done", self, DynamoNode.R, getrsp.key)
                _logger.debug("  copies at %s", [(node.name, value) for (node, value, _) in self.pending_get_rsp[seqno]])
                # Build up all the distinct values/metadata values for the response to the original request
                results = set([(value, metadata) for (node, value, metadata) in self.pending_get_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_get_msg[seqno]
                del self.pending_req[GetReq][seqno]
                del self.pending_get_rsp[seqno]
                del self.pending_get_msg[seqno]
                # Reply to the original client, including all received values
                client_getrsp = ClientGetRsp(original_msg,
                                             [value for (value, metadata) in results],
                                             [metadata for (value, metadata) in results])
                Framework.send_message(client_getrsp)
        else:
            pass  # Superfluous reply

# PART rcvmsg
    def rcvmsg(self, msg):
        """Dispatch an incoming message to the matching rcv_* handler."""
        if isinstance(msg, ClientPut):
            self.rcv_clientput(msg)
        elif isinstance(msg, PutReq):
            self.rcv_put(msg)
        elif isinstance(msg, PutRsp):
            self.rcv_putrsp(msg)
        elif isinstance(msg, ClientGet):
            self.rcv_clientget(msg)
        elif isinstance(msg, GetReq):
            self.rcv_get(msg)
        elif isinstance(msg, GetRsp):
            self.rcv_getrsp(msg)
        elif isinstance(msg, PingReq):
            self.rcv_pingreq(msg)
        elif isinstance(msg, PingRsp):
            self.rcv_pingrsp(msg)
        else:
            # BUG FIX: the message must be %-formatted into the string; passing
            # it as a second constructor argument made the exception text a tuple.
            raise TypeError("Unexpected message type %s" % msg.__class__)

# PART get_contents
    def get_contents(self):
        """Return a list of 'key:value' strings describing the local store."""
        results = []
        for key, value in self.local_store.items():
            results.append("%s:%s" % (key, value[0]))
        return results
Exemplo n.º 27
0
class DynamoNode(Node):
    """Dynamo-style storage node persisting to LevelDB and probing peers over zerorpc.

    Unlike the purely simulated variant, this node stores key/value pairs in a
    per-node LevelDB database, reads its peer addresses from a config file,
    and detects peer recovery by calling ``are_you_there`` over TCP.  Hinted
    handoff is tracked in ``pending_handoffs`` and replayed on recovery.

    NOTE(review): this is Python 2 code (``print`` statements below).
    """
    timer_priority = 20
    T = 10  # Number of repeats for nodes in consistent hash table
    N = 2  # Number of nodes to replicate at
    W = 2  # Number of nodes that need to reply to a write operation
    R = 2  # Number of nodes that need to reply to a read operation
    nodelist = []
    chash = ConsistentHashTable(nodelist, T)

    def __init__(self, addr, config_file='server_config'):
        """Initialise the node at `addr`, loading one peer address per line of
        `config_file`, and open (or create) the './<addr>_db' LevelDB database.
        """
        super(DynamoNode, self).__init__()
        self.local_store = MerkleTree()  # key => (value, metadata)
        self.pending_put_rsp = {}  # seqno => set of nodes that have stored
        self.pending_put_msg = {}  # seqno => original client message
        self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
        self.pending_get_msg = {}  # seqno => original client message
        # seqno => set of requests sent to other nodes, for each message class
        self.pending_req = {PutReq: {}, GetReq: {}}
        self.failed_nodes = []
        self.pending_handoffs = {}
        # Rebuild the consistent hash table
        self.addr = addr
        self.servers = []
        self.db = leveldb.LevelDB('./' + addr + '_db')
        # Peer list: one address per line in the config file.
        f = open(config_file, 'r')
        for line in f.readlines():
            line = line.rstrip()
            self.servers.append(line)
        for i, server in enumerate(self.servers):
            DynamoNode.nodelist.append(server)
        DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
        # Run a timer to retry failed nodes
        #self.pool = gevent.pool.Group()
        #self.pool.spawn(self.retry_failed_node)

        Framework.setNodes(DynamoNode.nodelist)
        # wt = Thread(target=self.retry_failed_node)
        #wt.start()


# PART reset
    @classmethod
    def reset(cls):
        """Forget all known nodes and rebuild an empty hash ring (test helper)."""
        cls.nodelist = []
        cls.chash = ConsistentHashTable(cls.nodelist, cls.T)

# PART storage
    def store(self, key, value, metadata):
        """Persist (value, metadata) for `key` in LevelDB as one pickled blob."""
        self.db.Put(key,pickle.dumps((value,metadata)))

    def retrieve(self, key):
        """Load and unpickle the (value, metadata) stored for `key`.

        NOTE(review): unlike the in-memory variant (commented out below), a
        missing key raises from LevelDB rather than returning (None, None) —
        confirm callers handle that.
        """
        #print '----------------' + str(key) + '-------------------------'
        return pickle.loads(self.db.Get(key))
        #if key in self.local_store:
        #    return self.local_store[key]
        #else:
        #    return (None, None)

    def addFailedNode(self, node):
        """Record `node` as failed and ask the framework to block traffic to it."""
        Framework.updateBlock(node.key)
        if node.key not in self.failed_nodes:
            self.failed_nodes.append(node.key)

    def are_you_there(self):
        # Liveness probe target: peers call this over zerorpc; a successful
        # return (no timeout) is the "alive" signal.
        return
# PART retry_failed_node
    def retry_failed_node(self):  # Permanently repeating timer
        """Poll every failed peer once per second over zerorpc; run recovery
        for any peer that answers the probe."""
        while True:
            #print 'try'
            if self.failed_nodes:
                #print self.failed_nodes
                for node in self.failed_nodes:
                #node = self.failed_nodes.pop(0)
                ##print self.failed_nodes
                # Send a test message to the oldest failed node
                    #print 'ping' + str(node)
                    c = zerorpc.Client(timeout=1)
                    c.connect('tcp://' + str(node))
                    try:
                        c.are_you_there()
                        c.close()
                        #print 'back'
                        self.recovery(node)
                    except:
                        # Peer still down (or probe timed out); try again next round.
                        continue
                    #pingmsg = PingReq(self.addr, node)
                    #Framework.send_message(pingmsg)
                    #Framework.schedule(timers_to_process=0)
            #Framework.schedule(timers_to_process=0)
            time.sleep(1)
        # Restart the timer
        #TimerManager.start_timer(self, reason="retry", priority=15, callback=self.retry_failed_node)

    def rcv_pingreq(self, pingmsg):
        # Always reply to a test message
        pingrsp = PingRsp(pingmsg)
        Framework.send_message(pingrsp)

    def recovery(self, node):
        """Un-mark `node` as failed and replay any hinted-handoff keys to it."""
        # Remove all instances of recovered node from failed node list
        #print 'recover+++++++++++++++++++++++++++'
        recovered_node = node
        while recovered_node in self.failed_nodes:
            self.failed_nodes.remove(recovered_node)
            Framework.clearBlock(recovered_node)
        if recovered_node in self.pending_handoffs:
            for key in self.pending_handoffs[recovered_node]:
                #print 'recovery ---------------------------------'
                # Send our latest value for this key
                (value, metadata) = self.retrieve(key)
                putmsg = PutReq(self.addr, recovered_node, key, value, metadata)
                Framework.send_message(putmsg)
            Framework.schedule()
            del self.pending_handoffs[recovered_node]

    def rcv_pingrsp(self, pingmsg):
        """Message-based recovery path, mirroring recovery() for PingRsp messages."""
        # Remove all instances of recovered node from failed node list
        recovered_node = pingmsg.from_node
        while recovered_node in self.failed_nodes:
            self.failed_nodes.remove(recovered_node)
            Framework.clearBlock(recovered_node)
        if recovered_node in self.pending_handoffs:
            for key in self.pending_handoffs[recovered_node]:
                #print 'recovery ---------------------------------'
                # Send our latest value for this key
                (value, metadata) = self.retrieve(key)
                putmsg = PutReq(self, recovered_node, key, value, metadata)
                Framework.send_message(putmsg)
            #print 'schedule'
            Framework.schedule(timers_to_process=0)
            del self.pending_handoffs[recovered_node]

# PART rsp_timer_pop
    def rsp_timer_pop(self, reqmsg):
        """Handle a response timeout: mark the peer failed and retry its requests."""
        # no response to this request; treat the destination node as failed
        _logger.info("Node %s now treating node %s as failed", self, reqmsg.to_node)
        self.failed_nodes.append(reqmsg.to_node)
        failed_requests = Framework.cancel_timers_to(reqmsg.to_node)
        failed_requests.append(reqmsg)
        for failedmsg in failed_requests:
            self.retry_request(failedmsg)

    def retry_request(self, reqmsg):
        """Resend reqmsg to a node on the regenerated preference list not yet tried."""
        if not isinstance(reqmsg, DynamoRequestMessage):
            return
        # Send the request to an additional node by regenerating the preference list
        preference_list = DynamoNode.chash.find_nodes(reqmsg.key, DynamoNode.N, self.failed_nodes)[0]
        kls = reqmsg.__class__
        # Check the pending-request list for this type of request message
        if kls in self.pending_req and reqmsg.msg_id in self.pending_req[kls]:
            for node in preference_list:
                if node not in [req.to_node for req in self.pending_req[kls][reqmsg.msg_id]]:
                    # Found a node on the new preference list that hasn't been sent the request.
                    # Send it a copy
                    newreqmsg = copy.copy(reqmsg)
                    newreqmsg.to_node = node
                    self.pending_req[kls][reqmsg.msg_id].add(newreqmsg)
                    Framework.send_message(newreqmsg)

# PART rcv_clientput
    def rcv_clientput(self, msg):
        """Coordinate a client put with vector-clock metadata and hinted handoff.

        Nodes past `non_extra_count` on the preference list are only present
        because of failures, so they receive the `avoided` list as handoff hints.
        """
        preference_list, avoided = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)
        #print 'preference list:'
        #print preference_list
        non_extra_count = DynamoNode.N - len(avoided)
        # Determine if we are in the list
        if self.addr not in preference_list:
            # Forward to the coordinator for this key
            _logger.info("put(%s=%s) maps to %s", msg.key, msg.value, preference_list)
            coordinator = preference_list[0]
            Framework.forward_message(msg, coordinator)
        else:
            # Use an incrementing local sequence number to distinguish
            # multiple requests for the same key
            seqno = self.generate_sequence_number()
            _logger.info("%s, %d: put %s=%s", self, seqno, msg.key, msg.value)
            # The metadata for a key is passed in by the client, and updated by the coordinator node.
            metadata = copy.deepcopy(msg.metadata)
            metadata.update(self.name, seqno)
            # Send out to preference list, and keep track of who has replied
            self.pending_req[PutReq][seqno] = set()
            self.pending_put_rsp[seqno] = set()
            self.pending_put_msg[seqno] = msg
            reqcount = 0
            for ii, node in enumerate(preference_list):
                if ii >= non_extra_count:
                    # This is an extra node that's only included because of a failed node
                    handoff = avoided
                else:
                    handoff = None
                # Send message to get node in preference list to store
                putmsg = PutReq(self.addr, node, msg.key, msg.value, metadata, msg_id=seqno, handoff=handoff)
                if seqno in self.pending_req[PutReq].keys():
                    self.pending_req[PutReq][seqno].add(putmsg)
                Framework.send_message(putmsg)
                reqcount = reqcount + 1
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break

# PART rcv_clientget
    def rcv_clientget(self, msg):
        """Coordinate a client get: forward if not responsible, else fan out GetReqs."""
        preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
        # Determine if we are in the list
        #print preference_list
        if self.addr not in preference_list:
            # Forward to the coordinator for this key
            _logger.info("get(%s=?) maps to %s", msg.key, preference_list)
            coordinator = preference_list[0]
            Framework.forward_message(msg, coordinator)
        else:
            seqno = self.generate_sequence_number()
            self.pending_req[GetReq][seqno] = set()
            self.pending_get_rsp[seqno] = set()
            self.pending_get_msg[seqno] = msg
            reqcount = 0
            for node in preference_list:
                getmsg = GetReq(self.addr, node, msg.key, msg_id=seqno)
                self.pending_req[GetReq][seqno].add(getmsg)
                Framework.send_message(getmsg)
                reqcount = reqcount + 1
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break

# PART rcv_put
    def rcv_put(self, putmsg):
        """Store a replicated value; record any handoff hints before acknowledging."""
        _logger.info("%s: store %s=%s", self, putmsg.key, putmsg.value)
        self.store(putmsg.key, putmsg.value, putmsg.metadata)
        if putmsg.handoff is not None:
            # We are holding this key on behalf of failed node(s); remember to
            # hand it back when they recover.
            for failed_node in putmsg.handoff:
                if failed_node not in self.failed_nodes:
                    self.failed_nodes.append(failed_node)
                if failed_node not in self.pending_handoffs:
                    self.pending_handoffs[failed_node] = set()
                self.pending_handoffs[failed_node].add(putmsg.key)
        putrsp = PutRsp(putmsg)
        Framework.send_message(putrsp)

# PART rcv_putrsp
    def rcv_putrsp(self, putrsp):
        """Collect put acknowledgements; reply to the client once W have arrived."""
        seqno = putrsp.msg_id
        if seqno in self.pending_put_rsp:
            self.pending_put_rsp[seqno].add(putrsp.from_node)
            if len(self.pending_put_rsp[seqno]) >= DynamoNode.W:
                _logger.info("%s: written %d copies of %s=%s so done", self, DynamoNode.W, putrsp.key, putrsp.value)
                #_logger.debug("  copies at %s", [node.name for node in self.pending_put_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_put_msg[seqno]
                del self.pending_req[PutReq][seqno]
                del self.pending_put_rsp[seqno]
                del self.pending_put_msg[seqno]
                # Reply to the original client
                client_putrsp = ClientPutRsp(original_msg, putrsp.metadata)
                Framework.send_message(client_putrsp)
        else:
            pass  # Superfluous reply

# PART rcv_get
    def rcv_get(self, getmsg):
        """Look up a key locally and reply with a GetRsp carrying value+metadata."""
        _logger.info("%s: retrieve %s=?", self, getmsg.key)
        (value, metadata) = self.retrieve(getmsg.key)
        getrsp = GetRsp(getmsg, value, metadata)
        Framework.send_message(getrsp)

# PART rcv_getrsp
    def rcv_getrsp(self, getrsp):
        """Collect get responses; coalesce vector clocks and reply once R arrived."""
        seqno = getrsp.msg_id
        if seqno in self.pending_get_rsp:
            self.pending_get_rsp[seqno].add((getrsp.from_node, getrsp.value, getrsp.metadata))
            print len(self.pending_get_rsp[seqno])
            if len(self.pending_get_rsp[seqno]) >= DynamoNode.R:
                _logger.info("%s: read %d copies of %s=? so done", self, DynamoNode.R, getrsp.key)
                #_logger.debug("  copies at %s", [(node.name, value) for (node, value, _) in self.pending_get_rsp[seqno]])
                # Coalesce all compatible (value, metadata) pairs across the responses
                results = VectorClock.coalesce2([(value, metadata) for (node, value, metadata) in self.pending_get_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_get_msg[seqno]
                del self.pending_req[GetReq][seqno]
                del self.pending_get_rsp[seqno]
                del self.pending_get_msg[seqno]
                # Reply to the original client, including all received values
                client_getrsp = ClientGetRsp(original_msg,
                                             [value for (value, metadata) in results],
                                             [metadata for (value, metadata) in results])
                Framework.send_message(client_getrsp)
                #Framework.schedule(timers_to_process=0)
                Framework.schedule()
        else:
            pass  # Superfluous reply

# PART rcvmsg
    def rcvmsg(self, msg):
        """Unpickle a wire message and dispatch it to the matching rcv_* handler."""
        msg = pickle.loads(msg)
        if isinstance(msg, ClientPut):
            #print 'get ClientPut'
            self.rcv_clientput(msg)
        elif isinstance(msg, PutReq):
            #print 'get PutReq'
            self.rcv_put(msg)
        elif isinstance(msg, PutRsp):
            #print 'get PutRsp'
            self.rcv_putrsp(msg)
        elif isinstance(msg, ClientGet):
            #print 'get ClientGet'
            self.rcv_clientget(msg)
        elif isinstance(msg, GetReq):
            #print 'get GetReq'
            self.rcv_get(msg)
        elif isinstance(msg, GetRsp):
            #print 'get GetRsp'
            self.rcv_getrsp(msg)
        elif isinstance(msg, PingReq):
            #print 'get PingReq'
            self.rcv_pingreq(msg)
        elif isinstance(msg, PingRsp):
            #print 'get PingRsp'
            self.rcv_pingrsp(msg)
        elif isinstance(msg, BlockRsp):
            #print 'get block'
            self.addFailedNode(msg)
        else:
            raise TypeError("Unexpected message type %s", msg.__class__)

# PART get_contents
    def get_contents(self):
        """Return a list of 'key:value' strings describing the local store.

        NOTE(review): iterates self.local_store (the MerkleTree), not the
        LevelDB database that store()/retrieve() actually use — confirm this
        is intentional.
        """
        results = []
        for key, value in self.local_store.items():
            results.append("%s:%s" % (key, value[0]))
        return results

    def put_message(self, fromnode, key, value, metadata):
        """Client-facing entry point: wrap a put in a ClientPut and run the scheduler.

        The metadata arrives pickled; None means a brand-new key (fresh
        VectorClock), otherwise the client's clocks are converged first.
        """
        #print 'client put!!!'
        metadata = pickle.loads(metadata)
        if metadata is None:
            metadata = VectorClock()
        else:
            # A Put operation always implies convergence
            metadata = VectorClock.converge(metadata)
        putmsg = ClientPut(fromnode, self.addr, key, value, metadata)
        Framework.send_message(putmsg)
        # Framework.schedule(timers_to_process=0)
        Framework.schedule()

    def get_message(self, fromnode, key):
        """Client-facing entry point: wrap a get in a ClientGet and run the scheduler."""
        #print 'client get!!!'
        getmsg = ClientGet(fromnode, self.addr, key)
        #print '++++++++++' + str(key) + '+++++++++++++++++'
        Framework.send_message(getmsg)
        Framework.schedule()
Exemplo n.º 28
0
from Node import Node
from merkle import get_hash, Transaction, MerkleTree

# Rebuild the Merkle tree for the first block of the chain held by node "Vova"
# and print its root hash plus the root's direct children.

address = get_hash("Vova")
nodeV = Node(address)
# NOTE(review): accessed as an attribute, not called — presumably a property
# on Node; confirm against Node's definition.
chain = nodeV.get_chain_data

first_transactions = chain[0]['transactions']
trx_lst = []
for trans in first_transactions:
    # Re-create each serialized transaction dict as a Transaction object.
    # (The previous version copied the fields into unused throwaway locals.)
    trx_lst.append(Transaction(trans['moneyFrom'], trans['moneyWho'],
                               trans['when'], trans['amount']))
merkle_t = MerkleTree(trx_lst)
root = merkle_t.getRootHash()
print(f"root - {root}")
print(f"right - {merkle_t.getRootElement(merkle_t.nodes, []).getRightChild()}")
print(f"left - {merkle_t.getRootElement(merkle_t.nodes, []).getLeftChild()}")
Exemplo n.º 29
0
 def construct_merkle_tree(self):
     """Build a Merkle tree over this block's transactions and cache its root."""
     self.__merkle_tree_root = MerkleTree(self.__txns).root
Exemplo n.º 30
0
# Demo: build a Merkle tree over a fixed set of messages, print it, then
# produce and verify an inclusion proof for one randomly chosen message.
messages = [
    b'hello world', b'world hello', b'BTC', b'ETH', b'XLM', b'RawEncoder',
    b'encoder', b'sha256'
]
leaves = [sha256(msg, encoder=RawEncoder) for msg in messages]

print("Messages: ", messages)
print()

print("MerkleTree:")
m = MerkleTree.from_leaves(leaves)
m.print_hex()
print()

# Pick one message at random and prove its membership in the tree.
index = randint(0, len(messages) - 1)
print("Proof that message ", messages[index], "(", str(hexlify(leaves[index])),
      ") is part of the tree:")
proof = m.prove(messages[index])
MerkleTree.print_hex_proof(proof)
print()

verified = MerkleTree.verify(messages[index], proof)
print("Proof verified" if verified else "Proof failed verification")
Exemplo n.º 31
0
def compute_combine_tree_etag_from_list(tree_etag_list):
    """Combine a list of tree etags into a single Merkle-tree digest."""
    return MerkleTree(hash_list=tree_etag_list).digest()
Exemplo n.º 32
0
# STARK tutorial step: evaluate the trace polynomial on the evaluation coset
# and commit to the evaluations with a Merkle tree.
print('Success!')

# evaluate on a coset. use interpolate package
# f interpolates the trace values `a` over the generator points G (last point
# excluded); f_eval is its evaluation over the whole coset.
f = interpolate_poly(G[:-1], a)
f_eval = [f(d) for d in eval_domain]

# Test against a precomputed hash.
from hashlib import sha256
from channel import serialize
# Regression check: the serialized evaluations must match the tutorial's
# known digest, proving the interpolation/evaluation above is correct.
assert '1d357f674c27194715d1440f6a166e30855550cb8cb8efeb72827f6a1bf9b5bb' == sha256(serialize(f_eval).encode()).hexdigest()
print('Success!')


# Commitments
from merkle import MerkleTree
# Commit to the evaluations; the root must match the tutorial's expected value.
f_merkle = MerkleTree(f_eval)
assert f_merkle.root == '6c266a104eeaceae93c14ad799ce595ec8c2764359d7ad1b4b7c57a4da52be04'
print('Success!')

# Channel
# need to reduce using Fiat-Shamir. This converts to non-interactive
from channel import Channel
channel = Channel()
channel.send(f_merkle.root)


# print proof generated so far
print(channel.proof)


Exemplo n.º 33
0
 def calculate_merkleRoot(self):
     """Compute the Merkle-tree root of the transaction files and place it
     in the block header (hex-encoded, Python 2 style)."""
     tree = MerkleTree(str(self.transactionFiles))
     tree.build()
     root_hex = tree.root.val.encode('hex')
     self.header['merkleRoot'] = root_hex
Exemplo n.º 34
0
def compute_combine_tree_etag_from_list(tree_etag_list):
    """Fold the given tree-etag hashes into one combined Merkle digest."""
    combined = MerkleTree(hash_list=tree_etag_list)
    return combined.digest()
Exemplo n.º 35
0
class MerkleTreeTestCase(unittest.TestCase):
    """Unit tests for MerkleTree: root computation, proofs, and validation."""
    # Known root hash of an empty tree (hash of the empty string input).
    EMPTY_STR_HASH = 'c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470'

    def setUp(self):
        self.tree = MerkleTree()

    def _make_tree(self):
        """Build a 3-leaf tree (mixing hex-string and bytes inputs) and make() it."""
        self.tree.add_node('00')
        self.tree.add_node(bytes.fromhex('01'))
        self.tree.add_node('02')

        self.tree.make()

        # Check extra node of power of 2 is appended
        self.assertEqual(self.tree.node_count, 4)

    def test_get_root(self):
        """Root of the 3-leaf tree matches the precomputed vector."""
        self._make_tree()
        self.assertEqual(
            self.tree.get_root(),
            '8633f3f58bd5719152d1f244ad09616dbad359515721e8a59ad0eb1823ae3531')

    def test_get_root_empty(self):
        """An empty made tree hashes to the empty-string digest."""
        self.tree.make()
        self.assertEqual(self.tree.get_root(), self.EMPTY_STR_HASH)

    def test_get_root_without_make(self):
        """get_root() before make() must raise ValueError('tree is not ready')."""
        with self.assertRaises(ValueError) as e:
            self.tree.get_root()
        self.assertIn('tree is not ready', str(e.exception))

    def test_get_proof(self):
        """The proof for leaf 2 pairs it with the padding (empty) node first."""
        self._make_tree()
        proof = self.tree.get_proof(2)
        self.assertEqual(proof[0]['right'], self.EMPTY_STR_HASH)
        self.assertEqual(
            proof[1]['left'],
            '49d03a195e239b52779866b33024210fc7dc66e9c2998975c0aa45c1702549d5')

    def test_get_proof_without_make(self):
        """get_proof() before make() must raise ValueError('tree is not ready')."""
        with self.assertRaises(ValueError) as e:
            self.tree.get_proof(0)
        self.assertIn('tree is not ready', str(e.exception))

    def test_get_proof_invalid_index(self):
        """Indexes past the last node (padding included) are rejected."""
        self._make_tree()
        with self.assertRaises(ValueError) as e:
            self.tree.get_proof(4)
        self.assertIn('Specify the correct index', str(e.exception))

    def test_validate_proof(self):
        """A proof validates against the leaf value it was generated for."""
        self._make_tree()
        proof = self.tree.get_proof(2)
        self.assertTrue(
            self.tree.validate_proof(proof, '02', self.tree.get_root()))

    def test_validate_proof_invalid(self):
        """The same proof must fail for a different leaf value."""
        self._make_tree()
        proof = self.tree.get_proof(2)
        self.assertFalse(
            self.tree.validate_proof(proof, '01', self.tree.get_root()))
Exemplo n.º 36
0
class DynamoNode(Node):
    """One node of a Dynamo-style replicated key/value store (Python 2).

    Key placement uses the class-wide consistent hash ring ``chash``;
    client puts/gets fan out to a preference list of ``N`` nodes and
    complete once ``W`` (writes) or ``R`` (reads) replicas reply.
    The node also hosts a simple map/reduce (word counting) over the
    values it stores, coordinated over zerorpc.
    """
    timer_priority = 20
    T = 10  # Number of repeats for nodes in consistent hash table
    N = 2  # Number of nodes to replicate at
    W = 1  # Number of nodes that need to reply to a write operation
    R = 1  # Number of nodes that need to reply to a read operation
    nodelist = []
    chash = ConsistentHashTable(nodelist, T)

    def __init__(self, addr, config_file='server_config'):
        """Open the local LevelDB stores for *addr*, best-effort register
        this node with the membership server at ``self.m_addr``, and start
        the failed-node retry greenlet."""
        super(DynamoNode, self).__init__()
        self.framework = Framework()
        self.m_addr = "127.0.0.1:29009"
        self.local_store = MerkleTree()  # key => (value, metadata)
        self.pending_put_rsp = {}  # seqno => set of nodes that have stored
        self.pending_put_msg = {}  # seqno => original client message
        self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
        self.pending_get_msg = {}  # seqno => original client message
        # seqno => set of requests sent to other nodes, for each message class
        self.pending_req = {PutReq: {}, GetReq: {}}
        self.failed_nodes = []
        self.pending_handoffs = {}
        self.result = {}  # map-phase output: word => list of partial counts
        self.reduceResult = {}  # reduce-phase output: word => total count
        self.MapReduceDB = leveldb.LevelDB('./' + addr + '_mrdb')
        self.mapDict = {}  # key => value fetched back during the map phase
        # Rebuild the consistent hash table
        self.addr = addr
        self.servers = []
        self.db = leveldb.LevelDB('./' + addr + '_db')
        c = zerorpc.Client(timeout=3)
        c.connect('tcp://' + self.m_addr)
        try:

            c.add_node(self.addr)
            c.close()
        except:
            # Best-effort registration: ignore an unreachable membership server.
            pass

        self.pool = gevent.pool.Group()
        self.pool.spawn(self.retry_failed_node)
        self.framework.setDynamo(self)

    def getNodeList(self, servers):
        """Append *servers* to the shared node list and rebuild the ring."""
        print servers
        for i, server in enumerate(list(servers)):
            DynamoNode.nodelist.append(server)
        DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)

# PART reset
    @classmethod
    def reset(cls):
        """Clear the class-level membership state and rebuild an empty ring."""
        cls.nodelist = []
        cls.chash = ConsistentHashTable(cls.nodelist, cls.T)

# PART storage
    def store(self, key, value, metadata):
        """Persist (value, metadata) for *key* in the local LevelDB."""
        self.db.Put(key,pickle.dumps((value,metadata)))

    def retrieve(self, key):
        """Return the stored (value, metadata) for *key*, or (None, None)."""
        try:
            return pickle.loads(self.db.Get(key))
        except:
            # A missing (or unreadable) key is reported as simply absent.
            return (None, None)
    
    def are_you_there(self):
        # RPC liveness probe: returning at all (vs. timing out) means "alive".
        return
# PART retry_failed_node
    def retry_failed_node(self):  # Permanently repeating timer       
        """Ping each failed node once a second; run recovery() for any
        that answer.

        NOTE(review): recovery() mutates self.failed_nodes while this
        loop iterates it — confirm this cannot skip entries.
        """
        while True:
            if self.failed_nodes:
                for node in self.failed_nodes:
                    c = zerorpc.Client(timeout=1)
                    c.connect('tcp://' + str(node))
                    try:
                        c.are_you_there()
                        c.close()
                        self.recovery(node)
                    except:
                        # Still unreachable; try again on the next round.
                        continue
            time.sleep(1)
        
    def recovery(self, node):
        """Handle *node* coming back: drop it from the failed list and
        hand off any keys we stored on its behalf."""
        # Remove all instances of recovered node from failed node list
        recovered_node = node
        while recovered_node in self.failed_nodes:
            self.failed_nodes.remove(recovered_node)
        if recovered_node in self.pending_handoffs:
            for key in self.pending_handoffs[recovered_node]:
                # Send our latest value for this key
                (value, metadata) = self.retrieve(key)
                putmsg = PutReq(self.addr, recovered_node, key, value, metadata)
                self.framework.send_message(putmsg)
                self.framework.schedule()
            del self.pending_handoffs[recovered_node]


    def retry_request(self, reqmsg):
        """Mark *reqmsg*'s target as failed and re-send the request to a
        node from the regenerated preference list that has not yet seen it."""
        self.failed_nodes.append(reqmsg.to_node)
        if not isinstance(reqmsg, DynamoRequestMessage):
            return
        # Send the request to an additional node by regenerating the preference list
        preference_list = DynamoNode.chash.find_nodes(reqmsg.key, DynamoNode.N, self.failed_nodes)[0]
        kls = reqmsg.__class__
        # Check the pending-request list for this type of request message
        if kls in self.pending_req and reqmsg.msg_id in self.pending_req[kls]:
            for node in preference_list:
                try:
                    if node not in [req.to_node for req in self.pending_req[kls][reqmsg.msg_id]]:
                        # Found a node on the new preference list that hasn't been sent the request.
                        # Send it a copy
                        newreqmsg = copy.copy(reqmsg)
                        newreqmsg.to_node = node
                        self.pending_req[kls][reqmsg.msg_id].add(newreqmsg)
                        self.framework.send_message(newreqmsg)
                        self.framework.schedule()
                except:
                    # Best-effort: skip nodes we cannot reach.
                    pass
                    
# PART rcv_clientput
    def rcv_clientput(self, msg):
        """Handle a client put: forward to the key's coordinator when we
        are not on the preference list; otherwise replicate to N nodes
        and track outstanding replies under a fresh sequence number."""
        preference_list, avoided = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)
        non_extra_count = DynamoNode.N - len(avoided)
        # Determine if we are in the list
        if self.addr not in preference_list:
            # Forward to the coordinator for this key
            _logger.info("put(%s=%s) maps to %s", msg.key, msg.value, preference_list)
            coordinator = preference_list[0]
            self.framework.forward_message(msg, coordinator)
            self.framework.schedule()
        else:
            # Use an incrementing local sequence number to distinguish
            # multiple requests for the same key
            seqno = self.generate_sequence_number()
            _logger.info("%s, %d: put %s=%s", self, seqno, msg.key, msg.value)
            # The metadata for a key is passed in by the client, and updated by the coordinator node.
            metadata = copy.deepcopy(msg.metadata)
            metadata.update(self.name, seqno)
            # Send out to preference list, and keep track of who has replied
            self.pending_req[PutReq][seqno] = set()
            self.pending_put_rsp[seqno] = set()
            self.pending_put_msg[seqno] = msg
            reqcount = 0
            for ii, node in enumerate(preference_list):
                if ii >= non_extra_count:
                    # This is an extra node that's only include because of a failed node
                    handoff = avoided
                else:
                    handoff = None
                # Send message to get node in preference list to store
                putmsg = PutReq(self.addr, node, msg.key, msg.value, metadata, msg_id=seqno, handoff=handoff)
                try:
                    self.pending_req[PutReq][seqno].add(putmsg)
                except:
                    pass
                self.framework.send_message(putmsg)
                self.framework.schedule()
                reqcount = reqcount + 1
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break

# PART rcv_clientget
    def rcv_clientget(self, msg):
        """Handle a client get: forward to the coordinator when we are not
        on the preference list; otherwise fan GetReq out to N nodes."""
        preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
        # Determine if we are in the list
        #print preference_list
        if self.addr not in preference_list:
            # Forward to the coordinator for this key
            _logger.info("get(%s=?) maps to %s", msg.key, preference_list)
            coordinator = preference_list[0]
            self.framework.forward_message(msg, coordinator)
            self.framework.schedule()
        else:
            seqno = self.generate_sequence_number()
            self.pending_req[GetReq][seqno] = set()
            self.pending_get_rsp[seqno] = set()
            self.pending_get_msg[seqno] = msg
            reqcount = 0
            for node in preference_list:
                getmsg = GetReq(self.addr, node, msg.key, msg_id=seqno)
                try:
                    self.pending_req[GetReq][seqno].add(getmsg)
                except:
                    pass
                self.framework.send_message(getmsg)
                self.framework.schedule()
                reqcount = reqcount + 1
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break

# PART rcv_put
    def rcv_put(self, putmsg):
        """Replica-side put: store the value, record any handoff owed to
        failed nodes, and acknowledge with a PutRsp."""
        _logger.info("%s: store %s=%s", self, putmsg.key, putmsg.value)
        self.store(putmsg.key, putmsg.value, putmsg.metadata)
        if putmsg.handoff is not None:
            for failed_node in putmsg.handoff:
                if failed_node not in self.failed_nodes:
                    self.failed_nodes.append(failed_node)
                if failed_node not in self.pending_handoffs:
                    self.pending_handoffs[failed_node] = set()
                self.pending_handoffs[failed_node].add(putmsg.key)
        putrsp = PutRsp(putmsg)
        self.framework.send_message(putrsp)
        self.framework.schedule()

# PART rcv_putrsp
    def rcv_putrsp(self, putrsp):
        """Coordinator-side put ack: once W replicas have replied, tidy up
        the tracking state and answer the original client."""
        seqno = putrsp.msg_id
        if seqno in self.pending_put_rsp:
            self.pending_put_rsp[seqno].add(putrsp.from_node)
            
            if len(self.pending_put_rsp[seqno]) >= DynamoNode.W:
                _logger.info("%s: written %d copies of %s=%s so done", self, DynamoNode.W, putrsp.key, putrsp.value)
                #_logger.debug("  copies at %s", [node.name for node in self.pending_put_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_put_msg[seqno]
                del self.pending_req[PutReq][seqno]
                del self.pending_put_rsp[seqno]
                del self.pending_put_msg[seqno]
                # Reply to the original client
                client_putrsp = ClientPutRsp(original_msg, putrsp.metadata)
                self.framework.send_message(client_putrsp)
                self.framework.schedule()
        else:
            pass  # Superfluous reply

# PART rcv_get
    def rcv_get(self, getmsg):
        """Replica-side get: look the key up locally and reply with GetRsp."""
        _logger.info("%s: retrieve %s=?", self, getmsg.key)
        (value, metadata) = self.retrieve(getmsg.key)
        getrsp = GetRsp(getmsg, value, metadata)
        self.framework.send_message(getrsp)
        self.framework.schedule()

# PART rcv_getrsp
    def rcv_getrsp(self, getrsp):
        """Coordinator-side get reply: once R replicas have answered,
        coalesce compatible (value, metadata) pairs and answer the client."""
        seqno = getrsp.msg_id
        if seqno in self.pending_get_rsp:
            self.pending_get_rsp[seqno].add((getrsp.from_node, getrsp.value, getrsp.metadata))
            #print len(self.pending_get_rsp[seqno])
            if len(self.pending_get_rsp[seqno]) >= DynamoNode.R:
                _logger.info("%s: read %d copies of %s=? so done", self, DynamoNode.R, getrsp.key)
                #_logger.debug("  copies at %s", [(node.name, value) for (node, value, _) in self.pending_get_rsp[seqno]])
                # Coalesce all compatible (value, metadata) pairs across the responses
                results = VectorClock.coalesce2([(value, metadata) for (node, value, metadata) in self.pending_get_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_get_msg[seqno]
                del self.pending_req[GetReq][seqno]
                del self.pending_get_rsp[seqno]
                del self.pending_get_msg[seqno]
                # Reply to the original client, including all received values
                client_getrsp = ClientGetRsp(original_msg,
                                             [value for (value, metadata) in results],
                                             [metadata for (value, metadata) in results])
                self.framework.send_message(client_getrsp)
                #Framework.schedule(timers_to_process=0)
                self.framework.schedule()
        else:
            pass  # Superfluous reply

# PART rcvmsg
    def rcvmsg(self, msg):
        """Unpickle an incoming wire message and dispatch it by type.

        NOTE(review): pickle.loads on data received off the wire is
        unsafe against untrusted peers; acceptable only inside a fully
        trusted cluster — confirm the deployment assumption.
        """
        msg = pickle.loads(msg)
        if isinstance(msg, ClientPut):
            #print 'get ClientPut'
            self.rcv_clientput(msg)
        elif isinstance(msg, PutReq):
            #print 'get PutReq'
            self.rcv_put(msg)
        elif isinstance(msg, PutRsp):
            #print 'get PutRsp'
            self.rcv_putrsp(msg)
        elif isinstance(msg, ClientGet):
            #print 'get ClientGet'
            self.rcv_clientget(msg)
        elif isinstance(msg, GetReq):
            #print 'get GetReq'
            self.rcv_get(msg)
        elif isinstance(msg, GetRsp):
            #print 'get GetRsp'
            self.rcv_getrsp(msg)
        elif isinstance(msg, ClientGetRsp):
            print msg.value
            self.mapDict[msg.key] = msg.value[0]
        else:
            # NOTE(review): the %s is never interpolated — TypeError receives
            # a (format, class) tuple rather than a formatted string.
            raise TypeError("Unexpected message type %s", msg.__class__)

# PART get_contents
    def get_contents(self):
        """Return "key:value" strings for everything in local_store.

        NOTE(review): assumes the MerkleTree local_store supports
        dict-style items() — confirm against its implementation.
        """
        results = []
        for key, value in self.local_store.items():
            results.append("%s:%s" % (key, value[0]))
        return results
    
    def put_message(self, fromnode, key, value, metadata):
        """RPC entry for a client put: unpickle the vector-clock metadata,
        converge it, and hand a ClientPut to rcv_clientput."""
        #print 'client put!!!'
        metadata = pickle.loads(metadata)
        if metadata is None:
            metadata = VectorClock()
        else:
            # A Put operation always implies convergence
            metadata = VectorClock.converge(metadata)
        putmsg = ClientPut(fromnode, self.addr, key, value, metadata)
        self.rcv_clientput(putmsg)
        #self.framework.send_message(putmsg)
       # Framework.schedule(timers_to_process=0)
        #self.framework.schedule()
        
    def get_message(self, fromnode, key):
        """RPC entry for a client get of *key*."""
        #print 'client get!!!'
        getmsg = ClientGet(fromnode, self.addr, key)
        #print '++++++++++' + str(key) + '+++++++++++++++++'
        self.rcv_clientget(getmsg)
        #self.framework.send_message(getmsg)
        #self.framework.schedule()
        #Framework.schedule(timers_to_process=0)

    def get_keys(self):
        """Return every key currently stored in the local LevelDB."""
        keys = []
        for (key, value) in self.db.RangeIter():
            keys.append(key)
        return keys
        
    def startMap(self, keys):
        """Run the map phase over *keys*: clear old state, fetch each value
        through the normal get path, apply map(), and persist the partial
        counts to MapReduceDB."""
        self.mapDict.clear()
        self.result.clear()
        print 'start mapping'
        for (k,v) in self.MapReduceDB.RangeIter():
            self.MapReduceDB.Delete(k)
        #print keys
        for key in keys:
            self.get_message(self.addr, key)
        #print 'get key finish'
        # NOTE(review): busy-waits until a response has arrived for every
        # key; a key that never answers would spin forever — confirm.
        while True:
            if len(self.mapDict.keys()) == len(keys):
                break
#            gevent.sleep(0.5)
        #print self.mapDict
        #self.count(self.mapDict)
        for key in self.mapDict.keys():
            self.map(key, self.mapDict[key])
        for k in self.result:
            self.MapReduceDB.Put(k,pickle.dumps(self.result[k]))
        return
            
        
        
    def startReduce(self, reduce_nodelist, addr, node):
        """Run the reduce phase: pull this node's share of map output from
        *addr* (remotely via zerorpc, or locally) and reduce each word."""
        print 'start reducing'
        #print addr
        #print dict
        d = {}
        
        print node
        print self.addr
        if addr != self.addr:
            c = zerorpc.Client(timeout = 10)
            c.connect('tcp://' + addr)
            
            try:
                
                d = c.send_to_reduce(reduce_nodelist, node)
                #print 'return'
                #print d
            except:
                # Best-effort: an unreachable peer contributes nothing.
                pass
            c.close()
        else:
            d = self.send_to_reduce(reduce_nodelist, node)   
            #print 'return'
            #print d 
        self.reduceResult.clear()
        for k in d:
            #print k
            #print len(pickle.loads(d[k]))
            self.reduce(k, pickle.loads(d[k]))
        return self.reduceResult
    
    def reduce(self, word, iterator):
        """Reduce step: the total for *word* is the number of partial
        counts emitted for it."""
        self.reduceResult[word] = len(iterator)

    def emit(self, word, sum):
        """Append one partial count for *word* to the map-phase output."""
        if word not in self.result.keys():
            self.result[word] = [sum]
        else:
            tmplist = []
            tmplist = self.result[word]
            tmplist.append(sum)
            self.result[word] = tmplist
  
    def map(self, name, document):
        """Map step: emit (word, 1) for every whitespace-separated,
        lower-cased word in *document*."""
        result = {}
        #for k in d:
        content = document.__str__().lower()
        #print 'content is: ' + content
        words = content.split()
        for word in words:
            self.emit(word, 1)
    
    


    def send_to_reduce(self, reduce_nodelist, remoteaddr):
        """Return the slice of local map output whose keys hash to
        *remoteaddr* on a ring built from *reduce_nodelist*."""
        print 'sent------------------------'
        chash = ConsistentHashTable(reduce_nodelist, DynamoNode.T)
        d = {}
        print 'remote: ' + remoteaddr
        #l = []
        for (k,v) in self.MapReduceDB.RangeIter():
            
            print chash.find_nodes(k)[0][0]
            if remoteaddr == chash.find_nodes(k)[0][0]:
                print k
                print pickle.loads(self.MapReduceDB.Get(k))
                d[k] = self.MapReduceDB.Get(k)
                #l.append(k)
        #for k in l:
            #self.MapReduceDB.Delete(k)
        return d
Exemplo n.º 37
0
 def setUp(self):
     # Start every test with a fresh, empty Merkle tree.
     self.tree = MerkleTree()
Exemplo n.º 38
0
def create_new_block(trx_list):
    """Assemble and mine a block from *trx_list*.

    Builds a Merkle tree over the transactions, links the block to the
    current chain head via its previous hash, and runs proof-of-work.
    """
    merkle = MerkleTree(trx_list)
    prev_hash = get_previous_hash(filename)
    block = Block(prev_hash, merkle.getRootHash(), trx_list)
    mine(block)
Exemplo n.º 39
0
class DynamoNode(Node):
    timer_priority = 20
    T = 10  # Number of repeats for nodes in consistent hash table
    N = 3  # Number of nodes to replicate at
    W = 2  # Number of nodes that need to reply to a write operation
    R = 3  # Number of nodes that need to reply to a read operation
    nodelist = []
    chash = ConsistentHashTable(nodelist, T)

    def __init__(self, addr, config_file):
        super(DynamoNode, self).__init__()
        self.local_store = MerkleTree()  # key => (value, metadata)
        self.pending_put_rsp = {}  # seqno => set of nodes that have stored
        self.pending_put_msg = {}  # seqno => original client message
        self.pending_get_rsp = {}  # seqno => set of (node, value, metadata) tuples
        self.pending_get_msg = {}  # seqno => original client message
        # seqno => set of requests sent to other nodes, for each message class
        self.pending_req = {PutReq: {}, GetReq: {}}
        self.failed_nodes = []
        self.pending_handoffs = {}
        
        #modifed
        self.servers = []
        self.num = 3 
        self.addr = addr
        
        if not os.path.exists('./' + addr):
            os.mkdir('./' + addr)
        self.db = leveldb.LevelDB('./' + addr + '/db')
        f = open(config_file, 'r')
        for line in f.readlines():
            line = line.rstrip()
            self.servers.append(line)
        print 'My addr: %s' % (self.addr)
        print 'Server list: %s' % (str(self.servers))

        self.connections = []

        for i, server in enumerate(self.servers):
            DynamoNode.nodelist.append(server)                
            if server == self.addr:
                self.i = i
                self.connections.append(self)
            else:
                c = zerorpc.Client(timeout=10)
                c.connect('tcp://' + server)
                self.connections.append(c)                
                
        if not os.path.exists(addr):
            os.mkdir(addr)
        ###################################
        
        # Rebuild the consistent hash table
        #modified
#        DynamoNode.nodelist.append(self)
#        print "append node: ", self,  len(DynamoNode.nodelist)
###################################
        DynamoNode.chash = ConsistentHashTable(DynamoNode.nodelist, DynamoNode.T)
        # Run a timer to retry failed nodes
        #modified
        self.pool = gevent.pool.Group()
        self.check_servers_greenlet = self.pool.spawn(self.retry_failed_node)
#        self.retry_failed_node("retry")
        #################################


# PART reset
    @classmethod
    def reset(cls):
        cls.nodelist = []
        cls.chash = ConsistentHashTable(cls.nodelist, cls.T)

# PART storage
    def store(self, key, value, metadata):
        self.local_store[key] = (value, metadata)
        #modified
        self.db.Put(key, value)
#        print "sotre:", key, value
        #########################

    def retrieve(self, key):
        try: 
            return self.db.Get(key), None
        except Exception:
            return None, None
#        if key in self.local_store:
#            #modified
#            return self.db.Get(key), self.local_store[key][1]
#            #########################
##            return self.local_store[key]
#        else:
#            return (None, None)

# PART retry_failed_node

    
    def retry_failed_node(self):  # Permanently repeating timer
        #modified
        while True:
        ####################
            gevent.sleep(5)
#            print 'sleeping...'
            if self.failed_nodes:
                
                if len(self.failed_nodes) < 1:
                    continue
#                print "self.failed_nodes: ",self.failed_nodes
                node = self.failed_nodes.pop(0)
                # Send a test message to the oldest failed node
                pingmsg = PingReq(self.addr, node)
                #modified
                con = self.connections[self.servers.index(node)]
                result = Framework.send_message(pingmsg, con)
                if result is False and node not in self.failed_nodes:
                    self.failed_nodes.append(node)
                ############################
        # Restart the timer
        TimerManager.start_timer(self, reason="retry", priority=15, callback=None)#self.retry_failed_node)

    def rcv_pingreq(self, pingmsg):
        # Always reply to a test message
        pingrsp = PingRsp(pingmsg)
#        print '-------------------------------------pingreq', pingmsg.from_node, pingmsg.to_node
        #modified
        con = self.connections[self.servers.index(pingrsp.to_node)]
        result = Framework.send_message(pingrsp, con)
        if result is False and pingrsp.to_node not in self.failed_nodes:
            self.failed_nodes.append(pingrsp.to_node)
        #########################################

    def rcv_pingrsp(self, pingmsg):
        # Remove all instances of recovered node from failed node list
#        print '+++++++++++++++++++++++++++++++++++++pingrsp', pingmsg.from_node, pingmsg.to_node
        recovered_node = pingmsg.from_node
        while recovered_node in self.failed_nodes:
            self.failed_nodes.remove(recovered_node)
#        print recovered_node in self.pending_handoffs
        if recovered_node in self.pending_handoffs:
            for key in self.pending_handoffs[recovered_node]:
                # Send our latest value for this key
                (value, metadata) = self.retrieve(key)
                putmsg = PutReq(self.addr, recovered_node, key, value, metadata)
                
                #modified
                con = self.connections[self.servers.index(putmsg.to_node)]
                result = Framework.send_message(putmsg, con)
#                if result is False:
#                    self.failed_nodes.append(recovered_node)
#                    break
                #######################################
#            print    "==========:",recovered_node
            if recovered_node in self.pending_handoffs:
                del self.pending_handoffs[recovered_node]
            
            

# PART rsp_timer_pop
    def rsp_timer_pop(self, reqmsg):
        # no response to this request; treat the destination node as failed
        _logger.info("Node %s now treating node %s as failed", self, reqmsg.to_node)
        self.failed_nodes.append(reqmsg.to_node)
        failed_requests = Framework.cancel_timers_to(reqmsg.to_node)
        failed_requests.append(reqmsg)
        for failedmsg in failed_requests:
            self.retry_request(failedmsg)

    def retry_request(self, reqmsg):
        if not isinstance(reqmsg, DynamoRequestMessage):
            return
        # Send the request to an additional node by regenerating the preference list
        preference_list = DynamoNode.chash.find_nodes(reqmsg.key, DynamoNode.N, self.failed_nodes)[0]
        kls = reqmsg.__class__
        # Check the pending-request list for this type of request message
        if kls in self.pending_req and reqmsg.msg_id in self.pending_req[kls]:
            for node in preference_list:
                if node not in [req.to_node for req in self.pending_req[kls][reqmsg.msg_id]]:
                    # Found a node on the new preference list that hasn't been sent the request.
                    # Send it a copy
                    newreqmsg = copy.copy(reqmsg)
                    newreqmsg.to_node = node
                    self.pending_req[kls][reqmsg.msg_id].add(newreqmsg)
                    
                    #modified 
                    con = self.connections[self.servers.index(newreqmsg.to_node)]
                    Framework.send_message(newreqmsg, con)
                    ###########################
                    
# PART rcv_clientput
    def rcv_clientput(self, msg):
        preference_list, avoided = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)
        non_extra_count = DynamoNode.N - len(avoided)
        # Determine if we are in the list
        #modified
        if self.addr not in preference_list:
            # Forward to the coordinator for this key
            _logger.info("put(%s=%s) maps to %s", msg.key, msg.value, preference_list)
            result = True
            for e in preference_list:
                con = self.connections[self.servers.index(e)]
                result = Framework.forward_message(msg, con, e)
                if result is not False:
                    break
                if e not in self.failed_nodes:
                    self.failed_nodes.append(e)
            return result 
        #####################################
        
        else:
            # Use an incrementing local sequence number to distinguish
            # multiple requests for the same key
            seqno = self.generate_sequence_number()
            _logger.info("%s, %d: put %s=%s", self, seqno, msg.key, msg.value)
            # The metadata for a key is passed in by the client, and updated by the coordinator node.
            metadata = copy.deepcopy(msg.metadata)
            metadata.update(self.name, seqno)
            # Send out to preference list, and keep track of who has replied
            self.pending_req[PutReq][seqno] = set()
            self.pending_put_rsp[seqno] = set()
            self.pending_put_msg[seqno] = msg
            reqcount = 0
            
            #modified
            nodes = []
            #####################
            for ii, node in enumerate(preference_list):
                if ii >= non_extra_count:
                    # This is an extra node that's only include because of a failed node
                    handoff = avoided
                else:
                    handoff = None
                # Send message to get node in preference list to store
                putmsg = PutReq(self.addr, node, msg.key, msg.value, metadata, msg_id=seqno, handoff=handoff)
                self.pending_req[PutReq][seqno].add(putmsg)

                #modified
                con = self.connections[self.servers.index(putmsg.to_node)]
                result = Framework.send_message(putmsg, con)
                if result is False and putmsg.to_node not in self.failed_nodes:
                    self.failed_nodes.append(putmsg.to_node)
                if result is not False:
                    nodes.append(node)
                    reqcount = reqcount + 1
                ####################################
#                print "---------------------------------", type(putmsg)                
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break
            #modified
            if reqcount >= DynamoNode.N:
                return nodes
            return False
            ###############
    
# PART rcv_clientget
    def rcv_clientget(self, msg):
        preference_list = DynamoNode.chash.find_nodes(msg.key, DynamoNode.N, self.failed_nodes)[0]
        # Determine if we are in the list
        
        #modified
        if self.addr not in preference_list:
        ################################
            # Forward to the coordinator for this key
            _logger.info("get(%s=?) maps to %s", msg.key, preference_list)
            for e in preference_list:
                con = self.connections[self.servers.index(e)]
                result = Framework.forward_message(msg, con, e)
                if result is not False:
                    break
                if e not in self.failed_nodes:
                    self.failed_nodes.append(e)
            return result
        else:
            seqno = self.generate_sequence_number()
            self.pending_req[GetReq][seqno] = set()
            self.pending_get_rsp[seqno] = set()
            self.pending_get_msg[seqno] = msg
            reqcount = 0
            #modified
            value = []
            ################
            for node in preference_list:
                getmsg = GetReq(self.addr, node, msg.key, msg_id=seqno)
                self.pending_req[GetReq][seqno].add(getmsg)               
                #modified
                con = self.connections[self.servers.index(getmsg.to_node)]
                result = Framework.send_message(getmsg, con)
                if result is not False:
                    value.append(result)                           
                    reqcount = reqcount + 1
                ############################################
                if reqcount >= DynamoNode.N:
                    # preference_list may have more than N entries to allow for failed nodes
                    break
#            print value
            #modified
            # no value for this key
            if len(value) < 1:
                return False
            ##########################
            return value

# PART rcv_put
    def rcv_put(self, putmsg):
        _logger.info("%s: store %s=%s on %s", self, putmsg.key, putmsg.value, self.addr)
        self.store(putmsg.key, putmsg.value, putmsg.metadata)
        if putmsg.handoff is not None:
            for failed_node in putmsg.handoff:
                if failed_node not in self.failed_nodes:
                    self.failed_nodes.append(failed_node)
                if failed_node not in self.pending_handoffs:
                    self.pending_handoffs[failed_node] = set()
                self.pending_handoffs[failed_node].add(putmsg.key)
        putrsp = PutRsp(putmsg)
        
        #modified
        return 
#        con = self.connections[self.servers.index(putrsp.to_node)]
#        Framework.send_message(putrsp, con)
        #################################

# PART rcv_putrsp
    def rcv_putrsp(self, putrsp):
        seqno = putrsp.msg_id
        if seqno in self.pending_put_rsp:
            self.pending_put_rsp[seqno].add(putrsp.from_node)
            if len(self.pending_put_rsp[seqno]) >= DynamoNode.W:
                _logger.info("%s: written %d copies of %s=%s so done", self, DynamoNode.W, putrsp.key, putrsp.value)
#                _logger.debug("  copies at %s", [node.name for node in self.pending_put_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_put_msg[seqno]
                del self.pending_req[PutReq][seqno]
                del self.pending_put_rsp[seqno]
                del self.pending_put_msg[seqno]
                # Reply to the original client
                client_putrsp = ClientPutRsp(original_msg, putrsp.metadata)
                
                #mofied
                con = self.connections[self.servers.index(client_putrsp.to_node)]
                Framework.send_message(client_putrsp, con)
                ###################################
        else:
            pass  # Superfluous reply

# PART rcv_get
    def rcv_get(self, getmsg):
        _logger.info("%s: retrieve %s=?", self, getmsg.key)
        (value, metadata) = self.retrieve(getmsg.key)
        getrsp = GetRsp(getmsg, value, metadata)
        
        #modified
#        print "rcv_get", value
        return value
#        con = self.connections[self.servers.index(getrsp.to_node)]
#        Framework.send_message(getrsp, con)
        #############################33
        
# PART rcv_getrsp
    def rcv_getrsp(self, getrsp):
        seqno = getrsp.msg_id
        if seqno in self.pending_get_rsp:
            self.pending_get_rsp[seqno].add((getrsp.from_node, getrsp.value, getrsp.metadata))
            if len(self.pending_get_rsp[seqno]) >= DynamoNode.R:
                _logger.info("%s: read %d copies of %s=? so done", self, DynamoNode.R, getrsp.key)
#                _logger.debug("  copies at %s", [(node.name, value) for (node, value, _) in self.pending_get_rsp[seqno]])
                # Coalesce all compatible (value, metadata) pairs across the responses
                results = VectorClock.coalesce2([(value, metadata) for (node, value, metadata) in self.pending_get_rsp[seqno]])
                # Tidy up tracking data structures
                original_msg = self.pending_get_msg[seqno]
                del self.pending_req[GetReq][seqno]
                del self.pending_get_rsp[seqno]
                del self.pending_get_msg[seqno]
                # Reply to the original client, including all received values
                client_getrsp = ClientGetRsp(original_msg,
                                             [value for (value, metadata) in results],
                                             [metadata for (value, metadata) in results])
                #modified
                con = self.connections[self.servers.index(client_getrsp.to_node)]
                Framework.send_message(client_getrsp, con)
                ########################################
                
        else:
            pass  # Superfluous reply

# PART rcvmsg
    def rcvmsg(self, pmsg):
        """Deserialize a pickled wire message and dispatch it to its handler.

        Request-type handlers (client put/get, replica put/get) produce a
        result that is returned to the caller; response/ping handlers
        return None.

        Raises:
            TypeError: if the message is of an unrecognized type.
        """
        # modified: messages arrive pickled over the wire.
        # SECURITY: pickle.loads can execute arbitrary code during
        # deserialization — acceptable only because peers are trusted
        # cluster nodes; never expose this endpoint to untrusted input.
        msg = pickle.loads(pmsg)

        if isinstance(msg, ClientPut):
            return self.rcv_clientput(msg)
        elif isinstance(msg, PutReq):
            return self.rcv_put(msg)
        elif isinstance(msg, PutRsp):
            # Returning keeps branches consistent; handler yields None anyway.
            return self.rcv_putrsp(msg)
        elif isinstance(msg, ClientGet):
            return self.rcv_clientget(msg)
        elif isinstance(msg, GetReq):
            return self.rcv_get(msg)
        elif isinstance(msg, GetRsp):
            return self.rcv_getrsp(msg)
        elif isinstance(msg, PingReq):
            return self.rcv_pingreq(msg)
        elif isinstance(msg, PingRsp):
            return self.rcv_pingrsp(msg)
        else:
            # Bug fix: the original passed the class as a second TypeError
            # argument instead of %-formatting it into the message.
            raise TypeError("Unexpected message type %s" % msg.__class__)

# PART get_contents
    def get_contents(self):
        """Return a list of "key:value" strings for everything stored locally.

        Only the value (first element of each stored tuple) is shown; the
        accompanying metadata is omitted.
        """
        return ["%s:%s" % (key, stored[0])
                for key, stored in self.local_store.items()]