def mine(self, cons):
    """ Create and send block in PUB socket based on consensus

    Loops until the kill event ``self.k`` is set.  Each round waits on the
    ``start`` and ``f`` events, snapshots the chain tip, derives a node id
    from this node's ip, stakes the current balance and asks
    ``generateNewblock`` for a fresh block.  A successfully mined block is
    persisted, appended to the local chain and published on the PUB socket
    as [MSG_BLOCK, ip, pickled block].

    cons -- consensus object forwarded to self.generateNewblock
    """
    name = threading.current_thread().getName()  # NOTE(review): unused; kept for debugging?
    while True and not self.k.is_set():
        # move e flag inside generate?
        self.start.wait()   # block until mining is enabled
        self.f.wait()       # f is cleared by listen() on receipt -- TODO confirm who sets it
        lastblock = self.bchain.getLastBlock()
        # node id = hex sha256 of this node's ip address
        node = hashlib.sha256(self.ipaddr).hexdigest()
        # stake snapshot: the whole current balance backs this attempt
        self.stake = self.balance
        # find new block
        b = self.generateNewblock(lastblock, node, self.stake, cons)
        if b and not self.e.is_set():
            # e not set => no competing block arrived while we were mining
            logging.info("Mined block %s" % b.hash)
            sqldb.writeBlock(b)
            sqldb.writeChain(b)
            self.bchain.addBlocktoBlockchain(b)
            self.psocket.send_multipart(
                [consensus.MSG_BLOCK, self.ipaddr, pickle.dumps(b, 2)])
            time.sleep(parameter.timeout)  # pace block production
        else:
            # mining aborted (e was set by listen); only re-arm once synced
            if (self.synced == True):
                self.e.clear()
def validateChain(bc, l):
    """Validate a list of raw DB rows as blocks and append the valid ones to bc.

    bc -- blockchain object; valid blocks are appended to it and persisted
    l  -- iterable of DB rows, each converted with sqldb.dbtoBlock

    Returns a (block, header_error) pair:
      (b, True)     -- b has an invalid header (hard failure)
      (b, False)    -- b does not chain onto the current last block (fork)
      (None, False) -- the whole list validated and was written
    """
    # Fix: removed stray Py2 debug statement `print lastBlock.blockInfo()`
    # that leaked diagnostic output from library code.
    lastBlock = bc.getLastBlock()
    for row in l:
        b = sqldb.dbtoBlock(row)
        if not validateBlockHeader(b):
            # invalid header: report as hard error
            return b, True
        if validateBlock(b, lastBlock):
            # extends the chain: persist and advance the tip
            lastBlock = b
            bc.addBlocktoBlockchain(b)
            sqldb.writeBlock(b)
            sqldb.writeChain(b)
        else:
            # fork: block is well-formed but does not follow the tip
            return b, False
    return None, False
def listen(self):
    """ Listen to block messages in a SUB socket
    Message frames: [ 'block', ip, block data ]

    Receive loop runs until the kill event ``self.k`` is set.  Accepted
    blocks (tip + 1) are persisted, appended and rebroadcast; a gap larger
    than one triggers sync(); same-index blocks are treated as
    retransmission or fork.
    """
    self.bind(self.psocket)
    while True and not self.k.is_set():
        try:
            msg, ip, block_recv = self.subsocket.recv_multipart()
            self.f.clear()  # signal mine() that a block is being processed
            # serialize
            # NOTE(review): pickle.loads on data received from peers is
            # unsafe against malicious input -- consider a safer codec.
            b = pickle.loads(block_recv)
            logging.info("Got block %s miner %s" % (b.hash, ip))
            # Verify block
            if consensus.validateBlockHeader(b):
                logging.debug('valid block header')
                lb = self.bchain.getLastBlock()
                if (b.index - lb.index == 1) and consensus.validateBlock(b, lb):
                    # block directly extends our tip: accept it
                    self.e.set()  # abort any in-flight local mining round
                    sqldb.writeBlock(b)
                    sqldb.writeChain(b)
                    self.bchain.addBlocktoBlockchain(b)
                    # rebroadcast
                    #logging.debug('rebroadcast')
                    self.psocket.send_multipart(
                        [consensus.MSG_BLOCK, ip, pickle.dumps(b, 2)])
                elif b.index - lb.index > 1:
                    # we are behind by more than one block: full sync
                    self.synced = False
                    self.sync(b, ip)
                elif b.index == lb.index:
                    if b.hash == lb.hash:
                        logging.debug('retransmission')
                    else:
                        logging.debug('fork')
                        # double entry
                        sqldb.writeBlock(b)
                else:
                    # ignore old block
                    logging.debug('old')
            # self.f.set()
        except (zmq.ContextTerminated):
            # context shut down: exit the listen loop
            break
def mine(self, cons):
    """ Create and send block in PUB socket based on consensus

    Runs until the kill event ``self.k`` fires.  Each iteration waits for
    the ``start`` and ``f`` events, asks the consensus object for a block
    on top of the current tip and, when one is produced without a
    competing block arriving (``e`` unset), persists it and publishes it
    as [MSG_BLOCK, ip, pickled block].
    """
    name = threading.current_thread().getName()
    while not self.k.is_set():
        # move e flag inside generate?
        self.start.wait()
        self.f.wait()
        tip = self.bchain.getLastBlock()
        # find new block
        block = cons.generateNewblock(tip, self.e)
        if not block or self.e.is_set():
            # round aborted: a peer's block won, drop the flag and retry
            self.e.clear()
            continue
        logging.info("Mined block %s" % block.hash)
        sqldb.writeBlock(block)
        sqldb.writeChain(block)
        self.bchain.addBlocktoBlockchain(block)
        self.psocket.send_multipart(
            [consensus.MSG_BLOCK, self.ipaddr, pickle.dumps(block, 2)])
def validateChain(bc, chain, stake):
    """Validate raw DB rows as blocks (PoS variant) and append valid ones to bc.

    bc    -- blockchain object; accepted blocks are appended and persisted
    chain -- iterable of DB rows, each converted with sqldb.dbtoBlock
    stake -- this node's stake, used by validateChallenge

    Returns a (block, header_error) pair:
      (b, True)     -- b has an invalid header (hard failure)
      (b, False)    -- b does not chain onto the current last block (fork)
      (None, False) -- list processed without header error or fork
    """
    # Fix: removed stray debug print and dead commented-out diagnostics.
    lastBlock = bc.getLastBlock()
    for row in chain:
        b = sqldb.dbtoBlock(row)
        if not validateBlockHeader(b):
            # invalid header: report as hard error
            return b, True
        if validateBlock(b, lastBlock):
            if (validateChallenge(b, stake) and validateRound(b, bc)
                    and validateExpectedRound(b, lastBlock)):
                # block passed all PoS checks: persist and advance the tip
                lastBlock = b
                bc.addBlocktoBlockchain(b)
                sqldb.writeBlock(b)
                sqldb.writeChain(b)
            # NOTE(review): a block that chains correctly but fails the
            # challenge/round checks is silently skipped (tip not advanced),
            # so the NEXT block will be reported as a fork -- confirm this
            # is the intended behavior.
        else:
            # fork: block does not follow the current tip
            return b, False
    return None, False
def sync(self, rBlock=None, address=None):
    """ Syncronize with peers and validate chain

    rBlock -- can be passed as argument to sync based on that block index
              instead of requesting
    address -- try to force requests to use this ip address

    Requests the best known last block from up to 3 peers (unless rBlock
    was given), then catches up: a single missing block is validated and
    appended directly; a larger gap is fetched with reqBlocks and checked
    with consensus.validateChain, with fork repair via recursiveValidate.
    """
    self.synced = True
    logging.debug('syncing...')
    # Request before sync
    if not rBlock:
        rBlock = self.bchain.getLastBlock()
        # limit number of peers request
        for i in range(0, min(len(self.peers), 3)):
            i += 1  # 1-based counter for the log line only
            logging.debug('request #%d' % i)
            b, ip = self.reqLastBlock()
            if b:
                logging.debug('Block index %s' % b.index)
            if (b and (b.index > rBlock.index)):
                # keep the highest-index candidate and its source address
                rBlock = b
                address = ip
                logging.debug('Best index %s with ip %s' % (b.index, ip))
    last = self.bchain.getLastBlock()
    # Sync based on rBlock
    if (rBlock.index > last.index):
        self.e.set()  # abort local mining while we catch up
        if (rBlock.index - last.index == 1) and consensus.validateBlock(
                rBlock, last):
            # exactly one block behind and it chains: append directly
            logging.debug('valid block')
            sqldb.writeBlock(rBlock)
            sqldb.writeChain(rBlock)
            self.bchain.addBlocktoBlockchain(rBlock)
        else:
            # more than one block behind: fetch the missing range
            l = self.reqBlocks(last.index + 1, rBlock.index, address)
            if l:
                # validate and write
                b_error, h_error = consensus.validateChain(self.bchain, l)
                if b_error:
                    if not h_error and b_error.index == last.index + 1:
                        # well-formed block that does not chain: fork
                        logging.debug('fork')
                        sqldb.writeBlock(b_error)
                        # trying to solve and pick a fork
                        n = self.recursiveValidate(b_error)
                        if n:
                            # rebuild the in-memory chain from the fork point
                            self.bchain.chain.clear()  # TODO change this and refactor
                            for i in xrange(n.index, last.index + 1):
                                logging.debug('updating chain')
                                if i == 1:
                                    sqldb.replaceChain(n)
                                    self.bchain.addBlocktoBlockchain(n)
                                else:
                                    n = sqldb.forkUpdate(i)
                                    sqldb.replaceChain(n)
                                    self.bchain.addBlocktoBlockchain(
                                        sqldb.dbtoBlock(n))
                            # re-run validation of the fetched range on the
                            # repaired chain -- return value intentionally ignored?
                            consensus.validateChain(self.bchain, l)
                    else:
                        # header error (or non-adjacent failure): re-request
                        logging.debug('invalid')
                        # request again
                        new = self.reqBlock(b_error.index)
                        self.sync(new)
    logging.debug('synced')
def sync(self, rBlock=None, address=None):
    """ Syncronize with peers and validate chain

    rBlock -- can be passed as argument to sync based on that block index
              instead of requesting
    address -- try to force requests to use this ip address

    PoS variant of sync: requests the best last block from up to 3 peers
    (unless rBlock was given), then either appends a single missing block
    after full PoS validation, or fetches the missing range and repairs
    forks by popping blocks back to the fork point found by
    recursiveValidate and re-inserting the winning branch from the DB.
    """
    logging.debug('syncing...')
    # Request before sync
    if not rBlock:
        rBlock = self.bchain.getLastBlock()
        # limit number of peers request
        for i in xrange(0, min(len(self.peers), 3)):
            i += 1  # 1-based counter for the log line only
            logging.debug('request #%d' % i)
            b, ip = self.reqLastBlock()
            if b:
                logging.debug('Block index %s' % b.index)
            if (b and (b.index > rBlock.index)):
                # keep the highest-index candidate and its source address
                rBlock = b
                address = ip
                logging.debug('Best index %s with ip %s' % (b.index, ip))
    last = self.bchain.getLastBlock()
    #print('INDEX BLOCK', last.index)
    #print('LAST BLOCK ON SYNC FUNCTION', last.hash)
    #print('rBLOCK', rBlock.index)
    # Sync based on rBlock
    if (rBlock.index > last.index):
        #print("RBLOCK", rBlock.index)
        if (rBlock.index - last.index == 1):
            # exactly one block behind: validate header/challenge/round first
            if (validations.validateBlockHeader(rBlock)
                    and validations.validateChallenge(rBlock, self.stake)
                    and validations.validateExpectedRound(rBlock, last)):
                if (validations.validateBlock(rBlock, last)):
                    # chains onto our tip: append directly
                    #print('SYNC-BLOCO CADEIA ATUAL')
                    logging.debug('valid block')
                    sqldb.writeBlock(rBlock)
                    sqldb.writeChain(rBlock)
                    self.bchain.addBlocktoBlockchain(rBlock)
                else:
                    # valid block on another branch: fork repair
                    #print('SYNC-BLOCO OUTRA CADEIA')
                    sqldb.writeBlock(rBlock)
                    # trying to solve and pick a fork
                    n = self.recursiveValidate(rBlock, address)
                    #print('B_ERROR', b_error.index)
                    #print("PONTO DO FORK:", n.index)
                    if n:
                        fork = n
                        #print('BLOCO EM FORK', fork.index)
                        #print('BLOCO EM FORK - HASH', fork.index)
                        #self.bchain.chain.clear() # TODO change this and refactor
                        #remove all blocks after fork point
                        for i in xrange(n.index, last.index + 1):
                            self.bchain.chain.popleft()
                        teste = self.bchain.getLastBlock()
                        #print('ULTIMO BLOCO DEPOIS DE REMOVER BLOCO DO FORK', teste.index)
                        #print('ULTIMO BLOCO DEPOIS DE REMOVER BLOCO DO FORK - HASH', teste.hash)
                        #insert new blocks starting on n block
                        # walk indices downward, stitching the winning branch
                        # into the `chain` table via replaceChain/writeChain
                        for i in xrange(last.index + 1, n.index - 1, -1):
                            logging.debug('updating chain')
                            if i == 1:
                                sqldb.replaceChain(n)
                                self.bchain.addBlocktoBlockchain(n)
                            else:
                                if (i == rBlock.index):
                                    #print('INSERIR rBLock', rBlock.index)
                                    sqldb.writeChain(rBlock)
                                else:
                                    # re-link: the block at i must be the
                                    # parent of the block at i+1
                                    lastBlock = sqldb.dbtoBlock(
                                        sqldb.blockQuery(['', i + 1]))
                                    actualBlock = sqldb.dbtoBlock(
                                        sqldb.blockQuery(['', i]))
                                    #print('LAST BLOCK INDEX', lastBlock.index)
                                    #print('LAST BLOCK PREV_HASH', lastBlock.prev_hash)
                                    #print('ACTUAL BLOCK INDEX', actualBlock.index)
                                    #print('ACTUAL BLOCK CHAIN', actualBlock.hash)
                                    if (lastBlock.prev_hash != actualBlock.hash):
                                        # wrong branch at i: find the sibling
                                        # whose hash matches the child's prev_hash
                                        search = sqldb.blockQueryFork(['', i])
                                        for j in search:
                                            value = sqldb.dbtoBlock(j)
                                            if (value.hash == lastBlock.prev_hash):
                                                sqldb.replaceChain(j)
                        # reload the repaired range into the in-memory chain
                        for i in xrange(n.index, last.index + 2):
                            block = sqldb.dbtoBlock(sqldb.blockQuery(['', i]))
                            self.bchain.addBlocktoBlockchain(block)
                        #n = sqldb.forkUpdate(i)
                        #if(i == rBlock.index-1):
                        #    fork = sqldb.dbtoBlock(sqldb.blockQuery(['',i]))
                        #    print('SUBSTITUINDO O FORK')
                        #    print('FORK HASH', fork.hash)
                        #    print('rBlock PREV HASH', rBlock.prev_hash)
                        #    if(fork.hash != rBlock.prev_hash):
                        #        print('SUBSTITUIR')
                        #        n = sqldb.blockQueryFork(['',i])
                        #        for j in n:
                        #            value = sqldb.dbtoBlock(j)
                        #            if (value.hash == rBlock.prev_hash):
                        #                sqldb.replaceChain(j)
                        #                self.bchain.addBlocktoBlockchain(sqldb.dbtoBlock(j))
                        #    else:
                        #        print('NAO SUBSTITUIR')
                        #        self.bchain.addBlocktoBlockchain(fork)
                        #else:
                        #    sqldb.replaceChain(n)
                        #    self.bchain.addBlocktoBlockchain(sqldb.dbtoBlock(n))
                        teste = self.bchain.getLastBlock()
                        #print('ULTIMO BLOCO DEPOIS DE INSERIR OS BLOCOS DA NOVA CADEIA', teste.index)
                        self.synced = True
        else:
            # more than one block behind: fetch and validate the whole range
            if (validations.validateBlockHeader(rBlock)
                    and validations.validateChallenge(rBlock, self.stake)):
                print('BLOCO RECEBIDO > 1 ON SYNC FUNCTION')
                chain = self.reqBlocks(last.index + 1, rBlock.index, address)
                if chain:
                    # validate and write
                    b_error, h_error = validations.validateChain(
                        self.bchain, chain, self.stake)
                    # update last block
                    last = self.bchain.getLastBlock()
                    print('LAST BLOCK ON LOCAL CHAIN', last.index)
                    # if b_error is diffent to None
                    if b_error:
                        print('b_error', b_error.index)
                        # TODO review from next line, because it is strange
                        # if h_error is false and block index equal last block index plus one
                        if not h_error and b_error.index == last.index + 1:
                            print('FORK')
                            sqldb.writeBlock(b_error)
                            # trying to solve and pick a fork
                            n = self.recursiveValidate(b_error, address)
                            print('B_ERROR', b_error.index)
                            print("PONTO DO FORK:", n.index)
                            if n:
                                #self.bchain.chain.clear() # TODO change this and refactor
                                #remove all blocks after fork point
                                teste = self.bchain.getLastBlock()
                                print('BCHAIN BEFORE POPLEFT', teste.index)
                                for i in xrange(n.index, last.index + 1):
                                    self.bchain.chain.popleft()
                                teste = self.bchain.getLastBlock()
                                print('BCHAIN AFTER POPLEFT', teste.index)
                                #insert new blocks starting on n block
                                # same downward re-link walk as in the
                                # single-block fork branch above
                                for i in xrange(last.index + 1, n.index - 1, -1):
                                    logging.debug('updating chain')
                                    if i == 1:
                                        sqldb.replaceChain(n)
                                        self.bchain.addBlocktoBlockchain(n)
                                    else:
                                        if (i == b_error.index):
                                            print('INSERIR b_error', b_error.index)
                                            sqldb.writeChain(b_error)
                                        else:
                                            lastBlock = sqldb.dbtoBlock(
                                                sqldb.blockQuery(['', i + 1]))
                                            actualBlock = sqldb.dbtoBlock(
                                                sqldb.blockQuery(['', i]))
                                            if (lastBlock.prev_hash != actualBlock.hash):
                                                search = sqldb.blockQueryFork(['', i])
                                                for j in search:
                                                    value = sqldb.dbtoBlock(j)
                                                    if (value.hash == lastBlock.prev_hash):
                                                        sqldb.replaceChain(j)
                                # reload the repaired range into memory
                                for i in xrange(n.index, last.index + 2):
                                    block = sqldb.dbtoBlock(sqldb.blockQuery(['', i]))
                                    self.bchain.addBlocktoBlockchain(block)
                                #for i in xrange(n.index,last.index+1):
                                #    logging.debug('updating chain')
                                #    if i == 1:
                                #        sqldb.replaceChain(n)
                                #        self.bchain.addBlocktoBlockchain(n)
                                #    else:
                                #        n = sqldb.forkUpdate(i)
                                #        sqldb.replaceChain(n)
                                #        self.bchain.addBlocktoBlockchain(sqldb.dbtoBlock(n))
                                #validations.validateChain(self.bchain, chain, self.stake)
                                self.synced = True
                        else:
                            logging.debug('invalid')
                            # request again
                            chainnew = self.reqBlocks(
                                b_error.index, b_error.index, address)
                            new = chainnew[0]
                            new = sqldb.dbtoBlock(new)
                            #print('NEW RETURN SYNC', new.index)
                            self.sync(new)
                    else:
                        # whole range validated cleanly
                        self.synced = True
                else:
                    # peer returned nothing; consider ourselves synced
                    self.synced = True
    logging.debug('synced')
def listen(self):
    """ Listen to block messages in a SUB socket
    Message frames: [ 'block', ip, block data ]

    PoS variant of the receive loop: runs until the kill event ``self.k``
    is set, stamps each block with its arrival time, fully validates
    tip+1 blocks (block/round/challenge/expected-round) before appending
    and rebroadcasting, triggers sync() on gaps or post-fork blocks, and
    records same-round same-index siblings as double entries.
    """
    self.bind(self.psocket)
    while True and not self.k.is_set():
        try:
            msg, ip, block_recv = self.subsocket.recv_multipart()
            self.f.clear()  # signal mine() that a block is being processed
            newChain = False  # NOTE(review): never read afterwards
            # serialize
            # NOTE(review): pickle.loads on peer data is unsafe against
            # malicious input -- consider a safer codec.
            b = pickle.loads(block_recv)
            logging.info("Got block %s miner %s" % (b.hash, ip))
            # record arrival time as a unix timestamp (local time)
            b.arrive_time = int(
                time.mktime(datetime.datetime.now().timetuple()))
            # Verify block
            if validations.validateBlockHeader(b):
                logging.debug('valid block header')
                lb = self.bchain.getLastBlock()
                if (b.index - lb.index == 1):
                    # candidate directly extends our tip
                    print('BLOCK', b.index)
                    print('VALIDATEBLOCK', validations.validateBlock(b, lb))
                    print('VALIDATEROUND', validations.validateRound(b, self.bchain))
                    print('VALIDATECHALLENGE', validations.validateChallenge(b, self.stake))
                    self.e.set()  # abort any in-flight local mining round
                    if (validations.validateBlock(b, lb)):
                        if (validations.validateRound(b, self.bchain)
                                and validations.validateChallenge(b, self.stake)
                                and validations.validateExpectedRound(b, lb)):
                            # all PoS checks passed: accept and persist
                            #print('NOVO BLOCO RECEBIDO---ACEITO SEM PROBLEMAS')
                            self.bchain.addBlocktoBlockchain(b)
                            sqldb.writeBlock(b)
                            sqldb.writeChain(b)
                    else:
                        # does not chain onto our tip: valid block on a
                        # different branch -> resynchronize from peer
                        if (validations.validateRound(b, self.bchain)
                                and validations.validateChallenge(b, self.stake)
                                and validations.validateExpectedRound(b, lb)):
                            print('BLOCO RECEBIDO APOS O FORK-', b.index)
                            self.synced = False
                            self.sync(b, ip)
                    # rebroadcast
                    logging.debug('rebroadcast')
                    self.psocket.send_multipart(
                        [consensus.MSG_BLOCK, ip, pickle.dumps(b, 2)])
                    #self.e.clear()
                elif b.index - lb.index > 1:
                    # we are behind by more than one block: full sync
                    print('BLOCO RECEBIDO INDEX MAIOR QUE 1', b.index)
                    self.e.set()
                    self.synced = False
                    self.sync(b, ip)
                    self.e.clear()
                elif b.index == lb.index:
                    if b.hash == lb.hash:
                        logging.debug('retransmission')
                    else:
                        if (b.round == lb.round):
                            # same index and round but different hash:
                            # competing sibling block
                            logging.debug('possible fork')
                            pre_block = sqldb.dbtoBlock(
                                sqldb.blockQuery(['', lb.index - 1]))  #get the block that b and lb point.
                            if (validations.validateBlock(b, pre_block)
                                    and validations.validateChallenge(b, self.stake)
                                    and validations.validateExpectedRound(b, pre_block)):
                                # double entry
                                sqldb.writeBlock(b)
                else:
                    # ignore old block
                    logging.debug('old')
            else:
                logging.debug('invalid block')
            # self.f.set()
        except (zmq.ContextTerminated):
            # context shut down: exit the listen loop
            break