def tryInstruction(hash):
    # Test that this instruction works in a candidate block.
    # TODO - clean up the state once the list of instructions in the candidate
    # block is finalised (done when processing the next block)
    logging.debug(f'trying instruction with hash {hash}')
    instruction = getInstruction(hash)
    if instruction is None:
        logging.error(f'Tried an instruction for candidate block that was not in the pool {hash}')
        # TODO: this error string is truthy, so callers that truth-test the
        # result (e.g. generateNextCircle) will treat it as a valid instruction
        return f'ERROR: no instruction with {hash} in pool'

    args = []
    keys = []
    args.append('mining')
    args.append(instruction['instructionHash'])
    args.append(redisUtilities.getBlockHash())  # latest block we would roll back to
    args.extend(instruction['instruction']['args'])
    keys.append(instruction['instruction']['sender'])
    keys.extend(instruction['instruction']['keys'])

    logging.debug(f'Instruction retrieved is {instruction}\n')

    luaHash = instruction['instruction']['luaHash']
    logging.debug(f'luaHash for instruction is {luaHash}')
    logging.debug(f'keys are {keys}, args are {args}')

    output = red.execute_command("EVALSHA", luaHash, len(keys), *(keys + args))
    logging.debug(f'output is {output}')

    if output[0] == 0:
        return False  # we have rejected the instruction; it needs to be removed from the block
    else:
        return True
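
# Hedged sketch (not part of the original module): tryInstruction assumes every
# instruction script replies with a two-element array whose first entry is a
# 0/1 status and whose second entry is a reason string, since it truth-tests
# output[0]. The hypothetical helper below just illustrates that assumed
# contract; the real scripts may differ.
def _exampleScriptReply(accepted, reason=""):
    # mirrors the [status, message] shape tryInstruction inspects
    return [1 if accepted else 0, reason]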
def rollBack(to):
    # set up the block pipe to queue the transaction
    pipe = red.pipeline(transaction=True)

    # First roll back the state.
    # The sha is currently hardcoded, but TODO it needs to live in a set of
    # scripts (not an instruction, rather a helper script)
    luaHash = '6826d23d0d3b10dc89f76f80869eb111e4841bc9'

    # Now roll back the blocks:
    currBlock = redisUtilities.getBlockHash()
    # peek at the oldest stored block (pop it and push it straight back)
    endBlock = red.rpop("blocks")
    red.rpush("blocks", endBlock)
    while currBlock != to:
        keys = []
        args = []
        args.append(currBlock)

        # Roll back the state through a Lua script so the state update is 100% pass / fail
        pipe.evalsha(luaHash, len(keys), *(keys + args))

        # TODO: put all of the below into Lua, and even roll back multiple blocks there.
        # We should be able to use LPOP here because we remove in order from the
        # latest block back to the 'to' block. LREM ensures the right one is
        # removed, but if it is logically impossible to remove the wrong one,
        # LPOP would be more efficient.
        pipe.lrem("blocks", 0, currBlock)
        pipe.srem("blockSet", currBlock)

        # If we removed blocks from our recent-block queue to add the block(s)
        # we are now rolling back, we should re-add those old blocks onto the
        # end of the recent-block structure. (Note: the removals above are only
        # queued on the pipeline, so this LLEN sees the pre-rollback length.)
        if red.llen("blocks") >= int(red.hget("state", "numBlocksStored")):
            # if not at full size, the block will already be in the block set
            endBlock = redisUtilities.getPreviousBlock(endBlock)
            pipe.rpush("blocks", endBlock)
            pipe.sadd("blockSet", endBlock)

        currBlock = redisUtilities.getPreviousBlock(currBlock)

    pipe.hset("state", "latestBlock", to)
    pipe.hset(to, "nextBlock", "None")

    # Provided the block is in the last n blocks stored in redis, all of this
    # will already be in redis, so it does not need to be re-added.
    # newBlockPipe.hset(id, "previousBlock", newBlock.getPreviousBlock())
    # newBlockPipe.hset(newBlock.getPreviousBlock(), "nextBlock", id)
    # newBlockPipe.hset(id, "circleDistance", newBlock.getCircleDistance())
    # newBlockPipe.hset(id, "blockHeight", newBlock.getBlockHeight())
    # newBlockPipe.hset(id, "outputMatrix", json.dumps(newBlock.getOutputMatrix()))

    # execute
    pipe.execute()
    return True
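
# Hedged sketch (illustration only): rollBack walks previousBlock pointers from
# the current head back to the 'to' block. The pure-Python model below shows
# the traversal order on a toy chain held in a dict; 'prev' is a stand-in for
# redisUtilities.getPreviousBlock, not the real API.
def _rollbackOrder(prev, head, to):
    """Return the block hashes rollBack would undo, newest first.

    >>> prev = {'c': 'b', 'b': 'a'}
    >>> _rollbackOrder(prev.get, 'c', 'a')
    ['c', 'b']
    """
    undone = []
    current = head
    while current != to:
        undone.append(current)
        current = prev(current)
    return undone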
def executeInstruction(hash, blockHeight=0, pipe=None):
    # This routine is not used directly in production, but in setup and for
    # testing - only as part of a mined block.
    instruction = getInstruction(hash)
    if instruction is None:
        return f'ERROR: no instruction with {hash} in pool'

    logging.debug(f'\n instruction retrieved is {instruction}\n')

    # TODO: confirm the instruction has a unique nonce (in Lua) - and that it extends the previous one
    # TODO: check the syntax of the instruction (number of fields). Fail if not set up properly.
    # TODO: create an InstructionException and throw it for the different failure
    # reasons rather than returning False. The Lua failure reasons can then be propagated.
    args = []
    keys = []
    args.append('mined')
    args.append(instruction['instructionHash'])
    # append blockHeight - we don't create a rollback state
    # TODO update scripts for this
    args.append(redisUtilities.getBlockHash())  # latest block we would roll back to
    args.extend(instruction['instruction']['args'])
    keys.append(instruction['instruction']['sender'])
    keys.extend(instruction['instruction']['keys'])

    # TODO: as part of instruction validation, is this a valid hash we accept as an instructionType?
    luaHash = instruction['instruction']['luaHash']

    if pipe is None:
        # this is not in a block, so just execute the instruction
        output = red.evalsha(luaHash, len(keys), *(keys + args))
        if output[0] == 0:
            logging.error(f'ERROR in executing instruction : {output[1]}')
            return False
    else:
        # queue in the pipeline - no response, as nothing has executed yet
        pipe.evalsha(luaHash, len(keys), *(keys + args))

    return True
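
# Hedged sketch (illustration only): when applying a mined block, the intent is
# to queue every instruction on one transactional pipeline so the block's state
# update is all-or-nothing. _applyMinedBlock is a hypothetical wrapper showing
# that usage; it is not part of the original module.
def _applyMinedBlock(instructionHashes, blockHeight=0):
    pipe = red.pipeline(transaction=True)
    for instructionHash in instructionHashes:
        # queue each instruction; nothing runs until pipe.execute()
        executeInstruction(instructionHash, blockHeight, pipe=pipe)
    pipe.execute()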
def retrieveBlock():
    global networkOn

    # Testing parameter - is the network on?
    if not networkOn:
        response = {'network': f'{networkOn}'}
        return jsonify(response), 400

    logging.info("return the hash of the block at the top of our chain and the block height")

    # TODO generic function for returning all the blocks and contents
    try:
        response = {
            'lastBlock': redisUtilities.getBlockHash(),
            'blockHeight': redisUtilities.getBlockHeight(),
            'circleDistance': redisUtilities.getCircleDistance()
        }
    except RedisError as error:
        # report the failure rather than returning 200 with an error body
        response = f"Redis error: {error}"
        return jsonify(response), 500

    return jsonify(response), 200
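
# Hedged sketch: a successful retrieveBlock response is assumed to look like
# the JSON below (field values are illustrative, not real data):
#   {
#       "lastBlock": "9f2c...",        # hash of the chain head
#       "blockHeight": 1042,           # current chain length
#       "circleDistance": "00ff..."    # hex distance of the head block
#   }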
def processBlock(self, blockID):
    agentResponse = {}
    agentResponse['success'] = True

    logging.debug("new block published, retrieve, validate and process it")

    # TODO - optimise rejection if not the latest block
    newBlock = parseBlock(blockID, self.entityInstructions)

    # is the block valid?
    if not newBlock.getBlockPass():
        agentResponse['message'] = {
            'chainLength': redisUtilities.getBlockHeight(),
            'lastBlock': redisUtilities.getBlockHash(),
            'error': newBlock.getBlockComment()
        }
        agentResponse['success'] = False
        return agentResponse

    # TODO: forking - if forks can be of depth > 1 before the agent sees a
    # block in the fork, then parseBlock will need to be updated
    if redisUtilities.getNextBlock(newBlock.getPreviousBlock()) is not None:
        # there is some kind of fork
        logging.info('A FORK HAS BEEN DISCOVERED!')
        if (redisUtilities.getNextBlock(newBlock.getPreviousBlock()) ==
                redisUtilities.getBlockHash()):
            # circle distance is calculated in parseBlock relative to the named
            # previous block, so all we need to do is retrieve and compare the
            # circle distance values of the two competing blocks
            if redisUtilities.getCircleDistance() <= int(newBlock.getCircleDistance(), 16):
                agentResponse['message'] = {
                    'chainLength': redisUtilities.getBlockHeight(),
                    'lastBlock': redisUtilities.getBlockHash(),
                    'error': 'newBlock was a fork on the current block and the current block had a smaller circle distance'
                }
                agentResponse['success'] = False
                return agentResponse
            else:
                blockUtilities.rollBack(newBlock.getPreviousBlock())
        else:
            # get the weighted circle distance and compare; potentially rule out.
            # d + d*(heightDiff - 1) reduces to d*heightDiff; the distance is a
            # hex string, so convert before multiplying
            heightDiff = redisUtilities.getHeightDiff(newBlock.getPreviousBlock())
            newBlockWeightedCircleDistance = int(newBlock.getCircleDistance(), 16) * heightDiff
            if (redisUtilities.getWeightedCircleDistance(newBlock.getPreviousBlock())
                    < newBlockWeightedCircleDistance):
                agentResponse['message'] = {
                    'chainLength': redisUtilities.getBlockHeight(),
                    'lastBlock': redisUtilities.getBlockHash(),
                    'error': 'newBlock was a fork on the current block and the current chain had a smaller weighted circle distance'
                }
                agentResponse['success'] = False
                return agentResponse
            else:
                blockUtilities.rollBack(newBlock.getPreviousBlock())

    # Normal processing: the new block builds on our chain. READ NOTES
    # execute instructions on the block state and update the block state to the latest
    try:
        blockUtilities.addNewBlock(newBlock)
    except BlockError as bError:
        logging.error(f'Block parsing failed - error {bError.reason} for block id {bError.id}')
        agentResponse['message'] = {
            'chainLength': redisUtilities.getBlockHeight(),
            'lastBlock': redisUtilities.getBlockHash(),
            'error': f'block failed to process - {bError.reason}'
        }
        agentResponse['success'] = False
        return agentResponse

    logging.info(f'\n ** NEW BLOCK PUBLISHED. ** Block distance = {newBlock.getCircleDistance()}\n')

    # TODO: the next circle could have a race condition for a promoted agent.
    # Agents need to be some N blocks old before being eligible (to stop the race condition).
    logging.debug(f'New block output matrix is {newBlock.getOutputMatrix()}')

    # move to RQ worker
    blockUtilities.generateNextCircle()

    agentResponse['message'] = {
        'chainLength': newBlock.getBlockHeight(),
        'lastBlock': newBlock.getBlockHash(),
        "circleDistance": newBlock.getCircleDistance(),
        'message': newBlock.getBlockComment(),
        "error": ("No error checking candidate block" + str(redisUtilities.getCandidateBlocks()))
    }
    return agentResponse
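
# Hedged sketch (illustration only): the fork rule above weights a competing
# block's circle distance by how many blocks it would roll back. Because
# d + d*(h-1) == d*h, the weighting reduces to a multiplication, as this
# hypothetical helper and its doctest show.
def _weightedCircleDistance(circleDistanceHex, heightDiff):
    """
    >>> _weightedCircleDistance('0a', 3)  # 0x0a = 10, rolled back 3 blocks
    30
    """
    return int(circleDistanceHex, 16) * heightDiff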
def generateNextCircle():
    logging.debug("in generateNextCircle")

    # it should always be the latest block that you generate the next circle from
    circle = nextCircle(redisUtilities.getOutputMatrix())  # no excluded agents for now; matrix of the last block
    logging.debug(f'next circle outputMatrix is {circle}')

    # Am I in the circle?
    if redisUtilities.getMyID() not in circle:
        # what should happen if not in the next circle?
        logging.info("I AM NOT IN THE NEXT CIRCLE.")
        return

    logging.debug("Agent is in next circle")

    # gather and check instructions
    possibleInstructions = redisUtilities.getInstructionHashes()
    logging.debug(f'possible instructions are {possibleInstructions}')
    validInstructions = []
    validInstructionHashes = []

    # TODO: clear any mining pool left over from previous iterations
    # (clearMining.lua script). Hardcoded for now, but put in redisUtilities.
    luaHash = 'b5ef661e48d6306417d1f645c358f3d98a6148a1'
    red.evalsha(luaHash, 0)

    # TODO: catch around this. If an instruction fails on a non-matching script it should be removed.
    for instructionHash in possibleInstructions:
        if blockUtilities.tryInstruction(instructionHash):
            logging.debug('instruction was valid')
            validInstructionHashes.append(instructionHash)
            validInstructions.append(redisUtilities.getInstruction(instructionHash))

    logging.debug(f'valid instructions are: {validInstructionHashes}')
    if len(validInstructionHashes) == 0:
        logging.info("there are no valid instructions and so no valid block")
        # TODO - do we broadcast no valid block? Or do we pause and retry in 10s?
        return

    # TODO: global static (in Redis?) for the random number size and the number of agents in the CIRCLE
    myRandoms = [g for g in encryptionUtilities.getRandomNumbers(32, 5)]
    logging.debug(f'myRandoms are {myRandoms}')
    mySeed = encryptionUtilities.getRandomNumber(32)
    logging.debug(f'mySeed is {mySeed}')
    mySeededRandomHash = encryptionUtilities.getHashWithSeed(myRandoms, mySeed)
    logging.debug(f'seeded hash is {mySeededRandomHash}')

    convergenceHeader = {
        "previousBlock": redisUtilities.getBlockHash(),
        "instructionsMerkleRoot": encryptionUtilities.returnMerkleRoot(validInstructionHashes),
        "instructionCount": len(validInstructions),
        "blockHeight": (redisUtilities.getBlockHeight() + 1),
        "randomNumberHash": mySeededRandomHash
    }
    logging.debug(f'convergenceHeader is {convergenceHeader}')

    signature = encryptionUtilities.signMessage(
        encryptionUtilities.getHashofInput(convergenceHeader),
        redisUtilities.getMyPrivKey())
    logging.debug(f'signature is {signature}')
    blockSignatures = [{redisUtilities.getMyID(): signature}]
    logging.debug(f'blockSignatures is {blockSignatures}')

    proposedBlock = {
        "convergenceHeader": json.dumps(convergenceHeader),
        "blockSignatures": blockSignatures,
        "instructions": json.dumps(validInstructions),
        "broadcaster": redisUtilities.getMyID()
    }

    logging.info(f'Proposed Block for initial convergence is {proposedBlock}')

    # Write these to the blockchain? (or after all done?)
    # TODO: set up the signature for the convergence header
    # TODO: create proposedBlock and convergence header. Broadcast the proposed
    # block. Need to look up the addresses of the circle agents.
    # TODO: also update the consensus emulator to emit the new block type structure.
    # TODO: update NODE to accumulate the latest block (same convergence header) with all the random numbers
    # GREG: I think this is where we emulate the full block creation?
    approval = consensusEmulator.proposeConvergenceHeader(proposedBlock, circle)
    # (proposedBlock, broadcaster, signature, circle, randomHashes)

    convergenceHeader = approval['header']
    signatures = approval['signatures']
    broadcaster = approval['broadcaster']
    validInstructions = approval['validInstructions']
    circleAgents = approval['agentInfo']

    # GREG: do a Lucid Chart on how the circle converges

    # Set up the candidate structure and post it to our convergenceProcessor to
    # kick off the convergence process
    candidate = {}
    # candidate['blocksize'] = 0  # TODO
    candidate['blockHeader'] = {
        "version": "entityVersionUsed",  # TODO
        "staticHeight": "height below which a fork is not allowed",  # TODO
        "convergenceHeader": convergenceHeader,
        "consensusCircle": circleAgents,
        "blockSignatures": signatures,
    }
    candidate["blockHash"] = encryptionUtilities.getHashofInput(candidate['blockHeader'])
    candidate["blockOriginatedAgent"] = broadcaster
    candidate["instructions"] = validInstructions

    logging.debug(f'candidate = {candidate}')

    # writing out a file doesn't work because we are in the RQ worker container
    distributeBlock(candidate)
    return
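
# Hedged sketch (illustration only): returnMerkleRoot's implementation is not
# shown in this module; a minimal pairwise-sha256 Merkle root over hex-encoded
# instruction hashes could look like the helper below. Promoting an odd leaf
# unchanged is an assumption here, not confirmed behaviour of encryptionUtilities.
import hashlib

def _merkleRootSketch(hashes):
    """
    >>> a = hashlib.sha256(b'a').hexdigest()
    >>> _merkleRootSketch([a]) == a
    True
    """
    level = list(hashes)
    while len(level) > 1:
        nextLevel = []
        for i in range(0, len(level) - 1, 2):
            # hash each adjacent pair of hex digests together
            pair = (level[i] + level[i + 1]).encode()
            nextLevel.append(hashlib.sha256(pair).hexdigest())
        if len(level) % 2 == 1:
            # promote the odd leaf unchanged (assumption)
            nextLevel.append(level[-1])
        level = nextLevel
    return level[0]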