def handleChainBackTracking(self, peer):
    """Run the fork back-tracking recovery against *peer*.

    Shields callers from any exception raised by handleBackTracking and
    always clears the global 'checkingChain' flag before returning the
    result (or an error message on failure).
    """
    try:
        result = self.handleBackTracking(peer)
    except Exception:
        d("Major issue in backtracking, stopped back tracking")
        result = errMsg("Exception raised, invalid peer claim")
    finally:
        # success or failure, the chain-check lock must be released
        m_cfg['checkingChain'] = False
    return result
def asAsynchPost(self, ptype, url, data, cnt):
    """POST *data* to active peers of group *ptype* (e.g. 'activePeers').

    Peers are visited in random order; inactive ones are skipped. Stops
    once more than m_cfg['minPeers'] sends succeeded. Failures bump the
    peer's 'numberFail' counter. Returns the updated send count.
    """
    for peer in self.randomOrderFor(m_cfg[ptype]):
        entry = m_cfg[ptype][peer]
        if entry['active'] is not True:
            continue  # skip peers currently marked inactive
        try:
            d("asynch: " + peer + url + str(data))
            self.doPOSTx(peer + url, data)
            cnt += 1
            if cnt > m_cfg['minPeers']:
                break
        except Exception:
            entry['numberFail'] += 1
            d("Exception occurred when trying to asynch: " + url + " type " + str(ptype))
    return cnt
def getNextBlock(self, peer, offset):
    """Fetch from *peer* the block at index len(m_Blocks)+offset.

    Returns (block_dict, 200) when the peer answered with a block whose
    required fields are present and whose index matches the request;
    otherwise (error_string, -1). Callers test the -1 status.
    """
    if peer and not peer.endswith("/"):
        peer = peer + "/"
    wanted = len(m_Blocks) + offset
    try:
        res, stat = c_peer.sendGETToPeer(peer + "blocks/" + str(wanted))
    except Exception:
        d("peer failed" + peer)
        return "Block not available at " + peer, -1  # do not change it is checked outside
    if stat == 200:
        d("got peer block as requested with OK")
        missing, _, _ = checkRequiredFields(res, m_genesisSet[0], [], False)
        if len(missing) == 0 and res['index'] == wanted:
            return res, stat
    return "Block not available/valid at " + peer, -1  # do not change it is checked outside
def suitablePeer(self, peer, fastTrack=False):
    """Probe *peer* via GET /info and decide whether it is a usable peer.

    Returns the parsed /info reply dict on success, or a one-key status
    dict ({'wrongChain'|'wrongType'|'fail'|...: True}) on rejection.
    Side effects: may delete the peer from m_cfg['peers'] and may kick off
    a chain-situation check / peer notification on the block handler.
    """
    try:
        s1 = self.doGET(peer + "/info", fastTrack)
        reply = json.loads(s1.text)
        # validate the reply against the expected /info schema;
        # "chainId" and "about" are matched as fixed values as well
        m, l, f = checkRequiredFields(reply, m_info, ["chainId", "about"], False)
        if (len(f) > 0) or (len(m) > 0):
            # we skip to check any additional fields, is that OK
            print("Peer " + peer + " reply not accepted, considered not alive")
            # as it is invalid peer, don't try again
            # NOTE(review): the counter is bumped and then the entry deleted,
            # so the increment is effectively dead work — confirm intent
            m_cfg['peers'][peer]['wrongType'] = m_cfg['peers'][peer]['wrongType'] + 1
            del m_cfg['peers'][peer]
            m_info['peers'] = len(m_cfg['peers'])
            return {'wrongChain': True}
        if peer != reply['nodeUrl']:
            # peer must self-report the URL we contacted it on
            return {'Inconsistency in nodeURL': True}
        if not self.ensureBCNode(reply['type']):
            return {'wrongType': True}
        if isBCNode():
            if m_cfg['chainLoaded'] is True:
                # peer claims a longer chain or a different top hash
                if (reply['blocksCount'] > len(m_Blocks)) or (reply['blockHash'] != m_Blocks[-1]['blockHash']):
                    # ignore the return data from the situational check as here is primarily peer
                    # and informing the blockmanager about potential peers is secondary here
                    d("info showed longer block or different hash:" + str(reply['blocksCount']) + " " + str(len(m_Blocks)) + " " + reply['blockHash'] + " " + m_Blocks[-1]['blockHash'])
                    project.classes.c_blockchainNode.c_blockchainHandler.checkChainSituation('info', reply)
                    if reply['blocksCount'] < len(m_Blocks):
                        # we found a laggard, let him know we are further
                        project.classes.c_blockchainNode.c_blockchainHandler.asynchNotifyPeers()
        return reply
    except Exception:
        # any transport/parse failure marks the peer as failed for the caller
        return {'fail': True}
def makeDelay(self, url, json, isAsyncPost):
    """Register a request for visual/tracked delayed delivery.

    When tracking is enabled and the URL matches the configured pattern,
    the request is queued in m_Delay under a random delay ID, which is
    returned. Returns -1 when nothing was queued (no match, tracking
    off, or an error occurred).
    """
    try:
        if m_cfg['canTrack'] is True:
            if m_visualCFG['active'] is True:
                urlc = url
                if url.startswith("http"):
                    # BUG FIX: skip exactly past the "//" separator (2 chars);
                    # the previous "+ 3" also dropped the first host character
                    urlc = url[url.index("//") + 2:]
                # pattern "." means match everything, else a compiled regex
                if (m_visualCFG['pattern'] == ".") or (m_visualCFG['pattern'].search(urlc)):
                    delayId = random.randint(100, 10000000)  # renamed: 'id' shadowed the builtin
                    m_Delay.append({
                        "delayID": delayId,
                        "url": url,
                        "json": json,
                        "asynchPOST": isAsyncPost
                    })
                    return delayId
    except Exception:
        # BUG FIX: isAsyncPost is a bool — the old bare concatenation raised
        # TypeError inside this handler and escaped the function
        d("Make delay raised exception on: " + url + " is asynch:" + str(isAsyncPost))
    return -1
def nodeSpecificGET(self, url, linkInfo):
    """Dispatch a GET *url* to the node-type-specific interface.

    Waits (bounded) while POSTs are being processed or the chain is
    initialising, takes the simple URL lock, dispatches to the BC node /
    wallet / faucet / genesis interface, and always releases the lock.
    Returns the interface's reply dict, or a generic error dict.
    """
    ret = {
        'NodeType': m_info['type'],
        'info': "This URL/API is not available/broken"
    }
    try:
        if self.permittedURLGET(url) is False:
            return errMsg("This URL/API is invalid or not available. " + url)
        if "chainInit" not in m_cfg:
            m_cfg['chainInit'] = False  # backward compatible
        # bounded wait: do not stall GETs forever behind POST processing
        maxWait = 12
        while (len(m_isPOST) > 0) or (m_cfg['chainInit'] is True):
            if url == "/info":
                # this is needed for peers to cross reference each other
                break
            if url.startswith("/blockBalances"):
                # this is needed for peers to cross reference each other
                break
            if self.delay(url, 1) is False:
                break  # for some reason we decide to ignore the lock
            maxWait = maxWait - 1
            if maxWait < 0:
                print("Console maxwait for chain update reached, just go ahead now ....")
                break
        # take the simple lock so POSTs wait for us (see nodeSpecificPOST)
        m_simpleLock.append(url)
        if isBCNode():
            ret = self.c_blockInterface.nodeSpecificGETNow(url, linkInfo)
        elif isWallet():
            ret = self.c_walletInterface.nodeSpecificGETNow(url, linkInfo)
        elif isFaucet():
            ret = self.c_faucetInterface.nodeSpecificGETNow(url, linkInfo)
        elif isGenesis():
            ret = self.c_genesisInterface.nodeSpecificGETNow(url, linkInfo)
    except Exception:
        d("*********************************************")
        d("*********************************************")
        print("GET exception caught, isPoststack " + str(len(m_isPOST)))
        d("*********************************************")
        d("*********************************************")
    # always release the lock, even after an exception
    if url in m_simpleLock:
        m_simpleLock.remove(url)  # maybe need to check for being there, then need to add random to URL
    return ret
def changeNonceDate():
    """Refresh the mining candidate: new future timestamp, rebuilt fixed
    hash prefix ('blockDataHash|dateCreated|') and a random start nonce.

    Also resets cfg['countSame'] so the pull loop starts counting again.
    """
    d("change nonce")
    cfg['countSame'] = 0
    # TODO how to realistically estimate solution date, what are permitted differences?
    diffFactor = newCandidate['difficulty'] - 4
    stamp = getFutureTime(diffFactor * diffFactor * 10)
    newCandidate['dateCreated'] = stamp
    cfg['dateCreated'] = stamp
    newCandidate['fixDat'] = "|".join([newCandidate['blockDataHash'], stamp]) + "|"
    # randomise the start so parallel miners don't all walk the same nonce range
    newCandidate['nonce'] = random.randint(0, cfg['maxNonce'] - 1)
    d("Start Nonce " + str(newCandidate['nonce']))
    d("nonce done")
def asynchPOST(self, url, data):
    """Broadcast *data* to at least one peer via POST *url*.

    Tries the active peers first, then the share-to peers when fewer than
    m_cfg['minPeers'] sends succeeded. Retries up to 5 times with a 2s
    pause while nothing at all was sent, then gives up. Best effort:
    exceptions are logged, never raised.
    """
    try:
        sent = 0
        attemptsLeft = 5
        while sent == 0:
            if url[0] != "/":
                url = "/" + url  # normalise to an absolute path
            sent = self.asAsynchPost('activePeers', url, data, sent)
            if sent < m_cfg['minPeers']:
                sent = self.asAsynchPost('shareToPeers', url, data, sent)
            if sent > 0:
                continue  # reached someone — loop condition ends the retries
            d("Asynch POST not sent to anyone, retryCnt: " + str(attemptsLeft) + " for: " + url)
            sleep(2)
            attemptsLeft -= 1
            if attemptsLeft <= 0:
                d("Give up trying to asynch at least one peer for:" + url)
                break
    except Exception:
        d("asynchPOST exception for " + url)
def getBlocksFromPeer(self, peer, upLimit, isAlert, gotBlock, retry, isCheck):
    """Pull blocks from *peer* one by one until caught up (or failed).

    upLimit  > 1 stops once the local chain reached that height; -1 means
             "until the peer has no more blocks".
    gotBlock a block already received (used on the first iteration instead
             of fetching), or {}.
    retry    number of non-200 replies tolerated before giving up.
    isCheck  when True, bail out immediately if a pull is already running.
    Returns "" on success, else a human-readable error string. Maintains
    the 'getMissingBlocks', 'chainLoaded' and 'checkingChain' flags.
    """
    if isCheck and (self.status['getMissingBlocks'] is True):
        d("Already checking on missing blocks, return empty")
        return ""
    try:
        self.status['getMissingBlocks'] = True
        d("Work on missing block: "+peer)
        while True:
            if (upLimit > 1) and (upLimit <= len(m_Blocks)):
                self.status['getMissingBlocks'] = False
                m_cfg['chainLoaded'] = True
                m_cfg['checkingChain'] = False
                d("Upper limit reached")
                return ""
            if len(gotBlock) > 0:
                # first round: use the block that was pushed to us
                stat = 200
                res = gotBlock
                d("directly use received block")
                gotBlock = {}
            else:
                d("get next block")
                res, stat = self.getNextBlock(peer, 0)
            if stat == 200:
                d("good block received")
                if res['difficulty'] < m_info['currentDifficulty']:
                    # NOTE(review): this exit leaves status['getMissingBlocks']
                    # True — looks like a stuck-flag path, confirm intent
                    m_cfg['checkingChain'] = False
                    d("Invalid difficulty")
                    return "Invalid difficulty in block detected"
                err = self.checkAndAddBlock(res, isAlert)
                if len(err) > 0:
                    # NOTE(review): same here — 'getMissingBlocks' stays True
                    m_cfg['chainLoaded'] = True
                    m_cfg['checkingChain'] = False
                    d("Invalid block")
                    return "Invalid block received"
            else:
                d("no 200 reply from node to get block")
                if stat == -1:
                    # special signal telling us no more blocks there
                    m_cfg['checkingChain'] = False
                    self.status['getMissingBlocks'] = False
                    return ""
                retry = retry - 1  # maybe block has not spread well yet
                if retry <= 0:
                    self.status['getMissingBlocks'] = False
                    m_cfg['chainLoaded'] = True
                    m_cfg['checkingChain'] = False
                    return "No valid information, stopped block updates."
    except Exception:
        d("Exception occured in getBlocksFromPeer")
        self.status['getMissingBlocks'] = False
        d("Checking failed in the end...")
        m_cfg['checkingChain'] = False
        # consider the chain loaded if we have at least one block
        m_cfg['chainLoaded'] = (len(m_Blocks) > 0)
        return "Verification failed"
def checkChainSituation(self, source, blockInfo):
    """Decide how to react to a peer's chain claim (*source* is 'info' or
    'notification'; *blockInfo* carries nodeUrl/blocksCount/cumulativeDifficulty
    and, for notifications, blockHash).

    Outcomes: keep the local chain (and notify peers), adopt the peer's top
    block, fetch missing blocks, or escalate to full back-tracking.
    """
    #We arrive here either because a block notification was sent,
    #or because our peer got an info claiming the peer has longer chain.
    # here we decide on the situation of whether blocks are simply added on top
    # or if something more compliucated is needed, e.g. go back the stack to find
    # common block and then recover shared TXs etc. etc.
    #These are shared among info and block notification
    # "nodeUrl": sender
    # "blocksCount": 25, "cumulativeDifficulty": 127
    # added by PDPCOin : blockHash for notification
    #only in info:
    # "confirmedTransactions": 208
    try:
        d("checking status due to " + source)
        peer = blockInfo['nodeUrl']
        if blockInfo['blocksCount'] < len(m_Blocks):
            d("stay with local chain anyway as it is longer than for " + peer)
            self.asynchNotifyPeers()
            return errMsg("Notified chain shorter than local current, current is:" + str(len(m_Blocks)))
        if source == "notification":
            if 'blockHash' in blockInfo:
                #PDPCCoin specific shortcut
                if blockInfo['blockHash'] == m_Blocks[-1]['blockHash']:
                    d("is the same, probably rebound...")
                    return setOK("Thank you for the notification.")
        # serialise chain checks: wait for any ongoing one to finish
        while m_cfg['checkingChain'] is True:
            d("Already checking chain status, so complete the first one")
            #return errMsg("Please wait for current synchronisation to complete...")
            sleep(1)
        m_cfg['checkingChain'] = True
        if blockInfo['blocksCount'] == len(m_Blocks):
            d("blocks on par, check next step with "+peer)
            #this means we have conflict on the same top block, probably parallel mined
            if blockInfo['cumulativeDifficulty'] < m_info['cumulativeDifficulty']:
                self.asynchNotifyPeers()
                m_cfg['checkingChain'] = False
                d("local difficulty higher, no change")
                return errMsg("Peers chain cumulativeDifficulty lower than local current, current is:" + str(m_info['cumulativeDifficulty']))
            else:
                # we have same height and same or lower difficulty, so we need to roll the dice for now
                # based on hash
                #get the actual block from peer
                res, stat = self.getNextBlock(peer, -1)
                if stat == 200:
                    # yoursbetter must not be based on the claim but based
                    # on the block data and its difficulty versus my cumulative
                    # else an attacker might cheat with high claim but low delivery
                    # 0) we ignore your cumDiff
                    # a) myDiff versus your blockDifficulty
                    # b) mycum-myDiff+yourDiff == your claimed cumDif
                    d("got peer block as requested with OK")
                    # We repeat the check here in case we had info instead of notification!
                    if res['blockHash'] == m_Blocks[-1]['blockHash']:
                        d("anyway the same")
                        m_cfg['checkingChain'] = False
                        return setOK("Thank you for the notification.")
                    # need to include difficulty in this decision
                    yoursBetter = blockInfo['cumulativeDifficulty'] > m_info['cumulativeDifficulty']
                    if yoursBetter is False:
                        yoursBetter = res['difficulty'] > m_Blocks[-1]['difficulty']
                    if yoursBetter is False:
                        d("same block difficulty")
                        #we are confirmed same same in all, so lets roll the deterministic dice
                        #by crossing the two inputs instead of checking umber of TX or value etc.,
                        #all of which could lead to easier rigging than dice
                        lstDice = ""
                        indexMy = 0
                        indexYou = 0
                        dice = "x"
                        # both sides sort the same 4 hashes, so both compute
                        # the same deterministic "dice" and agree on the winner
                        xst = [res['blockDataHash'], m_Blocks[-1]['blockHash'],
                               m_Blocks[-1]['blockDataHash'], res['blockHash']]
                        xst.sort()
                        for lst in xst:
                            lstDice = lstDice + lst
                        # re-roll until the dice char sits at different positions
                        # in the two competing block hashes
                        while indexMy == indexYou:
                            lstDice = lstDice + dice
                            d("roll the dice...")
                            dice = sha256StrToHex(lstDice)[0]
                            d("Dice value:" + str(dice))
                            # NOTE(review): .index raises ValueError if the dice
                            # char is absent from a hash — caught by the outer
                            # except and reported as a processing error; confirm
                            indexMy = m_Blocks[-1]['blockHash'].index(dice)
                            indexYou = res['blockHash'].index(dice)
                            d(str(indexMy) + " vs " + str(indexYou))
                        if (indexYou > indexMy):
                            yoursBetter = True
                        d("dice said yoursBetter :" + str(yoursBetter))
                    if yoursBetter is True:
                        if res['prevBlockHash'] != m_Blocks[-1]['prevBlockHash']:
                            d("hashes different need to settle backtrack")
                            return self.handleChainBackTracking(peer)
                        d("!!!we conceeded, add peers block from "+peer)
                        # swap our top block for the peer's; keep ours around
                        # so we can restore it if theirs fails validation
                        restor = m_Blocks[-1]
                        confirmRevertBalances(restor['transactions'])
                        del m_Blocks[-1]
                        err = self.checkAndAddBlock(res, True)
                        if len(err) > 0:
                            d("something was wrong, restore own previous block")
                            self.checkAndAddBlock(restor, True)
                            m_cfg['checkingChain'] = False
                            return errMsg("Invalid block received")
                    else:
                        self.asynchNotifyPeers()
                        d("local copy maintained after all")
                        i = 0
                    m_cfg['checkingChain'] = False
                    return setOK("Thank you for the notification.")
                m_cfg['checkingChain'] = False
                d("The reply did not have the correct fields")
                return errMsg("No proper reply, ignored")
        else:
            d("local chain appears shorter anyway, so just ask for up to block "+str(blockInfo['blocksCount']))
            # the peer claims to be ahead of us with at leats one block, so catch up until something happens
            # easy case just add the new block on top, and each block is checked fully, no backtrack
            res, stat = self.getNextBlock(peer, 0)
            if stat == 200:
                # backtrack into the stack!!!
                if res['prevBlockHash'] != m_Blocks[-1]['blockHash']:
                    d("hashes different need to settle backtrack")
                    return self.handleChainBackTracking(peer)
                if source == 'notification':
                    d("Sender want a reply, so process")
                    err = self.getMissingBlocksFromPeer(blockInfo['nodeUrl'], blockInfo['blocksCount'], True, res)
                    m_cfg['checkingChain'] = False
                    if len(err) > 0:
                        return errMsg(err)
                    return setOK("Thank you for the notification.")
                else:
                    d("this is info internal, so create thread and don't care result")
                    threadx = Thread(target=self.getMissingBlocksFromPeer,
                                     args=(blockInfo['nodeUrl'], blockInfo['blocksCount'], False, res))
                    threadx.start()
                    return setOK("No one sees this answer anyway, but in case, we are processing blocks")
            m_cfg['checkingChain'] = False
            return errMsg("Invalid block received")  # for info this is ignored anyway
    except Exception:
        m_cfg['checkingChain'] = False
        return errMsg("Processing error occurred")
def handleBackTracking(self, peer):
    """Resolve a detected chain fork against *peer*.

    Strategy: (1) widen a backwards search (doubling range) until a block
    hash shared with the peer is found; (2) locate the first differing
    block after it; (3) verify both sides agree on balances up to that
    point; (4) drop our divergent tail and re-pull blocks from the peer.
    Returns an OK dict on success, an error dict otherwise.
    """
    # now we now there is a chain fork, as some hash link is different at the end of our chain
    # we may have no block yet, or the blocks are same index yet conflict hash, but ut was decided
    # by length of chain or by roll of dice that the other chain has won, if it can remain consistent
    # 1. find first/deepest different block by binary increased range
    knownDiffIdx = len(m_Blocks)-1
    srchLen = 1
    start = knownDiffIdx
    hlen = len(defHash)
    while True:
        fromBlock = start-srchLen
        if fromBlock < 0:
            fromBlock = 0
        # ask peer for (index, hash) pairs of the range [fromBlock, fromBlock+srchLen-1]
        url = "/blocks/hash/"+str(fromBlock)+"/"+str(fromBlock + srchLen-1)+"/0"
        try:
            res, stat = c_peer.sendGETToPeer(peer + url)
        except Exception:
            return errMsg(d("Unsupported block claimed "))
        if stat != 200:
            return errMsg(d("Unsupported/Invalid block claimed"))
        if len(res) != srchLen:
            return errMsg(d("Insufficient data received"))
        idx, hash = res[0]
        if (idx != fromBlock) or (len(hash) != hlen):
            return errMsg(d("Invalid answer to block: "+idx))
        if hash == m_Blocks[fromBlock]['blockHash']:
            # found a common ancestor at the start of this range
            break
        if fromBlock <= 0:
            # reached genesis without any common block: not the same chain
            return errMsg(d("Different chain must be assumed"))
        srchLen = srchLen * 2
        start = fromBlock
    # 2. find the first block which is different, big data would call for binary search....
    for indx in range(1, len(res)):
        idx, hash = res[indx]
        if len(hash) != hlen:
            return errMsg(d("Invalid answer to block: "+idx))
        if hash != m_Blocks[fromBlock+indx]['blockHash']:
            knownDiffIdx = fromBlock+indx
            break
    # 3. up to the different block, our balances must be equal else we are totally out of synch
    myBal = {}
    self.getBalanceFromToBlock(0, knownDiffIdx-1, myBal)
    try:
        yourBal, stat = c_peer.sendGETToPeer(peer + "/blockBalances/" + str(knownDiffIdx-1))
    except Exception:
        return errMsg(d("Unsupported blockBalance"))
    if stat != 200:
        return errMsg(d("Unsupported blockBalance"))
    if len(myBal) != len(yourBal):
        return self.balanceWrong()
    for chk in myBal:
        if (chk not in yourBal) or (myBal[chk] != yourBal[chk]):
            return self.balanceWrong()
    # 4. save current status in case the other chain has faults
    # NOTE(review): tempBlocks/tmpTx are saved but never restored on the
    # failure path below (see the TODOs) — confirm this is accepted risk
    tempBlocks = m_Blocks[knownDiffIdx:]
    del m_Blocks[knownDiffIdx:]
    if len(m_pendingTX) > 0:
        tmpTx = m_pendingTX[0:]
        m_pendingTX.clear()
    setBalanceTo(myBal, knownDiffIdx-1)
    d("All saved and ok, no get all the blocks...")
    # TODO current difficult must be adjusted
    ret = self.getBlocksFromPeer(peer, -1, False, {}, 2, False)
    #TODO how to insert now the old pending TX, would be nice to not just skip them
    # but as a lot has changed, some may not be valid anymore, so need to test them
    if ret != "":
        # TODO arg, must restore
        return errMsg(d("Invalid blocks received, loading aborted"))
    return setOK("Thank you for the notification.")
def balanceWrong(self):
    """Report an unrecoverable fork: the two chains disagree on balances
    below the first differing block, so back-tracking cannot repair it."""
    # typo fix in the message: "oif" -> "of"
    return errMsg(d("Oh oh, balances of same chains are different, fork unrepairable"))
def isDataValid(resp_text):
    """Sanity-check a mining candidate received from the node.

    Verifies required fields, plausible difficulty, our reward address,
    hash length, minimum reward and a non-empty transaction set.
    Returns True when the candidate is usable, False otherwise.
    """
    m, l, f = checkRequiredFields(resp_text, m_candidateMiner, [], True)
    if (len(m) > 0) or (l != 0):
        d("required fields not matched")
        return False
    # TODO should this be further limited to half of zero and at least 5???
    difficulty = resp_text['difficulty']
    if difficulty >= len(cfg['zero_string']) or difficulty < 0:
        d("Difficulty not possible")
        return False
    if resp_text['rewardAddress'] != cfg['address'] or resp_text['index'] <= 0:
        d("Wrong reward address")
        return False
    if len(resp_text['blockDataHash']) != len(defHash):
        d("Wrong Hash length in blockDataHash" + resp_text['blockDataHash'])
        return False
    if resp_text['expectedReward'] < minBlockReward:
        d("Wrong Hash or too low expectedRewards: " + str(resp_text['expectedReward']))
        return False
    if resp_text['transactionsIncluded'] <= 0:
        d("No transaction at all")
        return False
    return True
def confirmUpdateBalances(txList, isGenesis):
    """Apply the balance effects of a confirmed block's transaction list.

    txList    list of transaction dicts from the accepted block.
    isGenesis whether this is the genesis block (relaxed checks downstream).
              NOTE(review): this parameter shadows the module-level
              isGenesis() helper used elsewhere — not called here, but
              worth renaming eventually.
    Returns "" on success, or an error string when a TX failed validation.
    """
    # entering here we know the structure of the TX are all ok, so settle only balances
    # first we update the buffer info, and only if all pass
    # then update the actual balances involved
    # theoretically if it is our own block, all should be correct, but we check anyway
    updBalance = updateConfirmedBalance(txList, isGenesis)
    if len(updBalance) == 0:
        # BUG FIX: txList is a list of TX dicts; the previous code indexed it
        # with a string (txList['transactionDataHash']) which raised TypeError
        # whenever this rejection branch ran. Report all TX hashes instead.
        txHashes = ", ".join(str(tx.get('transactionDataHash', '?')) for tx in txList)
        return "Block rejected, invalid TX detected in: " + txHashes
    # all tx in this block are valid, so update actual balances based on the results from checking
    for addr in updBalance:
        m_AllBalances[addr]['curBalance'] = updBalance[addr]
    # balances updated, remove TX from pending list
    for tx in txList:
        if tx['transactionDataHash'] in m_pendingTX:
            del m_pendingTX[tx['transactionDataHash']]
    # now invalid Tx are correctly removed, as another block and new balances applied
    if len(m_pendingTX) > 0:
        d("test that now invalid Tx are correctly removed, as another block and new balances applied")
        for tx in m_pendingTX:
            d("Check for " + tx)
            tmpBal = getBalanceUpTo(m_pendingTX[tx]['from'], m_pendingTX, tx)
            if tmpBal['confirmedBalance'] + tmpBal['pendingBalance'] < m_pendingTX[tx]['value'] + m_pendingTX[tx]['fee']:
                # Not enough balance anymore to keep the TX alive
                d("Not enough balance anymore to keep the TX alive, set TX not successFul")
                m_pendingTX[tx]["transferSuccessful"] = False
                d("set tx false, now must send asynch")
                try:
                    c_peer.sendAsynchPOSTToPeers("/transactions/send", m_pendingTX[tx])
                except Exception:
                    d("asynch excrption detected")
                d("asynch initiated, return")
    return ""
def pull():
    """Poll the node for a fresh mining candidate and prime the miner.

    On a new blockDataHash the miner loop is stopped (cfg['done']) and the
    shared newCandidate is refreshed via changeNonceDate(). If the same
    candidate keeps coming back, the date is re-rolled after cfg['refresh']
    unchanged pulls. cfg['pulling'] stays True on failure on purpose so the
    miner does not burn CPU while no peer is reachable.
    """
    try:
        d("asking")
        cfg['pulling'] = True
        resp_text = getCandidate()
        d("got " + str(resp_text))
        if "peerError" in resp_text:
            d("Peer error, sleep")
            sleep(3)
            return
        if isDataValid(resp_text) is False:
            # no point to waste time and effort on this invalid/incomplete candidate
            d("Invalid node block data detected, ignored....")
            sleep(3)
            return
        d("ok check, miner was idle " + str(cfg['done']))
        if cfg['blockHash'] != resp_text['blockDataHash']:
            d("new block data")
            cfg['done'] = True  # stop any ongoing looping
        if cfg['done'] is True:
            d("prepare block data for miner whenever miner is ready, even if we already have it all set")
            # BUG FIX: removed the bare statement "cfg['nonceCnt']" that stood
            # here — an expression with no effect (doMine resets the counter).
            cfg['blockHash'] = resp_text['blockDataHash']
            newCandidate['blockDataHash'] = resp_text['blockDataHash']
            newCandidate['difficulty'] = resp_text['difficulty']
            changeNonceDate()
        else:
            cfg['countSame'] = cfg['countSame'] + 1
            # TODO strategy: go by number of pulls or go by number of tries? Here it is pulls
            # but aside from pulls if we ever reach maxcount of nonce tries, we also change
            if (cfg['countSame'] > cfg['refresh']) and (cfg['foundSolution'] is False):
                # as the block has not changed, we change the date and restart
                changeNonceDate()
        cfg['pulling'] = False
    except Exception:
        d("No/Invalid peer reply, retry....")
        sleep(5)  # no peer, give a bit of time to recover, keep the 'pulling' flag to avoid waste of calc power
        # we keep the pulling flag, as without peer no point to churn CPU
    return
def verifyBasicTX(trans, isCoinBase, ref):
    """Validate the basic shape and values of a transaction dict.

    trans       the transaction to verify.
    isCoinBase  coinbase TXs must carry the default signature/from address
                and zero fee; regular TXs the opposite.
    ref         reference schema passed to checkRequiredFields.
    Returns the accumulated error string — empty when the TX is valid.
    """
    m, l, f = checkRequiredFields(trans, ref, [], False)
    colErr = ""
    # collect all missing required fields into one message
    for x in m:
        if colErr == "":
            colErr = "Missing field(s): '" + x + "'"
        else:
            colErr = colErr + "and '" + x + "'"
    # the entire check on the length has now been removed, because any addiitonal fields
    # within the TX are allowed, instead of being strict to follow only the exact specification,
    # such as to accept new forks which add new fields, but we do not accept missing fields
    # and by accepting addiiotnal fields, the check for unsuccessful is also not needed anymore
    # if l != 0:
    # #     only for cases of rejected TX we need to additionally check the two separate fields
    # #     if (l != 2):
    # #         colErr = colErr + " Invalid number of fields in transaction: " + str(l) + str(trans)
    # #     else:
    # #         if ("transferSuccessful" not in trans) or ("transactionDataHash" not in trans):
    # #             colErr = colErr + " Problem with TX successfulField " + str(trans)
    # #         else:
    # #             l = 0  # not used so far, but prepare for any changes later
    # # note we only check for known issues. If the TX has other more fields, so be it
    # # because it could be from a fork
    # # the only currently known issue is that more fields appear in unsuccessful TX, so this must be confirmed
    # # a unsuccessful TX can have 2 or 3 more fields, depending the status it had when it was rejected
    # # original version had only allowed 2 and did not consider the case that it could have been mined
    # # and then the 'minedInBlock' also appears, so now we don't check length but just the two critical fields
    # if isCoinBase is False:
    #     if 'transferSuccessful' in trans:
    #         if (trans['transferSuccessful'] is True) or ("transactionDataHash" not in trans):
    #             colErr = colErr + "Problem with recognising TX as unsuccessful"
    if colErr == "":
        # final checks — only run when all required fields are present,
        # so the direct key accesses below cannot KeyError
        if len(trans['senderSignature']) != 2:
            colErr = colErr + " Invalid number of elements in 'senderSignature' field"
        else:
            # both signature halves must be padded to the default length
            if (len(trans['senderSignature'][0]) != len(trans['senderSignature'][1])) or\
               (len(trans['senderSignature'][0]) != len(defSig)):
                colErr = colErr + " Invalid/Unpadded 'senderSignature' length"
            else:
                if isCoinBase:
                    # coinbase TXs must use exactly the default signature
                    if (trans['senderSignature'][0] != defSig) or (trans['senderSignature'][1] != defSig):
                        colErr = colErr + "Invalid senderSignature"
                else:
                    # regular TXs must NOT use the default signature
                    if (trans['senderSignature'][0] == defSig) or (trans['senderSignature'][1] == defSig):
                        colErr = colErr + "Invalid senderSignature"
        if not isinstance(trans['fee'], int):
            colErr = colErr + "Fees must be integer, you sent: " + str(trans['fee'])
        else:
            if isCoinBase:
                if trans['fee'] != 0:
                    colErr = colErr + "Coinbase fee should be zero, you sent: " + str(trans['fee'])
            else:
                if trans['fee'] < m_stats['m_minFee']:
                    colErr = colErr + "Minimun fee 10 micro-coins, you sent: " + str(trans['fee'])
        if not isinstance(trans['value'], int):
            colErr = colErr + "Value must be integer, you sent: " + str(trans['value'])
        else:
            # slide 39 confirm that 0 value transactions are allowed
            if trans['value'] < 0:
                colErr = colErr + "Minimum value 0 micro-coins, you sent: " + str(trans['value'])
        if isCoinBase:
            # TODO any other coinbase checks
            colErr = colErr + verifyPubKey(trans['senderPubKey'], True)
            if trans['from'] != defAdr:
                colErr = colErr + "Invalid from in Coinbase"
        else:
            colErr = colErr + verifyAddr(trans['from'], trans['senderPubKey'])
            colErr = colErr + verifyAddr(trans['to'])
    if len(colErr) > 0:
        d("Verification problem: " + colErr)
    return colErr
def nodeSpecificPOST(self, url, linkInfo, json, request):
    """Dispatch a POST *url* to the node-type-specific interface.

    Sanitises the JSON payload, short-circuits recently seen url+payload
    combinations (broadcast loop guard), serialises with other POSTs and
    with GET locks, dispatches, and always removes its lock entry.
    Returns the interface's reply dict, or a generic error dict.
    """
    ret = {
        'NodeType': m_info['type'],
        'info': "This URL/API is not available/broken"
    }
    try:
        # reject payloads with unexpected characters in keys or values
        for x in json:
            # NOTE(review): re.match only anchors at the start, so a key like
            # "ok$bad" still passes — re.fullmatch may be intended; confirm
            if not re.match("[0-9a-zA-Z]+", x):
                return errMsg("Invalid JSON key: " + str(x))
            if isinstance(json[x], str):
                if not re.match("[0-9a-zA-Z \.%!@#$\-_+=;:,/?<>]*", json[x]):
                    return errMsg("Invalid character in JSON data: " + str(x))
            elif isinstance(json[x], list):
                for xx in json[x]:
                    if isinstance(xx, str):
                        if not re.match("[0-9a-zA-Z \.%!@#$\-_+=;:,/?<>]*", xx):
                            return errMsg("Invalid character in JSON data: " + str(xx))
            elif not isinstance(json[x], int):
                return errMsg("Invalid character in JSON data: " + str(json[x]))
        # This is only applicable to POST, and is a shortcut to stop endless broadcast
        # of the same message
        for urlJson in m_peerSkip:
            if urlJson['url'] == url:
                m, l, f = checkRequiredFields(json, urlJson['json'], urlJson['json'], True)
                if len(m) + len(f) == 0:
                    #TODO what text here?
                    return setOK("Acknowledge... ")
        # for bigger network, make this bigger as well?
        if len(m_peerSkip) > 10:
            del m_peerSkip[0]
        if self.permittedURLPOST(url) is False:
            return errMsg("This URL/API is invalid or not available. " + url)
        d("Add delay url: '" + url + "' before we had " + str(len(m_isPOST)))
        m_isPOST.append(url)
        # wait for other POSTs (our own entry counts, hence > 1) and chain init
        while (len(m_isPOST) > 1) or (m_cfg['chainInit'] is True):
            if self.delay(url, 2) is False:
                break  # for some reason we decide to ignore the loop
        # wait for any GET holding the simple lock
        while len(m_simpleLock) > 0:
            if self.delay(url, 3) is False:
                break  # for some reason we decide to ignore the loop
        self.release = False
        if isBCNode():
            ret = self.c_blockInterface.nodeSpecificPOSTNow(url, linkInfo, json, request)
        elif isWallet():
            ret = self.c_walletInterface.nodeSpecificPOSTNow(url, linkInfo, json, request)
        elif isFaucet():
            ret = self.c_faucetInterface.nodeSpecificPOSTNow(url, linkInfo, json, request)
        elif isGenesis():
            ret = self.c_genesisInterface.nodeSpecificPOSTNow(url, linkInfo, json, request)
    except Exception:
        d("*********************************************")
        d("*********************************************")
        print("POST exception caught, isPoststack " + str(len(m_isPOST)))
        d("*********************************************")
        d("*********************************************")
    # always drop our lock entry, even after an exception
    if url in m_isPOST:
        m_isPOST.remove(url)
    d("Removed delay url: '" + url + "' back to " + str(len(m_isPOST)))
    self.release = True
    return ret
def doMine():
    """Main miner loop: fetch a candidate, search a nonce meeting the
    difficulty target, then submit the mined block to a peer.

    In interactive mode (m_cfg['mode'] == "Y") the console controls when
    to (re)start and optionally delays the submit ('m', 'd<secs>', 's<sec>').
    Runs until m_cfg['shutdown'] is set.
    """
    # miners continue to try to mine
    # request some block for mining to the networks(Node)
    # then try to find a hashing code and nonce value to meet with the difficulty
    while m_cfg['shutdown'] is False:
        cfg['foundSolution'] = False
        cfg['done'] = True
        cfg['pulling'] = True
        if m_cfg['mode'] == "Y":
            d("Enter m <return> to (re-)start mining, candidate changed or new:")
            choice = 0
            while True:
                try:
                    if choice > 0:
                        d("Invalid request: " + sel)
                    choice = choice + 1
                    sel = input().lower()
                    if sel == 'm':
                        sel = 0  # 0 means: submit immediately, no send delay
                        break
                    if sel[0] == "d":
                        # d<secs>: delay submit until an absolute epoch time
                        secs = int(sel[1:])
                        if secs > 0:
                            sel = datetime.datetime.now() + datetime.timedelta(seconds=secs)
                            sel = "d" + str(int(time.mktime(sel.timetuple())))
                            break
                    if sel[0] == "s":
                        # s<sec>: delay submit until the wall-clock second matches
                        secs = int(sel[1:])
                        if (secs >= 0) and (secs < 60):
                            break
                except Exception:
                    d("Invalid input: " + sel)
            cfg['mineSend'] = sel
        # wait until the pull loop has delivered a candidate
        while cfg['pulling'] is True:
            sleep(1)
        cfg['done'] = False
        candidate = deepcopy(newCandidate)
        target = cfg['zero_string'][:candidate['difficulty']]
        cfg['nonceCnt'] = 0
        try:
            count = 0
            # show progress of nonce finding
            show = 0
            minedBlockHash = "N/A"
            while (cfg['foundSolution'] is False) and (cfg['done'] is False):
                candidate['nonce'] = candidate['nonce'] + 1
                if candidate['nonce'] >= cfg['maxNonce']:
                    candidate['nonce'] = 0
                    d("wrap around encountered")
                count = count + 1
                show = show + 1
                if show > 25000:
                    cfg['nonceCnt'] = show
                    d(str(count) + " " + str(candidate['nonce']))
                    show = 0
                if count >= cfg['maxNonceTry']:
                    d("Max trial number reached, get new date")
                    cfg['waitAck'] = True
                    changeNonceDate()
                    candidate = deepcopy(newCandidate)
                    count = 0
                #this does not use the make miner-hash as it is optimised for fixDat to be faster
                minedBlockHash = hashlib.sha256(
                    (candidate['fixDat'] + str(candidate['nonce'])).encode("utf8")).hexdigest()
                if minedBlockHash[:candidate['difficulty']] == target:
                    cfg['foundSolution'] = True
                    cfg['done'] = True
            if cfg['foundSolution'] is True:
                cfg['nonceCnt'] = show
                # After finding a hashcode, now submit the mined block by POST
                # Data for POST
                ndata = {
                    "blockDataHash": candidate['blockDataHash'],
                    "dateCreated": candidate['dateCreated'],
                    "nonce": candidate['nonce'],
                    "blockHash": minedBlockHash
                }
                sent = False
                if cfg['mineSend'] != 0:
                    # honour the console-requested send delay
                    try:
                        if cfg['mineSend'][0] == "d":
                            sel = int(cfg['mineSend'][1:])
                            while int(time.mktime(datetime.datetime.now().timetuple())) < sel:
                                d("Wait to send due to delay...")
                                sleep(1)
                        elif cfg['mineSend'][0] == "s":
                            sel = int(cfg['mineSend'][1:])
                            isLower = (datetime.datetime.now().second <= sel)
                            while datetime.datetime.now().second != sel:
                                if (isLower is True) and (datetime.datetime.now().second >= sel):
                                    break
                                else:
                                    isLower = (datetime.datetime.now().second <= sel)
                                d("Wait to send due seconds check..." + str(isLower))
                                sleep(1)
                    except Exception:
                        print("send control invalid: " + cfg['mineSend'])
                # submit to the first active peer; fall back to share-to peers
                for peer in m_cfg['activePeers']:
                    resp = c_peer.doPOST(peer + "/mining/submit-mined-block", ndata)
                    sent = True
                    break
                if sent is False:
                    for peer in m_cfg['shareToPeers']:
                        resp = c_peer.doPOST(peer + "/mining/submit-mined-block", ndata)
                        sent = True
                        break
                if (sent is True) and (resp.status_code == 200):
                    d("MINING SUCCESS (" + str(count) + " tries): " + resp.text)
                else:
                    # NOTE(review): when both peer lists are empty, 'resp' is
                    # unbound here (NameError → caught below); also d() is
                    # called with two args only on this line — confirm d's arity
                    d("MINING FAILED: ", resp.text)
            else:
                d("No solution found or new block came in")
        except Exception:
            d("Exception occurred... clear and refresh candidate")