def syncAllAddresses():
    """Fetch the full address list from the network and sync it into the db.

    Pulls all addresses from constants.ADDRESS_URL, diffs them against the
    addresses already stored in the db, and posts the resulting update/insert
    sets to the backend address-sync endpoint.
    """
    session = Session()
    # fix: reuse the session for the network fetch too (connection pooling)
    # instead of a one-off requests.get alongside an unused session
    r = session.get(constants.ADDRESS_URL)
    addressesData = r.json()
    # logger.info(addressesData)
    networkAddresses = addressesData["address"]

    logger.info("obtaining db address list")
    dbAddresses = commonUtils.getDataFromGet(session,
                                             constants.listAllAddressesBasicUrl)
    logger.info("preparing db address map for {} addresses".format(
        len(dbAddresses)))
    addressMap = commonUtils.getMapFromList(dbAddresses, "address")

    logger.info("processing addresses")
    updates, inserts = processAddresses(networkAddresses, addressMap)

    logger.info("syncing all addresses with database")
    addReqDetails = {
        "type": "addressSync",
        "updates": updates,
        "inserts": inserts,
        "balanceIncludesStake": False,
        # allMode is a module-level flag, presumably set by the job entry
        # point before this runs — TODO confirm it is always defined
        "allMode": allMode,
    }
    commonUtils.postReq(constants.syncAddressesUrl, addReqDetails)
    logger.info("after syncing all addresses with database")
def syncDelegations():
    """Reconcile network delegations against the db and push the delta."""
    logger.info("obtaining db delegates")
    session = Session()
    existingDelegates = commonUtils.getDataFromGet(session,
                                                   constants.listDelegatesUrl)
    # logger.info(existingDelegates)

    logger.info("obtaining network delegates")
    networkDelegations = getAllDelegations(session)
    # logger.info(networkDelegations)

    logger.info("processing delegates")
    updates, inserts, deletes = processDelegations(networkDelegations,
                                                   existingDelegates)
    logger.info("after processing delegates")
    logger.info("deletes len is: {}".format(len(deletes)))

    logger.info("syncing all delegations - delegateSync")
    payload = {
        "type": "delegateSync",
        "updates": updates,
        "inserts": inserts,
        "deletes": deletes,
    }
    commonUtils.postReq(constants.updateUrl, payload)
    logger.info("after syncing all delegations - delegateSync")
def syncStats():
    """Process and sync epoch-signing stats for every epoch not yet in the db.

    Skips entirely when the db is already caught up to the previous epoch.
    """
    session = Session()
    dbData = commonUtils.getDataFromGet(session, constants.listDataForEpochSign)
    dbValidators = dbData["validators"]
    syncedTillEpoch = dbData["epoch"]

    currentEpoch = commonUtils.getHarmonyResultDataFromPost(
        session, "hmyv2_getEpoch", [])
    prevEpoch = currentEpoch - 1
    logger.info("prevEpoch: {} syncedTillEpoch: {}".format(
        prevEpoch, syncedTillEpoch))

    # guard clause: nothing new to process
    if prevEpoch <= syncedTillEpoch:
        logger.info(
            "skipping processing as data is already synced up. prevEpoch <= syncedTillEpoch"
        )
        return

    # process every epoch in (syncedTillEpoch, prevEpoch], inclusive of prevEpoch
    allData = [
        processEpoch(session, epoch, dbValidators)
        for epoch in range(syncedTillEpoch + 1, prevEpoch + 1)
    ]
    logger.info("# of epochs processed: {}".format(len(allData)))

    payload = {
        "type": "epochSignSync",
        "allData": allData,
        "epoch": prevEpoch
    }
    # logger.info(payload)
    commonUtils.postReq(constants.syncEpochSignUrl, payload)
def processEvents():
    """Scan staking transactions over the next block range and post them as events.

    Walks blocks in [startBlockHeight, endBlockHeight) and, for each block,
    pulls staking transactions by index until the node returns an empty
    result (capped at 1000 indexes per block as a safety bound). When the
    backend reports more blocks remain, sleeps and recurses, bounded by
    constants.EVENT_SYNC_MAX_LOOPS via the module-level `attempts` counter.

    Raises:
        Exception: when no starting block height could be obtained.
    """
    global attempts
    logger.info("starting processing")
    startBlockHeight, endBlockHeight, more = commonUtils.getBlockHeightRange(
        constants.eventSyncBlockHeightUrl, constants.EVENT_SYNC_MAX_LIMIT)
    if startBlockHeight is None:
        logger.info("starting block obtained is None. Exiting")
        raise Exception("Starting block obtained is None. Exiting")
    session = Session()
    allEvents = []
    blockCount = 0
    # NOTE(review): range() excludes endBlockHeight itself — presumably the
    # backend treats endBlockHeight as exclusive; confirm in getBlockHeightRange
    for blockNum in range(startBlockHeight, endBlockHeight):
        blockCount += 1
        logger.info("processing block: {}".format(blockNum))
        index = 0
        # fix: dropped the redundant "True and" from the loop condition
        while index < 1000:
            transaction = commonUtils.getHarmonyResultDataFromPost(
                session, constants.STAKING_TRANSACTION_URL, [blockNum, index])
            if transaction:
                event = harmonyNetworkUtils.processStakingTransaction(
                    session, transaction)
                if event is not None:
                    allEvents.append(event)
            else:
                # no more transactions were found for this block
                break
            index += 1
    logger.info("processed all events")
    reqDetails = {
        "type": "eventsSync",
        "blockHeight": endBlockHeight,
        "startBlockHeight": startBlockHeight,
        "events": allEvents,
    }
    # logger.info(reqDetails)
    commonUtils.postReq(constants.syncEventsUrl, reqDetails)
    if more and attempts < constants.EVENT_SYNC_MAX_LOOPS:
        attempts += 1
        time.sleep(constants.SLEEP_TIME_FOR_MORE)
        logger.info(str(attempts) + " - starting new cycle")
        processEvents()
def syncValidator(session, allValidators, delegateCount):
    """Post the validator snapshot plus staking/epoch network info to the backend."""
    networkInfo = getStakingNetworkInfo(session, delegateCount)
    epochDetails = getEpochInfo(session, networkInfo)
    payload = {
        "type": "valSync",
        "stakingInfo": networkInfo,
        "epochInfo": epochDetails,
        "allValidators": allValidators,
    }
    # logger.info(payload)
    commonUtils.postReq(constants.harmonyValSyncUrl, payload)
def syncValVersions():
    """Sync validator client-version data (keyed by BLS key) to the backend.

    NOTE(review): this definition is shadowed by a later `syncValVersions`
    further down in this file (which sends a different payload shape), so
    this version is dead code — confirm and remove one of the two.
    """
    session = Session()
    valKeys = commonUtils.getDataFromGet(session, constants.listValKeys)
    # logger.info(valKeys)
    keyToValidator = commonUtils.getMapFromList(valKeys, "blsKey")
    nodeMetrics = commonUtils.getDataFromGet(session, constants.METRICS_URL)
    # logger.info(nodeMetrics)
    versionData = processValVersions(nodeMetrics, keyToValidator)
    logger.info("version number data: {}".format(len(versionData)))
    payload = {"type": "versionSync", "data": versionData}
    # logger.info(payload)
    commonUtils.postReq(constants.versionSync, payload)
def processTransactionsByRange(startBlockHeight, endBlockHeight):
    """Collect regular (and, on shard 0, staking) transactions for a block
    range and post them to the backend tx-sync endpoint.

    Increments the module-level `attempt` counter after each successful post.
    """
    global attempt
    txData = {"summaryMap": {}, "txs": []}
    # logger.info("txData: {}".format(txData))
    processTransactionRange(constants.REGULAR_TRANSACTION_URL, startBlockHeight,
                            endBlockHeight, txData, constants.TX_REGULAR)
    # staking transactions only exist on shard 0 and after the staking epoch started
    includeStaking = (shardId == 0
                      and startBlockHeight > constants.STAKING_START_BLOCK)
    if includeStaking:
        processTransactionRange(constants.STAKING_TRANSACTION_URL,
                                startBlockHeight, endBlockHeight, txData,
                                constants.TX_STAKING)
    payload = {
        "type": "txSync",
        "blockHeight": endBlockHeight,
        "normalMode": normalMode,
        "startBlockHeight": startBlockHeight,
        "txData": txData,
        "shardId": shardId,
    }
    # logger.info(payload)
    logger.info("attempt: {} , endBlockHeight: {}, startBlockHeight: {}, summary: {}, txs: {}".format(
        attempt, endBlockHeight, startBlockHeight, txData["summaryMap"], len(txData["txs"])))
    commonUtils.postReq(constants.syncTxUrl, payload)
    logger.info("after submitting transactions to backend")
    attempt += 1
def syncValVersions():
    """Sync validator client versions (one entry per BLS key) to the backend.

    NOTE(review): this shadows an earlier `syncValVersions` definition in
    this file — this later one is the definition that actually runs.
    """
    session = Session()
    valKeys = commonUtils.getDataFromGet(session, constants.listValKeys)
    # logger.info(valKeys)
    blsKeyMap = commonUtils.getMapFromList(valKeys, "blsKey")
    nodeMetrics = commonUtils.getDataFromGet(session, constants.METRICS_URL)
    # logger.info(nodeMetrics)
    versionsByKey = processValVersions(nodeMetrics, blsKeyMap)
    # logger.info(versionsByKey)
    keyVersions = list(versionsByKey.values())
    # logger.info(keyVersions)
    logger.info("version number data: {}".format(len(keyVersions)))
    payload = {"type": "versionSync", "keyVersions": keyVersions}
    # logger.info("final request is: {}".format(payload))
    commonUtils.postReq(constants.versionSync, payload)
def saveHealthCheck(blockDiff, networkHeight, nodeHeight, shardId):
    """Report node health (height gap vs the network) to the backend.

    Args:
        blockDiff: gap between network and node block heights.
        networkHeight: latest block height seen on the network.
        nodeHeight: latest block height on this node.
        shardId: shard this health check refers to.
    """
    logger.debug("in save_health_check")
    payload = {
        "type": "saveHealthCheck",
        "nodeName": node_name,  # module-level global
        "symbol": constants.app,
        # NOTE(review): naive local-time datetime object — presumably
        # serialized by postReq; confirm timezone expectations downstream
        "checkupTime": datetime.datetime.now(),
        "networkBlockHeight": networkHeight,
        "nodeBlockHeight": nodeHeight,
        "heightGap": blockDiff,
        "poolId": constants.DEFAULT_POOL_ID,
        "shardId": shardId,
    }
    logger.debug(payload)
    commonUtils.postReq(constants.saveHealthCheckUrl, payload)
def processShardUtils():
    """Collect per-shard block details for the current epoch and sync them.

    Reads the last-synced epoch state from the db, fetches live details for
    shards 0-3, initializes (first run) or rolls over (epoch change) the
    per-shard state, then posts the combined shard details to the backend.
    """
    # hmyv2_getValidatorInformation
    # capture block production rate
    # obtain oldest block production rate after current epoch
    # capture pace of blocks
    session = Session()
    currentEpoch = commonUtils.getHarmonyResultDataFromPost(
        session, "hmyv2_getEpoch", [])
    dbDetails = commonUtils.getDataFromGet(session,
                                           constants.shardSyncDetailsUrl)
    logger.info("dbDetails: {}".format(dbDetails))
    syncedEpoch = dbDetails["epochNumber"]
    # dbShardData maps shard id (as a string key "0".."3") to stored state
    dbShardData = dbDetails["shardData"]
    logger.info("currentEpoch: {}, syncedEpoch: {}".format(
        currentEpoch, syncedEpoch))
    logger.info("obtaining shard details")
    # live details for each of the four shards, each from its own base URL
    shard0Details = getShardDetails(session, constants.HARMONY_BASE_URL, 0)
    shard1Details = getShardDetails(session, constants.HARMONY_BASE_URL_S1, 1)
    shard2Details = getShardDetails(session, constants.HARMONY_BASE_URL_S2, 2)
    shard3Details = getShardDetails(session, constants.HARMONY_BASE_URL_S3, 3)
    logger.info("after obtaining shard details")
    shardDetails = []
    if "0" not in dbShardData:
        # this means this is the first execution of the job
        logger.info(
            "this means this is the first execution of the job. preparing initAllShardData"
        )
        dbShardData = initAllShardData(session, shard0Details, shard1Details,
                                       shard2Details, shard3Details)
        logger.info("after preparing initAllShardData")
        # return
    elif currentEpoch != syncedEpoch:
        # update previous epoch first
        logger.info(
            "epochs have changed. this means we need to update previous epoch data first"
        )
        shardDetails.append(processShardEnd(dbShardData["0"]))
        shardDetails.append(processShardEnd(dbShardData["1"]))
        shardDetails.append(processShardEnd(dbShardData["2"]))
        shardDetails.append(processShardEnd(dbShardData["3"]))
        # init shard data for this epoch
        dbShardData = initAllShardData(session, shard0Details, shard1Details,
                                       shard2Details, shard3Details)
    # lastBlock = getEpochBlockDetails(session)
    # shard 0 is processed on its own; other shards are compared against
    # shard 0's state
    shardDetails.append(processShard0(shard0Details, dbShardData["0"]))
    shardDetails.append(
        processOtherShard(shard1Details, dbShardData["1"], dbShardData["0"]))
    shardDetails.append(
        processOtherShard(shard2Details, dbShardData["2"], dbShardData["0"]))
    shardDetails.append(
        processOtherShard(shard3Details, dbShardData["3"], dbShardData["0"]))
    # get latest shard 0 record for calculations
    # get first block of epoch to calculate pace or may be do it in db
    reqDetails = {
        "type": "shardSync",
        "shardDetails": shardDetails,
        "currentEpoch": currentEpoch
    }
    # logger.info(reqDetails)
    commonUtils.postReq(constants.syncShardUrl, reqDetails)
def syncElections():
    """Trigger the backend election-sync job."""
    payload = {"type": "electionSync"}
    # logger.info(payload)
    commonUtils.postReq(constants.electionSyncUrl, payload)
def processBlsUtils():
    """Collect per-BLS-key performance data for all elected validators and
    sync it (plus shard details) to the backend.

    Walks every validator known to the db, skips those not currently
    elected, pulls each elected validator's per-key performance, accumulates
    per-shard reward totals, then computes overall percentages before posting.
    """
    # hmyv2_getValidatorInformation
    # capture block production rate
    # obtain oldest block production rate after current epoch
    # capture pace of blocks
    # get latest next election slots
    # get all validators
    session = Session()
    logger.info("obtaining blsKeySyncDetailsUrl data")
    blsKeySyncDetails = commonUtils.getDataFromGet(
        session, constants.blsKeySyncDetailsUrl)
    # logger.info(blsKeySyncDetails)
    dbValidators = blsKeySyncDetails["validators"]
    # dbShards = blsKeySyncDetails["shards"]
    keys = blsKeySyncDetails["keys"]
    dbKeyMap = commonUtils.getMapFromList(keys, "blsKey")
    logger.info("obtaining epoch details")
    currentEpoch = commonUtils.getHarmonyResultDataFromPost(
        session, "hmyv2_getEpoch", [])
    shardDetails = getAllShardDetails(session, currentEpoch)
    # latestBlock = commonUtils.getHarmonyResultDataFromPost(session, "hmyv2_blockNumber", [])
    # note down block for which data is being captured ... record block
    # number as well along with bls data
    logger.info("obtaining validators")
    # dbValidators = commonUtils.getDataFromGet(session, constants.listAllValidatorsBasicUrl)
    blsKeyInserts, blsPerfData = [], []
    # per-shard reward accumulators plus the overall total, mutated in place
    # by processKeys as each validator's keys are processed
    rewardsDetails = {"0": 0, "1": 0, "2": 0, "3": 0, "totalRewards": 0}
    i = 0
    for validator in dbValidators:
        # exclude the ones not elected
        # status = commonUtils.getStatus(validatorDetails["eposStatus"])
        # logger.info(validator)
        status = validator["status"]
        if status != constants.H_STATUS_ELECTED:
            # logger.info("skipping validator, {}, as status is not elected, it is: {}".format(
            #     validator["name"], status))
            continue
        address = validator["address"]
        i += 1
        logger.info("{} - obtaining all keys for validator: {}".format(
            i, address))
        validatorDetails = getValidatorDetails(session, address)
        perfData, keyInserts = processKeys(validatorDetails["perfByKeys"],
                                           dbKeyMap, validator["hPoolId"],
                                           currentEpoch, rewardsDetails)
        blsPerfData.extend(perfData)
        blsKeyInserts.extend(keyInserts)
    # loop again here to calculate overall averages
    # logger.info("blsPerfData before overall percentages: {}".format(blsPerfData))
    processForOverallPercentages(blsPerfData, rewardsDetails)
    # logger.info("blsPerfData after overall percentages: {}".format(blsPerfData))
    logger.info("blsPerfData # of keys is: {}".format(len(blsPerfData)))
    logger.info("syncing all addresses with database")
    addReqDetails = {
        "type": "blsKeySync",
        "blsPerfData": blsPerfData,
        "inserts": blsKeyInserts,
        "shardDetails": shardDetails,
        "currentEpoch": currentEpoch
    }
    commonUtils.postReq(constants.syncBlsPerfUrl, addReqDetails)
    logger.info("after syncing all addresses with database")
def syncNotifications():
    """Trigger the backend notification-send job."""
    payload = {"type": "sendNotifications"}
    # logger.info(payload)
    commonUtils.postReq(constants.sendNotificationsUrl, payload)