def broadcast(date, targeting=False, tx_per_block=50, tx_per_req=10):
    """Broadcast the transactions registered for *date* until the network
    confirms all of them, archiving each confirmed transaction in sqlite.

    Args:
        date (str): registry date, used to locate ``<date>.registry`` in ROOT.
        targeting (bool): if True, wait for the delegate slot (``waitFor``)
            before sending each block-sized group.
        tx_per_block (int): transactions grouped per block slot.
        tx_per_req (int): transactions sent per POST request.
    """
    config = loadConfig()
    sqlite = initDb()
    cursor = sqlite.cursor()
    registry = loadJson(os.path.join(ROOT, "%s.registry" % date))
    transactions = list(registry.values())
    seed = config["peer"]
    logMsg("Broadcasting %d transactions..." % len(registry))
    # split the whole payout into block-sized groups
    sliced = [
        transactions[i:i + tx_per_block]
        for i in range(0, len(transactions), tx_per_block)
    ]
    # loop until every registered transaction id has been confirmed
    while len(registry):
        for slc in sliced:
            if targeting:
                sys.stdout.write("Waiting for delegate...\n")
                waitFor(**config)
            # send each block group in request-sized chunks
            for txs in [
                slc[i:i + tx_per_req]
                for i in range(0, len(slc), tx_per_req)
            ]:
                str_txs = [
                    "transaction(type=%(type)d, amount=%(amount)d address=%(recipientId)s)" % tx
                    for tx in txs
                ]
                result = requests.post(
                    seed + "/peer/transactions",
                    json={"transactions": txs},
                    headers={
                        "nethash": config["nethash"],
                        "version": config["version"],
                        "port": "%d" % config["port"]
                    }).json()
                if result.get("success", False):
                    sys.stdout.write("\n".join(
                        "%s -> %s" % (a, b)
                        for a, b in zip(str_txs, result["transactionIds"])) + "\n")
                else:
                    # BUG FIX: rejected broadcasts were silently swallowed
                    # (`else: pass`); report them so the operator can see
                    # why the while-loop keeps retrying
                    sys.stdout.write("broadcast error : %s\n" % result.get("error", result))
        # give the network time to include the transactions in blocks
        time.sleep(config["blocktime"] * 2)
        # remove every confirmed transaction from the registry and store it
        for txid in list(registry.keys()):
            if requests.get(seed + "/api/transactions/get?id=%s" % txid).json().get("success", False):
                tx = registry.pop(txid)
                cursor.execute(
                    "INSERT OR REPLACE INTO transactions(date, timestamp, amount, address, id) VALUES(?,?,?,?,?);",
                    (date, tx["timestamp"], tx["amount"] / 100000000., tx["recipientId"], tx["id"]))
        sqlite.commit()
    os.remove(os.path.join(ROOT, "%s.registry" % date))
def rebuild():
    """Run the configured rebuild command for the node, flagging
    ``rebuilding`` in the status file while the command runs and clearing
    the flag only on success. stderr of the command is saved to rebuild.log.
    """
    logMsg("Rebuilding node...")
    config = loadConfig()
    status = loadStatus()
    status["rebuilding"] = True
    dumpStatus(status)
    snapshot_folder = "%(homedir)s/snapshots" % config
    try:
        os.makedirs(snapshot_folder)
    except OSError:
        # BUG FIX: was a bare `except:` that hid every error (including
        # KeyboardInterrupt); only ignore OS-level errors such as
        # "directory already exists"
        pass
    """
    snapshots = [os.stat(os.path.join(snapshot_folder, f)) for f in os.listdir(snapshot_folder)]
    if len(snapshots):
        snapshot_size = max(snp.st_size for snp in snapshots)
    else:
        snapshot_size = 0
    size, url = getBestSnapshot()
    sys.stdout.write("    Checking for best snapshots\n")
    sys.stdout.write("    > size:%so - url:%s (previous snapshot size %so)\n" % (size, url, snapshot_size))
    try:
        if size > snapshot_size:
            execute("wget -nv %(best_snapshot)s -O %(homedir)s/snapshots/%(database)s",
                    best_snapshot=url, homedir=config["homedir"], database=config["database"])
    except Exception as error:
        sys.stdout.write("    Error occured : %s\n" % error)
        status["rebuilding"] = False
        dumpStatus(status)
        return
    else:
        new_snapshot_size = os.stat("%(homedir)s/snapshots/%(database)s" % config).st_size
        if new_snapshot_size < snapshot_size:
            sys.stdout.write("    > new size:%so\n    > it seems download did not finish properly...\n" % new_snapshot_size)
            status["rebuilding"] = False
            dumpStatus(status)
            return
    """
    try:
        out, err = execute(*config["cmd"]["rebuild"], **config)
        with io.open("rebuild.log", "wb") as log:
            log.write(err if isinstance(err, bytes) else err.encode("utf-8"))
    except Exception as error:
        sys.stdout.write(" Error occured : %s\n" % error)
        # keep the flag True-ish semantics: mark the rebuild as failed
        status["rebuilding"] = False
    else:
        status.pop("rebuilding", False)
    dumpStatus(status)
def getBestSnapshot():
    """Return ``(size, url)`` of the largest snapshot advertised by the
    configured snapshot URLs, probing each one with a streamed GET and
    reading its Content-Length header. Returns ``(0, None)`` when no
    snapshot URL is reachable.
    """
    config = loadConfig()
    # BUG FIX: the original stored the winner in `best` but returned the
    # loop variable `snapshot` (i.e. the *last* URL tried, or None),
    # so the largest snapshot was never actually returned
    size, best = 0, None
    for snapshot in config.get("snapshots", []):
        try:
            req = requests.get(snapshot, stream=True)
            content_length = int(req.headers["Content-length"])
            if content_length > size:
                size = content_length
                best = snapshot
        except Exception as error:
            sys.stdout.write(" Error occured with %s : %s\n" % (snapshot, error))
    return size, best
def restart():
    """Run the configured node-restart command, recording the attempt in the
    status file. On success the 'block speed issue round' counter is cleared;
    on failure 'restarted' is reset to False. stderr goes to restart.log."""
    logMsg("Restarting node...")
    config = loadConfig()
    status = loadStatus()
    status["restarted"] = True
    dumpStatus(status)
    try:
        stdout, stderr = execute(*config["cmd"]["restart"], **config)
        payload = stderr if isinstance(stderr, bytes) else stderr.encode("utf-8")
        with io.open("restart.log", "wb") as logfile:
            logfile.write(payload)
    except Exception as error:
        sys.stdout.write(" Error occured : %s\n" % error)
        status["restarted"] = False
    else:
        # a clean restart resets the block-speed issue counter
        status.pop("block speed issue round", False)
    dumpStatus(status)
def build():
    """Rebuild the local sqlite transaction archive from the delegate's
    full send history: page through the node API 50 transactions at a
    time and store every type-0 (transfer) transaction.
    """
    config = loadConfig()
    sqlite = initDb()
    cursor = sqlite.cursor()
    seed = config["peer"]
    tbw_config = loadJson(os.path.join(ROOT, "tbw.json"))
    _cnf = loadJson(tbw_config["node"])
    secret = _cnf["forging"]["secret"][0]
    keys = crypto.getKeys(secret)
    account = requests.get(
        seed + "/api/delegates/get?publicKey=%s" % keys["publicKey"],
        verify=True, timeout=5).json().get("delegate", {})
    address = account["address"]
    begintime = datetime.datetime(*config["begin"], tzinfo=pytz.UTC)
    offset = 0
    transactions = []
    search = requests.get(
        seed + "/api/transactions?senderId=%s&limit=50&offset=%d" % (address, offset)
    ).json().get("transactions", [])
    # a full page (50 rows) means there may be more to fetch
    while len(search) == 50:
        transactions.extend([t for t in search if t["type"] == 0])
        offset += 50
        search = requests.get(
            seed + "/api/transactions?senderId=%s&limit=50&offset=%d" % (address, offset)
        ).json().get("transactions", [])
    # BUG FIX: the last (partial) page was appended unfiltered, letting
    # non-transfer transactions (votes, registrations...) into the archive;
    # apply the same type-0 filter used inside the loop
    transactions.extend([t for t in search if t["type"] == 0])
    for tx in transactions:
        date = getRealTime(begintime, tx["timestamp"])
        cursor.execute(
            "INSERT OR REPLACE INTO transactions(date, timestamp, amount, address, id) VALUES(?,?,?,?,?);",
            (date.strftime("%Y-%m-%d"), tx["timestamp"],
             tx["amount"] / 100000000., tx["recipientId"], tx["id"]))
    sqlite.commit()
def dumpRegistry(date):
    """Build, sign and dump the payout registry for *date*.

    Reads ``<date>.tbw`` (total amount + per-address weights), creates one
    signed type-0 transaction per weighted address (fees deducted from each
    share), optionally adds a "funds" share transaction, writes the signed
    set to ``<date>.registry`` and archives the consumed ``.tbw`` file.
    """
    config = loadConfig()
    tbw = loadJson(os.path.join(ROOT, "%s.tbw" % date))
    param = loadParam()
    _cnf = loadJson(param["node"])
    keys = crypto.getKeys(_cnf["forging"]["secret"][0])
    if param.get("#2", False):
        # optional second passphrase stored hex-encoded under "#2"
        keys["secondPrivateKey"] = crypto.getKeys(
            seed=crypto.unhexlify(param["#2"]))["privateKey"]
    amount = tbw["amount"]
    registry = OrderedDict()
    begintime = datetime.datetime(*config["begin"], tzinfo=pytz.UTC)
    for address, weight in sorted(tbw["weight"].items(), key=lambda e: e[-1], reverse=True):
        delta = datetime.datetime.now(pytz.UTC) - begintime
        payload = dict(
            type=0,
            fee=10000000,
            timestamp=int(delta.total_seconds()),
            # fee (0.1 token) is deducted from each recipient's share
            amount=int(amount * 100000000 * weight - 10000000),
            recipientId=address,
            senderPublicKey=keys["publicKey"],
            vendorField=param.get(
                "vendorField",
                "%s reward" % param.get("username", "Delegate")))
        payload["signature"] = crypto.getSignature(payload, keys["privateKey"])
        if "secondPrivateKey" in keys:
            # BUG FIX: the second signature was computed with the FIRST
            # private key, producing invalid signSignature values; it must
            # use the second private key
            payload["signSignature"] = crypto.getSignature(
                payload, keys["secondPrivateKey"])
        payload["id"] = crypto.getId(payload)
        registry[payload["id"]] = payload
    if param.get("funds", False):
        account = requests.get(
            config["peer"] + "/api/delegates/get?publicKey=%s" % keys["publicKey"],
            verify=True, timeout=5).json().get("delegate", {})
        balance = int(
            requests.get(
                config["peer"] + "/api/accounts/getBalance?address=%s" % account["address"],
                verify=True, timeout=5).json().get("balance", 0)) / 100000000.
        if balance > 0:
            # delegate's own share = what remains after rewards and savings
            income = balance - tbw["amount"] - tbw["saved"]
            delta = datetime.datetime.now(pytz.UTC) - begintime
            payload = dict(
                type=0,
                fee=10000000,
                timestamp=int(delta.total_seconds()),
                amount=int(income * 100000000 - 10000000),
                recipientId=param["funds"],
                senderPublicKey=keys["publicKey"],
                vendorField="%s share" % param.get("username", "Delegate"))
            payload["signature"] = crypto.getSignature(payload, keys["privateKey"])
            if "secondPrivateKey" in keys:
                # BUG FIX: same second-signature key fix as above
                payload["signSignature"] = crypto.getSignature(
                    payload, keys["secondPrivateKey"])
            payload["id"] = crypto.getId(payload)
            registry[payload["id"]] = payload
    dumpJson(registry, os.path.join(ROOT, "%s.registry" % date))
    try:
        os.makedirs(os.path.join(ROOT, "archive"))
    except OSError:
        # BUG FIX: was a bare `except:`; only ignore "already exists"
        # style OS errors
        pass
    shutil.move(os.path.join(ROOT, "%s.tbw" % date),
                os.path.join(ROOT, "archive", "%s.tbw" % date))
def getNodeHeight():
    """Return the local node's current block height, obtained by running
    the configured 'nodeheight' shell command in quiet mode."""
    config = loadConfig()
    config["quiet"] = True
    stdout = execute(*config["cmd"]["nodeheight"], **config)[0]
    return int(stdout.strip())
def check():
    """Periodic health check: compare the node's height and block speed
    against the network and trigger restart()/rebuild() when the node is
    stuck, lagging, or forking. Persists its measurements via dumpStatus().
    """
    # load app configuration, last known status and best seed
    config = loadConfig()
    status = loadStatus()
    # exit if node is rebuilding
    if status.get("rebuilding", False):
        logMsg("Node is rebuilding...")
        # dumpStatus(status)
        return
    # load best seed
    seed = getBestSeed(*config.get("seeds", []))
    # get values to check node health
    if not seed:
        # better use peer if no seeds available
        logMsg("No seed available...")
        seed = config["peer"]
        net_height = getNetHeight(seed)
    else:
        try:
            net_height = max(getNetHeight(seed), getNetHeight(config["peer"]))
        except requests.exceptions.ConnectionError:
            # network unreachable from here: restart and try again next round
            restart()
            return
    # estimate time remaining for next block forge
    status["next forge round"] = getNextForgeRound(seed, **config)
    node_height = getNodeHeight()
    timestamp = time.time()
    height_diff = net_height - node_height
    # blocks per minute since the previous check (first run divides by a
    # large timestamp delta because "timestamp" defaults to 0, so the
    # result is ~0 and simply counts as a speed issue round)
    block_speed = 60*(node_height - status.get("node height", 0))/ \
        (timestamp - status.get("timestamp", 0))
    # update computed values
    status.update(
        **{
            "net height": net_height,
            "node height": node_height,
            "height diff": height_diff,
            "timestamp": timestamp,
            "block speed": block_speed
        })
    # if node block speed under 80% of theorical blockchain speed
    if block_speed < 0.8 * 60.0 / config[
            "blocktime"]:  # 60/blocktime => blockchain speed in block per minute
        status["block speed issue round"] = status.get(
            "block speed issue round", 0) + 1
        logMsg("Block speed issue : %.2f blk/min instead of %.2f (round %d)" % \
            (block_speed, 60.0/config["blocktime"], status["block speed issue round"]))
    else:
        status.pop("block speed issue round", False)
    # if node height is far from half day produced block
    if height_diff > 60. / config["blocktime"] * 60 * 12:
        logMsg("Node too far away from height, better rebuild ! (%d blocks)" % (height_diff))
        rebuild()
        restart()
    # node height is same as net height
    elif -1 <= height_diff <= 0:
        status["synch"] = True
        if status.pop("restarted", False):
            logMsg("Node is synch !")
        dumpStatus(status)
    else:
        status["synch"] = False
        dumpStatus(status)
    # node is not stuck and is too far from net height
    if height_diff > config["blocktime"] / 0.8:
        logMsg("Height difference is too high ! (%d blocks)" % (height_diff))
        # node already restarted and we gave it sufficient time to reach net height
        if status.get("restarted", False) and \
           status.get("block speed issue round", 0) >= 0.5*config["delegates"]*config["blocktime"]//60:
            rebuild()
            restart()
        # node is not restarted since last synch
        elif not status.get("restarted", False):
            restart()
    # node is going solo --> fork !
    elif height_diff < -config["blocktime"] / 0.8:
        logMsg(
            "Node is going solo with %d blocks forward ! It is forking..." %
            (-height_diff))
        pprint.pprint(status, indent=4)
        if status.get("block speed issue round",
                      0) > 0.8 * config["delegates"] * config["blocktime"] // 60:
            restart()
PERMANENT_SESSION_LIFETIME = 300, # used to encrypt cookies # secret key is generated each time app is restarted SECRET_KEY = os.urandom(24), # JS can't access cookies SESSION_COOKIE_HTTPONLY = True, # bi use of https SESSION_COOKIE_SECURE = False, # update cookies on each request # cookie are outdated after PERMANENT_SESSION_LIFETIME seconds of idle SESSION_REFRESH_EACH_REQUEST = True ) # load all information CONFIG = loadConfig() PARAM = loadParam() LOCAL_API = "http://*****:*****@app.route("/") def render(): global CONFIG, PARAM spread() weight = loadTBW() tokens = sum(weight.values()) c = float(sum(weight.values())) items = [[k,v/max(1.0, c)*100] for k,v in weight.items()]