def update_outputs_count(grin_owner_api_url, target_output_count):
    global LOGGER
    LOGGER.warn("Running update_outputs_count() ---")
    current_height = grin.blocking_get_current_height()
    r = retrieve_outputs()
    #LOGGER.warn("Wallet outputs are {}".format(r.json()))
    # Iterate through the list of outputs and count the ones that could be
    # consolidated (the spendable outputs)
    outputs = r.json()[1]
    count = 0
    for output in outputs:
        output = output[0]
        if output["status"] == 'Unspent' and output["lock_height"] < current_height:
            count = count + 1
    LOGGER.warn("Wallet has {} spendable outputs".format(count))
    # Only re-tidy when the spendable count drifts well outside the target range
    min_count = target_output_count - (target_output_count / 2)
    max_count = target_output_count + (target_output_count * 2)
    if (count < min_count) or (count > max_count):
        LOGGER.warn("Setting number of wallet outputs to {}".format(target_output_count))
        try:
            payments.tidy_outputs(
                target_output_count,
                logger=LOGGER,
                wallet_auth=(wallet_api_user, wallet_api_key),
            )
        except Exception as e:
            LOGGER.warn("Failed to set number of wallet outputs: {}".format(repr(e)))
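# Illustrative note (not part of the original source): with the bounds computed above,
# a target_output_count of 100 gives min_count = 50 and max_count = 300, so
# payments.tidy_outputs() is only triggered when the spendable output count drifts
# below half the target or above three times the target.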
def main():
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    CONFIG = lib.get_config()
    # XXX TODO: Put in config
    HOST = "0.0.0.0"
    PORT = 32080
    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0

    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    database = lib.get_db()
    HEIGHT = Worker_shares.get_latest_height()
    if HEIGHT is None:
        HEIGHT = grin.blocking_get_current_height()
    SHARES = WorkerShares(LOGGER)

    #server = ThreadedHTTPServer((HOST, PORT), ShareHandler)
    #server = HTTPServer((HOST, PORT), ShareHandler)
    # server = socketserver.TCPServer((HOST, PORT), ShareHandler)
    # server.handle_request()
    # server.server_close()

    commit_thread = threading.Thread(target=ShareCommitScheduler, args=(15, ))
    commit_thread.start()

    server = ThreadedTCPServer((HOST, PORT), ShareHandler)
    server.serve_forever()
def main():
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Get Config settings
    pool_fee = float(CONFIG[PROCESS]["pool_fee"])

    # Number of blocks of share data used to calculate rewards
    PPLNG_WINDOW_SIZE = 60
    try:
        PPLNG_WINDOW_SIZE = int(os.environ["PPLNG_WINDOW_SIZE"])
    except Exception as e:
        LOGGER.error(
            "Failed to get PPLNG_WINDOW_SIZE from the environment: {}. Using default size of {}"
            .format(e, PPLNG_WINDOW_SIZE))

    # Connect to DB
    database = lib.get_db()

    # Get current blockchain height
    chain_height = grin.blocking_get_current_height()

    # Get unlocked blocks from the db
    unlocked_blocks = Pool_blocks.get_all_unlocked()
    unlocked_blocks = [blk.height for blk in unlocked_blocks]
    LOGGER.warn("Paying for {} pool blocks: {}".format(len(unlocked_blocks), unlocked_blocks))
    for height in unlocked_blocks:
        try:
            LOGGER.warn("Processing unlocked block: {}".format(height))
            # Call the library routine to get this block's payout map
            payout_map = pool.calculate_block_payout_map(
                height, PPLNG_WINDOW_SIZE, pool_fee, LOGGER, False)
            #print("payout_map = {}".format(payout_map))
            # Store the payment map for this block
            credits_record = Pool_credits(chain_height, height, payout_map)
            database.db.getSession().add(credits_record)
            # Make payments based on the workers' total share_value
            Pool_blocks.setState(height, "paid")
            for user_id, payment_amount in payout_map.items():
                # Add worker rewards to pool account balance
                LOGGER.warn("Credit to user: {} = {}".format(user_id, payment_amount))
                worker_utxo = Pool_utxo.credit_worker(user_id, payment_amount)
                # Worker_stats accounting and running totals
                #latest_worker_stats = Worker_stats.get_latest_by_id(user_id)
                #latest_worker_stats.dirty = True
            database.db.getSession().commit()
        except Exception as e:
            database.db.getSession().rollback()
            LOGGER.exception("Something went wrong: {}".format(repr(e)))

    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
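# Hedged illustration (inferred from the loop above, not taken from the original source):
# calculate_block_payout_map() is expected to return a dict keyed by user id, where each
# value is that worker's portion of the block reward after the pool fee, e.g.
#   payout_map = {2: payment_for_user_2, 7: payment_for_user_7}
# Each entry is then credited to that worker's Pool_utxo balance in turn.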
def main():
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    database = lib.get_db()
    LOGGER.warn("=== Starting {}".format(PROCESS))

    check_interval = float(CONFIG[PROCESS]["check_interval"])
    max_rebuild_depth = float(CONFIG[PROCESS]["max_rebuild_depth"])
    avg_over_range_grin = int(CONFIG["grinStats"]["avg_over_range"])
    avg_over_range_pool = int(CONFIG["poolStats"]["avg_over_range"])
    avg_over_range_worker = int(CONFIG["workerStats"]["avg_over_range"])
    current_height = grin.blocking_get_current_height()
    rebuild_height = current_height - max_rebuild_depth

    while True:
        # Grin blocks, and therefore grin stats, can't be dirty
        # # Check for dirty grin stats
        # dirty = Grin_stats.get_first_dirty()
        # if dirty is not None:
        #     LOGGER.warn("Recalculating Grin Stats from {}".format(dirty.height))
        #     end_height = grinstats.recalculate(dirty.height, avg_over_range_grin)
        #     LOGGER.warn("Finished Recalculating Grin Stats: {} - {}".format(dirty.height, end_height))

        # Check for dirty pool stats
        dirty = Pool_stats.get_first_dirty(rebuild_height)
        if dirty is not None:
            LOGGER.warn("Recalculating Pool Stats from {}".format(dirty.height))
            end_height = poolstats.recalculate(dirty.height, avg_over_range_pool)
            LOGGER.warn("Finished Recalculating Pool Stats: {} - {}".format(
                dirty.height, end_height))

        # Check for dirty worker stats
        dirty = Worker_stats.get_first_dirty(rebuild_height)
        while dirty is not None:
            LOGGER.warn("Recalculating Worker Stats from {} with avg_over_range {}".format(
                dirty.height, avg_over_range_worker))
            end_height = workerstats.recalculate(dirty.height, avg_over_range_worker)
            LOGGER.warn("Finished Recalculating Worker Stats for {} - {}".format(
                dirty.height, end_height))
            dirty = Worker_stats.get_first_dirty(rebuild_height)

        sys.stdout.flush()
        time.sleep(check_interval)

    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get the list of pool_blocks that are
    #   old enough to unlock and
    #   are not orphan blocks
    # XXX TODO: The node may not be synced, may need to wait?
    block_locktime = int(CONFIG[PROCESS]["block_locktime"])
    block_expiretime = int(CONFIG[PROCESS]["block_expiretime"])
    LOGGER.warn("using locktime: {}, expiretime: {}".format(block_locktime, block_expiretime))
    latest = grin.blocking_get_current_height()
    LOGGER.warn("Latest: {}".format(latest))

    new_poolblocks = Pool_blocks.get_all_new()
    for pb in new_poolblocks:
        if pb.height < (latest - block_expiretime):
            # Don't re-process very old blocks - protection against duplicate payouts.
            LOGGER.error("Processed expired pool block at height: {}".format(pb.height))
            pb.state = "expired"
            continue
        response = grin.get_block_by_height(pb.height)
        if response is None:
            # Unknown. Leave as "new" for now and attempt to validate next run
            LOGGER.error("Failed to get block {}".format(pb.height))
            continue
        if int(response["header"]["nonce"]) != int(pb.nonce):
            LOGGER.warn("Processed orphan pool block at height: {}".format(pb.height))
            pb.state = "orphan"
            continue
        if pb.height < (latest - block_locktime):
            # This block seems valid, and old enough to unlock
            LOGGER.warn("Unlocking pool block at height: {}".format(pb.height))
            pb.state = "unlocked"
        sys.stdout.flush()

    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
def recalculate(start_height, avg_range):
    database = lib.get_db()
    height = start_height
    while height <= grin.blocking_get_current_height():
        old_stats = Worker_stats.get_by_height(height)
        new_stats = calculate(height, avg_range)
        for old_stat in old_stats:
            database.db.deleteDataObj(old_stat)
        for stats in new_stats:
            print("new/updated stats: {} ".format(stats))
            database.db.getSession().add(stats)
        if (height % BATCHSZ == 0):
            database.db.getSession().commit()
        height = height + 1
    database.db.getSession().commit()
def recalculate(start_height, avg_range):
    database = lib.get_db()
    height = start_height
    while height <= grin.blocking_get_current_height():
        old_stats = Grin_stats.get_by_height(height)
        new_stats = calculate(height, avg_range)
        if old_stats is None:
            database.db.createDataObj(new_stats)
        else:
            old_stats.timestamp = new_stats.timestamp
            old_stats.difficulty = new_stats.difficulty
            old_stats.gps = new_stats.gps
            old_stats.total_utxoset_size = new_stats.total_utxoset_size
        database.db.getSession().commit()
        height = height + 1
def recalculate(start_height, window_size):
    database = lib.get_db()
    height = start_height
    while height < grin.blocking_get_current_height():
        old_stats = Pool_stats.get_by_height(height)
        new_stats = calculate(height, window_size)
        if old_stats is None:
            database.db.createDataObj(new_stats)
        else:
            old_stats.timestamp = new_stats.timestamp
            old_stats.active_miners = new_stats.active_miners
            old_stats.shares_processed = new_stats.shares_processed
            old_stats.total_blocks_found = new_stats.total_blocks_found
            old_stats.total_shares_processed = new_stats.total_shares_processed
            old_stats.dirty = False
        database.db.getSession().commit()
        height = height + 1
def ShareCommitScheduler(interval=15):
    global LOGGER
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT

    LOGGER.warn(
        "HEIGHT: {}, POOLSHARE_HEIGHT: {}, GRINSHARE_HEIGHT: {}".format(
            HEIGHT, POOLSHARE_HEIGHT, GRINSHARE_HEIGHT))
    # XXX TODO: enhance
    while True:
        bc_height = grin.blocking_get_current_height()
        while (HEIGHT < POOLSHARE_HEIGHT and HEIGHT < GRINSHARE_HEIGHT) or (bc_height > HEIGHT):
            LOGGER.warn("Commit shares for height: {}".format(HEIGHT))
            SHARES.commit(HEIGHT)
            SHARES.clear(HEIGHT)
            HEIGHT = HEIGHT + 1
        time.sleep(interval)
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Grin_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup - Need 3 stats records to bootstrap
        LOGGER.warn("Initializing Grin_stats")
        grinstats.initialize()
        last_height = 2
    else:
        last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn("grinStats service starting at block height: {}".format(height))

    # Generate grin stats records - one per grin block
    while True:
        latest = grin.blocking_get_current_height()
        while latest >= height:
            try:
                new_stats = grinstats.calculate(height, avg_over_range)
                # Batch new stats when possible, but commit at reasonable intervals
                database.db.getSession().add(new_stats)
                if (height % BATCHSZ == 0) or (height >= (latest - 10)):
                    database.db.getSession().commit()
                LOGGER.warn("Added Grin_stats for block: {} - gps:{} diff:{}".format(
                    new_stats.height, new_stats.gps, new_stats.difficulty))
                height = height + 1
            except AssertionError as e:
                LOGGER.error("Something went wrong: {}".format(e))
                sleep(check_interval)
            sys.stdout.flush()
        sleep(check_interval)

    LOGGER.warn("=== Completed {}".format(PROCESS))
def estimated_daily_reward(gps=1, height=0, range=60):
    if height == 0:
        height = grin.blocking_get_current_height()
    # Get Avg Network GPS
    network_gps_avgs = grinstats.avg_network_gps(height, range)
    # Get POW Ratios
    C29Ratio = grin.secondary_pow_ratio(height)
    PrimaryRatio = 100 - C29Ratio
    # Calculate Expected Grin Reward per Day
    estReward = {}
    for powsize, gpsavg in network_gps_avgs.items():
        if powsize == grin.SECONDARY_SIZE:
            estReward[powsize] = (float(gps) / network_gps_avgs[powsize]
                                  * C29Ratio / 100.0
                                  * grin.DAY_HEIGHT * get_block_reward())
        else:
            estReward[powsize] = (float(gps) / network_gps_avgs[powsize]
                                  * PrimaryRatio / 100.0
                                  * grin.DAY_HEIGHT * get_block_reward())
    return estReward
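# Hedged worked example (all values are assumptions, not pool data): suppose grin.DAY_HEIGHT
# is 1440 (one block per minute), get_block_reward() returns 60 grin, the network averages
# 1000 gps on the secondary (C29) size, and the secondary PoW ratio is 45%. A miner
# contributing 1 C29 gps would then see an estimated daily reward of roughly:
#   1.0 / 1000.0 * 45 / 100.0 * 1440 * 60 = 38.88 grin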
def recalculate(start_height, avg_range):
    database = lib.get_db()
    height = start_height
    while height <= grin.blocking_get_current_height():
        old_stats = Worker_stats.get_by_height(height)
        new_stats = calculate(height, avg_range)
        for old_stat in old_stats:
            database.db.deleteDataObj(old_stat)
        for stats in new_stats:
            print("new/updated stats: {} ".format(stats))
            worker = stats.worker
            database.db.getSession().add(stats)
        if (height % BATCHSZ == 0):
            database.db.getSession().commit()
        height = height + 1

    # We updated one or more worker stats, so we mark the Pool_stats dirty
    stats_rec = Pool_stats.get_by_height(height)
    if stats_rec is not None:
        stats_rec.dirty = True
    database.db.getSession().commit()
def ShareCommitScheduler(interval=15):
    global LOGGER
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT

    # XXX TODO: enhance
    while True:
        bc_height = grin.blocking_get_current_height()
        LOGGER.warn(
            "HEIGHT={}, POOLSHARE_HEIGHT={}, GRINSHARE_HEIGHT={}".format(
                HEIGHT, POOLSHARE_HEIGHT, GRINSHARE_HEIGHT))
        while (HEIGHT < POOLSHARE_HEIGHT and HEIGHT < GRINSHARE_HEIGHT) or (bc_height > HEIGHT):
            # Commit and purge current block share data if we are starting a new block
            LOGGER.warn("Commit shares for height: {}".format(HEIGHT))
            # time.sleep(5)  # Give straggler shares a chance to come in
            SHARES.commit(HEIGHT)
            HEIGHT = HEIGHT + 1
        # Commit and purge all old share data (except current block) every 'interval' seconds
        SHARES.commit()  # All except current block
        time.sleep(interval)
def main():
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    block_expiretime = int(CONFIG["poolblockUnlocker"]["block_expiretime"])
    current_height = grin.blocking_get_current_height()
    new_poolshares = Pool_shares.getUnvalidated(current_height, VALIDATION_DEPTH)
    # XXX TODO: Batch by block
    for pool_share in new_poolshares:
        if pool_share.height < (current_height - block_expiretime):
            # This share belongs to an expired block
            if pool_share.is_valid == True:
                Pool_stats.mark_dirty(pool_share.height)
            pool_share.is_valid = False
            pool_share.validated = True
            pool_share.invalid_reason = "expired"
        else:
            grin_share = Grin_shares.get_by_nonce(pool_share.nonce)
            if grin_share is None:
                if pool_share.is_valid == True:
                    Pool_stats.mark_dirty(pool_share.height)
                # No matching validated grin share was found (yet)
                if pool_share.height < current_height - block_expiretime:
                    pool_share.is_valid = False
                    pool_share.validated = True
                    pool_share.invalid_reason = "no grin_share"
                else:
                    # Mark invalid, but don't finalize validation, so we will check again later
                    pool_share.is_valid = False
                    pool_share.invalid_reason = "no grin_share"
            else:
                # We did find a matching grin share, make sure it is valid and grin accepted it
                if pool_share.nonce != grin_share.nonce:
                    if pool_share.is_valid == True:
                        Pool_stats.mark_dirty(pool_share.height)
                    pool_share.is_valid = False
                    pool_share.validated = True
                    pool_share.invalid_reason = "nonce mismatch"
                elif pool_share.worker_difficulty > grin_share.actual_difficulty:
                    if pool_share.is_valid == True:
                        Pool_stats.mark_dirty(pool_share.height)
                    pool_share.is_valid = False
                    pool_share.validated = True
                    pool_share.invalid_reason = "low difficulty"
                else:
                    # It did not fail any of our tests, it's valid
                    if pool_share.is_valid == False:
                        Pool_stats.mark_dirty(pool_share.height)
                    pool_share.validated = True
                    pool_share.is_valid = True
                    pool_share.invalid_reason = "None"
        # LOGGER.warn("Share {}, {} is {} because {}".format(pool_share.height, pool_share.nonce, pool_share.is_valid, pool_share.invalid_reason))

    database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get the list of pool_blocks that are
    #   old enough to unlock and
    #   are not orphan blocks
    # XXX TODO: The node may not be synced, may need to wait?
    block_locktime = int(CONFIG[PROCESS]["block_locktime"])
    block_expiretime = int(CONFIG[PROCESS]["block_expiretime"])
    LOGGER.warn("using locktime: {}, expiretime: {}".format(block_locktime, block_expiretime))
    latest = grin.blocking_get_current_height()
    LOGGER.warn("Latest: {}".format(latest))

    # Get outputs from the wallet
    wallet_outputs = wallet.retrieve_outputs(refresh=True)
    wallet_outputs_map = wallet.outputs_to_map_by_height(wallet_outputs)
    wallet_output_heights = list(wallet_outputs_map.keys())
    wallet_output_heights.sort()
    #print("wallet_output_heights = {}".format(wallet_output_heights))

    new_poolblocks = Pool_blocks.get_all_new()
    for pb in new_poolblocks:
        if pb.height < (latest - block_expiretime):
            # Don't re-process very old blocks - protection against duplicate payouts.
            LOGGER.error("Processed expired pool block at height: {}".format(pb.height))
            pb.state = "expired"
            continue
        response = grin.get_block_by_height(pb.height)
        # Check for unknown block
        if response is None:
            # Unknown. Leave as "new" for now and attempt to validate next run
            LOGGER.error("Failed to get block {}".format(pb.height))
            continue
        # Check for orphans
        if int(response["header"]["nonce"]) != int(pb.nonce):
            LOGGER.warn("Processed orphan pool block at height: {}".format(pb.height))
            pb.state = "orphan"
            continue
        # # Check that we have a coinbase output in the wallet for this block
        # if pb.height not in wallet_output_heights:
        #     LOGGER.warn("Wallet has no output for pool block at height: {}".format(pb.height))
        #     pb.state = "no_wallet_output"
        #     continue
        # Check if it's old enough to be mature
        if pb.height < (latest - block_locktime):
            # This block seems valid, and old enough to unlock
            LOGGER.warn("Unlocking pool block at height: {}".format(pb.height))
            pb.state = "unlocked"
        sys.stdout.flush()

    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
def ShareCommitScheduler(max_lag, commit_interval, logger):
    global SHARES_MUTEX
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Waiting for first block")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            SHARES_MUTEX.acquire()
            try:
                logger.warn("= Begin ShareCommitScheduler")
                # Iterate over each sharedata key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_SHAREDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - max_lag:
                        # Commit this record
                        logger.warn(
                            "-- ShareCommitScheduler processing record at height: {}".format(
                                share_height))
                        redis_sharedata = redisdb.get(key)
                        redis_sharedata = json.loads(redis_sharedata.decode())
                        ts_str = redis_sharedata.pop("timestamp", str(datetime.utcnow()))
                        ts = datetime.strptime(ts_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                        for worker, worker_shares in redis_sharedata.items():
                            # Get any existing record
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "New share record for worker {} at height {}".format(
                                        worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                logger.warn(
                                    "Add to existing record for worker {} at height {}".format(
                                        worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                worker_shares_rec.add_shares(
                                    edge_bits,
                                    shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                # Debug
                                logger.warn("Worker Shares: {}".format(worker_shares_rec))
                        # We wrote this record to mysql, so remove the redis cache
                        database.db.getSession().commit()
                        redisdb.delete(key)
                # Write filler records if needed
                share_height = Worker_shares.get_latest_height()
                if share_height is None:
                    share_height = grin.blocking_get_current_height()
                share_height = share_height + 1
                while share_height < (chain_height - max_lag):
                    logger.warn(
                        "Processed 0 shares in block {} - Creating filler record".format(
                            share_height))
                    filler_worker_shares_rec = Worker_shares(
                        height=share_height,
                        user_id=1,  # Pool User
                        timestamp=datetime.utcnow(),
                    )
                    database.db.createDataObj(filler_worker_shares_rec)
                    share_height += 1
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
                logger.warn("= End ShareCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            lib.teardown_db()
            logger.exception("Something went wrong: {} ".format(traceback.format_exc()))
            time.sleep(10)
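# Sketch of the redis record layout this scheduler appears to expect (an assumption inferred
# from the reads above, not taken from the original source). Each key looks like
# "<REDIS_SHAREDATA_KEY>-<height>" and its value is a JSON object with an optional
# "timestamp" plus one entry per worker, keyed by user id, mapping edge_bits to counters:
#
# example_sharedata = {
#     "timestamp": "2019-01-01 00:00:00.000000",
#     "42": {
#         "29": {"difficulty": 4, "accepted": 10, "rejected": 1, "stale": 0},
#         "31": {"difficulty": 16, "accepted": 2, "rejected": 0, "stale": 0},
#     },
# }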
def main():
    global CONFIG
    global LOGGER
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    # Get Config
    check_interval = float(CONFIG[PROCESS]["check_interval"])

    # Find the height of the latest block record
    last_height = grin.blocking_get_current_height() - 1400
    latest_block = Blocks.get_latest()
    if latest_block is not None:
        last_height = latest_block.height
    height = last_height + 1
    height = max(0, height)
    LOGGER.warn("Starting at block height: {}".format(height))

    while True:
        try:
            latest = grin.blocking_get_current_height()
            while latest >= height:
                response = grin.blocking_get_block_by_height(height)
                LOGGER.warn("New Block: {} at {}".format(
                    response["header"]["hash"], response["header"]["height"]))
                #print("sleeping 60....")
                #sleep(60)
                #print(".....GO")
                try:
                    new_block = Blocks(
                        hash=response["header"]["hash"],
                        version=response["header"]["version"],
                        height=response["header"]["height"],
                        previous=response["header"]["previous"],
                        timestamp=datetime.strptime(
                            response["header"]["timestamp"][:-1],
                            "%Y-%m-%dT%H:%M:%S+00:0"),
                        output_root=response["header"]["output_root"],
                        range_proof_root=response["header"]["range_proof_root"],
                        kernel_root=response["header"]["kernel_root"],
                        nonce=response["header"]["nonce"],
                        edge_bits=response["header"]["edge_bits"],
                        total_difficulty=response["header"]["total_difficulty"],
                        secondary_scaling=response["header"]["secondary_scaling"],
                        num_inputs=len(response["inputs"]),
                        num_outputs=len(response["outputs"]),
                        num_kernels=len(response["kernels"]),
                        fee=sum(k["fee"] for k in response["kernels"]),
                        lock_height=response["kernels"][0]["lock_height"] if len(response["kernels"]) > 0 else 0,
                        total_kernel_offset=response["header"]["total_kernel_offset"],
                        state="new")
                    # Batch inserts when catching up
                    database.db.getSession().add(new_block)
                    if (height % BATCHSZ == 0) or (height >= (latest - 10)):
                        database.db.getSession().commit()
                    height = height + 1
                except (sqlalchemy.exc.IntegrityError, pymysql.err.IntegrityError):
                    LOGGER.warn("Attempted to re-add block: {}".format(
                        response["header"]["height"]))
                    database.db.getSession().rollback()
                    latest_block = Blocks.get_latest()
                    height = latest_block.height + 1
                    sleep(check_interval)
                sys.stdout.flush()
            sleep(check_interval)
        except Exception as e:
            LOGGER.error("Something went wrong: {}\n{}".format(
                e, traceback.format_exc().splitlines()))
            database.db.getSession().rollback()
            sys.stdout.flush()
            sleep(check_interval)

    # Should never get here, but....
    LOGGER.warn("=== Completed {}".format(PROCESS))