def estimate_gps_for_all_sizes(window):
    """Estimate pool-wide graph rate (GPS) for every solution size in `window`.

    `window` is an ordered list of share records (oldest first); each record
    exposes `.height`, `.timestamp` and `.shares`, where each shares entry has
    `.edge_bits` (graph size) and a `.valid` count.

    Returns a list of (edge_bits, gps) tuples, or [] when the window is too
    small to span any time interval.
    """
    print("estimate_gps_for_all_sizes across all workers")
    sys.stdout.flush()
    # Need at least two records to have a non-zero time span to average over
    if len(window) < 2:
        return []
    first_height = window[0].height
    last_height = window[-1].height
    # Sanity check: both boundary grin blocks must exist in the database
    first_grin_block = Blocks.get_by_height(first_height)
    last_grin_block = Blocks.get_by_height(last_height)
    assert first_grin_block is not None, "Missing grin block at height: {}".format(
        first_height)
    assert last_grin_block is not None, "Missing grin block at height: {}".format(
        last_height)
    # Tally valid shares per graph size (edge_bits) across the whole window
    valid_cnt = {}
    for pool_shares_rec in window:
        for shares in pool_shares_rec.shares:
            if shares.edge_bits not in valid_cnt:
                valid_cnt[shares.edge_bits] = 0
            valid_cnt[shares.edge_bits] += shares.valid
    print("Valid Share Counts entire window:")
    pp.pprint(valid_cnt)
    # One GPS estimate per size over the window's wall-clock span
    all_gps = []
    for sz, cnt in valid_cnt.items():
        gps = lib.calculate_graph_rate(window[0].timestamp,
                                       window[-1].timestamp, cnt, sz,
                                       last_height)
        all_gps.append((
            sz,
            gps,
        ))
    sys.stdout.flush()
    return all_gps
def initialize(): database = lib.get_db() # Special case for new pool startup - Need 3 stats records to bootstrap block_zero = Blocks.get_by_height(0) seed_stat0 = Grin_stats(height=0, timestamp=block_zero.timestamp, gps=0, difficulty=block_zero.total_difficulty, total_utxoset_size=block_zero.num_inputs) database.db.createDataObj(seed_stat0) block_one = Blocks.get_by_height(1) seed_stat1 = Grin_stats(height=1, timestamp=block_one.timestamp, gps=0, difficulty=block_one.total_difficulty - block_zero.total_difficulty, total_utxoset_size=seed_stat0.total_utxoset_size + block_one.num_outputs - block_one.num_inputs) database.db.createDataObj(seed_stat1) block_two = Blocks.get_by_height(2) seed_stat2 = Grin_stats(height=2, timestamp=block_two.timestamp, gps=0, difficulty=block_two.total_difficulty - block_one.total_difficulty, total_utxoset_size=seed_stat1.total_utxoset_size + block_two.num_outputs - block_two.num_inputs) database.db.createDataObj(seed_stat2)
def estimate_gps_for_all_sizes(user_id, window):
    """Estimate graph rate (GPS) per solution size for one user's shares.

    Filters `window` (ordered Worker_shares records, oldest first) down to
    records belonging to `user_id`, tallies valid shares per edge_bits size,
    and returns a list of (edge_bits, gps) tuples.
    """
    first_height = window[0].height
    last_height = window[-1].height
    print("estimate_gps_for_all_sizes: user_id: {}".format(user_id))
    # print("All Worker_shares in the window:")
    # pp.pprint(window)
    # Sanity check: both boundary grin blocks must exist in the database
    first_grin_block = Blocks.get_by_height(first_height)
    last_grin_block = Blocks.get_by_height(last_height)
    assert first_grin_block is not None, "Missing grin block at height: {}".format(first_height)
    assert last_grin_block is not None, "Missing grin block at height: {}".format(last_height)
    # Get the Worker_shares in the window *for this user_id*
    this_workers_shares = [ws for ws in window if ws.user_id == user_id]
    print("This workers Shares records for the entire window: {}".format(len(this_workers_shares)))
    #pp.pprint(this_workers_shares)
    # Get a count of number of each valid solution size in this_workers_shares in this window
    valid_cnt = {}
    for worker_shares_rec in this_workers_shares:
        for shares in worker_shares_rec.shares:
            if shares.edge_bits not in valid_cnt:
                valid_cnt[shares.edge_bits] = 0
            valid_cnt[shares.edge_bits] += shares.valid
    #print("Valid Share Counts entire window for {}:".format(user_id))
    #pp.pprint(valid_cnt)
    # Calcualte the gps for each graph size in the window
    # NOTE(review): calculate_graph_rate is called with 3 args here but with
    # 5 args (adding sz, height) in the pool-wide variant — confirm which
    # signature lib.calculate_graph_rate actually expects.
    all_gps = []
    for sz, cnt in valid_cnt.items():
        gps = lib.calculate_graph_rate(window[0].timestamp, window[-1].timestamp, cnt)
        all_gps.append((sz, gps, ))
    sys.stdout.flush()
    return all_gps
def RigDataCommitScheduler(max_lag, commit_interval, logger):
    """Background thread: periodically fan cached per-height rigdata out to
    per-(height, user) redis records.

    Every `commit_interval` seconds, scans redis for cache keys of the form
    "<REDIS_RIGDATA_KEY>-<height>"; any record older than
    (chain_height - RIGDATA_GROUPSIZE - max_lag) is split per user into
    "<REDIS_RIGDATA_KEY>.<height>.<user>" keys with an expiry, then the
    cache record is deleted.  Runs forever; errors are logged and retried.
    """
    global RIGDATA_MUTEX
    global RIGDATA_GROUPSIZE
    global REDIS_RIGDATA_KEY
    global REDIS_RIGDATA_EXPIRETIME
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            # Block until the database has at least one grin block
            while latest_block is None:
                logger.warn("Cant get latest block from database")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            # DB only needed for the chain height; release it before redis work
            lib.teardown_db()
            RIGDATA_MUTEX.acquire()
            try:
                logger.warn("= Begin RigDataCommitScheduler")
                # Itterate over each rigdata cache key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_RIGDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    # Only commit records old enough that no more shares are expected
                    if share_height < chain_height - RIGDATA_GROUPSIZE - max_lag:
                        # Commit this set of rigdata records
                        logger.warn(
                            "-- RigDataCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_cached_rigdata = redisdb.get(key)
                        redis_cached_rigdata = json.loads(
                            redis_cached_rigdata.decode())
                        for user, rigdata in redis_cached_rigdata.items():
                            # NOTE: this rebinds redis_key (the scan pattern above);
                            # harmless since the pattern is not reused afterwards
                            redis_key = "{}.{}.{}".format(
                                REDIS_RIGDATA_KEY, share_height, user)
                            if redisdb.exists(redis_key):
                                # XXX TODO
                                logger.warn(
                                    "XXX TODO: DUPLICATE RIGDATA WORKER KEY - MERGE ???"
                                )
                            else:
                                redisdb.set(redis_key,
                                            json.dumps(rigdata),
                                            ex=REDIS_RIGDATA_EXPIRETIME)
                        # Wrote this rigdata to REDIS, so remove the cache record now
                        redisdb.delete(key)
            finally:
                RIGDATA_MUTEX.release()
            logger.warn("= End RigDataCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            logger.exception("Something went wrong: {}".format(
                traceback.format_exc()))
            time.sleep(10)
def main():
    """Validate recent blocks in the pool DB against the grin node.

    Walks the most recent `validation_depth` blocks: marks DB records whose
    hash disagrees with the node as "orphan", and back-fills any block the
    DB is missing with state "missing".
    """
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    grin_api_url = "http://" + CONFIG["grin_node"]["address"] + ":" + CONFIG["grin_node"]["api_port"]
    status_url = grin_api_url + "/v1/status"
    blocks_url = grin_api_url + "/v1/blocks/"
    validation_depth = int(CONFIG[PROCESS]["validation_depth"])

    response = requests.get(status_url)
    latest = int(response.json()["tip"]["height"])
    last = latest - validation_depth  # start a reasonable distance back
    if last < 0:
        last = 1
    LOGGER.warn("Starting from block #{}".format(last))

    for i in range(last, latest):
        url = blocks_url + str(i)
        response = requests.get(url).json()
        # print("{}: {}".format(response["header"]["height"], response["header"]["hash"]))
        try:
            # BUGFIX: was Blocks.get_by_height([i]) — passed a one-element
            # list where an int height is expected (every other call site
            # passes a plain int).
            rec = Blocks.get_by_height(i)
            if rec is not None:
                # Existing record: check for an orphaned hash
                if rec.hash != response["header"]["hash"] and rec.state != "orphan":
                    LOGGER.warn("Found an orphan - height: {}, hash: {} vs {}".format(rec.height, rec.hash, response["header"]["hash"]))
                    rec.state = "orphan"
                    database.db.getSession().commit()
            else:
                LOGGER.warn("Adding missing block - height: {}".format(response["header"]["height"]))
                # XXX TODO: Probably want to mark it as "missing" so we know it was filled in after the fact?
                missing_block = Blocks(hash=response["header"]["hash"],
                                       version=response["header"]["version"],
                                       height=response["header"]["height"],
                                       previous=response["header"]["previous"],
                                       timestamp=response["header"]["timestamp"][:-1],
                                       output_root=response["header"]["output_root"],
                                       range_proof_root=response["header"]["range_proof_root"],
                                       kernel_root=response["header"]["kernel_root"],
                                       nonce=response["header"]["nonce"],
                                       total_difficulty=response["header"]["total_difficulty"],
                                       total_kernel_offset=response["header"]["total_kernel_offset"],
                                       state="missing")
                database.db.createDataObj(missing_block)
        except Exception as e:
            # XXX TODO: Something more ?
            LOGGER.error("Something went wrong: {}".format(e))
        sys.stdout.flush()
    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
def calculate(height, avg_range):
    """Build (but do not persist) the Pool_stats record for `height`.

    GPS is averaged over the trailing `avg_range` blocks of worker shares;
    running totals are carried forward from the stats record at height-1.
    Raises AssertionError when prerequisite records are missing.
    """
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous stats record found"
    avg_over_first_grin_block = Blocks.get_by_height(max(
        height - avg_range, 1))
    assert avg_over_first_grin_block is not None, "Missing grin block: {}".format(
        max(height - avg_range, 1))
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    latest_worker_shares = Worker_shares.get_by_height(height)
    # If no shares are found for this height, we have 2 options:
    # 1) Assume the share data is *delayed* so dont create the stats record now
    # assert len(latest_worker_shares) > 0, "No worker shares found"
    # 2) If we want we can create the record without share data and then when shares are added later this record will be recalculated
    avg_over_worker_shares = Worker_shares.get_by_height(height, avg_range)
    # Calculate the stats data
    timestamp = grin_block.timestamp
    difficulty = POOL_MIN_DIFF  # XXX TODO - enchance to support multiple difficulties
    gps = 0
    active_miners = 0
    shares_processed = 0
    num_shares_in_range = 0
    if len(avg_over_worker_shares) > 0:
        num_shares_in_range = sum(
            [shares.valid for shares in avg_over_worker_shares])
        gps = grin.calculate_graph_rate(difficulty,
                                        avg_over_first_grin_block.timestamp,
                                        grin_block.timestamp,
                                        num_shares_in_range)
        print("XXX: difficulty={}, {}-{}, len={}".format(
            difficulty, avg_over_first_grin_block.timestamp,
            grin_block.timestamp, num_shares_in_range))
    if latest_worker_shares is not None:
        # NOTE(review): this counts share *records*, not distinct miners
        active_miners = len(latest_worker_shares)  # XXX NO, FIX THIS
        num_valid = sum([shares.valid for shares in latest_worker_shares])
        num_invalid = sum([shares.invalid for shares in latest_worker_shares])
        shares_processed = num_valid + num_invalid
    # Carry running totals forward from the previous stats record
    total_shares_processed = previous_stats_record.total_shares_processed + shares_processed
    total_grin_paid = previous_stats_record.total_grin_paid  # XXX TODO
    total_blocks_found = previous_stats_record.total_blocks_found
    # A pool block found at the previous height bumps the running total
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    return Pool_stats(height=height,
                      timestamp=timestamp,
                      gps=gps,
                      active_miners=active_miners,
                      shares_processed=shares_processed,
                      total_shares_processed=total_shares_processed,
                      total_grin_paid=total_grin_paid,
                      total_blocks_found=total_blocks_found)
def initialize(avg_over_range, logger):
    """Seed Grin_stats for a new pool database.

    Waits for the first block record to appear, creates `avg_over_range`
    dummy "filler" Blocks records before it (so averaging windows never run
    off the start of the data), then writes three bootstrap Grin_stats
    records.  Returns the height of the earliest real block.

    BUGFIX: corrected log-message typos ("filtters"/"fillter") to match the
    actual record state, "filler".
    """
    database = lib.get_db()
    # Special case for new pool startup - Need 3 stats records to bootstrap
    block_zero = None
    while block_zero is None:
        logger.warn("Waiting for the first block record in the database")
        time.sleep(1)
        block_zero = Blocks.get_earliest()
    print("block_zero={}".format(block_zero))
    height = block_zero.height
    # Create avg_over_range dummy block records prior to block_zero
    print("Create block fillers: {} - {}".format(height - avg_over_range, height))
    for h in range(height - avg_over_range, height):
        print("Creating filler at height {}".format(h))
        new_block = Blocks(hash="x",
                           version=0,
                           height=h,
                           previous="x",
                           timestamp=datetime.utcnow(),
                           output_root="x",
                           range_proof_root="x",
                           kernel_root="x",
                           nonce=0,
                           edge_bits=29,
                           total_difficulty=block_zero.total_difficulty,
                           secondary_scaling=0,
                           num_inputs=0,
                           num_outputs=0,
                           num_kernels=0,
                           fee=0,
                           lock_height=0,
                           total_kernel_offset="x",
                           state="filler")
        database.db.getSession().add(new_block)
    database.db.getSession().commit()
    # Three seed stats records, all stamped from block_zero
    seed_stat0 = Grin_stats(
        height=height - 2,
        timestamp=block_zero.timestamp,
        difficulty=block_zero.total_difficulty)
    database.db.createDataObj(seed_stat0)
    seed_stat1 = Grin_stats(
        height=height - 1,
        timestamp=block_zero.timestamp,
        difficulty=block_zero.total_difficulty)
    database.db.createDataObj(seed_stat1)
    seed_stat2 = Grin_stats(
        height=height,
        timestamp=block_zero.timestamp,
        difficulty=block_zero.total_difficulty)
    database.db.createDataObj(seed_stat2)
    return height
def get(self, height=0, range=None, fields=None):
    """Return block(s) as JSON: a single block when `range` is None,
    otherwise a list covering `range` blocks ending at `height`.
    A height of 0 means the current chain tip."""
    database = lib.get_db()
    field_list = lib.fields_to_list(fields)
    if height == 0:
        height = grin.get_current_height()
    if range is None:
        # Single-block lookup
        block = Blocks.get_by_height(height)
        return None if block is None else block.to_json(field_list)
    # Range lookup: serialize each block in turn
    return [b.to_json(field_list) for b in Blocks.get_by_height(height, range)]
def post(self):
    """Create a new pool user account from form fields `username`/`password`.

    Returns 201 with {username, id} on success; 400 for missing/invalid
    arguments, 409 for a duplicate username, 500 when record creation fails.
    Also seeds a Worker_stats record at the latest block height so the new
    user gets immediate UI feedback.

    BUGFIX: corrected user-facing typo "pasword" -> "password"; reuse the
    already-fetched latest block instead of a second DB query.
    """
    global database
    LOGGER = lib.get_logger(PROCESS)
    username = None
    password = None
    try:
        debug and print("json request = {}".format(request.form))
        username = request.form.get('username')
        password = request.form.get('password')
        debug and LOGGER.warn("PoolAPI_users POST: user:{} password:{}".format(username, password))
    except AttributeError as e:
        LOGGER.warn("Missing username or password - {}".format(str(e)))
    # Validate presence and non-emptiness of both arguments
    if username is None or password is None:
        response = jsonify({'message': 'Missing arguments: username and password required'})
        response.status_code = 400
        return response
    if username == "" or password == "":
        response = jsonify({'message': 'Missing arguments: username and password required'})
        response.status_code = 400
        return response
    # "." is reserved (used as a separator elsewhere), so reject it
    if "." in username:
        response = jsonify({'message': 'Invalid Username: May not contain "."'})
        response.status_code = 400
        return response
    # Check if the username is taken
    exists = Users.check_username_exists(username)
    if exists:
        debug and print("Failed to add - conflict with existing user = {}".format(username))
        response = jsonify({'message': 'Conflict with existing account'})
        response.status_code = 409
        return response
    # Create the users record
    user_rec = Users.create(username, password)
    if user_rec is None:
        debug and print("Failed to add - unable to create a new user record")
        response = jsonify({'message': 'System Error: Failed to create account'})
        response.status_code = 500
        return response
    # Initialize a worker_stats record for this user (previous block) so they
    # get instant feedback on the UI
    lb = Blocks.get_latest()
    if lb is not None:
        initial_stat = Worker_stats(datetime.utcnow(), lb.height, user_rec.id)
        database.db.createDataObj(initial_stat)
    debug and print("Added user = {}".format(user_rec))
    response = jsonify({'username': user_rec.username, 'id': user_rec.id})
    response.status_code = 201
    return response
def calculate(height, avg_range=DIFFICULTY_ADJUST_WINDOW):
    """Build (but do not persist) the Grin_stats record for `height`.

    Requires the stats record at height-1 plus the trailing `avg_range`
    blocks in the database.  Difficulty is the delta of cumulative
    total_difficulty between the last two blocks; per-size GPS estimates
    are attached as Gps child records.

    Raises AssertionError when prerequisite records are missing.
    (BUGFIX: corrected assert message typo "provious" -> "previous".)
    """
    # Get the most recent blocks from which to generate the stats
    recent_blocks = []
    previous_stats_record = Grin_stats.get_by_height(height - 1)
    print("XXX: {}".format(previous_stats_record))
    assert previous_stats_record is not None, "No previous stats record found"
    recent_blocks = Blocks.get_by_height(height, avg_range)
    if len(recent_blocks) < min(avg_range, height):
        # We dont have all of these blocks in the DB
        raise AssertionError("Missing blocks in range: {}:{}".format(height - avg_range, height))
    assert recent_blocks[-1].height == height, "Invalid height in recent_blocks[-1]"
    assert recent_blocks[-2].height == height - 1, "Invalid height in recent_blocks[-2]: {} vs {}".format(recent_blocks[-2].height, height - 1)
    # Calculate the stats data
    first_block = recent_blocks[0]
    last_block = recent_blocks[-1]
    timestamp = last_block.timestamp
    # Per-block difficulty = delta of cumulative total_difficulty
    difficulty = recent_blocks[-1].total_difficulty - recent_blocks[-2].total_difficulty
    new_stats = Grin_stats(
        height=height,
        timestamp=timestamp,
        difficulty=difficulty,
    )
    # Calculate estimated GPS for recent edge_bits sizes
    all_gps = estimate_all_gps(recent_blocks)
    for gps in all_gps:
        gps_rec = Gps(
            edge_bits=gps[0],
            gps=gps[1],
        )
        new_stats.gps.append(gps_rec)
    return new_stats
def get_reward_by_block(height):
    """Total value of the block at `height` to its finder:
    base block reward (in nanogrin) plus transaction fees.
    Returns 0 when the block is not in the database."""
    block = Blocks.get_by_height(height)
    #print("The block {}".format(theblock.to_json()))
    if block is None:
        return 0
    return get_block_reward_nanogrin() + block.fee
def main():
    """Pool stats service: generate one Pool_stats record per grin block.

    Bootstraps the stats table on first run, then loops forever creating
    stats for each height once share data is available, committing in
    batches of BATCHSZ (or immediately near the chain tip).
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Initialize poolStats records if this is the first run
    latest_stat = Pool_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup
        poolstats.initialize(avg_over_range, LOGGER)
        latest_stat = Pool_stats.get_latest()
    LOGGER.warn("Starting at height: {}".format(latest_stat.height))

    # Generate pool stats records - one per grin block
    while True:
        # Find the height of the latest stats record
        latest_stat = Pool_stats.get_latest()
        height = latest_stat.height + 1
        LOGGER.warn("Starting at height: {}".format(height))
        try:
            while True:
                # Block until we have share data to process
                share_height = Worker_shares.get_latest_height()
                while share_height is None:
                    LOGGER.warn("Waiting for shares")
                    share_height = Worker_shares.get_latest_height()
                    sleep(10)
                latest = Blocks.get_latest().height
                stats_height = height - 1
                LOGGER.warn(
                    "Running: Chain height: {}, share height: {}, stats height: {}"
                    .format(latest, share_height, stats_height))
                # Stay one block behind the share data so records are complete
                while share_height - 1 > height:
                    new_stats = poolstats.calculate(height, avg_over_range)
                    # Batch new stats when possible, but commit at reasonable intervals
                    database.db.getSession().add(new_stats)
                    if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                        database.db.getSession().commit()
                    LOGGER.warn(
                        "Added Pool_stats for block: {} - {} {} {}".format(
                            new_stats.height, new_stats.gps,
                            new_stats.active_miners,
                            new_stats.shares_processed))
                    height = height + 1
                sys.stdout.flush()
                sleep(check_interval)
        except Exception as e:
            # Roll back the partial batch and resume from the DB's last height
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))
            database.db.getSession().rollback()
            sleep(check_interval)
    # NOTE: unreachable — the service loops forever above
    LOGGER.warn("=== Completed {}".format(PROCESS))
def get(self, id=None, height=0, range=0, fields=None):
    """Fetch Worker_shares records as JSON.

    With no `id`: all workers' shares for `height` (0 = latest) over `range`.
    With an `id` and no range: that worker's single record (or [] if none);
    with an `id` and a range: a list of that worker's records.

    BUGFIX: the single-record branch assigned `worker_sh_recs` but tested an
    undefined name `res` (NameError), and the empty case called
    "[]".to_json() which raises AttributeError on str — now returns [].
    """
    database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn(
        "WorkerAPI_shares get id:{} height:{} range:{} fields:{}".format(
            id, height, range, fields))
    fields = lib.fields_to_list(fields)
    if height == 0:
        height = Blocks.get_latest().height
    shares_records = []
    if id is None:
        # All workers at this height/range
        for shares in Worker_shares.get_by_height(height, range):
            shares_records.append(shares.to_json(fields))
        return shares_records
    else:
        if range is None:
            res = Worker_shares.get_by_height_and_id(height, id)
            #print("worker_sh_recs = {}".format(res))
            if res is None:
                return []
            return res.to_json(fields)
        else:
            for share in Worker_shares.get_by_height_and_id(
                    height, id, range):
                shares_records.append(share.to_json(fields))
            return shares_records
def get(self, id=None, height=None, range=0, fields=None):
    """Fetch Worker_shares records for the authenticated user as JSON.

    Access is restricted to the caller's own `id` (403 otherwise).  A None
    height returns only the latest share height; height 0 means the chain
    tip.  `range` is clamped to worker_shares_range_limit.

    BUGFIX: the single-record branch assigned `worker_sh_recs` but tested an
    undefined name `res` (NameError), and the empty case called
    "[]".to_json() which raises AttributeError on str — now returns [].
    """
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    # AUTH FILTER
    if id != g.user.id:
        response = jsonify({'message': 'Not authorized to access data for other users'})
        response.status_code = 403
        return response
    debug and LOGGER.warn("WorkerAPI_shares get id:{} height:{} range:{} fields:{}".format(id, height, range, fields))
    # Enforce range limit
    if range is not None:
        range = min(range, worker_shares_range_limit)
    fields = lib.fields_to_list(fields)
    if height is None:
        return Worker_shares.get_latest_height(id)
    if height == 0:
        height = Blocks.get_latest().height
    shares_records = []
    if id is None:
        for shares in Worker_shares.get_by_height(height, range):
            shares_records.append(shares.to_json(fields))
        return shares_records
    else:
        if range is None:
            res = Worker_shares.get_by_height_and_id(height, id)
            #print("worker_sh_recs = {}".format(res))
            if res is None:
                return []
            return res.to_json(fields)
        else:
            for share in Worker_shares.get_by_height_and_id(height, id, range):
                shares_records.append(share.to_json(fields))
            return shares_records
def ShareCommitScheduler(interval, database):
    """Background thread: commit buffered share data to the database.

    Commits per-block share data whenever the tracked HEIGHT falls behind
    either incoming share heights or the chain tip, then flushes all
    remaining (non-current) blocks every `interval` seconds.
    """
    global LOGGER
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT

    database = lib.get_db()
    try:
        # XXX TODO: enhance
        while True:
            bc_height = Blocks.get_latest(
            ).height  # grin.blocking_get_current_height()
            LOGGER.warn(
                "HEIGHT={}, POOLSHARE_HEIGHT={}, GRINSHARE_HEIGHT={}".format(
                    HEIGHT, POOLSHARE_HEIGHT, GRINSHARE_HEIGHT))
            # Catch HEIGHT up to the share data / chain tip, committing each block
            while (HEIGHT < POOLSHARE_HEIGHT
                   and HEIGHT < GRINSHARE_HEIGHT) or (bc_height > HEIGHT):
                # Commit and purge current block share data if we are starting a new block
                LOGGER.warn("Commit shares for height: {}".format(HEIGHT))
                # time.sleep(5) # Give straggler shares a chance to come in
                SHARES.commit(HEIGHT)
                HEIGHT = HEIGHT + 1
            # Commit and purge all old share data (except current block) every 'interval' seconds
            try:
                SHARES.commit()  # All except current block
            except Exception as e:
                LOGGER.error("Failed to commit: {}".format(e))
            time.sleep(interval)
    except Exception as e:
        # NOTE(review): a single exception here ends the scheduler thread —
        # the while True is inside the try, so after this handler the
        # function falls through to teardown and returns.  Confirm intended.
        LOGGER.error("Something went wrong: {}\n{}".format(
            e, traceback.format_exc().splitlines()))
        time.sleep(interval)
    lib.teardown_db()
def initialize(window_size, logger): database = lib.get_db() # Special case for new pool startup block_zero = None while block_zero is None: logger.warn("Waiting for the first block record in the database") time.sleep(1) block_zero = Blocks.get_earliest() print("block_zero={}".format(block_zero)) stat_height = max(0, block_zero.height + window_size) seed_stat = Pool_stats( height=stat_height, timestamp=datetime.utcnow(), active_miners=0, shares_processed=0, share_counts=None, total_blocks_found=0, total_shares_processed=0, dirty=False, ) database.db.createDataObj(seed_stat) seed_share = Worker_shares( height=stat_height, user_id=1, timestamp=datetime.utcnow(), ) database.db.createDataObj(seed_share)
def get(self, id=None, height=0, range=None, fields=None):
    """Fetch Worker_stats records as JSON.

    With no `id`: all workers' stats for `height` (0 = latest) over `range`.
    With an `id` and no range: that worker's single record ([] if none);
    with an `id` and a range: a list of that worker's records.

    BUGFIX: the empty single-record case called "[]".to_json(), which raises
    AttributeError on str — now returns [].
    """
    database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn(
        "WorkerAPI_stats get id:{} height:{} range:{} fields:{}".format(
            id, height, range, fields))
    fields = lib.fields_to_list(fields)
    if height == 0:
        height = Blocks.get_latest().height
    stats = []
    if id is None:
        for stat in Worker_stats.get_by_height(height, range):
            #print("YYY: {}".format(stats))
            stats.append(stat.to_json(fields))
        return stats
    else:
        if range is None:
            res = Worker_stats.get_by_height_and_id(id, height)
            if res is None:
                return []
            return res.to_json(fields)
        else:
            for stat in Worker_stats.get_by_height_and_id(
                    id, height, range):
                stats.append(stat.to_json(fields))
            return stats
def get_reward_by_block(height):
    """Return the total value (in nanogrin) of the block at `height` to its
    finder: base reward plus transaction fees.  Returns 0 when the block is
    not in the database.
    """
    database = lib.get_db()
    theblock = Blocks.get_by_height(height)
    #print("The block {}".format(theblock.to_json()))
    if theblock is None:
        return 0
    # 60 grin base reward expressed in nanogrin (60 * 10^9).
    # NOTE(review): another variant of this function uses a
    # get_block_reward_nanogrin() helper — consider sharing it.
    return 60 * 1000000000 + theblock.fee
def get(self, height=None, range=None, fields=None):
    """Return grin block(s) as JSON.

    A height of None/0 means the latest block(s).  When `range` is None a
    single block (or None) is returned; otherwise a list of blocks.
    """
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("GrinAPI_blocks get height:{} range:{} fields:{}".format(
        height, range, fields))
    field_list = lib.fields_to_list(fields)
    # Resolve which block(s) to serve
    if height is None or height == 0:
        blocks = Blocks.get_latest(range)
    else:
        blocks = Blocks.get_by_height(height, range)
    if range is None:
        # Single-block result (possibly absent)
        return None if blocks is None else blocks.to_json(field_list)
    # Range result: serialize each block
    return [blk.to_json(field_list) for blk in blocks]
def main():
    """Worker stats service: generate Worker_stats records (one per active
    worker per grin block), marking any pre-existing Pool_stats for the same
    height dirty so it gets recalculated.  Commits in batches of BATCHSZ, or
    immediately near the chain tip.  Runs forever.
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Worker_stats.get_latest()
    if latest_stat != None:
        last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn("Starting at block height: {}".format(height))

    # Generate worker stats records - one per grin block for each active worker
    while True:
        # latest = grin.blocking_get_current_height()
        latest = Blocks.get_latest().height
        #LOGGER.warn("Latest Network Block Height = {}".format(latest))
        while latest > height:
            try:
                new_stats = workerstats.calculate(height, avg_over_range)
                LOGGER.warn("{} new stats for height {}".format(
                    len(new_stats), height))
                # mark any existing pool_stats dirty so it is recalculated
                pool_stats = Pool_stats.get_by_height(height)
                if pool_stats is not None:
                    LOGGER.warn(
                        "Marked existing pool_stats dirty for height: {}".
                        format(height))
                    pool_stats.dirty = True
                database.db.getSession().bulk_save_objects(new_stats)
                # Batch commits; flush promptly near the chain tip
                if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                    database.db.getSession().commit()
                for stats in new_stats:
                    LOGGER.warn(
                        "Added Worker_stats for block: {}, Worker: {} - {} {} {} {} {} {}"
                        .format(stats.height, stats.worker, stats.gps,
                                stats.shares_processed,
                                stats.total_shares_processed, stats.grin_paid,
                                stats.total_grin_paid, stats.balance))
                height = height + 1
            except Exception as e:
                # Roll back the partial batch and retry after a pause
                LOGGER.error("Something went wrong: {}".format(e))
                LOGGER.error("Traceback: {}".format(
                    traceback.format_exc().splitlines()))
                database.db.getSession().rollback()
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    # NOTE: unreachable — the service loops forever above
    LOGGER.warn("=== Completed {}".format(PROCESS))
def calculate(height, avg_range):
    """Build (but do not persist) one Worker_stats record per worker that
    submitted a share at `height`.

    GPS is averaged over the trailing `avg_range` blocks; running totals are
    carried forward from each worker's most recent Worker_stats record.
    Returns the list of new Worker_stats objects (including freshly created
    baseline records for first-seen workers).
    """
    avg_over_first_grin_block = Blocks.get_by_height(max(height - avg_range, 1))
    assert avg_over_first_grin_block is not None, "Missing grin block: {}".format(max(height - avg_range, 1))
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    # Get all workers share records for the current range of blocks
    latest_worker_shares = Worker_shares.get_by_height(height)
    # assert len(latest_worker_shares) != 0, "Missing worker shares record for height {}".format(height)
    avg_over_worker_shares = Worker_shares.get_by_height(height, avg_range)
    # Create a worker_stats for each user who submitted a share in this range
    workers = list(set([share.worker for share in latest_worker_shares]))
    new_stats = []
    for worker in workers:
        # Get this workers most recent worker_stats record (for running totals)
        last_stat = Worker_stats.get_latest_by_id(worker)
        if last_stat is None:
            # A new worker: start from a zeroed baseline record at height-1
            last_stat = Worker_stats(None, datetime.utcnow(), height - 1,
                                     worker, 0, 0, 0, 0, 0, 0)
            new_stats.append(last_stat)
        # Calculate this workers stats data
        timestamp = grin_block.timestamp
        difficulty = POOL_MIN_DIFF  # XXX TODO - enchance to support multiple difficulties
        num_shares_in_range = sum([shares.valid for shares in avg_over_worker_shares if shares.worker == worker])
        gps = grin.calculate_graph_rate(difficulty,
                                        avg_over_first_grin_block.timestamp,
                                        grin_block.timestamp,
                                        num_shares_in_range)
        # [0] is safe: `workers` was built from latest_worker_shares, so at
        # least one matching record exists for this worker
        num_valid_this_block = [shares.valid for shares in latest_worker_shares if shares.worker == worker][0]
        num_invalid_this_block = [shares.invalid for shares in latest_worker_shares if shares.worker == worker][0]
        shares_processed = num_valid_this_block + num_invalid_this_block
        # latest_worker_shares = [share for share in latest_pool_shares if share.found_by == worker]
        #shares_processed = len(worker_shares_this_block)
        total_shares_processed = last_stat.total_shares_processed + shares_processed
        stats = Worker_stats(
            id=None,
            height=height,
            timestamp=timestamp,
            worker=worker,
            gps=gps,
            shares_processed=shares_processed,
            total_shares_processed=total_shares_processed,
            grin_paid=123,  # XXX TODO
            total_grin_paid=456,  # XXX TODO
            balance=1)  # XXX TODO
        new_stats.append(stats)
    return new_stats
def get_blocks_found_data(num_blocks):
    """Build *blocks found* chart data for the most recent `num_blocks`
    blocks: a list of {"time": <epoch str>, "height": <int>} dicts."""
    chart_points = []
    for blk in iter(Blocks.get_last_n(num_blocks)):
        chart_points.append({
            "time": blk.timestamp.strftime('%s'),
            "height": blk.height,
        })
    return chart_points
def main():
    """Share-processing service entry point.

    Loads config, waits for the first grin block (or existing share data) to
    establish the starting HEIGHT, then starts one ShareCommitScheduler
    thread plus one RabbitMQ consumer thread per configured endpoint.
    """
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    global SHARE_EXPIRETIME
    global database
    global RABBITMQ_USER
    global RABBITMQ_PASSWORD
    CONFIG = lib.get_config()
    atexit.register(lib.teardown_db)

    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0

    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    SHARE_EXPIRETIME = int(CONFIG[PROCESS]["share_expire_time"])
    commit_interval = int(CONFIG[PROCESS]["commit_interval"])
    rmq_endpoints = json.loads(CONFIG[PROCESS]["rmq"])

    # Credentials come from the environment, not the config file
    RABBITMQ_USER = os.environ["RABBITMQ_USER"]
    RABBITMQ_PASSWORD = os.environ["RABBITMQ_PASSWORD"]

    database = lib.get_db()
    # Resume from existing share data, or wait for the first grin block
    HEIGHT = Worker_shares.get_latest_height()
    while HEIGHT is None:
        LOGGER.warn("Waiting on the first grin block...")
        time.sleep(5)
        latest_block = Blocks.get_latest()
        if latest_block is not None:
            HEIGHT = latest_block.height

    SHARES = WorkerShares(LOGGER)

    ##
    # Start a thread to commit shares
    commit_thread = threading.Thread(target=ShareCommitScheduler,
                                     args=(
                                         commit_interval,
                                         database,
                                     ))
    commit_thread.start()

    ##
    # Start a pika consumer thread for each rabbit we want to consume from
    for rmq in rmq_endpoints:
        rmq_thread = threading.Thread(target=RmqConsumer, args=(rmq, ))
        rmq_thread.start()
def calculate(height, window_size):
    """Build (but do not persist) the Pool_stats record for `height`.

    Aggregates Worker_shares over a `window_size` trailing window: active
    miner count, per-size valid/invalid/stale share counts for this block,
    running totals carried forward from the record at height-1, and
    per-size GPS estimates attached as Gps child records.

    Raises AssertionError when prerequisite records are missing.
    """
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous Pool_stats record found"
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    window = Worker_shares.get_by_height(height, window_size)
    # assert window[-1].height - window[0].height >= window_size, "Failed to get proper window size"
    # print("Sanity: window size: {} vs {}".format(window[-1].height - window[0].height, window_size))
    # Calculate the stats data
    timestamp = grin_block.timestamp
    # Distinct users with any share record in the window
    active_miners = len(list(set([s.user_id for s in window])))
    print("active_miners = {}".format(active_miners))
    # Keep track of share totals - sum counts of all share sizes submitted for this block
    num_shares_processed = 0
    share_counts = {}
    for ws in Worker_shares.get_by_height(height):
        num_shares_processed += ws.num_shares()
        for size in ws.sizes():
            size_str = "{}{}".format("C", size)
            if size_str not in share_counts:
                share_counts[size_str] = {"valid": 0, "invalid": 0, "stale": 0}
            share_counts[size_str] = {
                "valid": share_counts[size_str]["valid"] + ws.num_valid(size),
                "invalid": share_counts[size_str]["invalid"] + ws.num_invalid(size),
                "stale": share_counts[size_str]["stale"] + ws.num_stale(size)
            }
    print("num_shares_processed this block= {}".format(num_shares_processed))
    # Carry running totals forward from the previous stats record
    total_shares_processed = previous_stats_record.total_shares_processed + num_shares_processed
    total_blocks_found = previous_stats_record.total_blocks_found
    # Caclulate estimated GPS for all sizes with shares submitted
    all_gps = estimate_gps_for_all_sizes(window)
    # A pool block found at the previous height bumps the running total
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    new_stats = Pool_stats(
        height=height,
        timestamp=timestamp,
        active_miners=active_miners,
        share_counts=share_counts,
        shares_processed=num_shares_processed,
        total_blocks_found=total_blocks_found,
        total_shares_processed=total_shares_processed,
        dirty=False,
    )
    print("all_gps for all pool workers")
    pp.pprint(all_gps)
    for gps_est in all_gps:
        gps_rec = Gps(edge_bits=gps_est[0], gps=gps_est[1])
        new_stats.gps.append(gps_rec)
    sys.stdout.flush()
    return new_stats
def get(self, height=0, range=None, fields=None):
    """Return the authenticated user's Worker_shares records (as JSON) for
    `height` (0 = latest) over an optional `range` of blocks.  Records
    belonging to other users are filtered out."""
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    debug and LOGGER.warn("WorkersAPI_shares get height:{} range:{} fields:{}".format(height, range, fields))
    field_list = lib.fields_to_list(fields)
    if height == 0:
        height = Blocks.get_latest().height
    # AUTH FILTER: only this user's share records are serialized
    return [
        rec.to_json(field_list)
        for rec in Worker_shares.get_by_height(height, range)
        if rec.user_id == g.user.id
    ]
def addPoolBlock(logger, timestamp, height, hash, found_by, serverid):
    """Record a block found by the pool as a Pool_blocks row.

    Waits (polling) for the corresponding grin block to appear in the DB,
    computes its actual vs network difficulty, and inserts the record,
    ignoring duplicates.  Serialized by POOLBLOCK_MUTEX.
    """
    global POOLBLOCK_MUTEX
    POOLBLOCK_MUTEX.acquire()
    database = lib.get_db()
    try:
        logger.warn(
            "Adding A PoolBlock: Timestamp: {}, ServerID: {}, Height: {}, Hash: {}"
            .format(timestamp, serverid, height, hash))
        state = "new"
        # Poll until the grin block record exists in the database.
        # NOTE(review): the sleep(1) runs even after a successful fetch,
        # adding ~1s of latency per call — likely fine, but confirm.
        this_block = Blocks.get_by_height(height)
        while this_block is None:
            this_block = Blocks.get_by_height(height)
            time.sleep(1)
        nonce = this_block.nonce
        actual_difficulty = grin.difficulty(this_block.hash,
                                            this_block.edge_bits,
                                            this_block.secondary_scaling)
        net_difficulty = grin.get_network_difficulty(height)
        # Create the DB record
        new_pool_block = Pool_blocks(hash=hash,
                                     height=height,
                                     nonce=nonce,
                                     actual_difficulty=actual_difficulty,
                                     net_difficulty=net_difficulty,
                                     timestamp=timestamp,
                                     found_by=found_by,
                                     state=state)
        duplicate = lib.get_db().db.createDataObj_ignore_duplicates(
            new_pool_block)
        if duplicate:
            logger.warn(
                "Failed to add duplicate Pool Block: {}".format(height))
        else:
            logger.warn("Added Pool Block: {}".format(height))
    finally:
        POOLBLOCK_MUTEX.release()
def main():
    """Grin stats service: generate one Grin_stats record per grin block.

    Bootstraps the Grin_stats table on first run, then loops forever
    generating stats for each new block, committing each record as it is
    added.
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Grin_stats.get_latest()
    print("latest_stat = {}".format(latest_stat))
    if latest_stat == None:
        # First run: seed the stats table
        LOGGER.warn("Initializing Grin_stats")
        grinstats.initialize(avg_over_range, LOGGER)
        latest_stat = Grin_stats.get_latest()
        print("Finished initializing, latest_stat height = {}".format(
            latest_stat.height))
    last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn(
        "grinStats service starting at block height: {}".format(height))

    # Generate grin stats records - one per grin block
    while True:
        #latest_db_block = Blocks.get_latest()
        latest = Blocks.get_latest().height
        while latest >= height:
            try:
                new_stats = grinstats.calculate(height, avg_over_range)
                # Batch new stats when possible, but commit at reasonable intervals
                database.db.getSession().add(new_stats)
                # if( (height % BATCHSZ == 0) or (height >= (latest-10)) ):
                database.db.getSession().commit()
                LOGGER.warn(
                    "Added Grin_stats for block: {} - gps:{} diff:{}".format(
                        new_stats.height, new_stats.gps, new_stats.difficulty))
                height = height + 1
            except AssertionError as e:
                # Usually missing prerequisite blocks; wait and retry
                LOGGER.error("Something went wrong: {}".format(e))
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    # NOTE: unreachable — the service loops forever above
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """Block-watcher service loop: mirror grin node blocks into the DB.

    Polls the grin node's /v1/status endpoint for the chain tip, then
    fetches each new block header via /v1/blocks/<height> and inserts a
    Blocks row for it.
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    grin_api_url = "http://" + CONFIG["grin_node"]["address"] + ":" + CONFIG[
        "grin_node"]["api_port"]
    status_url = grin_api_url + "/v1/status"
    blocks_url = grin_api_url + "/v1/blocks/"
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    last = get_current_height(status_url)
    while True:
        latest = get_current_height(status_url)
        for i in range(last + 1, latest + 1):
            last = latest
            url = blocks_url + str(i)
            r = requests.get(url)
            if not r.ok:
                # Log the height that actually failed (was logging `last`).
                LOGGER.error(
                    "Failed to get block info for block {}".format(i))
                continue
            # Reuse the response already fetched; the original issued a
            # second, redundant HTTP request here.
            response = r.json()
            LOGGER.warn("New Block: {} at {}".format(
                response["header"]["hash"], response["header"]["height"]))
            try:
                new_block = Blocks(
                    hash=response["header"]["hash"],
                    version=response["header"]["version"],
                    height=response["header"]["height"],
                    previous=response["header"]["previous"],
                    # Trim the trailing 'Z' from the ISO timestamp.
                    timestamp=response["header"]["timestamp"][:-1],
                    output_root=response["header"]["output_root"],
                    range_proof_root=response["header"]["range_proof_root"],
                    kernel_root=response["header"]["kernel_root"],
                    nonce=response["header"]["nonce"],
                    total_difficulty=response["header"]["total_difficulty"],
                    total_kernel_offset=response["header"]
                    ["total_kernel_offset"],
                    state="new")
                database.db.createDataObj(new_block)
            except Exception as e:
                LOGGER.error("Something went wrong: {}".format(e))
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
def get(self, height=0, range=None, fields=None):
    """Return JSON-serialized Worker_stats at `height` over `range`.

    Only the admin user's stats records are exposed (auth filter).
    `height == 0` means "latest block height".  `range` and `fields`
    keep their caller-visible names even though they shadow builtins.
    """
    global database
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("WorkersAPI_stats get height:{} range:{} fields:{}".format(
        height, range, fields))
    requested = lib.fields_to_list(fields)
    if height == 0:
        height = Blocks.get_latest().height
    # AUTH FILTER: expose only the admin user's records.
    return [
        rec.to_json(requested)
        for rec in Worker_stats.get_by_height(height, range)
        if rec.user_id == ADMIN_ID
    ]
def RigDataCommitScheduler(max_lag, logger):
    """Background loop that flushes buffered per-rig share data to redis.

    Every 30s, commits each RIGDATA height bucket that is more than
    `max_lag` blocks behind the chain tip: each user's rigdata for that
    height is JSON-encoded and stored under a per-user redis key with an
    expiry, then the bucket is dropped from the in-memory RIGDATA dict.
    On any error the DB session is torn down and the whole loop restarts
    after 10s (deliberate best-effort retry; errors are logged, not raised).

    Args:
        max_lag: number of blocks a height must trail the tip before commit.
        logger: logger for progress and error messages.
    """
    global RIGDATA_MUTEX
    global RIGDATA
    global REDIS_RIGDATA_KEY
    global REDIS_RIGDATA_EXPIRETIME
    while True:
        try:
            redisdb = lib.get_redis_db()
            while True:
                database = lib.get_db()
                chain_height = Blocks.get_latest().height
                logger.warn(
                    "RIGDATA commit scheduler - chain_height = {}".format(
                        chain_height))
                RIGDATA_MUTEX.acquire()
                try:
                    # Snapshot the eligible heights first (list comprehension)
                    # so popping from RIGDATA below is safe.
                    for height in [
                            h for h in RIGDATA.keys()
                            if h < (chain_height - max_lag)
                    ]:
                        logger.warn(
                            "Commit RIGDATA for height: {}".format(height))
                        # Pickle RIGDATA and write to redis
                        for user, rigdata in RIGDATA[height].items():
                            key = "{}.{}.{}".format(REDIS_RIGDATA_KEY,
                                                    height, user)
                            if redisdb.exists(key):
                                # Key collision: data for this height/user
                                # already committed; merge not implemented yet.
                                logger.warn(
                                    "XXX TODO - MERGE THIS ADDITIONAL SHARE DATA"
                                )
                            else:
                                redisdb.set(key,
                                            json.dumps(rigdata),
                                            ex=REDIS_RIGDATA_EXPIRETIME)
                        RIGDATA.pop(height, None)
                finally:
                    RIGDATA_MUTEX.release()
                lib.teardown_db()
                time.sleep(30)
        except Exception as e:
            logger.error("Something went wrong: {}\n{}".format(
                e,
                traceback.format_exc().splitlines()))
            lib.teardown_db()
            time.sleep(10)