def get(self, height=None, range=None):
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    debug and LOGGER.warn("PoolAPI_shareCount get height:{} range:{}".format(height, range))
    # Totals across all workers are stored in the Pool_stats record
    if range is None:
        if height is None:
            height = 0
        pool_st_rec = Pool_stats.get_by_height(height)
        # Avoid dereferencing a missing record; fall back to zero counts
        if pool_st_rec is None:
            total = 0
            this_block = 0
        else:
            height = pool_st_rec.height
            total = pool_st_rec.total_shares_processed
            this_block = pool_st_rec.shares_processed
        return {
            "height": height,
            "total": total,
            "count": this_block,
        }
    else:
        counts = []
        pool_st_recs = Pool_stats.get_by_height(height, range)
        for st_rec in pool_st_recs:
            rec = {
                "height": st_rec.height,
                "total": st_rec.total_shares_processed,
                "count": st_rec.shares_processed,
            }
            counts.append(rec)
        return counts

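# Example response shapes from the shareCount handler above. A minimal sketch;
# the field names come from the handler's return statements, the values are
# illustrative only:
#
#   single height:  {"height": 12345, "total": 98765, "count": 42}
#   with a range:   [{"height": 12344, "total": 98723, "count": 40},
#                    {"height": 12345, "total": 98765, "count": 42}]
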
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Initialize Pool_stats records if this is the first run
    latest_stat = Pool_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup
        poolstats.initialize(avg_over_range, LOGGER)
        latest_stat = Pool_stats.get_latest()
    LOGGER.warn("Starting at height: {}".format(latest_stat.height))

    # Generate pool stats records - one per grin block
    while True:
        # Find the height of the latest stats record
        latest_stat = Pool_stats.get_latest()
        height = latest_stat.height + 1
        LOGGER.warn("Starting at height: {}".format(height))
        try:
            while True:
                share_height = Worker_shares.get_latest_height()
                while share_height is None:
                    LOGGER.warn("Waiting for shares")
                    sleep(10)
                    share_height = Worker_shares.get_latest_height()
                latest = Blocks.get_latest().height
                stats_height = height - 1
                LOGGER.warn(
                    "Running: Chain height: {}, share height: {}, stats height: {}"
                    .format(latest, share_height, stats_height))
                while share_height - 1 > height:
                    new_stats = poolstats.calculate(height, avg_over_range)
                    # Batch new stats when possible, but commit at reasonable intervals
                    database.db.getSession().add(new_stats)
                    if (height % BATCHSZ == 0) or (height >= (latest - 10)):
                        database.db.getSession().commit()
                    LOGGER.warn(
                        "Added Pool_stats for block: {} - {} {} {}".format(
                            new_stats.height, new_stats.gps,
                            new_stats.active_miners,
                            new_stats.shares_processed))
                    height = height + 1
                sys.stdout.flush()
                sleep(check_interval)
        except Exception as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.format_exc()))
            database.db.getSession().rollback()
            sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))

def calculate(height, avg_range):
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous stats record found"
    avg_over_first_grin_block = Blocks.get_by_height(max(height - avg_range, 1))
    assert avg_over_first_grin_block is not None, "Missing grin block: {}".format(
        max(height - avg_range, 1))
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    latest_worker_shares = Worker_shares.get_by_height(height)
    # If no shares are found for this height, we have 2 options:
    # 1) Assume the share data is *delayed*, so don't create the stats record now:
    #    assert len(latest_worker_shares) > 0, "No worker shares found"
    # 2) Create the record without share data; when shares are added later,
    #    this record will be recalculated
    avg_over_worker_shares = Worker_shares.get_by_height(height, avg_range)
    # Calculate the stats data
    timestamp = grin_block.timestamp
    difficulty = POOL_MIN_DIFF  # XXX TODO - enhance to support multiple difficulties
    gps = 0
    active_miners = 0
    shares_processed = 0
    num_shares_in_range = 0
    if len(avg_over_worker_shares) > 0:
        num_shares_in_range = sum(
            [shares.valid for shares in avg_over_worker_shares])
        gps = grin.calculate_graph_rate(difficulty,
                                        avg_over_first_grin_block.timestamp,
                                        grin_block.timestamp,
                                        num_shares_in_range)
        print("XXX: difficulty={}, {}-{}, len={}".format(
            difficulty, avg_over_first_grin_block.timestamp,
            grin_block.timestamp, num_shares_in_range))
    if latest_worker_shares is not None:
        active_miners = len(latest_worker_shares)  # XXX NO, FIX THIS
        num_valid = sum([shares.valid for shares in latest_worker_shares])
        num_invalid = sum([shares.invalid for shares in latest_worker_shares])
        shares_processed = num_valid + num_invalid
    total_shares_processed = previous_stats_record.total_shares_processed + shares_processed
    total_grin_paid = previous_stats_record.total_grin_paid  # XXX TODO
    total_blocks_found = previous_stats_record.total_blocks_found
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    return Pool_stats(height=height,
                      timestamp=timestamp,
                      gps=gps,
                      active_miners=active_miners,
                      shares_processed=shares_processed,
                      total_shares_processed=total_shares_processed,
                      total_grin_paid=total_grin_paid,
                      total_blocks_found=total_blocks_found)

def calculate(height, window_size):
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous Pool_stats record found"
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    window = Worker_shares.get_by_height(height, window_size)
    # assert window[-1].height - window[0].height >= window_size, "Failed to get proper window size"
    # print("Sanity: window size: {} vs {}".format(window[-1].height - window[0].height, window_size))

    # Calculate the stats data
    timestamp = grin_block.timestamp
    active_miners = len(set([s.user_id for s in window]))
    print("active_miners = {}".format(active_miners))

    # Keep track of share totals - sum counts of all share sizes submitted for this block
    num_shares_processed = 0
    share_counts = {}
    for ws in Worker_shares.get_by_height(height):
        num_shares_processed += ws.num_shares()
        for size in ws.sizes():
            size_str = "C{}".format(size)
            if size_str not in share_counts:
                share_counts[size_str] = {"valid": 0, "invalid": 0, "stale": 0}
            share_counts[size_str] = {
                "valid": share_counts[size_str]["valid"] + ws.num_valid(size),
                "invalid": share_counts[size_str]["invalid"] + ws.num_invalid(size),
                "stale": share_counts[size_str]["stale"] + ws.num_stale(size),
            }
    print("num_shares_processed this block = {}".format(num_shares_processed))
    total_shares_processed = previous_stats_record.total_shares_processed + num_shares_processed
    total_blocks_found = previous_stats_record.total_blocks_found

    # Calculate estimated GPS for all sizes with shares submitted
    all_gps = estimate_gps_for_all_sizes(window)
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    new_stats = Pool_stats(
        height=height,
        timestamp=timestamp,
        active_miners=active_miners,
        share_counts=share_counts,
        shares_processed=num_shares_processed,
        total_blocks_found=total_blocks_found,
        total_shares_processed=total_shares_processed,
        dirty=False,
    )
    print("all_gps for all pool workers")
    pp.pprint(all_gps)
    for gps_est in all_gps:
        gps_rec = Gps(edge_bits=gps_est[0], gps=gps_est[1])
        new_stats.gps.append(gps_rec)
    sys.stdout.flush()
    return new_stats

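# A minimal sketch of the data shapes calculate() builds above (values are
# illustrative): share_counts is keyed by "C" + edge_bits, and the result of
# estimate_gps_for_all_sizes() is consumed as (edge_bits, gps) pairs, one Gps
# record appended per tuple.
#
#   share_counts = {
#       "C29": {"valid": 120, "invalid": 3, "stale": 7},
#       "C31": {"valid": 18,  "invalid": 0, "stale": 1},
#   }
#   all_gps = [(29, 2.57), (31, 0.41)]
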
def get(self, height=0, range=None, fields=None):
    database = lib.get_db()
    fields = lib.fields_to_list(fields)
    if height == 0:
        height = grin.get_current_height()
    if range is None:
        stat = Pool_stats.get_by_height(height)
        if stat is None:
            return None
        return stat.to_json(fields)
    else:
        stats = []
        for stat in Pool_stats.get_by_height(height, range):
            stats.append(stat.to_json(fields))
        return stats

def initialize(window_size, logger):
    database = lib.get_db()
    # Special case for new pool startup
    block_zero = None
    while block_zero is None:
        logger.warn("Waiting for the first block record in the database")
        time.sleep(1)
        block_zero = Blocks.get_earliest()
    print("block_zero = {}".format(block_zero))
    stat_height = max(0, block_zero.height + window_size)
    seed_stat = Pool_stats(
        height=stat_height,
        timestamp=datetime.utcnow(),
        active_miners=0,
        shares_processed=0,
        share_counts=None,
        total_blocks_found=0,
        total_shares_processed=0,
        dirty=False,
    )
    database.db.createDataObj(seed_stat)
    seed_share = Worker_shares(
        height=stat_height,
        user_id=1,
        timestamp=datetime.utcnow(),
    )
    database.db.createDataObj(seed_share)

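# Usage sketch, as in the pool stats generator's main() above: on first run,
# when no Pool_stats records exist yet, seed the table before entering the
# main loop.
#
#   latest_stat = Pool_stats.get_latest()
#   if latest_stat is None:
#       poolstats.initialize(avg_over_range, LOGGER)
#       latest_stat = Pool_stats.get_latest()
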
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Worker_stats.get_latest()
    if latest_stat is not None:
        last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn("Starting at block height: {}".format(height))

    # Generate worker stats records - one per grin block for each active worker
    while True:
        # latest = grin.blocking_get_current_height()
        latest = Blocks.get_latest().height
        #LOGGER.warn("Latest Network Block Height = {}".format(latest))
        while latest > height:
            try:
                new_stats = workerstats.calculate(height, avg_over_range)
                LOGGER.warn("{} new stats for height {}".format(
                    len(new_stats), height))
                # Mark any existing pool_stats dirty
                pool_stats = Pool_stats.get_by_height(height)
                if pool_stats is not None:
                    LOGGER.warn("Marked existing pool_stats dirty for height: {}"
                                .format(height))
                    pool_stats.dirty = True
                database.db.getSession().bulk_save_objects(new_stats)
                if (height % BATCHSZ == 0) or (height >= (latest - 10)):
                    database.db.getSession().commit()
                for stats in new_stats:
                    LOGGER.warn(
                        "Added Worker_stats for block: {}, Worker: {} - {} {} {} {} {} {}"
                        .format(stats.height, stats.worker, stats.gps,
                                stats.shares_processed,
                                stats.total_shares_processed, stats.grin_paid,
                                stats.total_grin_paid, stats.balance))
                height = height + 1
            except Exception as e:
                LOGGER.error("Something went wrong: {}".format(e))
                LOGGER.error("Traceback: {}".format(
                    traceback.format_exc().splitlines()))
                database.db.getSession().rollback()
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))

def get(self, height=None, range=None, fields=None):
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("PoolAPI_stats get height:{} range:{} fields:{}".format(
        height, range, fields))
    fields = lib.fields_to_list(fields)
    if height is None or height == 0:
        stats = Pool_stats.get_latest(range)
    else:
        stats = Pool_stats.get_by_height(height, range)
    if range is None:
        if stats is None:
            return None
        return stats.to_json(fields)
    else:
        st = []
        for stat in stats:
            st.append(stat.to_json(fields))
        return st

def commit(self, height):
    print("self.shares.keys(): {}".format(self.shares.keys()))
    if height not in self.shares or len(self.shares[height]) == 0:
        self.LOGGER.warn(
            "Processed 0 shares in block {} - Creating filler record".format(height))
        # Even if there are no shares in the pool at all for this block,
        # we still need to create a filler record at this height
        filler_shares_rec = Worker_shares(height=height,
                                          worker="GrinPool",
                                          timestamp=datetime.utcnow(),
                                          difficulty=DIFFICULTY,
                                          valid=0,
                                          invalid=0)
        lib.get_db().db.createDataObj_ignore_duplicates(filler_shares_rec)
        return

    byWorker = {}
    for nonce in self.shares[height]:
        share = self.shares[height][nonce]
        # Sort shares by worker
        if share.found_by not in byWorker:
            byWorker[share.found_by] = []
        byWorker[share.found_by].append(share)
        # Create Pool_blocks for full solution shares
        if share.is_solution:
            self.addPoolBlock(share)

    # Create a Worker_shares record for each user and commit to DB
    # XXX TODO: Bulk insert - will be needed when the pool has hundreds or thousands of workers
    for worker in byWorker:
        workerShares = byWorker[worker]
        # Not possible?
        # if len(workerShares) == 0:
        #     continue
        self.LOGGER.warn(
            "Processed {} shares in block {} for worker {}".format(
                len(workerShares), height, worker))
        valid_list = [share.is_valid for share in workerShares]
        # self.LOGGER.warn("xxx: {}".format(valid_list))
        num_valid = sum([int(share.is_valid) for share in workerShares])
        new_shares_rec = Worker_shares(height=height,
                                       worker=worker,
                                       timestamp=datetime.utcnow(),
                                       difficulty=DIFFICULTY,
                                       valid=num_valid,
                                       invalid=len(workerShares) - num_valid)
        lib.get_db().db.createDataObj_ignore_duplicates(new_shares_rec)
    # We added new worker share data, so if a Pool_stats record already exists
    # at this height, mark it dirty so it gets recalculated
    stats_rec = Pool_stats.get_by_height(height)
    if stats_rec is not None:
        stats_rec.dirty = True
    lib.get_db().db.getSession().commit()

def initialize():
    database = lib.get_db()
    # Special case for new pool startup
    seed_stat = Pool_stats(height=0,
                           timestamp=datetime.utcnow(),
                           gps=0,
                           active_miners=0,
                           shares_processed=0,
                           total_shares_processed=0,
                           total_grin_paid=0,
                           total_blocks_found=0)
    database.db.createDataObj(seed_stat)

def main():
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    database = lib.get_db()
    LOGGER.warn("=== Starting {}".format(PROCESS))

    check_interval = float(CONFIG[PROCESS]["check_interval"])
    max_rebuild_depth = int(CONFIG[PROCESS]["max_rebuild_depth"])
    avg_over_range_grin = int(CONFIG["grinStats"]["avg_over_range"])
    avg_over_range_pool = int(CONFIG["poolStats"]["avg_over_range"])
    avg_over_range_worker = int(CONFIG["workerStats"]["avg_over_range"])
    current_height = grin.blocking_get_current_height()
    rebuild_height = current_height - max_rebuild_depth

    while True:
        # Grin blocks, and therefore grin stats, can't be dirty
        # # Check for dirty grin stats
        # dirty = Grin_stats.get_first_dirty()
        # if dirty is not None:
        #     LOGGER.warn("Recalculating Grin Stats from {}".format(dirty.height))
        #     end_height = grinstats.recalculate(dirty.height, avg_over_range_grin)
        #     LOGGER.warn("Finished Recalculating Grin Stats: {} - {}".format(dirty.height, end_height))

        # Check for dirty pool stats
        dirty = Pool_stats.get_first_dirty(rebuild_height)
        if dirty is not None:
            LOGGER.warn("Recalculating Pool Stats from {}".format(dirty.height))
            end_height = poolstats.recalculate(dirty.height, avg_over_range_pool)
            LOGGER.warn("Finished Recalculating Pool Stats: {} - {}".format(
                dirty.height, end_height))

        # Check for dirty worker stats
        dirty = Worker_stats.get_first_dirty(rebuild_height)
        while dirty is not None:
            LOGGER.warn("Recalculating Worker Stats from {} (avg range: {})".format(
                dirty.height, avg_over_range_worker))
            end_height = workerstats.recalculate(dirty.height, avg_over_range_worker)
            LOGGER.warn("Finished Recalculating Worker Stats: {} - {}".format(
                dirty.height, end_height))
            dirty = Worker_stats.get_first_dirty(rebuild_height)

        sys.stdout.flush()
        time.sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))

def calculate(height, window_size):
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous Pool_stats record found"
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    window = Worker_shares.get_by_height(height, window_size)

    # Calculate the stats data
    timestamp = grin_block.timestamp
    active_miners = len(set([s.worker for s in window]))
    print("active_miners = {}".format(active_miners))

    # Keep track of share totals - sum counts of all share sizes submitted for this block
    shares_processed = 0
    if len(window) > 0:
        shares_processed = window[-1].num_shares()
    print("shares_processed this block = {}".format(shares_processed))
    total_shares_processed = previous_stats_record.total_shares_processed + shares_processed
    total_grin_paid = previous_stats_record.total_grin_paid  # XXX TODO
    total_blocks_found = previous_stats_record.total_blocks_found

    # Calculate estimated GPS for all sizes with shares submitted
    all_gps = estimate_gps_for_all_sizes(window)
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    new_stats = Pool_stats(
        height=height,
        timestamp=timestamp,
        active_miners=active_miners,
        shares_processed=shares_processed,
        total_blocks_found=total_blocks_found,
        total_shares_processed=total_shares_processed,
        total_grin_paid=total_grin_paid,
        dirty=False,
    )
    print("all_gps for all pool workers")
    pp.pprint(all_gps)
    for gps_est in all_gps:
        gps_rec = Gps(edge_bits=gps_est[0], gps=gps_est[1])
        new_stats.gps.append(gps_rec)
    sys.stdout.flush()
    return new_stats

def recalculate(start_height, window_size):
    database = lib.get_db()
    height = start_height
    while height < grin.blocking_get_current_height():
        old_stats = Pool_stats.get_by_height(height)
        new_stats = calculate(height, window_size)
        if old_stats is None:
            database.db.createDataObj(new_stats)
        else:
            old_stats.timestamp = new_stats.timestamp
            old_stats.active_miners = new_stats.active_miners
            old_stats.shares_processed = new_stats.shares_processed
            old_stats.total_blocks_found = new_stats.total_blocks_found
            old_stats.total_shares_processed = new_stats.total_shares_processed
            old_stats.dirty = False
        database.db.getSession().commit()
        height = height + 1

def recalculate(start_height, window_size):
    database = lib.get_db()
    height = start_height
    worker_shares_height = Worker_shares.get_latest_height() - 1
    while worker_shares_height > height:
        old_stats = Pool_stats.get_by_height(height)
        new_stats = calculate(height, window_size)
        if old_stats is None:
            database.db.createDataObj(new_stats)
        else:
            old_stats.timestamp = new_stats.timestamp
            old_stats.active_miners = new_stats.active_miners
            old_stats.share_counts = new_stats.share_counts
            old_stats.shares_processed = new_stats.shares_processed
            old_stats.total_blocks_found = new_stats.total_blocks_found
            old_stats.total_shares_processed = new_stats.total_shares_processed
            old_stats.dirty = False
        database.db.getSession().commit()
        height = height + 1

def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Pool_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup
        poolstats.initialize()
        last_height = 0
    else:
        last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn("Starting at height: {}".format(height))

    # Generate pool stats records - one per grin block
    while True:
        try:
            # latest = grin.blocking_get_current_height()
            latest = Blocks.get_latest().height
            while latest > height:
                new_stats = poolstats.calculate(height, avg_over_range)
                # Batch new stats when possible, but commit at reasonable intervals
                database.db.getSession().add(new_stats)
                if (height % BATCHSZ == 0) or (height >= (latest - 10)):
                    database.db.getSession().commit()
                LOGGER.warn("Added Pool_stats for block: {} - {} {} {}".format(
                    new_stats.height, new_stats.gps, new_stats.active_miners,
                    new_stats.shares_processed))
                height = height + 1
            sys.stdout.flush()
        except Exception as e:  # AssertionError as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.format_exc()))
            sleep(check_interval)
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))

def recalculate(start_height, avg_range):
    database = lib.get_db()
    height = start_height
    while height <= grin.blocking_get_current_height():
        old_stats = Worker_stats.get_by_height(height)
        new_stats = calculate(height, avg_range)
        for old_stat in old_stats:
            database.db.deleteDataObj(old_stat)
        for stats in new_stats:
            print("new/updated stats: {}".format(stats))
            worker = stats.worker
            database.db.getSession().add(stats)
            if height % BATCHSZ == 0:
                database.db.getSession().commit()
        # We updated one or more worker stats, so mark the Pool_stats
        # at this height dirty
        stats_rec = Pool_stats.get_by_height(height)
        if stats_rec is not None:
            stats_rec.dirty = True
        height = height + 1
    database.db.getSession().commit()

def commit(self, height=None):
    global HEIGHT
    if height is None:
        block_heights = list(self.shares.keys())
        try:
            # Process all heights, except shares from the current block or newer
            block_heights = [h for h in block_heights if h < HEIGHT]
        except ValueError as e:
            pass
    else:
        block_heights = [height]
    #pp.pprint(self.shares)
    if len(block_heights) > 0:
        self.LOGGER.warn("Committing shares for blocks: {}".format(block_heights))
    for height in block_heights:
        if height not in self.shares or len(self.shares[height]) == 0:
            # XXX TODO: Only create filler record if no records already exist for this height
            self.LOGGER.warn(
                "Processed 0 shares in block {} - Creating filler record".format(height))
            # Even if there are no shares in the pool at all for this block,
            # we still need to create a filler record at this height
            filler_worker_shares_rec = Worker_shares(
                height=height,
                user_id=1,
                timestamp=datetime.utcnow(),
            )
            database.db.createDataObj(filler_worker_shares_rec)
            return

        # Sort the shares by worker and graph size
        # byWorker is a multi-level structure:
        # 1) hash by worker id
        # 2) hash by graph size
        # 3) list of shares
        byWorker = {}
        for hash in self.shares[height]:
            share = self.shares[height][hash]
            # Sort shares by worker
            if share.found_by not in byWorker:
                byWorker[share.found_by] = {}
            if share.edge_bits not in byWorker[share.found_by]:
                byWorker[share.found_by][share.edge_bits] = []
            #print("XXX Adding share to workerShares: {}".format(share))
            byWorker[share.found_by][share.edge_bits].append(share)
            # Create Pool_blocks for full solution shares
            if share.is_solution:
                self.addPoolBlock(share)
        #pp.pprint(byWorker)

        # Create/update a Worker_shares record for each user and commit to DB
        # XXX TODO: Bulk insert - will be needed when the pool has hundreds or thousands of workers
        for worker in byWorker:
            if worker == 0:
                continue
            workerShares = byWorker[worker]
            #print("workerShares for {} = {}".format(worker, workerShares))
            # Count them (just for logging)
            num_valid_shares = 0
            num_invalid_shares = 0
            num_stale_shares = 0
            for graph_size in workerShares:
                for share in workerShares[graph_size]:
                    if share.is_valid:
                        num_valid_shares += 1
                    elif share.invalid_reason == 'too late':
                        num_stale_shares += 1
                    else:
                        num_invalid_shares += 1
            num_shares = num_valid_shares + num_stale_shares + num_invalid_shares
            self.LOGGER.warn(
                "Processed {} shares in block {} for user_id {}: Valid: {}, stale: {}, invalid: {}"
                .format(num_shares, height, worker, num_valid_shares,
                        num_stale_shares, num_invalid_shares))
            pp.pprint(workerShares)

            # Get any existing record for this worker at this height
            worker_shares_rec = Worker_shares.get_by_height_and_id(height, worker)
            existing = True
            if worker_shares_rec is None or len(worker_shares_rec) == 0:
                # No existing record, create it
                self.LOGGER.warn(
                    "This is a new share record for worker: {}".format(worker))
                worker_shares_rec = Worker_shares(
                    height=height,
                    user_id=worker,
                    timestamp=datetime.utcnow(),
                )
                database.db.createDataObj(worker_shares_rec)
                existing = False
            else:
                pp.pprint(worker_shares_rec)
                worker_shares_rec = worker_shares_rec[0]

            # Create/update Shares records - one per graph size mined in this block
            #pp.pprint(workerShares)
            for a_share_list in workerShares.values():
                for a_share in a_share_list:
                    if a_share.edge_bits == 0:
                        # We don't actually know what size this share was since we
                        # only got the pool's half. It's invalid anyway, so just
                        # ignore it for now. XXX TODO: something better
                        continue
                    #print("a_share = {}".format(a_share))
                    edge_bits = a_share.edge_bits
                    difficulty = a_share.share_difficulty
                    valid = 0
                    stale = 0
                    invalid = 0
                    if a_share.is_valid:
                        valid = 1
                    elif a_share.invalid_reason == 'too late':
                        stale = 1
                    else:
                        invalid = 1
                    worker_shares_rec.add_shares(a_share.edge_bits,
                                                 a_share.share_difficulty,
                                                 valid, invalid, stale)
            try:
                database.db.getSession().commit()
                # After we commit share data we need to ack the rmq messages
                # and clear the committed shares
                self.ack_and_clear(height)
                # We added new worker share data, so if a Pool_stats record already
                # exists at this height, mark it dirty so it gets recalculated by
                # the shareValidator service
                stats_rec = Pool_stats.get_by_height(height)
                if stats_rec is not None:
                    stats_rec.dirty = True
                # Commit any changes
                if existing:
                    self.LOGGER.warn("XXX UPDATED worker share record: {}".format(
                        worker_shares_rec))
                else:
                    self.LOGGER.warn("XXX NEW worker share record: {}".format(
                        worker_shares_rec))
                database.db.getSession().commit()
            except Exception as e:
                self.LOGGER.error(
                    "Failed to commit worker shares for {} at height {} - {}"
                    .format(worker, height, e))

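# Shape of the byWorker structure built in commit() above (user_ids and
# edge_bits are illustrative): shares are bucketed first by the user that
# found them, then by graph size.
#
#   byWorker = {
#       7: {29: [<share>, <share>], 31: [<share>]},
#       9: {29: [<share>]},
#   }
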
def commit(self, height=None):
    global HEIGHT
    database = lib.get_db()
    if height is None:
        block_heights = list(self.shares.keys())
        try:
            # Process all heights, except shares from the current block or newer
            block_heights = [h for h in block_heights if h < HEIGHT]
        except ValueError as e:
            pass
        self.LOGGER.warn("Committing shares for blocks: {}".format(block_heights))
    else:
        block_heights = [height]
    self.LOGGER.warn("Will commit shares for blocks: {} - (current height: {})".format(
        block_heights, HEIGHT))
    for height in block_heights:
        if height not in self.shares or len(self.shares[height]) == 0:
            # XXX TODO: Only create filler record if no records already exist for this height
            self.LOGGER.warn(
                "Processed 0 shares in block {} - Creating filler record".format(height))
            # Even if there are no shares in the pool at all for this block,
            # we still need to create a filler record at this height
            filler_shares_rec = Worker_shares(height=height,
                                              worker="GrinPool",
                                              timestamp=datetime.utcnow(),
                                              difficulty=DIFFICULTY,
                                              valid=0,
                                              invalid=0)
            database.db.createDataObj_ignore_duplicates(filler_shares_rec)
            return

        byWorker = {}
        for nonce in self.shares[height]:
            share = self.shares[height][nonce]
            # Sort shares by worker
            if share.found_by not in byWorker:
                byWorker[share.found_by] = []
            byWorker[share.found_by].append(share)
            # Create Pool_blocks for full solution shares
            if share.is_solution:
                self.addPoolBlock(share)

        # Create/update a Worker_shares record for each user and commit to DB
        # XXX TODO: Bulk insert - will be needed when the pool has hundreds or thousands of workers
        for worker in byWorker:
            workerShares = byWorker[worker]
            self.LOGGER.warn("Processed {} shares in block {} for worker {}".format(
                len(workerShares), height, worker))
            valid_list = [share.is_valid for share in workerShares]
            # self.LOGGER.warn("xxx: {}".format(valid_list))
            num_valid = sum([int(share.is_valid) for share in workerShares])
            # Get any existing record for this worker at this height
            existing_shares_rec = Worker_shares.get_by_user_and_height(worker, height)
            if len(existing_shares_rec) == 0:
                # No existing record, create it
                self.LOGGER.warn(
                    "New share record for {} at height {} with {} valid shares, {} invalid shares"
                    .format(worker, height, num_valid,
                            len(workerShares) - num_valid))
                new_shares_rec = Worker_shares(height=height,
                                               worker=worker,
                                               timestamp=datetime.utcnow(),
                                               difficulty=DIFFICULTY,
                                               valid=num_valid,
                                               invalid=len(workerShares) - num_valid)
                database.db.createDataObj_ignore_duplicates(new_shares_rec)
            else:
                existing_shares_rec = existing_shares_rec[0]
                self.LOGGER.warn(
                    "Updated share record for {} at height {}: Prev={} valid, {} invalid; Now={} valid, {} invalid"
                    .format(worker, height,
                            existing_shares_rec.valid,
                            existing_shares_rec.invalid,
                            existing_shares_rec.valid + num_valid,
                            existing_shares_rec.invalid + len(workerShares) - num_valid))
                existing_shares_rec.valid += num_valid
                existing_shares_rec.invalid += len(workerShares) - num_valid
        # After we commit share data we need to clear it
        self.clear(height)
        # We added new worker share data, so if a Pool_stats record already exists
        # at this height, mark it dirty so it gets recalculated by the
        # shareValidator service
        stats_rec = Pool_stats.get_by_height(height)
        if stats_rec is not None:
            stats_rec.dirty = True
        # Commit any changes
        database.db.getSession().commit()

def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Worker_stats.get_latest()
    if latest_stat is not None:
        last_height = latest_stat.height
    else:
        latest = Blocks.get_latest()
        while latest is None:
            LOGGER.warn("Waiting for the first block...")
            sleep(10)
            latest = Blocks.get_latest()
        last_height = latest.height
    height = last_height + 1
    LOGGER.warn("Starting at block height: {}".format(height))

    # Generate worker stats records - one per grin block for each active worker
    while True:
        # latest = grin.blocking_get_current_height()
        latest = Blocks.get_latest().height
        share_height = Worker_shares.get_latest_height()
        while share_height is None:
            LOGGER.warn("waiting for the first worker shares")
            sleep(10)
            share_height = Worker_shares.get_latest_height()
        stats_height = height - 1
        LOGGER.warn(
            "Running: chain height: {}, share height: {} vs stats height: {}".format(
                latest, share_height, stats_height))
        while share_height > height:
            try:
                new_stats = workerstats.calculate(height, avg_over_range)
                LOGGER.warn("{} new stats for height {}".format(
                    len(new_stats), height))
                for stats in new_stats:
                    LOGGER.warn("Added Worker_stats: {}".format(stats))
                # Mark any existing pool_stats dirty
                pool_stats = Pool_stats.get_by_height(height)
                for stat_rec in new_stats:
                    database.db.getSession().add(stat_rec)
                if pool_stats is not None:
                    LOGGER.warn("Marked existing pool_stats dirty for height: {}"
                                .format(height))
                    pool_stats.dirty = True  # Pool_stats need to be recalculated
                if (height % BATCHSZ == 0) or (height >= (latest - 10)):
                    LOGGER.warn("Commit ---")
                    database.db.getSession().commit()
                height = height + 1
            except Exception as e:
                LOGGER.exception("Something went wrong: {}".format(e))
                database.db.getSession().rollback()
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))

def main():
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    block_expiretime = int(CONFIG["poolblockUnlocker"]["block_expiretime"])
    current_height = grin.blocking_get_current_height()
    new_poolshares = Pool_shares.getUnvalidated(current_height, VALIDATION_DEPTH)
    # XXX TODO: Batch by block
    for pool_share in new_poolshares:
        if pool_share.height < (current_height - block_expiretime):
            # This share is for an expired block
            if pool_share.is_valid:
                Pool_stats.mark_dirty(pool_share.height)
            pool_share.is_valid = False
            pool_share.validated = True
            pool_share.invalid_reason = "expired"
        else:
            grin_share = Grin_shares.get_by_nonce(pool_share.nonce)
            if grin_share is None:
                if pool_share.is_valid:
                    Pool_stats.mark_dirty(pool_share.height)
                # No matching validated grin share was found (yet)
                if pool_share.height < current_height - block_expiretime:
                    pool_share.is_valid = False
                    pool_share.validated = True
                    pool_share.invalid_reason = "no grin_share"
                else:
                    # Mark invalid, but don't finalize validation, so we will
                    # check again later
                    pool_share.is_valid = False
                    pool_share.invalid_reason = "no grin_share"
            else:
                # We did find a matching grin share; make sure it is valid
                # and grin accepted it
                if pool_share.nonce != grin_share.nonce:
                    if pool_share.is_valid:
                        Pool_stats.mark_dirty(pool_share.height)
                    pool_share.is_valid = False
                    pool_share.validated = True
                    pool_share.invalid_reason = "nonce mismatch"
                elif pool_share.worker_difficulty > grin_share.actual_difficulty:
                    if pool_share.is_valid:
                        Pool_stats.mark_dirty(pool_share.height)
                    pool_share.is_valid = False
                    pool_share.validated = True
                    pool_share.invalid_reason = "low difficulty"
                else:
                    # It did not fail any of our tests; it's valid
                    if pool_share.is_valid == False:
                        Pool_stats.mark_dirty(pool_share.height)
                    pool_share.validated = True
                    pool_share.is_valid = True
                    pool_share.invalid_reason = "None"
        # LOGGER.warn("Share {}, {} is {} because {}".format(pool_share.height, pool_share.nonce, pool_share.is_valid, pool_share.invalid_reason))
    database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
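
# Summary of the validation outcomes applied above, derived from the branch
# logic ("finalized" means validated=True, so the share is not re-checked):
#
#   block expired                  -> invalid, finalized, reason "expired"
#   no grin_share, block expired   -> invalid, finalized, reason "no grin_share"
#      (note: the outer branch already filters expired blocks, so this inner
#       expired check is effectively redundant as written)
#   no grin_share, block recent    -> invalid, NOT finalized (re-checked later)
#   nonce mismatch                 -> invalid, finalized, reason "nonce mismatch"
#   actual difficulty below the
#   worker's required difficulty   -> invalid, finalized, reason "low difficulty"
#   otherwise                      -> valid, finalized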