def get(self, id=None, height=0, range=0, fields=None):
    """Return worker share record(s) as JSON.

    id: worker/user id; None returns shares for all workers at the height.
    height: block height; 0 means the latest known block.
    range: number of blocks of history (NOTE: default is 0, so the
           single-record branch only runs when the caller explicitly
           passes range=None).
    fields: optional comma-separated field filter.
    """
    database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn(
        "WorkerAPI_shares get id:{} height:{} range:{} fields:{}".format(
            id, height, range, fields))
    fields = lib.fields_to_list(fields)
    if height == 0:
        height = Blocks.get_latest().height
    shares_records = []
    if id is None:
        for shares in Worker_shares.get_by_height(height, range):
            shares_records.append(shares.to_json(fields))
        return shares_records
    else:
        if range is None:
            # BUG FIX: the result was stored in 'worker_sh_recs' but then
            # read from the undefined name 'res', raising NameError.
            res = Worker_shares.get_by_height_and_id(height, id)
            if res is None:
                # BUG FIX: str has no .to_json(); return an empty list
                # (serialized to [] by the framework) instead of raising.
                return []
            return res.to_json(fields)
        else:
            for share in Worker_shares.get_by_height_and_id(
                    height, id, range):
                shares_records.append(share.to_json(fields))
            return shares_records
def get(self, id=None, height=0, range=None, fields=None):
    """Return Worker_stats record(s) as JSON.

    id: worker id; None returns stats for all workers at the height.
    height: block height; 0 means the latest known block.
    range: optional number of blocks of history.
    fields: optional comma-separated field filter.
    """
    database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn(
        "WorkerAPI_stats get id:{} height:{} range:{} fields:{}".format(
            id, height, range, fields))
    fields = lib.fields_to_list(fields)
    if height == 0:
        height = Blocks.get_latest().height
    if id is None:
        # All workers at this height (optionally over a range of heights)
        return [stat.to_json(fields)
                for stat in Worker_stats.get_by_height(height, range)]
    if range is None:
        # Single record for one worker
        res = Worker_stats.get_by_height_and_id(id, height)
        if res is None:
            # NOTE(review): str has no .to_json() - this branch would
            # raise AttributeError if ever hit; confirm intent.
            return "[]".to_json()
        return res.to_json(fields)
    # Range of records for one worker
    return [stat.to_json(fields)
            for stat in Worker_stats.get_by_height_and_id(id, height, range)]
def get(self, height=None, range=None):
    """Return share-count totals taken from Pool_stats records.

    height: block height; None/0 means the latest stats record.
    range: optional number of blocks of history; when given, returns a
           list of per-height dicts instead of a single dict.
    """
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    debug and LOGGER.warn(
        "PoolAPI_shareCount get height:{} range:{}".format(height, range))
    # Totals across all workers are stored in the Pool_stats record
    if range is None:
        if height is None:
            height = 0
        pool_st_rec = Pool_stats.get_by_height(height)
        if pool_st_rec is None:
            # BUG FIX: previously fell through and dereferenced
            # pool_st_rec.height, raising AttributeError when no stats
            # record exists. Report the requested height with zero counts.
            return {
                "height": height,
                "total": 0,
                "count": 0,
            }
        return {
            "height": pool_st_rec.height,
            "total": pool_st_rec.total_shares_processed,
            "count": pool_st_rec.shares_processed,
        }
    else:
        counts = []
        for st_rec in Pool_stats.get_by_height(height, range):
            counts.append({
                "height": st_rec.height,
                "total": st_rec.total_shares_processed,
                "count": st_rec.shares_processed,
            })
        return counts
def main():
    """Estimate-maker daemon.

    Loops forever: for every pool block that is new or unlocked and not yet
    estimated in this process's lifetime, computes (and caches via the pool
    library) its PPLNG payout estimate.

    Reads PPLNG_WINDOW_SIZE from the environment (default 60 blocks) and
    pool_fee from config. `check_interval` is assumed to be a module-level
    setting - TODO confirm it is defined at import time.
    """
    global CONFIG
    global LOGGER
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Number of blocks of share data used to calculate rewards
    PPLNG_WINDOW_SIZE = 60
    try:
        PPLNG_WINDOW_SIZE = int(os.environ["PPLNG_WINDOW_SIZE"])
    except Exception as e:
        LOGGER.error(
            "Failed to get PPLNG_WINDOW_SIZE from the environment: {}. Using default size of {}"
            .format(e, PPLNG_WINDOW_SIZE))
    # Connect to DB
    database = lib.get_db()
    # Blocks we know have already been estimated
    # XXX TODO: Clean paid blocks out of this list
    # BUG FIX: variable was misspelled 'esitmated'
    estimated = []
    # Get Config settings
    pool_fee = float(CONFIG[PROCESS]["pool_fee"])
    while True:
        # Generate pool block reward estimates for all new and unlocked blocks
        try:
            database.db.initializeSession()
            unlocked_blocks = Pool_blocks.get_all_unlocked()
            new_blocks = Pool_blocks.get_all_new()
            unlocked_blocks_h = [blk.height for blk in unlocked_blocks]
            new_blocks_h = [blk.height for blk in new_blocks]
            need_estimates = [h for h in unlocked_blocks_h + new_blocks_h
                              if h not in estimated]
            if need_estimates:
                LOGGER.warn("Will ensure estimate for blocks: {}".format(
                    need_estimates))
            # Generate Estimate
            for height in need_estimates:
                LOGGER.warn("Ensure estimate for block: {}".format(height))
                payout_map = pool.calculate_block_payout_map(
                    height, PPLNG_WINDOW_SIZE, pool_fee, LOGGER, True)
                # Double check the total paid is correct
                estimated.append(height)
                LOGGER.warn("Completed estimate for block: {}".format(height))
            LOGGER.warn("Completed estimates")
            database.db.destroySession()
            sleep(check_interval)
        except Exception as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))
            LOGGER.warn("=== Completed {}".format(PROCESS))
            sleep(check_interval)
def get(self, id=None, height=None, range=0, fields=None):
    """Authenticated worker share lookup.

    id: must match the authenticated user (403 otherwise).
    height: None returns only the latest share height for this worker;
            0 means the latest known block.
    range: number of blocks of history, capped at
           worker_shares_range_limit; only an explicit range=None selects
           the single-record branch (the default is 0).
    fields: optional comma-separated field filter.
    """
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    # AUTH FILTER
    if id != g.user.id:
        response = jsonify({
            'message': 'Not authorized to access data for other users'
        })
        response.status_code = 403
        return response
    debug and LOGGER.warn(
        "WorkerAPI_shares get id:{} height:{} range:{} fields:{}".format(
            id, height, range, fields))
    # Enforce range limit
    if range is not None:
        range = min(range, worker_shares_range_limit)
    fields = lib.fields_to_list(fields)
    if height is None:
        return Worker_shares.get_latest_height(id)
    if height == 0:
        height = Blocks.get_latest().height
    shares_records = []
    if id is None:
        for shares in Worker_shares.get_by_height(height, range):
            shares_records.append(shares.to_json(fields))
        return shares_records
    if range is None:
        # BUG FIX: the result was stored in 'worker_sh_recs' but then read
        # from the undefined name 'res', raising NameError.
        res = Worker_shares.get_by_height_and_id(height, id)
        if res is None:
            # BUG FIX: str has no .to_json(); return an empty list instead.
            return []
        return res.to_json(fields)
    for share in Worker_shares.get_by_height_and_id(height, id, range):
        shares_records.append(share.to_json(fields))
    return shares_records
def post(self, id, field, value):
    """Update one mutable field ('address' or 'method') on the caller's
    payout record. Returns 403 for other users or unknown fields, 500 if
    the update fails, 200 with {field: value} on success."""
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    # AUTH FILTER
    if id != g.user.id:
        response = jsonify(
            {'message': 'Not authorized to access data for other users'})
        response.status_code = 403
        return response
    debug and LOGGER.warn(
        "WorkerAPI_utxo post id:{} field:{} value:{}".format(
            id, field, value))
    if field not in ("address", "method"):
        response = jsonify({'message': 'Invalid field for update'})
        response.status_code = 403
        return response
    # Dispatch to the right updater for the (validated) field
    if field == "address":
        ok = Pool_utxo.update_address(id, value)
    else:
        ok = Pool_utxo.update_method(id, value)
    if ok == False:
        response = jsonify(
            {'message': 'Failed to update {}'.format(field)})
        response.status_code = 500
        return response
    response = jsonify({field: value})
    response.status_code = 200
    return response
def get(self, id, range=None, fields=None):
    """Return payment record(s) for the authenticated user.

    With range=None, returns the single latest record (or None); with a
    range, returns a list capped at worker_payment_record_range_limit.
    """
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    # AUTH FILTER
    if id != g.user.id:
        response = jsonify(
            {'message': 'Not authorized to access data for other users'})
        response.status_code = 403
        return response
    debug and LOGGER.warn(
        "WorkerAPI_payments get id:{} range:{} fields:{}".format(
            id, range, fields))
    # Enforce range limit
    if range is not None:
        range = min(range, worker_payment_record_range_limit)
    fields = lib.fields_to_list(fields)
    if range is None:
        latest = Pool_payment.get_latest_by_userid(id)
        return None if latest is None else latest.to_json(fields)
    return [rec.to_json(fields)
            for rec in Pool_payment.get_latest_by_userid(id, range)]
def main():
    """One-shot report: pretty-print payout estimates for the latest
    NUM_BLOCKS pool blocks."""
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    database.db.initializeSession()
    pp = pprint.PrettyPrinter(indent=4)
    # Fetch and print pool block reward estimates for latest N pool blocks
    try:
        pool_blocks = Pool_blocks.get_latest(NUM_BLOCKS)
        pool_blocks_h = [blk.height for blk in pool_blocks]
        LOGGER.warn(
            "Will report estimates for pool blocks: {}".format(pool_blocks_h))
        # Print Estimate
        for height in pool_blocks_h:
            # BUG FIX: corrected "Eestimate" typo in the report output
            pp.pprint("Estimate for block: {}".format(height))
            payout_map = pool.get_block_payout_map_estimate(height, LOGGER)
            pp.pprint(payout_map)
    except Exception as e:  # AssertionError as e:
        LOGGER.error("Something went wrong: {} - {}".format(
            e, traceback.print_stack()))
    LOGGER.warn("=== Completed {}".format(PROCESS))
def get(self, id, height=0):
    """Estimated (not yet paid) block reward(s) for the authenticated user.

    height != 0 returns the estimate for that single block; otherwise the
    estimates over every new and unlocked pool block are summed.
    """
    LOGGER = lib.get_logger(PROCESS)
    if id != g.user.id:
        response = jsonify({
            'message': 'Not authorized to access data for other users'
        })
        response.status_code = 403
        return response
    debug and LOGGER.warn(
        "EstimateApi_payment get id:{} height:{}".format(id, height))
    if height != 0:
        # Request is for a single block reward
        payout_map = pool.get_block_payout_map_estimate(height, LOGGER)
        if payout_map is None:
            return 0
        return payout_map.get(id, 0)
    # Sum the estimate over every new and unlocked pool block
    candidate_heights = [blk.height for blk in Pool_blocks.get_all_unlocked()]
    candidate_heights += [blk.height for blk in Pool_blocks.get_all_new()]
    total = 0
    for h in candidate_heights:
        debug and print("Estimate block at height: {}".format(h))
        payout_map = pool.get_block_payout_map_estimate(h, LOGGER)
        if payout_map is not None and id in payout_map:
            total += payout_map[id]
    return total
def main():
    """Daemon loop: pre-compute payout estimates for every new and
    unlocked pool block, then sleep check_interval seconds."""
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    database.db.initializeSession()
    while True:
        # Generate pool block reward estimates for all new and unlocked blocks
        try:
            heights = [blk.height for blk in Pool_blocks.get_all_unlocked()]
            heights += [blk.height for blk in Pool_blocks.get_all_new()]
            LOGGER.warn("Will ensure estimate for blocks: {}".format(heights))
            for height in heights:
                LOGGER.warn("Ensure estimate for block: {}".format(height))
                # NOTE(review): other callers pass (height, window, fee,
                # LOGGER, flag); confirm this 4-arg form matches the
                # library signature.
                payout_map = pool.calculate_block_payout_map(
                    height, 60, LOGGER, True)
                LOGGER.warn("Completed estimate for block: {}".format(height))
            LOGGER.warn("Completed estimates")
            sleep(check_interval)
        except Exception as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))
            LOGGER.warn("=== Completed {}".format(PROCESS))
            sleep(check_interval)
def main():
    """One-shot audit: re-verify every paid pool block against the chain,
    logging any that turn out to be orphans (a '.' is printed per block
    that still matches)."""
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    # Get the list of pool_blocks that have been paid
    paid_blocks = Pool_blocks.get_all_paid()
    print("Number of paid_poolblocks: {}".format(len(paid_blocks)))
    sys.stdout.flush()
    for block in paid_blocks:
        # Get the blockchain data for this block
        chain_block = grin.get_block_by_height(block.height)
        if chain_block == None:
            LOGGER.error("Failed to get block {}".format(block.height))
            continue
        if int(chain_block["header"]["nonce"]) != int(block.nonce):
            print("")
            sys.stdout.flush()
            LOGGER.warn("Processed orphan pool block at height: {}".format(
                block.height))
        else:
            # Still on the chain - just print a progress marker
            sys.stdout.write(".")
            sys.stdout.flush()
def main():
    """Pool-stats daemon: create one Pool_stats record per grin block.

    Blocks until share data exists, then walks forward from the last
    stats height, batching inserts and committing every BATCHSZ blocks
    (or on every block when within 10 of the chain tip). On error the
    session is rolled back and the outer loop restarts from the latest
    committed stats record.
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)
    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])
    # Initialize poolStats records if this is the first run
    latest_stat = Pool_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup
        poolstats.initialize(avg_over_range, LOGGER)
        latest_stat = Pool_stats.get_latest()
    LOGGER.warn("Starting at height: {}".format(latest_stat.height))
    # Generate pool stats records - one per grin block
    while True:
        # Find the height of the latest stats record
        latest_stat = Pool_stats.get_latest()
        height = latest_stat.height + 1
        LOGGER.warn("Starting at height: {}".format(height))
        try:
            while True:
                # Wait until at least one Worker_shares record exists
                share_height = Worker_shares.get_latest_height()
                while share_height is None:
                    LOGGER.warn("Waiting for shares")
                    share_height = Worker_shares.get_latest_height()
                    sleep(10)
                latest = Blocks.get_latest().height
                stats_height = height - 1
                LOGGER.warn(
                    "Running: Chain height: {}, share height: {}, stats height: {}"
                    .format(latest, share_height, stats_height))
                # Stay a block behind the share data so shares are complete
                while share_height - 1 > height:
                    new_stats = poolstats.calculate(height, avg_over_range)
                    # Batch new stats when possible, but commit at reasonable intervals
                    database.db.getSession().add(new_stats)
                    if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                        database.db.getSession().commit()
                    LOGGER.warn(
                        "Added Pool_stats for block: {} - {} {} {}".format(
                            new_stats.height, new_stats.gps,
                            new_stats.active_miners,
                            new_stats.shares_processed))
                    height = height + 1
                sys.stdout.flush()
                sleep(check_interval)
        except Exception as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))
            database.db.getSession().rollback()
            sleep(check_interval)
    # NOTE: unreachable - kept from the original for symmetry with
    # the other daemons' shutdown message.
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """Smoke test for the network-stats helper functions: fetch each data
    set, log it, then exit."""
    config = lib.get_config()
    PROCESS = "libNetworkTest"
    LOGGER = lib.get_logger(PROCESS)
    database = lib.get_db()
    # Overall stats
    overall = get_stats()
    LOGGER.warn("stats = {}".format(overall))
    LOGGER.warn("")
    # Recently found blocks
    found = get_blocks_found_data(5)
    LOGGER.warn("blocks found = {}".format(found))
    LOGGER.warn("")
    # Graph rate history
    rate = get_graph_rate_data(20)
    LOGGER.warn("graph rate = {}".format(rate))
    LOGGER.warn("")
    # Difficulty history
    difficulty = get_difficulty_data(200)
    LOGGER.warn("difficulty = {}".format(difficulty))
    sys.exit(1)
def main():
    """Share-listener entry point: initialize globals, start the periodic
    share-commit thread, then serve share submissions forever over TCP."""
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT

    CONFIG = lib.get_config()
    # XXX TODO: Put in config
    HOST = "0.0.0.0"
    PORT = 32080
    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    database = lib.get_db()
    # Resume from the last recorded share height, or the chain tip
    HEIGHT = Worker_shares.get_latest_height()
    if HEIGHT is None:
        HEIGHT = grin.blocking_get_current_height()
    SHARES = WorkerShares(LOGGER)
    # Background thread commits share batches every 15 seconds
    committer = threading.Thread(target=ShareCommitScheduler, args=(15, ))
    committer.start()
    # Serve until killed
    server = ThreadedTCPServer((HOST, PORT), ShareHandler)
    server.serve_forever()
def main():
    """Load config and logging, then hand off to process_grin_log()."""
    global LOGGER
    global CONFIG

    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    # XXX TODO: Kubernetes does not always get the volume mounted before
    # the processes start - maybe need a loop waiting on it
    process_grin_log()
def main():
    """Run the Flask app on PORT, listening on all interfaces (blocks
    until the server exits)."""
    global PORT
    global LOGGER

    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    app.run(debug=True, host='0.0.0.0', port=PORT)
    LOGGER.warn("=== Completed {}".format(PROCESS))
def get(self, id=None, fields=None):
    """Return the Pool_utxo record for payout address `id` as JSON.

    fields: optional comma-separated field filter.
    Returns None when no record exists for the address.
    """
    database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("WorkerAPI_payments get id:{} fields:{}".format(
        id, fields))
    fields = lib.fields_to_list(fields)
    utxo = Pool_utxo.get_by_address(id)
    # BUG FIX: guard the lookup - previously a missing record raised
    # AttributeError on None.to_json(); the sibling by-userid endpoint
    # returns None in this case, so do the same here.
    if utxo is None:
        return None
    return utxo.to_json(fields)
def main():
    """Payment maker: credit worker accounts for each unlocked pool block.

    For every pool block in state "unlocked": computes the PPLNG payout
    map, records it as a Pool_credits row, flips the block to "paid", and
    credits each worker's utxo balance. All of that is one DB transaction
    per block - any failure rolls the whole block's changes back.
    """
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Get Config settings
    pool_fee = float(CONFIG[PROCESS]["pool_fee"])
    # Number of blocks of share data used to calculate rewards
    PPLNG_WINDOW_SIZE = 60
    try:
        PPLNG_WINDOW_SIZE = int(os.environ["PPLNG_WINDOW_SIZE"])
    except Exception as e:
        LOGGER.error(
            "Failed to get PPLNG_WINDOW_SIZE from the environment: {} Using default size of {}"
            .format(e, PPLNG_WINDOW_SIZE))
    # Connect to DB
    database = lib.get_db()
    # Get current blockchain height
    chain_height = grin.blocking_get_current_height()
    # Get unlocked blocks from the db
    unlocked_blocks = Pool_blocks.get_all_unlocked()
    unlocked_blocks = [blk.height for blk in unlocked_blocks]
    LOGGER.warn("Paying for {} pool blocks: {}".format(
        len(unlocked_blocks), unlocked_blocks))
    for height in unlocked_blocks:
        try:
            LOGGER.warn("Processing unlocked block: {}".format(height))
            # Call the library routine to get this blocks payout map
            payout_map = pool.calculate_block_payout_map(
                height, PPLNG_WINDOW_SIZE, pool_fee, LOGGER, False)
            #print("payout_map = {}".format(payout_map))
            # Store the payment map for this block
            credits_record = Pool_credits(chain_height, height, payout_map)
            database.db.getSession().add(credits_record)
            # Make payments based on the workers total share_value
            # NOTE: the state flip and the credits below share the commit
            # at the end of this iteration, so an error reverts both.
            Pool_blocks.setState(height, "paid")
            for user_id, payment_amount in payout_map.items():
                # Add worker rewards to pool account balance
                LOGGER.warn("Credit to user: {} = {}".format(
                    user_id, payment_amount))
                worker_utxo = Pool_utxo.credit_worker(user_id, payment_amount)
                # Worker_stats accounting and running totals
                #latest_worker_stats = Worker_stats.get_latest_by_id(user_id)
                #latest_worker_stats.dirty = True
            database.db.getSession().commit()
        except Exception as e:
            database.db.getSession().rollback()
            LOGGER.exception("Something went wrong: {}".format(repr(e)))
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
def main():
    """Legacy payout run: distribute REWARD per unlocked block
    proportionally to each worker's accepted share difficulty.

    For each unlocked pool block: sums Grin-accepted share difficulty per
    worker, credits each worker REWARD * (their difficulty / total), and
    marks the block "paid". Each block is one transaction - rolled back
    on any error. REWARD is assumed to be a module-level constant - TODO
    confirm.
    """
    global LOGGER
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    latest_block = 0
    # XXX All in one db transaction....
    # Get unlocked blocks from the db
    unlocked_blocks = Pool_blocks.get_all_unlocked()
    database.db.getSession().commit()
    for pb in unlocked_blocks:
        try:
            LOGGER.warn("Processing unlocked block: {}".format(pb))
            if pb.height > latest_block:
                latest_block = pb.height
            # Get valid pool_shares for that block from the db
            pool_shares = Pool_shares.get_valid_by_height(pb.height)
            # Calculate Payment info: total accepted difficulty per worker
            worker_shares = {}
            for ps in pool_shares:
                LOGGER.warn("Processing pool_shares: {}".format(ps))
                # Need to get actual_difficulty
                gs = Grin_shares.get_by_nonce(ps.nonce)
                if gs == None:
                    # XXX NOTE: no payout for shares not accepted by grin node
                    continue
                if ps.found_by in worker_shares:
                    worker_shares[ps.found_by] += gs.actual_difficulty
                else:
                    worker_shares[ps.found_by] = gs.actual_difficulty
            if len(worker_shares) > 0:
                # Calculate reward/difficulty: XXX TODO: Enhance
                # What algorithm to use? Maybe: https://slushpool.com/help/manual/rewards
                r_per_d = REWARD / sum(worker_shares.values())
                for worker in worker_shares.keys():
                    # Calculate reward per share
                    worker_rewards = worker_shares[worker] * r_per_d
                    # Add or create worker rewards
                    worker_utxo = Pool_utxo.credit_worker(
                        worker, worker_rewards)
                    LOGGER.warn("Credit to user: {} = {}".format(
                        worker, worker_rewards))
            # Mark the pool_block state="paid" (maybe "processed" would be more accurate?)
            pb.state = "paid"
            database.db.getSession().commit()
        except Exception as e:
            database.db.getSession().rollback()
            LOGGER.error("Something went wrong: {}".format(e))
    #database.db.getSession().commit()
    # db.set_last_run(PROCESS, str(time.time()))
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
def main():
    """One-off data migration: sanitize usernames in MySQL and REDIS.

    Walks user ids sequentially, masks a few specific accounts, and
    replaces "." with "_" in any remaining username, updating the
    corresponding redis "userid.<name>" key to match. Writes only happen
    when the module-level COMMIT flag is truthy (dry-run otherwise).
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    ##
    # Update user records in MySQL and REDIS
    database = lib.get_db()
    database.db.initializeSession()
    redisdb = lib.get_redis_db()
    redis_userid_key = "userid."
    id = 1
    try:
        while True:
            thisuser = Users.get_by_id(id)
            if thisuser is None:
                # NOTE(review): 2358 appears to be the highest user id at
                # migration time - ids beyond it end the scan; confirm.
                if id > 2358:
                    LOGGER.warn("last id = {}".format(id))
                    break
                id = id + 1
                continue
            # Special-case masking of specific accounts
            if thisuser.username == "bjg62hj8byyksphuw95vqc3f74.lionm1":
                orig_username = thisuser.username
                thisuser.username = "******"
                LOGGER.warn("Updated: {} to {}".format(
                    orig_username, thisuser.username))
            if thisuser.username == "[email protected]_d1":
                orig_username = thisuser.username
                thisuser.username = "******"
                LOGGER.warn("Updated: {} to {}".format(
                    orig_username, thisuser.username))
            if thisuser.username == "*****@*****.**":
                orig_username = thisuser.username
                thisuser.username = "******"
                LOGGER.warn("Updated: {} to {}".format(
                    orig_username, thisuser.username))
            # General case: "." is not allowed in usernames
            if "." in thisuser.username:
                orig_username = thisuser.username
                # Update mysql
                thisuser.username = thisuser.username.replace(".", "_")
                # Update redis: drop the old key, set the new one
                redis_key = redis_userid_key + orig_username
                COMMIT and redisdb.delete(redis_key)
                redis_key = redis_userid_key + thisuser.username
                COMMIT and redisdb.set(redis_key, id)
                LOGGER.warn("Updated: {} to {}".format(
                    orig_username, thisuser.username))
            id = id + 1
    except Exception as e:  # AssertionError as e:
        LOGGER.error("Something went wrong: {} - {}".format(
            e, traceback.print_stack()))
    COMMIT or LOGGER.warn("XXX No Commit - Edit for final run")
    COMMIT and database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """Worker-stats daemon: one Worker_stats record per block per worker.

    Walks forward from the last stats height toward the chain tip,
    bulk-saving each height's records and committing every BATCHSZ blocks
    (or on every block near the tip). Any existing Pool_stats row at a
    processed height is marked dirty so the rebuild daemon refreshes it.
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])
    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Worker_stats.get_latest()
    if latest_stat != None:
        last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn("Starting at block height: {}".format(height))
    # Generate worker stats records - one per grin block for each active worker
    while True:
        # latest = grin.blocking_get_current_height()
        latest = Blocks.get_latest().height
        #LOGGER.warn("Latest Network Block Height = {}".format(latest))
        while latest > height:
            try:
                new_stats = workerstats.calculate(height, avg_over_range)
                LOGGER.warn("{} new stats for height {}".format(
                    len(new_stats), height))
                # mark any existing pool_stats dirty
                pool_stats = Pool_stats.get_by_height(height)
                if pool_stats is not None:
                    LOGGER.warn(
                        "Marked existing pool_stats dirty for height: {}".
                        format(height))
                    pool_stats.dirty = True
                database.db.getSession().bulk_save_objects(new_stats)
                # Commit in batches, but on every block when near the tip
                if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                    database.db.getSession().commit()
                for stats in new_stats:
                    LOGGER.warn(
                        "Added Worker_stats for block: {}, Worker: {} - {} {} {} {} {} {}"
                        .format(stats.height, stats.worker, stats.gps,
                                stats.shares_processed,
                                stats.total_shares_processed,
                                stats.grin_paid, stats.total_grin_paid,
                                stats.balance))
                height = height + 1
            except Exception as e:
                LOGGER.error("Something went wrong: {}".format(e))
                LOGGER.error("Traceback: {}".format(
                    traceback.format_exc().splitlines()))
                database.db.getSession().rollback()
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    # NOTE: unreachable - kept from the original.
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """Chain auditor: scan recent blocks, flag orphans, back-fill gaps.

    Fetches the node's tip height, then walks the last validation_depth
    blocks comparing each stored Blocks row's hash against the node's
    header. Hash mismatches are marked "orphan"; heights missing from the
    DB are inserted with state "missing".
    """
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    grin_api_url = "http://" + CONFIG["grin_node"]["address"] + ":" + CONFIG["grin_node"]["api_port"]
    status_url = grin_api_url + "/v1/status"
    blocks_url = grin_api_url + "/v1/blocks/"
    validation_depth = int(CONFIG[PROCESS]["validation_depth"])
    response = requests.get(status_url)
    latest = int(response.json()["tip"]["height"])
    last = latest - validation_depth  # start a reasonable distance back
    if last < 0:
        last = 1
    LOGGER.warn("Starting from block #{}".format(last))
    # last = 0
    for i in range(last, latest):
        url = blocks_url + str(i)
        response = requests.get(url).json()
        # print("{}: {}".format(response["header"]["height"], response["header"]["hash"]))
        try:
            # NOTE(review): the height is passed as a one-element list
            # here, unlike scalar calls elsewhere in this codebase -
            # confirm Blocks.get_by_height accepts a list.
            rec = Blocks.get_by_height([i])
            if rec is not None:
                # Known block: compare hashes, flag fresh orphans
                if rec.hash != response["header"]["hash"] and rec.state != "orphan":
                    LOGGER.warn("Found an orphan - height: {}, hash: {} vs {}".format(
                        rec.height, rec.hash, response["header"]["hash"]))
                    rec.state = "orphan"
                    database.db.getSession().commit()
            else:
                # Unknown block: insert it from the node's header data
                LOGGER.warn("Adding missing block - height: {}".format(
                    response["header"]["height"]))
                # XXX TODO: Probably want to mark it as "missing" so we know it was filled in after the fact?
                missing_block = Blocks(hash=response["header"]["hash"],
                                       version=response["header"]["version"],
                                       height=response["header"]["height"],
                                       previous=response["header"]["previous"],
                                       timestamp=response["header"]["timestamp"][:-1],
                                       output_root=response["header"]["output_root"],
                                       range_proof_root=response["header"]["range_proof_root"],
                                       kernel_root=response["header"]["kernel_root"],
                                       nonce=response["header"]["nonce"],
                                       total_difficulty=response["header"]["total_difficulty"],
                                       total_kernel_offset=response["header"]["total_kernel_offset"],
                                       state="missing")
                database.db.createDataObj(missing_block)
        except Exception as e:
            # XXX TODO: Something more ?
            LOGGER.error("Something went wrong: {}".format(e))
        sys.stdout.flush()
    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
def main():
    """Share-consumer entry point.

    Starts the share-data and rig-data commit scheduler threads, then one
    pika consumer thread per RMQ server listed (space-separated) in the
    RMQ environment variable (default "rmq").
    """
    global LOGGER
    global CONFIG
    global SHARE_EXPIRETIME
    global RABBITMQ_USER
    global RABBITMQ_PASSWORD
    global RMQ
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    SHARE_EXPIRETIME = int(CONFIG[PROCESS]["share_expire_time"])
    COMMIT_INTERVAL = int(CONFIG[PROCESS]["commit_interval"])
    # Allow shares to lag up to max_lag behind the blockchain before filler record is created
    SHAREDATA_COMMIT_LAG = 3
    # Allow rigdata to lag up to max_lag behind the blockchain before creating the record
    RIGDATA_COMMIT_LAG = 3
    RABBITMQ_USER = os.environ["RABBITMQ_USER"]
    RABBITMQ_PASSWORD = os.environ["RABBITMQ_PASSWORD"]
    try:
        RMQ = os.environ["RMQ"]
    except KeyError as e:
        RMQ = "rmq"
        # BUG FIX: corrected "servsers" typo in the log message
        LOGGER.warn("Cant determine RMQ servers, default to {}".format(RMQ))
    ##
    # Start a thread to commit share records
    commit_thread = threading.Thread(target=ShareCommitScheduler,
                                     args=(
                                         SHAREDATA_COMMIT_LAG,
                                         COMMIT_INTERVAL,
                                         LOGGER,
                                     ))
    commit_thread.start()
    ##
    rigdata_thread = threading.Thread(target=RigDataCommitScheduler,
                                      args=(
                                          RIGDATA_COMMIT_LAG,
                                          COMMIT_INTERVAL,
                                          LOGGER,
                                      ))
    rigdata_thread.start()
    ##
    # Start a pika consumer thread for each rabbit we want to consume from
    for rmq in RMQ.split():
        try:
            LOGGER.warn("Connecting to RMQ server: {}".format(rmq))
            rmq_thread = threading.Thread(target=RmqConsumer, args=(rmq, ))
            rmq_thread.start()
        except Exception as e:
            # BUG FIX: was 'logger.error' - an undefined name that would
            # raise NameError instead of logging the failure.
            LOGGER.error("Failed to connect to RMQ: {} - {}".format(rmq, e))
def main():
    """Stats-rebuild daemon: recalculate dirty Pool_stats / Worker_stats.

    Rebuilds are limited to blocks newer than (current height -
    max_rebuild_depth); the loop polls every check_interval seconds.
    """
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    database = lib.get_db()
    LOGGER.warn("=== Starting {}".format(PROCESS))
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    max_rebuild_depth = float(CONFIG[PROCESS]["max_rebuild_depth"])
    avg_over_range_grin = int(CONFIG["grinStats"]["avg_over_range"])
    avg_over_range_pool = int(CONFIG["poolStats"]["avg_over_range"])
    avg_over_range_worker = int(CONFIG["workerStats"]["avg_over_range"])
    current_height = grin.blocking_get_current_height()
    rebuild_height = current_height - max_rebuild_depth
    while True:
        # Grin blocks and therefore grin stats cant be dirty, so the
        # grin-stats recalculation remains intentionally disabled.
        # Check for dirty pool stats
        dirty = Pool_stats.get_first_dirty(rebuild_height)
        if dirty is not None:
            LOGGER.warn("Recalculating Pool Stats from {}".format(
                dirty.height))
            end_height = poolstats.recalculate(dirty.height,
                                               avg_over_range_pool)
            LOGGER.warn("Finished Recalculating Pool Stats: {} - {}".format(
                dirty.height, end_height))
        # Check for dirty worker stats
        dirty = Worker_stats.get_first_dirty(rebuild_height)
        while dirty is not None:
            LOGGER.warn("Recalculating Worker Stats for {} from {}".format(
                dirty.height, avg_over_range_worker))
            end_height = workerstats.recalculate(dirty.height,
                                                 avg_over_range_worker)
            LOGGER.warn(
                "Finished Recalculating Worker Stats for {} - {}".format(
                    dirty.height, end_height))
            # BUG FIX: this refresh call omitted rebuild_height, so records
            # outside the rebuild window could be picked up here,
            # inconsistent with the initial query above.
            dirty = Worker_stats.get_first_dirty(rebuild_height)
        sys.stdout.flush()
        time.sleep(check_interval)
    # NOTE: unreachable - kept from the original.
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """Share-aggregator entry point: wait for the first grin block, then
    start the share-commit thread and one RMQ consumer per endpoint."""
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    global SHARE_EXPIRETIME
    global database
    global RABBITMQ_USER
    global RABBITMQ_PASSWORD

    CONFIG = lib.get_config()
    atexit.register(lib.teardown_db)
    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    SHARE_EXPIRETIME = int(CONFIG[PROCESS]["share_expire_time"])
    commit_interval = int(CONFIG[PROCESS]["commit_interval"])
    rmq_endpoints = json.loads(CONFIG[PROCESS]["rmq"])
    RABBITMQ_USER = os.environ["RABBITMQ_USER"]
    RABBITMQ_PASSWORD = os.environ["RABBITMQ_PASSWORD"]
    database = lib.get_db()
    # Block until the first grin block is known
    HEIGHT = Worker_shares.get_latest_height()
    while HEIGHT is None:
        LOGGER.warn("Waiting on the first grin block...")
        time.sleep(5)
        newest_block = Blocks.get_latest()
        if newest_block is not None:
            HEIGHT = newest_block.height
    SHARES = WorkerShares(LOGGER)
    ##
    # Periodic share-commit thread
    committer = threading.Thread(target=ShareCommitScheduler,
                                 args=(
                                     commit_interval,
                                     database,
                                 ))
    committer.start()
    ##
    # One pika consumer thread per rabbit endpoint
    for endpoint in rmq_endpoints:
        consumer = threading.Thread(target=RmqConsumer, args=(endpoint, ))
        consumer.start()
def get(self, height=0, range=None, fields=None):
    """Shares at a height (optionally a range of heights), restricted to
    the authenticated user's own records."""
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    debug and LOGGER.warn(
        "WorkersAPI_shares get height:{} range:{} fields:{}".format(
            height, range, fields))
    fields = lib.fields_to_list(fields)
    if height == 0:
        height = Blocks.get_latest().height
    # AUTH FILTER: only shares belonging to the requesting user
    return [shares.to_json(fields)
            for shares in Worker_shares.get_by_height(height, range)
            if shares.user_id == g.user.id]
def main():
    """Block unlocker: classify 'new' pool blocks as expired, orphan, or
    unlocked, then commit all state changes in one transaction."""
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    # XXX TODO: The node may not be synced, may need to wait?
    block_locktime = int(CONFIG[PROCESS]["block_locktime"])
    block_expiretime = int(CONFIG[PROCESS]["block_expiretime"])
    LOGGER.warn("using locktime: {}, expiretime: {}".format(
        block_locktime, block_expiretime))
    latest = grin.blocking_get_current_height()
    LOGGER.warn("Latest: {}".format(latest))
    for pb in Pool_blocks.get_all_new():
        if pb.height < (latest - block_expiretime):
            # Dont re-process very old blocks - protection against duplicate payouts.
            LOGGER.error("Processed expired pool block at height: {}".format(
                pb.height))
            pb.state = "expired"
            continue
        response = grin.get_block_by_height(pb.height)
        if response == None:
            # Unknown. Leave as "new" for now and attempt to validate next run
            LOGGER.error("Failed to get block {}".format(pb.height))
            continue
        if int(response["header"]["nonce"]) != int(pb.nonce):
            # Nonce mismatch: our block lost out to a reorg
            LOGGER.warn("Processed orphan pool block at height: {}".format(
                pb.height))
            pb.state = "orphan"
            continue
        if pb.height < (latest - block_locktime):
            # Valid and old enough: release it for payment
            LOGGER.warn("Unlocking pool block at height: {}".format(pb.height))
            pb.state = "unlocked"
    sys.stdout.flush()
    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
def verify_password(username_or_token, password=None):
    """Flask-HTTPAuth callback: authenticate by token, falling back to
    username/password. Sets g.user and returns True on success."""
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    # BUG FIX: print() was called printf-style - "{}, {}" plus extra
    # positional args - which printed the raw template and a tuple.
    # Also stop echoing the plaintext password to stdout.
    print("Will Verify User: {}".format(username_or_token))
    # First try to verify via token
    user_rec = Users.verify_auth_token(app.config['SECRET_KEY'],
                                       username_or_token)
    if user_rec is None:
        # try to authenticate with username/password
        user_rec = Users.get(username_or_token, password)
    if user_rec is None:
        return False
    g.user = user_rec
    return True
def get(self, id, fields=None):
    """Return the authenticated user's own utxo/payout record as JSON,
    or None when no record exists."""
    global database
    #database = lib.get_db()
    LOGGER = lib.get_logger(PROCESS)
    # AUTH FILTER
    if id != g.user.id:
        response = jsonify({
            'message': 'Not authorized to access data for other users'
        })
        response.status_code = 403
        return response
    debug and LOGGER.warn(
        "WorkerAPI_utxo get id:{} fields:{}".format(id, fields))
    fields = lib.fields_to_list(fields)
    utxo = Pool_utxo.get_by_userid(id)
    return None if utxo is None else utxo.to_json(fields)
def post(self):
    """Create a new pool user account.

    Expects form fields 'username' and 'password'. Responses:
    201 with {'username', 'id'} on success; 400 on missing or invalid
    input; 409 on duplicate username; 500 if record creation fails.
    """
    global database
    LOGGER = lib.get_logger(PROCESS)
    username = None
    password = None
    try:
        debug and print("json request = {}".format(request.form))
        username = request.form.get('username')
        password = request.form.get('password')
        # NOTE(review): this logs the plaintext password when debug is
        # on - consider redacting.
        debug and LOGGER.warn(
            "PoolAPI_users POST: user:{} password:{}".format(
                username, password))
    except AttributeError as e:
        LOGGER.warn("Missing username or password - {}".format(str(e)))
    # BUG FIX: corrected "pasword" typo in the error messages; the two
    # identical missing/empty checks are merged into one.
    if username is None or password is None or username == "" or password == "":
        response = jsonify({
            'message': 'Missing arguments: username and password required'
        })
        response.status_code = 400
        return response
    if "." in username:
        response = jsonify({
            'message': 'Invalid Username: May not contain "."'
        })
        response.status_code = 400
        return response
    # Check if the username is taken
    if Users.check_username_exists(username):
        debug and print(
            "Failed to add - conflict with existing user = {}".format(
                username))
        response = jsonify({'message': 'Conflict with existing account'})
        response.status_code = 409
        return response
    # Create the users record
    user_rec = Users.create(username, password)
    if user_rec is None:
        debug and print("Failed to add - unable to create a new user record")
        response = jsonify({
            'message': 'System Error: Failed to create account'
        })
        response.status_code = 500
        return response
    # initialize a worker_stats record for this user (previous block) so
    # they get instant feedback on the UI
    lb = Blocks.get_latest()
    if lb is not None:
        # BUG FIX: reuse the block already fetched instead of calling
        # Blocks.get_latest() a second time (redundant query / race).
        initial_stat = Worker_stats(datetime.utcnow(), lb.height, user_rec.id)
        database.db.createDataObj(initial_stat)
    debug and print("Added user = {}".format(user_rec))
    response = jsonify({'username': user_rec.username, 'id': user_rec.id})
    response.status_code = 201
    return response