def main():
    """poolStats service entry point.

    Generates one Pool_stats record per grin block, forever.  Initializes
    the stats table on first run, then follows the chain, batching DB
    commits every BATCHSZ blocks (or near the chain tip).  On any error it
    rolls back the session, sleeps, and restarts from the latest stats
    record.
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Initialize poolStats records if this is the first run
    latest_stat = Pool_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup
        poolstats.initialize(avg_over_range, LOGGER)
        latest_stat = Pool_stats.get_latest()

    LOGGER.warn("Starting at height: {}".format(latest_stat.height))

    # Generate pool stats records - one per grin block
    while True:
        # Find the height of the latest stats record
        latest_stat = Pool_stats.get_latest()
        height = latest_stat.height + 1
        LOGGER.warn("Starting at height: {}".format(height))
        try:
            while True:
                # Block until at least one worker share exists
                share_height = Worker_shares.get_latest_height()
                while share_height is None:
                    LOGGER.warn("Waiting for shares")
                    share_height = Worker_shares.get_latest_height()
                    sleep(10)
                latest = Blocks.get_latest().height
                stats_height = height - 1
                LOGGER.warn(
                    "Running: Chain height: {}, share height: {}, stats height: {}"
                    .format(latest, share_height, stats_height))
                # Stay one block behind the newest shares so they settle
                while share_height - 1 > height:
                    new_stats = poolstats.calculate(height, avg_over_range)
                    # Batch new stats when possible, but commit at reasonable intervals
                    database.db.getSession().add(new_stats)
                    if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                        database.db.getSession().commit()
                    LOGGER.warn(
                        "Added Pool_stats for block: {} - {} {} {}".format(
                            new_stats.height, new_stats.gps,
                            new_stats.active_miners,
                            new_stats.shares_processed))
                    height = height + 1
                sys.stdout.flush()
                sleep(check_interval)
        except Exception as e:
            # BUG FIX: traceback.print_stack() writes to stderr and returns
            # None, so the previous message always logged "None".
            # format_exc() returns the handled exception's traceback string.
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.format_exc()))
            database.db.getSession().rollback()
            sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """shareAggr service entry point.

    Seeds the share-tracking globals, starts a background thread that
    commits accumulated shares on a fixed 15s cadence, then serves share
    submissions over TCP forever.  Never returns under normal operation.
    """
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    CONFIG = lib.get_config()

    # XXX TODO: Put in config
    HOST = "0.0.0.0"
    PORT = 32080
    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0

    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    database = lib.get_db()

    # Resume from the latest recorded share height, or fall back to the
    # current chain height on a fresh database.
    HEIGHT = Worker_shares.get_latest_height()
    if HEIGHT is None:
        HEIGHT = grin.blocking_get_current_height()
    SHARES = WorkerShares(LOGGER)

    # Background thread: commits accumulated shares every 15 seconds.
    commit_thread = threading.Thread(target=ShareCommitScheduler, args=(15, ))
    commit_thread.start()

    # Serve share submissions until the process is killed.
    server = ThreadedTCPServer((HOST, PORT), ShareHandler)
    server.serve_forever()
def main():
    """workerStats service entry point.

    Generates Worker_stats records - one per grin block for each active
    worker - following the chain forever.  Marks any pre-existing
    Pool_stats row at the same height dirty so the rebuilder recalculates
    it.  Commits in batches of BATCHSZ (or unconditionally near the tip);
    rolls back and retries on error.
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Worker_stats.get_latest()
    if latest_stat is not None:  # idiom fix: was "!= None"
        last_height = latest_stat.height
    height = last_height + 1

    LOGGER.warn("Starting at block height: {}".format(height))

    # Generate worker stats records - one per grin block for each active worker
    while True:
        # latest = grin.blocking_get_current_height()
        latest = Blocks.get_latest().height
        while latest > height:
            try:
                new_stats = workerstats.calculate(height, avg_over_range)
                LOGGER.warn("{} new stats for height {}".format(
                    len(new_stats), height))
                # mark any existing pool_stats dirty
                pool_stats = Pool_stats.get_by_height(height)
                if pool_stats is not None:
                    LOGGER.warn(
                        "Marked existing pool_stats dirty for height: {}".
                        format(height))
                    pool_stats.dirty = True
                database.db.getSession().bulk_save_objects(new_stats)
                # Batch commits; always commit near the chain tip
                if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                    database.db.getSession().commit()
                for stats in new_stats:
                    LOGGER.warn(
                        "Added Worker_stats for block: {}, Worker: {} - {} {} {} {} {} {}"
                        .format(stats.height, stats.worker, stats.gps,
                                stats.shares_processed,
                                stats.total_shares_processed, stats.grin_paid,
                                stats.total_grin_paid, stats.balance))
                height = height + 1
            except Exception as e:
                LOGGER.error("Something went wrong: {}".format(e))
                LOGGER.error("Traceback: {}".format(
                    traceback.format_exc().splitlines()))
                database.db.getSession().rollback()
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """blockValidator service entry point.

    Re-walks the last `validation_depth` blocks against the grin node API:
    marks any DB block whose hash no longer matches the chain as "orphan",
    and back-fills any block missing from the DB with state "missing".
    Commits once at the end (orphan updates commit immediately).
    """
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    # Grin node API endpoints
    grin_api_url = "http://" + CONFIG["grin_node"]["address"] + ":" + CONFIG["grin_node"]["api_port"]
    status_url = grin_api_url + "/v1/status"
    blocks_url = grin_api_url + "/v1/blocks/"
    validation_depth = int(CONFIG[PROCESS]["validation_depth"])

    response = requests.get(status_url)
    latest = int(response.json()["tip"]["height"])
    last = latest - validation_depth  # start a reasonable distance back
    if last < 0:
        last = 1

    LOGGER.warn("Starting from block #{}".format(last))

    # NOTE(review): range(last, latest) excludes the tip block itself -
    # presumably deliberate since the tip may still be re-orged; confirm.
    for i in range(last, latest):
        url = blocks_url + str(i)
        response = requests.get(url).json()
        try:
            # BUG FIX: was Blocks.get_by_height([i]) - passing a one-element
            # list where every other call site passes a scalar height.
            rec = Blocks.get_by_height(i)
            if rec is not None:
                # Hash mismatch on a non-orphan record means this block was
                # re-orged out of the chain.
                if rec.hash != response["header"]["hash"] and rec.state != "orphan":
                    LOGGER.warn("Found an orphan - height: {}, hash: {} vs {}".format(
                        rec.height, rec.hash, response["header"]["hash"]))
                    rec.state = "orphan"
                    database.db.getSession().commit()
            else:
                LOGGER.warn("Adding missing block - height: {}".format(
                    response["header"]["height"]))
                # XXX TODO: Probably want to mark it as "missing" so we know
                # it was filled in after the fact?
                missing_block = Blocks(hash=response["header"]["hash"],
                                       version=response["header"]["version"],
                                       height=response["header"]["height"],
                                       previous=response["header"]["previous"],
                                       timestamp=response["header"]["timestamp"][:-1],
                                       output_root=response["header"]["output_root"],
                                       range_proof_root=response["header"]["range_proof_root"],
                                       kernel_root=response["header"]["kernel_root"],
                                       nonce=response["header"]["nonce"],
                                       total_difficulty=response["header"]["total_difficulty"],
                                       total_kernel_offset=response["header"]["total_kernel_offset"],
                                       state="missing")
                database.db.createDataObj(missing_block)
        except Exception as e:
            # XXX TODO: Something more ?
            LOGGER.error("Something went wrong: {}".format(e))
        sys.stdout.flush()
    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
def main():
    """statsRebuilder service entry point.

    Forever polls for "dirty" Pool_stats and Worker_stats records (within
    `max_rebuild_depth` of the startup chain height) and recalculates them,
    so stats invalidated by re-orgs or late shares are repaired.
    """
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    database = lib.get_db()
    LOGGER.warn("=== Starting {}".format(PROCESS))

    check_interval = float(CONFIG[PROCESS]["check_interval"])
    max_rebuild_depth = float(CONFIG[PROCESS]["max_rebuild_depth"])
    avg_over_range_grin = int(CONFIG["grinStats"]["avg_over_range"])
    avg_over_range_pool = int(CONFIG["poolStats"]["avg_over_range"])
    avg_over_range_worker = int(CONFIG["workerStats"]["avg_over_range"])

    current_height = grin.blocking_get_current_height()
    # NOTE(review): rebuild_height is computed once at startup, so the
    # rebuild window does not advance as the chain grows - confirm intended.
    rebuild_height = current_height - max_rebuild_depth

    while True:
        # Grin blocks and therefore grin stats cant be dirty
        # # Check for dirty grin stats
        # dirty = Grin_stats.get_first_dirty()
        # if dirty is not None:
        #     LOGGER.warn("Recalculating Grin Stats from {}".format(dirty.height))
        #     end_height = grinstats.recalculate(dirty.height, avg_over_range_grin)
        #     LOGGER.warn("Finished Recalculating Grin Stats: {} - {}".format(dirty.height, end_height))

        # Check for dirty pool stats
        dirty = Pool_stats.get_first_dirty(rebuild_height)
        if dirty is not None:
            LOGGER.warn("Recalculating Pool Stats from {}".format(
                dirty.height))
            end_height = poolstats.recalculate(dirty.height,
                                               avg_over_range_pool)
            LOGGER.warn("Finished Recalculating Pool Stats: {} - {}".format(
                dirty.height, end_height))

        # Check for dirty worker stats
        dirty = Worker_stats.get_first_dirty(rebuild_height)
        while dirty is not None:
            LOGGER.warn("Recalculating Worker Stats for {} from {}".format(
                dirty.height, avg_over_range_worker))
            end_height = workerstats.recalculate(dirty.height,
                                                 avg_over_range_worker)
            LOGGER.warn(
                "Finished Recalculating Worker Stats for {} - {}".format(
                    dirty.height, end_height))
            # BUG FIX: was get_first_dirty() with no argument, inconsistent
            # with the call above - it would look past the rebuild window.
            dirty = Worker_stats.get_first_dirty(rebuild_height)

        sys.stdout.flush()
        time.sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """grinStats service entry point.

    Generates one Grin_stats record per grin block, forever.  Initializes
    the table on first run, then follows the chain, committing after every
    block (the batch commit is currently disabled).
    """
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Grin_stats.get_latest()
    print("latest_stat = {}".format(latest_stat))
    if latest_stat is None:  # idiom fix: was "== None"
        LOGGER.warn("Initializing Grin_stats")
        grinstats.initialize(avg_over_range, LOGGER)
        latest_stat = Grin_stats.get_latest()
        print("Finished initializing, latest_stat height = {}".format(
            latest_stat.height))
    last_height = latest_stat.height
    height = last_height + 1

    LOGGER.warn(
        "grinStats service starting at block height: {}".format(height))

    # Generate grin stats records - one per grin block
    while True:
        #latest_db_block = Blocks.get_latest()
        latest = Blocks.get_latest().height
        while latest >= height:
            try:
                new_stats = grinstats.calculate(height, avg_over_range)
                # Batch new stats when possible, but commit at reasonable intervals
                database.db.getSession().add(new_stats)
                # if( (height % BATCHSZ == 0) or (height >= (latest-10)) ):
                database.db.getSession().commit()
                LOGGER.warn(
                    "Added Grin_stats for block: {} - gps:{} diff:{}".format(
                        new_stats.height, new_stats.gps,
                        new_stats.difficulty))
                height = height + 1
            except AssertionError as e:
                # NOTE(review): only AssertionError is caught here - other
                # exceptions will kill the loop; confirm that is intended.
                LOGGER.error("Something went wrong: {}".format(e))
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    # Payout scheduler entry point: walks every UTXO with a payable balance
    # and attempts an automatic payment for each, then exits.  Exits with
    # status 1 on the first unexpected per-utxo failure.
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    # Configs
    minimum_payout = int(CONFIG[PROCESS]["minimum_payout"])

    # Basic-auth pair for the wallet owner API
    # NOTE(review): wallet_api_user / wallet_api_key come from module scope
    # outside this view - confirm they are set before main() runs.
    walletauth = (wallet_api_user, wallet_api_key)

    utxos = Pool_utxo.getPayable(minimum_payout)

    # XXX TODO: Use the current balance, timestamp, the last_attempt timestamp, last_payout, and failed_attempts
    # XXX TODO: to filter and sort by order we want to make payment attempts
    for utxo in utxos:
        try:
            # Try less often for wallets that dont answer
            # NOTE(review): comparing amount against failure_count is an odd
            # heuristic - the more failures, the larger the balance must be
            # to retry every pass; otherwise retry ~1/12 of the time.
            if utxo.amount < utxo.failure_count:
                if randint(0, 11) != 0:
                    continue
            LOGGER.warn(
                "Processing utxo for: {} {} {} using method: {}".format(
                    utxo.user_id, utxo.address, utxo.amount, utxo.method))
            if utxo.method in ["http", "https", "keybase"]:
                try:
                    #user_id, address, logger, database, wallet_auth, method, invoked_by
                    payments.atomic_send(utxo.user_id, utxo.address, LOGGER,
                                         database, walletauth, utxo.method,
                                         "schedule")
                except payments.PaymentError as e:
                    # Expected failure mode: log and move to the next utxo
                    LOGGER.error("Failed to make http payment: {}".format(e))
            else:
                LOGGER.warn(
                    "Automatic payment does not (yet?) support method: {}".
                    format(utxo.method))
        except Exception as e:
            # Unexpected failure: roll back and abort the whole run
            LOGGER.error("Failed to process utxo: {} because {}".format(
                utxo.user_id, str(e)))
            database.db.getSession().rollback()
            sys.exit(1)
    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """walletTidy service entry point.

    Runs one maintenance pass over the pool wallet: cancels "sent" (but not
    posted) transactions older than the expire timeout, reposts "posted"
    (but not mined) transactions older than the repost timeout, and
    rebalances the wallet's UTXO count.  Each step is best-effort: failures
    are logged and the remaining steps still run.
    """
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    database = lib.get_db()
    grin_owner_api_url = grin.get_owner_api_url()

    # Get Config values
    tx_expire_seconds = int(CONFIG[PROCESS]["tx_expire_seconds"])
    tx_repost_seconds = int(CONFIG[PROCESS]["tx_repost_seconds"])
    tx_expire_delta = timedelta(seconds=tx_expire_seconds)
    # BUG FIX: was timedelta(seconds=tx_expire_seconds) - the repost delta
    # used the expire interval, making tx_repost_seconds config dead.
    tx_repost_delta = timedelta(seconds=tx_repost_seconds)
    target_output_count = 100  # XXX TODO GET FROM CONFIG

    ##
    # Run a few tidy algorithms
    # 1) Run tidys
    # Cancel any "sent" (but not posted) transactions older than the timeout
    try:
        cancel_expired_tx(database, grin_owner_api_url, tx_expire_delta)
    except Exception as e:
        LOGGER.error("Failed to cancel_expired_tx(): {}".format(repr(e)))
    # Repost any "posted" (but not mined) transactions older than the timeout
    try:
        repost_tx(database, grin_owner_api_url, tx_repost_delta)
    except Exception as e:
        LOGGER.error("Failed to repost_tx(): {}".format(repr(e)))
    # 2) Set UTXO
    # If there are too many, or too few outputs in the wallet, re-output
    try:
        update_outputs_count(grin_owner_api_url, target_output_count)
    except Exception as e:
        LOGGER.error("Failed to update_outputs_count(): {}".format(repr(e)))

    LOGGER.warn("=== Completed {}".format(PROCESS))
def main():
    """shareValidator service entry point.

    Validates each unvalidated pool share against its matching grin share
    (looked up by nonce): marks it invalid when no grin share exists, when
    the nonces mismatch, or when the claimed worker difficulty exceeds the
    actual share difficulty; otherwise marks it valid.  Commits all
    verdicts in one transaction, then exits.
    """
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    new_poolshares = Pool_shares.getUnvalidated()
    for pool_share in new_poolshares:
        grin_share = Grin_shares.get_by_nonce(pool_share.nonce)
        # BUG FIX: the original fell through all checks to an unconditional
        # "is_valid = True", overwriting the nonce-mismatch and
        # low-difficulty verdicts.  Restructured as an exclusive chain so
        # only shares that pass every check are marked valid.
        if grin_share is None:
            # No matching validated grin share was found
            # XXX TODO: Only invalidate if its old enough
            pool_share.validated = True
            pool_share.is_valid = False
            pool_share.invalid_reason = "no grin_share"
        elif pool_share.nonce != grin_share.nonce:
            # NOTE(review): grin_share was fetched by this nonce, so this
            # branch looks unreachable - kept for safety.
            pool_share.validated = True
            pool_share.is_valid = False
            pool_share.invalid_reason = "nonce mismatch"
        elif pool_share.worker_difficulty > grin_share.actual_difficulty:
            pool_share.validated = True
            pool_share.is_valid = False
            pool_share.invalid_reason = "low difficulty"
        else:
            # Passed all checks - mark the share valid
            pool_share.validated = True
            pool_share.is_valid = True
            pool_share.invalid_reason = "None"
        LOGGER.warn("Share {}, {} is {} because {}".format(
            pool_share.height, pool_share.nonce, pool_share.is_valid,
            pool_share.invalid_reason))
    database.db.getSession().commit()
    # db.set_last_run(PROCESS, str(time.time()))
    LOGGER.warn("=== Completed {}".format(PROCESS))