Code Example #1
 def get(self, id=None, height=0, range=None, fields=None):
     database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     LOGGER.warn(
         "WorkerAPI_stats get id:{} height:{} range:{} fields:{}".format(
             id, height, range, fields))
     fields = lib.fields_to_list(fields)
     if height == 0:
         height = Blocks.get_latest().height
     stats = []
     if id is None:
         for stat in Worker_stats.get_by_height(height, range):
             #print("YYY: {}".format(stats))
             stats.append(stat.to_json(fields))
         return stats
     else:
         if range is None:
             res = Worker_stats.get_by_height_and_id(id, height)
             if res is None:
                 return "[]".to_json()
             return res.to_json(fields)
         else:
             for stat in Worker_stats.get_by_height_and_id(
                     id, height, range):
                 stats.append(stat.to_json(fields))
             return stats
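
Several of these handlers pass the raw fields query value through lib.fields_to_list() before handing it to to_json(). The grin-pool implementation of that helper is not shown here; a minimal sketch, assuming fields arrives as a comma-separated string and that None means "return all fields", could look like this:

def fields_to_list(fields):
    # Hypothetical sketch only, not the grin-pool implementation:
    # turn "gps,shares_processed" into ["gps", "shares_processed"];
    # None or an empty string means "all fields".
    if fields is None or fields.strip() == "":
        return None
    return [f.strip() for f in fields.split(",")]
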
Code Example #2
 def get(self, id=None, height=0, range=0, fields=None):
     database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     LOGGER.warn(
         "WorkerAPI_shares get id:{} height:{} range:{} fields:{}".format(
             id, height, range, fields))
     fields = lib.fields_to_list(fields)
     if height == 0:
         height = Blocks.get_latest().height
     shares_records = []
     if id is None:
         for shares in Worker_shares.get_by_height(height, range):
             shares_records.append(shares.to_json(fields))
         return shares_records
     else:
         if range is None:
             worker_sh_recs = Worker_shares.get_by_height_and_id(height, id)
             #print("worker_sh_recs = {}".format(worker_sh_recs))
             if worker_sh_recs is None:
                 # No matching record; return an empty list (a plain str has no to_json())
                 return []
             return worker_sh_recs.to_json(fields)
         else:
             for share in Worker_shares.get_by_height_and_id(
                     height, id, range):
                 shares_records.append(share.to_json(fields))
             return shares_records
Code Example #3
 def get(self, height=None, range=None):
     database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     LOGGER.warn(
         "PoolAPI_shareCount get height:{} range:{} fields:{}".format(
             height, range))
     # Totals across all workers are stored in the Pool_stats record
     if range is None:
         if height is None:
             height = 0
         pool_st_rec = Pool_stats.get_by_height(height)
         if pool_st_rec is None:
             count = 0
         else:
             count = pool_st_rec.total_shares_processed
         return {
             "height": height,
             "count": count,
         }
     else:
         counts = []
         pool_st_recs = Pool_stats.get_by_height(height, range)
         for st_rec in pool_st_recs:
             rec = {
                 "height": st_rec.height,
                 "count": st_rec.total_shares_processed,
             }
             counts.append(rec)
         return counts
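
Depending on whether range is supplied, this handler returns either a single height/count dict or a list of them. An illustration of the two response shapes (the heights and counts below are invented):

# Illustration only; values are made up.
single_response = {"height": 123456, "count": 9184}    # range is None
ranged_response = [                                     # range given, e.g. 3
    {"height": 123454, "count": 9120},
    {"height": 123455, "count": 9150},
    {"height": 123456, "count": 9184},
]
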
Code Example #4
File: poolstats.py  Project: waosman/grin-pool
def initialize(window_size, logger):
    database = lib.get_db()
    # Special case for new pool startup
    block_zero = None
    while block_zero is None:
        logger.warn("Waiting for the first block record in the database")
        time.sleep(1)
        block_zero = Blocks.get_earliest()
    print("block_zero={}".format(block_zero))

    stat_height = max(0, block_zero.height + window_size)
    seed_stat = Pool_stats(
        height=stat_height,
        timestamp=datetime.utcnow(),
        active_miners=0,
        shares_processed=0,
        share_counts=None,
        total_blocks_found=0,
        total_shares_processed=0,
        dirty=False,
    )
    database.db.createDataObj(seed_stat)
    seed_share = Worker_shares(
        height=stat_height,
        user_id=1,
        timestamp=datetime.utcnow(),
    )
    database.db.createDataObj(seed_share)
Code Example #5
File: paymentEstimator.py  Project: bitgrin/grin-pool
def main():
    global CONFIG
    global LOGGER
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Number of blocks of share data used to calculate rewards
    PPLNG_WINDOW_SIZE = 60
    try:
        PPLNG_WINDOW_SIZE = int(os.environ["PPLNG_WINDOW_SIZE"])
    except Exception as e:
        LOGGER.error(
            "Failed to get PPLNG_WINDOW_SIZE from the environment: {}.  Using default size of {}"
            .format(e, PPLNG_WINDOW_SIZE))

    # Connect to DB
    database = lib.get_db()
    estimated = []  # Blocks we know have already been estimated - XXX TODO: Clean paid blocks out of this list

    # Get Config settings
    pool_fee = float(CONFIG[PROCESS]["pool_fee"])

    while True:
        # Generate pool block reward estimates for all new and unlocked blocks
        try:
            database.db.initializeSession()
            unlocked_blocks = Pool_blocks.get_all_unlocked()
            new_blocks = Pool_blocks.get_all_new()
            unlocked_blocks_h = [blk.height for blk in unlocked_blocks]
            new_blocks_h = [blk.height for blk in new_blocks]

            need_estimates = []
            for height in unlocked_blocks_h + new_blocks_h:
                if height not in estimated:
                    need_estimates.append(height)
            if need_estimates:
                LOGGER.warn("Will ensure estimate for blocks: {}".format(
                    need_estimates))

                # Generate Estimate
                for height in need_estimates:
                    LOGGER.warn("Ensure estimate for block: {}".format(height))
                    payout_map = pool.calculate_block_payout_map(
                        height, PPLNG_WINDOW_SIZE, pool_fee, LOGGER, True)
                    # Double check the total paid is correct
                    estimated.append(height)
                    LOGGER.warn(
                        "Completed estimate for block: {}".format(height))

                LOGGER.warn("Completed estimates")
            database.db.destroySession()
            sleep(check_interval)
        except Exception as e:  # AssertionError as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))

        LOGGER.warn("=== Completed {}".format(PROCESS))
        sleep(check_interval)
Code Example #6
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    database.db.initializeSession()

    while True:
        # Generate pool block reward estimates for all new and unlocked blocks
        try:
            unlocked_blocks = Pool_blocks.get_all_unlocked()
            unlocked_blocks_h = [blk.height for blk in unlocked_blocks]
            new_blocks = Pool_blocks.get_all_new()
            new_blocks_h = [blk.height for blk in new_blocks]
            LOGGER.warn("Will ensure estimate for blocks: {}".format(
                unlocked_blocks_h + new_blocks_h))

            # Generate Estimate
            for height in unlocked_blocks_h + new_blocks_h:
                LOGGER.warn("Ensure estimate for block: {}".format(height))
                payout_map = pool.calculate_block_payout_map(
                    height, 60, LOGGER, True)
                LOGGER.warn("Completed estimate for block: {}".format(height))

            #database.db.getSession().commit()
            LOGGER.warn("Completed estimates")
            sleep(check_interval)
        except Exception as e:  # AssertionError as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))

        LOGGER.warn("=== Completed {}".format(PROCESS))
        sleep(check_interval)
Code Example #7
def main():
    config = lib.get_config()
    PROCESS = "libNetworkTest"
    LOGGER = lib.get_logger(PROCESS)

    database = lib.get_db()
    
    # Get stats
    stats = get_stats()
    LOGGER.warn("stats = {}".format(stats))
    LOGGER.warn("")

    # Get blocks found
    bf = get_blocks_found_data(5)
    LOGGER.warn("blocks found = {}".format(bf))
    LOGGER.warn("")

    # Get graph rate
    gr = get_graph_rate_data(20)
    LOGGER.warn("graph rate = {}".format(gr))
    LOGGER.warn("")

    # Get difficulty data
    diff = get_difficulty_data(200)
    LOGGER.warn("difficulty = {}".format(diff))

    sys.exit(1)
Code Example #8
File: paymentReport.py  Project: waosman/grin-pool
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    database.db.initializeSession()

    pp = pprint.PrettyPrinter(indent=4)

    # Fetch and print pool block reward estimates for latest N pool blocks
    try:
        pool_blocks = Pool_blocks.get_latest(NUM_BLOCKS)
        pool_blocks_h = [blk.height for blk in pool_blocks]
        LOGGER.warn(
            "Will report estimates for pool blocks: {}".format(pool_blocks_h))

        # Print Estimate
        for height in pool_blocks_h:
            pp.pprint("Eestimate for block: {}".format(height))
            payout_map = pool.get_block_payout_map_estimate(height, LOGGER)
            pp.pprint(payout_map)
    except Exception as e:  # AssertionError as e:
        LOGGER.error("Something went wrong: {} - {}".format(
            e, traceback.print_stack()))

        LOGGER.warn("=== Completed {}".format(PROCESS))
Code Example #9
def initialize():
    database = lib.get_db()
    # Special case for new pool startup - Need 3 stats records to bootstrap
    block_zero = Blocks.get_by_height(0)
    seed_stat0 = Grin_stats(height=0,
                            timestamp=block_zero.timestamp,
                            gps=0,
                            difficulty=block_zero.total_difficulty,
                            total_utxoset_size=block_zero.num_inputs)
    database.db.createDataObj(seed_stat0)
    block_one = Blocks.get_by_height(1)
    seed_stat1 = Grin_stats(height=1,
                            timestamp=block_one.timestamp,
                            gps=0,
                            difficulty=block_one.total_difficulty -
                            block_zero.total_difficulty,
                            total_utxoset_size=seed_stat0.total_utxoset_size +
                            block_one.num_outputs - block_one.num_inputs)
    database.db.createDataObj(seed_stat1)
    block_two = Blocks.get_by_height(2)
    seed_stat2 = Grin_stats(height=2,
                            timestamp=block_two.timestamp,
                            gps=0,
                            difficulty=block_two.total_difficulty -
                            block_one.total_difficulty,
                            total_utxoset_size=seed_stat1.total_utxoset_size +
                            block_two.num_outputs - block_two.num_inputs)
    database.db.createDataObj(seed_stat2)
Code Example #10
def process_pool_log():
    global LOGGER
    global CONFIG
    # Connect to DB
    database = lib.get_db()

    POOL_LOG = CONFIG["stratum"]["log_dir"] + "/" + CONFIG["stratum"]["log_filename"]

    # (re)Process all logs
    logfiles = glob.glob(POOL_LOG + '*')
    LOGGER.warn("Processing existing logs: {}".format(logfiles))
    sys.stdout.flush()
    for logfile in logfiles:
        with open(logfile) as f:
            for line in f:
                try:
                    process_pool_logmessage(line, database)
                except Exception as e:
                    LOGGER.error("Failed to process pool log message: {} {}".format(line, e))

    # Read future log messages
    LOGGER.warn("Processing new logs: {}".format(POOL_LOG))
    sys.stdout.flush()
    poollog = subprocess.Popen(
        ['tail', '-F', POOL_LOG],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    while True:
        line = poollog.stdout.readline().decode('utf-8')
        try:
            process_pool_logmessage(line, database)
        except Exception as e:
            LOGGER.error("Failed to process pool log message: {} {}".format(line, e))
Code Example #11
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get the list of pool_blocks that have been paid
    paid_poolblocks = Pool_blocks.get_all_paid()
    print("Number of paid_poolblocks: {}".format(len(paid_poolblocks)))
    sys.stdout.flush()

    for pb in paid_poolblocks:
        # Get the blockchain data for this block
        response = grin.get_block_by_height(pb.height)
        if response == None:
            LOGGER.error("Failed to get block {}".format(pb.height))
            continue
        if int(response["header"]["nonce"]) != int(pb.nonce):
            print("")
            sys.stdout.flush()
            LOGGER.warn("Processed orphan pool block at height: {}".format(
                pb.height))
        else:
            sys.stdout.write(".")
            sys.stdout.flush()
Code Example #12
File: shareAggr.py  Project: zhuk2303/grin-pool
def ShareCommitScheduler(interval, database):
    global LOGGER
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    database = lib.get_db()

    try:
        # XXX TODO:  enhance
        while True:
            bc_height = Blocks.get_latest().height  # grin.blocking_get_current_height()
            LOGGER.warn(
                "HEIGHT={}, POOLSHARE_HEIGHT={}, GRINSHARE_HEIGHT={}".format(
                    HEIGHT, POOLSHARE_HEIGHT, GRINSHARE_HEIGHT))
            while (HEIGHT < POOLSHARE_HEIGHT
                   and HEIGHT < GRINSHARE_HEIGHT) or (bc_height > HEIGHT):
                # Commit and purge current block share data if we are starting a new block
                LOGGER.warn("Commit shares for height: {}".format(HEIGHT))
                # time.sleep(5) # Give straggler shares a chance to come in
                SHARES.commit(HEIGHT)
                HEIGHT = HEIGHT + 1
            # Commit and purge all old share data (except current block) every 'interval' seconds
            try:
                SHARES.commit()  # All except current block
            except Exception as e:
                LOGGER.error("Failed to commit: {}".format(e))
            time.sleep(interval)
    except Exception as e:
        LOGGER.error("Something went wrong: {}\n{}".format(
            e,
            traceback.format_exc().splitlines()))
        time.sleep(interval)
    lib.teardown_db()
Code Example #13
File: poolStats.py  Project: waosman/grin-pool
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Initialize poolStats records if this is the first run
    latest_stat = Pool_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup
        poolstats.initialize(avg_over_range, LOGGER)

    latest_stat = Pool_stats.get_latest()
    LOGGER.warn("Starting at height: {}".format(latest_stat.height))

    # Generate pool stats records - one per grin block
    while True:
        # Find the height of the latest stats record
        latest_stat = Pool_stats.get_latest()
        height = latest_stat.height + 1
        LOGGER.warn("Starting at height: {}".format(height))
        try:
            while True:
                share_height = Worker_shares.get_latest_height()
                while share_height is None:
                    LOGGER.warn("Waiting for shares")
                    share_height = Worker_shares.get_latest_height()
                    sleep(10)
                latest = Blocks.get_latest().height
                stats_height = height - 1
                LOGGER.warn(
                    "Running: Chain height: {}, share height: {},  stats height: {}"
                    .format(latest, share_height, stats_height))
                while share_height - 1 > height:
                    new_stats = poolstats.calculate(height, avg_over_range)
                    # Batch new stats when possible, but commit at reasonable intervals
                    database.db.getSession().add(new_stats)
                    if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                        database.db.getSession().commit()
                    LOGGER.warn(
                        "Added Pool_stats for block: {} - {} {} {}".format(
                            new_stats.height, new_stats.gps,
                            new_stats.active_miners,
                            new_stats.shares_processed))
                    height = height + 1
                    sys.stdout.flush()
                sleep(check_interval)
        except Exception as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))
            database.db.getSession().rollback()
            sleep(check_interval)

    LOGGER.warn("=== Completed {}".format(PROCESS))
Code Example #14
File: shareAggr.py  Project: yinfeng2016/grin-pool
def main():
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    CONFIG = lib.get_config()

    # XXX TODO: Put in config
    HOST = "0.0.0.0"
    PORT = 32080
    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0

    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    database = lib.get_db()
    HEIGHT = Worker_shares.get_latest_height()
    if HEIGHT is None:
        HEIGHT = grin.blocking_get_current_height()
    SHARES = WorkerShares(LOGGER)

    #server = ThreadedHTTPServer((HOST, PORT), ShareHandler)
    #server = HTTPServer((HOST, PORT), ShareHandler)

    #    server = socketserver.TCPServer((HOST, PORT), ShareHandler)
    #    server.handle_request()
    #    server.server_close()

    commit_thread = threading.Thread(target=ShareCommitScheduler, args=(15, ))
    commit_thread.start()
    server = ThreadedTCPServer((HOST, PORT), ShareHandler)
    server.serve_forever()
Code Example #15
 def get(self, id=None, height=0, range=None):
     database = lib.get_db()
     print("id={} , height={}, range = {}".format(id, height, range))
     if range == 0:
         range = grin.get_current_height()
     if height == 0:
         height = grin.get_current_height()
     return Pool_shares.count(height, range, id)
Code Example #16
def get_reward_by_block(height):
    # Get the block and determine how much its worth to the winner
    database = lib.get_db()
    theblock = Blocks.get_by_height(height)
    #print("The block {}".format(theblock.to_json()))
    if theblock is None:
        return 0
    return 60 * 1000000000 + theblock.fee
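
The value returned here is the fixed 60-grin coinbase expressed in nanogrin (1 grin = 10^9 nanogrin) plus the block's transaction fees. A small worked example, with an assumed fee value:

COINBASE_NANOGRIN = 60 * 1000000000   # 60 grin in nanogrin
assumed_fee = 7000000                 # hypothetical total block fees, in nanogrin
reward = COINBASE_NANOGRIN + assumed_fee
print(reward)                         # 60007000000, i.e. 60.007 grin
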
Code Example #17
 def get(self, id=None, fields=None):
     database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     LOGGER.warn("WorkerAPI_payments get id:{} fields:{}".format(
         id, fields))
     fields = lib.fields_to_list(fields)
     utxo = Pool_utxo.get_by_address(id)
     return utxo.to_json(fields)
Code Example #18
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    ##
    # Update user records in MySQL and REDIS
    database = lib.get_db()
    database.db.initializeSession()

    redisdb = lib.get_redis_db()
    redis_userid_key = "userid."

    id = 1
    try:
        while True:
            thisuser = Users.get_by_id(id)
            if thisuser is None:
                if id > 2358:
                    LOGGER.warn("last id = {}".format(id))
                    break
                id = id + 1
                continue
            if thisuser.username == "bjg62hj8byyksphuw95vqc3f74.lionm1":
                orig_username = thisuser.username
                thisuser.username = "******"
                LOGGER.warn("Updated: {} to {}".format(orig_username,
                                                       thisuser.username))
            if thisuser.username == "[email protected]_d1":
                orig_username = thisuser.username
                thisuser.username = "******"
                LOGGER.warn("Updated: {} to {}".format(orig_username,
                                                       thisuser.username))
            if thisuser.username == "*****@*****.**":
                orig_username = thisuser.username
                thisuser.username = "******"
                LOGGER.warn("Updated: {} to {}".format(orig_username,
                                                       thisuser.username))
            if "." in thisuser.username:
                orig_username = thisuser.username
                # Update mysql
                thisuser.username = thisuser.username.replace(".", "_")
                # Update redis
                redis_key = redis_userid_key + orig_username
                COMMIT and redisdb.delete(redis_key)
                redis_key = redis_userid_key + thisuser.username
                COMMIT and redisdb.set(redis_key, id)
                LOGGER.warn("Updated: {} to {}".format(orig_username,
                                                       thisuser.username))
            id = id + 1
    except Exception as e:  # AssertionError as e:
        LOGGER.error("Something went wrong: {} - {}".format(
            e, traceback.print_stack()))

    COMMIT or LOGGER.warn("XXX No Commit - Edit for final run")
    COMMIT and database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
Code Example #19
File: paymentMaker.py  Project: smith7800/grin-pool
def main():
    global LOGGER
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    latest_block = 0

    # XXX All in one db transaction....
    # Get unlocked blocks from the db
    unlocked_blocks = Pool_blocks.get_all_unlocked()
    database.db.getSession().commit()
    for pb in unlocked_blocks:
        try:
            LOGGER.warn("Processing unlocked block: {}".format(pb))
            if pb.height > latest_block:
                latest_block = pb.height
            # Get valid pool_shares for that block from the db
            pool_shares = Pool_shares.get_valid_by_height(pb.height)
            # Calculate Payment info:
            worker_shares = {}
            for ps in pool_shares:
                LOGGER.warn("Processing pool_shares: {}".format(ps))
                # Need to get actual_difficulty
                gs = Grin_shares.get_by_nonce(ps.nonce)
                if gs == None:
                    # XXX NOTE: no payout for shares not accepted by grin node
                    continue
                if ps.found_by in worker_shares:
                    worker_shares[ps.found_by] += gs.actual_difficulty
                else:
                    worker_shares[ps.found_by] = gs.actual_difficulty
            if len(worker_shares) > 0:
                # Calculate reward/difficulty: XXX TODO: Enhance
                #  What algorithm to use?  Maybe: https://slushpool.com/help/manual/rewards
                r_per_d = REWARD / sum(worker_shares.values())
                for worker in worker_shares.keys():
                    # Calculate reward per share
                    worker_rewards = worker_shares[worker] * r_per_d
                    # Add or create worker rewards
                    worker_utxo = Pool_utxo.credit_worker(
                        worker, worker_rewards)
                    LOGGER.warn("Credit to user: {} = {}".format(
                        worker, worker_rewards))
            # Mark the pool_block state="paid" (maybe "processed" would be more accurate?)
            pb.state = "paid"
            database.db.getSession().commit()
        except Exception as e:
            database.db.getSession().rollback()
            LOGGER.error("Something went wrong: {}".format(e))

    #database.db.getSession().commit()
    # db.set_last_run(PROCESS, str(time.time()))
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
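
The payout math above is a plain proportional split: each worker receives REWARD multiplied by its fraction of the total accepted difficulty. A small worked example with invented numbers:

REWARD = 60 * 1000000000                        # assumed block reward in nanogrin
worker_shares = {"alice": 300, "bob": 100}      # hypothetical accepted difficulty per worker
r_per_d = REWARD / sum(worker_shares.values())  # reward per unit of difficulty
payouts = {w: d * r_per_d for w, d in worker_shares.items()}
print(payouts)  # {'alice': 45000000000.0, 'bob': 15000000000.0}
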
Code Example #20
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Worker_stats.get_latest()

    if latest_stat != None:
        last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn("Starting at block height: {}".format(height))

    # Generate worker stats records - one per grin block for each active worker
    while True:
        # latest = grin.blocking_get_current_height()
        latest = Blocks.get_latest().height
        #LOGGER.warn("Latest Network Block Height = {}".format(latest))
        while latest > height:
            try:
                new_stats = workerstats.calculate(height, avg_over_range)
                LOGGER.warn("{} new stats for height {}".format(
                    len(new_stats), height))
                # mark any existing pool_stats dirty
                pool_stats = Pool_stats.get_by_height(height)
                if pool_stats is not None:
                    LOGGER.warn(
                        "Marked existing pool_stats dirty for height: {}".
                        format(height))
                    pool_stats.dirty = True
                database.db.getSession().bulk_save_objects(new_stats)
                if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                    database.db.getSession().commit()
                for stats in new_stats:
                    LOGGER.warn(
                        "Added Worker_stats for block: {}, Worker: {} - {} {} {} {} {} {}"
                        .format(stats.height, stats.worker, stats.gps,
                                stats.shares_processed,
                                stats.total_shares_processed, stats.grin_paid,
                                stats.total_grin_paid, stats.balance))
                height = height + 1
            except Exception as e:
                LOGGER.error("Something went wrong: {}".format(e))
                LOGGER.error("Traceback: {}".format(
                    traceback.format_exc().splitlines()))
                database.db.getSession().rollback()
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
Code Example #21
def main():
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Get Config settings
    pool_fee = float(CONFIG[PROCESS]["pool_fee"])
    # Number of blocks of share data used to calculate rewards
    PPLNG_WINDOW_SIZE = 60
    try:
        PPLNG_WINDOW_SIZE = int(os.environ["PPLNG_WINDOW_SIZE"])
    except Exception as e:
        LOGGER.error(
            "Failed to get PPLNG_WINDOW_SIZE from the environment: {}  Using default size of {}"
            .format(e, PPLNG_WINDOW_SIZE))

    # Connect to DB
    database = lib.get_db()

    # Get current blockchain height
    chain_height = grin.blocking_get_current_height()

    # Get unlocked blocks from the db
    unlocked_blocks = Pool_blocks.get_all_unlocked()
    unlocked_blocks = [blk.height for blk in unlocked_blocks]
    LOGGER.warn("Paying for {} pool blocks: {}".format(len(unlocked_blocks),
                                                       unlocked_blocks))
    for height in unlocked_blocks:
        try:
            LOGGER.warn("Processing unlocked block: {}".format(height))
            # Call the library routine to get this blocks payout map
            payout_map = pool.calculate_block_payout_map(
                height, PPLNG_WINDOW_SIZE, pool_fee, LOGGER, False)
            #print("payout_map = {}".format(payout_map))
            # Store the payment map for this block
            credits_record = Pool_credits(chain_height, height, payout_map)
            database.db.getSession().add(credits_record)
            # Make payments based on the workers total share_value
            Pool_blocks.setState(height, "paid")
            for user_id, payment_amount in payout_map.items():
                # Add worker rewards to pool account balance
                LOGGER.warn("Credit to user: {} = {}".format(
                    user_id, payment_amount))
                worker_utxo = Pool_utxo.credit_worker(user_id, payment_amount)
                # Worker_stats accounting and running totals
                #latest_worker_stats = Worker_stats.get_latest_by_id(user_id)
                #latest_worker_stats.dirty = True
            database.db.getSession().commit()

        except Exception as e:
            database.db.getSession().rollback()
            LOGGER.exception("Something went wrong: {}".format(repr(e)))

    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
Code Example #22
File: shareAggr.py  Project: waosman/grin-pool
def RigDataCommitScheduler(max_lag, commit_interval, logger):
    global RIGDATA_MUTEX
    global RIGDATA_GROUPSIZE
    global REDIS_RIGDATA_KEY
    global REDIS_RIGDATA_EXPIRETIME
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Cant get latest block from database")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            lib.teardown_db()
            RIGDATA_MUTEX.acquire()
            try:
                logger.warn("= Begin RigDataCommitScheduler")
                # Iterate over each rigdata cache key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_RIGDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - RIGDATA_GROUPSIZE - max_lag:
                        # Commit this set of rigdata records
                        logger.warn(
                            "-- RigDataCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_cached_rigdata = redisdb.get(key)
                        redis_cached_rigdata = json.loads(
                            redis_cached_rigdata.decode())
                        for user, rigdata in redis_cached_rigdata.items():
                            redis_key = "{}.{}.{}".format(
                                REDIS_RIGDATA_KEY, share_height, user)
                            if redisdb.exists(redis_key):
                                # XXX TODO
                                logger.warn(
                                    "XXX TODO: DUPLICATE RIGDATA WORKER KEY - MERGE ???"
                                )
                            else:
                                redisdb.set(redis_key,
                                            json.dumps(rigdata),
                                            ex=REDIS_RIGDATA_EXPIRETIME)
                        # Wrote this rigdata to REDIS, so remove the cache record now
                        redisdb.delete(key)
            finally:
                RIGDATA_MUTEX.release()
                logger.warn("= End RigDataCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            logger.exception("Something went wrong: {}".format(
                traceback.format_exc()))
            time.sleep(10)
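
The scheduler above drains per-height cache keys of the form "<REDIS_RIGDATA_KEY>-<height>" and rewrites their contents as per-worker keys of the form "<REDIS_RIGDATA_KEY>.<height>.<user>". A standalone sketch of that key translation (the prefix and payload below are assumptions, not the project's actual configuration):

REDIS_RIGDATA_KEY = "rigdata"                                  # assumed key prefix
cache_key = "{}-{}".format(REDIS_RIGDATA_KEY, 123456)          # per-height cache record
cached_rigdata = {"alice": {"gps": 1.2}, "bob": {"gps": 0.8}}  # invented payload
for user, rigdata in cached_rigdata.items():
    worker_key = "{}.{}.{}".format(REDIS_RIGDATA_KEY, 123456, user)
    print(worker_key, rigdata)
# rigdata.123456.alice {'gps': 1.2}
# rigdata.123456.bob {'gps': 0.8}
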
Code Example #23
File: blockValidator.py  Project: smith7800/grin-pool
def main():
    global LOGGER
    global CONFIG

    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    
    # Connect to DB
    database = lib.get_db()

    grin_api_url = "http://" + CONFIG["grin_node"]["address"] + ":" + CONFIG["grin_node"]["api_port"]
    status_url = grin_api_url + "/v1/status"
    blocks_url = grin_api_url + "/v1/blocks/"
    validation_depth = int(CONFIG[PROCESS]["validation_depth"])

    response = requests.get(status_url)
    latest = int(response.json()["tip"]["height"])
    last = latest - validation_depth  # start a reasonable distance back
    if last < 0:
        last = 1
    LOGGER.warn("Starting from block #{}".format(last))
    #    last = 0
    for i in range(last, latest):
        url = blocks_url + str(i)
        response = requests.get(url).json()
        # print("{}: {}".format(response["header"]["height"], response["header"]["hash"]))
        try:
            rec = Blocks.get_by_height([i])
            if rec is not None:
                if rec.hash != response["header"]["hash"] and rec.state != "orphan":
                    LOGGER.warn("Found an orphan - height: {}, hash: {} vs {}".format(rec.height, rec.hash, response["header"]["hash"]))
                    rec.state = "orphan"
                    database.db.getSession().commit()
            else:
                LOGGER.warn("Adding missing block - height: {}".format(response["header"]["height"]))
                # XXX TODO:  Probably want to mark it as "missing" so we know it was filled in after the fact?
                missing_block = Blocks(hash=response["header"]["hash"],
                                       version=response["header"]["version"],
                                       height=response["header"]["height"],
                                       previous=response["header"]["previous"],
                                       timestamp=response["header"]["timestamp"][:-1],
                                       output_root=response["header"]["output_root"],
                                       range_proof_root=response["header"]["range_proof_root"],
                                       kernel_root=response["header"]["kernel_root"],
                                       nonce=response["header"]["nonce"],
                                       total_difficulty=response["header"]["total_difficulty"],
                                       total_kernel_offset=response["header"]["total_kernel_offset"],
                                       state="missing")
                database.db.createDataObj(missing_block)
        except Exception as e:
            # XXX TODO: Something more ?
            LOGGER.error("Something went wrong: {}".format(e))
        sys.stdout.flush()
    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
Code Example #24
def initialize():
    database = lib.get_db()
    # Special case for new pool startup
    seed_stat = Pool_stats(height=0,
                           timestamp=datetime.utcnow(),
                           gps=0,
                           active_miners=0,
                           shares_processed=0,
                           total_shares_processed=0,
                           total_grin_paid=0,
                           total_blocks_found=0)
    database.db.createDataObj(seed_stat)
Code Example #25
File: statsValidator.py  Project: dewdeded/grin-pool
def main():
    global LOGGER
    global CONFIG

    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    database = lib.get_db()
    LOGGER.warn("=== Starting {}".format(PROCESS))

    check_interval = float(CONFIG[PROCESS]["check_interval"])
    max_rebuild_depth = float(CONFIG[PROCESS]["max_rebuild_depth"])
    avg_over_range_grin = int(CONFIG["grinStats"]["avg_over_range"])
    avg_over_range_pool = int(CONFIG["poolStats"]["avg_over_range"])
    avg_over_range_worker = int(CONFIG["workerStats"]["avg_over_range"])

    current_height = grin.blocking_get_current_height()
    rebuild_height = current_height - max_rebuild_depth

    while True:
        # Grin blocks and therefore grin stats cant be dirty
        #        # Check for dirty grin stats
        #        dirty = Grin_stats.get_first_dirty()
        #        if dirty is not None:
        #            LOGGER.warn("Recalculating Grin Stats from {}".format(dirty.height))
        #            end_height = grinstats.recalculate(dirty.height, avg_over_range_grin)
        #            LOGGER.warn("Finished Recalculating Grin Stats: {} - {}".format(dirty.height, end_height))
        # Check for dirty pool stats
        dirty = Pool_stats.get_first_dirty(rebuild_height)
        if dirty is not None:
            LOGGER.warn("Recalculating Pool Stats from {}".format(
                dirty.height))
            end_height = poolstats.recalculate(dirty.height,
                                               avg_over_range_pool)
            LOGGER.warn("Finished Recalculating Pool Stats: {} - {}".format(
                dirty.height, end_height))


        # Check for dirty worker stats
        dirty = Worker_stats.get_first_dirty(rebuild_height)
        while dirty is not None:
            LOGGER.warn("Recalculating Worker Stats for {} from {}".format(
                dirty.height, avg_over_range_worker))
            end_height = workerstats.recalculate(dirty.height,
                                                 avg_over_range_worker)
            LOGGER.warn(
                "Finished Recalculating Worker Stats for {} - {}".format(
                    dirty.height, end_height))
            dirty = Worker_stats.get_first_dirty()

        sys.stdout.flush()
        time.sleep(check_interval)

    LOGGER.warn("=== Completed {}".format(PROCESS))
Code Example #26
File: shareAggr.py  Project: zhuk2303/grin-pool
def main():
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    global SHARE_EXPIRETIME
    global database
    global RABBITMQ_USER
    global RABBITMQ_PASSWORD
    CONFIG = lib.get_config()
    atexit.register(lib.teardown_db)

    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0

    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    SHARE_EXPIRETIME = int(CONFIG[PROCESS]["share_expire_time"])
    commit_interval = int(CONFIG[PROCESS]["commit_interval"])
    rmq_endpoints = json.loads(CONFIG[PROCESS]["rmq"])

    RABBITMQ_USER = os.environ["RABBITMQ_USER"]
    RABBITMQ_PASSWORD = os.environ["RABBITMQ_PASSWORD"]

    database = lib.get_db()
    HEIGHT = Worker_shares.get_latest_height()
    while HEIGHT is None:
        LOGGER.warn("Waiting on the first grin block...")
        time.sleep(5)
        latest_block = Blocks.get_latest()
        if latest_block is not None:
            HEIGHT = latest_block.height

    SHARES = WorkerShares(LOGGER)

    ##
    # Start a thread to commit shares
    commit_thread = threading.Thread(target=ShareCommitScheduler,
                                     args=(
                                         commit_interval,
                                         database,
                                     ))
    commit_thread.start()

    ##
    # Start a pika consumer thread for each rabbit we want to consume from
    for rmq in rmq_endpoints:
        rmq_thread = threading.Thread(target=RmqConsumer, args=(rmq, ))
        rmq_thread.start()
Code Example #27
File: grinstats.py  Project: waosman/grin-pool
def initialize(avg_over_range, logger):
    database = lib.get_db()
    # Special case for new pool startup - Need 3 stats records to bootstrap
    block_zero = None
    while block_zero is None:
        logger.warn("Waiting for the first block record in the database")
        time.sleep(1)
        block_zero = Blocks.get_earliest()
    print("block_zero={}".format(block_zero))
    height = block_zero.height
    # Create avg_over_range dummy block records prior to block_zero
    print("Create block filtters: {} - {}".format(height-avg_over_range, height))
    for h in range(height-avg_over_range, height):
        print("Creating fillter at height {}".format(h))
        new_block = Blocks(hash = "x",
            version = 0,
            height = h,
            previous = "x",
            timestamp = datetime.utcnow(),
            output_root = "x",
            range_proof_root = "x",
            kernel_root = "x",
            nonce = 0,
            edge_bits = 29,
            total_difficulty = block_zero.total_difficulty,
            secondary_scaling = 0,
            num_inputs = 0,
            num_outputs = 0,
            num_kernels = 0,
            fee = 0,
            lock_height = 0,
            total_kernel_offset = "x",
            state = "filler")
        database.db.getSession().add(new_block)
    database.db.getSession().commit()
    seed_stat0 = Grin_stats(
        height=height-2,
        timestamp=block_zero.timestamp,
        difficulty=block_zero.total_difficulty)
    database.db.createDataObj(seed_stat0)
    seed_stat1 = Grin_stats(
        height=height-1,
        timestamp=block_zero.timestamp,
        difficulty=block_zero.total_difficulty)
    database.db.createDataObj(seed_stat1)
    seed_stat2 = Grin_stats(
        height=height,
        timestamp=block_zero.timestamp,
        difficulty=block_zero.total_difficulty)
    database.db.createDataObj(seed_stat2)
    return height
Code Example #28
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get the list of pool_blocks that are
    # old enough to unlock and
    # are not orphan blocks

    # XXX TODO: The node may not be synced, may need to wait?

    block_locktime = int(CONFIG[PROCESS]["block_locktime"])
    block_expiretime = int(CONFIG[PROCESS]["block_expiretime"])
    LOGGER.warn("using locktime: {}, expiretime: {}".format(
        block_locktime, block_expiretime))

    latest = grin.blocking_get_current_height()
    LOGGER.warn("Latest: {}".format(latest))

    new_poolblocks = Pool_blocks.get_all_new()
    for pb in new_poolblocks:
        if pb.height < (latest - block_expiretime):
            # Dont re-process very old blocks - protection against duplicate payouts.
            LOGGER.error("Processed expired pool block at height: {}".format(
                pb.height))
            pb.state = "expired"
            continue
        response = grin.get_block_by_height(pb.height)
        if response == None:
            # Unknown.  Leave as "new" for now and attempt to validate next run
            LOGGER.error("Failed to get block {}".format(pb.height))
            continue
        if int(response["header"]["nonce"]) != int(pb.nonce):
            LOGGER.warn("Processed orphan pool block at height: {}".format(
                pb.height))
            pb.state = "orphan"
            continue
        if pb.height < (latest - block_locktime):
            # This block seems valid, and old enough to unlock
            LOGGER.warn("Unlocking pool block at height: {}".format(pb.height))
            pb.state = "unlocked"
        sys.stdout.flush()

    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
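
The loop above is effectively a small state machine for pool blocks: blocks too far behind the tip expire, nonce mismatches become orphans, and matches deeper than the locktime unlock. A condensed sketch of that decision as a pure function (the names are illustrative, not part of grin-pool):

def classify_pool_block(pb_height, pb_nonce, header_nonce, latest,
                        block_locktime, block_expiretime):
    # Returns the new state for a "new" pool block, or None to leave it unchanged.
    if pb_height < latest - block_expiretime:
        return "expired"    # too old; never re-process (duplicate-payout protection)
    if header_nonce is None:
        return None         # node did not return the block; retry next run
    if int(header_nonce) != int(pb_nonce):
        return "orphan"     # the chain kept a different block at this height
    if pb_height < latest - block_locktime:
        return "unlocked"   # mature enough to pay out
    return None             # ours, but still inside the locktime window
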
Code Example #29
def recalculate(start_height, avg_range):
    database = lib.get_db()
    height = start_height
    while height <= grin.blocking_get_current_height():
        old_stats = Worker_stats.get_by_height(height)
        new_stats = calculate(height, avg_range)
        for old_stat in old_stats:
            database.db.deleteDataObj(old_stat)
        for stats in new_stats:
            print("new/updated stats: {} ".format(stats))
            database.db.getSession().add(stats)
            if height % BATCHSZ == 0:
                database.db.getSession().commit()
        height = height + 1
    database.db.getSession().commit()
Code Example #30
 def get(self, height=0, range=None, fields=None):
     database = lib.get_db()
     fields = lib.fields_to_list(fields)
     if height == 0:
         height = grin.get_current_height()
     if range == None:
         stat = Pool_stats.get_by_height(height)
         if stat is None:
             return None
         return stat.to_json(fields)
     else:
         stats = []
         for stat in Pool_stats.get_by_height(height, range):
             stats.append(stat.to_json(fields))
         return stats