Example #1
 def get(self, height=None, range=None, fields=None):
     LOGGER = lib.get_logger(PROCESS)
     debug and LOGGER.warn("PoolAPI_blocks get height:{}, range:{}, fields:{}".format(height, range, fields))
     # Enforce range limit
     if range is not None:
         range = min(range, pool_blocks_range_limit)
     fields = lib.fields_to_list(fields)
     if height is None or height == 0:
         blocks = Pool_blocks.get_latest(range)
     else:
         blocks = Pool_blocks.get_by_height(height, range)
     if range is None:
         if blocks is None:
             return None
         return blocks.to_json(fields)
     else:
         bl = []
         for block in blocks:
             bl = [block.to_json(fields)] + bl
         return bl
Example #2
def get_blocks_found_data(num_blocks):
    ##
    # Returns data needed to create a *blocks found* chart over the last num_blocks pool blocks
    blocks_found_data = []
    latest_blocks = Pool_blocks.get_last_n(num_blocks)
    for block in iter(latest_blocks):
        blockdata = {}
        blockdata["time"] = block.timestamp.strftime('%s')
        blockdata["height"] = block.height
        blocks_found_data.append(blockdata)
    return blocks_found_data
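
A minimal, self-contained sketch of how the chart data above could be consumed; the helper name is hypothetical and only the list-of-dicts shape built by get_blocks_found_data (epoch-second strings under "time") is assumed.

def average_seconds_between_found_blocks(blocks_found_data):
    # blocks_found_data: [{"time": "<epoch seconds>", "height": <int>}, ...]
    times = sorted(int(b["time"]) for b in blocks_found_data)
    if len(times) < 2:
        return None  # not enough blocks to compute an interval
    return (times[-1] - times[0]) / (len(times) - 1)

# e.g. average_seconds_between_found_blocks(get_blocks_found_data(60))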
Example #3
def calculate(height, avg_range):
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous stats record found"
    avg_over_first_grin_block = Blocks.get_by_height(max(
        height - avg_range, 1))
    assert avg_over_first_grin_block is not None, "Missing grin block: {}".format(
        max(height - avg_range, 1))
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    latest_worker_shares = Worker_shares.get_by_height(height)
    # If no shares are found for this height, we have 2 options:
    # 1) Assume the share data is *delayed*, so don't create the stats record now
    # assert len(latest_worker_shares) > 0, "No worker shares found"
    # 2) Create the record without share data now; when shares are added later, this record will be recalculated
    avg_over_worker_shares = Worker_shares.get_by_height(height, avg_range)
    # Calculate the stats data
    timestamp = grin_block.timestamp
    difficulty = POOL_MIN_DIFF  # XXX TODO - enhance to support multiple difficulties
    gps = 0
    active_miners = 0
    shares_processed = 0
    num_shares_in_range = 0
    if len(avg_over_worker_shares) > 0:
        num_shares_in_range = sum(
            [shares.valid for shares in avg_over_worker_shares])
        gps = grin.calculate_graph_rate(difficulty,
                                        avg_over_first_grin_block.timestamp,
                                        grin_block.timestamp,
                                        num_shares_in_range)
        print("XXX: difficulty={}, {}-{}, len={}".format(
            difficulty, avg_over_first_grin_block.timestamp,
            grin_block.timestamp, num_shares_in_range))
    if latest_worker_shares is not None:
        active_miners = len(latest_worker_shares)  # XXX NO, FIX THIS
        num_valid = sum([shares.valid for shares in latest_worker_shares])
        num_invalid = sum([shares.invalid for shares in latest_worker_shares])
        shares_processed = num_valid + num_invalid

    total_shares_processed = previous_stats_record.total_shares_processed + shares_processed
    total_grin_paid = previous_stats_record.total_grin_paid  # XXX TODO
    total_blocks_found = previous_stats_record.total_blocks_found
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    return Pool_stats(height=height,
                      timestamp=timestamp,
                      gps=gps,
                      active_miners=active_miners,
                      shares_processed=shares_processed,
                      total_shares_processed=total_shares_processed,
                      total_grin_paid=total_grin_paid,
                      total_blocks_found=total_blocks_found)
Example #4
def calculate(height, window_size):
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous Pool_stats record found"
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    window = Worker_shares.get_by_height(height, window_size)
    #    assert window[-1].height - window[0].height >= window_size, "Failed to get proper window size"
    #    print("Sanity: window size:  {} vs  {}".format(window[-1].height - window[0].height, window_size))
    # Calculate the stats data
    timestamp = grin_block.timestamp
    active_miners = len(list(set([s.user_id for s in window])))
    print("active_miners = {}".format(active_miners))
    # Keep track of share totals - sum counts of all share sizes submitted for this block
    num_shares_processed = 0
    share_counts = {}
    for ws in Worker_shares.get_by_height(height):
        num_shares_processed += ws.num_shares()
        for size in ws.sizes():
            size_str = "{}{}".format("C", size)
            if size_str not in share_counts:
                share_counts[size_str] = {"valid": 0, "invalid": 0, "stale": 0}
            share_counts[size_str] = {
                "valid": share_counts[size_str]["valid"] + ws.num_valid(size),
                "invalid":
                share_counts[size_str]["invalid"] + ws.num_invalid(size),
                "stale": share_counts[size_str]["stale"] + ws.num_stale(size)
            }
    print("num_shares_processed this block= {}".format(num_shares_processed))
    total_shares_processed = previous_stats_record.total_shares_processed + num_shares_processed
    total_blocks_found = previous_stats_record.total_blocks_found
    # Calculate estimated GPS for all sizes with shares submitted
    all_gps = estimate_gps_for_all_sizes(window)
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    new_stats = Pool_stats(
        height=height,
        timestamp=timestamp,
        active_miners=active_miners,
        share_counts=share_counts,
        shares_processed=num_shares_processed,
        total_blocks_found=total_blocks_found,
        total_shares_processed=total_shares_processed,
        dirty=False,
    )
    print("all_gps for all pool workers")
    pp.pprint(all_gps)
    for gps_est in all_gps:
        gps_rec = Gps(edge_bits=gps_est[0], gps=gps_est[1])
        new_stats.gps.append(gps_rec)
    sys.stdout.flush()
    return new_stats
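
To illustrate the share_counts structure accumulated above, here is a small standalone sketch with hypothetical counts; only the nested dict shape ("C<edge_bits>" keys with valid/invalid/stale counters) is taken from the example.

# Hypothetical share_counts as built by the loop above
share_counts = {
    "C29": {"valid": 120, "invalid": 3, "stale": 7},
    "C31": {"valid": 45, "invalid": 0, "stale": 1},
}
# Total shares of all sizes and outcomes
shares_processed = sum(sum(counts.values()) for counts in share_counts.values())
print(shares_processed)  # 176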
Example #5
def main():
    global LOGGER
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    # XXX All in one db transaction....
    # Get unlocked blocks from the db
    unlocked_blocks = Pool_blocks.get_all_unlocked()
    unlocked_blocks = [blk.height for blk in unlocked_blocks]
    for height in unlocked_blocks:
        try:
            LOGGER.warn("Processing unlocked block: {}".format(height))
            # Call the library routine to get this block's payout map
            payout_map = pool.calculate_block_payout_map(
                height, PPLNS_WINDOW, LOGGER, False)
            #print("payout_map = {}".format(payout_map))
            # Make payments based on the workers total share_value
            Pool_blocks.setState(height, "paid")
            database.db.getSession().commit()
            for user_id, payment_amount in payout_map.items():
                # Add worker rewards to pool account balance
                LOGGER.warn("Credit to user: {} = {}".format(
                    user_id, payment_amount))
                worker_utxo = Pool_utxo.credit_worker(user_id, payment_amount)
                # Worker_stats accounting and running totals
                #latest_worker_stats = Worker_stats.get_latest_by_id(user_id)
                #latest_worker_stats.dirty = True
            database.db.getSession().commit()

        except Exception as e:
            database.db.getSession().rollback()
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.format_exc()))

    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
Example #6
 def get(self, id, height=None, range=None, fields=None):
     global database
     #database = lib.get_db()
     fields = lib.fields_to_list(fields)
     # AUTH FILTER
     if id != g.user.id:
         response = jsonify(
             {'message': 'Not authorized to access data for other users'})
         response.status_code = 403
         return response
     if height is None or height == 0:
         blocks = Pool_blocks.get_latest(range, id)
     else:
         blocks = Pool_blocks.get_by_height(height, range, id)
     if range is None:
         if blocks is None:
             return None
         return blocks.to_json(fields, True)
     else:
         bl = []
         for block in blocks:
             bl.append(block.to_json(fields, True))
         return bl
Example #7
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get the list of pool_blocks that are
    # old enough to unlock and
    # are not orphan blocks

    # XXX TODO: The node may not be synced, may need to wait?

    block_locktime = int(CONFIG[PROCESS]["block_locktime"])
    block_expiretime = int(CONFIG[PROCESS]["block_expiretime"])
    LOGGER.warn("using locktime: {}, expiretime: {}".format(
        block_locktime, block_expiretime))

    latest = grin.blocking_get_current_height()
    LOGGER.warn("Latest: {}".format(latest))

    new_poolblocks = Pool_blocks.get_all_new()
    for pb in new_poolblocks:
        if pb.height < (latest - block_expiretime):
            # Don't re-process very old blocks - protection against duplicate payouts.
            LOGGER.error("Processed expired pool block at height: {}".format(
                pb.height))
            pb.state = "expired"
            continue
        response = grin.get_block_by_height(pb.height)
        if response is None:
            # Unknown.  Leave as "new" for now and attempt to validate next run
            LOGGER.error("Failed to get block {}".format(pb.height))
            continue
        if int(response["header"]["nonce"]) != int(pb.nonce):
            LOGGER.warn("Processed orphan pool block at height: {}".format(
                pb.height))
            pb.state = "orphan"
            continue
        if pb.height < (latest - block_locktime):
            # This block seems valid, and old enough to unlock
            LOGGER.warn("Unlocking pool block at height: {}".format(pb.height))
            pb.state = "unlocked"
        sys.stdout.flush()

    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
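
The per-block state transition applied above can be summarized as a small pure function. This is a hedged sketch (the function name is hypothetical), assuming the same expiretime/locktime semantics and the header dict returned by grin.get_block_by_height.

def classify_new_pool_block(pb_height, pb_nonce, header, latest,
                            block_locktime, block_expiretime):
    # Returns the next state for a "new" pool block, or None to leave it unchanged.
    if pb_height < (latest - block_expiretime):
        return "expired"   # too old to re-process; protects against duplicate payouts
    if header is None:
        return None        # node did not return the block; retry on the next run
    if int(header["nonce"]) != int(pb_nonce):
        return "orphan"    # our candidate is not on the main chain
    if pb_height < (latest - block_locktime):
        return "unlocked"  # mature enough to be paid out
    return None            # still within the locktime; check again later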
Example #8
def main():
    global LOGGER
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    latest_block = 0

    # XXX All in one db transaction....
    # Get unlocked blocks from the db
    unlocked_blocks = Pool_blocks.get_all_unlocked()
    database.db.getSession().commit()
    for pb in unlocked_blocks:
        try:
            LOGGER.warn("Processing unlocked block: {}".format(pb))
            if pb.height > latest_block:
                latest_block = pb.height
            # Get Worker_stats of this block to calculate reward for each worker
            worker_stats = Worker_stats.get_by_height(pb.height)
            # Calculate Payment info:
            if len(worker_stats) > 0:
                # Calculate reward/share:
                # XXX TODO: Enhance
                #  What algorithm to use?  Maybe: https://slushpool.com/help/manual/rewards
                r_per_g = REWARD / sum([st.gps for st in worker_stats])
                for stat in worker_stats:
                    # Calculate reward
                    worker_rewards = stat.gps * r_per_g
                    # Add or create worker rewards
                    worker_utxo = Pool_utxo.credit_worker(
                        stat.worker, worker_rewards)
                    LOGGER.warn("Credit to user: {} = {}".format(
                        stat.worker, worker_rewards))
            # Mark the pool_block state="paid" (maybe "processed" would be more accurate?)
            pb.state = "paid"
            database.db.getSession().commit()
        except Exception as e:
            database.db.getSession().rollback()
            LOGGER.error("Something went wrong: {}".format(e))

    #database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
Example #9
def get_graph_rate_data(num_blocks):
    ##
    # Returns data needed to create a *graph rate* chart over the last num_blocks pool blocks
    graph_rate_data = []
    latest_blocks = Pool_blocks.get_last_n(num_blocks)
    for i in range(0, num_blocks - 5):
        # rolling 5-block window
        ratedata = {}
        ts1 = latest_blocks[i].timestamp
        ts2 = latest_blocks[i + 5].timestamp
        difficulty = latest_blocks[i + 5].total_difficulty - latest_blocks[
            i + 4].total_difficulty  # XXX TODO: This isn't right
        gps = calculate_graph_rate(difficulty, ts1, ts2, 5)
        ratedata["height"] = latest_blocks[i + 5].height
        ratedata["gps"] = gps
        ratedata["timestamp"] = ts2.strftime('%s')
        graph_rate_data.append(ratedata)
    return graph_rate_data
Example #10
 def addPoolBlock(self, share):
     if not share.is_valid:
         return
     new_pool_block = Pool_blocks(hash=share.hash,
                                  height=share.height,
                                  nonce=share.nonce,
                                  actual_difficulty=share.share_difficulty,
                                  net_difficulty=share.network_difficulty,
                                  timestamp=share.timestamp,
                                  found_by=share.found_by,
                                  state="new")
     duplicate = lib.get_db().db.createDataObj_ignore_duplicates(
         new_pool_block)
     if duplicate:
         self.LOGGER.warn("Failed to add duplicate Pool Block: {}".format(
             new_pool_block.height))
     else:
         self.LOGGER.warn("Added Pool Block: {}".format(
             new_pool_block.height))
Example #11
def get_stats():
    ##
    # Get current pool stats based on recent pool blocks from our DB
    stats_json = {}
    last_n = 10  # Look at the last N pool blocks to calculate the avg time between them
    # Get the poolblocks from the DB
    latest_blocks = Pool_blocks.get_last_n(last_n)
    # Latest poolblock height
    stats_json["height"] = latest_blocks[-1].height
    # Latest poolblock hash
    stats_json["latest_hash"] = latest_blocks[-1].hash
    # Latest poolblock timestamp
    stats_json["latest_timestamp"] = latest_blocks[-1].timestamp.strftime('%s')
    # Avg time between poolblocks
    found_every = (latest_blocks[-1].timestamp -
                   latest_blocks[0].timestamp).total_seconds() / (len(latest_blocks) - 1)
    stats_json["found_every"] = found_every
    # XXX TODO:
    #    # Pool graph rate
    #    stats_json["graph_rate"] = calculate_graph_rate(latest_difficulty, latest_blocks[0].timestamp, latest_blocks[-1].timestamp, last_n)
    return stats_json
Example #12
def calculate(height, window_size):
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous Pool_stats record found"
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    window = Worker_shares.get_by_height(height, window_size)
    # Calculate the stats data
    timestamp = grin_block.timestamp
    active_miners = len(list(set([s.worker for s in window])))
    print("active_miners = {}".format(active_miners))
    # Keep track of share totals - sum counts of all share sizes submitted for this block
    shares_processed = 0
    if len(window) > 0:
        shares_processed = window[-1].num_shares()
    print("shares_processed this block= {}".format(shares_processed))
    total_shares_processed = previous_stats_record.total_shares_processed + shares_processed
    total_grin_paid = previous_stats_record.total_grin_paid  # XXX TODO
    total_blocks_found = previous_stats_record.total_blocks_found
    # Calculate estimated GPS for all sizes with shares submitted
    all_gps = estimate_gps_for_all_sizes(window)
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    new_stats = Pool_stats(
        height=height,
        timestamp=timestamp,
        active_miners=active_miners,
        shares_processed=shares_processed,
        total_blocks_found=total_blocks_found,
        total_shares_processed=total_shares_processed,
        total_grin_paid=total_grin_paid,
        dirty=False,
    )
    print("all_gps for all pool workers")
    pp.pprint(all_gps)
    for gps_est in all_gps:
        gps_rec = Gps(edge_bits=gps_est[0], gps=gps_est[1])
        new_stats.gps.append(gps_rec)
    sys.stdout.flush()
    return new_stats
Example #13
def addPoolBlock(logger, timestamp, height, hash, found_by, serverid):
    global POOLBLOCK_MUTEX
    POOLBLOCK_MUTEX.acquire()
    database = lib.get_db()
    try:
        logger.warn(
            "Adding A PoolBlock: Timestamp: {}, ServerID: {}, Height: {}, Hash: {}"
            .format(timestamp, serverid, height, hash))
        state = "new"
        this_block = Blocks.get_by_height(height)
        while this_block is None:
            this_block = Blocks.get_by_height(height)
            time.sleep(1)
        nonce = this_block.nonce
        actual_difficulty = grin.difficulty(this_block.hash,
                                            this_block.edge_bits,
                                            this_block.secondary_scaling)
        net_difficulty = grin.get_network_difficulty(height)
        # Create the DB record
        new_pool_block = Pool_blocks(hash=hash,
                                     height=height,
                                     nonce=nonce,
                                     actual_difficulty=actual_difficulty,
                                     net_difficulty=net_difficulty,
                                     timestamp=timestamp,
                                     found_by=found_by,
                                     state=state)
        duplicate = lib.get_db().db.createDataObj_ignore_duplicates(
            new_pool_block)
        if duplicate:
            logger.warn(
                "Failed to add duplicate Pool Block: {}".format(height))
        else:
            logger.warn("Added Pool Block: {}".format(height))
    finally:
        POOLBLOCK_MUTEX.release()
Example #14
def main():
    global LOGGER
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    latest_block = 0

    # XXX All in one db transaction....
    # Get unlocked blocks from the db
    unlocked_blocks = Pool_blocks.get_all_unlocked()
    database.db.getSession().commit()
    for pb in unlocked_blocks:
        try:
            LOGGER.warn("Processing unlocked block: {}".format(pb))
            if pb.height > latest_block:
                latest_block = pb.height
            # Get Worker_stats of this block + range to calculate reward for each worker
            worker_shares_window = Worker_shares.get_by_height(pb.height, PPLNS_WINDOW)
            print("worker_shares_window = {}".format(worker_shares_window))
            # Calculate Payment info:
            if len(worker_shares_window) > 0:
                # Calculate reward/share:
                # XXX TODO: Enhance
                #  What algorithm to use?  Maybe: https://slushpool.com/help/manual/rewards
                # For now, some variation on pplns

                # Sum up the number of each size share submitted by each user
                shares_count_map = {}
                for worker_shares_rec in worker_shares_window:
                    if worker_shares_rec.worker not in shares_count_map:
                        shares_count_map[worker_shares_rec.worker] = {}
                    for pow_size in worker_shares_rec.sizes():
                        print("pow_size = {}".format(pow_size))
                        if pow_size not in shares_count_map[worker_shares_rec.worker]:
                            shares_count_map[worker_shares_rec.worker][pow_size] = 0
                        shares_count_map[worker_shares_rec.worker][pow_size] += worker_shares_rec.num_valid(pow_size)
                print("Shares Count Map:")
                pp.pprint(shares_count_map)

                # Normalize and sum each workers shares to create a "share value"
                total_value = 0
                for worker, worker_shares_count in shares_count_map.items():
                    print("worker: {}, worker_shares_count: {}".format(worker, worker_shares_count))
                    sizes = list(worker_shares_count.keys())
                    print("sizes: {}".format(sizes))
                    shares_count_map[worker]["value"] = 0
                    value = 0
                    for size, count in worker_shares_count.items():
                        if size == 29:
                            value += float(count) * .33
                        else:
                            value += float(count)
                    # Accumulate this worker's value once, after summing all sizes
                    total_value += value
                    shares_count_map[worker]["value"] = value
                    print("Worker {} value: {}".format(worker, value))
                
                # Make payments based on the workers total share_value
                for worker, worker_shares_count in shares_count_map.items():
                    worker_rewards = REWARD * worker_shares_count["value"] / total_value
                    # Add or create worker rewards
                    worker_utxo = Pool_utxo.credit_worker(worker, worker_rewards)
                    LOGGER.warn("Credit to user: {} = {}".format(worker, worker_rewards))
            # Mark the pool_block state="paid" (maybe "processed" would be more accurate?)
            pb.state = "paid"
            database.db.getSession().commit()
        except Exception as e:
            database.db.getSession().rollback()
            LOGGER.error("Something went wrong: {} - {}".format(e, traceback.print_exc()))

    #database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
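
A worked sketch of the share-value weighting used above (C29 shares weighted at 0.33, all other sizes at 1.0); the worker names, share counts, and reward amount are hypothetical.

REWARD = 60_000_000_000  # hypothetical block reward in nanogrin
values = {
    "workerA": 100 * 0.33,  # 100 valid C29 shares
    "workerB": 20 * 1.0,    # 20 valid C31 shares
}
total_value = sum(values.values())  # 53.0
payouts = {w: REWARD * v / total_value for w, v in values.items()}
print(payouts)  # workerA ~37.36e9, workerB ~22.64e9; together they sum to REWARD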
Example #15
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get the list of pool_blocks that are
    # old enough to unlock and
    # are not orphan blocks

    # XXX TODO: The node may not be synced, may need to wait?

    block_locktime = int(CONFIG[PROCESS]["block_locktime"])
    block_expiretime = int(CONFIG[PROCESS]["block_expiretime"])
    LOGGER.warn("using locktime: {}, expiretime: {}".format(block_locktime, block_expiretime))

    latest = grin.blocking_get_current_height()
    LOGGER.warn("Latest: {}".format(latest))

    # Get outputs from the wallet
    wallet_outputs = wallet.retrieve_outputs(refresh=True)
    wallet_outputs_map = wallet.outputs_to_map_by_height(wallet_outputs)
    wallet_output_heights = list(wallet_outputs_map.keys())
    wallet_output_heights.sort()
    #print("wallet_output_heights = {}".format(wallet_output_heights))

    new_poolblocks = Pool_blocks.get_all_new()
    for pb in new_poolblocks:
        if pb.height < (latest - block_expiretime):
            # Don't re-process very old blocks - protection against duplicate payouts.
            LOGGER.error("Processed expired pool block at height: {}".format(pb.height))
            pb.state = "expired"
            continue
        response = grin.get_block_by_height(pb.height)
        # Check for unknown block
        if response is None:
            # Unknown.  Leave as "new" for now and attempt to validate next run
            LOGGER.error("Failed to get block {}".format(pb.height))
            continue
        # Check for orphans
        if int(response["header"]["nonce"]) != int(pb.nonce):
            LOGGER.warn("Processed orphan pool block at height: {}".format(pb.height))
            pb.state = "orphan"
            continue
#        # Check that we have a coinbase output in the wallet for this block
#        if pb.height not in wallet_output_heights:
#            LOGGER.warn("Wallet has no output for pool block at height: {}".format(pb.height))
#            pb.state = "no_wallet_output"
#            continue
        # Check if its old enough to be mature
        if pb.height < (latest - block_locktime):
            # This block seems valid, and old enough to unlock
            LOGGER.warn("Unlocking pool block at height: {}".format(pb.height))
            pb.state = "unlocked"
        sys.stdout.flush()

    # db.set_last_run(PROCESS, str(time.time()))
    database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
Example #16
 def get(self, id, height=None, range=None):
     LOGGER = lib.get_logger(PROCESS)
     if id != g.user.id:
         response = jsonify(
             {'message': 'Not authorized to access data for other users'})
         response.status_code = 403
         return response
     debug and LOGGER.warn("EstimateApi_payment get id:{} height:{}".format(
         id, height))
     id_str = str(id)
     if height is None:
         # Immature Balance Estimate
         LOGGER.warn("Immature Balance Estimate")
         # Get a list of all new and unlocked blocks
         unlocked_blocks = Pool_blocks.get_all_unlocked()
         unlocked_blocks_h = [blk.height for blk in unlocked_blocks]
         #LOGGER.warn("EstimateApi_payment unlocked blocks: {}".format(unlocked_blocks))
         new_blocks = Pool_blocks.get_all_new()
         new_blocks_h = [blk.height for blk in new_blocks]
         #LOGGER.warn("EstimateApi_payment new blocks: {}".format(new_blocks))
         total = 0
         for height in unlocked_blocks_h + new_blocks_h:
             debug and print("Estimate block at height: {}".format(height))
             payout_map = pool.get_block_payout_map_estimate(height, LOGGER)
             if payout_map is not None and id_str in payout_map:
                 total = total + payout_map[id_str]
         return {"immature": total}
     if isinstance(height, str):
         if height == "next":
             # Next block estimate
             debug and LOGGER.warn("Next block estimate")
             estimate = 0
             payout_map = pool.get_block_payout_map_estimate(height, LOGGER)
             if payout_map is None:
                 estimate = "TBD"
             elif id_str in payout_map:
                 estimate = payout_map[id_str]
             else:
                 estimate = 0
             return {"next": estimate}
         else:
             response = jsonify({'message': 'Invalid Request'})
             response.status_code = 400
             return response
     # Block Reward estimate
     if range is None:
         # One specific block estimate
         debug and LOGGER.warn("One specific block estimate")
         estimate = 0
         payout_map = pool.get_block_payout_map_estimate(height, LOGGER)
         if payout_map is not None:
             if id_str in payout_map.keys():
                 estimate = payout_map[id_str]
             else:
                 estimate = 0
         else:
             # Maybe this is a pool block but we didn't estimate it yet
             pb = Pool_blocks.get_by_height(height)
             if pb is not None:
                 estimate = "TBD"
         str_height = str(height)
         return {str_height: estimate}
     # Range of pool block reward estimates
     debug and LOGGER.warn("Range of blocks estimate")
     # Enforce range limit
     range = min(range, pool_blocks_range_limit)
     # Get the list of pool block(s) heights
     if height == 0:
         blocks = Pool_blocks.get_latest(range)
     else:
         blocks = Pool_blocks.get_by_height(height, range)
     block_heights = [pb.height for pb in blocks]
     # Get estimates for each of the blocks
     estimates = {}
     for height in block_heights:
         estimate = 0
         payout_map = pool.get_block_payout_map_estimate(height, LOGGER)
         if payout_map is None:
             estimate = "TBD"
         elif id_str in payout_map:
             estimate = payout_map[id_str]
         else:
             estimate = 0
         str_height = str(height)
         estimates[str_height] = estimate
     return estimates
Example #17
def calculate_block_payout_map(height, window_size, logger, estimate=False):
    global reward_estimate_mutex
    reward_estimate_mutex.acquire()
    try:
        if estimate == True:
            cached_map = get_block_payout_map_estimate(height, logger)
            if cached_map is not None:
                return cached_map
        # Get pool_block record and check block state
        #print("getting the pool bloch: {}".format(height))
        #sys.stdout.flush()
        poolblock = Pool_blocks.get_by_height(height)
        if poolblock is None:
            return {}
        #print("The pool block {}".format(poolblock.to_json()))
        #sys.stdout.flush()
        if estimate == True:
            if poolblock.state != "new" and poolblock.state != "unlocked":
                return {}
        else:
            if poolblock.state != "unlocked":
                return {}
        # Get an array with share counts for each block in the window
        shares = Worker_shares.get_by_height(height, window_size)
        #print("share data in window = {}".format(shares))
        #sys.stdout.flush()
        # Get total value of this block: reward + fees
        reward = get_reward_by_block(height)
        #print("Reward for block {} = {}".format(height, reward))
        #sys.stdout.flush()
        # Get the "secondary_scaling" value for this block
        scale = get_scale_by_block(height)
        #print("Secondary Scaling value for block = {}".format(scale))
        #sys.stdout.flush()
        # build a map of total shares of each size for each user
        shares_count_map = get_share_counts(shares)
        # DUMMY DATA
        #    scale = 529
        #    shares_count_map = {
        #            1: {29: 50},
        #            2: {29: 25, 31: 10},
        #            3: {32: 5},
        #        }

        #print("Shares Count Map:")
        #sys.stdout.flush()
        #pp = pprint.PrettyPrinter(indent=4)
        #pp.pprint(shares_count_map)
        #sys.stdout.flush()
        # Calculate total value of all shares
        total_value = calculate_total_share_value(shares_count_map, scale)
        #print("total share value in payment window: {}".format(total_value))
        #sys.stdout.flush()
        block_payout_map = {}
        # For each user with shares in the window, calculate payout and add to block_payout_map
        for user_id, worker_shares_count in shares_count_map.items():
            #print("xxx: {} {}".format(user_id, worker_shares_count))
            #sys.stdout.flush()
            # Calculate the total share value from this worker
            total_worker_value = calculate_total_share_value(
                {user_id: worker_shares_count}, scale)
            worker_payment = total_worker_value / total_value * reward
            #print("worker_payment: {}".format(worker_payment/1000000000))
            #sys.stdout.flush()
            block_payout_map[user_id] = worker_payment
        #print("block_payout_map = {}".format(block_payout_map))
        #sys.stdout.flush()
        if estimate == True:
            payout_estimate_map_key = "payout-estimate-for-block-" + str(
                height)
            try:
                # Estimates are cached in redis, save it there if we can
                redisdb = lib.get_redis_db()
                #redisdb.hmset(payout_estimate_map_key, json.dumps(block_payout_map))
                redisdb.set(payout_estimate_map_key,
                            pickle.dumps(block_payout_map))
            except Exception as e:
                logger.warn(
                    "block_payout_map cache insert failed: {} - {}".format(
                        payout_estimate_map_key, repr(e)))
    except Exception as e:
        logger.error("Estimate went wrong: {} - {}".format(
            e, traceback.format_exc()))
    finally:
        reward_estimate_mutex.release()
    #logger.warn("calculate_map: {}".format(block_payout_map))
    return block_payout_map
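
Because the estimate above is cached in redis with pickle, a consumer would read it back roughly as follows. This is a hedged sketch: the function name is hypothetical, and it only assumes a standard redis client plus the key naming used by the writer above.

import pickle

def read_cached_payout_estimate(redisdb, height):
    # Key naming mirrors the writer above: "payout-estimate-for-block-<height>"
    cached = redisdb.get("payout-estimate-for-block-" + str(height))
    if cached is None:
        return None
    return pickle.loads(cached)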
Example #18
def calculate_block_payout_map(height,
                               window_size,
                               pool_fee,
                               logger,
                               estimate=False):
    block_payout_map = {}
    # Get the grinpool admin user ID for pool fee
    pool_admin_user_id = 1
    # Total the payments for sanity check
    total_payments_this_block = 0
    try:
        admin_user = os.environ["GRIN_POOL_ADMIN_USER"]
        pool_admin_user_id = Users.get_id_by_username(admin_user)
        logger.warn("Pool Fee goes to admin account with id={}".format(
            pool_admin_user_id))
    except Exception as e:
        logger.warn(
            "We dont have Admin account info, using default id={}: {}".format(
                pool_admin_user_id, e))
    # Create the payout map
    # Get pool_block record and check block state
    #print("getting the pool block: {}".format(height))
    #sys.stdout.flush()
    if estimate == False:
        poolblock = Pool_blocks.get_by_height(height)
        if poolblock is None or poolblock.state != "unlocked":
            return {}
        #print("The pool block {}".format(poolblock.to_json()))
    #sys.stdout.flush()
    # Get total value of this block: reward + tx fees
    reward = get_reward_by_block(height)
    print("Reward for block {} = {}".format(height, reward))
    sys.stdout.flush()
    # The pool's fee
    the_pools_fee = reward * pool_fee
    block_payout_map[pool_admin_user_id] = the_pools_fee
    reward = reward - the_pools_fee
    logger.warn("Pool Fee = {}".format(block_payout_map))
    # Get the "secondary_scaling" value for this block
    scale = get_scale_by_block(height)
    #print("Secondary Scaling value for block = {}".format(scale))
    #sys.stdout.flush()
    # build a map of total shares of each size for each user
    shares_count_map = get_share_counts(height, window_size)
    # DUMMY DATA
    #    scale = 529
    #    shares_count_map = {
    #            1: {29: 50},
    #            2: {29: 25, 31: 10},
    #            3: {32: 5},
    #        }

    #print("Shares Count Map:")
    #sys.stdout.flush()
    #pp = pprint.PrettyPrinter(indent=4)
    #pp.pprint(shares_count_map)
    #sys.stdout.flush()
    # Calculate total value of all shares
    total_value = calculate_total_share_value(shares_count_map, scale)
    print("total share value in payment window: {}".format(total_value))
    sys.stdout.flush()
    # For each user with shares in the window, calculate payout and add to block_payout_map
    for user_id, worker_shares_count in shares_count_map.items():
        #print("xxx: {} {}".format(user_id, worker_shares_count))
        #sys.stdout.flush()
        # Calculate the total share value from this worker
        total_worker_value = calculate_total_share_value(
            {user_id: worker_shares_count}, scale)
        if total_value * reward > 0:
            worker_payment = int(total_worker_value / total_value * reward)
        else:
            # For next block estimate, there may be no shares submitted to the pool
            worker_payment = 0
        total_payments_this_block += worker_payment
        print("worker_payment: {}".format(worker_payment / 1000000000))
        sys.stdout.flush()
        if user_id in block_payout_map.keys():
            block_payout_map[user_id] += worker_payment
        else:
            block_payout_map[user_id] = worker_payment
    logger.warn(
        "Total Grin Paid Out this block: {} + the_pools_fee: {} ".format(
            total_payments_this_block, the_pools_fee))
    print("block_payout_map = {}".format(block_payout_map))
    #sys.stdout.flush()
    #logger.warn("calculate_map: {}".format(block_payout_map))
    return block_payout_map
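
For clarity, the pool-fee arithmetic above with hypothetical numbers (amounts in nanogrin):

reward = 60_000_000_000                  # hypothetical block reward + tx fees
pool_fee = 0.0075                        # 0.75%
the_pools_fee = reward * pool_fee        # 450_000_000, credited to the admin account
miner_reward = reward - the_pools_fee    # 59_550_000_000, split among workers by share value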
Example #19
def main():
    global CONFIG
    global LOGGER
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Number of blocks of share data used to calculate rewards
    PPLNG_WINDOW_SIZE = 60
    try:
        PPLNG_WINDOW_SIZE = int(os.environ["PPLNG_WINDOW_SIZE"])
    except Exception as e:
        LOGGER.error(
            "Failed to get PPLNG_WINDOW_SIZE from the environment: {}.  Using default size of {}"
            .format(e, PPLNG_WINDOW_SIZE))

    POOL_FEE = 0.0075
    try:
        POOL_FEE = float(CONFIG[PROCESS]["pool_fee"])
    except Exception as e:
        LOGGER.error(
            "Failed to get POOL_FEE from the config: {}.  Using default fee of {}"
            .format(e, POOL_FEE))

    # Keep track of "next" block estimated
    next_height_estimated = 0

    # Connect to DB
    database = lib.get_db()

    while True:
        # Generate pool block reward estimates for all new and unlocked blocks
        try:
            database.db.initializeSession()
            # A recent height for which all worker shares are available
            next_height = Blocks.get_latest().height - 5
            unlocked_blocks = Pool_blocks.get_all_unlocked()
            new_blocks = Pool_blocks.get_all_new()
            unlocked_blocks_h = [blk.height for blk in unlocked_blocks]
            new_blocks_h = [blk.height for blk in new_blocks]

            need_estimates = unlocked_blocks_h + new_blocks_h
            LOGGER.warn(
                "Will ensure estimate for blocks: {}".format(need_estimates))
            redisdb = lib.get_redis_db()

            # Generate Estimate
            for height in need_estimates:
                if height > next_height:
                    LOGGER.warn(
                        "Delay estimate until we have recent shares availalbe for block: {}"
                        .format(height))
                else:
                    LOGGER.warn("Ensure estimate for block: {}".format(height))
                    # Check if we already have an estimate cached
                    payout_estimate_map_key = key_prefix + str(height)
                    cached_map = redisdb.get(payout_estimate_map_key)
                    if cached_map is None:
                        # We don't have it cached; we need to calculate it and cache it now
                        payout_map = pool.calculate_block_payout_map(
                            height, PPLNG_WINDOW_SIZE, POOL_FEE, LOGGER, True)
                        payout_map_json = json.dumps(payout_map)
                        redisdb.set(payout_estimate_map_key,
                                    payout_map_json,
                                    ex=cache_expire)
                        LOGGER.warn(
                            "Created estimate for block {} with key {}".format(
                                height, payout_estimate_map_key))
                    else:
                        LOGGER.warn(
                            "There is an exiting estimate for block: {}".
                            format(height))

            # Generate estimate for "next" block
            LOGGER.warn(
                "Ensure estimate for next block: {}".format(next_height))
            if next_height_estimated != next_height:
                payout_map = pool.calculate_block_payout_map(
                    next_height, PPLNG_WINDOW_SIZE, POOL_FEE, LOGGER, True)
                payout_map_json = json.dumps(payout_map)
                payout_estimate_map_key = key_prefix + "next"
                redisdb.set(payout_estimate_map_key,
                            payout_map_json,
                            ex=cache_expire)
                next_height_estimated = next_height
                LOGGER.warn("Created estimate for block {} with key {}".format(
                    next_height, payout_estimate_map_key))
            else:
                LOGGER.warn(
                    "There is an exiting next block estimate for : {}".format(
                        next_height))

            LOGGER.warn("Completed estimates")
            database.db.destroySession()
            # Flush debug print statements
            sys.stdout.flush()
        except Exception as e:  # AssertionError as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.format_exc()))
            database.db.destroySession()

        LOGGER.warn("=== Completed {}".format(PROCESS))
        sleep(check_interval)
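
The estimator above stores each payout map as JSON under key_prefix + str(height) (or key_prefix + "next"). A reader on the API side might look roughly like this hedged sketch; the function name is hypothetical and the key_prefix and redis client are assumed to match the writer.

import json

def read_payout_estimate(redisdb, key_prefix, height_or_next):
    # height_or_next: a block height (int) or the literal string "next"
    raw = redisdb.get(key_prefix + str(height_or_next))
    return None if raw is None else json.loads(raw)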
Example #20
def calculate_block_payout_map(height, window_size, pool_fee, logger, estimate=False):
    block_payout_map = {}
    # Get the grinpool admin user ID for pool fee
    pool_admin_user_id = 1
    # Total the payments for sanity check
    total_payments_this_block = 0
    try:
        admin_user = os.environ["GRIN_POOL_ADMIN_USER"]
        pool_admin_user_id = Users.get_id_by_username(admin_user)
        logger.warn("Pool Fee goes to admin account with id={}".format(pool_admin_user_id))
    except Exception as e:
        logger.warn("We dont have Admin account info, using default id={}: {}".format(pool_admin_user_id, e))
    # Create the payout map
    try:
        if estimate == True:
            cached_map = get_block_payout_map_estimate(height, logger)
            if cached_map is not None:
                return cached_map
        # Get pool_block record and check block state
        print("getting the pool block: {}".format(height))
        sys.stdout.flush()
        poolblock = Pool_blocks.get_by_height(height)
        if poolblock is None:
            return {}
        print("The pool block {}".format(poolblock.to_json()))
        sys.stdout.flush()
        if estimate == True:
            if poolblock.state != "new" and poolblock.state != "unlocked":
                return {}
        else:
            if poolblock.state != "unlocked":
                return {}
        # Get total value of this block: reward + tx fees
        reward = get_reward_by_block(height)
        print("Reward for block {} = {}".format(height, reward))
        sys.stdout.flush()
        # The pool's fee
        the_pools_fee = reward * pool_fee
        block_payout_map[pool_admin_user_id] = the_pools_fee
        reward = reward - the_pools_fee
        logger.warn("Pool Fee = {}".format(block_payout_map))
        # Get the "secondary_scaling" value for this block
        scale = get_scale_by_block(height)
        print("Secondary Scaling value for block = {}".format(scale))
        sys.stdout.flush()
        # build a map of total shares of each size for each user
        shares_count_map = get_share_counts(height, window_size)
        # DUMMY DATA
        #    scale = 529
        #    shares_count_map = {
        #            1: {29: 50},
        #            2: {29: 25, 31: 10},
        #            3: {32: 5},
        #        }

        #print("Shares Count Map:")
        #sys.stdout.flush()
        #pp = pprint.PrettyPrinter(indent=4)
        #pp.pprint(shares_count_map)
        #sys.stdout.flush()
        # Calculate total value of all shares
        total_value = calculate_total_share_value(shares_count_map, scale)
        print("total share value in payment window: {}".format(total_value))
        sys.stdout.flush()
        # For each user with shares in the window, calculate payout and add to block_payout_map
        for user_id, worker_shares_count in shares_count_map.items():
            print("xxx: {} {}".format(user_id, worker_shares_count))
            sys.stdout.flush()
            # Calculate the total share value from this worker
            total_worker_value = calculate_total_share_value({user_id:worker_shares_count}, scale)
            worker_payment = total_worker_value / total_value * reward
            total_payments_this_block += worker_payment
            print("worker_payment: {}".format(worker_payment/1000000000))
            sys.stdout.flush()
            if user_id in block_payout_map.keys():
                block_payout_map[user_id] += worker_payment
            else:
                block_payout_map[user_id] = worker_payment
        logger.warn("Total Grin Paid Out this block: {} + the_pools_fee: {} ".format(total_payments_this_block, the_pools_fee))
        print("block_payout_map = {}".format(block_payout_map))
        #sys.stdout.flush()
        if estimate == True:
            payout_estimate_map_key = "payout-estimate-for-block-" + str(height)
            try:
                # Estimates are cached in redis, save it there if we can
                redisdb = lib.get_redis_db()
                #redisdb.hmset(payout_estimate_map_key, json.dumps(block_payout_map))
                redisdb.set(payout_estimate_map_key, pickle.dumps(block_payout_map))
            except Exception as e:
                logger.warn("block_payout_map cache insert failed: {} - {}".format(payout_estimate_map_key, repr(e)))
    except Exception as e:
        logger.error("Estimate went wrong: {} - {}".format(e, traceback.print_exc(e)))
        raise e
    #logger.warn("calculate_map: {}".format(block_payout_map))
    return block_payout_map
Example #21
 def get(self, height=None):
     LOGGER = lib.get_logger(PROCESS)
     debug and LOGGER.warn("PoolAPI_blocksCount get height:{}".format(height))
     count = Pool_blocks.count(height)
     return count
Example #22
def main():
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)

    while True:
        try:
            LOGGER.warn("=== Starting {}".format(PROCESS))

            # Connect to DB
            database = lib.get_db()

            # Get the previous audit record to find its height
            previous_audit_record = Pool_audit.getLatest()
            if previous_audit_record is None:
                previous_audit_record = Pool_audit()
                database.db.createDataObj(previous_audit_record)

            # Create new pool audit record
            audit_record = Pool_audit()

            summary_info = wallet.retrieve_summary_info(refresh=True)

            # Set the height by wallet
            audit_record.height = int(summary_info["last_confirmed_height"])
            # Set pool block count
            audit_record.pool_blocks_count = Pool_blocks.count(
                audit_record.height) - Pool_blocks.count(
                    previous_audit_record.height)
            # Audit pools liability vs equity
            audit_record.equity = int(
                summary_info["amount_currently_spendable"]) + int(
                    summary_info["amount_awaiting_confirmation"])
            audit_record.liability = Pool_utxo.get_liability()
            audit_record.balance = audit_record.equity - audit_record.liability

            # Add payouts value
            payments_made = Pool_payment.get_by_height(
                audit_record.height,
                audit_record.height - previous_audit_record.height)
            audit_record.payouts = sum(
                [payment.amount for payment in payments_made])
            # Add payments value
            pool_credits = Pool_credits.get_by_height(
                audit_record.height,
                audit_record.height - previous_audit_record.height)
            total_credits = 0
            if pool_credits is not None:
                for credit in pool_credits:
                    credits_this_block = sum(credit.credits.values())
                    total_credits += credits_this_block
                    print("credits_this_block: {}, total_credits: {}".format(
                        credits_this_block, total_credits))
                audit_record.payments = total_credits
            else:
                audit_record.payments = 0

            # Add and Commit the audit record
            #LOGGER.warn("Create Audit Record: {}".format(json.dumps(audit_record)))
            database.db.createDataObj(audit_record)

            LOGGER.warn("=== Completed {}".format(PROCESS))
        except Exception as e:
            lib.teardown_db()
            LOGGER.exception("Something went wrong: {} ".format(
                traceback.format_exc()))

        time.sleep(999)
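
The audit arithmetic above reduces to a simple identity; a worked sketch with hypothetical amounts (in nanogrin):

equity = 1_250_000_000_000 + 120_000_000_000  # spendable + awaiting confirmation (wallet)
liability = 1_300_000_000_000                 # total owed to workers (Pool_utxo balances)
balance = equity - liability                  # 70_000_000_000; negative would indicate a shortfall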