def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    ##
    # Update user records in MySQL and REDIS
    database = lib.get_db()
    database.db.initializeSession()

    redisdb = lib.get_redis_db()
    redis_userid_key = "userid."

    id = 1
    try:
        while True:
            thisuser = Users.get_by_id(id)
            if thisuser is None:
                if id > 2358:
                    LOGGER.warn("last id = {}".format(id))
                    break
                id = id + 1
                continue
            if thisuser.username == "bjg62hj8byyksphuw95vqc3f74.lionm1":
                orig_username = thisuser.username
                thisuser.username = "******"
                LOGGER.warn("Updated: {} to {}".format(orig_username,
                                                       thisuser.username))
            if thisuser.username == "[email protected]_d1":
                orig_username = thisuser.username
                thisuser.username = "******"
                LOGGER.warn("Updated: {} to {}".format(orig_username,
                                                       thisuser.username))
            if thisuser.username == "*****@*****.**":
                orig_username = thisuser.username
                thisuser.username = "******"
                LOGGER.warn("Updated: {} to {}".format(orig_username,
                                                       thisuser.username))
            if "." in thisuser.username:
                orig_username = thisuser.username
                # Update mysql
                thisuser.username = thisuser.username.replace(".", "_")
                # Update redis
                redis_key = redis_userid_key + orig_username
                COMMIT and redisdb.delete(redis_key)
                redis_key = redis_userid_key + thisuser.username
                COMMIT and redisdb.set(redis_key, id)
                LOGGER.warn("Updated: {} to {}".format(orig_username,
                                                       thisuser.username))
            id = id + 1
    except Exception as e:
        LOGGER.error("Something went wrong: {} - {}".format(
            e, traceback.format_exc()))

    COMMIT or LOGGER.warn("XXX No Commit - Edit for final run")
    COMMIT and database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
Example 2
def RigDataCommitScheduler(max_lag, commit_interval, logger):
    global RIGDATA_MUTEX
    global RIGDATA_GROUPSIZE
    global REDIS_RIGDATA_KEY
    global REDIS_RIGDATA_EXPIRETIME
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Cant get latest block from database")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            lib.teardown_db()
            RIGDATA_MUTEX.acquire()
            try:
                logger.warn("= Begin RigDataCommitScheduler")
                # Iterate over each rigdata cache key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_RIGDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - RIGDATA_GROUPSIZE - max_lag:
                        # Commit this set of rigdata records
                        logger.warn(
                            "-- RigDataCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_cached_rigdata = redisdb.get(key)
                        redis_cached_rigdata = json.loads(
                            redis_cached_rigdata.decode())
                        for user, rigdata in redis_cached_rigdata.items():
                            redis_key = "{}.{}.{}".format(
                                REDIS_RIGDATA_KEY, share_height, user)
                            if redisdb.exists(redis_key):
                                # XXX TODO
                                logger.warn(
                                    "XXX TODO: DUPLICATE RIGDATA WORKER KEY - MERGE ???"
                                )
                            else:
                                redisdb.set(redis_key,
                                            json.dumps(rigdata),
                                            ex=REDIS_RIGDATA_EXPIRETIME)
                        # Wrote this rigdata to REDIS, so remove the cache record now
                        redisdb.delete(key)
            finally:
                RIGDATA_MUTEX.release()
                logger.warn("= End RigDataCommitScheduler")
            time.sleep(commit_interval)
        except Exception:
            # logger.exception() appends the traceback automatically
            logger.exception("Something went wrong in RigDataCommitScheduler")
            time.sleep(10)
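
The scheduler's core move is enumerating cache keys with a non-blocking SCAN and acting only on those older than a lag cutoff. A hedged sketch of that selection step, assuming redis-py and an illustrative "rigdata-<height>" key scheme rather than the pool's actual REDIS_RIGDATA_KEY:

import redis

r = redis.Redis()
KEY_PREFIX = "rigdata"  # assumed prefix; keys look like "rigdata-<height>"

def keys_ready_to_commit(chain_height, group_size, max_lag):
    # SCAN iterates incrementally, so it won't block Redis like KEYS would
    keys = [k.decode() for k in r.scan_iter(match=KEY_PREFIX + "-*", count=100)]
    cutoff = chain_height - group_size - max_lag
    # Sorted so records are committed oldest-first
    return [k for k in sorted(keys) if int(k.split("-")[1]) < cutoff]
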
Example 3
def get_block_payout_map_estimate(height, logger):
    payout_estimate_map_key = "payout-estimate-for-block-" + str(height)
    try:
        # Estimates are cached in redis, get it from there if we can
        redisdb = lib.get_redis_db()
        cached_map = redisdb.get(payout_estimate_map_key)
        sys.stdout.flush()
        if cached_map is None:
            return None
        #logger.warn("The Map: {}".format(cached_map.decode('utf-8')))
        return json.loads(cached_map.decode('utf-8'))
    except Exception as e:
        logger.warn("block_payout_map Lookup Error: {} - {}".format(
            payout_estimate_map_key, repr(e)))
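
A short usage note: a miss returns None, whether the key is absent or the lookup failed, so callers branch on that and fall back to recalculating. For example, assuming a stdlib logger:

import logging

logger = logging.getLogger("estimates")
estimate = get_block_payout_map_estimate(12345, logger)
if estimate is None:
    logger.warning("no cached estimate for block 12345; will recalculate")
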
Example 4
def get_block_payout_map_estimate(height, logger):
    payout_estimate_map_key = "payout-estimate-for-block-" + str(height)
    try:
        # Estimates are cached in redis, get it from there if we can
        redisdb = lib.get_redis_db()
        cached_map = redisdb.get(payout_estimate_map_key)
        #print("Get the pickled map: {}".format(cached_map))
        sys.stdout.flush()
        if cached_map is None:
            return None
        payout_map = pickle.loads(cached_map)
        return payout_map
    except Exception as e:
        logger.warn("block_payout_map Lookup Error: {} - {}".format(
            payout_estimate_map_key, repr(e)))
Example 5
def RigDataCommitScheduler(max_lag, logger):
    global RIGDATA_MUTEX
    global RIGDATA
    global REDIS_RIGDATA_KEY
    global REDIS_RIGDATA_EXPIRETIME
    while True:
        try:
            redisdb = lib.get_redis_db()
            while True:
                database = lib.get_db()
                chain_height = Blocks.get_latest().height
                logger.warn(
                    "RIGDATA commit scheduler - chain_height = {}".format(
                        chain_height))
                RIGDATA_MUTEX.acquire()
                try:
                    for height in [
                            h for h in RIGDATA.keys()
                            if h < (chain_height - max_lag)
                    ]:
                        logger.warn(
                            "Commit RIGDATA for height: {}".format(height))
                        # Serialize RIGDATA and write to redis
                        for user, rigdata in RIGDATA[height].items():
                            key = "{}.{}.{}".format(REDIS_RIGDATA_KEY, height,
                                                    user)
                            if redisdb.exists(key):
                                logger.warn(
                                    "XXX TODO - MERGE THIS ADDITIONAL SHARE DATA"
                                )
                            else:
                                redisdb.set(key,
                                            json.dumps(rigdata),
                                            ex=REDIS_RIGDATA_EXPIRETIME)
                        RIGDATA.pop(height, None)
                finally:
                    RIGDATA_MUTEX.release()
                    lib.teardown_db()
                time.sleep(30)
        except Exception as e:
            logger.error("Something went wrong: {}\n{}".format(
                e, traceback.format_exc()))
            lib.teardown_db()
            time.sleep(10)
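
Here the per-height data lives in an in-process dict guarded by a mutex. The same flush step written with the lock as a context manager, which drops the acquire/release bookkeeping; a sketch with an illustrative commit callback:

import threading

RIGDATA = {}  # height -> {user: rigdata}
RIGDATA_MUTEX = threading.Lock()

def flush_heights(chain_height, max_lag, commit):
    with RIGDATA_MUTEX:
        ready = [h for h in RIGDATA if h < chain_height - max_lag]
        for height in ready:
            commit(height, RIGDATA.pop(height))

# Flush everything more than 10 blocks behind height 1000
flush_heights(1000, 10, lambda h, data: print("commit", h, data))
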
Example 6
def ShareCommitScheduler(max_lag, commit_interval, logger):
    global SHARES_MUTEX
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Waiting for first block")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            SHARES_MUTEX.acquire()
            try:
                logger.warn("= Begin ShareCommitScheduler")
                # Iterate over each sharedata key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_SHAREDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - max_lag:
                        # Commit this record
                        logger.warn(
                            "-- ShareCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_sharedata = redisdb.get(key)
                        redis_sharedata = json.loads(redis_sharedata.decode())
                        ts_str = redis_sharedata.pop("timestamp",
                                                     str(datetime.utcnow()))
                        ts = datetime.strptime(
                            ts_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                        for worker, worker_shares in redis_sharedata.items():
                            # Get any existing record
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "New share record for worker {} at height {}"
                                    .format(worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                logger.warn(
                                    "Add to existing record for worker {} at height {}"
                                    .format(worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                worker_shares_rec.add_shares(
                                    edge_bits, shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                # Debug
                                logger.warn("Worker Shares: {}".format(
                                    worker_shares_rec))
                        # We wrote this record to mysql, so remove the redis cache
                        database.db.getSession().commit()
                        redisdb.delete(key)
                # Write filler record if needed
                share_height = Worker_shares.get_latest_height()
                if share_height is None:
                    share_height = grin.blocking_get_current_height()
                share_height = share_height + 1
                while share_height < (chain_height - max_lag):
                    logger.warn(
                        "Processed 0 shares in block {} - Creating filler record"
                        .format(share_height))
                    filler_worker_shares_rec = Worker_shares(
                        height=share_height,
                        user_id=1,  # Pool User
                        timestamp=datetime.utcnow(),
                    )
                    database.db.createDataObj(filler_worker_shares_rec)
                    share_height += 1
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
                logger.warn("= End ShareCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            lib.teardown_db()
            logger.exception("Something went wrong: {} ".format(
                traceback.format_exc()))
            time.sleep(10)
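
The filler-record pass at the end guarantees a Worker_shares row for every height, even blocks where the pool saw no shares. The gap arithmetic in isolation; a pure sketch with illustrative heights:

def filler_heights(last_share_height, chain_height, max_lag):
    # Every height after the last recorded one, up to the lag cutoff,
    # needs a filler record
    return list(range(last_share_height + 1, chain_height - max_lag))

# Blocks 101 and 102 recorded no shares and would get filler records:
print(filler_heights(100, 110, 7))  # [101, 102]
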
Example 7
def addWorkerShares(logger, channel, delivery_tag, timestamp, height, worker,
                    workerid, rig_id, agent, difficulty, shares_data):
    global debug
    global SHARES_MUTEX
    global SHARE_RE
    global REDIS_SHAREDATA_KEY
    global REDIS_SHAREDATA_EXPIRETIME
    global RIGDATA_MUTEX
    global REDIS_RIGDATA_KEY
    global RIGDATA_GROUPSIZE
    global REDIS_RIGDATA_CACHE_EXPIRETIME
    logger.warn(
        "New Worker Shares: Height: {}, Worker: {}, RigID: {}, Agent: {}, Difficulty: {}, SharesData: {}"
        .format(height, worker, rig_id, agent, difficulty, shares_data))
    # Parse the sharedata
    # TEST: shares_data = "{29: Shares { edge_bits: 29, accepted: 7, rejected: 0, stale: 0 }, 31: Shares { edge_bits: 31, accepted: 4, rejected: 0, stale: 0 }}"
    all_shares = SHARE_RE.findall(shares_data)
    #logger.warn("addWorkerShares processing all_shares: {}".format(all_shares))

    # JSON round-trips turn dict keys into strings - normalize keys up front
    worker = str(worker)
    rig_id = str(rig_id)
    workerid = str(workerid)

    for shares in all_shares:
        debug and logger.warn("shares: {}".format(shares))
        edge_bits, accepted, rejected, stale = shares
        # Adjust for minimum share difficulty
        accepted = int(accepted) * difficulty
        rejected = int(rejected) * difficulty
        stale = int(stale) * difficulty
        logger.warn(
            "New Worker Shares: Difficulty: {}, Accepted: {}, Rejected: {}, Stale: {}"
            .format(difficulty, accepted, rejected, stale))

        ##
        # Add the data to Aggregated counts of *all rigs* for this worker
        SHARES_MUTEX.acquire()
        try:
            redisdb = lib.get_redis_db()
            redis_key = "{}-{}".format(REDIS_SHAREDATA_KEY, height)
            # Check for existing redis record
            sharedata = redisdb.get(redis_key)
            if sharedata is None:
                # Create a new record
                sharedata = {"timestamp": str(timestamp)}
            else:
                # Load existing record
                sharedata = json.loads(sharedata.decode())
            # Merge in this shares data
            if worker not in sharedata:
                sharedata[worker] = {}
            if edge_bits not in sharedata[worker]:
                sharedata[worker][edge_bits] = {
                    'difficulty': 1,
                    'accepted': 0,
                    'rejected': 0,
                    'stale': 0
                }
            sharedata[worker][edge_bits]['accepted'] += accepted
            sharedata[worker][edge_bits]['rejected'] += rejected
            sharedata[worker][edge_bits]['stale'] += stale
            # Write the record to REDIS
            logger.debug("Write to REDIS: {} - {}".format(
                redis_key, sharedata))
            redisdb.set(redis_key,
                        json.dumps(sharedata),
                        ex=REDIS_SHAREDATA_EXPIRETIME)
            # Debug
            logger.debug("Adding to sharedata cache: {}[{}][{}] = {}".format(
                redis_key, worker, edge_bits, sharedata[worker][edge_bits]))
        finally:
            SHARES_MUTEX.release()

        ##
        # Add the *individual rig* share data for each worker (into redis cache record)
        RIGDATA_MUTEX.acquire()
        group_height = int((height - height % RIGDATA_GROUPSIZE) +
                           RIGDATA_GROUPSIZE)
        try:
            redisdb = lib.get_redis_db()
            redis_key = "{}-{}".format(REDIS_RIGDATA_KEY, group_height)
            # Check for existing redis record
            rigdata = redisdb.get(redis_key)
            if rigdata is None:
                # Create a new record
                rigdata = {}
            else:
                # Load existing record
                rigdata = json.loads(rigdata.decode())
                logger.debug(
                    "Existing rigdata cache record: {}".format(rigdata))
            if worker not in rigdata:
                rigdata[worker] = {}
            if rig_id not in rigdata[worker]:
                rigdata[worker][rig_id] = {}
            if workerid not in rigdata[worker][rig_id]:
                rigdata[worker][rig_id][workerid] = {}
            if edge_bits not in rigdata[worker][rig_id][workerid]:
                logger.debug(
                    "New rigdata cache record: height {}  group_height {} worker_id {}"
                    .format(height, group_height, worker))
                rigdata[worker][rig_id][workerid][edge_bits] = {
                    'difficulty': 1,
                    'agent': agent,
                    'accepted': 0,
                    'rejected': 0,
                    'stale': 0
                }
            rigdata[worker][rig_id][workerid][edge_bits][
                'accepted'] += accepted
            rigdata[worker][rig_id][workerid][edge_bits][
                'rejected'] += rejected
            rigdata[worker][rig_id][workerid][edge_bits]['stale'] += stale
            # Write the record to REDIS
            logger.debug("Write to REDIS: {} - {}".format(redis_key, rigdata))
            redisdb.set(redis_key,
                        json.dumps(rigdata),
                        ex=REDIS_RIGDATA_CACHE_EXPIRETIME)
            logger.debug(
                "Adding to rigdata cache: {}[{}][{}][{}][{}] = {}".format(
                    redis_key, worker, rig_id, workerid, edge_bits,
                    rigdata[worker][rig_id][workerid][edge_bits]))
        finally:
            RIGDATA_MUTEX.release()
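
The chained membership checks that build the nested worker/rig_id/workerid/edge_bits record can be compressed with setdefault. A sketch under the same assumptions (string keys, one counter dict per edge_bits):

def bump(rigdata, worker, rig_id, workerid, edge_bits,
         accepted, rejected, stale, agent=""):
    # setdefault walks/creates the nested dicts in one pass
    rec = (rigdata.setdefault(worker, {})
                  .setdefault(rig_id, {})
                  .setdefault(workerid, {})
                  .setdefault(edge_bits, {"difficulty": 1, "agent": agent,
                                          "accepted": 0, "rejected": 0,
                                          "stale": 0}))
    rec["accepted"] += accepted
    rec["rejected"] += rejected
    rec["stale"] += stale

rigdata = {}
bump(rigdata, "7", "rig1", "0", "29", 7, 0, 0)
print(rigdata["7"]["rig1"]["0"]["29"]["accepted"])  # 7
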
Example 8
def calculate_block_payout_map(height, window_size, logger, estimate=False):
    global reward_estimate_mutex
    # Initialize up front so the final return is safe even if we fail early
    block_payout_map = {}
    reward_estimate_mutex.acquire()
    try:
        if estimate:
            cached_map = get_block_payout_map_estimate(height, logger)
            if cached_map is not None:
                return cached_map
        # Get pool_block record and check block state
        poolblock = Pool_blocks.get_by_height(height)
        if poolblock is None:
            return {}
        #print("The pool block {}".format(poolblock.to_json()))
        #sys.stdout.flush()
        if estimate == True:
            if poolblock.state != "new" and poolblock.state != "unlocked":
                return {}
        else:
            if poolblock.state != "unlocked":
                return {}
        # Get an array with share counts for each block in the window
        shares = Worker_shares.get_by_height(height, window_size)
        # Get total value of this block: reward + fees
        reward = get_reward_by_block(height)
        # Get the "secondary_scaling" value for this block
        scale = get_scale_by_block(height)
        # Build a map of total shares of each size for each user,
        # e.g. {user_id: {edge_bits: count}} like {1: {29: 50}, 2: {29: 25, 31: 10}}
        shares_count_map = get_share_counts(shares)
        # Calculate total value of all shares
        total_value = calculate_total_share_value(shares_count_map, scale)
        # For each user with shares in the window, calculate payout
        # and add to block_payout_map
        for user_id, worker_shares_count in shares_count_map.items():
            # Calculate the total share value from this worker
            total_worker_value = calculate_total_share_value(
                {user_id: worker_shares_count}, scale)
            worker_payment = total_worker_value / total_value * reward
            block_payout_map[user_id] = worker_payment
        if estimate:
            payout_estimate_map_key = "payout-estimate-for-block-" + str(
                height)
            try:
                # Estimates are cached in redis, save it there if we can
                redisdb = lib.get_redis_db()
                redisdb.set(payout_estimate_map_key,
                            pickle.dumps(block_payout_map))
            except Exception as e:
                logger.warn(
                    "block_payout_map cache insert failed: {} - {}".format(
                        payout_estimate_map_key, repr(e)))
    except Exception as e:
        logger.error("Estimate went wrong: {} - {}".format(
            e, traceback.format_exc()))
    finally:
        reward_estimate_mutex.release()
    #logger.warn("calculate_map: {}".format(block_payout_map))
    return block_payout_map
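
Stripped of the caching and state checks, the payout rule is plain pro-rata: each user receives the reward scaled by their fraction of the total share value. A toy sketch with made-up values (the real per-user values come from calculate_total_share_value):

def payout_map(user_values, reward):
    total = sum(user_values.values())
    return {uid: value / total * reward for uid, value in user_values.items()}

# A 60-coin reward split 3:1 between two users:
print(payout_map({1: 75.0, 2: 25.0}, 60.0))  # {1: 45.0, 2: 15.0}
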
Example 9
def main():
    global CONFIG
    global LOGGER
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Number of blocks of share data used to calculate rewards
    PPLNG_WINDOW_SIZE = 60
    try:
        PPLNG_WINDOW_SIZE = int(os.environ["PPLNG_WINDOW_SIZE"])
    except Exception as e:
        LOGGER.error(
            "Failed to get PPLNG_WINDOW_SIZE from the environment: {}.  Using default size of {}"
            .format(e, PPLNG_WINDOW_SIZE))

    POOL_FEE = 0.0075
    try:
        POOL_FEE = float(CONFIG[PROCESS]["pool_fee"])
    except Exception as e:
        LOGGER.error(
            "Failed to get POOL_FEE from the config: {}.  Using default fee of {}"
            .format(e, POOL_FEE))

    # Keep track of "next" block estimated
    next_height_estimated = 0

    # Connect to DB
    database = lib.get_db()

    while True:
        # Generate pool block reward estimates for all new and unlocked blocks
        try:
            database.db.initializeSession()
            # A recent height at which all worker shares are available
            next_height = Blocks.get_latest().height - 5
            unlocked_blocks = Pool_blocks.get_all_unlocked()
            new_blocks = Pool_blocks.get_all_new()
            unlocked_blocks_h = [blk.height for blk in unlocked_blocks]
            new_blocks_h = [blk.height for blk in new_blocks]

            need_estimates = unlocked_blocks_h + new_blocks_h
            LOGGER.warn(
                "Will ensure estimate for blocks: {}".format(need_estimates))
            redisdb = lib.get_redis_db()

            # Generate Estimate
            for height in need_estimates:
                if height > next_height:
                    LOGGER.warn(
                        "Delay estimate until we have recent shares available for block: {}"
                        .format(height))
                else:
                    LOGGER.warn("Ensure estimate for block: {}".format(height))
                    # Check if we already have an estimate cached
                    payout_estimate_map_key = key_prefix + str(height)
                    cached_map = redisdb.get(payout_estimate_map_key)
                    if cached_map is None:
                        # We don't have it cached, we need to calculate it and cache it now
                        payout_map = pool.calculate_block_payout_map(
                            height, PPLNG_WINDOW_SIZE, POOL_FEE, LOGGER, True)
                        payout_map_json = json.dumps(payout_map)
                        redisdb.set(payout_estimate_map_key,
                                    payout_map_json,
                                    ex=cache_expire)
                        LOGGER.warn(
                            "Created estimate for block {} with key {}".format(
                                height, payout_estimate_map_key))
                    else:
                        LOGGER.warn(
                            "There is an existing estimate for block: {}".
                            format(height))

            # Generate estimate for "next" block
            LOGGER.warn(
                "Ensure estimate for next block: {}".format(next_height))
            if next_height_estimated != next_height:
                payout_map = pool.calculate_block_payout_map(
                    next_height, PPLNG_WINDOW_SIZE, POOL_FEE, LOGGER, True)
                payout_map_json = json.dumps(payout_map)
                payout_estimate_map_key = key_prefix + "next"
                redisdb.set(payout_estimate_map_key,
                            payout_map_json,
                            ex=cache_expire)
                next_height_estimated = next_height
                LOGGER.warn("Created estimate for block {} with key {}".format(
                    next_height, payout_estimate_map_key))
            else:
                LOGGER.warn(
                    "There is an existing next-block estimate for: {}".format(
                        next_height))

            LOGGER.warn("Completed estimates")
            database.db.destroySession()
            # Flush debug print statements
            sys.stdout.flush()
        except Exception as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.format_exc()))
            database.db.destroySession()

        LOGGER.warn("=== Completed {}".format(PROCESS))
        sleep(check_interval)
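
The loop keys estimates two ways: one entry per block height, plus a rolling "next" entry that is refreshed whenever the chain tip moves. A sketch of that keying, assuming module-level key_prefix and cache_expire values not shown in this excerpt (the prefix matches the earlier getters; the TTL is illustrative):

import json
import redis

r = redis.Redis()
key_prefix = "payout-estimate-for-block-"  # assumed, as in the getters above
cache_expire = 4 * 60 * 60                 # assumed 4-hour TTL

def cache_estimate(height_or_next, payout_map):
    key = key_prefix + str(height_or_next)
    r.set(key, json.dumps(payout_map), ex=cache_expire)
    return key

cache_estimate(12345, {1: 45.0})   # per-height estimate
cache_estimate("next", {1: 45.0})  # rolling next-block estimate
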
Example 10
def calculate_block_payout_map(height, window_size, pool_fee, logger, estimate=False):
    block_payout_map = {}
    # Get the grinpool admin user ID for pool fee
    pool_admin_user_id = 1
    # Total the payments for sanity check
    total_payments_this_block = 0
    try:
        admin_user = os.environ["GRIN_POOL_ADMIN_USER"]
        pool_admin_user_id = Users.get_id_by_username(admin_user)
        logger.warn("Pool Fee goes to admin account with id={}".format(pool_admin_user_id))
    except Exception as e:
        logger.warn("We dont have Admin account info, using default id={}: {}".format(pool_admin_user_id, e))
    # Create the payout map
    try:
        if estimate:
            cached_map = get_block_payout_map_estimate(height, logger)
            if cached_map is not None:
                return cached_map
        # Get pool_block record and check block state
        print("getting the pool block: {}".format(height))
        sys.stdout.flush()
        poolblock = Pool_blocks.get_by_height(height)
        if poolblock is None:
            return {}
        print("The pool block {}".format(poolblock.to_json()))
        sys.stdout.flush()
        if estimate:
            if poolblock.state != "new" and poolblock.state != "unlocked":
                return {}
        else:
            if poolblock.state != "unlocked":
                return {}
        # Get total value of this block: reward + tx fees
        reward = get_reward_by_block(height)
        print("Reward for block {} = {}".format(height, reward))
        sys.stdout.flush()
        # The pool's fee
        the_pools_fee = reward * pool_fee
        block_payout_map[pool_admin_user_id] = the_pools_fee
        reward = reward - the_pools_fee
        logger.warn("Pool Fee = {}".format(the_pools_fee))
        # Get the "secondary_scaling" value for this block
        scale = get_scale_by_block(height)
        print("Secondary Scaling value for block = {}".format(scale))
        sys.stdout.flush()
        # Build a map of total shares of each size for each user,
        # e.g. {user_id: {edge_bits: count}}
        shares_count_map = get_share_counts(height, window_size)
        # Calculate total value of all shares
        total_value = calculate_total_share_value(shares_count_map, scale)
        print("total share value in payment window: {}".format(total_value))
        sys.stdout.flush()
        # For each user with shares in the window, calculate payout and add to block_payout_map
        for user_id, worker_shares_count in shares_count_map.items():
            print("xxx: {} {}".format(user_id, worker_shares_count))
            sys.stdout.flush()
            # Calculate the total share value from this worker
            total_worker_value = calculate_total_share_value({user_id:worker_shares_count}, scale)
            worker_payment = total_worker_value / total_value * reward
            total_payments_this_block += worker_payment
            print("worker_payment: {}".format(worker_payment/1000000000))
            sys.stdout.flush()
            if user_id in block_payout_map:
                block_payout_map[user_id] += worker_payment
            else:
                block_payout_map[user_id] = worker_payment
        logger.warn("Total Grin Paid Out this block: {} + the_pools_fee: {} ".format(total_payments_this_block, the_pools_fee))
        print("block_payout_map = {}".format(block_payout_map))
        #sys.stdout.flush()
        if estimate:
            payout_estimate_map_key = "payout-estimate-for-block-" + str(height)
            try:
                # Estimates are cached in redis, save it there if we can
                redisdb = lib.get_redis_db()
                redisdb.set(payout_estimate_map_key, pickle.dumps(block_payout_map))
            except Exception as e:
                logger.warn("block_payout_map cache insert failed: {} - {}".format(payout_estimate_map_key, repr(e)))
    except Exception as e:
        logger.error("Estimate went wrong: {} - {}".format(e, traceback.format_exc()))
        raise
    #logger.warn("calculate_map: {}".format(block_payout_map))
    return block_payout_map
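
The fee handling here differs from Example 8: the pool's cut is carved out of the reward first and credited to the admin user, and only the remainder is split pro-rata. That arithmetic in isolation; a toy sketch:

def apply_pool_fee(reward, pool_fee, admin_user_id=1):
    # The admin user is paid the fee; workers share what remains
    fee = reward * pool_fee
    return {admin_user_id: fee}, reward - fee

payouts, distributable = apply_pool_fee(60.0, 0.0075)
print(payouts, distributable)  # {1: 0.45} 59.55
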