Example #1
 def get(self, id=None, height=0, range=0, fields=None):
     database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     LOGGER.warn(
         "WorkerAPI_shares get id:{} height:{} range:{} fields:{}".format(
             id, height, range, fields))
     fields = lib.fields_to_list(fields)
     if height == 0:
         height = Blocks.get_latest().height
     shares_records = []
     if id is None:
         for shares in Worker_shares.get_by_height(height, range):
             shares_records.append(shares.to_json(fields))
         return shares_records
     else:
         if range is None:
             res = Worker_shares.get_by_height_and_id(height, id)
             if res is None:
                 # No share record found; return an empty list
                 return []
             return res.to_json(fields)
         else:
             for share in Worker_shares.get_by_height_and_id(
                     height, id, range):
                 shares_records.append(share.to_json(fields))
             return shares_records
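This get() reads like a Flask-RESTful resource method (Example #2, from the same project, uses Flask's g and jsonify). As a minimal sketch of how such a resource is typically wired up — the URL rule and app wiring below are assumptions for illustration, not taken from the grin-pool source, and WorkerAPI_shares is presumed to subclass flask_restful.Resource:

from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
# Hypothetical route; the real grin-pool URL rules may differ
api.add_resource(
    WorkerAPI_shares,
    "/worker/shares/<int:id>/<int:height>/<int:range>/<string:fields>")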
Example #2
File: api.py Project: bitgrin/grin-pool
 def get(self, id=None, height=None, range=0, fields=None):
     global database
     #database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     # AUTH FILTER
     if id != g.user.id:
         response = jsonify({ 'message': 'Not authorized to access data for other users' })
         response.status_code = 403
         return response
     debug and LOGGER.warn("WorkerAPI_shares get id:{} height:{} range:{} fields:{}".format(id, height, range, fields))
     # Enforce range limit
     if range is not None:
         range = min(range, worker_shares_range_limit)
     fields = lib.fields_to_list(fields)
     if height is None:
         return Worker_shares.get_latest_height(id)
     if height == 0:
         height = Blocks.get_latest().height
     shares_records = []
     if id is None:
         for shares in Worker_shares.get_by_height(height, range):
             shares_records.append(shares.to_json(fields))
         return shares_records
     else:
         if range is None:
             res = Worker_shares.get_by_height_and_id(height, id)
             if res is None:
                 # No share record found; return an empty list
                 return []
             return res.to_json(fields)
         else:
             for share in Worker_shares.get_by_height_and_id(height, id, range):
                 shares_records.append(share.to_json(fields))
             return shares_records
Example #3
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Initialize poolStats records if this is the first run
    latest_stat = Pool_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup
        poolstats.initialize(avg_over_range, LOGGER)

    latest_stat = Pool_stats.get_latest()
    LOGGER.warn("Starting at height: {}".format(latest_stat.height))

    # Generate pool stats records - one per grin block
    while True:
        # Find the height of the latest stats record
        latest_stat = Pool_stats.get_latest()
        height = latest_stat.height + 1
        LOGGER.warn("Starting at height: {}".format(height))
        try:
            while True:
                share_height = Worker_shares.get_latest_height()
                while share_height is None:
                    LOGGER.warn("Waiting for shares")
                    share_height = Worker_shares.get_latest_height()
                    sleep(10)
                latest = Blocks.get_latest().height
                stats_height = height - 1
                LOGGER.warn(
                    "Running: Chain height: {}, share height: {},  stats height: {}"
                    .format(latest, share_height, stats_height))
                while share_height - 1 > height:
                    new_stats = poolstats.calculate(height, avg_over_range)
                    # Batch new stats when possible, but commit at reasonable intervals
                    database.db.getSession().add(new_stats)
                    if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                        database.db.getSession().commit()
                    LOGGER.warn(
                        "Added Pool_stats for block: {} - {} {} {}".format(
                            new_stats.height, new_stats.gps,
                            new_stats.active_miners,
                            new_stats.shares_processed))
                    height = height + 1
                    sys.stdout.flush()
                sleep(check_interval)
        except Exception as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))
            database.db.getSession().rollback()
            sleep(check_interval)

    LOGGER.warn("=== Completed {}".format(PROCESS))
Example #4
    def commit(self, height):
        print("self.shares.keys(): {}".format(self.shares.keys()))
        if height not in self.shares or len(self.shares[height]) == 0:
            self.LOGGER.warn(
                "Processed 0 shares in block {} - Creatiing filler record".
                format(height))
            # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
            filler_shares_rec = Worker_shares(height=height,
                                              worker="GrinPool",
                                              timestamp=datetime.utcnow(),
                                              difficulty=DIFFICULTY,
                                              valid=0,
                                              invalid=0)
            lib.get_db().db.createDataObj_ignore_duplicates(filler_shares_rec)
            return

        byWorker = {}
        for nonce in self.shares[height]:
            share = self.shares[height][nonce]
            # Sort shares by worker
            if share.found_by not in byWorker:
                byWorker[share.found_by] = []
            byWorker[share.found_by].append(share)
            # Create Pool_blocks for full solution shares
            if share.is_solution:
                self.addPoolBlock(share)

        # Create a Worker_shares record for each user and commit to DB
        # XXX TODO: Bulk Insert - will be needed when the pool has hundreds or thousands of workers
        for worker in byWorker:
            workerShares = byWorker[worker]
            # Not possible?
            #            if len(workerShares) == 0:
            #                continue
            self.LOGGER.warn(
                "Processed {} shares in block {} for worker {}".format(
                    len(workerShares), height, worker))
            valid_list = [share.is_valid for share in workerShares]
            # self.LOGGER.warn("xxx:  {}".format(valid_list))
            num_valid = sum([int(share.is_valid) for share in workerShares])
            new_shares_rec = Worker_shares(height=height,
                                           worker=worker,
                                           timestamp=datetime.utcnow(),
                                           difficulty=DIFFICULTY,
                                           valid=num_valid,
                                           invalid=len(workerShares) -
                                           num_valid)
            lib.get_db().db.createDataObj_ignore_duplicates(new_shares_rec)
            # We added new worker share data, so if a Pool_stats record already exists at this height, we mark it dirty so it gets recalculated
            stats_rec = Pool_stats.get_by_height(height)
            if stats_rec is not None:
                stats_rec.dirty = True
                lib.get_db().db.getSession().commit()
Example #5
def calculate(height, window_size):
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous Pool_stats record found"
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    window = Worker_shares.get_by_height(height, window_size)
    #    assert window[-1].height - window[0].height >= window_size, "Failed to get proper window size"
    #    print("Sanity: window size:  {} vs  {}".format(window[-1].height - window[0].height, window_size))
    # Calculate the stats data
    timestamp = grin_block.timestamp
    active_miners = len(list(set([s.user_id for s in window])))
    print("active_miners = {}".format(active_miners))
    # Keep track of share totals - sum counts of all share sizes submitted for this block
    num_shares_processed = 0
    share_counts = {}
    for ws in Worker_shares.get_by_height(height):
        num_shares_processed += ws.num_shares()
        for size in ws.sizes():
            size_str = "{}{}".format("C", size)
            if size_str not in share_counts:
                share_counts[size_str] = {"valid": 0, "invalid": 0, "stale": 0}
            share_counts[size_str] = {
                "valid": share_counts[size_str]["valid"] + ws.num_valid(size),
                "invalid":
                share_counts[size_str]["invalid"] + ws.num_invalid(size),
                "stale": share_counts[size_str]["stale"] + ws.num_stale(size)
            }
    print("num_shares_processed this block= {}".format(num_shares_processed))
    total_shares_processed = previous_stats_record.total_shares_processed + num_shares_processed
    total_blocks_found = previous_stats_record.total_blocks_found
    # Calculate estimated GPS for all sizes with shares submitted
    all_gps = estimate_gps_for_all_sizes(window)
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    new_stats = Pool_stats(
        height=height,
        timestamp=timestamp,
        active_miners=active_miners,
        share_counts=share_counts,
        shares_processed=num_shares_processed,
        total_blocks_found=total_blocks_found,
        total_shares_processed=total_shares_processed,
        dirty=False,
    )
    print("all_gps for all pool workers")
    pp.pprint(all_gps)
    for gps_est in all_gps:
        gps_rec = Gps(edge_bits=gps_est[0], gps=gps_est[1])
        new_stats.gps.append(gps_rec)
    sys.stdout.flush()
    return new_stats
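For orientation, the share_counts map assembled above is keyed by graph-size strings ("C" plus the edge-bits value), each holding valid/invalid/stale tallies. An invented instance (all numbers illustrative):

share_counts = {
    "C29": {"valid": 120, "invalid": 3, "stale": 7},
    "C31": {"valid": 15, "invalid": 0, "stale": 1},
}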
Example #6
def calculate(height, avg_range):
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height - 1)
    assert previous_stats_record is not None, "No previous stats record found"
    avg_over_first_grin_block = Blocks.get_by_height(max(
        height - avg_range, 1))
    assert avg_over_first_grin_block is not None, "Missing grin block: {}".format(
        max(height - avg_range, 1))
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    latest_worker_shares = Worker_shares.get_by_height(height)
    # If no shares are found for this height, we have 2 options:
    # 1) Assume the share data is *delayed* so dont create the stats record now
    # assert len(latest_worker_shares) > 0, "No worker shares found"
    # 2) If we want we can create the record without share data and then when shares are added later this record will be recalculated
    avg_over_worker_shares = Worker_shares.get_by_height(height, avg_range)
    # Calculate the stats data
    timestamp = grin_block.timestamp
    difficulty = POOL_MIN_DIFF  # XXX TODO - enhance to support multiple difficulties
    gps = 0
    active_miners = 0
    shares_processed = 0
    num_shares_in_range = 0
    if len(avg_over_worker_shares) > 0:
        num_shares_in_range = sum(
            [shares.valid for shares in avg_over_worker_shares])
        gps = grin.calculate_graph_rate(difficulty,
                                        avg_over_first_grin_block.timestamp,
                                        grin_block.timestamp,
                                        num_shares_in_range)
        print("XXX: difficulty={}, {}-{}, len={}".format(
            difficulty, avg_over_first_grin_block.timestamp,
            grin_block.timestamp, num_shares_in_range))
    if latest_worker_shares is not None:
        active_miners = len(latest_worker_shares)  # XXX NO, FIX THIS
        num_valid = sum([shares.valid for shares in latest_worker_shares])
        num_invalid = sum([shares.invalid for shares in latest_worker_shares])
        shares_processed = num_valid + num_invalid

    total_shares_processed = previous_stats_record.total_shares_processed + shares_processed
    total_grin_paid = previous_stats_record.total_grin_paid  # XXX TODO
    total_blocks_found = previous_stats_record.total_blocks_found
    if Pool_blocks.get_by_height(height - 1) is not None:
        total_blocks_found = total_blocks_found + 1
    return Pool_stats(height=height,
                      timestamp=timestamp,
                      gps=gps,
                      active_miners=active_miners,
                      shares_processed=shares_processed,
                      total_shares_processed=total_shares_processed,
                      total_grin_paid=total_grin_paid,
                      total_blocks_found=total_blocks_found)
Example #7
def initialize(window_size, logger):
    database = lib.get_db()
    # Special case for new pool startup
    block_zero = None
    while block_zero is None:
        logger.warn("Waiting for the first block record in the database")
        time.sleep(1)
        block_zero = Blocks.get_earliest()
    print("block_zero={}".format(block_zero))

    stat_height = max(0, block_zero.height + window_size)
    seed_stat = Pool_stats(
        height=stat_height,
        timestamp=datetime.utcnow(),
        active_miners=0,
        shares_processed=0,
        share_counts=None,
        total_blocks_found=0,
        total_shares_processed=0,
        dirty=False,
    )
    database.db.createDataObj(seed_stat)
    seed_share = Worker_shares(
        height=stat_height,
        user_id=1,
        timestamp=datetime.utcnow(),
    )
    database.db.createDataObj(seed_share)
Example #8
def main():
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    CONFIG = lib.get_config()

    # XXX TODO: Put in config
    HOST = "0.0.0.0"
    PORT = 32080
    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0

    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    database = lib.get_db()
    HEIGHT = Worker_shares.get_latest_height()
    if HEIGHT is None:
        HEIGHT = grin.blocking_get_current_height()
    SHARES = WorkerShares(LOGGER)

    #server = ThreadedHTTPServer((HOST, PORT), ShareHandler)
    #server = HTTPServer((HOST, PORT), ShareHandler)

    #    server = socketserver.TCPServer((HOST, PORT), ShareHandler)
    #    server.handle_request()
    #    server.server_close()

    commit_thread = threading.Thread(target=ShareCommitScheduler, args=(15, ))
    commit_thread.start()
    server = ThreadedTCPServer((HOST, PORT), ShareHandler)
    server.serve_forever()
Example #9
def calculate(height, avg_range):
    avg_over_first_grin_block = Blocks.get_by_height( max(height-avg_range, 1) )
    assert avg_over_first_grin_block is not None, "Missing grin block: {}".format(max(height-avg_range, 1))
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    # Get all workers share records for the current range of blocks
    latest_worker_shares = Worker_shares.get_by_height(height)
#    assert len(latest_worker_shares) != 0, "Missing worker shares record for height {}".format(height)
    avg_over_worker_shares = Worker_shares.get_by_height(height, avg_range)
    # Create a worker_stats for each user who submitted a share in this range
    workers = list(set([share.worker for share in latest_worker_shares]))
    new_stats = []
    for worker in workers:
        # Get this workers most recent worker_stats record (for running totals)
        last_stat = Worker_stats.get_latest_by_id(worker)
        if last_stat is None:
            # A new worker
            last_stat = Worker_stats(None, datetime.utcnow(), height-1, worker, 0, 0, 0, 0, 0, 0)
            new_stats.append(last_stat)
        # Calculate this workers stats data
        timestamp = grin_block.timestamp
        difficulty = POOL_MIN_DIFF # XXX TODO - enhance to support multiple difficulties
        num_shares_in_range = sum([shares.valid for shares in avg_over_worker_shares if shares.worker == worker])
        gps = grin.calculate_graph_rate(difficulty, avg_over_first_grin_block.timestamp, grin_block.timestamp, num_shares_in_range)
        num_valid_this_block = [shares.valid for shares in latest_worker_shares if shares.worker == worker][0]
        num_invalid_this_block = [shares.invalid for shares in latest_worker_shares if shares.worker == worker][0]
        shares_processed = num_valid_this_block + num_invalid_this_block
#        latest_worker_shares = [share for share in latest_pool_shares if share.found_by == worker]
        #shares_processed = len(worker_shares_this_block)
        total_shares_processed = last_stat.total_shares_processed + shares_processed
        stats = Worker_stats(
                id = None,
                height = height,
                timestamp = timestamp,
                worker = worker,
                gps = gps,
                shares_processed = shares_processed,
                total_shares_processed = total_shares_processed,
                grin_paid = 123, # XXX TODO
                total_grin_paid = 456, # XXX TODO
                balance = 1) # XXX TODO
        new_stats.append(stats)
    return new_stats
Example #10
def calculate(height, window_size):
    # Get the most recent pool data from which to generate the stats
    previous_stats_record = Pool_stats.get_by_height(height-1)
    assert previous_stats_record is not None, "No previous Pool_stats record found"
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block: {}".format(height)
    window = Worker_shares.get_by_height(height, window_size)
    # Calculate the stats data
    timestamp = grin_block.timestamp
    active_miners = len(list(set([s.user_id for s in window])))
    print("active_miners = {}".format(active_miners))
    # Keep track of share totals - sum counts of all share sizes submitted for this block
    shares_processed = Worker_shares.get_by_height(height)
    num_shares_processed = sum([shares.num_shares() for shares in shares_processed])
    print("num_shares_processed this block= {}".format(num_shares_processed))
    total_shares_processed = previous_stats_record.total_shares_processed + num_shares_processed
    total_blocks_found = previous_stats_record.total_blocks_found
    # Calculate estimated GPS for all sizes with shares submitted
    all_gps = estimate_gps_for_all_sizes(window)
    if Pool_blocks.get_by_height(height-1) is not None:
        total_blocks_found = total_blocks_found + 1
    new_stats = Pool_stats(
            height = height,
            timestamp = timestamp,
            active_miners = active_miners,
            shares_processed = num_shares_processed,
            total_blocks_found = total_blocks_found,
            total_shares_processed = total_shares_processed,
            dirty = False,
        )
    print("all_gps for all pool workers")
    pp.pprint(all_gps)
    for gps_est in all_gps:
        gps_rec = Gps(
            edge_bits = gps_est[0],
            gps = gps_est[1]
        )
        new_stats.gps.append(gps_rec)
    sys.stdout.flush()
    return new_stats
Example #11
def main():
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    global SHARE_EXPIRETIME
    global database
    global RABBITMQ_USER
    global RABBITMQ_PASSWORD
    CONFIG = lib.get_config()
    atexit.register(lib.teardown_db)

    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0

    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    SHARE_EXPIRETIME = int(CONFIG[PROCESS]["share_expire_time"])
    commit_interval = int(CONFIG[PROCESS]["commit_interval"])
    rmq_endpoints = json.loads(CONFIG[PROCESS]["rmq"])

    RABBITMQ_USER = os.environ["RABBITMQ_USER"]
    RABBITMQ_PASSWORD = os.environ["RABBITMQ_PASSWORD"]

    database = lib.get_db()
    HEIGHT = Worker_shares.get_latest_height()
    while HEIGHT is None:
        LOGGER.warn("Waiting on the first grin block...")
        time.sleep(5)
        latest_block = Blocks.get_latest()
        if latest_block is not None:
            HEIGHT = latest_block.height

    SHARES = WorkerShares(LOGGER)

    ##
    # Start a thread to commit shares
    commit_thread = threading.Thread(target=ShareCommitScheduler,
                                     args=(
                                         commit_interval,
                                         database,
                                     ))
    commit_thread.start()

    ##
    # Start a pika consumer thread for each rabbit we want to consume from
    for rmq in rmq_endpoints:
        rmq_thread = threading.Thread(target=RmqConsumer, args=(rmq, ))
        rmq_thread.start()
Example #12
File: api.py Project: bitgrin/grin-pool
 def get(self, height=0, range=None, fields=None):
     global database
     #database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     debug and LOGGER.warn("WorkersAPI_shares get height:{} range:{} fields:{}".format(height, range, fields))
     fields = lib.fields_to_list(fields)
     shares_records = []
     if height == 0:
         height = Blocks.get_latest().height
     for shares in Worker_shares.get_by_height(height, range):
         # AUTH FILTER
         if shares.user_id == g.user.id:
             shares_records.append(shares.to_json(fields))
     return shares_records
Example #13
File: pool.py Project: waosman/grin-pool
def get_share_counts(height, window_size):
    shares = Worker_shares.get_by_height(height, window_size)
    # Sum up the number of each size share submitted by each user
    shares_count_map = {}
    for worker_shares_rec in shares:
        if not worker_shares_rec.user_id in shares_count_map:
            shares_count_map[worker_shares_rec.user_id] = {}
        for pow_size in worker_shares_rec.sizes():
            #print("pow_size = {}".format(pow_size))
            if not pow_size in shares_count_map[worker_shares_rec.user_id]:
                shares_count_map[worker_shares_rec.user_id][pow_size] = 0
            num_valid = worker_shares_rec.num_valid(pow_size)
            shares_count_map[worker_shares_rec.user_id][pow_size] += num_valid
    return shares_count_map
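A hedged usage sketch of get_share_counts(): the returned map nests valid-share counts by user id and then PoW size, the same shape as the dummy data shown in Example #19 (the height, window, and counts below are invented):

counts = get_share_counts(430000, 60)
# e.g. {1: {29: 50}, 2: {29: 25, 31: 10}, 3: {32: 5}}
for user_id, by_size in counts.items():
    total_valid = sum(by_size.values())
    print("user {}: {} valid shares in window".format(user_id, total_valid))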
Example #14
def recalculate(start_height, window_size):
    database = lib.get_db()
    height = start_height
    worker_shares_height = Worker_shares.get_latest_height() - 1
    while worker_shares_height > height:
        old_stats = Pool_stats.get_by_height(height)
        new_stats = calculate(height, window_size)
        if old_stats is None:
            database.db.createDataObj(new_stats)
        else:
            old_stats.timestamp = new_stats.timestamp
            old_stats.active_miners = new_stats.active_miners
            old_stats.share_counts = new_stats.share_counts
            old_stats.shares_processed = new_stats.shares_processed
            old_stats.total_blocks_found = new_stats.total_blocks_found
            old_stats.total_shares_processed = new_stats.total_shares_processed
            old_stats.dirty = False
            database.db.getSession().commit()
        height = height + 1
Example #15
    def commit(self, height=None):
        global HEIGHT
        if height is None:
            block_heights = list(self.shares.keys())
            try:
                # Process all heights, except
                # Dont process shares from current block or newer
                block_heights = [h for h in block_heights if h < HEIGHT]
            except ValueError as e:
                pass
        else:
            block_heights = [height]

        #pp.pprint(self.shares)

        if len(block_heights) > 0:
            self.LOGGER.warn(
                "Committing shares for blocks: {}".format(block_heights))

        for height in block_heights:
            if height not in self.shares or len(self.shares[height]) == 0:
                # XXX TODO: Only create filler record if no records already exist for this height
                self.LOGGER.warn(
                    "Processed 0 shares in block {} - Creating filler record".
                    format(height))
                # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
                filler_worker_shares_rec = Worker_shares(
                    height=height,
                    user_id=1,
                    timestamp=datetime.utcnow(),
                )
                database.db.createDataObj(filler_worker_shares_rec)
                return

            # Sort the shares by worker and graph size
            # byWorker is multi-level structure:
            # 1) hash by worker id
            # 2) hash by graph size
            # 3) List of shares
            byWorker = {}
            for hash in self.shares[height]:
                share = self.shares[height][hash]
                # Sort shares by worker
                if share.found_by not in byWorker:
                    byWorker[share.found_by] = {}
                if share.edge_bits not in byWorker[share.found_by]:
                    byWorker[share.found_by][share.edge_bits] = []
                #print("XXX Adding share to workerShares: {}".format(share))
                byWorker[share.found_by][share.edge_bits].append(share)
                # Create Pool_blocks for full solution shares
                if share.is_solution:
                    self.addPoolBlock(share)

            #pp.pprint(byWorker)
            # Create/update a Worker_shares record for each user and commit to DB
            # XXX TODO: Bulk Insert - will be needed when the pool has hundreds or thousands of workers
            for worker in byWorker:
                if worker == 0:
                    continue
                workerShares = byWorker[worker]
                #print("workerShares for {} = {}".format(worker, workerShares))
                # Count them (just for logging)
                num_valid_shares = 0
                num_invalid_shares = 0
                num_stale_shares = 0
                for graph_size in workerShares:
                    for share in workerShares[graph_size]:
                        if share.is_valid:
                            num_valid_shares += 1
                        elif share.is_valid == False and share.invalid_reason == 'too late':
                            num_stale_shares += 1
                        elif share.is_valid == False:
                            num_invalid_shares += 1
                self.LOGGER.warn(
                    "Processed {} shares in block {} for user_id {}: Valid: {}, stale: {}, invalid: {}"
                    .format(len(workerShares), height, worker,
                            num_valid_shares, num_stale_shares,
                            num_invalid_shares))
                pp.pprint(workerShares)

                # xxx

                # Get any existing record for this worker at this height
                worker_shares_rec = Worker_shares.get_by_height_and_id(
                    height, worker)
                existing = True
                if worker_shares_rec is None or len(worker_shares_rec) == 0:
                    # No existing record, create it
                    self.LOGGER.warn(
                        "This is a new share record for worker: {}".format(
                            worker))
                    worker_shares_rec = Worker_shares(
                        height=height,
                        user_id=worker,
                        timestamp=datetime.utcnow(),
                    )
                    database.db.createDataObj(worker_shares_rec)
                    existing = False
                else:
                    print("XXXXXXXXXXXXXXXXXXXXX")
                    pp.pprint(worker_shares_rec)
                    worker_shares_rec = worker_shares_rec[0]
                # Create/update Shares records - one per graph size mined in this block
                #pp.pprint(workerShares)
                for a_share_list in workerShares.values():
                    for a_share in a_share_list:
                        if a_share.edge_bits == 0:
                            # We dont actually know what size this share was since we
                            # only got the pools half.  Its invalid anyway, so just ignore
                            # for now. XXX TODO something better
                            continue
                        #print("a_share = {}".format(a_share))
                        edge_bits = a_share.edge_bits
                        difficulty = a_share.share_difficulty
                        valid = 0
                        stale = 0
                        invalid = 0
                        if a_share.is_valid:
                            valid = 1
                        elif a_share.invalid_reason == 'too late':
                            stale = 1
                        else:
                            invalid = 1
                        worker_shares_rec.add_shares(a_share.edge_bits,
                                                     a_share.share_difficulty,
                                                     valid, invalid, stale)
                try:
                    database.db.getSession().commit()
                    # After we commit share data we need to ack the rmq messages and clear the committed shares
                    self.ack_and_clear(height)
                    # We added new worker share data, so if a Pool_stats record already exists at this height,
                    # we mark it dirty so it gets recalculated by the shareValidator service
                    stats_rec = Pool_stats.get_by_height(height)
                    if stats_rec is not None:
                        stats_rec.dirty = True
                    # Commit any changes
                    if existing == True:
                        self.LOGGER.warn(
                            "XXX UPDATED worker share record: {}".format(
                                worker_shares_rec))
                    else:
                        self.LOGGER.warn(
                            "XXX NEW worker share record: {}".format(
                                worker_shares_rec))
                    database.db.getSession().commit()
                except Exception as e:
                    self.LOGGER.error(
                        "Failed to commit worker shares for {} at height {} - {}"
                        .format(worker, height, e))
Example #16
def calculate(height, window_size):
    database = lib.get_db()
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block at height: {}".format(height)
    # Get all Worker_share records in the estimation window
    window = Worker_shares.get_by_height(height, window_size)
    # Get list of all workers who submitted shares OR recvd a payment at this height
    pmts = Pool_payment.get_by_height(height)
    shares_workers = [share.user_id for share in window]
    pmts_workers = [pmt.user_id for pmt in pmts]
    workers = list(set(shares_workers + pmts_workers))
    # Create a new Worker_stats record for each of these workers
    print("Calcualte worker stats for height {}, workers {}".format(height, workers))
    new_stats = []
    for worker in workers:
        # Get this workers most recent worker_stats record (for running totals)
        last_stat = Worker_stats.get_latest_by_id(worker)
        if last_stat is None:
            # A new worker, initialize a last_stat for the previous block
            last_stat = Worker_stats(
                            timestamp=datetime.utcnow(),
                            height=height-1,
                            user_id=worker)
            new_stats.append(last_stat)
        # Calculate this workers stats data
        timestamp = grin_block.timestamp
        # Calculate estimated GPS for all sizes with shares submitted
        all_gps = estimate_gps_for_all_sizes(worker, window)
        # Keep track of share totals - sum counts of all share sizes submitted for this block
        print("Looking up shares for height {} user {}".format(height, worker))
        this_workers_shares_this_block = Worker_shares.get_by_height_and_id(height, worker)
        print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX {}".format(this_workers_shares_this_block))
        if this_workers_shares_this_block is None or len(this_workers_shares_this_block) == 0:
            this_workers_valid_shares = 0
            this_workers_invalid_shares = 0
            this_workers_stale_shares = 0
        else:
            this_workers_shares_this_block = this_workers_shares_this_block[-1]
            this_workers_valid_shares = this_workers_shares_this_block.num_valid()
            this_workers_invalid_shares = this_workers_shares_this_block.num_invalid()
            this_workers_stale_shares = this_workers_shares_this_block.num_stale()
        this_workers_total_valid_shares = last_stat.total_valid_shares + this_workers_valid_shares
        this_workers_total_invalid_shares = last_stat.total_invalid_shares + this_workers_invalid_shares
        this_workers_total_stale_shares = last_stat.total_stale_shares + this_workers_stale_shares

        # XXX PERFORMANCE = removed bulk insert to debug some other issue, need to put bulk insert back!!!
        stats = Worker_stats(
                height = height,
                timestamp = timestamp,
                user_id = worker,
                valid_shares = this_workers_valid_shares,
                invalid_shares = this_workers_invalid_shares,
                stale_shares = this_workers_stale_shares,
                total_valid_shares = this_workers_total_valid_shares,
                total_invalid_shares = this_workers_total_invalid_shares,
                total_stale_shares = this_workers_total_stale_shares,
            )
        database.db.getSession().add(stats)
        database.db.getSession().commit()
        #print("AAA: Created Worker_stats with id={}".format(stats.id))
#        print("all_gps for worker {}:".format(worker))
#        pp.pprint(all_gps)
        for gps_est in all_gps:
            gps_rec = Gps(
                edge_bits = gps_est[0],
                gps = gps_est[1],
            )
            stats.gps.append(gps_rec)
            #print("AAA: Appended gps record to Worker_stats: {}".format(gps_rec))
#            gps_rec.worker_stats_id = stats.id,
#            database.db.getSession().add(gps_rec)
        new_stats.append(stats)
        database.db.getSession().add(stats)
        database.db.getSession().commit()
    sys.stdout.flush()
    return new_stats
Example #17
    def commit(self, height=None):
        global HEIGHT
        database = lib.get_db()
        if height is None:
            block_heights = list(self.shares.keys())
            try:
                # Process all heights, except
                # Dont process shares from current block or newer
                block_heights = [h for h in block_heights if h < HEIGHT]
            except ValueError as e:
                pass
            self.LOGGER.warn(
                "Committing shares for blocks: {}".format(block_heights))
        else:
            block_heights = [height]

        self.LOGGER.warn(
            "Will commit shares for blocks: {} - (current height: {})".format(
                block_heights, HEIGHT))
        for height in block_heights:
            if height not in self.shares or len(self.shares[height]) == 0:
                # XXX TODO: Only create filler record if no records already exist for this height
                self.LOGGER.warn(
                    "Processed 0 shares in block {} - Creatiing filler record".
                    format(height))
                # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
                filler_shares_rec = Worker_shares(height=height,
                                                  worker="GrinPool",
                                                  timestamp=datetime.utcnow(),
                                                  difficulty=DIFFICULTY,
                                                  valid=0,
                                                  invalid=0)
                database.db.createDataObj_ignore_duplicates(filler_shares_rec)
                return

            byWorker = {}
            for nonce in self.shares[height]:
                share = self.shares[height][nonce]
                # Sort shares by worker
                if share.found_by not in byWorker:
                    byWorker[share.found_by] = []
                byWorker[share.found_by].append(share)
                # Create Pool_blocks for full solution shares
                if share.is_solution:
                    self.addPoolBlock(share)

            # Create/update a Worker_shares record for each user and commit to DB
            # XXX TODO: Bulk Insert - will be needed when the pool has hundreds or thousands of workers
            for worker in byWorker:
                workerShares = byWorker[worker]
                self.LOGGER.warn(
                    "Processed {} shares in block {} for worker {}".format(
                        len(workerShares), height, worker))
                valid_list = [share.is_valid for share in workerShares]
                # self.LOGGER.warn("xxx:  {}".format(valid_list))
                num_valid = sum(
                    [int(share.is_valid) for share in workerShares])
                # Get any existing record for this worker at this height
                existing_shares_rec = Worker_shares.get_by_user_and_height(
                    worker, height)
                if len(existing_shares_rec) == 0:
                    # No existing record, create it
                    self.LOGGER.warn(
                        "New share record for {} at height {} with {} valid shares, {} invalid share"
                        .format(worker, height, num_valid,
                                len(workerShares) - num_valid))
                    new_shares_rec = Worker_shares(height=height,
                                                   worker=worker,
                                                   timestamp=datetime.utcnow(),
                                                   difficulty=DIFFICULTY,
                                                   valid=num_valid,
                                                   invalid=len(workerShares) -
                                                   num_valid)
                    database.db.createDataObj_ignore_duplicates(new_shares_rec)
                else:
                    existing_shares_rec = existing_shares_rec[0]
                    self.LOGGER.warn(
                        "Updated share record for {} at height {}: Prev={} valid, {} invalid ; Now={} valid, {} invalid"
                        .format(
                            worker, height, existing_shares_rec.valid,
                            existing_shares_rec.invalid,
                            existing_shares_rec.valid + num_valid,
                            existing_shares_rec.invalid + len(workerShares) -
                            num_valid))
                    existing_shares_rec.valid += num_valid
                    existing_shares_rec.invalid += len(
                        workerShares) - num_valid
                # After we commit share data we need to clear it
                self.clear(height)
                # We added new worker share data, so if a Pool_stats record already exists at this height,
                # we mark it dirty so it gets recalculated by the shareValidator service
                stats_rec = Pool_stats.get_by_height(height)
                if stats_rec is not None:
                    stats_rec.dirty = True
                # Commit any changes
                database.db.getSession().commit()
Example #18
def ShareCommitScheduler(max_lag, commit_interval, logger):
    global SHARES_MUTEX
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Waiting for first block")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            SHARES_MUTEX.acquire()
            try:
                logger.warn("= Begin ShareCommitScheduler")
                # Iterate over each sharedata key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_SHAREDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - max_lag:
                        # Commit this record
                        logger.warn(
                            "-- ShareCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_sharedata = redisdb.get(key)
                        redis_sharedata = json.loads(redis_sharedata.decode())
                        ts_str = redis_sharedata.pop("timestamp",
                                                     str(datetime.utcnow()))
                        ts = datetime.strptime(
                            ts_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                        for worker, worker_shares in redis_sharedata.items():
                            # Get any existing record
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "New share record for worker {} at height {}"
                                    .format(worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                logger.warn(
                                    "Add to existing record for worker {} at height {}"
                                    .format(worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                worker_shares_rec.add_shares(
                                    edge_bits, shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                # Debug
                                logger.warn("Worker Shares: {}".format(
                                    worker_shares_rec))
                        # We wrote this record to mysql, so remove the redis cache
                        database.db.getSession().commit()
                        redisdb.delete(key)
                # Write filler record if needed
                share_height = Worker_shares.get_latest_height()
                if share_height is None:
                    share_height = grin.blocking_get_current_height()
                share_height = share_height + 1
                while share_height < (chain_height - max_lag):
                    logger.warn(
                        "Processed 0 shares in block {} - Creating filler record"
                        .format(share_height))
                    filler_worker_shares_rec = Worker_shares(
                        height=share_height,
                        user_id=1,  # Pool User
                        timestamp=datetime.utcnow(),
                    )
                    database.db.createDataObj(filler_worker_shares_rec)
                    share_height += 1
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
                logger.warn("= End ShareCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            lib.teardown_db()
            logger.exception("Something went wrong: {} ".format(
                traceback.format_exc()))
            time.sleep(10)
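The commit loop above implies a specific Redis layout: one key per height, prefixed by REDIS_SHAREDATA_KEY, whose JSON value holds an optional timestamp plus per-worker, per-edge-bits counters. An invented instance of what such a value could look like (the key prefix, worker id, and counts are placeholders):

# key:   "<REDIS_SHAREDATA_KEY>-430000"
# value (JSON):
# {
#   "timestamp": "2019-06-01 12:00:00.000000",
#   "4242": {
#     "29": {"difficulty": 16, "accepted": 12, "rejected": 1, "stale": 0}
#   }
# }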
Example #19
def calculate_block_payout_map(height, window_size, logger, estimate=False):
    global reward_estimate_mutex
    reward_estimate_mutex.acquire()
    block_payout_map = {}
    try:
        if estimate == True:
            cached_map = get_block_payout_map_estimate(height, logger)
            if cached_map is not None:
                return cached_map
        # Get pool_block record and check block state
        #print("getting the pool bloch: {}".format(height))
        #sys.stdout.flush()
        poolblock = Pool_blocks.get_by_height(height)
        if poolblock is None:
            return {}
        #print("The pool block {}".format(poolblock.to_json()))
        #sys.stdout.flush()
        if estimate == True:
            if poolblock.state != "new" and poolblock.state != "unlocked":
                return {}
        else:
            if poolblock.state != "unlocked":
                return {}
        # Get an array with share counts for each block in the window
        shares = Worker_shares.get_by_height(height, window_size)
        #print("share data in window = {}".format(shares))
        #sys.stdout.flush()
        # Get total value of this block: reward + fees
        reward = get_reward_by_block(height)
        #print("Reward for block {} = {}".format(height, reward))
        #sys.stdout.flush()
        # Get the "secondary_scaling" value for this block
        scale = get_scale_by_block(height)
        #print("Secondary Scaling value for block = {}".format(scale))
        #sys.stdout.flush()
        # build a map of total shares of each size for each user
        shares_count_map = get_share_counts(shares)
        # DUMMY DATA
        #    scale = 529
        #    shares_count_map = {
        #            1: {29: 50},
        #            2: {29: 25, 31: 10},
        #            3: {32: 5},
        #        }

        #print("Shares Count Map:")
        #sys.stdout.flush()
        #pp = pprint.PrettyPrinter(indent=4)
        #pp.pprint(shares_count_map)
        #sys.stdout.flush()
        # Calculate total value of all shares
        total_value = calculate_total_share_value(shares_count_map, scale)
        #print("total share value in payment window: {}".format(total_value))
        #sys.stdout.flush()
        block_payout_map = {}
        # For each user with shares in the window, calculate payout and add to block_payout_map
        for user_id, worker_shares_count in shares_count_map.items():
            #print("xxx: {} {}".format(user_id, worker_shares_count))
            #sys.stdout.flush()
            # Calculate the total share value from this worker
            total_worker_value = calculate_total_share_value(
                {user_id: worker_shares_count}, scale)
            worker_payment = total_worker_value / total_value * reward
            #print("worker_payment: {}".format(worker_payment/1000000000))
            #sys.stdout.flush()
            block_payout_map[user_id] = worker_payment
        #print("block_payout_map = {}".format(block_payout_map))
        #sys.stdout.flush()
        if estimate == True:
            payout_estimate_map_key = "payout-estimate-for-block-" + str(
                height)
            try:
                # Estimates are cached in redis, save it there if we can
                redisdb = lib.get_redis_db()
                #redisdb.hmset(payout_estimate_map_key, json.dumps(block_payout_map))
                redisdb.set(payout_estimate_map_key,
                            pickle.dumps(block_payout_map))
            except Exception as e:
                logger.warn(
                    "block_payout_map cache insert failed: {} - {}".format(
                        payout_estimate_map_key, repr(e)))
    except Exception as e:
        logger.error("Estimate went wrong: {} - {}".format(
            e, traceback.format_exc()))
    finally:
        reward_estimate_mutex.release()
    #logger.warn("calculate_map: {}".format(block_payout_map))
    return block_payout_map
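The payout step itself is plain pro-rata division: each user's weighted share value over the window, divided by the total, times the block's value. A toy sketch with invented numbers (amounts are in nanogrin, matching the 1000000000 divisor in the commented debug line above):

reward = 60 * 1000000000                        # block reward in nanogrin
total_value = 90.0                              # total weighted share value
worker_values = {1: 50.0, 2: 33.25, 3: 6.75}    # per-user weighted values
block_payout_map = {
    user_id: value / total_value * reward
    for user_id, value in worker_values.items()
}
# user 1 receives 50/90 of the reward, and so on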
Example #20
def calculate(height, window_size):
    database = lib.get_db()
    grin_block = Blocks.get_by_height(height)
    assert grin_block is not None, "Missing grin block at height: {}".format(
        height)
    # Get all Worker_share records in the estimation window
    window = Worker_shares.get_by_height(height, window_size)
    # Get list of all workers who submitted shares in the window
    workers = list(set([share.worker for share in window]))
    # Create a new Worker_stats record for each of these workers
    print("Calcualte worker stats for height {}, workers {}".format(
        height, workers))
    new_stats = []
    for worker in workers:
        # Get this workers most recent worker_stats record (for running totals)
        last_stat = Worker_stats.get_latest_by_id(worker)
        if last_stat is None:
            # A new worker, initialize a last_stat for the previous block
            last_stat = Worker_stats(None, datetime.utcnow(), height - 1,
                                     worker, 0, 0, 0, 0, 0, 0)
            new_stats.append(last_stat)
        # Calculate this workers stats data
        timestamp = grin_block.timestamp
        # Calculate estimated GPS for all sizes with shares submitted
        all_gps = estimate_gps_for_all_sizes(worker, window)
        # Keep track of share totals - sum counts of all share sizes submitted for this block
        this_workers_shares = [ws for ws in window if ws.worker == worker]
        num_shares_processed = this_workers_shares[-1].num_shares()
        print("num_shares_processed={}".format(num_shares_processed))
        total_shares_processed = last_stat.total_shares_processed + num_shares_processed
        print("total_shares_processed={}".format(total_shares_processed))

        # XXX PERFORMANCE = could not get bulk_insert to work...

        stats = Worker_stats(
            id=None,
            height=height,
            timestamp=timestamp,
            worker=worker,
            shares_processed=num_shares_processed,
            total_shares_processed=total_shares_processed,
            grin_paid=123,  # XXX TODO
            total_grin_paid=456,  # XXX TODO
            balance=1)  # XXX TODO
        database.db.getSession().add(stats)
        database.db.getSession().commit()
        #print("AAA: Created Worker_stats with id={}".format(stats.id))
        #        print("all_gps for worker {}:".format(worker))
        #        pp.pprint(all_gps)
        for gps_est in all_gps:
            gps_rec = Gps(
                edge_bits=gps_est[0],
                gps=gps_est[1],
            )
            stats.gps.append(gps_rec)
            #print("AAA: Appended gps record to Worker_stats: {}".format(gps_rec))
#            gps_rec.worker_stats_id = stats.id,
#            database.db.getSession().add(gps_rec)
#        new_stats.append(stats)
        database.db.getSession().add(stats)
        database.db.getSession().commit()
    sys.stdout.flush()
    return new_stats
Example #21
def main():
    global LOGGER
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()

    latest_block = 0

    # XXX All in one db transaction....
    # Get unlocked blocks from the db
    unlocked_blocks = Pool_blocks.get_all_unlocked()
    database.db.getSession().commit()
    for pb in unlocked_blocks:
        try:
            LOGGER.warn("Processing unlocked block: {}".format(pb))
            if pb.height > latest_block:
                latest_block = pb.height
            # Get Worker_stats of this block + range to calculate reward for each worker
            worker_shares_window = Worker_shares.get_by_height(pb.height, PPLNS_WINDOW)
            print("worker_shares_window = {}".format(worker_shares_window))
            # Calculate Payment info:
            if len(worker_shares_window) > 0:
                # Calculate reward/share:
                # XXX TODO: Enhance
                #  What algorithm to use?  Maybe: https://slushpool.com/help/manual/rewards
                # For now, some variation on pplns

                # Sum up the number of each size share submitted by each user
                shares_count_map = {}
                for worker_shares_rec in worker_shares_window:
                    if not worker_shares_rec.worker in shares_count_map:
                        shares_count_map[worker_shares_rec.worker] = {}
                    for pow_size in worker_shares_rec.sizes():
                        print("pow_size = {}".format(pow_size))
                        if not pow_size in shares_count_map[worker_shares_rec.worker]:
                            shares_count_map[worker_shares_rec.worker][pow_size] = 0
                        shares_count_map[worker_shares_rec.worker][pow_size] += worker_shares_rec.num_valid(pow_size)
                print("Shares Count Map:")
                pp.pprint(shares_count_map)

                # Normalize and sum each workers shares to create a "share value"
                total_value = 0
                for worker, worker_shares_count in shares_count_map.items():
                    print("worker: {}, worker_shares_count: {}".format(worker, worker_shares_count))
                    sizes = list(worker_shares_count.keys())
                    print("sizes: {}".format(sizes))
                    shares_count_map[worker]["value"] = 0
                    value = 0
                    for size, count in worker_shares_count.items():
                        if size == 29:
                            value += float(count) * .33
                        else:
                            value += float(count)
                    # Count this worker's value toward the total once, after summing all sizes
                    total_value += value
                    shares_count_map[worker]["value"] = value
                    print("Worker {} value: {}".format(worker, value))
                
                # Make payments based on the workers total share_value
                for worker, worker_shares_count in shares_count_map.items():
                    worker_rewards = REWARD * worker_shares_count["value"] / total_value
                    # Add or create worker rewards
                    worker_utxo = Pool_utxo.credit_worker(worker, worker_rewards)
                    LOGGER.warn("Credit to user: {} = {}".format(worker, worker_rewards))
            # Mark the pool_block state="paid" (maybe "processed" would be more accurate?)
            pb.state = "paid"
            database.db.getSession().commit()
        except Exception as e:
            database.db.getSession().rollback()
            LOGGER.error("Something went wrong: {} - {}".format(e, traceback.print_exc()))

    #database.db.getSession().commit()
    LOGGER.warn("=== Completed {}".format(PROCESS))
    sys.stdout.flush()
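The weighting in Example #21 scores a C29 share at 0.33 and any larger graph size at 1.0. A quick worked instance (share counts invented):

counts = {29: 100, 31: 10}
value = counts[29] * 0.33 + counts[31] * 1.0   # 33.0 + 10.0 = 43.0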
Example #22
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Worker_stats.get_latest()

    if latest_stat is not None:
        last_height = latest_stat.height
    else:
        latest = Blocks.get_latest()
        while latest is None:
            LOGGER.warn("Waiting for the first block...")
            sleep(10)
            latest = Blocks.get_latest()
        last_height = latest.height
    height = last_height + 1

    LOGGER.warn("Starting at block height: {}".format(height))

    # Generate worker stats records - one per grin block for each active worker
    while True:
        # latest = grin.blocking_get_current_height()
        latest = Blocks.get_latest().height
        share_height = Worker_shares.get_latest_height()
        while share_height is None:
            LOGGER.warn("waiting for the first worker shares")
            sleep(10)
            share_height = Worker_shares.get_latest_height()
        stats_height = height - 1
        LOGGER.warn(
            "Running: chain height: {}, share height: {} vs stats height: {}".
            format(latest, share_height, stats_height))
        while share_height > height:
            try:
                new_stats = workerstats.calculate(height, avg_over_range)
                LOGGER.warn("{} new stats for height {}".format(
                    len(new_stats), height))
                for stats in new_stats:
                    LOGGER.warn("Added Worker_stats: {}".format(stats))
                # mark any existing pool_stats dirty
                pool_stats = Pool_stats.get_by_height(height)
                for stat_rec in new_stats:
                    database.db.getSession().add(stat_rec)
                if pool_stats is not None:
                    LOGGER.warn(
                        "Marked existing pool_stats dirty for height: {}".
                        format(height))
                    pool_stats.dirty = True  # Pool_stats need to be recalculated
                if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                    LOGGER.warn("Commit ---")
                    database.db.getSession().commit()
                height = height + 1
            except Exception as e:
                LOGGER.exception("Something went wrong: {}".format(e))
                database.db.getSession().rollback()
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
Example #23
0
def ShareCommitScheduler(max_lag, logger):
    global SHARES_MUTEX
    global SHARES
    while True:
        try:
            database = lib.get_db()
            chain_height = Blocks.get_latest().height
            share_height = Worker_shares.get_latest_height()
            logger.warn(
                "SHARES commit scheduler - chain_height = {}, share_height = {}"
                .format(chain_height, share_height))
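            # Catch up: commit one Worker_shares record set per block until
            # share records trail the chain tip by no more than max_lag blocks.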
            SHARES_MUTEX.acquire()
            try:
                while share_height < (chain_height - max_lag):
                    share_height += 1
                    if share_height not in SHARES:
                        # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
                        logger.warn(
                            "Processed 0 shares in block {} - Creating filler record"
                            .format(share_height))
                        filler_worker_shares_rec = Worker_shares(
                            height=share_height,
                            user_id=1,  # Pool User
                            timestamp=datetime.utcnow(),
                        )
                        database.db.createDataObj(filler_worker_shares_rec)
                    else:
                        # Commit SHARES
                        logger.warn("Commit SHARES for height: {}".format(
                            share_height))
                        # Get and remove the timestamp
                        ts = SHARES[share_height].pop("timestamp",
                                                      datetime.utcnow())
                        for worker, worker_shares in SHARES[
                                share_height].items():
                            # Get existing share record for this user at this height
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "This is a new share record for worker {} at height {}"
                                    .format(worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                # Add to the existing record for this worker
                                logger.warn(
                                    "Adding to existing Worker Shares record for worker {} at height {}"
                                    .format(worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                logger.warn("Commit new worker shares: {}".format(shares_count))
                                worker_shares_rec.add_shares(
                                    edge_bits, shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                logger.warn("Worker Shares: {}".format(worker_shares_rec))
                    # Ack the RMQ shares messages (currently disabled):
                    # for channel, tags in RMQ_ACK[share_height].items():
                    #     # Bulk-ack up to the latest message we processed
                    #     channel.basic_ack(delivery_tag=max(tags), multiple=True)
                    # Discard the processed messages
                    SHARES.pop(share_height, None)
                    RMQ_ACK.pop(share_height, None)
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
            time.sleep(30)
        except Exception as e:
            lib.teardown_db()
            logger.error("Something went wrong: {}\n{}".format(
                e, traceback.format_exc()))
            time.sleep(10)
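
ShareCommitScheduler drains a global SHARES map keyed by height, then worker, then edge_bits. The producer side is not shown in this example; the following is a minimal sketch, under the assumption that a share-handler thread populates SHARES with the same field names the scheduler reads above (record_share itself is hypothetical).

import threading
from datetime import datetime

SHARES = {}
SHARES_MUTEX = threading.Lock()

def record_share(height, worker, edge_bits, difficulty, status="accepted"):
    """Accumulate one share into the structure the scheduler commits.

    Layout, as read by ShareCommitScheduler above:
      SHARES[height]["timestamp"]         first-seen time for the block
      SHARES[height][worker][edge_bits]   {"difficulty", "accepted",
                                           "rejected", "stale"} counters
    """
    with SHARES_MUTEX:
        block = SHARES.setdefault(height, {"timestamp": datetime.utcnow()})
        worker_map = block.setdefault(worker, {})
        counters = worker_map.setdefault(edge_bits, {
            "difficulty": difficulty,
            "accepted": 0,
            "rejected": 0,
            "stale": 0,
        })
        counters[status] += 1  # status is one of accepted/rejected/stale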