Example #1
    def commit(self, height):
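        # Commit all shares collected for this block height to the database,
        # grouped into one Worker_shares record per worker.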
        print("self.shares.keys(): {}".format(self.shares.keys()))
        if height not in self.shares or len(self.shares[height]) == 0:
            self.LOGGER.warn(
                "Processed 0 shares in block {} - Creating filler record".
                format(height))
            # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
            filler_shares_rec = Worker_shares(height=height,
                                              worker="GrinPool",
                                              timestamp=datetime.utcnow(),
                                              difficulty=DIFFICULTY,
                                              valid=0,
                                              invalid=0)
            lib.get_db().db.createDataObj_ignore_duplicates(filler_shares_rec)
            return

        byWorker = {}
        for nonce in self.shares[height]:
            share = self.shares[height][nonce]
            # Sort shares by worker
            if share.found_by not in byWorker:
                byWorker[share.found_by] = []
            byWorker[share.found_by].append(share)
            # Create Pool_blocks for full solution shares
            if share.is_solution:
                self.addPoolBlock(share)

        # Create a Worker_shares record for each user and commit to DB
        # XXX TODO: Bulk Insert - will be needed when the pool has hundreds or thousands of workers
        for worker in byWorker:
            workerShares = byWorker[worker]
            # Not possible?
            #            if len(workerShares) == 0:
            #                continue
            self.LOGGER.warn(
                "Processed {} shares in block {} for worker {}".format(
                    len(workerShares), height, worker))
            num_valid = sum(int(share.is_valid) for share in workerShares)
            new_shares_rec = Worker_shares(height=height,
                                           worker=worker,
                                           timestamp=datetime.utcnow(),
                                           difficulty=DIFFICULTY,
                                           valid=num_valid,
                                           invalid=len(workerShares) -
                                           num_valid)
            lib.get_db().db.createDataObj_ignore_duplicates(new_shares_rec)
            # We added new worker share data, so if a Pool_stats record already exists at this height, we mark it dirty so it gets recalculated
            stats_rec = Pool_stats.get_by_height(height)
            if stats_rec is not None:
                stats_rec.dirty = True
                lib.get_db().db.getSession().commit()
Example #2
def initialize(window_size, logger):
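    # One-time pool startup: wait for the first block record to appear, then
    # seed the database with an initial Pool_stats and Worker_shares record.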
    database = lib.get_db()
    # Special case for new pool startup
    block_zero = None
    while block_zero is None:
        logger.warn("Waiting for the first block record in the database")
        time.sleep(1)
        block_zero = Blocks.get_earliest()
    print("block_zero={}".format(block_zero))

    stat_height = max(0, block_zero.height + window_size)
    seed_stat = Pool_stats(
        height=stat_height,
        timestamp=datetime.utcnow(),
        active_miners=0,
        shares_processed=0,
        share_counts=None,
        total_blocks_found=0,
        total_shares_processed=0,
        dirty=False,
    )
    database.db.createDataObj(seed_stat)
    seed_share = Worker_shares(
        height=stat_height,
        user_id=1,
        timestamp=datetime.utcnow(),
    )
    database.db.createDataObj(seed_share)
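
A minimal sketch of how this initializer might be wired up at pool startup; the window size of 60 blocks and the logger name are illustrative assumptions, not values taken from the example:

import logging

# Hypothetical startup call; 60 is an assumed stats window size
logger = logging.getLogger("poolStats")
initialize(window_size=60, logger=logger)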
Example #3
    def commit(self, height=None):
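        # Commit shares either for one given height, or for every cached
        # height below the current chain tip (HEIGHT).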
        global HEIGHT
        if height is None:
            block_heights = list(self.shares.keys())
            try:
                # Process all heights, except:
                # Don't process shares from the current block or newer
                block_heights = [h for h in block_heights if h < HEIGHT]
            except ValueError as e:
                pass
        else:
            block_heights = [height]

        #pp.pprint(self.shares)

        if len(block_heights) > 0:
            self.LOGGER.warn(
                "Committing shares for blocks: {}".format(block_heights))

        for height in block_heights:
            if height not in self.shares or len(self.shares[height]) == 0:
                # XXX TODO: Only create filler record if no records already exist for this height
                self.LOGGER.warn(
                    "Processed 0 shares in block {} - Creating filler record".
                    format(height))
                # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
                filler_worker_shares_rec = Worker_shares(
                    height=height,
                    user_id=1,
                    timestamp=datetime.utcnow(),
                )
                database.db.createDataObj(filler_worker_shares_rec)
                continue

            # Sort the shares by worker and graph size
            # byWorker is multi-level structure:
            # 1) hash by worker id
            # 2) hash by graph size
            # 3) List of shares
            byWorker = {}
            for share_hash in self.shares[height]:
                share = self.shares[height][share_hash]
                # Sort shares by worker
                if share.found_by not in byWorker:
                    byWorker[share.found_by] = {}
                if share.edge_bits not in byWorker[share.found_by]:
                    byWorker[share.found_by][share.edge_bits] = []
                #print("XXX Adding share to workerShares: {}".format(share))
                byWorker[share.found_by][share.edge_bits].append(share)
                # Create Pool_blocks for full solution shares
                if share.is_solution:
                    self.addPoolBlock(share)

            #pp.pprint(byWorker)
            # Create/update a Worker_shares record for each user and commit to DB
            # XXX TODO: Bulk Insert - will be needed when the pool has hundreds or thousands of workers
            for worker in byWorker:
                if worker == 0:
                    continue
                workerShares = byWorker[worker]
                #print("workerShares for {} = {}".format(worker, workerShares))
                # Count them (just for logging)
                num_valid_shares = 0
                num_invalid_shares = 0
                num_stale_shares = 0
                for graph_size in workerShares:
                    for share in workerShares[graph_size]:
                        if share.is_valid:
                            num_valid_shares += 1
                        elif share.invalid_reason == 'too late':
                            num_stale_shares += 1
                        else:
                            num_invalid_shares += 1
                self.LOGGER.warn(
                    "Processed {} shares in block {} for user_id {}: Valid: {}, stale: {}, invalid: {}"
                    .format(len(workerShares), height, worker,
                            num_valid_shares, num_stale_shares,
                            num_invalid_shares))
                pp.pprint(workerShares)


                # Get any existing record for this worker at this height
                worker_shares_rec = Worker_shares.get_by_height_and_id(
                    height, worker)
                existing = True
                if worker_shares_rec is None or len(worker_shares_rec) == 0:
                    # No existing record, create it
                    self.LOGGER.warn(
                        "This is a new share record for worker: {}".format(
                            worker))
                    worker_shares_rec = Worker_shares(
                        height=height,
                        user_id=worker,
                        timestamp=datetime.utcnow(),
                    )
                    database.db.createDataObj(worker_shares_rec)
                    existing = False
                else:
                    print("XXXXXXXXXXXXXXXXXXXXX")
                    pp.pprint(worker_shares_rec)
                    worker_shares_rec = worker_shares_rec[0]
                # Create/update Shares records - one per graph size mined in this block
                #pp.pprint(workerShares)
                for a_share_list in workerShares.values():
                    for a_share in a_share_list:
                        if a_share.edge_bits == 0:
                            # We don't actually know what size this share was since we
                            # only got the pool's half. It's invalid anyway, so just
                            # ignore it for now. XXX TODO: something better
                            continue
                        #print("a_share = {}".format(a_share))
                        edge_bits = a_share.edge_bits
                        difficulty = a_share.share_difficulty
                        valid = 0
                        stale = 0
                        invalid = 0
                        # Classify this share as valid, stale, or invalid
                        if a_share.is_valid:
                            valid = 1
                        elif a_share.invalid_reason == 'too late':
                            stale = 1
                        else:
                            invalid = 1
                        worker_shares_rec.add_shares(edge_bits, difficulty,
                                                     valid, invalid, stale)
                try:
                    database.db.getSession().commit()
                    # After we commit share data we need to ack the rmq messages and clear the committed shares
                    self.ack_and_clear(height)
                    # We added new worker share data, so if a Pool_stats record already exists at this height,
                    # we mark it dirty so it gets recalculated by the shareValidator service
                    stats_rec = Pool_stats.get_by_height(height)
                    if stats_rec is not None:
                        stats_rec.dirty = True
                    # Commit any changes
                    if existing:
                        self.LOGGER.warn(
                            "UPDATED worker share record: {}".format(
                                worker_shares_rec))
                    else:
                        self.LOGGER.warn(
                            "NEW worker share record: {}".format(
                                worker_shares_rec))
                    database.db.getSession().commit()
                except Exception as e:
                    self.LOGGER.error(
                        "Failed to commit worker shares for {} at height {} - {}"
                        .format(worker, height, e))
Example #4
def ShareCommitScheduler(max_lag, commit_interval, logger):
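    # Long-running loop: move share data cached in redis into mysql
    # Worker_shares records once a block is at least max_lag behind the
    # chain tip, then sleep for commit_interval seconds.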
    global SHARES_MUTEX
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Waiting for first block")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            SHARES_MUTEX.acquire()
            try:
                logger.warn("= Begin ShareCommitScheduler")
                # Iterate over each sharedata key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_SHAREDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - max_lag:
                        # Commit this record
                        logger.warn(
                            "-- ShareCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_sharedata = redisdb.get(key)
                        redis_sharedata = json.loads(redis_sharedata.decode())
                        ts_str = redis_sharedata.pop("timestamp",
                                                     str(datetime.utcnow()))
                        ts = datetime.strptime(
                            ts_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                        for worker, worker_shares in redis_sharedata.items():
                            # Get any existing record
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "New share record for worker {} at height {}"
                                    .format(worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                logger.warn(
                                    "Add to existing record for worker {} at height {}"
                                    .format(worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                worker_shares_rec.add_shares(
                                    edge_bits, shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                # Debug
                                logger.warn("Worker Shares: {}".format(
                                    worker_shares_rec))
                        # We wrote this record to mysql, so remove the redis cache
                        database.db.getSession().commit()
                        redisdb.delete(key)
                # Write filler records if needed
                share_height = Worker_shares.get_latest_height()
                if share_height is None:
                    share_height = grin.blocking_get_current_height()
                share_height = share_height + 1
                while share_height < (chain_height - max_lag):
                    logger.warn(
                        "Processed 0 shares in block {} - Creating filler record"
                        .format(share_height))
                    filler_worker_shares_rec = Worker_shares(
                        height=share_height,
                        user_id=1,  # Pool User
                        timestamp=datetime.utcnow(),
                    )
                    database.db.createDataObj(filler_worker_shares_rec)
                    share_height += 1
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
                logger.warn("= End ShareCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            lib.teardown_db()
            logger.exception("Something went wrong: {} ".format(
                traceback.format_exc()))
            time.sleep(10)
Example #5
def ShareCommitScheduler(max_lag, logger):
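    # Long-running loop: flush the in-memory SHARES cache into mysql
    # Worker_shares records once a block is at least max_lag behind the tip.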
    global SHARES_MUTEX
    global SHARES
    while True:
        try:
            database = lib.get_db()
            chain_height = Blocks.get_latest().height
            share_height = Worker_shares.get_latest_height()
            logger.warn(
                "SHARES commit scheduler - chain_height = {}, share_height = {}"
                .format(chain_height, share_height))
            SHARES_MUTEX.acquire()
            try:
                while share_height < (chain_height - max_lag):
                    share_height += 1
                    if share_height not in SHARES:
                        # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
                        logger.warn(
                            "Processed 0 shares in block {} - Creating filler record"
                            .format(share_height))
                        filler_worker_shares_rec = Worker_shares(
                            height=share_height,
                            user_id=1,  # Pool User
                            timestamp=datetime.utcnow(),
                        )
                        database.db.createDataObj(filler_worker_shares_rec)
                    else:
                        # Commit SHARES
                        logger.warn("Commit SHARES for height: {}".format(
                            share_height))
                        # Get and remove the timestamp
                        ts = SHARES[share_height].pop("timestamp",
                                                      datetime.utcnow())
                        for worker, worker_shares in SHARES[share_height].items():
                            # Get existing share record for this user at this height
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "This is a new share record for worker {} at height {}"
                                    .format(worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                # Add to the existing record
                                logger.warn(
                                    "Add to existing Worker Shares record for worker {} at height {}"
                                    .format(worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                logger.warn(
                                    "Commit new worker shares: {}".format(
                                        shares_count))
                                worker_shares_rec.add_shares(
                                    edge_bits, shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                logger.warn("Worker Shares: {}".format(
                                    worker_shares_rec))
                    # Ack the RMQ shares messages
                    # for channel, tags in RMQ_ACK[share_height].items():
                    #     # bulk-ack up to the latest message we processed
                    #     channel.basic_ack(delivery_tag=max(tags), multiple=True)
                    # Discard the processed messages
                    SHARES.pop(share_height, None)
                    RMQ_ACK.pop(share_height, None)
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
            time.sleep(30)
        except Exception as e:
            lib.teardown_db()
            logger.error("Something went wrong: {}\n{}".format(
                e,
                traceback.format_exc().splitlines()))
            time.sleep(10)
Example #6
    def commit(self, height=None):
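        # Commit shares for one height (or all cached heights below the chain
        # tip), creating or updating a Worker_shares record per worker.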
        global HEIGHT
        database = lib.get_db()
        if height is None:
            block_heights = list(self.shares.keys())
            try:
                # Process all heights, except:
                # Don't process shares from the current block or newer
                block_heights = [h for h in block_heights if h < HEIGHT]
            except ValueError as e:
                pass
            self.LOGGER.warn(
                "Committing shares for blocks: {}".format(block_heights))
        else:
            block_heights = [height]

        self.LOGGER.warn(
            "Will commit shares for blocks: {} - (current height: {})".format(
                block_heights, HEIGHT))
        for height in block_heights:
            if height not in self.shares or len(self.shares[height]) == 0:
                # XXX TODO: Only create filler record if no records already exist for this height
                self.LOGGER.warn(
                    "Processed 0 shares in block {} - Creating filler record".
                    format(height))
                # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
                filler_shares_rec = Worker_shares(height=height,
                                                  worker="GrinPool",
                                                  timestamp=datetime.utcnow(),
                                                  difficulty=DIFFICULTY,
                                                  valid=0,
                                                  invalid=0)
                database.db.createDataObj_ignore_duplicates(filler_shares_rec)
                continue

            byWorker = {}
            for nonce in self.shares[height]:
                share = self.shares[height][nonce]
                # Sort shares by worker
                if share.found_by not in byWorker:
                    byWorker[share.found_by] = []
                byWorker[share.found_by].append(share)
                # Create Pool_blocks for full solution shares
                if share.is_solution:
                    self.addPoolBlock(share)

            # Create/update a Worker_shares record for each user and commit to DB
            # XXX TODO: Bulk Insert - will be needed when the pool has hundreds or thousands of workers
            for worker in byWorker:
                workerShares = byWorker[worker]
                self.LOGGER.warn(
                    "Processed {} shares in block {} for worker {}".format(
                        len(workerShares), height, worker))
                num_valid = sum(
                    int(share.is_valid) for share in workerShares)
                # Get any existing record for this worker at this height
                existing_shares_rec = Worker_shares.get_by_user_and_height(
                    worker, height)
                if len(existing_shares_rec) == 0:
                    # No existing record, create it
                    self.LOGGER.warn(
                        "New share record for {} at height {} with {} valid shares, {} invalid shares"
                        .format(worker, height, num_valid,
                                len(workerShares) - num_valid))
                    new_shares_rec = Worker_shares(height=height,
                                                   worker=worker,
                                                   timestamp=datetime.utcnow(),
                                                   difficulty=DIFFICULTY,
                                                   valid=num_valid,
                                                   invalid=len(workerShares) -
                                                   num_valid)
                    database.db.createDataObj_ignore_duplicates(new_shares_rec)
                else:
                    existing_shares_rec = existing_shares_rec[0]
                    self.LOGGER.warn(
                        "Updated share record for {} at height {}: Prev={} valid, {} invalid ; Now={} valid, {} invalid"
                        .format(
                            worker, height, existing_shares_rec.valid,
                            existing_shares_rec.invalid,
                            existing_shares_rec.valid + num_valid,
                            existing_shares_rec.invalid + len(workerShares) -
                            num_valid))
                    existing_shares_rec.valid += num_valid
                    existing_shares_rec.invalid += len(
                        workerShares) - num_valid
                # After we commit share data we need to clear it
                self.clear(height)
                # We added new worker share data, so if a Pool_stats record already exists at this height,
                # we mark it dirty so it gets recalculated by the shareValidator service
                stats_rec = Pool_stats.get_by_height(height)
                if stats_rec is not None:
                    stats_rec.dirty = True
                # Commit any changes
                database.db.getSession().commit()