Example #1
def ShareCommitScheduler(interval, database):
    global LOGGER
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    # Replace the passed-in handle with a fresh DB connection for this thread
    database = lib.get_db()

    try:
        # XXX TODO:  enhance
        while True:
            bc_height = Blocks.get_latest().height  # or: grin.blocking_get_current_height()
            LOGGER.warn(
                "HEIGHT={}, POOLSHARE_HEIGHT={}, GRINSHARE_HEIGHT={}".format(
                    HEIGHT, POOLSHARE_HEIGHT, GRINSHARE_HEIGHT))
            while (HEIGHT < POOLSHARE_HEIGHT
                   and HEIGHT < GRINSHARE_HEIGHT) or (bc_height > HEIGHT):
                # Commit and purge current block share data if we are starting a new block
                LOGGER.warn("Commit shares for height: {}".format(HEIGHT))
                # time.sleep(5) # Give straggler shares a chance to come in
                SHARES.commit(HEIGHT)
                HEIGHT = HEIGHT + 1
            # Commit and purge all old share data (except current block) every 'interval' seconds
            try:
                SHARES.commit()  # All except current block
            except Exception as e:
                LOGGER.error("Failed to commit: {}".format(e))
            time.sleep(interval)
    except Exception as e:
        LOGGER.error("Something went wrong: {}\n{}".format(
            e,
            traceback.format_exc().splitlines()))
        time.sleep(interval)
    lib.teardown_db()
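
A minimal sketch of how this scheduler might be launched, assuming the surrounding module has already initialized the LOGGER, SHARES, and HEIGHT globals; the daemon thread and the 30-second interval are illustrative assumptions, not part of the original source:

import threading

# Hypothetical startup wiring (not from the original code): run the scheduler
# on a daemon thread so share commits happen in the background.
commit_thread = threading.Thread(
    target=ShareCommitScheduler,
    args=(30, None),  # interval in seconds; the database arg is replaced inside
    daemon=True)
commit_thread.start()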
Example #2
def RigDataCommitScheduler(max_lag, commit_interval, logger):
    global RIGDATA_MUTEX
    global RIGDATA_GROUPSIZE
    global REDIS_RIGDATA_KEY
    global REDIS_RIGDATA_EXPIRETIME
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Cant get latest block from database")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            lib.teardown_db()
            RIGDATA_MUTEX.acquire()
            try:
                logger.warn("= Begin RigDataCommitScheduler")
                # Iterate over each rigdata cache key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_RIGDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - RIGDATA_GROUPSIZE - max_lag:
                        # Commit this set of rigdata records
                        logger.warn(
                            "-- RigDataCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_cached_rigdata = redisdb.get(key)
                        redis_cached_rigdata = json.loads(
                            redis_cached_rigdata.decode())
                        for user, rigdata in redis_cached_rigdata.items():
                            # Use a distinct name so we don't shadow the scan pattern above
                            worker_key = "{}.{}.{}".format(
                                REDIS_RIGDATA_KEY, share_height, user)
                            if redisdb.exists(worker_key):
                                # XXX TODO
                                logger.warn(
                                    "XXX TODO: DUPLICATE RIGDATA WORKER KEY - MERGE ???"
                                )
                            else:
                                redisdb.set(worker_key,
                                            json.dumps(rigdata),
                                            ex=REDIS_RIGDATA_EXPIRETIME)
                        # Wrote this rigdata to redis, so remove the cache record now
                        redisdb.delete(key)
            finally:
                RIGDATA_MUTEX.release()
                logger.warn("= End RigDataCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            logger.exception("Something went wrong: {}".format(
                traceback.format_exc()))
            time.sleep(10)
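
For reference, a short self-contained sketch of the two redis key layouts the loop above assumes; the prefix value here is a hypothetical stand-in for whatever the real config provides:

REDIS_RIGDATA_KEY = "rigdata"  # hypothetical prefix; the real value comes from config

height, user = 429500, 42
cache_key = "{}-{}".format(REDIS_RIGDATA_KEY, height)  # per-height cache record
worker_key = "{}.{}.{}".format(REDIS_RIGDATA_KEY, height, user)  # committed per-worker record

# share_height is recovered from the cache key exactly as the scheduler does it
assert int(cache_key.split("-")[1]) == height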
Example #3
def RigDataCommitScheduler(max_lag, logger):
    global RIGDATA_MUTEX
    global RIGDATA
    global REDIS_RIGDATA_KEY
    global REDIS_RIGDATA_EXPIRETIME
    while True:
        try:
            redisdb = lib.get_redis_db()
            while True:
                database = lib.get_db()
                chain_height = Blocks.get_latest().height
                logger.warn(
                    "RIGDATA commit scheduler - chain_height = {}".format(
                        chain_height))
                RIGDATA_MUTEX.acquire()
                try:
                    for height in [
                            h for h in RIGDATA.keys()
                            if h < (chain_height - max_lag)
                    ]:
                        logger.warn(
                            "Commit RIGDATA for height: {}".format(height))
                        # Serialize RIGDATA and write to redis
                        for user, rigdata in RIGDATA[height].items():
                            key = "{}.{}.{}".format(REDIS_RIGDATA_KEY, height,
                                                    user)
                            if redisdb.exists(key):
                                logger.warn(
                                    "XXX TODO - MERGE THIS ADDITIONAL SHARE DATA"
                                )
                            else:
                                redisdb.set(key,
                                            json.dumps(rigdata),
                                            ex=REDIS_RIGDATA_EXPIRETIME)
                        RIGDATA.pop(height, None)
                finally:
                    RIGDATA_MUTEX.release()
                    lib.teardown_db()
                time.sleep(30)
        except Exception as e:
            logger.error("Something went wrong: {}\n{}".format(
                e,
                traceback.format_exc().splitlines()))
            lib.teardown_db()
            time.sleep(10)
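
A minimal sketch of the in-memory RIGDATA shape this variant drains, with made-up values; the nested layout ({height: {user_id: rig stats}}) is inferred from the loop above:

# Hypothetical in-memory cache shape: {height: {user_id: rig_stats_dict}}
RIGDATA = {
    429500: {42: {"gps": 1.7, "accepted": 12}},  # illustrative rig stats
    429508: {42: {"gps": 1.6, "accepted": 9}},
}

# Only heights older than (chain tip - max_lag) are committed, as above
chain_height, max_lag = 429510, 5
eligible = [h for h in RIGDATA.keys() if h < (chain_height - max_lag)]
assert eligible == [429500]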
Example #4
def ShareCommitScheduler(max_lag, commit_interval, logger):
    global SHARES_MUTEX
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Waiting for first block")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            SHARES_MUTEX.acquire()
            try:
                logger.warn("= Begin ShareCommitScheduler")
                # Iterate over each sharedata key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_SHAREDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - max_lag:
                        # Commit this record
                        logger.warn(
                            "-- ShareCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_sharedata = redisdb.get(key)
                        redis_sharedata = json.loads(redis_sharedata.decode())
                        ts_str = redis_sharedata.pop("timestamp",
                                                     str(datetime.utcnow()))
                        ts = datetime.strptime(
                            ts_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                        for worker, worker_shares in redis_sharedata.items():
                            # Get any existing record
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "New share record for worker {} at height {}"
                                    .format(worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                logger.warn(
                                    "Add to existing record for worker {} at height {}"
                                    .format(worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                worker_shares_rec.add_shares(
                                    edge_bits, shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                # Debug
                                logger.warn("Worker Shares: {}".format(
                                    worker_shares_rec))
                        # We wrote this record to mysql, so remove the redis cache
                        database.db.getSession().commit()
                        redisdb.delete(key)
                # Write filler record if needed
                share_height = Worker_shares.get_latest_height()
                if share_height is None:
                    share_height = grin.blocking_get_current_height()
                share_height = share_height + 1
                while share_height < (chain_height - max_lag):
                    logger.warn(
                        "Processed 0 shares in block {} - Creating filler record"
                        .format(share_height))
                    filler_worker_shares_rec = Worker_shares(
                        height=share_height,
                        user_id=1,  # Pool User
                        timestamp=datetime.utcnow(),
                    )
                    database.db.createDataObj(filler_worker_shares_rec)
                    share_height += 1
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
                logger.warn("= End ShareCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            lib.teardown_db()
            logger.exception("Something went wrong: {} ".format(
                traceback.format_exc()))
            time.sleep(10)
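
A self-contained sketch of the cached sharedata record the loop above decodes; the field names mirror the code, while the concrete values and the worker id are made up:

from datetime import datetime
import json

# Hypothetical cached record: a "timestamp" plus per-worker counts keyed by edge_bits
cached = json.dumps({
    "timestamp": "2019-07-01 12:34:56.789",
    "42": {"29": {"difficulty": 1, "accepted": 10, "rejected": 0, "stale": 1}},
})
sharedata = json.loads(cached)
ts_str = sharedata.pop("timestamp", str(datetime.utcnow()))
ts = datetime.strptime(ts_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
assert ts == datetime(2019, 7, 1, 12, 34, 56)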
Example #5
def ShareCommitScheduler(max_lag, logger):
    global SHARES_MUTEX
    global SHARES
    while True:
        try:
            database = lib.get_db()
            chain_height = Blocks.get_latest().height
            share_height = Worker_shares.get_latest_height()
            logger.warn(
                "SHARES commit scheduler - chain_height = {}, share_height = {}"
                .format(chain_height, share_height))
            SHARES_MUTEX.acquire()
            try:
                while share_height < (chain_height - max_lag):
                    share_height += 1
                    if share_height not in SHARES.keys():
                        # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
                        logger.warn(
                            "Processed 0 shares in block {} - Creating filler record"
                            .format(share_height))
                        filler_worker_shares_rec = Worker_shares(
                            height=share_height,
                            user_id=1,  # Pool User
                            timestamp=datetime.utcnow(),
                        )
                        database.db.createDataObj(filler_worker_shares_rec)
                    else:
                        # Commit SHARES
                        logger.warn("Commit SHARES for height: {}".format(
                            share_height))
                        # Get and remove the timestamp
                        ts = SHARES[share_height].pop("timestamp",
                                                      datetime.utcnow())
                        for worker, worker_shares in SHARES[
                                share_height].items():
                            # Get existing share record for this user at this height
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "This is a new share record for worker {} at height {}"
                                    .format(worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                # Add to the existing record
                                logger.warn(
                                    "Add to existing share record for worker {} at height {}"
                                    .format(worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                logger.warn(
                                    "YYY: Commit new worker shares: {}".format(
                                        shares_count))
                                worker_shares_rec.add_shares(
                                    edge_bits, shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                logger.warn("Worker Shares: {}".format(
                                    worker_shares_rec))
                    # Ack the RMQ shares messages
                    # for channel, tags in RMQ_ACK[share_height].items():
                    #     # bulk-ack up to the latest message we processed
                    #     channel.basic_ack(delivery_tag=max(tags), multiple=True)
                    # Discard the processed messages
                    SHARES.pop(share_height, None)
                    RMQ_ACK.pop(share_height, None)
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
            time.sleep(30)
        except Exception as e:
            lib.teardown_db()
            logger.error("Something went wrong: {}\n{}".format(
                e,
                traceback.format_exc().splitlines()))
            time.sleep(10)
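
A minimal sketch of the catch-up walk performed above: every height from one past the last committed share record up through (chain tip - max_lag) is visited exactly once; the heights are made up:

chain_height, max_lag = 429510, 5
share_height = 429500  # last committed share height
visited = []
while share_height < (chain_height - max_lag):
    share_height += 1
    visited.append(share_height)
assert visited == [429501, 429502, 429503, 429504, 429505]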
Example #6
def main():
    global LOGGER
    global CONFIG
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)

    while True:
        try:
            LOGGER.warn("=== Starting {}".format(PROCESS))

            # Connect to DB
            database = lib.get_db()

            # Get the previous audit record to find its height
            previous_audit_record = Pool_audit.getLatest()
            if previous_audit_record is None:
                previous_audit_record = Pool_audit()
                database.db.createDataObj(previous_audit_record)

            # Create new pool audit record
            audit_record = Pool_audit()

            summary_info = wallet.retrieve_summary_info(refresh=True)

            # Set the height by wallet
            audit_record.height = int(summary_info["last_confirmed_height"])
            # Set pool block count
            audit_record.pool_blocks_count = Pool_blocks.count(
                audit_record.height) - Pool_blocks.count(
                    previous_audit_record.height)
            # Audit pools liability vs equity
            audit_record.equity = int(
                summary_info["amount_currently_spendable"]) + int(
                    summary_info["amount_awaiting_confirmation"])
            audit_record.liability = Pool_utxo.get_liability()
            audit_record.balance = audit_record.equity - audit_record.liability

            # Add payouts value
            payments_made = Pool_payment.get_by_height(
                audit_record.height,
                audit_record.height - previous_audit_record.height)
            audit_record.payouts = sum(
                [payment.amount for payment in payments_made])
            # Add payments value
            pool_credits = Pool_credits.get_by_height(
                audit_record.height,
                audit_record.height - previous_audit_record.height)
            total_credits = 0
            if pool_credits is not None:
                for credit in pool_credits:
                    credits_this_block = sum(credit.credits.values())
                    total_credits += credits_this_block
                    print("credits_this_block: {}, total_credits: {}".format(
                        credits_this_block, total_credits))
                audit_record.payments = total_credits
            else:
                audit_record.payments = 0

            # Add and Commit the audit record
            #LOGGER.warn("Create Audit Record: {}".format(json.dumps(audit_record)))
            database.db.createDataObj(audit_record)

            LOGGER.warn("=== Completed {}".format(PROCESS))
        except Exception as e:
            lib.teardown_db()
            LOGGER.exception("Something went wrong: {} ".format(
                traceback.format_exc()))

        time.sleep(999)
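
A worked example of the audit arithmetic above, with made-up nanogrin amounts; the variable names follow the wallet summary keys used in the code:

# Hypothetical wallet summary values (nanogrin)
amount_currently_spendable = 500_000_000_000
amount_awaiting_confirmation = 100_000_000_000
liability = 550_000_000_000  # sum of all user balances held in Pool_utxo

equity = amount_currently_spendable + amount_awaiting_confirmation
balance = equity - liability  # positive means the pool holds a surplus
assert equity == 600_000_000_000
assert balance == 50_000_000_000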