Example no. 1
def RigDataCommitScheduler(max_lag, commit_interval, logger):
    global RIGDATA_MUTEX
    global RIGDATA_GROUPSIZE
    global REDIS_RIGDATA_KEY
    global REDIS_RIGDATA_EXPIRETIME
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Cant get latest block from database")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            lib.teardown_db()
            RIGDATA_MUTEX.acquire()
            try:
                logger.warn("= Begin RigDataCommitScheduler")
                # Iterate over each rigdata cache key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_RIGDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - RIGDATA_GROUPSIZE - max_lag:
                        # Commit this set of rigdata records
                        logger.warn(
                            "-- RigDataCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_cached_rigdata = redisdb.get(key)
                        redis_cached_rigdata = json.loads(
                            redis_cached_rigdata.decode())
                        for user, rigdata in redis_cached_rigdata.items():
                            redis_key = "{}.{}.{}".format(
                                REDIS_RIGDATA_KEY, share_height, user)
                            if redisdb.exists(redis_key):
                                # XXX TODO
                                logger.warn(
                                    "XXX TODO: DUPLICATE RIGDATA WORKER KEY - MERGE ???"
                                )
                            else:
                                redisdb.set(redis_key,
                                            json.dumps(rigdata),
                                            ex=REDIS_RIGDATA_EXPIRETIME)
                        # Wrote this rigdata to REDIS, so remove the cache record now
                        redisdb.delete(key)
            finally:
                RIGDATA_MUTEX.release()
                logger.warn("= End RigDataCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            logger.exception("Something went wrong: {}".format(
                traceback.format_exc()))
            time.sleep(10)
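A minimal launcher sketch for the scheduler above (not part of the original source): the thread wiring mirrors Example no. 9 below, and the max_lag/commit_interval values are illustrative assumptions.

import logging
import threading

logger = logging.getLogger("rigDataCommitScheduler")
# Hypothetical tuning: tolerate 3 blocks of lag, commit once per minute
commit_thread = threading.Thread(target=RigDataCommitScheduler,
                                 args=(3, 60, logger),
                                 daemon=True)
commit_thread.start()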
Example no. 2
 def post(self):
     global database
     LOGGER = lib.get_logger(PROCESS)
     username = None
     password = None
     try:
         debug and print("json request = {}".format(request.form))
         username = request.form.get('username')
         password = request.form.get('password')
         debug and LOGGER.warn("PoolAPI_users POST: user:{} password:{}".format(username, password))
     except AttributeError as e:
         LOGGER.warn("Missing username or password - {}".format(str(e)))
     if not username or not password:
         response = jsonify({ 'message': 'Missing arguments: username and password required' })
         response.status_code = 400
         return response
     if "." in username:
         response = jsonify({ 'message': 'Invalid Username: May not contain "."' })
         response.status_code = 400
         return response
     # Check if the username is taken
     exists = Users.check_username_exists(username)
     if exists:
         debug and print("Failed to add - conflict with existing user = {}".format(username))
         response = jsonify({ 'message': 'Conflict with existing account' })
         response.status_code = 409
         return response
     # Create the users record
     user_rec = Users.create(username, password)
     if user_rec is None:
         debug and print("Failed to add - unable to create a new user record")
         response = jsonify({ 'message': 'System Error: Failed to create account' })
         response.status_code = 500
         return response
     # initialize a worker_stats record for this user (previous block) so they get instant feedback on the UI
     lb = Blocks.get_latest()
     if lb is not None:
         height = lb.height
         initial_stat = Worker_stats(datetime.utcnow(), height, user_rec.id)
         database.db.createDataObj(initial_stat)
     debug and print("Added user = {}".format(user_rec))
     response = jsonify({ 'username': user_rec.username, 'id': user_rec.id })
     response.status_code = 201
     return response
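A hedged client-side sketch of calling this endpoint; the host, port, and path are assumptions, not taken from the source.

import requests

# Hypothetical URL; adjust host/port/path for the actual deployment
resp = requests.post("http://localhost:13423/pool/users",
                     data={"username": "alice", "password": "s3cret"})
print(resp.status_code, resp.json())  # expect 201 and {'username': ..., 'id': ...}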
Example no. 3
 def get(self, id=None, height=0, range=0, fields=None):
     database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     LOGGER.warn(
         "WorkerAPI_shares get id:{} height:{} range:{} fields:{}".format(
             id, height, range, fields))
     fields = lib.fields_to_list(fields)
     if height == 0:
         height = Blocks.get_latest().height
     shares_records = []
     if id is None:
         for shares in Worker_shares.get_by_height(height, range):
             shares_records.append(shares.to_json(fields))
         return shares_records
     else:
         if range is None:
             res = Worker_shares.get_by_height_and_id(height, id)
             #print("res = {}".format(res))
             if res is None:
                 return []
             return res.to_json(fields)
         else:
             for share in Worker_shares.get_by_height_and_id(
                     height, id, range):
                 shares_records.append(share.to_json(fields))
             return shares_records
Example no. 4
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Initialize poolStats records if this is the first run
    latest_stat = Pool_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup
        poolstats.initialize(avg_over_range, LOGGER)

    latest_stat = Pool_stats.get_latest()
    LOGGER.warn("Starting at height: {}".format(latest_stat.height))

    # Generate pool stats records - one per grin block
    while True:
        # Find the height of the latest stats record
        latest_stat = Pool_stats.get_latest()
        height = latest_stat.height + 1
        LOGGER.warn("Starting at height: {}".format(height))
        try:
            while True:
                share_height = Worker_shares.get_latest_height()
                while share_height is None:
                    LOGGER.warn("Waiting for shares")
                    sleep(10)
                    share_height = Worker_shares.get_latest_height()
                latest = Blocks.get_latest().height
                stats_height = height - 1
                LOGGER.warn(
                    "Running: Chain height: {}, share height: {},  stats height: {}"
                    .format(latest, share_height, stats_height))
                while share_height - 1 > height:
                    new_stats = poolstats.calculate(height, avg_over_range)
                    # Batch new stats when possible, but commit at reasonable intervals
                    database.db.getSession().add(new_stats)
                    if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                        database.db.getSession().commit()
                    LOGGER.warn(
                        "Added Pool_stats for block: {} - {} {} {}".format(
                            new_stats.height, new_stats.gps,
                            new_stats.active_miners,
                            new_stats.shares_processed))
                    height = height + 1
                    sys.stdout.flush()
                sleep(check_interval)
        except Exception as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))
            database.db.getSession().rollback()
            sleep(check_interval)

    LOGGER.warn("=== Completed {}".format(PROCESS))
Example no. 5
def ShareCommitScheduler(interval, database):
    global LOGGER
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    database = lib.get_db()

    try:
        # XXX TODO:  enhance
        while True:
            bc_height = Blocks.get_latest().height  # grin.blocking_get_current_height()
            LOGGER.warn(
                "HEIGHT={}, POOLSHARE_HEIGHT={}, GRINSHARE_HEIGHT={}".format(
                    HEIGHT, POOLSHARE_HEIGHT, GRINSHARE_HEIGHT))
            while (HEIGHT < POOLSHARE_HEIGHT
                   and HEIGHT < GRINSHARE_HEIGHT) or (bc_height > HEIGHT):
                # Commit and purge current block share data if we are starting a new block
                LOGGER.warn("Commit shares for height: {}".format(HEIGHT))
                # time.sleep(5) # Give straggler shares a chance to come in
                SHARES.commit(HEIGHT)
                HEIGHT = HEIGHT + 1
            # Commit and purge all old share data (except current block) every 'interval' seconds
            try:
                SHARES.commit()  # All except current block
            except Exception as e:
                LOGGER.error("Failed to commit: {}".format(e))
            time.sleep(interval)
    except Exception as e:
        LOGGER.error("Something went wrong: {}\n{}".format(
            e,
            traceback.format_exc().splitlines()))
        time.sleep(interval)
    lib.teardown_db()
Example no. 6
 def get(self, id=None, height=None, range=0, fields=None):
     global database
     #database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     # AUTH FILTER
     if id != g.user.id:
         response = jsonify({ 'message': 'Not authorized to access data for other users' })
         response.status_code = 403
         return response
     debug and LOGGER.warn("WorkerAPI_shares get id:{} height:{} range:{} fields:{}".format(id, height, range, fields))
     # Enforce range limit
     if range is not None:
         range = min(range, worker_shares_range_limit)
     fields = lib.fields_to_list(fields)
     if height is None:
         return Worker_shares.get_latest_height(id)
     if height == 0:
         height = Blocks.get_latest().height
     shares_records = []
     if id is None:
         for shares in Worker_shares.get_by_height(height, range):
             shares_records.append(shares.to_json(fields))
         return shares_records
     else:
         if range is None:
             res = Worker_shares.get_by_height_and_id(height, id)
             #print("res = {}".format(res))
             if res is None:
                 return []
             return res.to_json(fields)
         else:
             for share in Worker_shares.get_by_height_and_id(height, id, range):
                 shares_records.append(share.to_json(fields))
             return shares_records
Example no. 7
 def get(self, id=None, height=0, range=None, fields=None):
     database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     LOGGER.warn(
         "WorkerAPI_stats get id:{} height:{} range:{} fields:{}".format(
             id, height, range, fields))
     fields = lib.fields_to_list(fields)
     if height == 0:
         height = Blocks.get_latest().height
     stats = []
     if id is None:
         for stat in Worker_stats.get_by_height(height, range):
             #print("YYY: {}".format(stats))
             stats.append(stat.to_json(fields))
         return stats
     else:
         if range is None:
             res = Worker_stats.get_by_height_and_id(id, height)
             if res is None:
                 return "[]".to_json()
             return res.to_json(fields)
         else:
             for stat in Worker_stats.get_by_height_and_id(
                     id, height, range):
                 stats.append(stat.to_json(fields))
             return stats
Example no. 8
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Worker_stats.get_latest()

    if latest_stat is not None:
        last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn("Starting at block height: {}".format(height))

    # Generate worker stats records - one per grin block for each active worker
    while True:
        # latest = grin.blocking_get_current_height()
        latest = Blocks.get_latest().height
        #LOGGER.warn("Latest Network Block Height = {}".format(latest))
        while latest > height:
            try:
                new_stats = workerstats.calculate(height, avg_over_range)
                LOGGER.warn("{} new stats for height {}".format(
                    len(new_stats), height))
                # mark any existing pool_stats dirty
                pool_stats = Pool_stats.get_by_height(height)
                if pool_stats is not None:
                    LOGGER.warn(
                        "Marked existing pool_stats dirty for height: {}".
                        format(height))
                    pool_stats.dirty = True
                database.db.getSession().bulk_save_objects(new_stats)
                if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                    database.db.getSession().commit()
                for stats in new_stats:
                    LOGGER.warn(
                        "Added Worker_stats for block: {}, Worker: {} - {} {} {} {} {} {}"
                        .format(stats.height, stats.worker, stats.gps,
                                stats.shares_processed,
                                stats.total_shares_processed, stats.grin_paid,
                                stats.total_grin_paid, stats.balance))
                height = height + 1
            except Exception as e:
                LOGGER.error("Something went wrong: {}".format(e))
                LOGGER.error("Traceback: {}".format(
                    traceback.format_exc().splitlines()))
                database.db.getSession().rollback()
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
Example no. 9
def main():
    global LOGGER
    global CONFIG
    global SHARES
    global HEIGHT
    global GRINSHARE_HEIGHT
    global POOLSHARE_HEIGHT
    global SHARE_EXPIRETIME
    global database
    global RABBITMQ_USER
    global RABBITMQ_PASSWORD
    CONFIG = lib.get_config()
    atexit.register(lib.teardown_db)

    GRINSHARE_HEIGHT = 0
    POOLSHARE_HEIGHT = 0

    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    SHARE_EXPIRETIME = int(CONFIG[PROCESS]["share_expire_time"])
    commit_interval = int(CONFIG[PROCESS]["commit_interval"])
    rmq_endpoints = json.loads(CONFIG[PROCESS]["rmq"])

    RABBITMQ_USER = os.environ["RABBITMQ_USER"]
    RABBITMQ_PASSWORD = os.environ["RABBITMQ_PASSWORD"]

    database = lib.get_db()
    HEIGHT = Worker_shares.get_latest_height()
    while HEIGHT is None:
        LOGGER.warn("Waiting on the first grin block...")
        time.sleep(5)
        latest_block = Blocks.get_latest()
        if latest_block is not None:
            HEIGHT = latest_block.height

    SHARES = WorkerShares(LOGGER)

    ##
    # Start a thread to commit shares
    commit_thread = threading.Thread(target=ShareCommitScheduler,
                                     args=(
                                         commit_interval,
                                         database,
                                     ))
    commit_thread.start()

    ##
    # Start a pika consumer thread for each rabbit we want to consume from
    for rmq in rmq_endpoints:
        rmq_thread = threading.Thread(target=RmqConsumer, args=(rmq, ))
        rmq_thread.start()
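The RmqConsumer target is not shown in the source; below is one plausible shape for it, sketched with pika's BlockingConnection. The queue name "shares" and the auto-ack policy are assumptions.

import pika

def RmqConsumer(rmq_host):
    creds = pika.PlainCredentials(RABBITMQ_USER, RABBITMQ_PASSWORD)
    parameters = pika.ConnectionParameters(host=rmq_host, credentials=creds)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    def on_message(ch, method, properties, body):
        # Real code would parse the share and update SHARES under a mutex
        print("Received share message: {}".format(body))

    channel.basic_consume(queue="shares", on_message_callback=on_message,
                          auto_ack=True)
    channel.start_consuming()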
Example no. 10
 def get(self, height=0, range=None, fields=None):
     global database
     #database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     debug and LOGGER.warn("WorkersAPI_shares get height:{} range:{} fields:{}".format(height, range, fields))
     fields = lib.fields_to_list(fields)
     shares_records = []
     if height == 0:
         height = Blocks.get_latest().height
     for shares in Worker_shares.get_by_height(height, range):
         # AUTH FILTER
         if shares.user_id == g.user.id:
             shares_records.append(shares.to_json(fields))
     return shares_records
Example no. 11
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Grin_stats.get_latest()
    print("latest_stat = {}".format(latest_stat))

    if latest_stat is None:
        LOGGER.warn("Initializing Grin_stats")
        grinstats.initialize(avg_over_range, LOGGER)
        latest_stat = Grin_stats.get_latest()
        print("Finished initializing, latest_stat height = {}".format(
            latest_stat.height))
    last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn(
        "grinStats service starting at block height: {}".format(height))

    # Generate grin stats records - one per grin block
    while True:
        #latest_db_block = Blocks.get_latest()
        latest = Blocks.get_latest().height
        while latest >= height:
            try:
                new_stats = grinstats.calculate(height, avg_over_range)
                # Batch new stats when possible, but commit at reasonable intervals
                database.db.getSession().add(new_stats)
                #                if( (height % BATCHSZ == 0) or (height >= (latest-10)) ):
                database.db.getSession().commit()
                LOGGER.warn(
                    "Added Grin_stats for block: {} - gps:{} diff:{}".format(
                        new_stats.height, new_stats.gps, new_stats.difficulty))
                height = height + 1
            except AssertionError as e:
                LOGGER.error("Something went wrong: {}".format(e))
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
Example no. 12
    def get(self, height=0, range=None, fields=None):
        global database
        #database = lib.get_db()
        LOGGER = lib.get_logger(PROCESS)
        LOGGER.warn("WorkersAPI_stats get height:{} range:{} fields:{}".format(
            height, range, fields))
        fields = lib.fields_to_list(fields)
        stats = []
        if height == 0:
            height = Blocks.get_latest().height
        for stat in Worker_stats.get_by_height(height, range):
            # AUTH FILTER
            if stat.user_id == ADMIN_ID:
                stats.append(stat.to_json(fields))

        return stats
Example no. 13
def RigDataCommitScheduler(max_lag, logger):
    global RIGDATA_MUTEX
    global RIGDATA
    global REDIS_RIGDATA_KEY
    global REDIS_RIGDATA_EXPIRETIME
    while True:
        try:
            redisdb = lib.get_redis_db()
            while True:
                database = lib.get_db()
                chain_height = Blocks.get_latest().height
                logger.warn(
                    "RIGDATA commit scheduler - chain_height = {}".format(
                        chain_height))
                RIGDATA_MUTEX.acquire()
                try:
                    for height in [
                            h for h in RIGDATA.keys()
                            if h < (chain_height - max_lag)
                    ]:
                        logger.warn(
                            "Commit RIGDATA for height: {}".format(height))
                        # Serialize RIGDATA and write to redis
                        for user, rigdata in RIGDATA[height].items():
                            key = "{}.{}.{}".format(REDIS_RIGDATA_KEY, height,
                                                    user)
                            if redisdb.exists(key):
                                logger.warn(
                                    "XXX TODO - MERGE THIS ADDITIONAL SHARE DATA"
                                )
                            else:
                                redisdb.set(key,
                                            json.dumps(rigdata),
                                            ex=REDIS_RIGDATA_EXPIRETIME)
                        RIGDATA.pop(height, None)
                finally:
                    RIGDATA_MUTEX.release()
                    lib.teardown_db()
                time.sleep(30)
        except Exception as e:
            logger.error("Something went wrong: {}\n{}".format(
                e,
                traceback.format_exc().splitlines()))
            lib.teardown_db()
            time.sleep(10)
Example no. 14
 def get(self, height=None, range=None, fields=None):
     LOGGER = lib.get_logger(PROCESS)
     LOGGER.warn("GrinAPI_blocks get height:{} range:{} fields:{}".format(
         height, range, fields))
     fields = lib.fields_to_list(fields)
     if height is None or height == 0:
         blocks = Blocks.get_latest(range)
     else:
         blocks = Blocks.get_by_height(height, range)
     if range is None:
         if blocks is None:
             return None
         return blocks.to_json(fields)
     else:
         bl = []
         for block in blocks:
             bl.append(block.to_json(fields))
         return bl
Example no. 15
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Pool_stats.get_latest()
    if latest_stat is None:
        # Special case for new pool startup
        poolstats.initialize()
        last_height = 0
    else:
        last_height = latest_stat.height
    height = last_height + 1
    LOGGER.warn("Starting at height: {}".format(height))

    # Generate pool stats records - one per grin block
    while True:
        try:
            # latest = grin.blocking_get_current_height()
            latest = Blocks.get_latest().height
            while latest > height:
                new_stats = poolstats.calculate(height, avg_over_range)
                # Batch new stats when possible, but commit at reasonable intervals
                database.db.getSession().add(new_stats)
                if (height % BATCHSZ == 0) or (height >= (latest - 10)):
                    database.db.getSession().commit()
                LOGGER.warn("Added Pool_stats for block: {} - {} {} {}".format(new_stats.height, new_stats.gps, new_stats.active_miners, new_stats.shares_processed))
                height = height + 1
                sys.stdout.flush()
        except Exception as e:  # AssertionError as e:
            LOGGER.error("Something went wrong: {} - {}".format(e, traceback.print_stack()))
            sleep(check_interval)
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
Example no. 16
def avg_network_gps(height=0, range=60):
    if height == 0:
        height = Blocks.get_latest().height
    if range <= 0:
        range = 1
    grinstats = Grin_stats.get_by_height(height, range)
    gpslists = [stat.gps for stat in grinstats]
    gpslists_len = len(gpslists)
    if gpslists_len == 0:
        return 0
    gpstotals = {}
    for gpslist in gpslists:
        for gps in gpslist:
            if gps.edge_bits not in gpstotals:
                gpstotals[gps.edge_bits] = 0
            gpstotals[gps.edge_bits] += gps.gps
    gpsavgs = {}
    for sz, gpstotal in gpstotals.items():
        gpsavgs[sz] = gpstotal / gpslists_len
    return gpsavgs
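A self-contained sketch of the same per-edge_bits averaging, using plain tuples in place of the Grin_stats ORM objects; the sample numbers are made up.

# Each inner list stands in for one block's stat.gps list
gpslists = [
    [(29, 120.5), (31, 14.2)],
    [(29, 118.0), (31, 15.0)],
]
gpstotals = {}
for gpslist in gpslists:
    for edge_bits, gps in gpslist:
        gpstotals[edge_bits] = gpstotals.get(edge_bits, 0) + gps
# Average each proof-of-work size over the sampled blocks
gpsavgs = {sz: total / len(gpslists) for sz, total in gpstotals.items()}
print(gpsavgs)  # {29: 119.25, 31: 14.6}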
Example no. 17
 def get(self, id, height=0, range=None, fields=None):
     global database
     #database = lib.get_db()
     LOGGER = lib.get_logger(PROCESS)
     # AUTH FILTER
     if id != g.user.id:
         response = jsonify(
             {'message': 'Not authorized to access data for other users'})
         response.status_code = 403
         return response
     debug and LOGGER.warn(
         "WorkerAPI_stats get id:{} height:{} range:{} fields:{}".format(
             id, height, range, fields))
     # Enforce range limit
     if range is not None:
         range = min(range, worker_stats_range_limit)
     fields = lib.fields_to_list(fields)
     res = None
     if range is None:
         # Getting a single record
         if height == 0:
             # Get the most recent stats for this user
             res = Worker_stats.get_latest_by_id(id)
         else:
             res = Worker_stats.get_by_height_and_id(id, height)
         if res is None:
             return None
         return res.to_json(fields)
     else:
         # Getting a range of records
         if height == 0:
             height = Blocks.get_latest().height
         res = Worker_stats.get_by_height_and_id(id, height, range)
         if res is None:
             return None
         stats = []
         for stat in res:
             stats.append(stat.to_json(fields))
         return stats
Example no. 18
def ShareCommitScheduler(max_lag, commit_interval, logger):
    global SHARES_MUTEX
    while True:
        try:
            database = lib.get_db()
            latest_block = Blocks.get_latest()
            while latest_block is None:
                logger.warn("Waiting for first block")
                time.sleep(10)
                latest_block = Blocks.get_latest()
            chain_height = latest_block.height
            SHARES_MUTEX.acquire()
            try:
                logger.warn("= Begin ShareCommitScheduler")
                # Iterate over each sharedata key in redis
                redisdb = lib.get_redis_db()
                redis_key = "{}-*".format(REDIS_SHAREDATA_KEY)
                keys = []
                for key in redisdb.scan_iter(match=redis_key, count=100):
                    keys.append(key.decode())
                for key in sorted(keys):
                    share_height = int(key.split("-")[1])
                    if share_height < chain_height - max_lag:
                        # Commit this record
                        logger.warn(
                            "-- ShareCommitScheduler processing record at height: {}"
                            .format(share_height))
                        redis_sharedata = redisdb.get(key)
                        redis_sharedata = json.loads(redis_sharedata.decode())
                        ts_str = redis_sharedata.pop("timestamp",
                                                     str(datetime.utcnow()))
                        ts = datetime.strptime(
                            ts_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                        for worker, worker_shares in redis_sharedata.items():
                            # Get any existing record
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "New share record for worker {} at height {}"
                                    .format(worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                logger.warn(
                                    "Add to existing record for worker {} at height {}"
                                    .format(worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                worker_shares_rec.add_shares(
                                    edge_bits, shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                # Debug
                                logger.warn("Worker Shares: {}".format(
                                    worker_shares_rec))
                        # We wrote this record to mysql, so remove the redis cache
                        database.db.getSession().commit()
                        redisdb.delete(key)
                # Write filler records if needed
                share_height = Worker_shares.get_latest_height()
                if share_height is None:
                    share_height = grin.blocking_get_current_height()
                share_height = share_height + 1
                while share_height < (chain_height - max_lag):
                    logger.warn(
                        "Processed 0 shares in block {} - Creating filler record"
                        .format(share_height))
                    filler_worker_shares_rec = Worker_shares(
                        height=share_height,
                        user_id=1,  # Pool User
                        timestamp=datetime.utcnow(),
                    )
                    database.db.createDataObj(filler_worker_shares_rec)
                    share_height += 1
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
                logger.warn("= End ShareCommitScheduler")
            time.sleep(commit_interval)
        except Exception as e:
            lib.teardown_db()
            logger.exception("Something went wrong: {} ".format(
                traceback.format_exc()))
            time.sleep(10)
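A small sketch of the redis layout this scheduler assumes: one "<prefix>-<height>" key per block holding a JSON map of worker id to per-edge_bits share counts. The "sharedata" prefix and the sample field values are assumptions for illustration.

import json
import redis

r = redis.Redis()
sample = {
    "timestamp": "2019-01-01 00:00:00.000000",
    "42": {"29": {"difficulty": 16, "accepted": 10, "rejected": 1, "stale": 0}},
}
r.set("sharedata-12345", json.dumps(sample), ex=3600)
for key in r.scan_iter(match="sharedata-*", count=100):
    share_height = int(key.decode().split("-")[1])
    print(share_height, json.loads(r.get(key).decode()))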
Example no. 19
def ShareCommitScheduler(max_lag, logger):
    global SHARES_MUTEX
    global SHARES
    while True:
        try:
            database = lib.get_db()
            chain_height = Blocks.get_latest().height
            share_height = Worker_shares.get_latest_height()
            logger.warn(
                "SHARES commit scheduler - chain_height = {}, share_height = {}"
                .format(chain_height, share_height))
            SHARES_MUTEX.acquire()
            try:
                while share_height < (chain_height - max_lag):
                    share_height += 1
                    if share_height not in SHARES.keys():
                        # Even if there are no shares in the pool at all for this block, we still need to create a filler record at this height
                        logger.warn(
                            "Processed 0 shares in block {} - Creating filler record"
                            .format(share_height))
                        filler_worker_shares_rec = Worker_shares(
                            height=share_height,
                            user_id=1,  # Pool User
                            timestamp=datetime.utcnow(),
                        )
                        database.db.createDataObj(filler_worker_shares_rec)
                    else:
                        # Commit SHARES
                        logger.warn("Commit SHARES for height: {}".format(
                            share_height))
                        # Get and remove the timestamp
                        ts = SHARES[share_height].pop("timestamp",
                                                      datetime.utcnow())
                        for worker, worker_shares in SHARES[share_height].items():
                            # Get existing share record for this user at this height
                            worker_shares_rec = Worker_shares.get_by_height_and_id(
                                share_height, worker)
                            if worker_shares_rec is None:
                                # No existing record for this worker at this height, create it
                                logger.warn(
                                    "This is a new share record for worker {} at height {}"
                                    .format(worker, share_height))
                                worker_shares_rec = Worker_shares(
                                    height=share_height,
                                    user_id=worker,
                                    timestamp=ts,
                                )
                                database.db.createDataObj(worker_shares_rec)
                            else:
                                # Add to the existing record
                                logger.warn(
                                    "Add to existing share record for worker {} at height {}"
                                    .format(worker, share_height))
                            for edge_bits, shares_count in worker_shares.items():
                                logger.warn(
                                    "YYY: Commit new worker shares: {}".format(
                                        shares_count))
                                worker_shares_rec.add_shares(
                                    edge_bits, shares_count["difficulty"],
                                    shares_count["accepted"],
                                    shares_count["rejected"],
                                    shares_count["stale"])
                                logger.warn("Worker Shares: {}".format(
                                    worker_shares_rec))
                    # Ack the RMQ shares messages
                    #for channel, tags in RMQ_ACK[share_height].items():
                    #    # bulk-ack up to the latest message we processed
                    #    channel.basic_ack(delivery_tag=max(tags), multiple=True)
                    # Discard the processed messages
                    SHARES.pop(share_height, None)
                    RMQ_ACK.pop(share_height, None)
            finally:
                database.db.getSession().commit()
                SHARES_MUTEX.release()
                lib.teardown_db()
            time.sleep(30)
        except Exception as e:
            lib.teardown_db()
            logger.error("Something went wrong: {}\n{}".format(
                e,
                traceback.format_exc().splitlines()))
            time.sleep(10)
Example no. 20
def main():
    global CONFIG
    global LOGGER
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    # Get Config
    check_interval = float(CONFIG[PROCESS]["check_interval"])

    # Find the height of the latest block record
    last_height = grin.blocking_get_current_height() - 1400
    latest_block = Blocks.get_latest()
    if latest_block is not None:
        last_height = latest_block.height
    height = last_height + 1
    height = max(0, height)
    LOGGER.warn("Starting at block height: {}".format(height))

    while True:
        try:
            latest = grin.blocking_get_current_height()
            while latest >= height:
                response = grin.blocking_get_block_by_height(height)
                LOGGER.warn("New Block: {} at {}".format(response["header"]["hash"],
                                                         response["header"]["height"]))
                #print("sleeping 60....")
                #sleep(60)
                #print(".....GO")
                try:
                    new_block = Blocks(
                        hash=response["header"]["hash"],
                        version=response["header"]["version"],
                        height=response["header"]["height"],
                        previous=response["header"]["previous"],
                        timestamp=datetime.strptime(
                            response["header"]["timestamp"][:-1],
                            "%Y-%m-%dT%H:%M:%S+00:0"),
                        output_root=response["header"]["output_root"],
                        range_proof_root=response["header"]["range_proof_root"],
                        kernel_root=response["header"]["kernel_root"],
                        nonce=response["header"]["nonce"],
                        edge_bits=response["header"]["edge_bits"],
                        total_difficulty=response["header"]["total_difficulty"],
                        secondary_scaling=response["header"]["secondary_scaling"],
                        num_inputs=len(response["inputs"]),
                        num_outputs=len(response["outputs"]),
                        num_kernels=len(response["kernels"]),
                        fee=sum(k["fee"] for k in response["kernels"]),
                        lock_height=response["kernels"][0]["lock_height"]
                        if len(response["kernels"]) > 0 else 0,
                        total_kernel_offset=response["header"]["total_kernel_offset"],
                        state="new")
                    # Batch inserts when catching up
                    database.db.getSession().add(new_block)
                    if (height % BATCHSZ == 0) or (height >= (latest - 10)):
                        database.db.getSession().commit()
                    height = height + 1
                except (sqlalchemy.exc.IntegrityError, pymysql.err.IntegrityError):
                    LOGGER.warn("Attempted to re-add block: {}".format(response["header"]["height"]))
                    database.db.getSession().rollback()
                    latest_block = Blocks.get_latest()
                    height = latest_block.height + 1
                    sleep(check_interval)
            sys.stdout.flush()
            sleep(check_interval)
        except Exception as e:
            LOGGER.error("Something went wrong: {}\n{}".format(e, traceback.format_exc().splitlines()))
            database.db.getSession().rollback()
            sys.stdout.flush()
            sleep(check_interval)
    # Should never get here, but....
    LOGGER.warn("=== Completed {}".format(PROCESS))
Example no. 21
def main():
    global CONFIG
    global LOGGER
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Number of blocks of share data used to calculate rewards
    PPLNG_WINDOW_SIZE = 60
    try:
        PPLNG_WINDOW_SIZE = int(os.environ["PPLNG_WINDOW_SIZE"])
    except Exception as e:
        LOGGER.error(
            "Failed to get PPLNG_WINDOW_SIZE from the environment: {}.  Using default size of {}"
            .format(e, PPLNG_WINDOW_SIZE))

    POOL_FEE = 0.0075
    try:
        POOL_FEE = float(CONFIG[PROCESS]["pool_fee"])
    except Exception as e:
        LOGGER.error(
            "Failed to get POOL_FEE from the config: {}.  Using default fee of {}"
            .format(e, POOL_FEE))

    # Keep track of "next" block estimated
    next_height_estimated = 0

    # Connect to DB
    database = lib.get_db()

    while True:
        # Generate pool block reward estimates for all new and unlocked blocks
        try:
            database.db.initializeSession()
            next_height = Blocks.get_latest().height - 5  # a recent height for which all worker shares are available
            unlocked_blocks = Pool_blocks.get_all_unlocked()
            new_blocks = Pool_blocks.get_all_new()
            unlocked_blocks_h = [blk.height for blk in unlocked_blocks]
            new_blocks_h = [blk.height for blk in new_blocks]

            need_estimates = unlocked_blocks_h + new_blocks_h
            LOGGER.warn(
                "Will ensure estimate for blocks: {}".format(need_estimates))
            redisdb = lib.get_redis_db()

            # Generate Estimate
            for height in need_estimates:
                if height > next_height:
                    LOGGER.warn(
                        "Delay estimate until we have recent shares availalbe for block: {}"
                        .format(height))
                else:
                    LOGGER.warn("Ensure estimate for block: {}".format(height))
                    # Check if we already have an estimate cached
                    payout_estimate_map_key = key_prefix + str(height)
                    cached_map = redisdb.get(payout_estimate_map_key)
                    if cached_map is None:
                        # We don't have it cached; we need to calculate and cache it now
                        payout_map = pool.calculate_block_payout_map(
                            height, PPLNG_WINDOW_SIZE, POOL_FEE, LOGGER, True)
                        payout_map_json = json.dumps(payout_map)
                        redisdb.set(payout_estimate_map_key,
                                    payout_map_json,
                                    ex=cache_expire)
                        LOGGER.warn(
                            "Created estimate for block {} with key {}".format(
                                height, payout_estimate_map_key))
                    else:
                        LOGGER.warn(
                            "There is an exiting estimate for block: {}".
                            format(height))

            # Generate estimate for "next" block
            LOGGER.warn(
                "Ensure estimate for next block: {}".format(next_height))
            if next_height_estimated != next_height:
                payout_map = pool.calculate_block_payout_map(
                    next_height, PPLNG_WINDOW_SIZE, POOL_FEE, LOGGER, True)
                payout_map_json = json.dumps(payout_map)
                payout_estimate_map_key = key_prefix + "next"
                redisdb.set(payout_estimate_map_key,
                            payout_map_json,
                            ex=cache_expire)
                next_height_estimated = next_height
                LOGGER.warn("Created estimate for block {} with key {}".format(
                    next_height, payout_estimate_map_key))
            else:
                LOGGER.warn(
                    "There is an exiting next block estimate for : {}".format(
                        next_height))

            LOGGER.warn("Completed estimates")
            database.db.destroySession()
            # Flush debug print statements
            sys.stdout.flush()
        except Exception as e:  # AssertionError as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.format_exc()))
            database.db.destroySession()

        LOGGER.warn("=== Completed {}".format(PROCESS))
        sleep(check_interval)
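A hedged read-back sketch for the cached estimates; the concrete key_prefix value is an assumption, and the map shape follows the writer above.

import json
import redis

key_prefix = "payout-estimate-"  # assumed; must match the writer's key_prefix
redisdb = redis.Redis()
cached = redisdb.get(key_prefix + "next")
if cached is not None:
    payout_map = json.loads(cached.decode())
    for user_id, amount in payout_map.items():
        print(user_id, amount)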
Example no. 22
def main():
    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))
    # Connect to DB
    database = lib.get_db()

    # Get config
    check_interval = float(CONFIG[PROCESS]["check_interval"])
    avg_over_range = int(CONFIG[PROCESS]["avg_over_range"])

    # Find the height of the latest stats record
    last_height = 0
    latest_stat = Worker_stats.get_latest()

    if latest_stat is not None:
        last_height = latest_stat.height
    else:
        latest = Blocks.get_latest()
        while latest is None:
            LOGGER.warn("Waiting for the first block...")
            sleep(10)
            latest = Blocks.get_latest()
        last_height = latest.height
    height = last_height + 1

    LOGGER.warn("Starting at block height: {}".format(height))

    # Generate worker stats records - one per grin block for each active worker
    while True:
        # latest = grin.blocking_get_current_height()
        latest = Blocks.get_latest().height
        share_height = Worker_shares.get_latest_height()
        while share_height is None:
            LOGGER.warn("waiting for the first worker shares")
            sleep(10)
            share_height = Worker_shares.get_latest_height()
        stats_height = height - 1
        LOGGER.warn(
            "Running: chain height: {}, share height: {} vs stats height: {}".
            format(latest, share_height, stats_height))
        while share_height > height:
            try:
                new_stats = workerstats.calculate(height, avg_over_range)
                LOGGER.warn("{} new stats for height {}".format(
                    len(new_stats), height))
                for stats in new_stats:
                    LOGGER.warn("Added Worker_stats: {}".format(stats))
                # mark any existing pool_stats dirty
                pool_stats = Pool_stats.get_by_height(height)
                for stat_rec in new_stats:
                    database.db.getSession().add(stat_rec)
                if pool_stats is not None:
                    LOGGER.warn(
                        "Marked existing pool_stats dirty for height: {}".
                        format(height))
                    pool_stats.dirty = True  # Pool_stats need to be recalculated
                if ((height % BATCHSZ == 0) or (height >= (latest - 10))):
                    LOGGER.warn("Commit ---")
                    database.db.getSession().commit()
                height = height + 1
            except Exception as e:
                LOGGER.exception("Something went wrong: {}".format(e))
                database.db.getSession().rollback()
                sleep(check_interval)
        sys.stdout.flush()
        sleep(check_interval)
    LOGGER.warn("=== Completed {}".format(PROCESS))
Example no. 23
def main():
    global LOGGER
    global CONFIG

    CONFIG = lib.get_config()
    LOGGER = lib.get_logger(PROCESS)
    LOGGER.warn("=== Starting {}".format(PROCESS))

    # Connect to DB
    database = lib.get_db()
    atexit.register(lib.teardown_db)

    validation_depth = int(CONFIG[PROCESS]["validation_depth"])

    latest = grin.get_current_height() - 10  # stop 10 blocks from current to avoid overrunning the blockWatcher
    last_block_record = Blocks.get_latest()
    if last_block_record is None:
        last_block_record_height = 0
    else:
        last_block_record_height = last_block_record.height
    last = min(latest - validation_depth, last_block_record_height -
               validation_depth)  # start a reasonable distance back
    if last < 0:
        last = 0

    LOGGER.warn("Starting from block #{}".format(last))

    for i in range(last, latest + 1):
        if i % 100 == 0:
            LOGGER.warn("Processing #{}".format(i))
        response = grin.blocking_get_block_by_height(i)
        assert (response is not None)
        assert (int(response["header"]["height"]) == i)
        #print("{}: {}".format(response["header"]["height"], response["header"]["hash"]))
        try:
            database.db.initializeSession()
            rec = Blocks.get_by_height(i)  # Get existing entry from the DB (if any)
            if rec is not None:
                # Test if we have an orphan that's not already marked
                # Don't update any block info in the orphan, just mark the state
                if rec.hash != response["header"]["hash"] and rec.state != "orphan":
                    LOGGER.warn(
                        "Found an orphan - height: {}, hash: {} vs {}".format(
                            rec.height, rec.hash, response["header"]["hash"]))
                    rec.state = "orphan"
                    database.db.getSession().commit()
            else:
                # If it was not in the DB then we should add it now
                LOGGER.warn("Adding missing block - height: {}".format(
                    response["header"]["height"]))
                missing_block = Blocks(
                    hash=response["header"]["hash"],
                    version=response["header"]["version"],
                    height=response["header"]["height"],
                    previous=response["header"]["previous"],
                    timestamp=datetime.strptime(
                        response["header"]["timestamp"][:-1],
                        "%Y-%m-%dT%H:%M:%S+00:0"),
                    output_root=response["header"]["output_root"],
                    range_proof_root=response["header"]["range_proof_root"],
                    kernel_root=response["header"]["kernel_root"],
                    nonce=response["header"]["nonce"],
                    edge_bits=response["header"]["edge_bits"],
                    total_difficulty=response["header"]["total_difficulty"],
                    secondary_scaling=response["header"]["secondary_scaling"],
                    num_inputs=len(response["inputs"]),
                    num_outputs=len(response["outputs"]),
                    num_kernels=len(response["kernels"]),
                    fee=sum(k["fee"] for k in response["kernels"]),
                    lock_height=response["kernels"][0]["lock_height"]
                    if len(response["kernels"]) > 0 else 0,
                    total_kernel_offset=response["header"]
                    ["total_kernel_offset"],
                    state="missing")
                database.db.createDataObj(missing_block)
        except Exception as e:
            LOGGER.error("Something went wrong: {} - {}".format(
                e, traceback.print_stack()))
            database.db.getSession().rollback()
        database.db.destroySession()
        sys.stdout.flush()
        time.sleep(0.1)  # don't be too aggressive
    LOGGER.warn("=== Completed {}".format(PROCESS))