Example #1
def update_pplns_est():
    """
    Generates the Redis-cached share counts for all users based on the PPLNS window
    """
    logger.info("Recomputing PPLNS for users")
    # grab configured N
    mult = int(current_app.config['last_n'])
    # grab configured hashes in an n1 share
    hashes_per_share = current_app.config.get('hashes_per_share', 65536)
    # generate average diff from last 500 blocks
    diff = cache.get('difficulty_avg')
    if diff is None:
        logger.warn("Difficulty average is blank, can't calculate pplns estimate")
        return

    # Calculate the total shares that are 'counted'
    total_shares = ((float(diff) * (2 ** 32)) / hashes_per_share) * mult

    # Loop through all shares, descending order, until we've distributed the
    # shares
    user_shares, total_grabbed = get_sharemap(None, total_shares)
    user_shares = {'pplns_' + k: v for k, v in user_shares.iteritems()}

    cache.set('pplns_total_shares', total_grabbed, timeout=40 * 60)
    cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
    cache.set_many(user_shares, timeout=40 * 60)
    cache.set('pplns_user_shares', user_shares, timeout=40 * 60)
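
get_sharemap is not defined in any of these examples; Example #9 below inlines what appears to be the same share-walking logic. A minimal sketch under that assumption — the meaning of the first argument, its None handling, and the exact Share columns are inferred rather than taken from the original project:

from sqlalchemy import select

def get_sharemap(start_id, total_shares):
    """
    Walk shares newest-to-oldest, crediting each user until total_shares have
    been distributed. Returns ({user: shares}, shares actually grabbed).
    Sketch modeled on the inline loop in Example #9.
    """
    remain = total_shares
    user_shares = {}
    query = select([Share.shares, Share.user]).order_by(Share.id.desc())
    if start_id is not None:
        # assumed semantics: only consider shares at or below this id
        query = query.where(Share.id <= start_id)
    for shares, user in (db.engine.execution_options(stream_results=True).
                         execute(query)):
        user_shares.setdefault(user, 0)
        if remain > shares:
            user_shares[user] += shares
            remain -= shares
        else:
            user_shares[user] += remain
            remain = 0
            break
    return user_shares, total_shares - remain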
Example #2
 def test_leaderboard_anon(self):
     s = m.UserSettings(user="******",
                        anon=True)
     db.session.add(s)
     start = datetime.datetime.utcnow()
     now = start - datetime.timedelta(minutes=2)
     v = m.ShareSlice(time=now,
                      value=101,
                      user="******",
                      worker="",
                      algo="scrypt",
                      span=0,
                      share_type="acc")
     db.session.add(v)
     v = m.ShareSlice(time=now,
                      value=100,
                      user="******",
                      worker="",
                      algo="scrypt",
                      span=0,
                      share_type="acc")
     db.session.add(v)
     db.session.commit()
     leaderboard()
     users = cache.get("leaderboard")
     self.assertEquals(users[0][0], "Anonymous")
     self.assertEquals(users[0][1]['scrypt'], 110318.93333333333)
     self.assertEquals(users[1][0], "DAbhwsnEq5TjtBP5j76TinhUqqLTktDAnD")
     self.assertEquals(users[1][1]['scrypt'], 109226.66666666667)
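
The expected leaderboard figures follow from converting a one-minute share slice into a hashrate, assuming the 65536 hashes-per-n1-share default seen in the PPLNS examples (the conversion is inferred from the numbers; the leaderboard() implementation is not shown here):

# 101 accepted shares in a one-minute slice at 65536 hashes per share:
101 * 65536 / 60.0   # = 110318.93333333333 -> the anonymous user, listed first
# 100 accepted shares in the same window:
100 * 65536 / 60.0   # = 109226.66666666667 -> the DAbhwsnEq5Tj... address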
Example #4
def update_pplns_est(self):
    """
    Generates the Redis-cached share counts for all users based on the PPLNS window
    """
    try:
        logger.info("Recomputing PPLNS for users")
        # grab configured N
        mult = int(current_app.config['last_n'])
        # generate average diff from last 500 blocks
        diff = cache.get('difficulty_avg')
        if diff is None:
            logger.warn("Difficulty average is blank, can't calculate pplns estimate")
            return

        # Calculate the total shares that are 'counted'
        total_shares = (float(diff) * (2 ** 16)) * mult

        # Loop through all shares, descending order, until we've distributed the
        # shares
        user_shares, total_grabbed = get_sharemap(None, total_shares)
        user_shares = {'pplns_' + k: v for k, v in user_shares.iteritems()}

        cache.set('pplns_total_shares', total_grabbed, timeout=40 * 60)
        cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
        cache.set_many(user_shares, timeout=40 * 60)
        cache.set('pplns_user_shares', user_shares, timeout=40 * 60)

    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)
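
The self argument and the closing raise self.retry(exc=exc) are the Celery bound-task retry idiom, so this variant is presumably registered as a Celery task. A sketch of what such a declaration typically looks like — the celery app object and the retry delay are assumptions, not taken from the source:

from celery import Celery

celery = Celery('pool_tasks')  # hypothetical app name

@celery.task(bind=True, default_retry_delay=60)
def update_pplns_est(self):
    # body as in the example above; on an unhandled exception,
    # self.retry(exc=exc) re-queues the task after the retry delay
    pass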
Example #6
def update_network():
    """
    Queries the configured RPC servers to update network stats information.
    """
    for currency in currencies.itervalues():
        if not currency.mineable:
            continue

        try:
            gbt = currency.coinserv.getblocktemplate({})
        except (urllib3.exceptions.HTTPError, CoinRPCException) as e:
            current_app.logger.error("Unable to communicate with {} RPC server: {}"
                                     .format(currency, e))
            continue

        key = "{}_data".format(currency.key)
        block_cache_key = "{}_block_cache".format(currency.key)

        current_data = cache.get(key)
        if current_data and current_data['height'] == gbt['height']:
            # Already have information for this block
            current_app.logger.debug(
                "Not updating {} net info, height {} already recorded."
                .format(currency, current_data['height']))
        else:
            current_app.logger.info(
                "Updating {} net info for height {}.".format(currency, gbt['height']))

        # Six hours' worth of blocks; how many we'll keep in the cache
        keep_count = 21600 / currency.block_time

        difficulty = bits_to_difficulty(gbt['bits'])
        cache.cache._client.lpush(block_cache_key, difficulty)
        cache.cache._client.ltrim(block_cache_key, 0, keep_count)
        diff_list = cache.cache._client.lrange(block_cache_key, 0, -1)
        difficulty_avg = sum(map(float, diff_list)) / len(diff_list)

        cache.set(key,
                  dict(height=gbt['height'],
                       difficulty=difficulty,
                       reward=gbt['coinbasevalue'] * current_app.SATOSHI,
                       difficulty_avg=difficulty_avg,
                       difficulty_avg_stale=len(diff_list) < keep_count),
                  timeout=1200)
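
bits_to_difficulty is imported from elsewhere in every example that uses it. A minimal sketch of the standard compact-bits conversion it presumably performs (the project's own implementation may differ in details):

def bits_to_difficulty(bits):
    """ Convert a compact 'bits' target (hex string, e.g. "1b0404cb") into a
    difficulty relative to the conventional 0x1d00ffff maximum target. """
    bits = int(bits, 16)
    exponent = bits >> 24
    mantissa = bits & 0xffffff
    target = mantissa * (256 ** (exponent - 3))
    max_target = 0x00ffff * (256 ** (0x1d - 3))
    return max_target / float(target)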
Example #9
def update_pplns_est(self):
    """
    Generates the Redis-cached share counts for all users based on the PPLNS window
    """
    try:
        logger.info("Recomputing PPLNS for users")
        # grab configured N
        mult = int(current_app.config['last_n'])
        # generate average diff from last 500 blocks
        diff = cache.get('difficulty_avg')
        if diff is None:
            logger.warn("Difficulty average is blank, can't calculate pplns estimate")
            return
        # Calculate the total shares that are 'counted'
        total_shares = ((float(diff) * (2 ** 16)) * mult)

        # Loop through all shares, descending order, until we've distributed the
        # shares
        remain = total_shares
        user_shares = {}
        for shares, user in (db.engine.execution_options(stream_results=True).
                             execute(select([Share.shares, Share.user]).
                                     order_by(Share.id.desc()))):
            user_shares.setdefault('pplns_' + user, 0)
            if remain > shares:
                user_shares['pplns_' + user] += shares
                remain -= shares
            else:
                user_shares['pplns_' + user] += remain
                remain = 0
                break

        cache.set('pplns_total_shares', (total_shares - remain), timeout=40 * 60)
        cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
        cache.set_many(user_shares, timeout=40 * 60)
        cache.set('pplns_user_shares', user_shares, timeout=40 * 60)

    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)
Example #10
    def set_data(gbt, curr=None):
        prefix = ""
        if curr:
            prefix = curr + "_"
        prev_height = cache.get(prefix + 'blockheight') or 0

        if gbt['height'] == prev_height:
            logger.debug("Not updating {} net info, height {} already recorded."
                         .format(curr or 'main', prev_height))
            return
        logger.info("Updating {} net info for height {}.".format(curr or 'main', gbt['height']))

        # set general information for this network
        difficulty = bits_to_difficulty(gbt['bits'])
        cache.set(prefix + 'blockheight', gbt['height'], timeout=1200)
        cache.set(prefix + 'difficulty', difficulty, timeout=1200)
        cache.set(prefix + 'reward', gbt['coinbasevalue'], timeout=1200)

        # keep a configured number of blocks in the cache for getting average difficulty
        cache.cache._client.lpush(prefix + 'block_cache', gbt['bits'])
        cache.cache._client.ltrim(prefix + 'block_cache', 0, current_app.config['difficulty_avg_period'])
        diff_list = cache.cache._client.lrange(prefix + 'block_cache', 0, current_app.config['difficulty_avg_period'])
        total_diffs = sum([bits_to_difficulty(diff) for diff in diff_list])
        cache.set(prefix + 'difficulty_avg', total_diffs / len(diff_list), timeout=120 * 60)

        # add the difficulty as a one minute share, unless we're staging
        if not current_app.config.get('stage', False):
            now = datetime.datetime.utcnow()
            try:
                m = OneMinuteType(typ=prefix + 'netdiff', value=difficulty * 1000, time=now)
                db.session.add(m)
                db.session.commit()
            except sqlalchemy.exc.IntegrityError:
                db.session.rollback()
                slc = OneMinuteType.query.with_lockmode('update').filter_by(
                    time=now, typ=prefix + 'netdiff').one()
                # just average the diff of two blocks that occurred in the same second
                slc.value = ((difficulty * 1000) + slc.value) / 2
                db.session.commit()
Example #11
def new_block(self, blockheight, bits=None, reward=None):
    """
    Notification that a new block height has been reached in the network.
    Caches values for display on the website and records a data point for
    the network difficulty graph.
    """
    # prevent lots of duplicate rerunning...
    last_blockheight = cache.get('blockheight') or 0
    if blockheight == last_blockheight:
        logger.warn("Recieving duplicate new_block notif, ignoring...")
        return
    logger.info("Recieved notice of new block height {}".format(blockheight))

    difficulty = bits_to_difficulty(bits)
    cache.set('blockheight', blockheight, timeout=1200)
    cache.set('difficulty', difficulty, timeout=1200)
    cache.set('reward', reward, timeout=1200)

    # keep the last 500 blocks in the cache for getting average difficulty
    cache.cache._client.lpush('block_cache', bits)
    cache.cache._client.ltrim('block_cache', 0, 500)
    diff_list = cache.cache._client.lrange('block_cache', 0, 500)
    total_diffs = sum([bits_to_difficulty(diff) for diff in diff_list])
    cache.set('difficulty_avg', total_diffs / len(diff_list), timeout=120 * 60)

    # add the difficulty as a one minute share
    now = datetime.datetime.utcnow()
    try:
        m = OneMinuteType(typ='netdiff', value=difficulty * 1000, time=now)
        db.session.add(m)
        db.session.commit()
    except sqlalchemy.exc.IntegrityError:
        db.session.rollback()
        slc = OneMinuteType.query.with_lockmode('update').filter_by(
            time=now, typ='netdiff').one()
        # just average the diff of two blocks that occurred in the same second
        slc.value = ((difficulty * 1000) + slc.value) / 2
        db.session.commit()
Example #12
def cleanup(self, simulate=False):
    """
    Finds all the shares that will no longer be used and removes them from
    the database.
    """
    import time
    t = time.time()
    try:
        diff = cache.get('difficulty_avg')
        if diff is None:
            current_app.logger.warn(
                "Difficulty average is blank, can't safely cleanup")
            return
        # count all unprocessed blocks
        unproc_blocks = len(Block.query.filter_by(processed=False).all())
        # make sure we leave the right number of shares for them
        unproc_n = unproc_blocks * current_app.config['last_n']
        # plus our requested cleanup n for a safe margin
        cleanup_n = current_app.config.get('cleanup_n', 4) + current_app.config['last_n']
        # calculate how many n1 shares that is
        total_shares = int(round(((float(diff) * (2 ** 16)) * (cleanup_n + unproc_n))))
        stale_id = 0
        counted_shares = 0
        rows = 0
        logger.info("Unprocessed blocks: {}; {} N kept"
                    .format(unproc_blocks, unproc_n))
        logger.info("Safety margin N from config: {}".format(cleanup_n))
        logger.info("Total shares being saved: {}".format(total_shares))
        # iterate through shares in newest-to-oldest order to find the oldest
        # share id that still needs to be kept
        for shares, id in (db.engine.execution_options(stream_results=True).
                           execute(select([Share.shares, Share.id]).
                                   order_by(Share.id.desc()))):
            rows += 1
            counted_shares += shares
            if counted_shares >= total_shares:
                stale_id = id
                break

        if not stale_id:
            logger.info("Stale ID is 0, deleting nothing.")
            return

        logger.info("Time to identify proper id {}".format(time.time() - t))
        if simulate:
            logger.info("Stale ID computed: {}".format(stale_id))
            logger.info("Rows iterated to find stale id: {}".format(rows))
            return

        logger.info("Cleaning all shares older than id {}".format(stale_id))
        # To prevent integrity errors, all blocks linking to a share that's
        # going to be deleted need to be updated to remove the reference
        Block.query.filter(Block.last_share_id <= stale_id).update({Block.last_share_id: None})
        db.session.flush()
        # delete all shares that are sufficiently old
        Share.query.filter(Share.id < stale_id).delete(synchronize_session=False)
        db.session.commit()
        logger.info("Time to completion {}".format(time.time() - t))
    except Exception as exc:
        logger.error("Unhandled exception in cleanup", exc_info=True)
        db.session.rollback()
        raise self.retry(exc=exc)
Example #13
def cleanup(simulate=False, chunk_size=None, sleep_interval=None):
    """
    Finds all the shares that will no longer be used and removes them from
    the database.
    """
    t = time.time()
    # allow overridable configuration defaults
    chunk_size = chunk_size or current_app.config.get('cleanup_chunk_size', 10000)
    sleep_interval = sleep_interval or current_app.config.get('cleanup_sleep_interval', 1.0)

    diff = cache.get('difficulty_avg')
    if diff is None:
        logger.warn("Difficulty average is blank, can't safely cleanup")
        return
    # count all unprocessed blocks
    unproc_blocks = len(Block.query.filter_by(processed=False, merged_type=None).all())
    # make sure we leave the right number of shares for unprocessed blocks
    # to be distributed
    unproc_n = unproc_blocks * current_app.config['last_n']
    # plus our requested cleanup n for a safe margin
    cleanup_n = current_app.config.get('cleanup_n', 4) + current_app.config['last_n']
    # calculate how many n1 shares that is
    total_shares = int(round(((float(diff) * (2 ** 16)) * (cleanup_n + unproc_n))))

    logger.info("Chunk size {:,}; Sleep time {}".format(chunk_size, sleep_interval))
    logger.info("Unprocessed blocks: {}; {} Extra N kept".format(unproc_blocks, unproc_n))
    logger.info("Safety margin N from config: {}".format(cleanup_n))
    logger.info("Total shares being saved: {:,}".format(total_shares))
    # upper and lower iteration bounds
    start_id = Share.query.order_by(Share.id.desc()).first().id + 1
    stop_id = Share.query.order_by(Share.id).first().id
    logger.info("Diff between first share {:,} and last {:,}: {:,}"
                .format(stop_id, start_id, start_id - stop_id))

    rows = 0
    counted_shares = 0
    stale_id = 0
    # iterate through shares in newest-to-oldest order to find the oldest
    # share id that must be kept
    while counted_shares < total_shares and start_id > stop_id:
        res = (db.engine.execute(select([Share.shares, Share.id]).
                                 order_by(Share.id.desc()).
                                 where(Share.id >= start_id - chunk_size).
                                 where(Share.id < start_id)))
        chunk = res.fetchall()
        for shares, id in chunk:
            rows += 1
            counted_shares += shares
            if counted_shares >= total_shares:
                stale_id = id
                break
        logger.info("Fetched rows {:,} to {:,}. Found {:,} shares so far. Avg share/row {:,.2f}"
                    .format(start_id - chunk_size, start_id, counted_shares, counted_shares / rows))
        start_id -= chunk_size

    if not stale_id:
        logger.info("Stale ID is 0, deleting nothing.")
        return

    logger.info("Time to identify proper id {}"
                .format(datetime.timedelta(seconds=time.time() - t)))
    logger.info("Rows iterated to find stale id: {:,}".format(rows))
    logger.info("Cleaning all shares older than id {:,}, up to {:,} rows. Saving {:,} rows."
                .format(stale_id, stale_id - stop_id, stale_id - start_id))
    if simulate:
        logger.info("Simulate mode, exiting")
        return

    # To prevent integrity errors, all blocks linking to a share that's
    # going to be deleted need to be updated to remove the reference
    Block.query.filter(Block.last_share_id <= stale_id).update({Block.last_share_id: None})
    db.session.commit()

    total_sleep = 0
    total = stale_id - stop_id
    remain = total
    # delete all shares that are sufficiently old
    while remain > 0:
        bottom = stale_id - chunk_size
        res = (Share.query.filter(Share.id < stale_id).
                filter(Share.id >= bottom).delete(synchronize_session=False))
        db.session.commit()
        remain -= chunk_size
        logger.info("Deleted {:,} rows from {:,} to {:,}\t{:,.4f}\t{:,}"
                    .format(res, stale_id, bottom, remain * 100.0 / total, remain))
        stale_id -= chunk_size
        if res:  # only sleep if we actually deleted something
            sleep(sleep_interval)
            total_sleep += sleep_interval

    logger.info("Time to completion {}".format(datetime.timedelta(time.time() - t)))
    logger.info("Time spent sleeping {}".format(datetime.timedelta(seconds=total_sleep)))
Example #15
def chain_cleanup(chain, dont_simulate):
    """ Handles removing all redis share slices that we are fairly certain won't
    be needed to credit a block if one were to be solved in the future. """
    if not chain.currencies:
        current_app.logger.warn(
            "Unable to run share slice cleanup on chain {} since currencies "
            "aren't specified!".format(chain.id))
        return

    # Get the current sharechain index from redis
    current_index = int(redis_conn.get("chain_{}_slice_index".format(chain.id)) or 0)
    if not current_index:
        current_app.logger.warn(
            "Index couldn't be determined for chain {}".format(chain.id))
        return

    # Find the maximum average difficulty of all currencies on this sharechain
    max_diff = 0
    max_diff_currency = None
    for currency in chain.currencies:
        currency_data = cache.get("{}_data".format(currency.key))
        if not currency_data or currency_data['difficulty_avg_stale']:
            current_app.logger.warn(
                "Cache doesn't accurate enough average diff for {} to cleanup chain {}"
                .format(currency, chain.id))
            return

        if currency_data['difficulty_avg'] > max_diff:
            max_diff = currency_data['difficulty_avg']
            max_diff_currency = currency

    assert max_diff != 0

    hashes_to_solve = max_diff * (2 ** 32)
    shares_to_solve = hashes_to_solve / chain.algo.hashes_per_share
    shares_to_keep = shares_to_solve * chain.safety_margin
    if chain.type == "pplns":
        shares_to_keep *= chain.last_n
    current_app.logger.info(
        "Keeping {:,} shares based on max diff {} for {} on chain {}"
        .format(shares_to_keep, max_diff, max_diff_currency, chain.id))

    # Delete any shares past shares_to_keep
    found_shares = 0
    empty_slices = 0
    iterations = 0
    for index in xrange(current_index, -1, -1):
        iterations += 1
        slc_key = "chain_{}_slice_{}".format(chain.id, index)
        key_type = redis_conn.type(slc_key)

        # Fetch slice information
        if key_type == "list":
            empty_slices = 0
            # For speed's sake, ignore uncompressed slices
            continue
        elif key_type == "hash":
            empty_slices = 0
            found_shares += float(redis_conn.hget(slc_key, "total_shares"))
        elif key_type == "none":
            empty_slices += 1
        else:
            raise Exception("Unexpected slice key type {}".format(key_type))

        if found_shares >= shares_to_keep or empty_slices >= 20:
            break

    if found_shares < shares_to_keep:
        current_app.logger.info(
            "Not enough shares {:,}/{:,} for cleanup on chain {}"
            .format(found_shares, shares_to_keep, chain.id))
        return

    current_app.logger.info("Found {:,} shares after {:,} iterations"
                            .format(found_shares, iterations))

    # Delete all share slices older than the last index found
    oldest_kept = index - 1
    empty_found = 0
    deleted_count = 0
    for index in xrange(oldest_kept, -1, -1):
        if empty_found >= 20:
            current_app.logger.debug("20 empty in a row, exiting")
            break
        key = "chain_{}_slice_{}".format(chain, index)
        if redis_conn.type(key) == "none":
            empty_found += 1
        else:
            empty_found = 0

            if dont_simulate:
                if redis_conn.delete(key):
                    deleted_count += 1
            else:
                current_app.logger.info("Would delete {}".format(key))

    if dont_simulate:
        current_app.logger.info(
            "Deleted {} total share slices from #{:,}->{:,}"
            .format(deleted_count, oldest_kept, index))
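
To make the share-retention sizing concrete, a back-of-the-envelope calculation; every value below is illustrative and none of them come from the examples:

max_diff = 1000.0            # assumed maximum average difficulty on the chain
hashes_per_share = 65536     # n1 share size used elsewhere in these examples
safety_margin = 2            # assumed chain.safety_margin
last_n = 4                   # assumed chain.last_n for a PPLNS chain

hashes_to_solve = max_diff * (2 ** 32)                # ~4.29e12 hashes
shares_to_solve = hashes_to_solve / hashes_per_share  # 65,536,000 shares
shares_to_keep = shares_to_solve * safety_margin      # 131,072,000 shares
shares_to_keep *= last_n                              # 524,288,000 shares kept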