Example 1
def cleanup(chain, oldest_kept, simulate, empty):
    """ Given the oldest block hash that you desire to hold shares for, delete
    everything older than it. """
    for cp in Block.query.filter_by(hash=oldest_kept).one().chain_payouts:
        if cp.chainid == chain:
            oldest_kept = cp.solve_slice
            break

    current_app.logger.info("Current slice index {}".format(
        redis_conn.get("chain_{}_slice_index".format(chain))))
    current_app.logger.info(
        "Looking at all slices older than {}".format(oldest_kept))

    simulate = bool(int(simulate))
    if not simulate:
        if raw_input("Are you sure you want to continue? [y/n]") != "y":
            return

    empty_found = 0
    for i in xrange(oldest_kept, 0, -1):
        if empty_found >= empty:
            current_app.logger.info("20 empty in a row, exiting")
            break
        key = "chain_{}_slice_{}".format(chain, i)
        if redis_conn.type(key) == 'none':
            empty_found += 1
        else:
            empty_found = 0

        if not simulate:
            current_app.logger.info("deleting {}!".format(key))
            current_app.logger.info(redis_conn.delete(key))
        else:
            current_app.logger.info("would delete {}".format(key))
Example 2
def cleanup(chain, oldest_kept, simulate):
    """ Given the oldest block hash that you desire to hold shares for, delete
    everything older than it. """
    for cp in Block.query.filter_by(hash=oldest_kept).one().chain_payouts:
        if cp.chainid == chain:
            oldest_kept = cp.solve_slice
            break

    print "Current slice index {}".format(redis_conn.get("chain_1_slice_index"))
    print "Looking at all slices older than {}".format(oldest_kept)

    simulate = bool(int(simulate))
    if not simulate:
        if raw_input("Are you sure you want to continue? [y/n]") != "y":
            return

    empty = 0
    for i in xrange(oldest_kept, 0, -1):
        if empty >= 20:
            print "20 empty in a row, exiting"
            break
        key = "chain_{}_slice_{}".format(chain, i)
        if not redis_conn.llen(key):
            empty += 1
        else:
            empty = 0

        if not simulate:
            print "deleting {}!".format(key)
            print redis_conn.delete(key)
        else:
            print "would delete {}".format(key)
Example 3
def compress_slices():
    for chain in chains.itervalues():
        # Get the index of the last inserted share slice on this chain
        last_complete_slice = redis_conn.get("chain_{}_slice_index".format(chain.id))
        if last_complete_slice is None:
            # Chain must not be in use....
            current_app.logger.debug(
                "No slice index for chain {}".format(chain))
            continue
        else:
            last_complete_slice = int(last_complete_slice)

        # Loop through all possible share slice numbers
        empty = 0
        encoding_time = 0.0
        retrieval_time = 0.0
        entry_count = 0
        encoded_size = 0
        original_size = 0
        last_slice = last_complete_slice
        for slc_idx in xrange(last_complete_slice, 0, -1):
            key = "chain_{}_slice_{}".format(chain.id, slc_idx)
            key_type = redis_conn.type(key)

            # Compress if it's a list. This is raw data from powerpool's redis
            # reporter
            if key_type == "list":
                # Reduce empty counter, but don't go negative
                empty = max(0, empty - 1)

                # Retrieve the unencoded information from redis
                t = time.time()
                slice_shares = redis_conn.lrange(key, 0, -1)
                this_original_size = int(redis_conn.debug_object(key)['serializedlength'])
                this_retrieval_time = time.time() - t

                # Parse the list into proper python representation
                data = []
                total_shares = 0
                for entry in slice_shares:
                    user, shares = entry.split(":")
                    shares = Decimal(shares)
                    data.append((user, shares))
                    total_shares += shares
                this_entry_count = len(data)

                # serialization and compression
                t = time.time()
                data = json.dumps(data, separators=(',', ':'), use_decimal=True)
                data = bz2.compress(data)
                this_encoding_time = time.time() - t

                # Put all the new data into a temporary key, then atomically
                # replace the old list key. This ensures we never lose data,
                # even on failures (exceptions)
                key_compressed = key + "_compressed"
                redis_conn.hmset(key_compressed,
                                 dict(
                                     date=int(time.time()),
                                     data=data,
                                     encoding="bz2json",
                                     total_shares=total_shares)
                                 )
                redis_conn.rename(key_compressed, key)
                this_encoded_size = int(redis_conn.debug_object(key)['serializedlength'])

                last_slice = slc_idx
                # Update all the aggregates
                encoding_time += this_encoding_time
                retrieval_time += this_retrieval_time
                entry_count += this_entry_count
                encoded_size += this_encoded_size
                original_size += this_original_size
                # Print progress
                current_app.logger.info(
                    "Encoded slice #{:,} containing {:,} entries."
                    " retrieval_time: {}; encoding_time: {}; start_size: {:,}; end_size: {:,}; ratio: {}"
                    .format(slc_idx, this_entry_count,
                            time_format(this_retrieval_time),
                            time_format(this_encoding_time),
                            this_original_size, this_encoded_size,
                            float(this_original_size) / (this_encoded_size or 1)))

            # Count an empty entry to detect the end of live slices
            elif key_type == "none":
                empty += 1

            # If we've seen a lot of empty slices, probably nothing else to find!
            if empty >= 20:
                current_app.logger.info(
                    "Ended compression search at {}".format(slc_idx))
                break

        current_app.logger.info(
            "Encoded from slice #{:,} -> #{:,} containing {:,} entries."
            " retrieval_time: {}; encoding_time: {}; start_size: {:,}; end_size: {:,}; ratio: {}"
            .format(last_complete_slice, last_slice, entry_count,
                    time_format(retrieval_time), time_format(encoding_time),
                    original_size, encoded_size,
                    float(original_size) / (encoded_size or 1)))
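After compress_slices runs, each processed slice key holds a redis hash with date, data, encoding, and total_shares fields instead of a raw list. Reading a slice back just reverses the bz2 + JSON steps. The helper below is not part of the original code, only a minimal sketch of that format; it assumes the same redis_conn and the simplejson-style json module (use_decimal) used above.

def decode_slice(key):
    """ Illustrative only: unpack one compressed share slice back into
    (user, shares) tuples, assuming the hash layout written above. """
    raw = redis_conn.hgetall(key)
    if raw.get("encoding") != "bz2json":
        raise ValueError("Unexpected encoding for {}: {}"
                         .format(key, raw.get("encoding")))
    entries = json.loads(bz2.decompress(raw["data"]), use_decimal=True)
    # json gives back [user, shares] pairs; use_decimal keeps shares as Decimal
    return [(user, Decimal(shares)) for user, shares in entries]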
Example 4
def chain_cleanup(chain, dont_simulate):
    """ Handles removing all redis share slices that we are fairly certain won't
    be needed to credit a block if one were to be solved in the future. """
    if not chain.currencies:
        current_app.logger.warn(
            "Unable to run share slice cleanup on chain {} since currencies "
            "aren't specified!".format(chain.id))
        return

    # Get the current sharechain index from redis
    current_index = int(redis_conn.get("chain_{}_slice_index".format(chain.id)) or 0)
    if not current_index:
        current_app.logger.warn(
            "Index couldn't be determined for chain {}".format(chain.id))
        return

    # Find the maximum average difficulty of all currencies on this sharechain
    max_diff = 0
    max_diff_currency = None
    for currency in chain.currencies:
        currency_data = cache.get("{}_data".format(currency.key))
        if not currency_data or currency_data['difficulty_avg_stale']:
            current_app.logger.warn(
                "Cache doesn't accurate enough average diff for {} to cleanup chain {}"
                .format(currency, chain.id))
            return

        if currency_data['difficulty_avg'] > max_diff:
            max_diff = currency_data['difficulty_avg']
            max_diff_currency = currency

    assert max_diff != 0

    hashes_to_solve = max_diff * (2 ** 32)
    shares_to_solve = hashes_to_solve / chain.algo.hashes_per_share
    shares_to_keep = shares_to_solve * chain.safety_margin
    if chain.type == "pplns":
        shares_to_keep *= chain.last_n
    current_app.logger.info(
        "Keeping {:,} shares based on max diff {} for {} on chain {}"
        .format(shares_to_keep, max_diff, max_diff_currency, chain.id))

    # Delete any shares past shares_to_keep
    found_shares = 0
    empty_slices = 0
    iterations = 0
    for index in xrange(current_index, -1, -1):
        iterations += 1
        slc_key = "chain_{}_slice_{}".format(chain.id, index)
        key_type = redis_conn.type(slc_key)

        # Fetch slice information
        if key_type == "list":
            empty_slices = 0
            # For speed's sake, ignore uncompressed slices
            continue
        elif key_type == "hash":
            empty_slices = 0
            found_shares += float(redis_conn.hget(slc_key, "total_shares"))
        elif key_type == "none":
            empty_slices += 1
        else:
            raise Exception("Unexpected slice key type {}".format(key_type))

        if found_shares >= shares_to_keep or empty_slices >= 20:
            break

    if found_shares < shares_to_keep:
        current_app.logger.info(
            "Not enough shares {:,}/{:,} for cleanup on chain {}"
            .format(found_shares, shares_to_keep, chain.id))
        return

    current_app.logger.info("Found {:,} shares after {:,} iterations"
                            .format(found_shares, iterations))

    # Delete all share slices older than the last index found
    oldest_kept = index - 1
    empty_found = 0
    deleted_count = 0
    for index in xrange(oldest_kept, -1, -1):
        if empty_found >= 20:
            current_app.logger.debug("20 empty in a row, exiting")
            break
        key = "chain_{}_slice_{}".format(chain, index)
        if redis_conn.type(key) == "none":
            empty_found += 1
        else:
            empty_found = 0

            if dont_simulate:
                if redis_conn.delete(key):
                    deleted_count += 1
            else:
                current_app.logger.info("Would delete {}".format(key))

    if dont_simulate:
        current_app.logger.info(
            "Deleted {} total share slices from #{:,}->{:,}"
            .format(deleted_count, oldest_kept, index))
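To make the retention math above concrete, here is a rough worked example. Every number below is made up purely for illustration; in practice max_diff comes from the cached currency data, while hashes_per_share, safety_margin, and last_n come from the chain configuration.

# Hypothetical values, chosen only to show the arithmetic
max_diff = 50000           # highest average network difficulty among the chain's currencies
hashes_per_share = 65536   # hashes represented by one difficulty-1 share (algo dependent)
safety_margin = 2          # multiple of the expected solve window to keep
last_n = 2                 # PPLNS window multiplier

hashes_to_solve = max_diff * (2 ** 32)                      # ~2.1e14 expected hashes per block
shares_to_solve = hashes_to_solve / hashes_per_share        # 3,276,800,000 shares per block
shares_to_keep = shares_to_solve * safety_margin * last_n   # ~1.3e10 shares retained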