Example #1
def update_pplns_est(self):
    """
    Generates redis cached value for share counts of all users based on PPLNS window
    """
    try:
        # grab configured N
        mult = int(current_app.config['last_n'])
        # generate average diff from last 500 blocks
        blobs = Blob.query.filter(Blob.key.in_(("server", "diff"))).all()
        diff = [b for b in blobs if b.key == "diff"][0].data['diff']
        # Calculate the total shares that are 'counted'
        total_shares = ((float(diff) * (2 ** 16)) * mult)

        # Loop through all shares, descending order, until we'd distributed the shares
        remain = total_shares
        user_shares = {}
        for share in Share.query.order_by(Share.id.desc()).yield_per(5000):
            user_shares.setdefault('pplns_' + share.user, 0)
            if remain > share.shares:
                user_shares['pplns_' + share.user] += share.shares
                remain -= share.shares
            else:
                user_shares['pplns_' + share.user] += remain
                remain = 0
                break

        cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
        cache.set_many(user_shares, timeout=40 * 60)
        cache.set('pplns_user_shares', user_shares, timeout=40 * 60)

    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)
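
A note on the window arithmetic above: this variant sizes the PPLNS window as diff * 2**16 * last_n, while later variants of update_pplns_est in this listing write it as diff * 2**32 / hashes_per_share * last_n. With the 65536 hashes-per-share default seen in those variants the two forms are the same number; a tiny illustrative helper (not part of the original code):

def pplns_window(diff, last_n, hashes_per_share=65536):
    """Shares counted by the PPLNS estimate. The 2**16 form used above equals
    the 2**32 / hashes_per_share form whenever hashes_per_share is 65536."""
    return float(diff) * (2 ** 32) / hashes_per_share * last_n

# pplns_window(1500.0, 2) == 1500.0 * (2 ** 16) * 2 == 196608000.0
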
Example #2
def leaderboard():
    users = {}
    lower_10, upper_10 = make_upper_lower(offset=datetime.timedelta(minutes=2))
    for slc in ShareSlice.get_span(share_type=("acc", ), ret_query=True, lower=lower_10, upper=upper_10):
        try:
            address_version(slc.user)
        except Exception:
            pass
        else:
            user = users.setdefault(slc.user, {})
            user.setdefault(slc.algo, [0, set()])
            user[slc.algo][0] += slc.value
            user[slc.algo][1].add(slc.time)

    # Loop through and convert a summation of shares into a hashrate. Converts
    # to hashes per second
    for user, algo_shares in users.iteritems():
        for algo_key, (shares, minutes) in algo_shares.items():
            algo_obj = algos[algo_key]
            algo_shares[algo_key] = algo_obj.hashes_per_share * (shares / (len(minutes) * 60))
            algo_shares.setdefault('normalized', 0)
            algo_shares['normalized'] += users[user][algo_key] * algo_obj.normalize_mult

    sorted_users = sorted(users.iteritems(),
                          key=lambda x: x[1]['normalized'],
                          reverse=True)

    # This is really bad.... XXX: Needs rework!
    if users:
        anon = anon_users()
        for i, (user, data) in enumerate(sorted_users):
            if user in anon:
                sorted_users[i] = ("Anonymous", data)

    cache.set("leaderboard", sorted_users, timeout=15 * 60)
Example #3
def leaderboard():
    users = {}
    lower_10, upper_10 = make_upper_lower(offset=datetime.timedelta(minutes=1))
    for slc in ShareSlice.get_span(ret_query=True, lower=lower_10, upper=upper_10):
        try:
            address_version(slc.user)
        except Exception:
            pass
        else:
            user = users.setdefault(slc.user, {})
            user.setdefault(slc.algo, [0, set()])
            user[slc.algo][0] += slc.value
            user[slc.algo][1].add(slc.time)

    # Loop through and convert a summation of shares into a hashrate. Converts
    # to hashes per second
    for user, algo_shares in users.iteritems():
        for algo_key, (shares, minutes) in algo_shares.items():
            algo_obj = algos[algo_key]
            algo_shares[algo_key] = algo_obj.hashes_per_share * (shares / (len(minutes) * 60))
            algo_shares.setdefault('normalized', 0)
            algo_shares['normalized'] += users[user][algo_key] * algo_obj.normalize_mult

    sorted_users = sorted(users.iteritems(),
                          key=lambda x: x[1]['normalized'],
                          reverse=True)

    # This is really bad.... XXX: Needs rework!
    if users:
        anon = anon_users()
        for i, (user, data) in enumerate(sorted_users):
            if user in anon:
                sorted_users[i] = ("Anonymous", data)

    cache.set("leaderboard", sorted_users, timeout=15 * 60)
Example #4
    def test_share_slice_cleanup(self):
        # Make 10 or so fake compressed share slices. Count the shares for half of them
        total_shares = 0
        for i in xrange(10):
            shares = random.randint(1, 200)
            if i <= 5:
                total_shares += shares
            self.app.redis.hmset("chain_1_slice_{}".format(i),
                                 dict(total_shares=shares))
        self.app.redis.set("chain_1_slice_index", 9)

        # Find a fake difficulty that would cause deletion of one half of the
        # share slices
        hashes_for_shares = total_shares * float(
            chains[1].algo.hashes_per_share)
        diff_for_shares = hashes_for_shares / (2**32)
        # Safety_margin + last_n multiplies by 4
        diff_for_shares /= 4
        cache.set(
            "DOGE_data",
            dict(difficulty_avg=diff_for_shares,
                 difficulty_avg_stale=False,
                 timeout=1200))

        chain_cleanup(chains[1], dont_simulate=True)
        # 11 total keys, we will delete 5
        self.assertEquals(len(self.app.redis.keys("chain_1_slice_*")), 8)
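
The fake difficulty in this test is the share/difficulty identity run backwards: one unit of difficulty is worth 2**32 hashes, so the difficulty whose window covers a given pile of shares is shares * hashes_per_share / 2**32, and the extra division by 4 undoes the safety_margin and last_n multipliers the cleanup applies. A quick arithmetic sketch (values purely illustrative):

def difficulty_covering_shares(shares, hashes_per_share, window_mult=4):
    """Difficulty whose PPLNS window (scaled by window_mult) equals `shares`."""
    return shares * float(hashes_per_share) / (2 ** 32) / window_mult

# 600 shares at 65536 hashes/share with a 4x window
# -> 600 * 65536 / 2**32 / 4 ~= 0.00229
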
Example #5
def cache_profitability():
    """
    Calculates the profitability from recent blocks
    """
    # track chain profits
    chain_profit = {}

    start_time = datetime.datetime.utcnow() - datetime.timedelta(hours=96)

    query_currencies = [c.key for c in currencies.itervalues() if c.mineable and c.sellable]
    blocks = (Block.query.filter(Block.found_at > start_time).
              filter(Block.currency.in_(query_currencies)).all())

    for block in blocks:
        chain_data = block.chain_profitability()
        current_app.logger.info("Get {} from {}".format(chain_data, block))

        for chainid, data in chain_data.iteritems():

            if chainid not in chains:
                current_app.logger.warn(
                    "Chain #{} not configured properly! Skipping it..."
                    .format(chainid))
                continue

            # Set the block for convenience later
            data['block'] = block
            chain_profit.setdefault(chainid, {})
            chain_profit[chainid].setdefault(block.currency_obj, []).append(data)

    for chainid, chain_currencies in chain_profit.iteritems():
        merged_shares = 0
        main_shares = 0
        merged_currencies = 0
        btc_total = 0
        for currency, entries in chain_currencies.iteritems():
            if currency.merged:
                merged_currencies += 1
            for data in entries:
                btc_total += data['btc_total']
                if currency.merged:
                    merged_shares += data['sold_shares']
                else:
                    main_shares += data['sold_shares']

        hps = chains[chainid].algo.hashes_per_share
        if main_shares != 0:
            btc_per = btc_total / (main_shares * hps)
        elif merged_shares != 0:
            btc_per = btc_total / (merged_shares * hps / merged_currencies)
        else:
            btc_per = 0
        btc_per *= 86400  # per day

        current_app.logger.debug("Caching chain #{} with profit {}"
                                 .format(chainid, btc_per))

        cache.set('chain_{}_profitability'.format(chainid),
                  btc_per, timeout=3600 * 8)
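
The cached figure is BTC earned per one hash/second of sustained hashrate, extrapolated to a day: the BTC total divided by the hashes the sold shares represent (shares * hashes_per_share), with merged-coin share counts divided by the number of merged currencies exactly as the elif branch does, then multiplied by 86400 seconds. A compact sketch of that calculation (not the project's code):

def btc_per_hps_per_day(btc_total, sold_shares, hashes_per_share,
                        merged_currencies=0):
    """BTC/day earned by 1 H/s of sustained hashrate, mirroring the
    main/merged branches above."""
    if sold_shares == 0:
        return 0
    hashes = sold_shares * float(hashes_per_share)
    if merged_currencies:
        # merged shares were summed once per merged currency; divide that back out
        hashes /= merged_currencies
    return btc_total / hashes * 86400
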
Example #6
def cache_profitability():
    """
    Calculates the profitability from recent blocks
    """
    # track chain profits
    chain_profit = {}

    start_time = datetime.datetime.utcnow() - datetime.timedelta(hours=96)

    query_currencies = [c.key for c in currencies.itervalues() if c.mineable and c.sellable]
    blocks = (Block.query.filter(Block.found_at > start_time).
              filter(Block.currency.in_(query_currencies)))

    for block in blocks:
        chain_data = block.chain_profitability()
        current_app.logger.info("Get {} from {}".format(chain_data, block))

        for chainid, data in chain_data.iteritems():

            if chainid not in chains:
                current_app.logger.warn(
                    "Chain #{} not configured properly! Skipping it..."
                    .format(chainid))
                continue

            # Set the block for convenience later
            data['block'] = block
            chain_profit.setdefault(chainid, {})
            chain_profit[chainid].setdefault(block.currency_obj, []).append(data)

    for chainid, chain_currencies in chain_profit.iteritems():
        merged_shares = 0
        main_shares = 0
        merged_currencies = 0
        btc_total = 0
        for currency, entries in chain_currencies.iteritems():
            if currency.merged:
                merged_currencies += 1
            for data in entries:
                btc_total += data['btc_total']
                if currency.merged:
                    merged_shares += data['sold_shares']
                else:
                    main_shares += data['sold_shares']

        hps = chains[chainid].algo.hashes_per_share
        if main_shares != 0:
            btc_per = btc_total / (main_shares * hps)
        elif merged_shares != 0:
            btc_per = btc_total / (merged_shares * hps / merged_currencies)
        else:
            btc_per = 0
        btc_per *= 86400  # per day

        current_app.logger.debug("Caching chain #{} with profit {}"
                                 .format(chainid, btc_per))

        cache.set('chain_{}_profitability'.format(chainid),
                  btc_per, timeout=3600 * 2)
Example #7
def cache_user_donation():
    """
    Grab all user donations and loop through them then cache donation %
    """
    user_donations = {}
    # Build a dict of donation % to cache
    custom_donations = DonationPercent.query.all()
    for donation in custom_donations:
        user_donations.setdefault(donation.user, current_app.config['default_perc'])
        user_donations[donation.user] = donation.perc

    cache.set('user_donations', user_donations, timeout=1440 * 60)
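
A plausible reader of this cache key (hypothetical helper, not shown in any of these examples) looks the user up in the cached dict and falls back to the configured default when no custom percentage was stored:

def donation_perc_for(user, cache, default_perc):
    """Hypothetical consumer of the 'user_donations' cache entry."""
    donations = cache.get('user_donations') or {}
    return donations.get(user, default_perc)
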
Example #8
def difficulty_avg(self):
    """
    Set up a blob with the average network difficulty for the last 500 blocks
    """
    try:
        diff_list = cache.cache._client.lrange('block_cache', 0, 500)
        total_diffs = sum([bits_to_difficulty(diff) for diff in diff_list])
        cache.set('difficulty_avg', total_diffs / len(diff_list),
                  timeout=120 * 60)
    except Exception as exc:
        logger.warn("Unknown failure in difficulty_avg", exc_info=True)
        raise self.retry(exc=exc)
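
bits_to_difficulty itself is not defined in any of these snippets. For reference, the usual compact-bits-to-difficulty conversion for Bitcoin-like networks looks roughly like this, assuming the cached values are hex strings as getblocktemplate reports them (a sketch, not the project's implementation):

def bits_to_difficulty(bits_hex):
    """Difficulty of a compact 'bits' target, relative to the difficulty-1 target."""
    bits = int(bits_hex, 16)
    exponent = bits >> 24
    mantissa = bits & 0xffffff
    target = mantissa * 2 ** (8 * (exponent - 3))
    diff1_target = 0xffff * 2 ** (8 * (0x1d - 3))
    return float(diff1_target) / target

# bits_to_difficulty('1d00ffff') -> 1.0, the minimum (genesis) difficulty
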
Example #9
def cache_user_donation():
    """
    Grab all user donations and loop through them then cache donation %
    """
    user_donations = {}
    # Build a dict of donation % to cache
    users = UserSettings.query.all()
    for user in users:
        user_donations.setdefault(user.user, Decimal(current_app.config.get('default_donate_perc', 0)))
        user_donations[user.user] = user.pdonation_perc

    cache.set('user_donations', user_donations, timeout=1440 * 60)
Example #10
def cache_user_donation():
    """
    Grab all user donations and loop through them then cache donation %
    """
    user_donations = {}
    # Build a dict of donation % to cache
    users = UserSettings.query.all()
    for user in users:
        user_donations.setdefault(user.user, Decimal(current_app.config.get('default_donate_perc', 0)))
        user_donations[user.user] = user.pdonation_perc

    cache.set('user_donations', user_donations, timeout=1440 * 60)
Example #11
def cache_user_donation():
    """
    Grab all user donations and loop through them then cache donation %
    """
    user_donations = {}
    # Build a dict of donation % to cache
    custom_donations = DonationPercent.query.all()
    for donation in custom_donations:
        user_donations.setdefault(donation.user,
                                  current_app.config['default_perc'])
        user_donations[donation.user] = donation.perc

    cache.set('user_donations', user_donations, timeout=1440 * 60)
Example #12
def cache_user_donation(self):
    """
    Grab all user donations and loop through them then cache donation %
    """

    try:
        user_donations = {}
        # Build a dict of donation % to cache
        custom_donations = DonationPercent.query.all()
        for donation in custom_donations:
            user_donations.setdefault(donation.user, current_app.config['default_perc'])
            user_donations[donation.user] = donation.perc

        cache.set('user_donations', user_donations, timeout=1440 * 60)

    except Exception as exc:
        logger.error("Unhandled exception in caching user donations", exc_info=True)
        raise self.retry(exc=exc)
Example #13
def update_network():
    """
    Queries the configured RPC servers to update network stats information.
    """
    for currency in currencies.itervalues():
        if not currency.mineable:
            continue

        try:
            gbt = currency.coinserv.getblocktemplate({})
        except (urllib3.exceptions.HTTPError, CoinRPCException) as e:
            current_app.logger.error("Unable to communicate with {} RPC server: {}"
                                     .format(currency, e))
            continue

        key = "{}_data".format(currency.key)
        block_cache_key = "{}_block_cache".format(currency.key)

        current_data = cache.get(key)
        if current_data and current_data['height'] == gbt['height']:
            # Already have information for this block
            current_app.logger.debug(
                "Not updating {} net info, height {} already recorded."
                .format(currency, current_data['height']))
        else:
            current_app.logger.info(
                "Updating {} net info for height {}.".format(currency, gbt['height']))

        # Six hours' worth of blocks; how many we'll keep in the cache
        keep_count = 21600 / currency.block_time

        difficulty = bits_to_difficulty(gbt['bits'])
        cache.cache._client.lpush(block_cache_key, difficulty)
        cache.cache._client.ltrim(block_cache_key, 0, keep_count)
        diff_list = cache.cache._client.lrange(block_cache_key, 0, -1)
        difficulty_avg = sum(map(float, diff_list)) / len(diff_list)

        cache.set(key,
                  dict(height=gbt['height'],
                       difficulty=difficulty,
                       reward=gbt['coinbasevalue'] * current_app.SATOSHI,
                       difficulty_avg=difficulty_avg,
                       difficulty_avg_stale=len(diff_list) < keep_count),
                  timeout=1200)
Example #14
def update_network():
    """
    Queries the configured RPC servers to update network stats information.
    """
    for currency in currencies.itervalues():
        if not currency.mineable:
            continue

        try:
            gbt = currency.coinserv.getblocktemplate({})
        except (urllib3.exceptions.HTTPError, CoinRPCException) as e:
            current_app.logger.error("Unable to communicate with {} RPC server: {}"
                                     .format(currency, e))
            continue

        key = "{}_data".format(currency.key)
        block_cache_key = "{}_block_cache".format(currency.key)

        current_data = cache.get(key)
        if current_data and current_data['height'] == gbt['height']:
            # Already have information for this block
            current_app.logger.debug(
                "Not updating {} net info, height {} already recorded."
                .format(currency, current_data['height']))
        else:
            current_app.logger.info(
                "Updating {} net info for height {}.".format(currency, gbt['height']))

        # Six hours' worth of blocks; how many we'll keep in the cache
        keep_count = 21600 / currency.block_time

        difficulty = bits_to_difficulty(gbt['bits'])
        cache.cache._client.lpush(block_cache_key, difficulty)
        cache.cache._client.ltrim(block_cache_key, 0, keep_count)
        diff_list = cache.cache._client.lrange(block_cache_key, 0, -1)
        difficulty_avg = sum(map(float, diff_list)) / len(diff_list)

        cache.set(key,
                  dict(height=gbt['height'],
                       difficulty=difficulty,
                       reward=gbt['coinbasevalue'] * current_app.SATOSHI,
                       difficulty_avg=difficulty_avg,
                       difficulty_avg_stale=len(diff_list) < keep_count),
                  timeout=1200)
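
The difficulty averaging above is a rolling-window pattern on a Redis list: lpush the newest value, ltrim the list to a fixed length, then lrange and average. A standalone version with a plain redis-py client (note that ltrim bounds are inclusive, so trimming to keep - 1 retains exactly `keep` entries):

import redis

def rolling_average(client, key, value, keep):
    """Push `value` onto a capped Redis list and return the average of what's kept."""
    client.lpush(key, value)
    client.ltrim(key, 0, keep - 1)  # inclusive: retain the `keep` newest entries
    values = client.lrange(key, 0, -1)
    return sum(float(v) for v in values) / len(values)

# client = redis.StrictRedis()
# rolling_average(client, 'DOGE_block_cache', 1834.2, keep=360)  # ~6h of 1-min blocks
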
Example #15
def crontab(func, *args, **kwargs):
    """ Handles rolling back SQLAlchemy exceptions to prevent breaking the
    connection for the whole scheduler. Also records timing information into
    the cache """

    t = time.time()
    res = None
    try:
        res = func(*args, **kwargs)
    except sqlalchemy.exc.SQLAlchemyError as e:
        current_app.logger.error("SQLAlchemyError occurred, rolling back: {}".format(e), exc_info=True)
        db.session.rollback()
    except Exception:
        current_app.logger.error("Unhandled exception in {}".format(func.__name__),
                                 exc_info=True)

    t = time.time() - t
    cache.set('cron_last_run_{}'.format(func.__name__),
              dict(runtime=t, time=int(time.time())))
    return res
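
Typical use is simply passing each task through the wrapper so one failure logs, rolls back, and leaves the scheduler loop alive; something along these lines (the scheduler wiring itself is assumed, not shown in these examples):

# hypothetical scheduler tick: run every periodic task through the wrapper
for task in (update_network, update_pplns_est, server_status):
    crontab(task)
# cache.get('cron_last_run_update_network') then holds the runtime/time record
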
Example #16
def crontab(func, *args, **kwargs):
    """ Handles rolling back SQLAlchemy exceptions to prevent breaking the
    connection for the whole scheduler. Also records timing information into
    the cache """

    t = time.time()
    res = None
    try:
        res = func(*args, **kwargs)
    except sqlalchemy.exc.SQLAlchemyError as e:
        current_app.logger.error("SQLAlchemyError occurred, rolling back: {}".format(e), exc_info=True)
        db.session.rollback()
    except Exception:
        current_app.logger.error("Unhandled exception in {}".format(func.__name__),
                                 exc_info=True)

    t = time.time() - t
    cache.set('cron_last_run_{}'.format(func.__name__),
              dict(runtime=t, time=int(time.time())))
    return res
Example #17
def update_pplns_est():
    """
    Generates redis cached value for share counts of all users based on PPLNS window
    """
    logger.info("Recomputing PPLNS for users")
    # grab configured N
    mult = int(current_app.config['last_n'])
    # grab the configured hashes in an n1 share
    hashes_per_share = current_app.config.get('hashes_per_share', 65536)
    # generate average diff from last 500 blocks
    diff = cache.get('difficulty_avg')
    if diff is None:
        logger.warn(
            "Difficulty average is blank, can't calculate pplns estimate")
        return

    # Calculate the total shares that are 'counted'
    total_shares = ((float(diff) * (2**32)) / hashes_per_share) * mult

    # Loop through all shares, descending order, until we'd distributed the
    # shares
    user_shares, total_grabbed = get_sharemap(None, total_shares)
    user_shares = {'pplns_' + k: v for k, v in user_shares.iteritems()}

    cache.set('pplns_total_shares', total_grabbed, timeout=40 * 60)
    cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
    cache.set_many(user_shares, timeout=40 * 60)
    cache.set('pplns_user_shares', user_shares, timeout=40 * 60)
Example #18
def server_status():
    """
    Periodically poll the backend to get the number of workers and other general
    status information.
    """
    algo_miners = {}
    servers = {}
    raw_servers = {}
    for powerpool in powerpools.itervalues():
        try:
            data = powerpool.request('')
        except Exception:
            current_app.logger.warn("Couldn't connect to internal monitor {}"
                                    .format(powerpool.full_info()))
            continue
        else:
            raw_servers[powerpool.stratum_address] = data
            servers[powerpool] = dict(workers=data['client_count_authed'],
                                      miners=data['address_count'],
                                      hashrate=data['hps'],
                                      name=powerpool.stratum_address)
            algo_miners.setdefault(powerpool.chain.algo.key, 0)
            algo_miners[powerpool.chain.algo.key] += data['address_count']

    cache.set('raw_server_status', raw_servers, timeout=1200)
    cache.set('server_status', servers, timeout=1200)
    cache.set('total_miners', algo_miners, timeout=1200)
Example #19
def update_pplns_est(self):
    """
    Generates redis cached value for share counts of all users based on PPLNS window
    """
    try:
        logger.info("Recomputing PPLNS for users")
        # grab configured N
        mult = int(current_app.config['last_n'])
        # generate average diff from last 500 blocks
        diff = cache.get('difficulty_avg')
        if diff is None:
            logger.warn("Difficulty average is blank, can't calculate pplns estimate")
            return

        # Calculate the total shares that are 'counted'
        total_shares = (float(diff) * (2 ** 16)) * mult

        # Loop through all shares, descending order, until we'd distributed the
        # shares
        user_shares, total_grabbed = get_sharemap(None, total_shares)
        user_shares = {'pplns_' + k: v for k, v in user_shares.iteritems()}

        cache.set('pplns_total_shares', total_grabbed, timeout=40 * 60)
        cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
        cache.set_many(user_shares, timeout=40 * 60)
        cache.set('pplns_user_shares', user_shares, timeout=40 * 60)

    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)
Example #20
def server_status():
    """
    Periodically poll the backend to get the number of workers and throw it in the
    cache
    """
    total_workers = 0
    servers = []
    raw_servers = {}
    for i, pp_config in enumerate(current_app.config['monitor_addrs']):
        mon_addr = pp_config['mon_address']
        try:
            req = requests.get(mon_addr)
            data = req.json()
        except Exception:
            logger.warn("Couldn't connect to internal monitor at {}"
                        .format(mon_addr))
            continue
        else:
            if 'server' in data:
                workers = data['stratum_manager']['client_count_authed']
                hashrate = data['stratum_manager']['mhps'] * 1000000
                raw_servers[pp_config['stratum']] = data
            else:
                workers = data['stratum_clients']
                hashrate = data['shares']['hour_total'] / 3600.0 * current_app.config['hashes_per_share']
            servers.append(dict(workers=workers,
                                hashrate=hashrate,
                                name=pp_config['stratum']))
            total_workers += workers

    cache.set('raw_server_status', json.dumps(raw_servers), timeout=1200)
    cache.set('server_status', json.dumps(servers), timeout=1200)
    cache.set('total_workers', total_workers, timeout=1200)
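
The fallback branch derives a hashrate from the hourly share counter: shares accepted in the last hour, divided by 3600 seconds, times the configured hashes per share. As a plain calculation (default value illustrative):

def hashrate_from_hour_total(hour_total_shares, hashes_per_share=65536):
    """Average H/s implied by the shares accepted over the last hour."""
    return hour_total_shares / 3600.0 * hashes_per_share

# 5400 shares in an hour -> 5400 / 3600.0 * 65536 = 98304.0 H/s
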
Example #21
def update_pplns_est():
    """
    Generates redis cached value for share counts of all users based on PPLNS window
    """
    logger.info("Recomputing PPLNS for users")
    # grab configured N
    mult = int(current_app.config['last_n'])
    # grab the configured hashes in an n1 share
    hashes_per_share = current_app.config.get('hashes_per_share', 65536)
    # generate average diff from last 500 blocks
    diff = cache.get('difficulty_avg')
    if diff is None:
        logger.warn("Difficulty average is blank, can't calculate pplns estimate")
        return

    # Calculate the total shares that are 'counted'
    total_shares = ((float(diff) * (2 ** 32)) / hashes_per_share) * mult

    # Loop through all shares, descending order, until we'd distributed the
    # shares
    user_shares, total_grabbed = get_sharemap(None, total_shares)
    user_shares = {'pplns_' + k: v for k, v in user_shares.iteritems()}

    cache.set('pplns_total_shares', total_grabbed, timeout=40 * 60)
    cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
    cache.set_many(user_shares, timeout=40 * 60)
    cache.set('pplns_user_shares', user_shares, timeout=40 * 60)
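
get_sharemap is not defined in any of these snippets, but Examples #1 and #28 spell the same walk out by hand, so its behaviour here is presumably: stream shares newest-first and credit each user until the window of total_shares is exhausted, returning the per-user map and how much was actually grabbed. A sketch under that assumption (pure Python, the share source injected as an iterable):

def get_sharemap_sketch(shares_newest_first, total_shares):
    """Approximation of get_sharemap(): (user, shares) pairs newest-first,
    credited until the PPLNS window is used up."""
    remain = float(total_shares)
    user_shares = {}
    for user, shares in shares_newest_first:
        take = min(float(shares), remain)
        user_shares[user] = user_shares.get(user, 0) + take
        remain -= take
        if remain <= 0:
            break
    return user_shares, float(total_shares) - remain

# get_sharemap_sketch([("alice", 50), ("bob", 80)], 100)
# -> ({'alice': 50.0, 'bob': 50.0}, 100.0)
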
Example #22
    def test_share_slice_cleanup(self):
        # Make 10 or so fake compressed share slices. Count the shares for half of them
        total_shares = 0
        for i in xrange(10):
            shares = random.randint(1, 200)
            if i <= 5:
                total_shares += shares
            self.app.redis.hmset("chain_1_slice_{}".format(i), dict(total_shares=shares))
        self.app.redis.set("chain_1_slice_index", 9)

        # Find a fake difficulty that would cause deletion of one half of the
        # share slices
        hashes_for_shares = total_shares * float(chains[1].algo.hashes_per_share)
        diff_for_shares = hashes_for_shares / (2 ** 32)
        # Safety_margin + last_n multiplies by 4
        diff_for_shares /= 4
        cache.set("DOGE_data", dict(difficulty_avg=diff_for_shares,
                                    difficulty_avg_stale=False, timeout=1200))

        chain_cleanup(chains[1], dont_simulate=True)
        # 11 total keys, we will delete 5
        self.assertEquals(len(self.app.redis.keys("chain_1_slice_*")), 8)
Example #23
    def set_data(gbt, curr=None):
        prefix = ""
        if curr:
            prefix = curr + "_"
        prev_height = cache.get(prefix + 'blockheight') or 0

        if gbt['height'] == prev_height:
            logger.debug(
                "Not updating {} net info, height {} already recorded.".format(
                    curr or 'main', prev_height))
            return
        logger.info("Updating {} net info for height {}.".format(
            curr or 'main', gbt['height']))

        # set general information for this network
        difficulty = bits_to_difficulty(gbt['bits'])
        cache.set(prefix + 'blockheight', gbt['height'], timeout=1200)
        cache.set(prefix + 'difficulty', difficulty, timeout=1200)
        cache.set(prefix + 'reward', gbt['coinbasevalue'], timeout=1200)

        # keep a configured number of blocks in the cache for getting average difficulty
        cache.cache._client.lpush(prefix + 'block_cache', gbt['bits'])
        cache.cache._client.ltrim(prefix + 'block_cache', 0,
                                  current_app.config['difficulty_avg_period'])
        diff_list = cache.cache._client.lrange(
            prefix + 'block_cache', 0,
            current_app.config['difficulty_avg_period'])
        total_diffs = sum([bits_to_difficulty(diff) for diff in diff_list])
        cache.set(prefix + 'difficulty_avg',
                  total_diffs / len(diff_list),
                  timeout=120 * 60)

        # add the difficulty as a one minute share, unless we're staging
        if not current_app.config.get('stage', False):
            now = datetime.datetime.utcnow()
            try:
                m = OneMinuteType(typ=prefix + 'netdiff',
                                  value=difficulty * 1000,
                                  time=now)
                db.session.add(m)
                db.session.commit()
            except sqlalchemy.exc.IntegrityError:
                db.session.rollback()
                slc = OneMinuteType.query.with_lockmode('update').filter_by(
                    time=now, typ=prefix + 'netdiff').one()
                # just average the diff of two blocks that occurred in the same second
                slc.value = ((difficulty * 1000) + slc.value) / 2
                db.session.commit()
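
The netdiff insert is an upsert-by-exception: try the insert, and if the unique constraint on (time, typ) fires, roll back, lock the existing row, and fold the new value in by averaging. The general shape of that pattern with SQLAlchemy (model, session, and field names are placeholders; the examples use the older with_lockmode('update') spelling where this sketch uses with_for_update()):

import sqlalchemy

def insert_or_average(session, Model, value, **key_fields):
    """Insert a row keyed by `key_fields`; on a duplicate, average the new
    value into the existing row under a row lock. Illustrative only."""
    try:
        session.add(Model(value=value, **key_fields))
        session.commit()
    except sqlalchemy.exc.IntegrityError:
        session.rollback()
        row = (session.query(Model)
               .with_for_update()
               .filter_by(**key_fields)
               .one())
        row.value = (row.value + value) / 2
        session.commit()
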
Example #24
def server_status(self):
    """
    Periodically poll the backend to get the number of workers and throw it in the cache
    """
    try:
        total_workers = 0
        for i, pp_config in enumerate(current_app.config['monitor_addrs']):
            mon_addr = pp_config['mon_address']
            try:
                req = requests.get(mon_addr)
                data = req.json()
            except Exception:
                logger.warn("Couldn't connect to internal monitor at {}"
                            .format(mon_addr))
                continue
            else:
                cache.set('stratum_workers_' + str(i),
                          data['stratum_clients'], timeout=1200)
                total_workers += data['stratum_clients']

        cache.set('total_workers', total_workers, timeout=1200)
    except Exception:
        logger.error("Unhandled exception in server_status", exc_info=True)
        db.session.rollback()
Example #25
def new_block(self, blockheight, bits=None, reward=None):
    """
    Notification that a new block height has been reached in the network.
    Sets some things into the cache for display on the website.
    """
    logger.info("Recieved notice of new block height {}".format(blockheight))

    cache.set('blockheight', blockheight, timeout=1200)
    cache.set('difficulty', bits_to_difficulty(bits), timeout=1200)
    cache.set('reward', reward, timeout=1200)

    # keep the last 500 blocks in the cache for getting average difficulty
    cache.cache._client.lpush('block_cache', bits)
    cache.cache._client.ltrim('block_cache', 0, 500)
Example #26
    def set_data(gbt, curr=None):
        prefix = ""
        if curr:
            prefix = curr + "_"
        prev_height = cache.get(prefix + 'blockheight') or 0

        if gbt['height'] == prev_height:
            logger.debug("Not updating {} net info, height {} already recorded."
                         .format(curr or 'main', prev_height))
            return
        logger.info("Updating {} net info for height {}.".format(curr or 'main', gbt['height']))

        # set general information for this network
        difficulty = bits_to_difficulty(gbt['bits'])
        cache.set(prefix + 'blockheight', gbt['height'], timeout=1200)
        cache.set(prefix + 'difficulty', difficulty, timeout=1200)
        cache.set(prefix + 'reward', gbt['coinbasevalue'], timeout=1200)

        # keep a configured number of blocks in the cache for getting average difficulty
        cache.cache._client.lpush(prefix + 'block_cache', gbt['bits'])
        cache.cache._client.ltrim(prefix + 'block_cache', 0, current_app.config['difficulty_avg_period'])
        diff_list = cache.cache._client.lrange(prefix + 'block_cache', 0, current_app.config['difficulty_avg_period'])
        total_diffs = sum([bits_to_difficulty(diff) for diff in diff_list])
        cache.set(prefix + 'difficulty_avg', total_diffs / len(diff_list), timeout=120 * 60)

        # add the difficulty as a one minute share, unless we're staging
        if not current_app.config.get('stage', False):
            now = datetime.datetime.utcnow()
            try:
                m = OneMinuteType(typ=prefix + 'netdiff', value=difficulty * 1000, time=now)
                db.session.add(m)
                db.session.commit()
            except sqlalchemy.exc.IntegrityError:
                db.session.rollback()
                slc = OneMinuteType.query.with_lockmode('update').filter_by(
                    time=now, typ=prefix + 'netdiff').one()
                # just average the diff of two blocks that occurred in the same second
                slc.value = ((difficulty * 1000) + slc.value) / 2
                db.session.commit()
Example #27
def new_block(self, blockheight, bits=None, reward=None):
    """
    Notification that a new block height has been reached in the network.
    Sets some things into the cache for display on the website, adds graphing
    for the network difficulty graph.
    """
    # prevent lots of duplicate rerunning...
    last_blockheight = cache.get('blockheight') or 0
    if blockheight == last_blockheight:
        logger.warn("Recieving duplicate new_block notif, ignoring...")
        return
    logger.info("Recieved notice of new block height {}".format(blockheight))

    difficulty = bits_to_difficulty(bits)
    cache.set('blockheight', blockheight, timeout=1200)
    cache.set('difficulty', difficulty, timeout=1200)
    cache.set('reward', reward, timeout=1200)

    # keep the last 500 blocks in the cache for getting average difficulty
    cache.cache._client.lpush('block_cache', bits)
    cache.cache._client.ltrim('block_cache', 0, 500)
    diff_list = cache.cache._client.lrange('block_cache', 0, 500)
    total_diffs = sum([bits_to_difficulty(diff) for diff in diff_list])
    cache.set('difficulty_avg', total_diffs / len(diff_list), timeout=120 * 60)

    # add the difficulty as a one minute share
    now = datetime.datetime.utcnow()
    try:
        m = OneMinuteType(typ='netdiff', value=difficulty * 1000, time=now)
        db.session.add(m)
        db.session.commit()
    except sqlalchemy.exc.IntegrityError:
        db.session.rollback()
        slc = OneMinuteType.query.with_lockmode('update').filter_by(
            time=now, typ='netdiff').one()
        # just average the diff of two blocks that occurred in the same second
        slc.value = ((difficulty * 1000) + slc.value) / 2
        db.session.commit()
Example #28
def update_pplns_est(self):
    """
    Generates redis cached value for share counts of all users based on PPLNS window
    """
    try:
        logger.info("Recomputing PPLNS for users")
        # grab configured N
        mult = int(current_app.config['last_n'])
        # generate average diff from last 500 blocks
        diff = cache.get('difficulty_avg')
        if diff is None:
            logger.warn("Difficulty average is blank, can't calculate pplns estimate")
            return
        # Calculate the total shares that are 'counted'
        total_shares = ((float(diff) * (2 ** 16)) * mult)

        # Loop through all shares, descending order, until we'd distributed the
        # shares
        remain = total_shares
        user_shares = {}
        for shares, user in (db.engine.execution_options(stream_results=True).
                             execute(select([Share.shares, Share.user]).
                                     order_by(Share.id.desc()))):
            user_shares.setdefault('pplns_' + user, 0)
            if remain > shares:
                user_shares['pplns_' + user] += shares
                remain -= shares
            else:
                user_shares['pplns_' + user] += remain
                remain = 0
                break

        cache.set('pplns_total_shares', (total_shares - remain), timeout=40 * 60)
        cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
        cache.set_many(user_shares, timeout=40 * 60)
        cache.set('pplns_user_shares', user_shares, timeout=40 * 60)

    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)
Example #29
def server_status():
    """
    Periodically poll the backend to get the number of workers and throw it in the
    cache
    """
    total_workers = 0
    servers = []
    raw_servers = {}
    for i, pp_config in enumerate(current_app.config['monitor_addrs']):
        mon_addr = pp_config['mon_address']
        try:
            req = requests.get(mon_addr)
            data = req.json()
        except Exception:
            logger.warn(
                "Couldn't connect to internal monitor at {}".format(mon_addr))
            continue
        else:
            if 'server' in data:
                workers = data['stratum_manager']['client_count_authed']
                hashrate = data['stratum_manager']['mhps'] * 1000000
                raw_servers[pp_config['stratum']] = data
            else:
                workers = data['stratum_clients']
                hashrate = data['shares'][
                    'hour_total'] / 3600.0 * current_app.config[
                        'hashes_per_share']
            servers.append(
                dict(workers=workers,
                     hashrate=hashrate,
                     name=pp_config['stratum']))
            total_workers += workers

    cache.set('raw_server_status', json.dumps(raw_servers), timeout=1200)
    cache.set('server_status', json.dumps(servers), timeout=1200)
    cache.set('total_workers', total_workers, timeout=1200)
Example #30
def server_status():
    """
    Periodically poll the backend to get number of workers and other general
    status information.
    """
    past_chain_profit = get_past_chain_profit()
    currency_hashrates = {}
    algo_miners = {}
    servers = {}
    raw_servers = {}
    for powerpool in powerpools.itervalues():

        server_default = dict(workers=0,
                              miners=0,
                              hashrate=0,
                              name='???',
                              profit_4d=0,
                              currently_mining='???')

        try:
            data = powerpool.request('')
        except Exception:
            current_app.logger.warn("Couldn't connect to internal monitor {}"
                                    .format(powerpool.full_info()))
            continue
        else:
            raw_servers[powerpool.stratum_address] = data
            status = {'workers': data['client_count_authed'],
                      'miners': data['address_count'],
                      'hashrate': data['hps'],
                      'name': powerpool.stratum_address,
                      'profit_4d': past_chain_profit[powerpool.chain.id]}

            server_default.update(status)
            servers[powerpool.key] = server_default

            algo_miners.setdefault(powerpool.chain.algo.key, 0)
            algo_miners[powerpool.chain.algo.key] += data['address_count']

            if 'last_flush_job' in data and data['last_flush_job'] \
                    and 'currency' in data['last_flush_job']:
                curr = data['last_flush_job']['currency']
                servers[powerpool.key].update({'currently_mining': curr})
                currency_hashrates.setdefault(currencies[curr], 0)
                currency_hashrates[currencies[curr]] += data['hps']
                # Add hashrate to the merged networks too
                if 'merged_networks' in data['last_flush_job']:
                    for currency in data['last_flush_job']['merged_networks']:
                        currency_hashrates.setdefault(currencies[currency], 0)
                        currency_hashrates[currencies[currency]] += data['hps']

    # Set hashrate to 0 if not located
    for currency in currencies.itervalues():
        hashrate = 0
        if currency in currency_hashrates:
            hashrate = currency_hashrates[currency]

        cache.set('hashrate_' + currency.key, hashrate, timeout=120)

    cache.set('raw_server_status', raw_servers, timeout=1200)
    cache.set('server_status', servers, timeout=1200)
    cache.set('total_miners', algo_miners, timeout=1200)
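
The hashrate bookkeeping at the end credits a server's full hps to the currency it is currently mining and, when merge mining, to every merged network as well. Stripped of the PowerPool plumbing, the aggregation amounts to this (pure-Python sketch over plain dicts):

def aggregate_currency_hashrates(server_jobs):
    """server_jobs: dicts like {'hps': 2e6, 'currency': 'LTC', 'merged_networks': ['DOGE']}."""
    rates = {}
    for job in server_jobs:
        for curr in [job['currency']] + list(job.get('merged_networks', [])):
            rates[curr] = rates.get(curr, 0) + job['hps']
    return rates

# aggregate_currency_hashrates([
#     {'hps': 2.0e6, 'currency': 'LTC', 'merged_networks': ['DOGE']},
#     {'hps': 5.0e5, 'currency': 'DOGE'},
# ])  ->  {'LTC': 2000000.0, 'DOGE': 2500000.0}
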
Example #31
def server_status():
    """
    Periodically poll the backend to get number of workers and other general
    status information.
    """
    past_chain_profit = get_past_chain_profit()
    currency_hashrates = {}
    algo_miners = {}
    servers = {}
    raw_servers = {}
    for powerpool in powerpools.itervalues():

        server_default = dict(workers=0,
                              miners=0,
                              hashrate=0,
                              name='???',
                              profit_4d=0,
                              currently_mining='???')

        try:
            data = powerpool.request('')
        except Exception:
            current_app.logger.warn("Couldn't connect to internal monitor {}"
                                    .format(powerpool.full_info()))
            continue
        else:
            raw_servers[powerpool.stratum_address] = data
            status = {'workers': data['client_count_authed'],
                      'miners': data['address_count'],
                      'hashrate': data['hps'],
                      'name': powerpool.stratum_address,
                      'profit_4d': past_chain_profit[powerpool.chain.id]}

            server_default.update(status)
            servers[powerpool.key] = server_default

            algo_miners.setdefault(powerpool.chain.algo.key, 0)
            algo_miners[powerpool.chain.algo.key] += data['address_count']

            if 'last_flush_job' in data and 'currency' in data['last_flush_job']:
                curr = data['last_flush_job']['currency']
                servers[powerpool.key].update({'currently_mining': curr})
                currency_hashrates.setdefault(currencies[curr], 0)
                currency_hashrates[currencies[curr]] += data['hps']
                # Add hashrate to the merged networks too
                if 'merged_networks' in data['last_flush_job']:
                    for currency in data['last_flush_job']['merged_networks']:
                        currency_hashrates.setdefault(currencies[currency], 0)
                        currency_hashrates[currencies[currency]] += data['hps']

    # Set hashrate to 0 if not located
    for currency in currencies.itervalues():
        hashrate = 0
        if currency in currency_hashrates:
            hashrate = currency_hashrates[currency]

        cache.set('hashrate_' + currency.key, hashrate, timeout=120)

    cache.set('raw_server_status', raw_servers, timeout=1200)
    cache.set('server_status', servers, timeout=1200)
    cache.set('total_miners', algo_miners, timeout=1200)