Example #1
def update_pplns_est():
    """
    Generates Redis-cached values for the share counts of all users based on the PPLNS window
    """
    logger.info("Recomputing PPLNS for users")
    # grab configured N
    mult = int(current_app.config['last_n'])
    # grab the configured number of hashes in a diff1 share
    hashes_per_share = current_app.config.get('hashes_per_share', 65536)
    # fetch the cached average difficulty of the last 500 blocks
    diff = cache.get('difficulty_avg')
    if diff is None:
        logger.warn(
            "Difficulty average is blank, can't calculate pplns estimate")
        return

    # Calculate the total number of shares that are 'counted'
    total_shares = ((float(diff) * (2**32)) / hashes_per_share) * mult

    # Loop through all shares in descending order until we've distributed the
    # counted shares
    user_shares, total_grabbed = get_sharemap(None, total_shares)
    user_shares = {'pplns_' + k: v for k, v in user_shares.iteritems()}

    cache.set('pplns_total_shares', total_grabbed, timeout=40 * 60)
    cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
    cache.set_many(user_shares, timeout=40 * 60)
    cache.set('pplns_user_shares', user_shares, timeout=40 * 60)
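A quick sanity check of the window arithmetic above (illustrative values, not from the source): a diff1 share represents hashes_per_share hashes, and a block at difficulty diff takes about diff * 2**32 hashes to find, so the window holds mult blocks' worth of shares.

# Hypothetical worked example of the window size computed above;
# the numbers are illustrative.
diff = 1000.0              # cached average difficulty
hashes_per_share = 65536   # 2**16 hashes represented by one share
mult = 2                   # configured 'last_n' multiplier

# One block at this difficulty takes ~diff * 2**32 hashes on average,
# i.e. diff * 2**32 / hashes_per_share shares; count mult blocks' worth.
total_shares = ((diff * (2 ** 32)) / hashes_per_share) * mult
assert total_shares == 131072000.0   # 1000 * 65536 * 2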
Example #2
def update_online_workers(self):
    """
    Grabs a list of workers from the running powerpool instances and caches
    them
    """
    try:
        users = {}
        for i, pp_config in enumerate(current_app.config['monitor_addrs']):
            mon_addr = pp_config['mon_address'] + '/clients'
            try:
                req = requests.get(mon_addr)
                data = req.json()
            except Exception:
                current_app.logger.warn("Unable to connect to {} to gather worker summary."
                                        .format(mon_addr))
            else:
                for address, workers in data['clients'].iteritems():
                    users.setdefault('addr_online_' + address, [])
                    for d in workers:
                        users['addr_online_' + address].append((d['worker'], i))

        cache.set_many(users, timeout=480)

    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)
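Reading the cached values back is straightforward; here is a minimal consumer sketch (the address below is hypothetical, and the same `cache` object is assumed). Each value is a list of (worker_name, index) tuples, where the index identifies the powerpool instance's position in 'monitor_addrs'.

# Hypothetical consumer of the 'addr_online_' keys set above.
online = cache.get('addr_online_1ExampleAddr') or []
for worker_name, pp_index in online:
    print "worker %s is connected via powerpool #%d" % (worker_name, pp_index)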
Example #3
def update_pplns_est(self):
    """
    Generates Redis-cached values for the share counts of all users based on the PPLNS window
    """
    try:
        logger.info("Recomputing PPLNS for users")
        # grab configured N
        mult = int(current_app.config['last_n'])
        # fetch the cached average difficulty of the last 500 blocks
        diff = cache.get('difficulty_avg')
        if diff is None:
            logger.warn("Difficulty average is blank, can't calculate pplns estimate")
            return

        # Calculate the total number of shares that are 'counted'
        total_shares = (float(diff) * (2 ** 16)) * mult

        # Loop through all shares in descending order until we've distributed the
        # counted shares
        user_shares, total_grabbed = get_sharemap(None, total_shares)
        user_shares = {'pplns_' + k: v for k, v in user_shares.iteritems()}

        cache.set('pplns_total_shares', total_grabbed, timeout=40 * 60)
        cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
        cache.set_many(user_shares, timeout=40 * 60)
        cache.set('pplns_user_shares', user_shares, timeout=40 * 60)

    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)
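The bound `self` argument and the `self.retry(exc=exc)` call match Celery's bound-task retry pattern; a minimal registration sketch under that assumption (the app name and retry delay are illustrative):

# Sketch only: assumes Celery; the names and options are illustrative.
from celery import Celery

celery = Celery('simplecoin')

@celery.task(bind=True, default_retry_delay=60)
def update_pplns_est(self):
    pass  # body as above; self.retry(exc=exc) reschedules the task on failure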
Example #4
def update_online_workers():
    """
    Grabs data on all currently connected clients. Forms a dictionary of this form:
        dict(address=dict(worker_name=dict(powerpool_id=connection_count)))
    and caches each address's connection summary under a single cache key.
    """
    users = {}
    for ppid, powerpool in powerpools.iteritems():
        try:
            data = powerpool.request('clients/')
        except RemoteException:
            current_app.logger.warn("Unable to connect to PP {} to gather worker summary."
                                    .format(powerpool.full_info()), exc_info=True)
            continue

        for address, connections in data['clients'].iteritems():
            user = users.setdefault('addr_online_' + address, {})
            if isinstance(connections, dict):
                connections = connections.itervalues()
            for connection in connections:
                if isinstance(connection, basestring):
                    continue
                worker = user.setdefault(connection['worker'], {})
                worker.setdefault(ppid, 0)
                worker[ppid] += 1

    cache.set_many(users, timeout=660)
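A hypothetical consumer of the nested structure cached above, summing connection counts per worker across all powerpool instances (the address key is illustrative):

# Sketch only: the cache key is hypothetical.
summary = cache.get('addr_online_1ExampleAddr') or {}
for worker_name, per_pool in summary.iteritems():
    total = sum(per_pool.itervalues())  # connections across all powerpools
    print "%s: %d connection(s)" % (worker_name, total)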
Example #5
def update_pplns_est(self):
    """
    Generates Redis-cached values for the share counts of all users based on the PPLNS window
    """
    try:
        # grab configured N
        mult = int(current_app.config['last_n'])
        # fetch the average difficulty of the last 500 blocks from the stored blob
        diff = Blob.query.filter_by(key="diff").first().data['diff']
        # Calculate the total number of shares that are 'counted'
        total_shares = ((float(diff) * (2 ** 16)) * mult)

        # Loop through all shares in descending order until we've distributed the counted shares
        remain = total_shares
        user_shares = {}
        for share in Share.query.order_by(Share.id.desc()).yield_per(5000):
            user_shares.setdefault('pplns_' + share.user, 0)
            if remain > share.shares:
                user_shares['pplns_' + share.user] += share.shares
                remain -= share.shares
            else:
                user_shares['pplns_' + share.user] += remain
                remain = 0
                break

        cache.set('pplns_total_shares', (total_shares - remain), timeout=40 * 60)
        cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
        cache.set_many(user_shares, timeout=40 * 60)
        cache.set('pplns_user_shares', user_shares, timeout=40 * 60)

    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)
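A toy illustration (hypothetical data) of the windowing loop above: shares are consumed newest-first until the window is exhausted, and the row that crosses the boundary is only partially credited.

# Toy data, newest share rows first; not from the source.
total_shares = 100
rows = [('alice', 60), ('bob', 30), ('alice', 25)]

remain = total_shares
credited = {}
for user, shares in rows:
    credited.setdefault(user, 0)
    if remain > shares:
        credited[user] += shares
        remain -= shares
    else:
        credited[user] += remain   # partial credit for the boundary row
        remain = 0
        break
assert credited == {'alice': 70, 'bob': 30}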
Example #6
def update_pplns_est(self):
    """
    Generates Redis-cached values for the share counts of all users based on the PPLNS window
    """
    try:
        logger.info("Recomputing PPLNS for users")
        # grab configured N
        mult = int(current_app.config['last_n'])
        # fetch the cached average difficulty of the last 500 blocks
        diff = cache.get('difficulty_avg')
        if diff is None:
            logger.warn("Difficulty average is blank, can't calculate pplns estimate")
            return
        # Calculate the total number of shares that are 'counted'
        total_shares = ((float(diff) * (2 ** 16)) * mult)

        # Loop through all shares in descending order until we've distributed the
        # counted shares
        remain = total_shares
        user_shares = {}
        for shares, user in (db.engine.execution_options(stream_results=True).
                             execute(select([Share.shares, Share.user]).
                                     order_by(Share.id.desc()))):
            user_shares.setdefault('pplns_' + user, 0)
            if remain > shares:
                user_shares['pplns_' + user] += shares
                remain -= shares
            else:
                user_shares['pplns_' + user] += remain
                remain = 0
                break

        cache.set('pplns_total_shares', (total_shares - remain), timeout=40 * 60)
        cache.set('pplns_cache_time', datetime.datetime.utcnow(), timeout=40 * 60)
        cache.set_many(user_shares, timeout=40 * 60)
        cache.set('pplns_user_shares', user_shares, timeout=40 * 60)

    except Exception as exc:
        logger.error("Unhandled exception in estimating pplns", exc_info=True)
        raise self.retry(exc=exc)
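The execution_options(stream_results=True) call requests a server-side cursor so the share table is streamed instead of being loaded into memory at once; a standalone sketch of the same pattern (the engine URL is hypothetical, and Share is the model used above):

# Sketch only: the engine URL is illustrative; Share is the model from above.
from sqlalchemy import create_engine, select

engine = create_engine('postgresql://localhost/pool')
conn = engine.connect().execution_options(stream_results=True)
for shares, user in conn.execute(select([Share.shares, Share.user])
                                 .order_by(Share.id.desc())):
    pass  # rows arrive incrementally instead of being buffered client-side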
def update_online_workers():
    """
    Grabs a list of workers from the running powerpool instances and caches
    them
    """
    users = {}
    for i, pp_config in enumerate(current_app.config['monitor_addrs']):
        mon_addr = pp_config['mon_address'] + '/clients'
        try:
            req = requests.get(mon_addr)
            data = req.json()
        except Exception:
            logger.warn(
                "Unable to connect to {} to gather worker summary.".format(
                    mon_addr))
        else:
            for address, workers in data['clients'].iteritems():
                users.setdefault('addr_online_' + address, [])
                for d in workers:
                    users['addr_online_' + address].append((d['worker'], i))

    cache.set_many(users, timeout=480)
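Note that requests.get is called here without a timeout, so a hung powerpool monitor port can stall the whole task; a hedged variant (the timeout value is an illustrative choice):

# Sketch only: the 5-second timeout is illustrative.
req = requests.get(mon_addr, timeout=5)  # raises requests.exceptions.Timeout instead of hanging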