Example #1
def print_runtime_info(sig, frame):
    if sig in [signal.SIGUSR1, signal.SIGUSR2]:
        delayed = reactor.getDelayedCalls()
        readers = reactor.getReaders()
        writers = reactor.getWriters()
        clients = []
        http_conn_num = 0
        for reader in readers:
            if isinstance(reader, twisted.internet.tcp.Server):
                clients.append(reader.getPeer())
            if isinstance(reader, twisted.internet.tcp.Client):
                http_conn_num += 1
        log.msg(
            "[Clients: %(client_num)s] [HTTP Conns: %(http_conn_num)s] "
            "[Readers: %(reader_num)s] [Writers: %(writer_num)s] "
            "[DelayedCalls: %(delayed_num)s]"
            % {
                "client_num": len(clients),
                "http_conn_num": http_conn_num,
                "reader_num": len(readers),
                "writer_num": len(writers),
                "delayed_num": len(delayed),
            }
        )
        log.msg("[Connected Clients]: %s" % clients)
        if sig == signal.SIGUSR2:
            for d in delayed:
                log.msg("SIGUSR2[delayed]: %s" % d)

            for r in readers:
                log.msg("SIGUSR2[reader]: %s" % r)

            for w in writers:
                log.msg("SIGUSR2[writer]: %s" % w)
Example #2
def periodic_reporter(settings):
    """Twisted Task function that runs every few seconds to emit general
    metrics regarding twisted and client counts"""
    settings.metrics.gauge("update.client.writers", len(reactor.getWriters()))
    settings.metrics.gauge("update.client.readers", len(reactor.getReaders()))
    settings.metrics.gauge("update.client.connections", len(settings.clients))
    settings.metrics.gauge("update.client.ws_connections", settings.factory.countConnections)
Example #3
def runtime_info():
    delayed = reactor.getDelayedCalls()
    readers = reactor.getReaders()
    writers = reactor.getWriters()
    servers = []
    clients = []
    other = []
    for reader in readers:
        if isinstance(reader, tcp.Server):
            servers.append({
                'transport': reader,
                'host': reader.getHost(),
                'peer': reader.getPeer()
            })
        elif isinstance(reader, tcp.Client):
            clients.append({
                'transport': reader,
                'host': reader.getHost(),
                'peer': reader.getPeer()
            })
        else:
            other.append(reader)
    return {
        'num_clients': len(clients),
        'num_servers': len(servers),
        'num_other': len(other),
        'num_writers': len(writers),
        'num_delayed': len(delayed),
        'clients': clients,
        'servers': servers,
        'other': other,
        'writers': writers,
        'delayed': delayed,
    }
Example #4
def runtime_info():
    delayed = reactor.getDelayedCalls()
    readers = reactor.getReaders()
    writers = reactor.getWriters()
    servers = []
    clients = []
    other = []
    for reader in readers:
        if isinstance(reader, tcp.Server):
            servers.append({
                'transport': reader,
                'host': reader.getHost(),
                'peer': reader.getPeer()
            })
        elif isinstance(reader, tcp.Client):
            clients.append({
                'transport': reader,
                'host': reader.getHost(),
                'peer': reader.getPeer()
            })
        else:
            other.append(reader)
    return {
        'num_clients': len(clients),
        'num_servers': len(servers),
        'num_other': len(other),
        'num_writers': len(writers),
        'num_delayed': len(delayed),
        'clients': clients,
        'servers': servers,
        'other': other,
        'writers': writers,
        'delayed': delayed,
    }
Example #5
    def checkReactor(self, phase, *_):
        has_network_selectables = False
        for item in reactor.getReaders() + reactor.getWriters():
            if isinstance(item, HTTPChannel) or isinstance(item, Client):
                has_network_selectables = True
                break

        if has_network_selectables:
            # TODO(Martijn): wait a while before continuing the check, since network
            # selectables might take some time to clean up. I'm not sure what's causing this.
            yield deferLater(reactor, 0.2, lambda: None)

        # This is the same check as in the _cleanReactor method of Twisted's Trial
        selectable_strings = []
        for sel in reactor.removeAll():
            if interfaces.IProcessTransport.providedBy(sel):
                self._logger.error("Sending kill signal to %s", repr(sel))
                sel.signalProcess('KILL')
            selectable_strings.append(repr(sel))

        self.assertFalse(selectable_strings,
                         "The reactor has leftover readers/writers during %s: %r" % (phase, selectable_strings))

        # Check whether we have closed all the sockets
        open_readers = reactor.getReaders()
        for reader in open_readers:
            self.assertNotIsInstance(reader, BasePort)

        # Check whether the threadpool is clean
        tp_items = len(reactor.getThreadPool().working)
        if tp_items > 0:  # Print all stacks to debug this issue
            self.watchdog.print_all_stacks()
        self.assertEqual(tp_items, 0, "Still items left in the threadpool")
Example #6
    def test_getWriters(self):
        """
        Check that L{interfaces.IReactorFDSet.getWriters} reflects the actions
        made with L{interfaces.IReactorFDSet.addWriter} and
        L{interfaces.IReactorFDSet.removeWriter}.
        """
        s = socket.socket()
        self.addCleanup(s.close)

        c = Connection(s, protocol.Protocol())
        self.assertNotIn(c, reactor.getWriters())

        reactor.addWriter(c)
        self.assertIn(c, reactor.getWriters())

        reactor.removeWriter(c)
        self.assertNotIn(c, reactor.getWriters())
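The test presumably relies on imports along these lines; the exact import block is not shown in the snippet, so treat this as an assumption:

import socket

from twisted.internet import protocol, reactor
from twisted.internet.tcp import Connection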
Example #7
    def test_getWriters(self):
        """
        Check that L{interfaces.IReactorFDSet.getWriters} reflects the actions
        made with L{interfaces.IReactorFDSet.addWriter} and
        L{interfaces.IReactorFDSet.removeWriter}.
        """
        s = socket.socket()
        self.addCleanup(s.close)

        c = Connection(s, protocol.Protocol())
        self.assertNotIn(c, reactor.getWriters())

        reactor.addWriter(c)
        self.assertIn(c, reactor.getWriters())

        reactor.removeWriter(c)
        self.assertNotIn(c, reactor.getWriters())
Example #8
def periodic_reporter(settings):
    """Twisted Task function that runs every few seconds to emit general
    metrics regarding twisted and client counts"""
    settings.metrics.gauge("update.client.writers", len(reactor.getWriters()))
    settings.metrics.gauge("update.client.readers", len(reactor.getReaders()))
    settings.metrics.gauge("update.client.connections", len(settings.clients))
    settings.metrics.gauge("update.client.ws_connections",
                           settings.factory.countConnections)
Example #9
        def status_thread():
            last_str = None
            last_time = 0
            while True:
                yield deferral.sleep(3)
                try:
                    height = node.tracker.get_height(node.best_share_var.value)
                    this_str = 'P2Pool: %i shares in chain (%i verified/%i total) Peers: %i (%i incoming)' % (
                        height,
                        len(node.tracker.verified.items),
                        len(node.tracker.items),
                        len(node.p2p_node.peers),
                        sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
                    ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')
                    
                    datums, dt = wb.local_rate_monitor.get_datums_in_last()
                    my_att_s = sum(datum['work']/dt for datum in datums)
                    my_shares_per_s = sum(datum['work']/dt/bitcoin_data.target_to_average_attempts(datum['share_target']) for datum in datums)
                    this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
                        math.format(int(my_att_s)),
                        math.format_dt(dt),
                        math.format_binomial_conf(sum(1 for datum in datums if datum['dead']), len(datums), 0.95),
                        math.format_dt(1/my_shares_per_s) if my_shares_per_s else '???',
                    )
                    
                    if height > 2:
                        (stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
                        stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, min(60*60//net.SHARE_PERIOD, height))
                        real_att_s = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, min(height - 1, 60*60//net.SHARE_PERIOD)) / (1 - stale_prop)
                        
                        this_str += '\n Shares: %i (%i orphan, %i dead) Stale rate: %s Efficiency: %s Current payout: %.4f %s' % (
                            shares, stale_orphan_shares, stale_doa_shares,
                            math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95),
                            math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95, lambda x: (1 - x)/(1 - stale_prop)),
                            node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(my_pubkey_hash), 0)*1e-8, net.PARENT.SYMBOL,
                        )
                        print(node.bitcoind_work.value['bits'])
                        print(real_att_s)
                        this_str += '\n Pool: %sH/s Stale rate: %.1f%% Expected time to block: %s' % (
                            math.format(int(real_att_s)),
                            100*stale_prop,
                            math.format_dt(2**256 / node.bitcoind_work.value['bits'].target / real_att_s),
                        )
                        
                        for warning in p2pool_data.get_warnings(node.tracker, node.best_share_var.value, net, bitcoind_getinfo_var.value, node.bitcoind_work.value):
                            print >>sys.stderr, '#'*40
                            print >>sys.stderr, '>>> Warning: ' + warning
                            print >>sys.stderr, '#'*40
                        
                        if gc.garbage:
                            print '%i pieces of uncollectable cyclic garbage! Types: %r' % (len(gc.garbage), map(type, gc.garbage))
                    
                    if this_str != last_str or time.time() > last_time + 15:
                        print this_str
                        last_str = this_str
                        last_time = time.time()
                except:
                    log.err()
Example #10
    def assertReactorIsClean(self):
        """
        Check that the reactor has no delayed calls, readers or writers.
        """
        if reactor is None:
            return

        def raise_failure(location, reason):
            raise AssertionError(
                'Reactor is not clean. %s: %s' % (location, reason))

        if reactor._started:
            raise AssertionError('Reactor was not stopped.')

        # Look at threads queue.
        if len(reactor.threadCallQueue) > 0:
            raise_failure('threads', reactor.threadCallQueue)

        if self._threadPoolQueueSize() > 0:
            raise_failure('threadpool queue', self._threadPoolQueueSize())

        if self._threadPoolWorking() > 0:
            raise_failure('threadpool working', self._threadPoolWorking())

        if self._threadPoolThreads() > 0:
            raise_failure('threadpool threads', self._threadPoolThreads())

        if len(reactor.getWriters()) > 0:
            raise_failure('writers', str(reactor.getWriters()))

        for reader in reactor.getReaders():
            excepted = False
            for reader_type in self.EXCEPTED_READERS:
                if isinstance(reader, reader_type):
                    excepted = True
                    break
            if not excepted:
                raise_failure('readers', str(reactor.getReaders()))

        for delayed_call in reactor.getDelayedCalls():
            if delayed_call.active():
                delayed_str = self._getDelayedCallName(delayed_call)
                if delayed_str in self.EXCEPTED_DELAYED_CALLS:
                    continue
                raise_failure('delayed calls', delayed_str)
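A minimal sketch of how such a check is typically invoked from a test case's teardown, so a dirty reactor is reported next to the test that leaked it; the mixin name in super() is hypothetical:

    def tearDown(self):
        # Run the cleanliness check after every test so leftover delayed
        # calls, readers or writers fail the test that created them.
        self.assertReactorIsClean()
        return super(ReactorCleanupMixin, self).tearDown()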
Example #11
 def status_thread():
     last_str = None
     last_time = 0
     while True:
         yield deferral.sleep(3)
         try:
             height = node.tracker.get_height(node.best_share_var.value)
             this_str = 'P2Pool: %i shares in chain (%i verified/%i total) Peers: %i (%i incoming)' % (
                 height,
                 len(node.tracker.verified.items),
                 len(node.tracker.items),
                 len(node.p2p_node.peers),
                 sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
             ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')
             
             datums, dt = wb.local_rate_monitor.get_datums_in_last()
             my_att_s = sum(datum['work']/dt for datum in datums)
             my_shares_per_s = sum(datum['work']/dt/bitcoin_data.target_to_average_attempts(datum['share_target']) for datum in datums)
             this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
                 math.format(int(my_att_s)),
                 math.format_dt(dt),
                 math.format_binomial_conf(sum(1 for datum in datums if datum['dead']), len(datums), 0.95),
                 math.format_dt(1/my_shares_per_s) if my_shares_per_s else '???',
             )
             
             if height > 2:
                 (stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
                 stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, min(60*60//net.SHARE_PERIOD, height))
                 real_att_s = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, min(height - 1, 60*60//net.SHARE_PERIOD)) / (1 - stale_prop)
                 
                 this_str += '\n Shares: %i (%i orphan, %i dead) Stale rate: %s Efficiency: %s Current payout: %.4f %s' % (
                     shares, stale_orphan_shares, stale_doa_shares,
                     math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95),
                     math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95, lambda x: (1 - x)/(1 - stale_prop)),
                     ### CJWinty: dimecoin has only 5 digits
                     node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(my_pubkey_hash), 0)*1e-5, net.PARENT.SYMBOL,
                 )
                 this_str += '\n Pool: %sH/s Stale rate: %.1f%% Expected time to block: %s' % (
                     math.format(int(real_att_s)),
                     100*stale_prop,
                     math.format_dt(2**256 / node.dimecoind_work.value['bits'].target / real_att_s),
                 )
                 
                 for warning in p2pool_data.get_warnings(node.tracker, node.best_share_var.value, net, dimecoind_warning_var.value, node.dimecoind_work.value):
                     print >>sys.stderr, '#'*40
                     print >>sys.stderr, '>>> Warning: ' + warning
                     print >>sys.stderr, '#'*40
                 
                 if gc.garbage:
                     print '%i pieces of uncollectable cyclic garbage! Types: %r' % (len(gc.garbage), map(type, gc.garbage))
             
             if this_str != last_str or time.time() > last_time + 15:
                 print this_str
                 last_str = this_str
                 last_time = time.time()
         except:
             log.err()
Example #12
 def test_runtime_info(self):
     """Make sure we add runtime info."""
     stats_worker = StatsWorker(self.service, 10)
     # get the reactor
     from twisted.internet import reactor
     stats_worker.runtime_info()
     # check the reactor data
     self.assertIn(('gauge', 'reactor.readers',
                    len(reactor.getReaders())), self.metrics.calls)
     self.assertIn(('gauge', 'reactor.writers',
                    len(reactor.getWriters())), self.metrics.calls)
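For context, a runtime_info implementation that would satisfy these assertions could look roughly like the following; the StatsWorker internals are an assumption, only the metric names and the reactor calls come from the test above:

    def runtime_info(self):
        # Emit one gauge per reactor FD set, sampled at call time.
        from twisted.internet import reactor
        self.metrics.gauge('reactor.readers', len(reactor.getReaders()))
        self.metrics.gauge('reactor.writers', len(reactor.getWriters()))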
Example #13
 def test_runtime_info(self):
     """Make sure we add runtime info."""
     stats_worker = StatsWorker(self.service, 10)
     # get the reactor
     from twisted.internet import reactor
     stats_worker.runtime_info()
     # check the reactor data
     self.assertIn(('gauge', 'reactor.readers', len(reactor.getReaders())),
                   self.metrics.calls)
     self.assertIn(('gauge', 'reactor.writers', len(reactor.getWriters())),
                   self.metrics.calls)
Example #14
    def checkReactor(self, phase, *_):
        delayed_calls = reactor.getDelayedCalls()
        if delayed_calls:
            self._logger.error("The reactor was dirty during %s:", phase)
            for dc in delayed_calls:
                self._logger.error(">     %s", dc)
                dc.cancel()

        from pony.orm.core import local
        if local.db_context_counter > 0:
            self._logger.error("Leftover pony db sessions found!")
        from pony.orm import db_session
        for _ in range(local.db_context_counter):
            db_session.__exit__()

        has_network_selectables = False
        for item in reactor.getReaders() + reactor.getWriters():
            if isinstance(item, HTTPChannel) or isinstance(item, Client):
                has_network_selectables = True
                break

        if has_network_selectables:
            # TODO(Martijn): wait a while before continuing the check, since network
            # selectables might take some time to clean up. I'm not sure what's causing this.
            yield deferLater(reactor, 0.2, lambda: None)

        # This is the same check as in the _cleanReactor method of Twisted's Trial
        selectable_strings = []
        for sel in reactor.removeAll():
            if interfaces.IProcessTransport.providedBy(sel):
                self._logger.error("Sending kill signal to %s", repr(sel))
                sel.signalProcess('KILL')
            selectable_strings.append(repr(sel))

        self.assertFalse(selectable_strings,
                         "The reactor has leftover readers/writers during %s: %r" % (phase, selectable_strings))

        # Check whether we have closed all the sockets
        open_readers = reactor.getReaders()
        for reader in open_readers:
            self.assertNotIsInstance(reader, BasePort)

        # Check whether the threadpool is clean
        tp_items = len(reactor.getThreadPool().working)
        if tp_items > 0:  # Print all stacks to debug this issue
            self.watchdog.print_all_stacks()
        self.assertEqual(tp_items, 0, "Still items left in the threadpool")
Example #15
 def status_thread():
     last_str = None
     last_time = 0
     while True:
         yield deferral.sleep(3)
         try:
             if time.time() > current_work2.value['last_update'] + 60:
                 print '''---> LOST CONTACT WITH BITCOIND for 60 seconds, check that it isn't frozen or dead <---'''
             if current_work.value['best_share_hash'] is not None:
                 height, last = tracker.get_height_and_last(current_work.value['best_share_hash'])
                 if height > 2:
                     att_s = p2pool_data.get_pool_attempts_per_second(tracker, current_work.value['best_share_hash'], min(height - 1, 720))
                     weights, total_weight, donation_weight = tracker.get_cumulative_weights(current_work.value['best_share_hash'], min(height, 720), 65535*2**256)
                     shares, stale_doa_shares, stale_not_doa_shares = get_share_counts(True)
                     stale_shares = stale_doa_shares + stale_not_doa_shares
                     fracs = [share.stale_frac for share in tracker.get_chain(current_work.value['best_share_hash'], min(120, height)) if share.stale_frac is not None]
                     this_str = 'Pool: %sH/s in %i shares (%i/%i verified) Recent: %.02f%% >%sH/s Shares: %i (%i orphan, %i dead) Peers: %i' % (
                         math.format(int(att_s / (1. - (math.median(fracs) if fracs else 0)))),
                         height,
                         len(tracker.verified.shares),
                         len(tracker.shares),
                         weights.get(my_script, 0)/total_weight*100,
                         math.format(int(weights.get(my_script, 0)*att_s//total_weight / (1. - (math.median(fracs) if fracs else 0)))),
                         shares,
                         stale_not_doa_shares,
                         stale_doa_shares,
                         len(p2p_node.peers),
                     ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')
                     if fracs:
                         med = math.median(fracs)
                         this_str += '\nPool stales: %i%%' % (int(100*med+.5),)
                         conf = 0.95
                         if shares:
                             this_str += u' Own: %i±%i%%' % tuple(int(100*x+.5) for x in math.interval_to_center_radius(math.binomial_conf_interval(stale_shares, shares, conf)))
                             if med < .99:
                                 this_str += u' Own efficiency: %i±%i%%' % tuple(int(100*x+.5) for x in math.interval_to_center_radius((1 - y)/(1 - med) for y in math.binomial_conf_interval(stale_shares, shares, conf)[::-1]))
                     if this_str != last_str or time.time() > last_time + 15:
                         print this_str
                         last_str = this_str
                         last_time = time.time()
         except:
             log.err()
Example #16
def runtime_info():
    delayed = reactor.getDelayedCalls()
    readers = reactor.getReaders()
    writers = reactor.getWriters()
    clients = []
    http_conn_num = 0
    for reader in readers:
        if isinstance(reader, twisted.internet.tcp.Server):
            clients.append(reader.getPeer())
        if isinstance(reader, twisted.internet.tcp.Client):
            http_conn_num += 1
    info = {
        'num_clients': len(clients),
        'num_http_conn': http_conn_num,
        'num_readers': len(readers),
        'num_writers': len(writers),
        'num_delayed': len(delayed),
        'clients': clients,
        'readers': readers,
        'writers': writers,
        'delayed': delayed,
    }
    return info
Example #17
 def _iterateTestReactor(self, debug=False):
     """
     Iterate the reactor.
     """
     reactor.runUntilCurrent()
     if debug:
         # When debug is enabled, iterate using a small delay between steps
         # to get much more readable debug output; otherwise the debug
         # messages will flood the output.
         print (
             u'delayed: %s\n'
             u'threads: %s\n'
             u'writers: %s\n'
             u'readers: %s\n'
             u'threadpool size: %s\n'
             u'threadpool threads: %s\n'
             u'threadpool working: %s\n'
             u'\n' % (
                 self._reactorQueueToString(),
                 reactor.threadCallQueue,
                 reactor.getWriters(),
                 reactor.getReaders(),
                 self._threadPoolQueueSize(),
                 self._threadPoolThreads(),
                 self._threadPoolWorking(),
                 )
             )
         t2 = reactor.timeout()
         # For testing we want to force to reactor to wake at an
         # interval of at most 1 second.
         if t2 is None or t2 > 1:
             t2 = 0.1
         t = reactor.running and t2
         reactor.doIteration(t)
     else:
         reactor.doIteration(False)
Example #18
 def get_writers(self):
     from twisted.internet import reactor
     return reactor.getWriters()
Example #19
    def executeReactor(self, timeout=1, debug=False, run_once=False):
        """
        Run the reactor until no more delayed calls, readers, writers or
        threads are left in the queues.

        Set run_once=True to only run the reactor once. This is useful if
        you have a persistent deferred which will be removed only at the
        end of the test.

        Only use this for very high level integration code, where you don't
        have the chance to get a "root" deferred.
        In most tests you should use `getDeferredFailure` or
        `getDeferredResult` instead.

        Usage::

            protocol = factory.makeFTPProtocol()
            transport = factory.makeStringTransportProtocol()
            protocol.makeConnection(transport)
            transport.protocol = protocol

            protocol.lineReceived('FEAT')
            self.executeReactor()
            result = transport.value()

            self.assertStartsWith('211-Features:\n', result)
        """
        self._initiateTestReactor(timeout=timeout)

        # Set it to True to enter the first loop.
        have_callbacks = True
        while have_callbacks and not self._timeout_reached:
            self._iterateTestReactor(debug=debug)

            have_callbacks = False

            # Check for active jobs in thread pool.
            if reactor.threadpool:
                if (
                        reactor.threadpool.working or
                        (reactor.threadpool.q.qsize() > 0)
                        ):
                    time.sleep(0.01)
                    have_callbacks = True
                    continue

            # Look at delayed calls.
            for delayed in reactor.getDelayedCalls():
                # We skip our own timeout call.
                if delayed is self._reactor_timeout_call:
                    continue
                if not delayed.func:
                    # Was already called.
                    continue
                delayed_str = self._getDelayedCallName(delayed)
                is_exception = False
                for excepted_callback in self.EXCEPTED_DELAYED_CALLS:
                    if excepted_callback in delayed_str:
                        is_exception = True
                if not is_exception:
                    # No need to look for other delayed calls.
                    have_callbacks = True
                    break

            # No need to look for other things as we already know that we need
            # to wait at least for delayed calls.
            if have_callbacks:
                continue

            if run_once:
                if have_callbacks:
                    raise AssertionError(
                        'Reactor queue still contains delayed deferred.\n'
                        '%s' % (self._reactorQueueToString()))
                break

            # Look at writers' buffers:
            if len(reactor.getWriters()) > 0:
                have_callbacks = True
                continue

            for reader in reactor.getReaders():
                have_callbacks = True
                for excepted_reader in self.EXCEPTED_READERS:
                    if isinstance(reader, excepted_reader):
                        have_callbacks = False
                        break
                if have_callbacks:
                    break

            if have_callbacks:
                continue

            # Look at threads queue.
            if len(reactor.threadCallQueue) > 0:
                have_callbacks = True
                continue

        self._shutdownTestReactor()
Example #20
 def report():
     return {prefix + ".readers": len(reactor.getReaders()),
             prefix + ".writers": len(reactor.getWriters())}
Example #21
 def status_thread():
     last_str = None
     last_time = 0
     while True:
         yield deferral.sleep(3)
         try:
             if time.time() > current_work2.value['last_update'] + 60:
                 print '''---> LOST CONTACT WITH BITCOIND for 60 seconds, check that it isn't frozen or dead <---'''
             if current_work.value['best_share_hash'] is not None:
                 height, last = tracker.get_height_and_last(
                     current_work.value['best_share_hash'])
                 if height > 2:
                     att_s = p2pool_data.get_pool_attempts_per_second(
                         tracker, current_work.value['best_share_hash'],
                         min(height - 1, 720))
                     weights, total_weight, donation_weight = tracker.get_cumulative_weights(
                         current_work.value['best_share_hash'],
                         min(height, 720), 65535 * 2**256)
                     shares, stale_doa_shares, stale_not_doa_shares = get_share_counts(
                         True)
                     stale_shares = stale_doa_shares + stale_not_doa_shares
                     fracs = [
                         share.stale_frac
                         for share in tracker.get_chain(
                             current_work.value['best_share_hash'],
                             min(120, height))
                         if share.stale_frac is not None
                     ]
                     this_str = 'Pool: %sH/s in %i shares (%i/%i verified) Recent: %.02f%% >%sH/s Shares: %i (%i orphan, %i dead) Peers: %i' % (
                         math.format(
                             int(att_s /
                                 (1. -
                                  (math.median(fracs) if fracs else 0)))
                         ),
                         height,
                         len(tracker.verified.shares),
                         len(tracker.shares),
                         weights.get(my_script, 0) / total_weight * 100,
                         math.format(
                             int(
                                 weights.get(my_script, 0) * att_s //
                                 total_weight /
                                 (1. -
                                  (math.median(fracs) if fracs else 0)))
                         ),
                         shares,
                         stale_not_doa_shares,
                         stale_doa_shares,
                         len(p2p_node.peers),
                     ) + (' FDs: %i R/%i W' %
                          (len(reactor.getReaders()),
                           len(reactor.getWriters()))
                          if p2pool.DEBUG else '')
                     if fracs:
                         med = math.median(fracs)
                         this_str += '\nPool stales: %i%%' % (
                             int(100 * med + .5), )
                         conf = 0.95
                         if shares:
                             this_str += u' Own: %i±%i%%' % tuple(
                                 int(100 * x + .5) for x in
                                 math.interval_to_center_radius(
                                     math.binomial_conf_interval(
                                         stale_shares, shares, conf)))
                             if med < .99:
                                 this_str += u' Own efficiency: %i±%i%%' % tuple(
                                     int(100 * x + .5) for x in
                                     math.interval_to_center_radius(
                                         (1 - y) / (1 - med) for y in
                                         math.binomial_conf_interval(
                                             stale_shares, shares,
                                             conf)[::-1]))
                     if this_str != last_str or time.time(
                     ) > last_time + 15:
                         print this_str
                         last_str = this_str
                         last_time = time.time()
         except:
             log.err()
Example #22
 def get_writers(self):
     from twisted.internet import reactor
     return reactor.getWriters()
Example #23
 def report():
     return {prefix + ".readers": len(reactor.getReaders()), prefix + ".writers": len(reactor.getWriters())}