Example #1
    def checkReactor(self, phase, *_):
        has_network_selectables = False
        for item in reactor.getReaders() + reactor.getWriters():
            if isinstance(item, HTTPChannel) or isinstance(item, Client):
                has_network_selectables = True
                break

        if has_network_selectables:
            # TODO(Martijn): we wait a while before we continue the check since network selectables
            # might take some time to clean up. I'm not sure what's causing this.
            yield deferLater(reactor, 0.2, lambda: None)

        # This is the same check as in the _cleanReactor method of Twisted's Trial
        selectable_strings = []
        for sel in reactor.removeAll():
            if interfaces.IProcessTransport.providedBy(sel):
                self._logger.error("Sending kill signal to %s", repr(sel))
                sel.signalProcess('KILL')
            selectable_strings.append(repr(sel))

        self.assertFalse(selectable_strings,
                         "The reactor has leftover readers/writers during %s: %r" % (phase, selectable_strings))

        # Check whether we have closed all the sockets
        open_readers = reactor.getReaders()
        for reader in open_readers:
            self.assertNotIsInstance(reader, BasePort)

        # Check whether the threadpool is clean
        tp_items = len(reactor.getThreadPool().working)
        if tp_items > 0:  # Print all stacks to debug this issue
            self.watchdog.print_all_stacks()
        self.assertEqual(tp_items, 0, "Still items left in the threadpool")
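For context, the snippet above uses a bare yield, so in the original project it is presumably decorated with twisted.internet.defer.inlineCallbacks, and it appears to rely on imports along these lines (module paths per the standard Twisted layout; the original import block is not shown):

# Assumed imports for the checkReactor snippet above; exact paths may differ in the source project.
from twisted.internet import interfaces, reactor
from twisted.internet.base import BasePort      # base class of listening ports
from twisted.internet.tcp import Client         # client-side TCP transports
from twisted.internet.task import deferLater    # fire a Deferred after a delay
from twisted.web.http import HTTPChannel        # server-side HTTP connections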
Example #2
        def _check_fds(_):
            # This appears to only be necessary for HTTPS tests.
            # For the normal HTTP tests then closeCachedConnections is
            # sufficient.
            fds = set(reactor.getReaders() + reactor.getWriters())
            if not [fd for fd in fds if isinstance(fd, Client)]:
                return

            return deferLater(reactor, 0, _check_fds, None)
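For context, a poller like _check_fds is normally chained off a Deferred in a test's tearDown; a minimal sketch, assuming a twisted.web.client.HTTPConnectionPool stored as self.pool (as in Example #4 below) and _check_fds defined as above:

    def tearDown(self):
        # Close idle persistent connections, then poll until no Client readers remain.
        return self.pool.closeCachedConnections().addBoth(_check_fds)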
Example #4
    def tearDown(self):
        try:
            yield self.pool.closeCachedConnections()
        except:
            pass

        while True:
            fds = set(reactor.getReaders() + reactor.getWriters())
            if not [fd for fd in fds if isinstance(fd, Client)]:
                break
Example #5
def runtime_info():
    delayed = reactor.getDelayedCalls()
    readers = reactor.getReaders()
    writers = reactor.getWriters()
    servers = []
    clients = []
    other = []
    for reader in readers:
        if isinstance(reader, tcp.Server):
            servers.append({
                'transport': reader,
                'host': reader.getHost(),
                'peer': reader.getPeer()
            })
        elif isinstance(reader, tcp.Client):
            clients.append({
                'transport': reader,
                'host': reader.getHost(),
                'peer': reader.getPeer()
            })
        else:
            other.append(reader)
    return {
        'num_clients': len(clients),
        'num_servers': len(servers),
        'num_other': len(other),
        'num_writers': len(writers),
        'num_delayed': len(delayed),
        'clients': clients,
        'servers': servers,
        'other': other,
        'writers': writers,
        'delayed': delayed,
    }
Example #6
    def customeTimeout(self):
        print "main.CONNECTION_DONE: ", main.CONNECTION_DONE
        print "main.CONNECTION_LOST: ", main.CONNECTION_LOST
        print "self.sock: ", self.sock

        from twisted.internet import reactor
        print "reactor.getReaders(): ", reactor.getReaders()
        # check if self object is still in reactor:
        #   yes, means maybe not finished yet ...
        #   no, of course is done.
        if self in reactor.getReaders():
            # I will make it stop manually
            self.connectionLost( main.CONNECTION_DONE)
            print "invoke the self.connectionLost ... haha"
        else:
            print "already finished maybe ..."
Example #8
def print_runtime_info(sig, frame):
    if sig in [signal.SIGUSR1, signal.SIGUSR2]:
        delayed = reactor.getDelayedCalls()
        readers = reactor.getReaders()
        writers = reactor.getWriters()
        clients = []
        http_conn_num = 0
        for reader in readers:
            if isinstance(reader, twisted.internet.tcp.Server):
                clients.append(reader.getPeer())
            if isinstance(reader, twisted.internet.tcp.Client):
                http_conn_num += 1
        log.msg(
            "[Clients: %(client_num)s] [HTTP Conns: %(http_conn_num)s] "
            "[Readers: %(reader_num)s] [Writers: %(writer_num)s] "
            "[DelayedCalls: %(delayed_num)s]"
            % {
                "client_num": len(clients),
                "http_conn_num": http_conn_num,
                "reader_num": len(readers),
                "writer_num": len(writers),
                "delayed_num": len(delayed),
            }
        )
        log.msg("[Connected Clients]: %s" % clients)
        if sig == signal.SIGUSR2:
            for d in delayed:
                log.msg("SIGUSR2[delayed]: %s" % d)

            for r in readers:
                log.msg("SIGUSR2[reader]: %s" % r)

            for w in writers:
                log.msg("SIGUSR2[writer]: %s" % w)
Example #9
def periodic_reporter(settings):
    """Twisted Task function that runs every few seconds to emit general
    metrics regarding twisted and client counts"""
    settings.metrics.gauge("update.client.writers", len(reactor.getWriters()))
    settings.metrics.gauge("update.client.readers", len(reactor.getReaders()))
    settings.metrics.gauge("update.client.connections", len(settings.clients))
    settings.metrics.gauge("update.client.ws_connections", settings.factory.countConnections)
Example #10
    def checkReactor(self, phase, *_):
        delayed_calls = reactor.getDelayedCalls()
        if delayed_calls:
            self._logger.error("The reactor was dirty during %s:", phase)
            for dc in delayed_calls:
                self._logger.error(">     %s", dc)
                dc.cancel()

        from pony.orm.core import local
        if local.db_context_counter > 0:
            self._logger.error("Leftover pony db sessions found!")
        from pony.orm import db_session
        for _ in range(local.db_context_counter):
            db_session.__exit__()

        has_network_selectables = False
        for item in reactor.getReaders() + reactor.getWriters():
            if isinstance(item, HTTPChannel) or isinstance(item, Client):
                has_network_selectables = True
                break

        if has_network_selectables:
            # TODO(Martijn): we wait a while before we continue the check since network selectables
            # might take some time to clean up. I'm not sure what's causing this.
            yield deferLater(reactor, 0.2, lambda: None)

        # This is the same check as in the _cleanReactor method of Twisted's Trial
        selectable_strings = []
        for sel in reactor.removeAll():
            if interfaces.IProcessTransport.providedBy(sel):
                self._logger.error("Sending kill signal to %s", repr(sel))
                sel.signalProcess('KILL')
            selectable_strings.append(repr(sel))

        self.assertFalse(selectable_strings,
                         "The reactor has leftover readers/writers during %s: %r" % (phase, selectable_strings))

        # Check whether we have closed all the sockets
        open_readers = reactor.getReaders()
        for reader in open_readers:
            self.assertNotIsInstance(reader, BasePort)

        # Check whether the threadpool is clean
        tp_items = len(reactor.getThreadPool().working)
        if tp_items > 0:  # Print all stacks to debug this issue
            self.watchdog.print_all_stacks()
        self.assertEqual(tp_items, 0, "Still items left in the threadpool")
Example #11
    def test_getReaders(self):
        """
        Check that L{interfaces.IReactorFDSet.getReaders} reflects the actions
        made with L{interfaces.IReactorFDSet.addReader} and
        L{interfaces.IReactorFDSet.removeReader}.
        """
        s = socket.socket()
        self.addCleanup(s.close)

        c = Connection(s, protocol.Protocol())
        self.assertNotIn(c, reactor.getReaders())

        reactor.addReader(c)
        self.assertIn(c, reactor.getReaders())

        reactor.removeReader(c)
        self.assertNotIn(c, reactor.getReaders())
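The writer side of IReactorFDSet can be exercised the same way; a sketch of the mirror-image test under the same imports:

    def test_getWriters(self):
        """
        Check that L{interfaces.IReactorFDSet.getWriters} reflects the actions
        made with L{interfaces.IReactorFDSet.addWriter} and
        L{interfaces.IReactorFDSet.removeWriter}.
        """
        s = socket.socket()
        self.addCleanup(s.close)

        c = Connection(s, protocol.Protocol())
        self.assertNotIn(c, reactor.getWriters())

        reactor.addWriter(c)
        self.assertIn(c, reactor.getWriters())

        reactor.removeWriter(c)
        self.assertNotIn(c, reactor.getWriters())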
Example #12
 def tearDown(self):
     from twisted.internet.tcp import Server
     # browsers have the bad habit of not closing persistent
     # connections, so we need to hack them away to make trial happy
     f = failure.Failure(Exception("test end"))
     for reader in reactor.getReaders():
         if isinstance(reader, Server):
             reader.connectionLost(f)
Example #14
def periodic_reporter(settings):
    """Twisted Task function that runs every few seconds to emit general
    metrics regarding twisted and client counts"""
    settings.metrics.gauge("update.client.writers", len(reactor.getWriters()))
    settings.metrics.gauge("update.client.readers", len(reactor.getReaders()))
    settings.metrics.gauge("update.client.connections", len(settings.clients))
    settings.metrics.gauge("update.client.ws_connections",
                           settings.factory.countConnections)
Example #16
        def status_thread():
            last_str = None
            last_time = 0
            while True:
                yield deferral.sleep(3)
                try:
                    height = node.tracker.get_height(node.best_share_var.value)
                    this_str = 'P2Pool: %i shares in chain (%i verified/%i total) Peers: %i (%i incoming)' % (
                        height,
                        len(node.tracker.verified.items),
                        len(node.tracker.items),
                        len(node.p2p_node.peers),
                        sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
                    ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')
                    
                    datums, dt = wb.local_rate_monitor.get_datums_in_last()
                    my_att_s = sum(datum['work']/dt for datum in datums)
                    my_shares_per_s = sum(datum['work']/dt/bitcoin_data.target_to_average_attempts(datum['share_target']) for datum in datums)
                    this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
                        math.format(int(my_att_s)),
                        math.format_dt(dt),
                        math.format_binomial_conf(sum(1 for datum in datums if datum['dead']), len(datums), 0.95),
                        math.format_dt(1/my_shares_per_s) if my_shares_per_s else '???',
                    )
                    
                    if height > 2:
                        (stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
                        stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, min(60*60//net.SHARE_PERIOD, height))
                        real_att_s = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, min(height - 1, 60*60//net.SHARE_PERIOD)) / (1 - stale_prop)
                        
                        this_str += '\n Shares: %i (%i orphan, %i dead) Stale rate: %s Efficiency: %s Current payout: %.4f %s' % (
                            shares, stale_orphan_shares, stale_doa_shares,
                            math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95),
                            math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95, lambda x: (1 - x)/(1 - stale_prop)),
                            node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(my_pubkey_hash), 0)*1e-8, net.PARENT.SYMBOL,
                        )
                        print(node.bitcoind_work.value['bits'])
                        print(real_att_s)
                        this_str += '\n Pool: %sH/s Stale rate: %.1f%% Expected time to block: %s' % (
                            math.format(int(real_att_s)),
                            100*stale_prop,
                            math.format_dt(2**256 / node.bitcoind_work.value['bits'].target / real_att_s),
                        )
                        
                        for warning in p2pool_data.get_warnings(node.tracker, node.best_share_var.value, net, bitcoind_getinfo_var.value, node.bitcoind_work.value):
                            print >>sys.stderr, '#'*40
                            print >>sys.stderr, '>>> Warning: ' + warning
                            print >>sys.stderr, '#'*40
                        
                        if gc.garbage:
                            print '%i pieces of uncollectable cyclic garbage! Types: %r' % (len(gc.garbage), map(type, gc.garbage))
                    
                    if this_str != last_str or time.time() > last_time + 15:
                        print this_str
                        last_str = this_str
                        last_time = time.time()
                except:
                    log.err()
Example #17
    def assertReactorIsClean(self):
        """
        Check that the reactor has no delayed calls, readers or writers.
        """
        if reactor is None:
            return

        def raise_failure(location, reason):
            raise AssertionError(
                'Reactor is not clean. %s: %s' % (location, reason))

        if reactor._started:
            raise AssertionError('Reactor was not stopped.')

        # Look at threads queue.
        if len(reactor.threadCallQueue) > 0:
            raise_failure('threads', reactor.threadCallQueue)

        if self._threadPoolQueueSize() > 0:
            raise_failure('threadpool queue', self._threadPoolQueueSize())

        if self._threadPoolWorking() > 0:
            raise_failure('threadpool working', self._threadPoolWorking())

        if self._threadPoolThreads() > 0:
            raise_failure('threadpool threads', self._threadPoolThreads())

        if len(reactor.getWriters()) > 0:
            raise_failure('writers', str(reactor.getWriters()))

        for reader in reactor.getReaders():
            excepted = False
            for reader_type in self.EXCEPTED_READERS:
                if isinstance(reader, reader_type):
                    excepted = True
                    break
            if not excepted:
                raise_failure('readers', str(reactor.getReaders()))

        for delayed_call in reactor.getDelayedCalls():
            if delayed_call.active():
                delayed_str = self._getDelayedCallName(delayed_call)
                if delayed_str in self.EXCEPTED_DELAYED_CALLS:
                    continue
                raise_failure('delayed calls', delayed_str)
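EXCEPTED_READERS and EXCEPTED_DELAYED_CALLS are class attributes of the test case; a hedged sketch of what they might contain (the concrete values depend on the application under test):

    # Hypothetical values. The reactor's waker is a reader that legitimately stays
    # registered for the whole process lifetime (Example #34 filters it out the same
    # way via reactor.waker); delayed calls are matched by name.
    EXCEPTED_READERS = (type(reactor.waker),)
    EXCEPTED_DELAYED_CALLS = ()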
Example #18
    def connectionLost(self, reason):
        self.sock.close()

        reactor.removeReader(self)

        for reader in reactor.getReaders():
            if isinstance(reader, PoetrySocket):
                return
        reactor.stop()
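For context, anything passed to reactor.addReader must provide the IReadDescriptor methods; a hedged sketch of the rest of a class like PoetrySocket (buffer size and names are illustrative; connectionLost is the method shown above):

class PoetrySocket(object):
    """Illustrative reader: the methods IReadDescriptor needs besides connectionLost."""

    def __init__(self, sock):
        self.sock = sock
        sock.setblocking(0)          # the reactor expects non-blocking descriptors
        from twisted.internet import reactor
        reactor.addReader(self)      # start monitoring the socket for readability

    def fileno(self):
        return self.sock.fileno()    # the descriptor the reactor should watch

    def logPrefix(self):
        return 'poetry'              # used when the reactor logs errors for this reader

    def doRead(self):
        data = self.sock.recv(1024)
        if not data:
            from twisted.internet import main
            # Returning a disconnect reason makes the reactor call connectionLost()
            # (the method shown in the example above).
            return main.CONNECTION_DONE
        # ... process `data` here ...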
Example #19
 def status_thread():
     last_str = None
     last_time = 0
     while True:
         yield deferral.sleep(3)
         try:
             height = node.tracker.get_height(node.best_share_var.value)
             this_str = 'P2Pool: %i shares in chain (%i verified/%i total) Peers: %i (%i incoming)' % (
                 height,
                 len(node.tracker.verified.items),
                 len(node.tracker.items),
                 len(node.p2p_node.peers),
                 sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
             ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')
             
             datums, dt = wb.local_rate_monitor.get_datums_in_last()
             my_att_s = sum(datum['work']/dt for datum in datums)
             my_shares_per_s = sum(datum['work']/dt/bitcoin_data.target_to_average_attempts(datum['share_target']) for datum in datums)
             this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
                 math.format(int(my_att_s)),
                 math.format_dt(dt),
                 math.format_binomial_conf(sum(1 for datum in datums if datum['dead']), len(datums), 0.95),
                 math.format_dt(1/my_shares_per_s) if my_shares_per_s else '???',
             )
             
             if height > 2:
                 (stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
                 stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, min(60*60//net.SHARE_PERIOD, height))
                 real_att_s = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, min(height - 1, 60*60//net.SHARE_PERIOD)) / (1 - stale_prop)
                 
                 this_str += '\n Shares: %i (%i orphan, %i dead) Stale rate: %s Efficiency: %s Current payout: %.4f %s' % (
                     shares, stale_orphan_shares, stale_doa_shares,
                     math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95),
                     math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95, lambda x: (1 - x)/(1 - stale_prop)),
                     ### CJWinty: dimecoin has only 5 digits
                     node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(my_pubkey_hash), 0)*1e-5, net.PARENT.SYMBOL,
                 )
                 this_str += '\n Pool: %sH/s Stale rate: %.1f%% Expected time to block: %s' % (
                     math.format(int(real_att_s)),
                     100*stale_prop,
                     math.format_dt(2**256 / node.dimecoind_work.value['bits'].target / real_att_s),
                 )
                 
                 for warning in p2pool_data.get_warnings(node.tracker, node.best_share_var.value, net, dimecoind_warning_var.value, node.dimecoind_work.value):
                     print >>sys.stderr, '#'*40
                     print >>sys.stderr, '>>> Warning: ' + warning
                     print >>sys.stderr, '#'*40
                 
                 if gc.garbage:
                     print '%i pieces of uncollectable cyclic garbage! Types: %r' % (len(gc.garbage), map(type, gc.garbage))
             
             if this_str != last_str or time.time() > last_time + 15:
                 print this_str
                 last_str = this_str
                 last_time = time.time()
         except:
             log.err()
Example #20
 def connectionLost(self, reason):
     self.sock.close()
     # stop monitoring this socket
     from twisted.internet import reactor
     reactor.removeReader(self)
     # see if there are any poetry sockets left
     for reader in reactor.getReaders():
         if isinstance(reader, PoetrySocket):
             return
     reactor.stop()  # no more poetry
Example #21
 def test_runtime_info(self):
     """Make sure we add runtime info."""
     stats_worker = StatsWorker(self.service, 10)
     # get the reactor
     from twisted.internet import reactor
     stats_worker.runtime_info()
     # check the reactor data
     self.assertIn(('gauge', 'reactor.readers', len(reactor.getReaders())),
                   self.metrics.calls)
     self.assertIn(('gauge', 'reactor.writers', len(reactor.getWriters())),
                   self.metrics.calls)
Example #22
    def report_metrics(self):
        # Report collected metrics
        results = self.metrics
        self.reset_metrics()
        for name, value in results.items():
            self.metric.increment(name, value)

        # Generate/send Aux stats
        num_clients = len(
            [r for r in reactor.getReaders() if isinstance(r, tcp.Server)])
        self.metric.gauge('clients', num_clients)
Example #23
 def test_runtime_info(self):
     """Make sure we add runtime info."""
     stats_worker = StatsWorker(self.service, 10)
     # get the reactor
     from twisted.internet import reactor
     stats_worker.runtime_info()
     # check the reactor data
     self.assertIn(('gauge', 'reactor.readers',
                    len(reactor.getReaders())), self.metrics.calls)
     self.assertIn(('gauge', 'reactor.writers',
                    len(reactor.getWriters())), self.metrics.calls)
Example #24
    def connectionLost(self, reason):
        self.sock.close()

        from twisted.internet import reactor
        reactor.removeReader(self)

        for reader in reactor.getReaders():
            if isinstance(reader, PoetrySocket):
                return

        reactor.stop()
Example #25
    def connectionLost(self, reason):
        self.sock.close()
        reactor.removeReader(self)

        # see if there are any poetry sockets left
        for reader in reactor.getReaders():
            if isinstance(reader, RawSocket):
                return
        try:
            reactor.stop()  # no more poetry
        except Exception:
            pass
Example #26
    def connectionLost(self, reason):
        self.sock.close()

        # stop monitoring this socket
        from twisted.internet import reactor
        reactor.removeReader(self)

        # see if there are any poetry sockets left
        for reader in reactor.getReaders():
            if isinstance(reader, PoetrySocket):
                return

        reactor.stop() # no more poetry
Example #27
    def connectionLost(self, reason=None):
        self.sock.close()

        # Stop monitoring this socket
        from twisted.internet import reactor
        print("Reactor no longer monitored: %s" % reason)
        reactor.removeReader(self)

        # See if there are any poetry sockets left
        for reader in reactor.getReaders():
            if isinstance(reader, PoetrySocket):
                return

        reactor.stop()  # No more poetry
Example #28
    def connectionMade(self):
        """
        Initialize state when the client connects.
        """
        # Get the reader instance for this protocol
        readers = reactor.getReaders()
        for reader in readers:
            if getattr(reader, "protocol", None) == self:
                self.reader = reader
                break
        self.keep_alive = False
        self.input_lines = Queue()
        client = self.transport.getPeer()
        if client.host not in self.factory.dont_log:
            log.msg("Connection from {0}".format(client))

        self.api = StatAPI("whois", self.factory.base_url,
                           headers=[("X-Forwarded-For", client.host)])
Example #29
    def connectionMade(self):
        """
        Initialize state when the client connects.
        """
        # Get the reader instance for this protocol
        readers = reactor.getReaders()
        for reader in readers:
            if getattr(reader, "protocol", None) == self:
                self.reader = reader
                break
        self.keep_alive = False
        self.input_lines = Queue()
        client = self.transport.getPeer()
        if client.host not in self.factory.dont_log:
            log.msg("Connection from {0}".format(client))

        self.api = StatAPI("whois",
                           self.factory.base_url,
                           headers=[("X-Forwarded-For", client.host)])
Example #30
 def status_thread():
     last_str = None
     last_time = 0
     while True:
         yield deferral.sleep(3)
         try:
             if time.time() > current_work2.value['last_update'] + 60:
                 print '''---> LOST CONTACT WITH BITCOIND for 60 seconds, check that it isn't frozen or dead <---'''
             if current_work.value['best_share_hash'] is not None:
                 height, last = tracker.get_height_and_last(current_work.value['best_share_hash'])
                 if height > 2:
                     att_s = p2pool_data.get_pool_attempts_per_second(tracker, current_work.value['best_share_hash'], min(height - 1, 720))
                     weights, total_weight, donation_weight = tracker.get_cumulative_weights(current_work.value['best_share_hash'], min(height, 720), 65535*2**256)
                     shares, stale_doa_shares, stale_not_doa_shares = get_share_counts(True)
                     stale_shares = stale_doa_shares + stale_not_doa_shares
                     fracs = [share.stale_frac for share in tracker.get_chain(current_work.value['best_share_hash'], min(120, height)) if share.stale_frac is not None]
                     this_str = 'Pool: %sH/s in %i shares (%i/%i verified) Recent: %.02f%% >%sH/s Shares: %i (%i orphan, %i dead) Peers: %i' % (
                         math.format(int(att_s / (1. - (math.median(fracs) if fracs else 0)))),
                         height,
                         len(tracker.verified.shares),
                         len(tracker.shares),
                         weights.get(my_script, 0)/total_weight*100,
                         math.format(int(weights.get(my_script, 0)*att_s//total_weight / (1. - (math.median(fracs) if fracs else 0)))),
                         shares,
                         stale_not_doa_shares,
                         stale_doa_shares,
                         len(p2p_node.peers),
                     ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')
                     if fracs:
                         med = math.median(fracs)
                         this_str += '\nPool stales: %i%%' % (int(100*med+.5),)
                         conf = 0.95
                         if shares:
                             this_str += u' Own: %i±%i%%' % tuple(int(100*x+.5) for x in math.interval_to_center_radius(math.binomial_conf_interval(stale_shares, shares, conf)))
                             if med < .99:
                                 this_str += u' Own efficiency: %i±%i%%' % tuple(int(100*x+.5) for x in math.interval_to_center_radius((1 - y)/(1 - med) for y in math.binomial_conf_interval(stale_shares, shares, conf)[::-1]))
                     if this_str != last_str or time.time() > last_time + 15:
                         print this_str
                         last_str = this_str
                         last_time = time.time()
         except:
             log.err()
Example #31
    def connectionLost(self, reason):
        # Called when the connection is lost

        # This is called when the connection on a selectable object has been
        # lost.  It will be called whether the connection was closed explicitly,
        # an exception occurred in an event handler, or the other end of the
        # connection closed it first.
        
        self.sock.close()  # Close the socket and its associated file descriptor

        # stop monitoring this socket
        from twisted.internet import reactor
        reactor.removeReader(self)  # Remove this object from the monitored reader FDs

        # see if there are any poetry sockets left
        for reader in reactor.getReaders():
            if isinstance(reader, PoetrySocket):
                return

        reactor.stop()  # no more readers, no more poetry
Example #32
def runtime_info():
    delayed = reactor.getDelayedCalls()
    readers = reactor.getReaders()
    writers = reactor.getWriters()
    clients = []
    http_conn_num = 0
    for reader in readers:
        if isinstance(reader, twisted.internet.tcp.Server):
            clients.append(reader.getPeer())
        if isinstance(reader, twisted.internet.tcp.Client):
            http_conn_num += 1
    info = {
        'num_clients': len(clients),
        'num_http_conn': http_conn_num,
        'num_readers': len(readers),
        'num_writers': len(writers),
        'num_delayed': len(delayed),
        'clients': clients,
        'readers': readers,
        'writers': writers,
        'delayed': delayed,
    }
    return info
Example #33
 def _iterateTestReactor(self, debug=False):
     """
     Iterate the reactor.
     """
     reactor.runUntilCurrent()
     if debug:
         # When debug is enabled, iterate using a small delay between steps
         # to get much more readable debug output.
         # Otherwise the debug messages will flood the output.
         print (
             u'delayed: %s\n'
             u'threads: %s\n'
             u'writers: %s\n'
             u'readers: %s\n'
             u'threadpool size: %s\n'
             u'threadpool threads: %s\n'
             u'threadpool working: %s\n'
             u'\n' % (
                 self._reactorQueueToString(),
                 reactor.threadCallQueue,
                 reactor.getWriters(),
                 reactor.getReaders(),
                 self._threadPoolQueueSize(),
                 self._threadPoolThreads(),
                 self._threadPoolWorking(),
                 )
             )
         t2 = reactor.timeout()
         # For testing we want to force the reactor to wake at an
         # interval of at most 1 second.
         if t2 is None or t2 > 1:
             t2 = 0.1
         t = reactor.running and t2
         reactor.doIteration(t)
     else:
         reactor.doIteration(False)
Example #34
 def get_readers(self):
     from twisted.internet import reactor
     readers = reactor.getReaders()
     readers.remove(getattr(reactor, 'waker'))
     return readers
Example #35
 def report():
     return {prefix + ".readers": len(reactor.getReaders()), prefix + ".writers": len(reactor.getWriters())}
Example #36
 def status_thread():
     last_str = None
     last_time = 0
     while True:
         yield deferral.sleep(3)
         try:
             if time.time() > current_work2.value['last_update'] + 60:
                 print '''---> LOST CONTACT WITH BITCOIND for 60 seconds, check that it isn't frozen or dead <---'''
             if current_work.value['best_share_hash'] is not None:
                 height, last = tracker.get_height_and_last(
                     current_work.value['best_share_hash'])
                 if height > 2:
                     att_s = p2pool_data.get_pool_attempts_per_second(
                         tracker, current_work.value['best_share_hash'],
                         min(height - 1, 720))
                     weights, total_weight, donation_weight = tracker.get_cumulative_weights(
                         current_work.value['best_share_hash'],
                         min(height, 720), 65535 * 2**256)
                     shares, stale_doa_shares, stale_not_doa_shares = get_share_counts(
                         True)
                     stale_shares = stale_doa_shares + stale_not_doa_shares
                     fracs = [
                         share.stale_frac
                         for share in tracker.get_chain(
                             current_work.value['best_share_hash'],
                             min(120, height))
                         if share.stale_frac is not None
                     ]
                     this_str = 'Pool: %sH/s in %i shares (%i/%i verified) Recent: %.02f%% >%sH/s Shares: %i (%i orphan, %i dead) Peers: %i' % (
                         math.format(
                             int(att_s /
                                 (1. -
                                  (math.median(fracs) if fracs else 0)))
                         ),
                         height,
                         len(tracker.verified.shares),
                         len(tracker.shares),
                         weights.get(my_script, 0) / total_weight * 100,
                         math.format(
                             int(
                                 weights.get(my_script, 0) * att_s //
                                 total_weight /
                                 (1. -
                                  (math.median(fracs) if fracs else 0)))
                         ),
                         shares,
                         stale_not_doa_shares,
                         stale_doa_shares,
                         len(p2p_node.peers),
                     ) + (' FDs: %i R/%i W' %
                          (len(reactor.getReaders()),
                           len(reactor.getWriters()))
                          if p2pool.DEBUG else '')
                     if fracs:
                         med = math.median(fracs)
                         this_str += '\nPool stales: %i%%' % (
                             int(100 * med + .5), )
                         conf = 0.95
                         if shares:
                             this_str += u' Own: %i±%i%%' % tuple(
                                 int(100 * x + .5) for x in
                                 math.interval_to_center_radius(
                                     math.binomial_conf_interval(
                                         stale_shares, shares, conf)))
                             if med < .99:
                                 this_str += u' Own efficiency: %i±%i%%' % tuple(
                                     int(100 * x + .5) for x in
                                     math.interval_to_center_radius(
                                         (1 - y) / (1 - med) for y in
                                         math.binomial_conf_interval(
                                             stale_shares, shares,
                                             conf)[::-1]))
                     if this_str != last_str or time.time(
                     ) > last_time + 15:
                         print this_str
                         last_str = this_str
                         last_time = time.time()
         except:
             log.err()
Example #37
 def _check_fds(_):
     fds = set(reactor.getReaders() + reactor.getWriters())
     if not [fd for fd in fds if isinstance(fd, Client)]:
         return
     return deferLater(reactor, 0, _check_fds, None)
Example #38
 def report():
     return {prefix + ".readers": len(reactor.getReaders()),
             prefix + ".writers": len(reactor.getWriters())}
Example #40
    def executeReactor(self, timeout=1, debug=False, run_once=False):
        """
        Run reactor until no more delayed calls, readers or
        writers or threads are in the queues.

        Set run_once=True to only run the reactor once. This is useful if
        you have a persistent deferred which will only be removed at the end
        of the test.

        Only use this for very high level integration code, where you don't
        have the chance to get a "root" deferred.
        In most tests you should use one of
        `getDeferredFailure` or `getDeferredResult` instead.

        Usage::

            protocol = factory.makeFTPProtocol()
            transport = factory.makeStringTransportProtocol()
            protocol.makeConnection(transport)
            transport.protocol = protocol

            protocol.lineReceived('FEAT')
            self.executeReactor()
            result = transport.value()

            self.assertStartsWith('211-Features:\n', result)
        """
        self._initiateTestReactor(timeout=timeout)

        # Set it to True to enter the first loop.
        have_callbacks = True
        while have_callbacks and not self._timeout_reached:
            self._iterateTestReactor(debug=debug)

            have_callbacks = False

            # Check for active jobs in thread pool.
            if reactor.threadpool:
                if (
                        reactor.threadpool.working or
                        (reactor.threadpool.q.qsize() > 0)
                        ):
                    time.sleep(0.01)
                    have_callbacks = True
                    continue

            # Look at delayed calls.
            for delayed in reactor.getDelayedCalls():
                # We skip our own timeout call.
                if delayed is self._reactor_timeout_call:
                    continue
                if not delayed.func:
                    # Was already called.
                    continue
                delayed_str = self._getDelayedCallName(delayed)
                is_exception = False
                for excepted_callback in self.EXCEPTED_DELAYED_CALLS:
                    if excepted_callback in delayed_str:
                        is_exception = True
                if not is_exception:
                    # No need to look for other delayed calls.
                    have_callbacks = True
                    break

            # No need to look for other things as we already know that we need
            # to wait at least for delayed calls.
            if have_callbacks:
                continue

            if run_once:
                if have_callbacks:
                    raise AssertionError(
                        'Reactor queue still contains delayed deferred.\n'
                        '%s' % (self._reactorQueueToString()))
                break

            # Look at writers' buffers:
            if len(reactor.getWriters()) > 0:
                have_callbacks = True
                continue

            for reader in reactor.getReaders():
                have_callbacks = True
                for excepted_reader in self.EXCEPTED_READERS:
                    if isinstance(reader, excepted_reader):
                        have_callbacks = False
                        break
                if have_callbacks:
                    break

            if have_callbacks:
                continue

            # Look at threads queue.
            if len(reactor.threadCallQueue) > 0:
                have_callbacks = True
                continue

        self._shutdownTestReactor()