Example #1
    def test_route_connection_refused_error(self):
        self.agent_mock.request.side_effect = MockAssist(
            [self._raise_connection_refused_error])
        router_data = dict(node_id="http://somewhere", uaid=dummy_uaid)
        self.router_mock.clear_node.return_value = None
        log.addObserver(self._mockObserver)
        d = self.router.route_notification(self.notif, router_data)

        def verify_retry(fail):
            exc = fail.value
            ok_(isinstance(exc, RouterException))
            eq_(exc.status_code, 503)
            eq_(len(self.router_mock.clear_node.mock_calls), 1)
            self.router_mock.clear_node.reset_mock()
            self.flushLoggedErrors()

        def verify_deliver(fail):
            ok_(self._contains_err('ConnectionRefusedError'))
            exc = fail.value
            ok_(isinstance(exc, RouterException))
            eq_(exc.status_code, 503)
            eq_(len(self.router_mock.clear_node.mock_calls), 1)
            self.router_mock.clear_node.reset_mock()
            d = self.router.route_notification(self.notif, router_data)
            d.addBoth(verify_retry)
            return d
        d.addBoth(verify_deliver)
        return d
Example #2
def observe(sentry_dsn):
    ''' Decorator that adds a twisted raven log observer, plus a blocking
    raven client for code that runs before the reactor has started.

    :param str sentry_dsn: URL of the Sentry API
    '''

    # create blocking client:
    raven_client = raven.base.Client(sentry_dsn)

    # add twisted logObserver with twisted raven client:
    observer = get_observer(sentry_dsn)
    if observer:
        log.addObserver(observer)

    def decorator(function):

        @wraps(function)
        def wrapper(*args, **kwargs):
            ''' Calls original function, catches any exception, sends it
            to sentry and re-raises it again. '''
            try:
                return function(*args, **kwargs)
            except:
                raven_client.captureException(sys.exc_info())
                raise  # re-raise caught exception
        return wrapper

    return decorator
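A minimal usage sketch for the decorator above (the DSN and the function are placeholders, not part of the original project):

@observe('https://public:secret@sentry.example.com/1')
def run_job():
    # Any exception raised here is captured by the blocking raven
    # client and then re-raised to the caller.
    raise RuntimeError('job failed')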
Example #3
def main():
    factory = VatsimClientFactory()
    log.startLogging(sys.stdout)
    #log.addObserver(log.FileLogObserver(open("trace.log",'w')))
    addObserver(FileLogObserver(open("trace.log", 'w')).emit)
    reactor.connectTCP('USA-E.vatsim.net', 6809, factory)
    reactor.run()
Example #4
def start_filelog(filename=None):
    if filename is None:
        filename = "%s.proxy_log.txt" % datetime.now(
        ).strftime("%Y.%m.%d_%H.%M.%S")
    f = open(filename, "w")
    log.addObserver(MinecraftLogObserver(f).emit)
    msg("Started logging to file %s" % filename)
Example #5
    def __init__(self):
        class logObserver:
            def __init__(self, con):
                self.con = con

            def emit(self, eventDict):
                edm = eventDict['message']
                if not edm:
                    if eventDict['isError'] and 'failure' in eventDict:
                        text = ((eventDict.get('why') or 'Unhandled Error')
                                + '\n' + eventDict['failure'].getTraceback())
                    elif 'format' in eventDict:
                        text = eventDict['format'] % eventDict
                    else:
                        text = str(eventDict)
                else:
                    text = ' '.join(map(reflect.safe_str, edm))
                
                self.con.addLine(text)


        stdscr = curses.initscr() # initialize curses
        self.screen = Screen(stdscr)   # create Screen object
        log.addObserver(logObserver(self.screen).emit)
        stdscr.refresh()
        reactor.addReader(self.screen) # add screen object as a reader to the reactor
        
        task.LoopingCall(self.screen.updateDisplay).start(.25)
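The hand-rolled formatting in emit above duplicates logic Twisted already exposes as log.textFromEventDict; an equivalent, shorter emit for the same class would be (a sketch, not the original code):

            def emit(self, eventDict):
                # textFromEventDict applies the same message/format/failure
                # rules as the branches above; it returns None for empty events.
                text = log.textFromEventDict(eventDict)
                if text is not None:
                    self.con.addLine(text)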
Example #6
	def run(self):
		retn = False
		try:
			for user in self._authBlacklist:
				self._userdb.blacklistUser(user)

			site = self._site()

			if not site:
				return False

			# start listening for incoming requests
			self.__tcpPort = reactor.listenTCP(self._port,
			                                   site,
			                                   self._connections,
			                                   self._listenAddress)

			# set up the signal handler
			self.__sighup = False
			signal.signal(signal.SIGHUP, self._sighupHandler)
			task.LoopingCall(self._reloadTask).start(60, False)

			# start processing
			Logging.info("start listening")
			log.addObserver(logSC3)

			reactor.run()
			retn = True
		except Exception, e:
			Logging.error(str(e))

		return retn
Example #7
def main():
    config = ConfigParser.ConfigParser()
    
    config.read(["/etc/awsdns.ini", os.path.abspath("./awsdns.ini"), os.path.expanduser("~/awsdns.ini")])
    
    # TODO: what happens if we use more than one DNS server?
    resolver = EC2Resolver(
        config,
        servers=[(config.get('awsdns', 'dns_server'), 53)]
    )
    
    f = server.DNSServerFactory(clients=[resolver])
    p = dns.DNSDatagramProtocol(f)
    
    reactor.listenUDP(53, p)
    reactor.listenTCP(53, f)
    
    try:
        loglevel = util.logging_constant(config.get('awsdns', 'loglevel'))
    except ConfigParser.NoOptionError:
        loglevel = logging.INFO
    
    try:
        logfile = config.get('awsdns', 'logfile')
        fh = open(logfile, "a")
    except ConfigParser.NoOptionError:
        fh = sys.stdout
    
    observer = LevelFileLogObserver(fh, loglevel)
    
    log.addObserver(observer)
    
    reactor.run()
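LevelFileLogObserver is not part of Twisted; this example (and Examples #17 and #25 below) pass an instance of it straight to log.addObserver, so the class must be callable. A plausible minimal implementation, with the constructor signature inferred from the call sites, might be:

import logging

from twisted.python import log

class LevelFileLogObserver(log.FileLogObserver):
    def __init__(self, f, level=logging.INFO):
        log.FileLogObserver.__init__(self, f)
        self.level = level

    def __call__(self, eventDict):
        # addObserver() invokes the observer directly, so filter here:
        # map the event to a stdlib logging level and drop anything
        # below the configured threshold before delegating to emit().
        if eventDict.get('isError'):
            level = logging.ERROR
        elif 'logLevel' in eventDict:
            level = eventDict['logLevel']
        else:
            level = logging.INFO
        if level >= self.level:
            self.emit(eventDict)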
Example #8
    def main(self):
        """Parse arguments and run the script's main function via ``react``."""
        observer = None
        try:
            if not self.log_directory.exists():
                self.log_directory.makedirs()
            log_path = self.log_directory.child(
                b"%s-%d.log" % (os.path.basename(self.sys_module.argv[0]),
                                os.getpid()))
            log_file = log_path.open("a")
            observer = FileLogObserver(log_file).emit
            addObserver(observer)
            msg("Arguments: %s" % (self.sys_module.argv,))
        except (OSError, IOError):
            pass

        options = self._parse_options(self.sys_module.argv[1:])
        # XXX: We shouldn't be using this private _reactor API. See
        # https://twistedmatrix.com/trac/ticket/6200 and
        # https://twistedmatrix.com/trac/ticket/7527
        self._react(self.script.main, (options,), _reactor=self._reactor)

        # Not strictly necessary, but nice cleanup for tests:
        if observer is not None:
            removeObserver(observer)
            log_file.close()
Example #9
    def __init__(self, bot):
        self.clients = []
        self.server = None
        self.bot = bot
        try:
            if "websocket" in bot.config and bot.config["websocket"]["enabled"] == "1":
                # Initialize twisted logging
                try:
                    from twisted.python import log as twisted_log

                    twisted_log.addObserver(WebSocketManager.on_log_message)
                except ImportError:
                    log.error("twisted is not installed, websocket cannot be initialized.")
                    return
                except:
                    log.exception("Uncaught exception")
                    return

                port = int(bot.config["websocket"]["port"])
                secure = False
                key_path = None
                crt_path = None
                if "ssl" in bot.config["websocket"] and bot.config["websocket"]["ssl"] == "1":
                    secure = True
                    if "key_path" in bot.config["websocket"]:
                        key_path = bot.config["websocket"]["key_path"]
                    if "crt_path" in bot.config["websocket"]:
                        crt_path = bot.config["websocket"]["crt_path"]
                self.server = WebSocketServer(self, port, secure, key_path, crt_path)
        except:
            log.exception("Uncaught exception in WebSocketManager")
 def setUp(self):
     self.catcher = []
     observer = self.catcher.append
     log.addObserver(observer)
     self.addCleanup(log.removeObserver, observer)
     self.db = MagicMock()
     self.ws = MagicMock()
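With this catcher installed, tests in the class can assert directly on the captured event dicts, for instance (an illustrative test, not from the original suite):

 def test_logsSystemName(self):
     log.msg("stale entry purged", system="db")
     event = self.catcher.pop()
     self.assertEqual(event['message'], ("stale entry purged",))
     self.assertEqual(event['system'], "db")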
Example #11
    def __init__(self, bot):
        self.clients = []
        self.server = None
        self.bot = bot
        try:
            if 'websocket' in bot.config and bot.config['websocket']['enabled'] == '1':
                # Initialize twisted logging
                try:
                    from twisted.python import log as twisted_log
                    twisted_log.addObserver(WebSocketManager.on_log_message)
                except ImportError:
                    log.error('twisted is not installed, websocket cannot be initialized.')
                    return
                except:
                    log.exception('Uncaught exception')
                    return

                port = int(bot.config['websocket']['port'])
                secure = False
                key_path = None
                crt_path = None
                if 'ssl' in bot.config['websocket'] and bot.config['websocket']['ssl'] == '1':
                    secure = True
                    if 'key_path' in bot.config['websocket']:
                        key_path = bot.config['websocket']['key_path']
                    if 'crt_path' in bot.config['websocket']:
                        crt_path = bot.config['websocket']['crt_path']
                self.server = WebSocketServer(self, port, secure, key_path, crt_path)
        except:
            log.exception('Uncaught exception in WebSocketManager')
Example #12
    def test_connectionLostLogMsg(self):
        """
        When a connection is lost, an informative message should be logged
        (see L{getExpectedConnectionLostLogMsg}): an address identifying
        the port and the fact that it was closed.
        """

        loggedMessages = []
        def logConnectionLostMsg(eventDict):
            loggedMessages.append(log.textFromEventDict(eventDict))

        reactor = self.buildReactor()
        p = self.getListeningPort(reactor)
        expectedMessage = self.getExpectedConnectionLostLogMsg(p)

        def stopReactor(ignored):
            log.removeObserver(logConnectionLostMsg)
            reactor.stop()

        def doStopListening():
            log.addObserver(logConnectionLostMsg)
            maybeDeferred(p.stopListening).addCallback(stopReactor)

        reactor.callWhenRunning(doStopListening)
        reactor.run()

        self.assertIn(expectedMessage, loggedMessages)
Example #13
    def __init__(self, **kwargs):
        '''
        Initialize the C{Scheduler} instance.
        '''

        # Handle command line options
        from optparse import OptionParser
        parser = OptionParser(usage="%prog")
        parser.add_option('-v', '--verbose', action='store_true',
                dest='verbose', default=False, help='Enable verbose mode')

        (options, args) = parser.parse_args()

        # Don't accept any positional arguments
        if args:
            parser.error('Positional parameters are not supported: %s' % ' '.join(args))

        # Logging
        # TODO: log in database table perhaps? See ticket:6.
        log_fp = open(os.path.join(ssscrape.config.LOG_DIR, 'ssscrape-scheduler.log'), 'a')
        log.addObserver(log.FileLogObserver(log_fp).emit)

        if options.verbose:
            log.addObserver(log.FileLogObserver(sys.stdout).emit)


        # We cannot recover from SQL errors.
        ssscrape.database.add_error_callback(self.stop)
Example #14
    def test_malformedHeaderCGI(self):
        """
        Check for the error message in the duplicated header
        """
        cgiFilename = self.writeCGI(BROKEN_HEADER_CGI)

        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        url = url.encode("ascii")
        agent = client.Agent(reactor)
        d = agent.request(b"GET", url)
        d.addCallback(discardBody)
        loggedMessages = []

        def addMessage(eventDict):
            loggedMessages.append(log.textFromEventDict(eventDict))

        log.addObserver(addMessage)
        self.addCleanup(log.removeObserver, addMessage)

        def checkResponse(ignored):
            self.assertIn("ignoring malformed CGI header: " + repr(b'XYZ'),
                          loggedMessages)

        d.addCallback(checkResponse)
        return d
Example #15
    def test_startLoggingTwice(self):
        """
        There are some obscure error conditions that can occur when logging is
        started twice. See http://twistedmatrix.com/trac/ticket/3289 for more
        information.
        """
        self._startLoggingCleanup()
        # The bug is particular to the way that the t.p.log 'global' function
        # handle stdout. If we use our own stream, the error doesn't occur. If
        # we use our own LogPublisher, the error doesn't occur.
        sys.stdout = StringIO()

        def showError(eventDict):
            if eventDict['isError']:
                sys.__stdout__.write(eventDict['failure'].getTraceback())

        log.addObserver(showError)
        self.addCleanup(log.removeObserver, showError)
        observer = log.startLogging(sys.stdout)
        self.addCleanup(observer.stop)
        # At this point, we expect that sys.stdout is a LoggingFile object.
        self.assertIsInstance(sys.stdout, LoggingFile)
        fakeStdout = sys.stdout
        observer = log.startLogging(sys.stdout)
        self.assertIs(sys.stdout, fakeStdout)
Example #16
    def test_send_catch_log(self):
        test_signal = object()
        handlers_called = set()

        def log_received(event):
            handlers_called.add(log_received)
            assert "error_handler" in event['message'][0]
            assert event['logLevel'] == log.ERROR

        txlog.addObserver(log_received)
        dispatcher.connect(self.error_handler, signal=test_signal)
        dispatcher.connect(self.ok_handler, signal=test_signal)
        result = yield defer.maybeDeferred(self._get_result, test_signal, arg='test',
                                           handlers_called=handlers_called)

        assert self.error_handler in handlers_called
        assert self.ok_handler in handlers_called
        assert log_received in handlers_called
        self.assertEqual(result[0][0], self.error_handler)
        self.assert_(isinstance(result[0][1], Failure))
        self.assertEqual(result[1], (self.ok_handler, "OK"))

        txlog.removeObserver(log_received)
        self.flushLoggedErrors()
        dispatcher.disconnect(self.error_handler, signal=test_signal)
        dispatcher.disconnect(self.ok_handler, signal=test_signal)
Example #17
    def test_deferred(self):
        stdout = StringIO()
        log.addObserver(LevelFileLogObserver(stdout, logging.INFO))
        self.call_count = 0

        def test_result(d, count):
            long_calculation(count)
            self.call_count += 1


        @twisted_timeit
        def defer_calc(count_iteration, count_run):
            d = defer.Deferred()
            for i in xrange(count_iteration):
                d.addCallback(test_result, count_run)
            return d

        d = defer_calc(self.count_iteration, self.count_run)
        d.callback(None)

        self.assertEqual(self.call_count, self.count_iteration)

        result_log = stdout.getvalue()
        self.assertEqual('Time for deferred' in result_log, True)
        print '\n', self.sep_line_two, '\nLog from test_deferred:\n', \
            self.sep_line_one, '\n', result_log, '\n', self.sep_line_two, '\n'
Example #18
    def test_wsgiErrors(self):
        """
        The C{'wsgi.errors'} key of the C{environ} C{dict} passed to the
        application is a file-like object (as defined in the U{Input and Errors
        Streams<http://www.python.org/dev/peps/pep-0333/#input-and-error-streams>}
        section of PEP 333) which converts bytes written to it into events for
        the logging system.
        """
        events = []
        addObserver(events.append)
        self.addCleanup(removeObserver, events.append)

        errors = self.render('GET', '1.1', [], [''])
        def cbErrors((environ, startApplication)):
            errors = environ['wsgi.errors']
            errors.write('some message\n')
            errors.writelines(['another\nmessage\n'])
            errors.flush()
            self.assertEqual(events[0]['message'], ('some message\n',))
            self.assertEqual(events[0]['system'], 'wsgi')
            self.assertTrue(events[0]['isError'])
            self.assertEqual(events[1]['message'], ('another\nmessage\n',))
            self.assertEqual(events[1]['system'], 'wsgi')
            self.assertTrue(events[1]['isError'])
            self.assertEqual(len(events), 2)
        errors.addCallback(cbErrors)
        return errors
Example #19
def assertLogMessage(testCase, expectedMessages, callable, *args, **kwargs):
    """
    Assert that the callable logs the expected messages when called.

    XXX: Put this somewhere where it can be re-used elsewhere. See #6677.

    @param testCase: The test case controlling the test which triggers the
        logged messages and on which assertions will be called.
    @type testCase: L{unittest.SynchronousTestCase}

    @param expectedMessages: A L{list} of the expected log messages
    @type expectedMessages: L{list}

    @param callable: The function which is expected to produce the
        C{expectedMessages} when called.
    @type callable: L{callable}

    @param args: Positional arguments to be passed to C{callable}.
    @type args: L{list}

    @param kwargs: Keyword arguments to be passed to C{callable}.
    @type kwargs: L{dict}
    """
    loggedMessages = []
    log.addObserver(loggedMessages.append)
    testCase.addCleanup(log.removeObserver, loggedMessages.append)

    callable(*args, **kwargs)

    testCase.assertEqual(
        [m['message'][0] for m in loggedMessages],
        expectedMessages)
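For example, a test could check that a (hypothetical) announceStartup helper logs exactly one message:

def test_announcesStartup(self):
    # announceStartup is a stand-in name for the callable under test.
    assertLogMessage(self, ["node starting"], announceStartup)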
Example #20
 def __init__(self, observer):
     tw_log.addObserver(observer)
     for k, v in self.levels.items():
         method_name = v.lower()
         setattr(self,
                 method_name,
                 self._genLogger(k))
Example #21
def main():
    parser = create_parser()
    args = parser.parse_args()
    if args.debug or (not args.logfile and not args.db):
        print 'debug logging to console'
        log.addObserver(log.FileLogObserver(sys.stdout).emit)
    if not args.logfile and args.db:
        con = sqlite3.connect(args.db)
        try:
            # checking if db exists
            con.execute('select count(*) from revisions')
        except sqlite3.OperationalError:
            print 'creating db ' + args.db
            create_db(args.db)
            con = sqlite3.connect(args.db)
        print 'logging to ' + args.db
        log.addObserver(partial(db_observer, connection=con))
    if isinstance(args.logfile, basestring):
        log_file = open(args.logfile, 'a')
        print 'logging to ' + str(log_file)
        log.startLogging(log_file)
    factory = WebSocketClientFactory(args.websocket)
    factory.protocol = RecordClientProtocol
    connectWS(factory)
    reactor.run()
Example #22
 def setUp(self):
     self.catcher = []
     observer = self.catcher.append
     log.addObserver(observer)
     self.addCleanup(log.removeObserver, observer)
     self.node = Node(digest("test"), "127.0.0.1", 1234)
     self.router = RoutingTable(self, 20, self.node.id)
Example #23
 def setUp(self):
     """
     Create a temporary file with a fixed payload of 64 bytes.  Create a
     resource for that file and create a request which will be for that
     resource.  Each test can set a different range header to test different
     aspects of the implementation.
     """
     path = FilePath(self.mktemp())
     # This is just a jumble of random stuff.  It's supposed to be a good
     # set of data for this test, particularly in order to avoid
     # accidentally seeing the right result by having a byte sequence
     # repeated at different locations or by having byte values which are
     # somehow correlated with their position in the string.
     self.payload = ('\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7'
                     '\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0'
                     '\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d'
                     '&\xfd%\xdd\x82q/A\x10Y\x8b')
     path.setContent(self.payload)
     self.file = path.open()
     self.resource = static.File(self.file.name)
     self.resource.isLeaf = 1
     self.request = DummyRequest([''])
     self.request.uri = self.file.name
     self.catcher = []
     log.addObserver(self.catcher.append)
Example #24
def main(port, database=None, debug=False, stdout=True):
    if stdout:
        log.startLogging(sys.stdout)
    if debug:
        log.addObserver(shutdownOnError)
    reactor.listenTCP(port, ServerFactory(database))
    reactor.run()
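shutdownOnError is not defined in this snippet; given how it is installed under debug, it is presumably a log observer that halts the reactor on the first logged error, along these lines (a guess, not the project's actual code):

def shutdownOnError(eventDict):
    # Stop the process on the first error event so a debug run fails
    # fast instead of continuing after a swallowed exception.
    if eventDict.get('isError'):
        log.removeObserver(shutdownOnError)
        reactor.stop()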
Example #25
    def _test_level(self, level, messages_number):
        observer = LevelFileLogObserver(self.log_file, level)
        log.addObserver(observer)

        self.LOG.debug('test debug')
        self.LOG.info('test info')
        self.LOG.warning('test warning')
        self.LOG.error('test error')
        self.LOG.critical('test critical')

        log.removeObserver(observer)

        with open(self.log_path) as f:
            lines = [line.strip() for line in f.readlines()]

        self.assertEqual(len(lines), messages_number)

        for line in lines:
            m = re.match(self.rx, line)
            self.assertIsNotNone(m)

            time, level_name, system, entry_text = m.groups()

            time = datetime.strptime(time, "%Y-%m-%d %H:%M:%S")
            entry_level = logging.getLevelName(level_name)

            self.assertGreaterEqual(entry_level, level)
            self.assertEqual(system, __name__)

            text = "test {0}".format(level_name.lower())
            self.assertEqual(entry_text, text)
Example #26
def stopOnError(case, reactor, publisher=None):
    """
    Stop the reactor as soon as any error is logged on the given publisher.

    This is beneficial for tests which will wait for a L{Deferred} to fire
    before completing (by passing or failing).  Certain implementation bugs may
    prevent the L{Deferred} from firing with any result at all (consider a
    protocol's C{dataReceived} method that raises an exception: this exception
    is logged but it won't ever cause a L{Deferred} to fire).  In that case the
    test would have to complete by timing out which is a much less desirable
    outcome than completing as soon as the unexpected error is encountered.

    @param case: A L{SynchronousTestCase} to use to clean up the necessary log
        observer when the test is over.
    @param reactor: The reactor to stop.
    @param publisher: A L{LogPublisher} to watch for errors.  If C{None}, the
        global log publisher will be watched.
    """
    if publisher is None:
        from twisted.python import log as publisher
    running = [None]
    def stopIfError(event):
        if running and event.get('isError'):
            running.pop()
            reactor.stop()
    publisher.addObserver(stopIfError)
    case.addCleanup(publisher.removeObserver, stopIfError)
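Typical use from a reactor-building test looks roughly like this (driveProtocol is a hypothetical test helper):

def test_protocolErrors(self):
    reactor = self.buildReactor()
    # Any logged error now stops this reactor, so the test fails
    # promptly instead of waiting for trial's timeout.
    stopOnError(self, reactor)
    reactor.callWhenRunning(driveProtocol, reactor)
    self.runReactor(reactor)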
Example #27
    def __init__(self, crawler):
        self.crawler = crawler
        self.timeout = settings.getint('CLOSESPIDER_TIMEOUT')
        self.itemcount = settings.getint('CLOSESPIDER_ITEMCOUNT')
        # XXX: legacy support - remove for future releases
        if settings.getint('CLOSESPIDER_ITEMPASSED'):
            warnings.warn("CLOSESPIDER_ITEMPASSED setting is deprecated, use CLOSESPIDER_ITEMCOUNT instead", ScrapyDeprecationWarning)
            self.itemcount = settings.getint('CLOSESPIDER_ITEMPASSED')
        self.pagecount = settings.getint('CLOSESPIDER_PAGECOUNT')
        self.errorcount = settings.getint('CLOSESPIDER_ERRORCOUNT')

        self.errorcounts = defaultdict(int)
        self.pagecounts = defaultdict(int)
        self.counts = defaultdict(int)
        self.tasks = {}

        if self.errorcount:
            txlog.addObserver(self.catch_log)
        if self.pagecount:
            dispatcher.connect(self.page_count, signal=signals.response_received)
        if self.timeout:
            dispatcher.connect(self.spider_opened, signal=signals.spider_opened)
        if self.itemcount:
            dispatcher.connect(self.item_scraped, signal=signals.item_scraped)
        dispatcher.connect(self.spider_closed, signal=signals.spider_closed)
Example #28
    def test_default_item_completed(self):
        item = dict(name='name')
        assert self.pipe.item_completed([], item, self.info) is item

        # Check that failures are logged by default
        fail = Failure(Exception())
        results = [(True, 1), (False, fail)]

        events = []
        txlog.addObserver(events.append)
        new_item = self.pipe.item_completed(results, item, self.info)
        txlog.removeObserver(events.append)
        self.flushLoggedErrors()

        assert new_item is item
        assert len(events) == 1
        assert events[0]['logLevel'] == log.ERROR
        assert events[0]['failure'] is fail

        # disable failure logging and check again
        self.pipe.LOG_FAILED_RESULTS = False
        events = []
        txlog.addObserver(events.append)
        new_item = self.pipe.item_completed(results, item, self.info)
        txlog.removeObserver(events.append)
        self.flushLoggedErrors()
        assert new_item is item
        assert len(events) == 0
Example #29
 def startLogging(self, observer):
     """
     Override C{startLogging} to call L{log.addObserver} instead of
     L{log.startLoggingWithObserver}.
     """
     log.addObserver(observer)
     self.logger._initialLog()
Example #30
    def test_newPluginsOnReadOnlyPath(self):
        """
        Verify that a failure to write the dropin.cache file on a read-only
        path will not affect the list of plugins returned.

        Note: this test should pass on both Linux and Windows, but may not
        provide useful coverage on Windows due to the different meaning of
        "read-only directory".
        """
        self.unlockSystem()
        self.sysplug.child('newstuff.py').setContent(pluginFileContents('one'))
        self.lockSystem()

        # Take the developer path out, so that the system plugins are actually
        # examined.
        sys.path.remove(self.devPath.path)

        # Start observing log events to see the warning
        events = []
        addObserver(events.append)
        self.addCleanup(removeObserver, events.append)

        self.assertIn('one', self.getAllPlugins())

        # Make sure something was logged about the cache.
        expected = "Unable to write to plugin cache %s: error number %d" % (
            self.syscache.path, errno.EPERM)
        for event in events:
            if expected in textFromEventDict(event):
                break
        else:
            self.fail(
                "Did not observe unwriteable cache warning in log "
                "events: %r" % (events,))
Example #31
 def test_outputReceivedCompleteLineInvalidUTF8(self):
     """
     Getting invalid UTF-8 results in the repr of the raw message
     """
     events = []
     self.addCleanup(log.removeObserver, events.append)
     log.addObserver(events.append)
     self.pm.addProcess("foo", ["foo"])
     # Schedule the process to start
     self.pm.startService()
     # Advance the reactor to start the process
     self.reactor.advance(0)
     self.assertIn("foo", self.pm.protocols)
     # Long time passes
     self.reactor.advance(self.pm.threshold)
     # Process greets
     self.pm.protocols["foo"].outReceived(b'\xffhello world!\n')
     self.assertEquals(len(events), 1)
     messages = events[0]['message']
     self.assertEquals(len(messages), 1)
     message = messages[0]
     tag, output = message.split(' ', 1)
     self.assertEquals(tag, '[foo]')
     self.assertEquals(output, repr(b'\xffhello world!'))
Example #32
    def test_malformedHeaderCGI(self):
        """
        Check for the error message in the duplicated header
        """
        cgiFilename = self.writeCGI(BROKEN_HEADER_CGI)

        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum, )
        factory = client.HTTPClientFactory(url)
        reactor.connectTCP('localhost', portnum, factory)
        loggedMessages = []

        def addMessage(eventDict):
            loggedMessages.append(log.textFromEventDict(eventDict))

        log.addObserver(addMessage)
        self.addCleanup(log.removeObserver, addMessage)

        def checkResponse(ignored):
            self.assertEqual(loggedMessages[0],
                             "ignoring malformed CGI header: 'XYZ'")

        factory.deferred.addCallback(checkResponse)
        return factory.deferred
Example #33
    def test_multiFileDescriptorReceivedPerRecvmsgBadCMSG(self):
        """
        _SendmsgMixin handles multiple file descriptors per recvmsg, calling
        L{IFileDescriptorReceiver.fileDescriptorReceived} once per received
        file descriptor. Scenario: unsupported CMSGs.
        """
        # Given that we can't just send random/invalid ancillary data via the
        # packer for it to be sent via sendmsg -- the kernel would not accept
        # it -- we'll temporarily replace recvmsg with a fake one that produces
        # a non-supported ancillary message level/type. This being said, from
        # the perspective of the ancillaryPacker, all that is required is to
        # let the test driver know that 0 file descriptors are expected.
        from twisted.python import sendmsg

        def ancillaryPacker(fdsToSend):
            ancillary = []
            expectedCount = 0
            return ancillary, expectedCount

        def fakeRecvmsgUnsupportedAncillary(skt, *args, **kwargs):
            data = b"some data"
            ancillary = [(None, None, b"")]
            flags = 0
            return sendmsg.ReceivedMessage(data, ancillary, flags)

        events = []
        addObserver(events.append)
        self.addCleanup(removeObserver, events.append)

        self.patch(sendmsg, "recvmsg", fakeRecvmsgUnsupportedAncillary)
        self._sendmsgMixinFileDescriptorReceivedDriver(ancillaryPacker)

        # Verify the expected message was logged.
        expectedMessage = "received unsupported ancillary data"
        found = any(expectedMessage in e["format"] for e in events)
        self.assertTrue(found, "Expected message not found in logged events")
Example #34
 def masterapp(self):
     """
     """
     config = json.load(open(self.configpath, 'r'))
     GlobalObject().json_config = config
     mastercnf = config.get('master')  # master
     rootport = mastercnf.get('rootport')
     webport = mastercnf.get('webport')
     masterlog = mastercnf.get('log')
     self.root = PBRoot()
     rootservice = services.Service("rootservice")
     self.root.addServiceChannel(rootservice)
     self.webroot = vhost.NameVirtualHost()
     self.webroot.addHost('0.0.0.0', './')
     GlobalObject().root = self.root
     GlobalObject().webroot = self.webroot
     if masterlog:
         log.addObserver(loogoo(masterlog))  # log handling
     log.startLogging(sys.stdout)
     import webapp
     import rootapp
     reactor.listenTCP(webport, DelaySite(self.webroot))  # start listening on webport
     reactor.listenTCP(rootport,
                       BilateralFactory(self.root))  # start listening on rootport
Example #35
    def testErroneousErrors(self):
        L1 = []
        L2 = []
        log.addObserver(lambda events: events['isError'] or L1.append(events))
        log.addObserver(lambda events: 1 / 0)
        log.addObserver(lambda events: events['isError'] or L2.append(events))
        log.msg("Howdy, y'all.")

        excs = [f.type for f in log.flushErrors(ZeroDivisionError)]
        self.assertEquals([ZeroDivisionError], excs)

        self.assertEquals(len(L1), 2)
        self.assertEquals(len(L2), 2)

        self.assertEquals(L1[1]['message'], ("Howdy, y'all.", ))
        self.assertEquals(L2[0]['message'], ("Howdy, y'all.", ))

        # The observer has been removed, there should be no exception
        log.msg("Howdy, y'all.")

        self.assertEquals(len(L1), 3)
        self.assertEquals(len(L2), 3)
        self.assertEquals(L1[2]['message'], ("Howdy, y'all.", ))
        self.assertEquals(L2[2]['message'], ("Howdy, y'all.", ))
Example #36
 def setUp(self):
     self.resultLogs = []
     log.addObserver(self.resultLogs.append)
Example #37
 def setUp(self):
     self.catcher = []
     self.observer = self.catcher.append
     log.addObserver(self.observer)
     self.addCleanup(log.removeObserver, self.observer)
Example #38
else:
    dsn_url = None

if dsn_url:
    client = Client(dsn=dsn_url)

    def log_sentry(dictionary):
        if dictionary.get('isError') and 'failure' in dictionary:
            try:
                # Raise the failure
                dictionary['failure'].raiseException()
            except:
                # so we can capture it here.
                client.captureException()

    log.addObserver(log_sentry)

# Tor socks
if "SOCKS_PROXY" in os.environ and "HTTP_PROXY" in os.environ:
    raise Exception("Both SOCKS_PROXY and HTTP_PROXY envvars are set. Whoops.")

if "SOCKS_PROXY" in os.environ:
    print("Using Tor SOCKS proxy handlers for downloaders.")
    DOWNLOAD_HANDLERS = {
        'http': 'torspider.transports.TorDownloadHandler',
        'https': 'torspider.transports.TorDownloadHandler'
    }
else:
    DOWNLOADER_MIDDLEWARES[
        "scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware"] = 1
Example #39
 def setUpLogging(self):
     self._logEvents = []
     log.addObserver(self._logEvents.append)
     self.addCleanup(log.removeObserver, self._logEvents.append)
Example #40
    def __init__(
        self,
        name,
        initial_severity,
        initial_console_severity=None,
        initial_file_severity=None,
        output_dir=None,
        output_stdout=True,
        max_log_size_mb=5,
        max_num_log_files=3,
        log_file_name=None,
        log_colors='on',
    ):

        # disabled - this hijacks stdout and appends RESETCOLOR at the end,
        # regardless of whether we are on a tty; standard colors work without
        # it, so no need for it ATM
        # colorama.init()

        # initialize root logger
        logging.setLoggerClass(_VariableLogging)
        self.logger = logging.getLogger(name)
        self.logger.setLevel(
            helpers.Severity.get_level_by_string(initial_severity))

        initial_console_severity = (initial_console_severity
                                    if initial_console_severity is not None
                                    else initial_severity)
        initial_file_severity = (initial_file_severity if initial_file_severity
                                 is not None else initial_severity)

        if output_stdout:

            # tty friendliness:
            # on - disable colors if stdout is not a tty
            # always - never disable colors
            # off - always disable colors
            if log_colors == 'off':
                enable_colors = False
            elif log_colors == 'always':
                enable_colors = True
            else:  # on - colors when stdout is a tty
                enable_colors = sys.stdout.isatty()

            human_stdout_handler = logging.StreamHandler(sys.__stdout__)
            human_stdout_handler.setFormatter(
                clients.logging.formatter.human_readable.
                HumanReadableFormatter(enable_colors))
            human_stdout_handler.setLevel(
                helpers.Severity.get_level_by_string(initial_console_severity))
            self.logger.addHandler(human_stdout_handler)

        if output_dir is not None:
            log_file_name = (name.replace('-', '.') if log_file_name is None
                             else log_file_name.replace('.log', ''))
            self.enable_log_file_writing(
                output_dir,
                max_log_size_mb,
                max_num_log_files,
                log_file_name,
                initial_file_severity,
            )

        addObserver(TwistedExceptionSink(self.logger))
Example #41
        try:
            if not event.get('isError') or 'failure' not in event:
                return

            err = event['failure']

            # Don't report Rollbar internal errors to ourselves
            if issubclass(err.type, ApiException):
                log.error('Rollbar internal error: %s', err.value)
            else:
                report_exc_info((err.type, err.value, err.getTracebackObject()))
        except:
            log.exception('Error while reporting to Rollbar')

    # Add Rollbar as a log handler which will report uncaught errors
    twisted_log.addObserver(log_handler)


except ImportError:
    treq = None


def get_request():
    """
    Get the current request object. Implementation varies on
    library support. Modified below when we know which framework
    is being used.
    """

    # TODO(cory): add in a generic _get_locals_request() which
    # will iterate up through the call stack and look for a variable
Example #42
 def _add(self):
     if self._added == 0:
         log.addObserver(self.gotEvent)
     self._added += 1
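The matching release half of this reference count is not shown in the snippet; a plausible counterpart would be:

 def _remove(self):
     self._added -= 1
     if self._added == 0:
         log.removeObserver(self.gotEvent)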
Example #43
 def start(self):
     """Start observing log events."""
     log.addObserver(self.emit)
Example #44
# Set up Exception Handling.
if 'SENTRY_DSN' in os.environ:
    twisted_sentry_client = Client(dsn='twisted+%s' % os.environ['SENTRY_DSN'])

    def logToSentry(event):
        if event.get('isError'):
            if 'failure' in event:
                f = event['failure']

                twisted_sentry_client.captureException(
                    (f.type, f.value, f.getTracebackObject()),
                )
            else:
                twisted_sentry_client.captureMessage(event['message'])

    log.addObserver(logToSentry)


def main(*args):
    exchanges = sys.argv[1:]
    OrderbookPollTask(exchanges=exchanges).start_task()

    # Hack for now so we don't start unimplemented trade pollers on the eth_btc pairs.
    non_orderbook_exchanges = [
        e for e in exchanges if all([
            x not in e for x in ['ETH', 'BCH', 'EUR']
        ])
    ]

    logger.info(non_orderbook_exchanges)
Example #45
 def observe(self):
     loggedMessages = []
     log.addObserver(loggedMessages.append)
     self.addCleanup(log.removeObserver, loggedMessages.append)
     return loggedMessages
Example #46
    def start_server(keys, first_startup=False):
        # logging
        logFile = logfile.LogFile.fromFullPath(os.path.join(
            DATA_FOLDER, "debug.log"),
                                               rotateLength=15000000,
                                               maxRotatedFiles=1)
        log.addObserver(FileLogObserver(logFile, level=LOGLEVEL).emit)
        log.addObserver(FileLogObserver(level=LOGLEVEL).emit)
        logger = Logger(system="OpenBazaard")

        # NAT traversal
        p = PortMapper()
        p.add_port_mapping(PORT, PORT, "UDP")
        logger.info("Finding NAT Type...")

        response = looping_retry(stun.get_ip_info, "0.0.0.0", PORT)

        logger.info("%s on %s:%s" % (response[0], response[1], response[2]))
        ip_address = response[1]
        port = response[2]

        if response[0] == "Full Cone":
            nat_type = FULL_CONE
        elif response[0] == "Restric NAT":
            nat_type = RESTRICTED
        else:
            nat_type = SYMMETRIC

        def on_bootstrap_complete(resp):
            logger.info("bootstrap complete")
            task.LoopingCall(mserver.get_messages, mlistener).start(3600)
            task.LoopingCall(check_unfunded_for_payment, db, libbitcoin_client,
                             nlistener, TESTNET).start(600)
            task.LoopingCall(rebroadcast_unconfirmed, db, libbitcoin_client,
                             TESTNET).start(600)

        protocol = OpenBazaarProtocol(
            db, (ip_address, port),
            nat_type,
            testnet=TESTNET,
            relaying=True if nat_type == FULL_CONE else False)

        # kademlia
        SEED_URLS = SEEDS_TESTNET if TESTNET else SEEDS
        relay_node = None
        if nat_type != FULL_CONE:
            for seed in SEED_URLS:
                try:
                    relay_node = (socket.gethostbyname(seed[0].split(":")[0]),
                                  28469 if TESTNET else 18469)
                    break
                except socket.gaierror:
                    pass

        try:
            kserver = Server.loadState(
                os.path.join(DATA_FOLDER,
                             'cache.pickle'), ip_address, port, protocol, db,
                nat_type, relay_node, on_bootstrap_complete, storage)
        except Exception:
            node = Node(keys.guid, ip_address, port, keys.verify_key.encode(),
                        relay_node, nat_type,
                        Profile(db).get().vendor)
            protocol.relay_node = node.relay_node
            kserver = Server(node,
                             db,
                             keys.signing_key,
                             KSIZE,
                             ALPHA,
                             storage=storage)
            kserver.protocol.connect_multiplexer(protocol)
            kserver.bootstrap(kserver.querySeed(SEED_URLS)).addCallback(
                on_bootstrap_complete)
        kserver.saveStateRegularly(os.path.join(DATA_FOLDER, 'cache.pickle'),
                                   10)
        protocol.register_processor(kserver.protocol)

        # market
        mserver = network.Server(kserver, keys.signing_key, db)
        mserver.protocol.connect_multiplexer(protocol)
        protocol.register_processor(mserver.protocol)

        looping_retry(reactor.listenUDP, port, protocol)

        interface = "0.0.0.0" if ALLOWIP != ["127.0.0.1"] else "127.0.0.1"

        # websockets api
        authenticated_sessions = []
        ws_api = WSFactory(mserver, kserver, only_ip=ALLOWIP)
        ws_factory = AuthenticatedWebSocketFactory(ws_api)
        ws_factory.authenticated_sessions = authenticated_sessions
        ws_factory.protocol = AuthenticatedWebSocketProtocol
        if SSL:
            reactor.listenSSL(WSPORT,
                              ws_factory,
                              ChainedOpenSSLContextFactory(SSL_KEY, SSL_CERT),
                              interface=interface)
        else:
            reactor.listenTCP(WSPORT, ws_factory, interface=interface)

        # rest api
        rest_api = RestAPI(mserver,
                           kserver,
                           protocol,
                           username,
                           password,
                           authenticated_sessions,
                           only_ip=ALLOWIP)
        if SSL:
            reactor.listenSSL(RESTPORT,
                              rest_api,
                              ChainedOpenSSLContextFactory(SSL_KEY, SSL_CERT),
                              interface=interface)
        else:
            reactor.listenTCP(RESTPORT, rest_api, interface=interface)

        # blockchain
        if TESTNET:
            libbitcoin_client = LibbitcoinClient(
                LIBBITCOIN_SERVERS_TESTNET,
                log=Logger(service="LibbitcoinClient"))
        else:
            libbitcoin_client = LibbitcoinClient(
                LIBBITCOIN_SERVERS, log=Logger(service="LibbitcoinClient"))
        heartbeat_server.libbitcoin = libbitcoin_client

        # listeners
        nlistener = NotificationListenerImpl(ws_api, db)
        mserver.protocol.add_listener(nlistener)
        mlistener = MessageListenerImpl(ws_api, db)
        mserver.protocol.add_listener(mlistener)
        blistener = BroadcastListenerImpl(ws_api, db)
        mserver.protocol.add_listener(blistener)

        protocol.set_servers(ws_api, libbitcoin_client)

        if first_startup:
            heartbeat_server.push(
                json.dumps({
                    "status": "GUID generation complete",
                    "username": username,
                    "password": password
                }))

        heartbeat_server.set_status("online")

        logger.info("startup took %s seconds" %
                    str(round(time.time() - args[7], 2)))

        def shutdown():
            logger.info("shutting down server")
            for vendor in protocol.vendors.values():
                db.vendors.save_vendor(vendor.id.encode("hex"),
                                       vendor.getProto().SerializeToString())
            PortMapper().clean_my_mappings(PORT)
            protocol.shutdown()

        reactor.addSystemEventTrigger('before', 'shutdown', shutdown)
Example #47
    @param case: A L{SynchronousTestCase} to use to clean up the necessary log
        observer when the test is over.
    @param reactor: The reactor to stop.
    @param publisher: A L{LogPublisher} to watch for errors.  If L{None}, the
        global log publisher will be watched.
    """
    if publisher is None:
        from twisted.python import log as publisher
    running = [None]

    def stopIfError(event):
        if running and event.get("isError"):
            running.pop()
            reactor.stop()

    publisher.addObserver(stopIfError)
    case.addCleanup(publisher.removeObserver, stopIfError)


class ReactorBuilder:
    """
    L{SynchronousTestCase} mixin which provides a reactor-creation API.  This
    mixin defines C{setUp} and C{tearDown}, so mix it in before
    L{SynchronousTestCase} or call its methods from the overridden ones in the
    subclass.

    @cvar skippedReactors: A dict mapping FQPN strings of reactors for
        which the tests defined by this class will be skipped to strings
        giving the skip message.
    @cvar requiredInterfaces: A C{list} of interfaces which the reactor must
        provide or these tests will be skipped.  The default, L{None}, means
Example #48
 def doStopListening():
     log.addObserver(logConnectionLostMsg)
     maybeDeferred(p.stopListening).addCallback(stopReactor)
Example #49
def run():
    realnets = dict((name, net) for name, net in networks.nets.iteritems() if '_testnet' not in name)
    
    parser = fixargparse.FixedArgumentParser(description='p2pool (version %s)' % (p2pool.__version__,), fromfile_prefix_chars='@')
    parser.add_argument('--version', action='version', version=p2pool.__version__)
    parser.add_argument('--net',
        help='use specified network (default: bitbar)',
        action='store', choices=sorted(realnets), default='bitbar', dest='net_name')
    parser.add_argument('--testnet',
        help='''use the network's testnet''',
        action='store_const', const=True, default=False, dest='testnet')
    parser.add_argument('--debug',
        help='enable debugging mode',
        action='store_const', const=True, default=False, dest='debug')
    parser.add_argument('--datadir',
        help='store data in this directory (default: <directory run_p2pool.py is in>/data)',
        type=str, action='store', default=None, dest='datadir')
    parser.add_argument('--logfile',
        help='''log to this file (default: data/<NET>/log)''',
        type=str, action='store', default=None, dest='logfile')
    parser.add_argument('--merged',
        help='call getauxblock on this url to get work for merged mining (example: http://ncuser:ncpass@ncaddress:10332/)',
        type=str, action='append', default=[], dest='merged_urls')
    parser.add_argument('--give-author', metavar='DONATION_PERCENTAGE',
        help='donate this percentage of work towards the development of p2pool (default: 0.0)',
        type=float, action='store', default=0.0, dest='donation_percentage')
    parser.add_argument('--iocp',
        help='use Windows IOCP API in order to avoid errors due to large number of sockets being open',
        action='store_true', default=False, dest='iocp')
    parser.add_argument('--irc-announce',
        help='announce any blocks found on irc://irc.freenode.net/#p2pool',
        action='store_true', default=False, dest='irc_announce')
    parser.add_argument('--no-bugreport',
        help='disable submitting caught exceptions to the author',
        action='store_true', default=False, dest='no_bugreport')
    
    p2pool_group = parser.add_argument_group('p2pool interface')
    p2pool_group.add_argument('--p2pool-port', metavar='PORT',
        help='use port PORT to listen for connections (forward this port from your router!) (default: %s)' % ', '.join('%s:%i' % (name, net.P2P_PORT) for name, net in sorted(realnets.items())),
        type=int, action='store', default=None, dest='p2pool_port')
    p2pool_group.add_argument('-n', '--p2pool-node', metavar='ADDR[:PORT]',
        help='connect to existing p2pool node at ADDR listening on port PORT (defaults to default p2pool P2P port) in addition to builtin addresses',
        type=str, action='append', default=[], dest='p2pool_nodes')
    parser.add_argument('--disable-upnp',
        help='''don't attempt to use UPnP to forward p2pool's P2P port from the Internet to this computer''',
        action='store_false', default=True, dest='upnp')
    p2pool_group.add_argument('--max-conns', metavar='CONNS',
        help='maximum incoming connections (default: 40)',
        type=int, action='store', default=40, dest='p2pool_conns')
    p2pool_group.add_argument('--outgoing-conns', metavar='CONNS',
        help='outgoing connections (default: 6)',
        type=int, action='store', default=6, dest='p2pool_outgoing_conns')
    
    worker_group = parser.add_argument_group('worker interface')
    worker_group.add_argument('-w', '--worker-port', metavar='PORT or ADDR:PORT',
        help='listen on PORT on interface with ADDR for RPC connections from miners (default: all interfaces, %s)' % ', '.join('%s:%i' % (name, net.WORKER_PORT) for name, net in sorted(realnets.items())),
        type=str, action='store', default=None, dest='worker_endpoint')
    worker_group.add_argument('-f', '--fee', metavar='FEE_PERCENTAGE',
        help='''charge workers mining to their own bitcoin address (by setting their miner's username to a bitcoin address) this percentage fee to mine on your p2pool instance. Amount displayed at http://127.0.0.1:WORKER_PORT/fee (default: 0)''',
        type=float, action='store', default=0, dest='worker_fee')
    
    bitcoind_group = parser.add_argument_group('bitcoind interface')
    bitcoind_group.add_argument('--bitcoind-address', metavar='BITCOIND_ADDRESS',
        help='connect to this address (default: 127.0.0.1)',
        type=str, action='store', default='127.0.0.1', dest='bitcoind_address')
    bitcoind_group.add_argument('--bitcoind-rpc-port', metavar='BITCOIND_RPC_PORT',
        help='''connect to JSON-RPC interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.RPC_PORT) for name, net in sorted(realnets.items())),
        type=int, action='store', default=None, dest='bitcoind_rpc_port')
    bitcoind_group.add_argument('--bitcoind-rpc-ssl',
        help='connect to JSON-RPC interface using SSL',
        action='store_true', default=False, dest='bitcoind_rpc_ssl')
    bitcoind_group.add_argument('--bitcoind-p2p-port', metavar='BITCOIND_P2P_PORT',
        help='''connect to P2P interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.P2P_PORT) for name, net in sorted(realnets.items())),
        type=int, action='store', default=None, dest='bitcoind_p2p_port')
    
    bitcoind_group.add_argument(metavar='BITCOIND_RPCUSERPASS',
        help='bitcoind RPC interface username, then password, space-separated (only one being provided will cause the username to default to being empty, and none will cause P2Pool to read them from bitcoin.conf)',
        type=str, action='store', default=[], nargs='*', dest='bitcoind_rpc_userpass')
    
    args = parser.parse_args()
    
    if args.debug:
        p2pool.DEBUG = True
        defer.setDebugging(True)
    else:
        p2pool.DEBUG = False
    
    net_name = args.net_name + ('_testnet' if args.testnet else '')
    net = networks.nets[net_name]
    
    datadir_path = os.path.join((os.path.join(os.path.dirname(sys.argv[0]), 'data') if args.datadir is None else args.datadir), net_name)
    if not os.path.exists(datadir_path):
        os.makedirs(datadir_path)
    
    if len(args.bitcoind_rpc_userpass) > 2:
        parser.error('a maximum of two arguments are allowed')
    args.bitcoind_rpc_username, args.bitcoind_rpc_password = ([None, None] + args.bitcoind_rpc_userpass)[-2:]
    
    if args.bitcoind_rpc_password is None:
        conf_path = net.PARENT.CONF_FILE_FUNC()
        if not os.path.exists(conf_path):
            parser.error('''Bitcoin configuration file not found. Manually enter your RPC password.\r\n'''
                '''If you actually haven't created a configuration file, you should create one at %s with the text:\r\n'''
                '''\r\n'''
                '''server=1\r\n'''
                '''rpcpassword=%x\r\n'''
                '''\r\n'''
                '''Keep that password secret! After creating the file, restart Bitcoin.''' % (conf_path, random.randrange(2**128)))
        conf = open(conf_path, 'rb').read()
        contents = {}
        for line in conf.splitlines(True):
            if '#' in line:
                line = line[:line.index('#')]
            if '=' not in line:
                continue
            k, v = line.split('=', 1)
            contents[k.strip()] = v.strip()
        for conf_name, var_name, var_type in [
            ('rpcuser', 'bitcoind_rpc_username', str),
            ('rpcpassword', 'bitcoind_rpc_password', str),
            ('rpcport', 'bitcoind_rpc_port', int),
            ('port', 'bitcoind_p2p_port', int),
        ]:
            if getattr(args, var_name) is None and conf_name in contents:
                setattr(args, var_name, var_type(contents[conf_name]))
        if args.bitcoind_rpc_password is None:
            parser.error('''Bitcoin configuration file didn't contain an rpcpassword= line! Add one!''')
    
    if args.bitcoind_rpc_username is None:
        args.bitcoind_rpc_username = ''
    
    if args.bitcoind_rpc_port is None:
        args.bitcoind_rpc_port = net.PARENT.RPC_PORT
    
    if args.bitcoind_p2p_port is None:
        args.bitcoind_p2p_port = net.PARENT.P2P_PORT
    
    if args.p2pool_port is None:
        args.p2pool_port = net.P2P_PORT
    
    if args.p2pool_outgoing_conns > 10:
        parser.error('''--outgoing-conns can't be more than 10''')
    
    if args.worker_endpoint is None:
        worker_endpoint = '', net.WORKER_PORT
    elif ':' not in args.worker_endpoint:
        worker_endpoint = '', int(args.worker_endpoint)
    else:
        addr, port = args.worker_endpoint.rsplit(':', 1)
        worker_endpoint = addr, int(port)

    def separate_url(url):
        s = urlparse.urlsplit(url)
        if '@' not in s.netloc:
            parser.error('merged url netloc must contain an "@"')
        userpass, new_netloc = s.netloc.rsplit('@', 1)
        return urlparse.urlunsplit(s._replace(netloc=new_netloc)), userpass
    merged_urls = map(separate_url, args.merged_urls)
    
    if args.logfile is None:
        args.logfile = os.path.join(datadir_path, 'log')
    
    logfile = logging.LogFile(args.logfile)
    pipe = logging.TimestampingPipe(logging.TeePipe([logging.EncodeReplacerPipe(sys.stderr), logfile]))
    sys.stdout = logging.AbortPipe(pipe)
    sys.stderr = log.DefaultObserver.stderr = logging.AbortPipe(logging.PrefixPipe(pipe, '> '))
    if hasattr(signal, "SIGUSR1"):
        def sigusr1(signum, frame):
            print 'Caught SIGUSR1, closing %r...' % (args.logfile,)
            logfile.reopen()
            print '...and reopened %r after catching SIGUSR1.' % (args.logfile,)
        signal.signal(signal.SIGUSR1, sigusr1)
    task.LoopingCall(logfile.reopen).start(5)
    
    class ErrorReporter(object):
        def __init__(self):
            self.last_sent = None
        
        def emit(self, eventDict):
            if not eventDict["isError"]:
                return
            
            if self.last_sent is not None and time.time() < self.last_sent + 5:
                return
            self.last_sent = time.time()
            
            if 'failure' in eventDict:
                text = ((eventDict.get('why') or 'Unhandled Error')
                    + '\n' + eventDict['failure'].getTraceback())
            else:
                text = " ".join([str(m) for m in eventDict["message"]]) + "\n"
            
            from twisted.web import client
            client.getPage(
                url='http://u.forre.st/p2pool_error.cgi',
                method='POST',
                postdata=p2pool.__version__ + ' ' + net.NAME + '\n' + text,
                timeout=15,
            ).addBoth(lambda x: None)
    if not args.no_bugreport:
        log.addObserver(ErrorReporter().emit)
    
    reactor.callWhenRunning(main, args, net, datadir_path, merged_urls, worker_endpoint)
    reactor.run()
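
The ErrorReporter above is just a legacy-log observer with a crude rate limit bolted on. A minimal standalone sketch of the same shape (the class name and the report callback are illustrative, not p2pool's):

import time
from twisted.python import log

class RateLimitedErrorObserver(object):
    '''Forward error events to a report callback, at most once per interval.'''

    def __init__(self, report, interval=5):
        self.report = report      # callable taking the formatted error text
        self.interval = interval  # minimum seconds between reports
        self.last_sent = None

    def emit(self, eventDict):
        if not eventDict['isError']:
            return
        if self.last_sent is not None and time.time() < self.last_sent + self.interval:
            return
        self.last_sent = time.time()
        # textFromEventDict renders 'failure'/'format'/'message' uniformly.
        self.report(log.textFromEventDict(eventDict) or '')

log.addObserver(RateLimitedErrorObserver(report=lambda text: None).emit)
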
Example #50
0
    def test_avoidLeakingFileDescriptors(self):
        """
        If associated with a protocol which does not provide
        L{IFileDescriptorReceiver}, file descriptors received by the
        L{IUNIXTransport} implementation are closed and a warning is emitted.
        """
        # To verify this, establish a connection.  Send one end of the
        # connection over the IUNIXTransport implementation.  After the
        # duplicate should no longer exist, close the original.  If the
        # opposite end of the connection observes the connection as closed,
        # the duplicate does not exist.
        from socket import socketpair
        probeClient, probeServer = socketpair()

        events = []
        addObserver(events.append)
        self.addCleanup(removeObserver, events.append)

        class RecordEndpointAddresses(SendFileDescriptor):
            def connectionMade(self):
                self.hostAddress = self.transport.getHost()
                self.peerAddress = self.transport.getPeer()
                SendFileDescriptor.connectionMade(self)

        server = RecordEndpointAddresses(probeClient.fileno(), b"junk")
        client = ConnectableProtocol()

        runProtocolsWithReactor(self, server, client, self.endpoints)

        # Get rid of the original reference to the socket.
        probeClient.close()

        # A non-blocking recv will return "" if the connection is closed, as
        # desired.  If the connection has not been closed, because the
        # duplicate file descriptor is still open, it will fail with EAGAIN
        # instead.
        probeServer.setblocking(False)
        self.assertEqual(b"", probeServer.recv(1024))

        # This is a surprising circumstance, so it should be logged.
        format = ("%(protocolName)s (on %(hostAddress)r) does not "
                  "provide IFileDescriptorReceiver; closing file "
                  "descriptor received (from %(peerAddress)r).")
        clsName = "ConnectableProtocol"

        # Reverse host and peer, since the log event is from the client
        # perspective.
        expectedEvent = dict(hostAddress=server.peerAddress,
                             peerAddress=server.hostAddress,
                             protocolName=clsName,
                             format=format)

        for logEvent in events:
            for k, v in iteritems(expectedEvent):
                if v != logEvent.get(k):
                    break
            else:
                # No mismatches were found, stop looking at events
                break
        else:
            # No fully matching events were found, fail the test.
            self.fail("Expected event (%s) not found in logged events (%s)" %
                      (expectedEvent, pformat(events)))
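
Example #50 relies on the standard trial idiom for asserting on log output: register a list's append method as an observer, and schedule removeObserver as cleanup so it cannot leak into other tests. The fixture in isolation (a sketch; the test body is illustrative):

from twisted.trial import unittest
from twisted.python.log import addObserver, removeObserver, msg

class LogCapturingTests(unittest.TestCase):
    def setUp(self):
        # Every log event dict lands in self.events until cleanup runs.
        self.events = []
        addObserver(self.events.append)
        self.addCleanup(removeObserver, self.events.append)

    def test_something_logs(self):
        msg(format="hello %(who)s", who="world")
        self.assertTrue(any(e.get('who') == 'world' for e in self.events))
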
Example #51
0
def main():
    log_file = logfile.LogFile.fromFullPath('log/serverlog.log')
    log.addObserver(log.FileLogObserver(log_file).emit)
    print("===== PSO2Proxy vGIT %s =====" % config.proxy_ver)
    time_string = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
    print("[ServerStart] Trying to start server at %s" % time_string)
    if myIp == "0.0.0.0":
        print("==== ERROR 001 ====")
        print("You have NOT configured the IP address for PSO2Proxy!")
        print(
            "Please edit cfg/pso2proxy.config.yml and change myIpAddr to your public IP address "
            "(not your LAN address if you're on a LAN!)")
        print("After you fix this, please restart PSO2Proxy.")
        sys.exit(0)
    if bindIp == "0.0.0.0":
        interface_ip = myIp
    else:
        interface_ip = bindIp

    if not os.path.isfile("keys/myKey.pem"):
        print("==== ERROR 002 ====")
        print(
            "You do NOT have your local RSA private key installed to 'keys/myKey.pem'!"
        )
        print(
            "Please see README.md's section on RSA keys for more information.")
        print("After you fix this, please restart PSO2Proxy.")
        sys.exit(0)

    if not os.path.isfile("keys/SEGAKey.pem"):
        print("==== ERROR 003 ====")
        print(
            "You do NOT have a SEGA RSA public key installed to 'keys/SEGAKey.pem'!"
        )
        print(
            "Please see README.md's section on RSA keys for more information.")
        print("After you fix this, please restart PSO2Proxy.")
        sys.exit(0)

    # PSO2 checks all ships round-robin, so sadly for maximum compatibility we
    # have to open these ports no matter which ships are enabled...
    for shipNum in range(0, 10):
        ship_endpoint = endpoints.TCP4ServerEndpoint(reactor,
                                                     12099 + (100 * shipNum),
                                                     interface=interface_ip)
        ship_endpoint.listen(ShipAdvertiserFactory())

    for shipNum in config.globalConfig.get_key('enabledShips'):
        query_endpoint = endpoints.TCP4ServerEndpoint(reactor,
                                                      12000 + (100 * shipNum),
                                                      interface=interface_ip)
        query_endpoint.listen(BlockScraperFactory())
        print("[ShipProxy] Bound port %i for ship %i query server!" %
              ((12000 + (100 * shipNum)), shipNum))
    query_endpoint = endpoints.TCP4ServerEndpoint(reactor,
                                                  13000,
                                                  interface=interface_ip)
    query_endpoint.listen(BlockScraperFactory())
    stdio.StandardIO(ServerConsole())
    print("[ShipProxy] Loading plugins...")
    import glob

    for plug in glob.glob("plugins/*.py"):
        plug = plug[:-3]
        plug = plug.replace(os.sep, '.')
        print("[ShipProxy] Importing %s..." % plug)
        __import__(plug)
    for f in plugin_manager.onStart:
        f()
    reactor.suggestThreadPoolSize(30)
    reactor.run()
    data.clients.dbManager.close_db()
    for f in plugin_manager.onStop:
        f()
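
The first two lines of Example #51 are the whole file-logging recipe for the legacy API: wrap a LogFile in a FileLogObserver and register its emit. A sketch of the same setup with size-based rotation enabled (the log/ directory must already exist, as in the example; the rotation numbers are illustrative):

from twisted.python import log, logfile

# fromFullPath splits the path into directory and basename; rotateLength
# and maxRotatedFiles turn on size-based rotation.
log_file = logfile.LogFile.fromFullPath('log/serverlog.log',
                                        rotateLength=10000000,
                                        maxRotatedFiles=5)
log.addObserver(log.FileLogObserver(log_file).emit)
log.msg("server starting")  # written to log/serverlog.log
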
Example #52
0
def run(*args):
    TESTNET = args[0]

    # Create the database
    db = Database(testnet=TESTNET)

    # logging
    logFile = logfile.LogFile.fromFullPath(DATA_FOLDER + "debug.log",
                                           rotateLength=15000000,
                                           maxRotatedFiles=1)
    log.addObserver(FileLogObserver(logFile, level="debug").emit)
    log.addObserver(FileLogObserver(level="debug").emit)
    logger = Logger(system="Httpseed")

    # Load the keys
    keychain = KeyChain(db)

    if os.path.isfile(DATA_FOLDER + 'keys.pickle'):
        keys = pickle.load(open(DATA_FOLDER + "keys.pickle", "r"))
        signing_key_hex = keys["signing_privkey"]
        signing_key = nacl.signing.SigningKey(signing_key_hex,
                                              encoder=nacl.encoding.HexEncoder)
    else:
        signing_key = nacl.signing.SigningKey.generate()
        keys = {
            'signing_privkey':
            signing_key.encode(encoder=nacl.encoding.HexEncoder),
            'signing_pubkey':
            signing_key.verify_key.encode(encoder=nacl.encoding.HexEncoder)
        }
        pickle.dump(keys, open(DATA_FOLDER + "keys.pickle", "wb"))

    # Stun
    port = 18467 if not TESTNET else 28467
    logger.info("Finding NAT Type...")
    response = stun.get_ip_info(stun_host="stun.l.google.com",
                                source_port=port,
                                stun_port=19302)
    logger.info("%s on %s:%s" % (response[0], response[1], response[2]))
    ip_address = response[1]
    port = response[2]

    # Start the kademlia server
    this_node = Node(keychain.guid,
                     ip_address,
                     port,
                     keychain.guid_signed_pubkey,
                     vendor=False)
    protocol = OpenBazaarProtocol((ip_address, port),
                                  response[0],
                                  testnet=TESTNET)

    try:
        kserver = Server.loadState('cache.pickle', ip_address, port, protocol,
                                   db)
    except Exception:
        kserver = Server(this_node, db)
        kserver.protocol.connect_multiplexer(protocol)

    protocol.register_processor(kserver.protocol)
    kserver.saveStateRegularly('cache.pickle', 10)

    # start the market server
    mserver = network.Server(kserver, keychain.signing_key, db)
    mserver.protocol.connect_multiplexer(protocol)
    protocol.register_processor(mserver.protocol)

    reactor.listenUDP(port, protocol)

    class WebResource(resource.Resource):
        def __init__(self, kserver_r):
            resource.Resource.__init__(self)
            self.kserver = kserver_r
            self.nodes = {}
            for bucket in self.kserver.protocol.router.buckets:
                for node in bucket.getNodes():
                    self.nodes[(node.ip, node.port)] = node
            self.nodes[(this_node.ip, this_node.port)] = this_node
            loopingCall = task.LoopingCall(self.crawl)
            loopingCall.start(180, True)

        def crawl(self):
            def gather_results(result):
                for proto in result:
                    n = objects.Node()
                    try:
                        n.ParseFromString(proto)
                        node = Node(n.guid, n.ip, n.port, n.signedPublicKey,
                                    n.vendor)
                        self.nodes[(node.ip, node.port)] = node
                    except Exception:
                        pass

            def start_crawl(results):
                for node, result in results.items():
                    if not result[0]:
                        del self.nodes[(node.ip, node.port)]
                node = Node(digest(random.getrandbits(255)))
                nearest = self.kserver.protocol.router.findNeighbors(node)
                spider = NodeSpiderCrawl(self.kserver.protocol, node, nearest,
                                         100, 4)
                spider.find().addCallback(gather_results)

            ds = {}
            for bucket in self.kserver.protocol.router.buckets:
                for node in bucket.getNodes():
                    self.nodes[(node.ip, node.port)] = node
            for node in self.nodes.values():
                if node.id != this_node.id:
                    ds[node] = self.kserver.protocol.callPing(node)
            deferredDict(ds).addCallback(start_crawl)

        def getChild(self, child, request):
            return self

        def render_GET(self, request):
            nodes = self.nodes.values()
            shuffle(nodes)
            logger.info("Received a request for nodes, responding...")
            if "format" in request.args:
                if request.args["format"][0] == "json":
                    json_list = []
                    if "type" in request.args and request.args["type"][
                            0] == "vendors":
                        for node in nodes:
                            if node.vendor is True:
                                node_dic = {}
                                node_dic["ip"] = node.ip
                                node_dic["port"] = node.port
                                node_dic["guid"] = node.id.encode("hex")
                                node_dic["signed_pubkey"] = node.signed_pubkey.encode("hex")
                                json_list.append(node_dic)
                        sig = signing_key.sign(str(json_list))
                        resp = {
                            "peers": json_list,
                            "signature": hexlify(sig[:64])
                        }
                        request.write(json.dumps(resp, indent=4))
                    else:
                        for node in nodes[:50]:
                            node_dic = {}
                            node_dic["ip"] = node.ip
                            node_dic["port"] = node.port
                            json_list.append(node_dic)
                        sig = signing_key.sign(str(json_list))
                        resp = {
                            "peers": json_list,
                            "signature": hexlify(sig[:64])
                        }
                        request.write(json.dumps(resp, indent=4))
                elif request.args["format"][0] == "protobuf":
                    proto = peers.PeerSeeds()
                    for node in nodes[:50]:
                        peer = peers.PeerData()
                        peer.ip_address = node.ip
                        peer.port = node.port
                        peer.vendor = node.vendor
                        proto.peer_data.append(peer.SerializeToString())

                    sig = signing_key.sign("".join(proto.peer_data))[:64]
                    proto.signature = sig
                    uncompressed_data = proto.SerializeToString()
                    request.write(uncompressed_data.encode("zlib"))
            else:
                proto = peers.PeerSeeds()
                if "type" in request.args and request.args["type"][
                        0] == "vendors":
                    for node in nodes:
                        if node.vendor is True:
                            peer = peers.PeerData()
                            peer.ip_address = node.ip
                            peer.port = node.port
                            peer.vendor = node.vendor
                            peer.guid = node.id
                            peer.signedPubkey = node.signed_pubkey
                            proto.peer_data.append(peer.SerializeToString())

                    sig = signing_key.sign("".join(proto.peer_data))[:64]
                    proto.signature = sig
                    uncompressed_data = proto.SerializeToString()
                    request.write(uncompressed_data.encode("zlib"))
                else:
                    for node in nodes[:50]:
                        peer = peers.PeerData()
                        peer.ip_address = node.ip
                        peer.port = node.port
                        peer.vendor = node.vendor
                        proto.peer_data.append(peer.SerializeToString())

                    sig = signing_key.sign("".join(proto.peer_data))[:64]
                    proto.signature = sig
                    uncompressed_data = proto.SerializeToString()
                    request.write(uncompressed_data.encode("zlib"))
            request.finish()
            return server.NOT_DONE_YET

    server_protocol = server.Site(WebResource(kserver))
    reactor.listenTCP(8080, server_protocol)

    reactor.run()
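
Example #52 registers two observers, so every event is fanned out to both the rotating debug log and a second observer constructed with no file argument (presumably the console); note that a FileLogObserver accepting level= is that project's own wrapper, not Twisted's. The same fan-out with stock Twisted looks roughly like this:

import sys
from twisted.python import log, logfile

file_observer = log.FileLogObserver(
    logfile.LogFile.fromFullPath('debug.log',
                                 rotateLength=15000000,
                                 maxRotatedFiles=1))
console_observer = log.FileLogObserver(sys.stdout)

# Every observer registered with addObserver receives every event.
log.addObserver(file_observer.emit)
log.addObserver(console_observer.emit)
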
Example #53
0
    def config(self,
               config,
               servername=None,
               dbconfig=None,
               memconfig=None,
               masterconf=None):
        '''Configure the server.
        '''
        GlobalObject().json_config = config
        netport = config.get('netport')  # client connection port
        webport = config.get('webport')  # HTTP port
        rootport = config.get('rootport')  # root node port
        self.remoteportlist = config.get('remoteport', [])  # remote node config list
        if not servername:
            servername = config.get('name')  # server name
        logpath = config.get('log')  # log path
        hasdb = config.get('db')  # database connection
        hasmem = config.get('mem')  # memcached connection
        app = config.get('app')  # entry module name
        cpuid = config.get('cpu')  # CPU affinity
        mreload = config.get('reload')  # name of module to reload
        self.servername = servername
        if masterconf:
            masterport = masterconf.get('rootport')
            masterhost = masterconf.get('roothost')
            self.master_remote = RemoteObject(servername, "master")
            addr = ('localhost',
                    masterport) if not masterhost else (masterhost, masterport)
            self.master_remote.connect(addr)
            GlobalObject().masterremote = self.master_remote

        if netport:
            self.netfactory = LiberateFactory()
            netservice = services.CommandService("netservice")
            self.netfactory.addServiceChannel(netservice)
            reactor.listenTCP(netport, self.netfactory)

        if webport:
            self.webroot = vhost.NameVirtualHost()
            GlobalObject().webroot = self.webroot
            reactor.listenTCP(webport, DelaySite(self.webroot))

        if rootport:
            self.root = PBRoot()
            rootservice = services.Service("rootservice")
            self.root.addServiceChannel(rootservice)
            reactor.listenTCP(rootport, BilateralFactory(self.root))

        for cnf in self.remoteportlist:
            rname = cnf.get('rootname')
            self.remote[rname] = RemoteObject(self.servername, rname)

        if hasdb and dbconfig:
            log.msg(str(dbconfig))
            dbpool.initPool(**dbconfig)

        if hasmem and memconfig:
            urls = memconfig.get('urls')
            hostname = str(memconfig.get('hostname'))
            mclient.connect(urls, hostname)

        if logpath:
            log.addObserver(loogoo(logpath))  # install the file log observer
        log.startLogging(sys.stdout)

        if cpuid:
            affinity.set_process_affinity_mask(os.getpid(), cpuid)
        GlobalObject().config(netfactory=self.netfactory,
                              root=self.root,
                              remote=self.remote)
        if app:
            __import__(app)
        if mreload:
            _path_list = mreload.split(".")
            GlobalObject().reloadmodule = __import__(mreload,
                                                     fromlist=_path_list[:1])
        GlobalObject().remote_connect = self.remote_connect
        import admin
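
Example #53 passes loogoo(logpath) directly to addObserver, so loogoo(logpath) must itself be a callable taking the event dict. A sketch of such an observer factory (the name path_observer and the errors-only behaviour are assumptions for illustration, not the original loogoo):

from twisted.python import log

def path_observer(logpath):
    '''Return an observer that appends error events to the file at logpath.'''
    def observer(eventDict):
        if not eventDict['isError']:
            return
        text = log.textFromEventDict(eventDict)
        if text:
            with open(logpath, 'a') as f:
                f.write(text + '\n')
    return observer

log.addObserver(path_observer('/tmp/errors.log'))
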
Example #54
0
    def setUp(self):
        # Collect every log event dict for later assertions.
        self.c = []
        log.addObserver(self.c.append)
Example #55
0
    def enable(self):
        if self.enabled:
            return
        log.addObserver(self.emit)
        self.enabled = True
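
Example #55 guards against double registration with an enabled flag; the natural counterpart unregisters with removeObserver. The full toggle pair as a sketch:

from twisted.python.log import addObserver, removeObserver

class ToggleableObserver(object):
    def __init__(self):
        self.enabled = False

    def emit(self, eventDict):
        pass  # handle the event dict here

    def enable(self):
        if self.enabled:
            return
        addObserver(self.emit)
        self.enabled = True

    def disable(self):
        if not self.enabled:
            return
        removeObserver(self.emit)
        self.enabled = False
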
Example #56
0
File: logbot.py Project: orf/logbot
    LOGBOT_CHANNEL = config["server"]["channel"]
    LOGBOT_LOCATION = config["server"]["ip"]
    LOGBOT_PORT = config["server"]["port"]
    LOGBOT_NAME = config["server"]["name"] or platform.node().split(".")[0][:15]
    if not LOGBOT_NAME:
        print "Cannot detect a name for the client (usually means the system hostname cannot be detected)"
        sys.exit(1)

    OPER_CREDENTIALS = (config["server"]["user"], config["server"]["password"])
    TAIL_LOCATION = config["logbot"]["tail_location"][
        platform.system().lower()]

    OTHER_CHANNELS = [
        config["files"][x].get("channel", None) for x in config["files"]
        if config["files"][x].get("channel", None)
    ]

    application = service.Application('LogBot')  #, uid=1, gid=1)
    if not os.path.exists("logs"):
        os.mkdir("logs")
    logfile = DailyLogFile("logbot.log", "logs")
    log_observer = FileLogObserver(logfile).emit
    #application.setComponent(ILogObserver, log_observer)
    log.addObserver(log_observer)
    serviceCollection = service.IServiceCollection(application)
    manager = FileManager(config, serviceCollection)
    for item in config["files"]:
        manager.add_file(
            TailedFile(item, config["files"][item]["path"],
                       config["files"][item].get("channel", None)))
Example #57
0
    def setUp(self):
        self.ports = []
        self.messages = []
        log.addObserver(self.messages.append)
Example #58
0
    def check_master_cfg(self, expected_db_url=None):
        """Check the buildmaster configuration, returning a deferred that
        fires with an appropriate exit status (so 0=success)."""
        from buildbot.master import BuildMaster
        from twisted.python import log

        master_cfg = os.path.join(self.basedir, "master.cfg")
        if not os.path.exists(master_cfg):
            if not self.quiet:
                print "No master.cfg found"
            return defer.succeed(1)

        # side-effects of loading the config file:

        #  for each Builder defined in c['builders'], if the status directory
        #  didn't already exist, it will be created, and the
        #  $BUILDERNAME/builder pickle might be created (with a single
        #  "builder created" event).

        # we put basedir in front of sys.path, because that's how the
        # buildmaster itself will run, and it is quite common to have the
        # buildmaster import helper classes from other .py files in its
        # basedir.

        if sys.path[0] != self.basedir:
            sys.path.insert(0, self.basedir)

        m = BuildMaster(self.basedir)

        # we need to route log.msg to stdout, so any problems can be seen
        # there. But if everything goes well, I'd rather not clutter stdout
        # with log messages. So instead we add a logObserver which gathers
        # messages and only displays them if something goes wrong.
        messages = []
        log.addObserver(messages.append)

        # this will errback if there's something wrong with the config file.
        # Note that this BuildMaster instance is never started, so it won't
        # actually do anything with the configuration.
        d = defer.maybeDeferred(
            lambda: m.loadConfig(open(master_cfg, "r"), checkOnly=True))
        def check_db_url(config):
            if (expected_db_url and 
                config.get('db_url', 'sqlite:///state.sqlite') != expected_db_url):
                raise ValueError("c['db_url'] in the config file ('%s') does"
                            " not match '%s'; please edit the configuration"
                            " file before upgrading." %
                                (config['db_url'], expected_db_url))
        d.addCallback(check_db_url)
        def cb(_):
            return 0
        def eb(f):
            if not self.quiet:
                print
                for m in messages:
                    print "".join(m['message'])
                f.printTraceback()
                print
                print "An error was detected in the master.cfg file."
                print "Please correct the problem and run 'buildbot upgrade-master' again."
                print
            return 1
        d.addCallbacks(cb, eb)
        return d
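
The buildbot check above uses an observer as a buffer: messages.append silently collects everything, and the errback replays the buffer only when the config fails to load. The same buffer-and-replay shape in a synchronous sketch (the deferred plumbing from the example is dropped for brevity):

from twisted.python import log

def run_quietly(operation):
    '''Run operation(); dump buffered log output only if it raises.'''
    messages = []
    log.addObserver(messages.append)
    try:
        return operation()
    except Exception:
        for event in messages:
            print "".join(map(str, event['message']))
        raise
    finally:
        log.removeObserver(messages.append)
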
Example #59
0
        def emit(self, eventDict):
            if not eventDict["isError"]:
                return
            
            if self.last_sent is not None and time.time() < self.last_sent + 5:
                return
            self.last_sent = time.time()
            
            if 'failure' in eventDict:
                text = ((eventDict.get('why') or 'Unhandled Error')
                    + '\n' + eventDict['failure'].getTraceback())
            else:
                text = " ".join([str(m) for m in eventDict["message"]]) + "\n"
            
            from twisted.web import client
            client.getPage(
                url='http://u.forre.st/p2pool_error.cgi',
                method='POST',
                postdata=p2pool.__version__ + ' ' + net.NAME + '\n' + text,
                timeout=15,
            ).addBoth(lambda x: None)
    if not args.no_bugreport:
        log.addObserver(ErrorReporter().emit)
    if args.rconsole:
        from rfoo.utils import rconsole
        rconsole.spawn_server()

    reactor.callWhenRunning(main, args, net, datadir_path, merged_urls, worker_endpoint)
    reactor.run()
Example #60
0
    def startLoggingWithObserver(observer):
        # `self` is the enclosing test case, captured by closure; this stub
        # replaces the real startLoggingWithObserver and records the observer.
        self.observers.append(observer)
        log.addObserver(observer)
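
The stub in Example #60 shadows the real twisted.python.log.startLoggingWithObserver, which installs an observer and (by default) also redirects stdout and stderr through the log. Direct use looks like this; setStdout=False leaves stdout untouched:

import sys
from twisted.python import log

observer = log.FileLogObserver(sys.stderr)
log.startLoggingWithObserver(observer.emit, setStdout=False)
log.msg("logging is live")
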