Example 1
    def start(self):
        if self.started:
            return
        
        self.init()

        env.start(True, isLocal=self.isLocal)
        self.scheduler.start()
        self.started = True
        atexit.register(self.stop)

        def handler(signm, frame):
            logger.error("got signal %d, exit now", signm)
            self.scheduler.shutdown()

        signal.signal(signal.SIGTERM, handler)
        signal.signal(signal.SIGHUP, handler)
        signal.signal(signal.SIGABRT, handler)
        signal.signal(signal.SIGQUIT, handler)
        
        try:
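            # Optional remote debug console; silently skipped when rfoo
            # is not installed.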
            from rfoo.utils import rconsole
            rconsole.spawn_server(locals(), 0)
        except ImportError:
            pass
Example 2
def init_rconsole_server():
    try:
        from rfoo.utils import rconsole

        rconsole.spawn_server()
    except ImportError:
        logger.error("No socket opened for debugging -> please install rfoo")
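Once a process has spawned the server, you attach to it from another shell. A minimal sketch of the client side, assuming rfoo is installed and the server is listening on its default local port (the rfoo distribution also ships an `rconsole` command-line script that does the same thing):

    from rfoo.utils import rconsole

    # Open an interactive prompt whose namespace lives inside the
    # remote process that called spawn_server().
    rconsole.interact()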
Example 3
    def __init__(self):

        import config
        import tz_info

        globals.system_log = logger.create("service")
        globals.access_log = logger.create("access")

        globals.system_log.info("*** CybroScgiServer %s started ***" %
                                const.ApplicationVersion)

        globals.tz_info = tz_info.TimezoneInfo()
        globals.sys_status = sys_status.SystemStatus()
        globals.controllers = cybrocontrollers.CybroControllers()
        globals.config = config.GlobalConfig()
        globals.transaction_pool = transaction_pool.TransactionPool()

        if sys_config.DebugRConsole:
            from rfoo.utils import rconsole
            rconsole.spawn_server()
            globals.system_log.warning("Debug rconsole server spawned.")

        if sys_config.DebugTcpServer:
            import tcp_logger_server
            globals.tcp_log_server = tcp_logger_server.create(
                sys_config.DebugTcpServerPort)
Example 4
                    def defer_start():
                        self.logger.debug("Spawning rconsole")
                        from rfoo.utils import rconsole

                        rconsole.spawn_server()
                        self.rconsole_started = True
                        self.logger.debug("rconsole started")
Example 5
    def start(self):
        if self.started:
            return

        self.init()

        env.start(True, isLocal=self.isLocal)
        self.scheduler.start()
        self.started = True
        atexit.register(self.stop)

        def handler(signm, frame):
            logger.error("got signal %d, exit now", signm)
            self.scheduler.shutdown()

        try:
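            # Installing signal handlers fails (e.g. with ValueError) when
            # not running in the main thread; tolerate that here.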
            signal.signal(signal.SIGTERM, handler)
            signal.signal(signal.SIGHUP, handler)
            signal.signal(signal.SIGABRT, handler)
            signal.signal(signal.SIGQUIT, handler)
        except:
            pass

        try:
            from rfoo.utils import rconsole
            rconsole.spawn_server(locals(), 0)
        except ImportError:
            pass
Example 6
    def __init__(self, uid, cr):
        super(osv.osv, self).__init__(uid, cr)
        orm['self'] = self
        orm['cr'] = cr
        orm['uid'] = uid
        rconsole.spawn_server()

        print '>>>>>>>>>>>>>>>>>>>>>>>>>> Console Loaded <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'
        print '>>>>>>>>>>>>>>>> access with openerp-console/rconsole <<<<<<<<<<<<<<<<<<<<<<<'
Example 7
def spawn_rconsole(env):
    rfoo_logger = logging.getLogger('rfoo')
    rfoo_logger.disabled = 1
    try:
        from rfoo.utils import rconsole
        import rfoo._rfoo
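        # Point the logging module referenced inside rfoo at the disabled
        # logger so console RPC traffic is not logged.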
        rfoo._rfoo.logging = rconsole.logging = rfoo_logger
        rconsole.spawn_server(env, 0)
    except ImportError:
        pass
Example 8
def start_server(options, server_socket, static_files=config.STATIC_FILES):
    """Start a new http server

    Called by the worker
    """
    # We only import wsgi here because this import some invenio components
    # and we do not want anything imported from invenio in the parent
    import wsgi

    wsgi.replace_error_handler()
    wsgi.wrap_warn()

    signal.signal(signal.SIGUSR1, print_traceback)

    # Hook debugging console
    if config.USE_CONSOLE or options.use_console:
        from rfoo.utils import rconsole
        rconsole.spawn_server()

    static = dict([(k, config.INSTALL_PATH + v)
                   for k, v in static_files.items()])

    wsgi_app = wsgi.application

    if options.use_pdb:
        import pdb

        def pdb_on_error(f, *args, **kwargs):
            try:
                return f(*args, **kwargs)
            except:
                pdb.post_mortem()

        wsgi_app = partial(pdb_on_error, wsgi_app)

    def timed_out_request(f, *args, **kwargs):
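        # Soft timeout: SIGALRM fires after REQUEST_TIMEOUT seconds, but the
        # handler below only logs a warning; the request itself keeps running.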
        def handler(signum, frame):
            _log(
                'info', "\033[01;31mYour request took more than %s"
                " seconds to process\033[0m" % config.REQUEST_TIMEOUT)

        signal.signal(signal.SIGALRM, handler)
        signal.alarm(config.REQUEST_TIMEOUT)
        try:
            return f(*args, **kwargs)
        finally:
            signal.alarm(0)

    wsgi_app = partial(timed_out_request, wsgi_app)

    run_simple(server_socket,
               wsgi_app,
               use_debugger=True,
               use_evalex=True,
               static_files=static)
Example 9
def spawn_rconsole(*args):
    """
    USR2-activated debug console
    """
    try:
        from rfoo.utils import rconsole
    except ImportError:
        logger.exception("can't import rconsole - python-rfoo not installed?")
        return False
    try:
        rconsole.spawn_server(port=rconsole_port)
    except:
        logger.exception("Can't spawn rconsole!")
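The `*args` signature lets `spawn_rconsole` double as a signal handler, which is what the docstring's "USR2-activated" refers to. A minimal sketch of that wiring; the registration below is an assumption, not code from the original module:

    import signal

    # Signal handlers receive (signum, frame), which *args absorbs;
    # sending SIGUSR2 to the process then spawns the debug console.
    signal.signal(signal.SIGUSR2, spawn_rconsole)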
Example 10
    def __init__(self, master=None):
        
        if 'MESOS_SLAVE_PID' in os.environ and 'DRUN_SIZE' not in os.environ:
            from executor import run
            run()
            sys.exit(0)
        
        options = parse_options()
        self.options = options
        master = master or options.master

        if master == 'local':
            self.scheduler = LocalScheduler()
            self.isLocal = True
        elif master == 'process':
            self.scheduler = MultiProcessScheduler(options.parallel)
            self.isLocal = True
        else:
            if master == 'mesos':
                master = os.environ.get('MESOS_MASTER')
                if not master:
                    raise Exception("invalid uri of mesos master: %s" % master)
            if master.startswith('mesos://'):
                if '@' in master:
                    master = master[master.rfind('@')+1:]
                else:
                    master = master[master.rfind('//')+2:]
            elif master.startswith('zoo://'):
                master = 'zk' + master[3:]

            if ':' not in master:
                master += ':5050'
            self.scheduler = MesosScheduler(master, options) 
            self.isLocal = False
            
        self.master = master

        if options.parallel:
            self.defaultParallelism = options.parallel
        else:
            self.defaultParallelism = self.scheduler.defaultParallelism()
        self.defaultMinSplits = max(self.defaultParallelism, 2)
      
        try:
            from rfoo.utils import rconsole
            rconsole.spawn_server(locals(), 0)
        except ImportError:
            pass

        self.started = False
Example 11
    def wsgi_serve(self, with_reloader):
        if not with_reloader or self._reloader_key not in os.environ:
            self.logger.info('Listening on: %s:%s' % (self.host, self.port))

        if self._reloader_key in os.environ:
            self.logger.info('Monitoring code files')
            reloader.install()

        if self._reloader_key in os.environ or not with_reloader:
            if self.use_rfoo:
                from rfoo.utils import rconsole
                rconsole.spawn_server(self.rfoo_namespace, self.rfoo_port)
                self.logger.info('Rfoo listening on port %i' % self.rfoo_port)
            self.logger.info('http ready')

            server = WSGIServer(self.application, self.host,
                                self.port, get_http_logger(self.logger))
            server.serve_forever()
            return

        self.restart_with_reloader()
Example 12
    def start(self):
        def shutdown():
            self.stop()
            try:
                import dpark.web
                dpark.web.stop(self.web_port)
            except ImportError:
                pass

        if self.started:
            return

        self.init()

        env.start()
        self.scheduler.start()
        self.started = True
        _shutdown_handlers.append(shutdown)

        try:
            from rfoo.utils import rconsole
            rconsole.spawn_server(locals(), 0)
        except ImportError:
            pass
Example 13
    driver = MesosSchedulerDriver(
        sched, sched.framework, options.master, use_addict=True
    )

    def handler(signm, frame):
        logger.warning('got signal %d, exit now', signm)
        sched.stop(EXIT_SIGNAL)

    signal.signal(signal.SIGTERM, handler)
    signal.signal(signal.SIGHUP, handler)
    signal.signal(signal.SIGABRT, handler)
    signal.signal(signal.SIGQUIT, handler)

    try:
        from rfoo.utils import rconsole
        rconsole.spawn_server(locals(), 0)
    except ImportError:
        pass

    try:
        driver.start()
        sched.run(driver)
    except KeyboardInterrupt:
        logger.warning('stopped by KeyboardInterrupt')
        sched.stop(EXIT_KEYBORAD)
    except Exception as e:
        import traceback
        logger.warning('catch unexpected Exception, exit now. %s',
                       traceback.format_exc())
        sched.stop(EXIT_EXCEPTION)
    finally:
Example 14
import sys
import time
import timeit
import os
import platform
import subprocess
import numpy
import fabio
import os.path as op
import logging
sys.path.append(op.join(op.dirname(op.dirname(op.abspath(__file__))), "test"))
import utilstest

try:
    from rfoo.utils import rconsole
    rconsole.spawn_server()
except ImportError:
    print("No socket opened for debugging -> please install rfoo")

# We use the locally built version of pyFAI
pyFAI = utilstest.UtilsTest.pyFAI
ocl = pyFAI.opencl.ocl
from pyFAI.gui_utils import pylab, update_fig

ds_list = ["Pilatus1M.poni", "halfccd.poni", "Frelon2k.poni", "Pilatus6M.poni", "Mar3450.poni", "Fairchild.poni"]
datasets = {"Fairchild.poni":utilstest.UtilsTest.getimage("1880/Fairchild.edf"),
            "halfccd.poni":utilstest.UtilsTest.getimage("1882/halfccd.edf"),
            "Frelon2k.poni":utilstest.UtilsTest.getimage("1881/Frelon2k.edf"),
            "Pilatus6M.poni":utilstest.UtilsTest.getimage("1884/Pilatus6M.cbf"),
            "Pilatus1M.poni":utilstest.UtilsTest.getimage("1883/Pilatus1M.edf"),
            "Mar3450.poni":utilstest.UtilsTest.getimage("2201/LaB6_260210.mar3450")
Example 15
def run():
    if not hasattr(tcp.Client, 'abortConnection'):
        print "Twisted doesn't have abortConnection! Upgrade to a newer version of Twisted to avoid memory leaks!"
        print 'Pausing for 3 seconds...'
        time.sleep(3)

    realnets = dict((name, net) for name, net in networks.nets.iteritems()
                    if '_testnet' not in name)

    parser = fixargparse.FixedArgumentParser(
        description='p2pool (version %s)' % (p2pool.__version__, ),
        fromfile_prefix_chars='@')
    parser.add_argument('--version',
                        action='version',
                        version=p2pool.__version__)
    parser.add_argument('--net',
                        help='use specified network (default: dash)',
                        action='store',
                        choices=sorted(realnets),
                        default='dash',
                        dest='net_name')
    parser.add_argument('--testnet',
                        help='''use the network's testnet''',
                        action='store_const',
                        const=True,
                        default=False,
                        dest='testnet')
    parser.add_argument('--debug',
                        help='enable debugging mode',
                        action='store_const',
                        const=True,
                        default=False,
                        dest='debug')
    parser.add_argument('--rconsole',
                        help='enable rconsole debugging mode (requires rfoo)',
                        action='store_const',
                        const=True,
                        default=False,
                        dest='rconsole')

    parser.add_argument(
        '-a',
        '--address',
        help=
        'generate payouts to this address (default: <address requested from dashd>), or (dynamic)',
        type=str,
        action='store',
        default=None,
        dest='address')
    parser.add_argument(
        '-i',
        '--numaddresses',
        help=
        'number of dash auto-generated addresses to maintain for getwork dynamic address allocation',
        type=int,
        action='store',
        default=2,
        dest='numaddresses')
    parser.add_argument(
        '-t',
        '--timeaddresses',
        help=
        'seconds between acquisition of new address and removal of single old (default: 2 days or 172800s)',
        type=int,
        action='store',
        default=172800,
        dest='timeaddresses')
    parser.add_argument(
        '--datadir',
        help=
        'store data in this directory (default: <directory run_p2pool.py is in>/data)',
        type=str,
        action='store',
        default=None,
        dest='datadir')
    parser.add_argument('--logfile',
                        help='''log to this file (default: data/<NET>/log)''',
                        type=str,
                        action='store',
                        default=None,
                        dest='logfile')
    parser.add_argument(
        '--web-static',
        help=
        'use an alternative web frontend in this directory (otherwise use the built-in frontend)',
        type=str,
        action='store',
        default=None,
        dest='web_static')
    parser.add_argument(
        '--merged',
        help=
        'call getauxblock on this url to get work for merged mining (example: http://ncuser:[email protected]:10332/)',
        type=str,
        action='append',
        default=[],
        dest='merged_urls')
    parser.add_argument('--coinbtext',
                        help='append this text to the coinbase',
                        type=str,
                        action='append',
                        default=[],
                        dest='coinb_texts')
    parser.add_argument(
        '--give-author',
        metavar='DONATION_PERCENTAGE',
        help=
        'donate this percentage of work towards the development of p2pool (default: 1.0)',
        type=float,
        action='store',
        default=1.0,
        dest='donation_percentage')
    parser.add_argument(
        '--iocp',
        help=
        'use Windows IOCP API in order to avoid errors due to large number of sockets being open',
        action='store_true',
        default=False,
        dest='iocp')
    parser.add_argument(
        '--irc-announce',
        help='announce any blocks found on irc://irc.freenode.net/#p2pool',
        action='store_true',
        default=False,
        dest='irc_announce')
    parser.add_argument(
        '--no-bugreport',
        help='disable submitting caught exceptions to the author',
        action='store_true',
        default=False,
        dest='no_bugreport')

    p2pool_group = parser.add_argument_group('p2pool interface')
    p2pool_group.add_argument(
        '--p2pool-port',
        metavar='PORT',
        help=
        'use port PORT to listen for connections (forward this port from your router!) (default: %s)'
        % ', '.join('%s:%i' % (name, net.P2P_PORT)
                    for name, net in sorted(realnets.items())),
        type=int,
        action='store',
        default=None,
        dest='p2pool_port')
    p2pool_group.add_argument(
        '-n',
        '--p2pool-node',
        metavar='ADDR[:PORT]',
        help=
        'connect to existing p2pool node at ADDR listening on port PORT (defaults to default p2pool P2P port) in addition to builtin addresses',
        type=str,
        action='append',
        default=[],
        dest='p2pool_nodes')
    parser.add_argument(
        '--disable-upnp',
        help=
        '''don't attempt to use UPnP to forward p2pool's P2P port from the Internet to this computer''',
        action='store_false',
        default=True,
        dest='upnp')
    p2pool_group.add_argument(
        '--max-conns',
        metavar='CONNS',
        help='maximum incoming connections (default: 40)',
        type=int,
        action='store',
        default=40,
        dest='p2pool_conns')
    p2pool_group.add_argument('--outgoing-conns',
                              metavar='CONNS',
                              help='outgoing connections (default: 6)',
                              type=int,
                              action='store',
                              default=6,
                              dest='p2pool_outgoing_conns')
    p2pool_group.add_argument(
        '--external-ip',
        metavar='ADDR[:PORT]',
        help=
        'specify your own public IP address instead of asking peers to discover it, useful for running dual WAN or asymmetric routing',
        type=str,
        action='store',
        default=None,
        dest='p2pool_external_ip')
    parser.add_argument(
        '--disable-advertise',
        help=
        '''don't advertise local IP address as being available for incoming connections. useful for running a dark node, along with multiple -n ADDR's and --outgoing-conns 0''',
        action='store_false',
        default=True,
        dest='advertise_ip')

    worker_group = parser.add_argument_group('worker interface')
    worker_group.add_argument(
        '-w',
        '--worker-port',
        metavar='PORT or ADDR:PORT',
        help=
        'listen on PORT on interface with ADDR for RPC connections from miners (default: all interfaces, %s)'
        % ', '.join('%s:%i' % (name, net.WORKER_PORT)
                    for name, net in sorted(realnets.items())),
        type=str,
        action='store',
        default=None,
        dest='worker_endpoint')
    worker_group.add_argument(
        '-f',
        '--fee',
        metavar='FEE_PERCENTAGE',
        help=
        '''charge workers mining to their own dash address (by setting their miner's username to a dash address) this percentage fee to mine on your p2pool instance. Amount displayed at http://127.0.0.1:WORKER_PORT/fee (default: 0)''',
        type=float,
        action='store',
        default=0,
        dest='worker_fee')
    worker_group.add_argument(
        '--miner-share-rate',
        metavar='SHARES_PER_MINUTE',
        help='number of pseudoshares per minute for each miner',
        type=float,
        action='store',
        default=None,
        dest='miner_share_rate')
    worker_group.add_argument(
        '--address-share-rate',
        metavar='SHARES_PER_MINUTE',
        help='number of pseudoshares per minute for each address',
        type=float,
        action='store',
        default=None,
        dest='address_share_rate')
    worker_group.add_argument('--min-difficulty',
                              metavar='DIFFICULTY',
                              help='minimum difficulty for miners',
                              type=float,
                              action='store',
                              default=0.01,
                              dest='min_difficulty')

    dashd_group = parser.add_argument_group('dashd interface')
    dashd_group.add_argument(
        '--dashd-config-path',
        metavar='DASHD_CONFIG_PATH',
        help='custom configuration file path (when dashd -conf option used)',
        type=str,
        action='store',
        default=None,
        dest='dashd_config_path')
    dashd_group.add_argument(
        '--dashd-address',
        metavar='DASHD_ADDRESS',
        help='connect to this address (default: 127.0.0.1)',
        type=str,
        action='store',
        default='127.0.0.1',
        dest='dashd_address')
    dashd_group.add_argument(
        '--dashd-rpc-port',
        metavar='DASHD_RPC_PORT',
        help=
        '''connect to JSON-RPC interface at this port (default: %s <read from dash.conf if password not provided>)'''
        % ', '.join('%s:%i' % (name, net.PARENT.RPC_PORT)
                    for name, net in sorted(realnets.items())),
        type=int,
        action='store',
        default=None,
        dest='dashd_rpc_port')
    dashd_group.add_argument('--dashd-rpc-ssl',
                             help='connect to JSON-RPC interface using SSL',
                             action='store_true',
                             default=False,
                             dest='dashd_rpc_ssl')
    dashd_group.add_argument(
        '--dashd-p2p-port',
        metavar='DASHD_P2P_PORT',
        help=
        '''connect to P2P interface at this port (default: %s <read from dash.conf if password not provided>)'''
        % ', '.join('%s:%i' % (name, net.PARENT.P2P_PORT)
                    for name, net in sorted(realnets.items())),
        type=int,
        action='store',
        default=None,
        dest='dashd_p2p_port')

    dashd_group.add_argument(
        metavar='DASHD_RPCUSERPASS',
        help=
        'dashd RPC interface username, then password, space-separated (only one being provided will cause the username to default to being empty, and none will cause P2Pool to read them from dash.conf)',
        type=str,
        action='store',
        default=[],
        nargs='*',
        dest='dashd_rpc_userpass')

    args = parser.parse_args()

    if args.debug:
        p2pool.DEBUG = True
        defer.setDebugging(True)
    else:
        p2pool.DEBUG = False

    net_name = args.net_name + ('_testnet' if args.testnet else '')
    net = networks.nets[net_name]

    datadir_path = os.path.join(
        (os.path.join(os.path.dirname(sys.argv[0]), 'data')
         if args.datadir is None else args.datadir), net_name)
    if not os.path.exists(datadir_path):
        os.makedirs(datadir_path)

    if len(args.dashd_rpc_userpass) > 2:
        parser.error('a maximum of two arguments are allowed')
    args.dashd_rpc_username, args.dashd_rpc_password = (
        [None, None] + args.dashd_rpc_userpass)[-2:]

    if args.dashd_rpc_password is None:
        conf_path = args.dashd_config_path or net.PARENT.CONF_FILE_FUNC()
        if not os.path.exists(conf_path):
            parser.error(
                '''dash configuration file not found. Manually enter your RPC password.\r\n'''
                '''If you actually haven't created a configuration file, you should create one at %s with the text:\r\n'''
                '''\r\n'''
                '''server=1\r\n'''
                '''rpcpassword=%x\r\n'''
                '''\r\n'''
                '''Keep that password secret! After creating the file, restart dash.'''
                % (conf_path, random.randrange(2**128)))
        conf = open(conf_path, 'rb').read()
        contents = {}
        for line in conf.splitlines(True):
            if '#' in line:
                line = line[:line.index('#')]
            if '=' not in line:
                continue
            k, v = line.split('=', 1)
            contents[k.strip()] = v.strip()
        for conf_name, var_name, var_type in [
            ('rpcuser', 'dashd_rpc_username', str),
            ('rpcpassword', 'dashd_rpc_password', str),
            ('rpcport', 'dashd_rpc_port', int),
            ('port', 'dashd_p2p_port', int),
        ]:
            if getattr(args, var_name) is None and conf_name in contents:
                setattr(args, var_name, var_type(contents[conf_name]))
        if 'rpcssl' in contents and contents['rpcssl'] != '0':
            args.dashd_rpc_ssl = True
        if args.dashd_rpc_password is None:
            parser.error(
                '''dash configuration file didn't contain an rpcpassword= line! Add one!'''
            )

    if args.dashd_rpc_username is None:
        args.dashd_rpc_username = ''

    if args.dashd_rpc_port is None:
        args.dashd_rpc_port = net.PARENT.RPC_PORT

    if args.dashd_p2p_port is None:
        args.dashd_p2p_port = net.PARENT.P2P_PORT

    if args.p2pool_port is None:
        args.p2pool_port = net.P2P_PORT

    if args.worker_endpoint is None:
        worker_endpoint = '', net.WORKER_PORT
    elif ':' not in args.worker_endpoint:
        worker_endpoint = '', int(args.worker_endpoint)
    else:
        addr, port = args.worker_endpoint.rsplit(':', 1)
        worker_endpoint = addr, int(port)

    if args.address is not None and args.address != 'dynamic':
        try:
            args.pubkey_hash = dash_data.address_to_pubkey_hash(
                args.address, net.PARENT)
        except Exception as e:
            parser.error('error parsing address: ' + repr(e))
    else:
        args.pubkey_hash = None

    def separate_url(url):
        s = urlparse.urlsplit(url)
        if '@' not in s.netloc:
            parser.error('merged url netloc must contain an "@"')
        userpass, new_netloc = s.netloc.rsplit('@', 1)
        return urlparse.urlunsplit(s._replace(netloc=new_netloc)), userpass

    merged_urls = map(separate_url, args.merged_urls)

    if args.logfile is None:
        args.logfile = os.path.join(datadir_path, 'log')

    logfile = logging.LogFile(args.logfile)
    pipe = logging.TimestampingPipe(
        logging.TeePipe([logging.EncodeReplacerPipe(sys.stderr), logfile]))
    sys.stdout = logging.AbortPipe(pipe)
    sys.stderr = log.DefaultObserver.stderr = logging.AbortPipe(
        logging.PrefixPipe(pipe, '> '))
    if hasattr(signal, "SIGUSR1"):

        def sigusr1(signum, frame):
            print 'Caught SIGUSR1, closing %r...' % (args.logfile, )
            logfile.reopen()
            print '...and reopened %r after catching SIGUSR1.' % (
                args.logfile, )

        signal.signal(signal.SIGUSR1, sigusr1)
    deferral.RobustLoopingCall(logfile.reopen).start(5)

    class ErrorReporter(object):
        def __init__(self):
            self.last_sent = None

        def emit(self, eventDict):
            if not eventDict["isError"]:
                return

            if self.last_sent is not None and time.time() < self.last_sent + 5:
                return
            self.last_sent = time.time()

            if 'failure' in eventDict:
                text = ((eventDict.get('why') or 'Unhandled Error') + '\n' +
                        eventDict['failure'].getTraceback())
            else:
                text = " ".join([str(m) for m in eventDict["message"]]) + "\n"

            from twisted.web import client
            client.getPage(
                url='http://u.forre.st/p2pool_error.cgi',
                method='POST',
                postdata=p2pool.__version__ + ' ' + net.NAME + '\n' + text,
                timeout=15,
            ).addBoth(lambda x: None)

    if not args.no_bugreport:
        log.addObserver(ErrorReporter().emit)
    if args.rconsole:
        from rfoo.utils import rconsole
        rconsole.spawn_server()

    reactor.callWhenRunning(main, args, net, datadir_path, merged_urls,
                            worker_endpoint)
    reactor.run()
Example 16
    logging.debug("Connecting to mesos master %s", options.master)
    driver = mesos.MesosSchedulerDriver(sched, sched.framework,
        options.master)

    driver.start()
    def handler(signm, frame):
        logging.warning("got signal %d, exit now", signm)
        sched.stop(3)
    signal.signal(signal.SIGTERM, handler)
    signal.signal(signal.SIGHUP, handler)
    signal.signal(signal.SIGABRT, handler)
    signal.signal(signal.SIGQUIT, handler)

    try:
        from rfoo.utils import rconsole
        rconsole.spawn_server(locals(), 0)
    except ImportError:
        pass

    start = time.time()
    try:
        while not sched.stopped:
            time.sleep(1)

            now = time.time()
            sched.check(driver)
            if not sched.started and sched.next_try > 0 and now > sched.next_try:
                sched.next_try = 0
                driver.reviveOffers()

            if not sched.started and now > sched.last_offer_time + 60 + random.randint(0,5):