Example #1
    def _create_circus(cls, callable, plugins=None, stats=False, **kw):
        resolve_name(callable)  # used to check the callable
        fd, testfile = mkstemp()
        os.close(fd)
        wdir = os.path.dirname(__file__)
        args = ['generic.py', callable, testfile]
        worker = {
            'cmd': _CMD,
            'args': args,
            'working_dir': wdir,
            'name': 'test',
            'graceful_timeout': 4
        }
        worker.update(kw)
        debug = kw.get('debug', False)

        if stats:
            arbiter = get_arbiter([worker],
                                  background=True,
                                  plugins=plugins,
                                  stats_endpoint=DEFAULT_ENDPOINT_STATS,
                                  statsd=True,
                                  debug=debug,
                                  statsd_close_outputs=not debug)
        else:
            arbiter = get_arbiter([worker],
                                  background=True,
                                  plugins=plugins,
                                  debug=debug)
        arbiter.start()
        return testfile, arbiter
Example #2
    def _run_circus(self, callable, plugins=None, stats=False, **kw):
        resolve_name(callable)  # used to check the callable
        fd, testfile = mkstemp()
        os.close(fd)
        wdir = os.path.dirname(__file__)
        args = ['generic.py', callable, testfile]
        worker = {
            'cmd': _CMD,
            'args': args,
            'working_dir': wdir,
            'name': 'test',
            'graceful_timeout': 4
        }
        worker.update(kw)
        if stats:
            arbiter = get_arbiter([worker],
                                  background=True,
                                  plugins=plugins,
                                  stats_endpoint=DEFAULT_ENDPOINT_STATS,
                                  debug=kw.get('debug', False))
        else:
            arbiter = get_arbiter([worker],
                                  background=True,
                                  plugins=plugins,
                                  debug=kw.get('debug', False))

        arbiter.start()
        time.sleep(.3)
        self.arbiters.append(arbiter)
        self.files.append(testfile)
        return testfile
Example #3
    def start_manager(self):
        if settings.SETTINGS_TYPE == 'production':
            arbiter = get_arbiter([{"cmd": "python "
                                       "" + settings.ENVIRONMENTS_MANAGER_PATH, "numprocesses": 1}], background=True)

            if check_python_process():
                clean_environments()

            arbiter.start()
Example #4
 def _create_circus(cls, callable, plugins=None, stats=False, **kw):
     resolve_name(callable)   # used to check the callable
     fd, testfile = mkstemp()
     os.close(fd)
     wdir = os.path.dirname(__file__)
     args = ['generic.py', callable, testfile]
     worker = {'cmd': _CMD, 'args': args, 'working_dir': wdir,
               'name': 'test', 'graceful_timeout': 4}
     worker.update(kw)
     if stats:
         arbiter = get_arbiter([worker], background=True, plugins=plugins,
                               stats_endpoint=DEFAULT_ENDPOINT_STATS,
                               debug=kw.get('debug', False))
     else:
         arbiter = get_arbiter([worker], background=True, plugins=plugins,
                               debug=kw.get('debug', False))
     arbiter.start()
     return testfile, arbiter
Example #5
    def start_manager(self):
        if settings.SETTINGS_TYPE == 'production':
            arbiter = get_arbiter([{"cmd": "python "
                                       "" + settings.ENVIRONMENTS_MANAGER_PATH, "numprocesses": 1}], background=True)

            if check_python_process():
                clean_environments()

            arbiter.start()
Example #6
 def start_manager(self):
     arbiter = get_arbiter([{
         "cmd": "python " + settings.ENVIRONMENTS_MANAGER_PATH,
         "numprocesses": 1
     }], background=True)
     if settings.SETTINGS_TYPE == 'production':
         arbiter.start()
         self.started = True
Example #7
 def _create_circus(cls, callable, plugins=None, stats=False, **kw):
     resolve_name(callable)  # used to check the callable
     fd, testfile = mkstemp()
     os.close(fd)
     wdir = os.path.dirname(__file__)
     args = ["generic.py", callable, testfile]
     worker = {"cmd": _CMD, "args": args, "working_dir": wdir, "name": "test", "graceful_timeout": 4}
     worker.update(kw)
     if stats:
         arbiter = get_arbiter(
             [worker],
             background=True,
             plugins=plugins,
             stats_endpoint=DEFAULT_ENDPOINT_STATS,
             debug=kw.get("debug", False),
         )
     else:
         arbiter = get_arbiter([worker], background=True, plugins=plugins, debug=kw.get("debug", False))
     arbiter.start()
     return testfile, arbiter
Example #8
    def _run_circus(self, callable, plugins=None, stats=False, **kw):
        resolve_name(callable)   # used to check the callable
        fd, testfile = mkstemp()
        os.close(fd)
        wdir = os.path.dirname(__file__)
        args = ['generic.py', callable, testfile]
        worker = {'cmd': _CMD, 'args': args, 'working_dir': wdir,
                  'name': 'test'}
        worker.update(kw)
        if stats:
            arbiter = get_arbiter([worker], background=True, plugins=plugins,
                                  stats_endpoint='tcp://127.0.0.1:5557')
        else:
            arbiter = get_arbiter([worker], background=True, plugins=plugins)

        arbiter.start()
        time.sleep(.3)
        self.arbiters.append(arbiter)
        self.files.append(testfile)
        return testfile
Example #9
    def _run_circus(self, callable, plugins=None, stats=False, **kw):
        resolve_name(callable)   # used to check the callable
        fd, testfile = mkstemp()
        os.close(fd)
        wdir = os.path.dirname(__file__)
        args = ['generic.py', callable, testfile]
        worker = {'cmd': _CMD, 'args': args, 'working_dir': wdir,
                  'name': 'test', 'graceful_timeout': 4}
        worker.update(kw)
        if stats:
            arbiter = get_arbiter([worker], background=True, plugins=plugins,
                                  stats_endpoint='tcp://127.0.0.1:5557')
        else:
            arbiter = get_arbiter([worker], background=True, plugins=plugins)

        arbiter.start()
        time.sleep(.3)
        self.arbiters.append(arbiter)
        self.files.append(testfile)
        return testfile
Example #10
 def _run_circus(self, callable):
     resolve_name(callable)   # used to check the callable
     fd, testfile = mkstemp()
     os.close(fd)
     wdir = os.path.dirname(__file__)
     cmd = '%s generic.py %s %s' % (sys.executable, callable, testfile)
     worker = {'cmd': cmd, 'working_dir': wdir, 'name': 'test'}
     arbiter = get_arbiter([worker], background=True)
     arbiter.start()
     self.arbiters.append(arbiter)
     self.files.append(testfile)
     return testfile
Example #11
    def setUpClass(cls):
        settings = {}
        load_into_settings(CONFIG_PATH, settings)
        cls.config = settings['config']

        cls.arbiter = circus.get_arbiter([
            {'cmd': 'python router.py %s' % CONFIG_PATH,
             'shell': True}
        ], background=True)
        cls.arbiter.start()
        # I hope the router is set up once we're done sleeping!
        time.sleep(1)
Example #12
 def __init__(self, host, port, timeout=15):
     assert type(host) == str
     assert type(port) == int and port >= 0 and port <= 65535
     assert type(timeout) == int and timeout > 0
     self._host = host
     self._port = port
     self._timeout = timeout
     self._arbiter = get_arbiter([])
     self._arbiter.start()
     self._client = CircusClient(timeout=self._timeout,
                                 endpoint='tcp://{0}:{1}'.format(
                                     self._host, self._port))
Example #13
 def __init__(self, host, port, timeout=15):
     assert type(host) == str
     assert type(port) == int and port >= 0 and port <= 65535
     assert type(timeout) == int and timeout > 0
     self._host = host
     self._port = port
     self._timeout = timeout
     self._arbiter = get_arbiter([])
     self._arbiter.start()
     self._client = CircusClient(timeout=self._timeout,
                                 endpoint='tcp://{0}:{1}'.format(self._host,
                                                                 self._port))
Example #14
 def _run_circus(self, callable):
     resolve_name(callable)  # used to check the callable
     fd, testfile = mkstemp()
     os.close(fd)
     wdir = os.path.dirname(__file__)
     cmd = "%s generic.py %s %s" % (sys.executable, callable, testfile)
     arbiter = get_arbiter(cmd, working_dir=wdir, numprocesses=1, name="test")
     runner = Runner(arbiter, testfile)
     runner.start()
     self.runners.append(runner)
     self.files.append(testfile)
     return testfile
Example #15
 def _run_circus(self, callable):
     resolve_name(callable)  # used to check the callable
     fd, testfile = mkstemp()
     os.close(fd)
     wdir = os.path.dirname(__file__)
     args = ["generic.py", callable, testfile]
     worker = {"cmd": _CMD, "args": args, "working_dir": wdir, "name": "test"}
     arbiter = get_arbiter([worker], background=True)
     arbiter.start()
     time.sleep(0.3)
     self.arbiters.append(arbiter)
     self.files.append(testfile)
     return testfile
Example #16
def worker():
    cwd = os.path.abspath('.')
    program = {
        'name': 'worker',
        'use': 'circus.plugins.redis_observer.RedisObserver',
        'loop_rate': 5,
        'cmd': '.venv/bin/python worker.py',
        'working_dir': cwd,
        'sample_rate': 2.0,
        'application_name': 'eclogue-worker',
    }
    arbiter = get_arbiter(watchers=[program])
    arbiter.start()
Example #17
def work(n):

    """
    Spin up workers.
    """

    workers = {"cmd": os.path.join(config["osp"]["bin"], "rqworker"), "numprocesses": n, "env": {"LANG": "en_US.UTF-8"}}

    arbiter = get_arbiter([workers])

    try:
        arbiter.start()
    finally:
        arbiter.stop()
Example #18
def onboard(ctx, rover='hughey'):
    """
    Launches a supervisor for the onboard software stack. (Defaults to Hughey)
    """
    arbiter = get_arbiter({
        'cmd': p,
        'numprocesses': 1,
        'virtualenv': config.PRODUCT_ENV,
        'copy_env': True
    } for p in ONBOARD_PROGRAMS)
    try:
        arbiter.start()
    finally:
        arbiter.stop()
Example #19
 def _run_circus(self, callable, plugins=None, **kw):
     resolve_name(callable)   # used to check the callable
     fd, testfile = mkstemp()
     os.close(fd)
     wdir = os.path.dirname(__file__)
     args = ['generic.py', callable, testfile]
     worker = {'cmd': _CMD, 'args': args, 'working_dir': wdir,
               'name': 'test'}
     worker.update(kw)
     arbiter = get_arbiter([worker], background=True, plugins=plugins)
     arbiter.start()
     time.sleep(.3)
     self.arbiters.append(arbiter)
     self.files.append(testfile)
     return testfile
Example #20
def work(n):

    """
    Spin up workers.
    """

    workers = {
        'cmd': os.path.join(config['osp']['bin'], 'rqworker'),
        'numprocesses': n,
        'env': {'LANG': 'en_US.UTF-8'}
    }

    arbiter = get_arbiter([workers])

    try:
        arbiter.start()
    finally:
        arbiter.stop()
Example #21
def work(n):
    """
    Spin up workers.
    """

    workers = {
        'cmd': os.path.join(config['osp']['bin'], 'rqworker'),
        'numprocesses': n,
        'env': {
            'LANG': 'en_US.UTF-8'
        }
    }

    arbiter = get_arbiter([workers])

    try:
        arbiter.start()
    finally:
        arbiter.stop()
Example #22
 def _run_circus(self, callable, **kw):
     resolve_name(callable)  # used to check the callable
     fd, testfile = mkstemp()
     os.close(fd)
     wdir = os.path.dirname(__file__)
     args = ['generic.py', callable, testfile]
     worker = {
         'cmd': _CMD,
         'args': args,
         'working_dir': wdir,
         'name': 'test'
     }
     worker.update(kw)
     arbiter = get_arbiter([worker], background=True)
     arbiter.start()
     time.sleep(.3)
     self.arbiters.append(arbiter)
     self.files.append(testfile)
     return testfile
Example #23
def start(
    exe_path: Path = DEFAULT_INSTALL_PATH,
    data_path: Path = DEFAULT_DATA_PATH,
    connection_uri: str = DEFAULT_CONNECTION_URI,
    database: str = SYSTEM_DATABASE,
    username: str = DEFAULT_USERNAME,
    password: str = DEFAULT_PASSWORD,
    close_stdout_and_stderr: bool = False,
) -> Arbiter:
    if exe_path.name != "arangodb":
        exe_path = get_exe_path(exe_path)
    working_dir_path = exe_path.parent

    env_path = f"{os.environ['PATH']}:{working_dir_path}"

    arbiter = get_arbiter([{
        "cmd":
        f"{exe_path} --starter.mode single --starter.data-dir {data_path}",
        "working_dir": working_dir_path,
        "env": {
            "PATH": env_path
        },
        "close_child_stdout": close_stdout_and_stderr,
        "close_child_stderr": close_stdout_and_stderr,
    }],
                          background=True)

    arbiter.start()

    while not is_running(connection_uri=connection_uri,
                         database=database,
                         username=username,
                         password=password):
        time.sleep(START_SLEEP_DELAY)

    return arbiter
Example #24
def start():
    daemonize()
    # ~ arbiter = get_arbiter([get_streamer('bla')],
                          # ~ controller='tcp://127.0.0.1:6000',
                          # ~ logoutput='arbiter.log', loglevel='INFO', debug=False, statsd=True)
    arbiter = get_arbiter([get_daemon_properties('ricoh')],
                          controller='tcp://127.0.0.1:6000',
                          logoutput='arbiter.log', loglevel='INFO', debug=False, statsd=True)
    restart = True
    while restart:
        try:
            future = arbiter.start()
            restart = False
            if check_future_exception_and_log(future) is None:
                restart = arbiter._restarting
        except Exception as e:
            # emergency stop
            arbiter.loop.run_sync(arbiter._emergency_stop)
            raise(e)
        except KeyboardInterrupt:
            pass
        finally:
            arbiter = None
    sys.exit(0)
Example #25
 def start_manager(self):
     if settings.SETTINGS_TYPE == 'production':
         arbiter = get_arbiter([{"cmd": "python "
                                    "" + settings.ENVIRONMENTS_MANAGER_PATH, "numprocesses": 1}], background=True)
         arbiter.start()
         self.started = True
Example #26
def get_cluster(target, numprocesses=5, frontend=DEFAULT_FRONTEND,
                backend=DEFAULT_BACKEND, heartbeat=DEFAULT_HEARTBEAT,
                working_dir='.', logfile='stdout',
                debug=False, background=False, worker_params=None,
                timeout=DEFAULT_TIMEOUT_MOVF):
    """Runs a Powerhose cluster.

    Options:

    - **callable**: The Python callable that will be called when the broker
      receives a job.
    - **numprocesses**: The number of workers. Defaults to 5.
    - **frontend**: the ZMQ socket to receive jobs.
    - **backend**: the ZMQ socket to communicate with workers.
    - **heartbeat**: the ZMQ socket to receive heartbeat requests.
    - **working_dir**: The working directory. Defaults to *"."*
    - **logfile**: The file to log into. Defaults to stdout.
    - **debug**: If True, the logs are at the DEBUG level. Defaults to False
    - **background**: If True, the cluster is run in the background.
      Defaults to False.
    - **worker_params**: a dict of params to pass to the worker. Default is
      None
    - **timeout**: the maximum time allowed before the thread stacks are dumped
      and the job result is not sent back.
    """
    from circus import get_arbiter
    from circus.stream import StdoutStream, FileStream

    python = sys.executable
    if debug:
        debug = ' --debug'
    else:
        debug = ''
    if worker_params:
        params = encode_params(worker_params)

    broker_cmd = [python, '-m', 'powerhose.broker', '--logfile',  logfile,
                  debug, '--frontend', frontend, '--backend', backend,
                  '--heartbeat', heartbeat]

    worker_cmd = [python, '-m', 'powerhose.worker', target, '--logfile',
                  logfile, debug, '--backend', backend, '--heartbeat',
                  heartbeat, '--timeout', str(timeout)]

    if worker_params:
        worker_cmd += ['--params', params]

    if logfile == 'stdout':
        stream = {'class': StdoutStream}
    else:
        stream = {'class': FileStream}

    watchers = [{'name': 'broker',
                 'cmd': ' '.join(broker_cmd),
                 'working_dir': working_dir,
                 'executable': python,
                 'stderr_stream': stream,
                 'stdout_stream': stream
                 },
                {'name': 'workers',
                 'cmd': ' '.join(worker_cmd),
                 'numprocesses': numprocesses,
                 'working_dir': working_dir,
                 'executable': python,
                 'stderr_stream': stream,
                 'stdout_stream': stream

                 }
                ]

    # XXX add more options
    arbiter = get_arbiter(watchers, background=background)

    # give a chance to all processes to start
    # XXX this should be in Circus
    if background:
        start = time.clock()
        while time.clock() - start < 5:
            statuses = [status == 'active' for status in
                        arbiter.statuses().values()]
            if all(statuses):
                break

    return arbiter
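The docstring above lists the options but the harvested snippet shows no call site. Below is a minimal usage sketch for this Powerhose get_cluster helper; the dotted target name, the worker count, and the explicit start()/stop() calls are illustrative assumptions that follow the start/stop pattern used by the other examples on this page, not part of the original example.

# Hedged usage sketch (assumed values; 'myproject.jobs.handle_job' is a
# hypothetical dotted path to the callable the workers should run).
cluster = get_cluster('myproject.jobs.handle_job',
                      numprocesses=2,
                      logfile='stdout',
                      background=True)  # returns the arbiter without blocking
cluster.start()
try:
    pass  # submit jobs to the broker's frontend ZMQ socket here
finally:
    cluster.stop()  # tear the broker and worker watchers down again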
Example #27
def get_cluster(numprocesses=5, frontend=DEFAULT_FRONTEND,
                backend=DEFAULT_BACKEND, heartbeat=DEFAULT_HEARTBEAT,
                register=DEFAULT_REG,
                working_dir='.', logfile='stdout',
                debug=False, background=False, worker_params=None,
                timeout=DEFAULT_TIMEOUT_MOVF, max_age=DEFAULT_MAX_AGE,
                max_age_delta=DEFAULT_MAX_AGE_DELTA):
    """Runs a Loads cluster.

    Options:

    - **numprocesses**: The number of workers. Defaults to 5.
    - **frontend**: the ZMQ socket to receive jobs.
    - **backend**: the ZMQ socket to communicate with workers.
    - **register**: the ZMQ socket to register workers.
    - **heartbeat**: the ZMQ socket to receive heartbeat requests.
    - **working_dir**: The working directory. Defaults to *"."*
    - **logfile**: The file to log into. Defaults to stdout.
    - **debug**: If True, the logs are at the DEBUG level. Defaults to False
    - **background**: If True, the cluster is run in the background.
      Defaults to False.
    - **worker_params**: a dict of params to pass to the worker. Default is
      None
    - **timeout**: the maximum time allowed before the thread stacks are dumped
      and the job result is not sent back.
    - **max_age**: maximum age for a worker in seconds. After that delay,
      the worker will simply quit. When set to -1, never quits.
      Defaults to -1.
    - **max_age_delta**: maximum value in seconds added to max age.
      The Worker will quit after *max_age + random(0, max_age_delta)*
      This is done to avoid having all workers quit at the same instant.
    """
    from circus import get_arbiter

    python = sys.executable
    if debug:
        debug = ' --debug'
    else:
        debug = ''
    if worker_params:
        params = encode_params(worker_params)

    broker_cmd = [python, '-m', 'loads.transport.broker', '--logfile',
                  logfile, debug, '--frontend', frontend, '--backend',
                  backend, '--heartbeat', heartbeat]

    worker_cmd = [python, '-m', 'loads.transport.agent', '--logfile',
                  logfile, debug, '--backend', backend, '--heartbeat',
                  heartbeat, '--timeout', str(timeout), '--max-age',
                  str(max_age), '--max-age-delta', str(max_age_delta)]

    if worker_params:
        worker_cmd += ['--params', params]

    if logfile == 'stdout':
        stream = {'class': 'StdoutStream'}
    else:
        stream = {'class': 'FileStream',
                  'filename': logfile}

    watchers = [{'name': 'broker',
                 'cmd': ' '.join(broker_cmd),
                 'working_dir': working_dir,
                 'executable': python,
                 'stderr_stream': stream,
                 'stdout_stream': stream
                 },
                {'name': 'workers',
                 'cmd': ' '.join(worker_cmd),
                 'numprocesses': numprocesses,
                 'working_dir': working_dir,
                 'executable': python,
                 'stderr_stream': stream,
                 'stdout_stream': stream
                 }
                ]

    # XXX add more options
    arbiter = get_arbiter(watchers, background=background)

    # give a chance to all processes to start
    # XXX this should be in Circus
    if background:
        start = time.clock()
        while time.clock() - start < 5:
            statuses = [status == 'active' for status in
                        arbiter.statuses().values()]
            if all(statuses):
                break

    return arbiter
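As with the Powerhose variant above, no call site is shown. A short hedged sketch follows, with illustrative values only (the log file name is a made-up placeholder, not taken from the example):

# Hedged sketch for the Loads get_cluster variant; values are assumptions.
cluster = get_cluster(numprocesses=3,
                      logfile='agent.log',  # hypothetical file, selects FileStream
                      background=True)
cluster.start()
try:
    pass  # drive the load agents through the broker sockets here
finally:
    cluster.stop()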
Example #28
def get_cluster(target,
                numprocesses=5,
                frontend=DEFAULT_FRONTEND,
                backend=DEFAULT_BACKEND,
                heartbeat=DEFAULT_HEARTBEAT,
                register=DEFAULT_REG,
                working_dir='.',
                logfile='stdout',
                debug=False,
                background=False,
                worker_params=None,
                timeout=DEFAULT_TIMEOUT_MOVF,
                max_age=DEFAULT_MAX_AGE,
                max_age_delta=DEFAULT_MAX_AGE_DELTA):
    """Runs a Powerhose cluster.

    Options:

    - **callable**: The Python callable that will be called when the broker
      receives a job.
    - **numprocesses**: The number of workers. Defaults to 5.
    - **frontend**: the ZMQ socket to receive jobs.
    - **backend**: the ZMQ socket to communicate with workers.
    - **register**: the ZMQ socket to register workers.
    - **heartbeat**: the ZMQ socket to receive heartbeat requests.
    - **working_dir**: The working directory. Defaults to *"."*
    - **logfile**: The file to log into. Defaults to stdout.
    - **debug**: If True, the logs are at the DEBUG level. Defaults to False
    - **background**: If True, the cluster is run in the background.
      Defaults to False.
    - **worker_params**: a dict of params to pass to the worker. Default is
      None
    - **timeout**: the maximum time allowed before the thread stacks are dumped
      and the job result is not sent back.
    - **max_age**: maximum age for a worker in seconds. After that delay,
      the worker will simply quit. When set to -1, never quits.
      Defaults to -1.
    - **max_age_delta**: maximum value in seconds added to max age.
      The Worker will quit after *max_age + random(0, max_age_delta)*
      This is done to avoid having all workers quit at the same instant.
    """
    from circus import get_arbiter

    python = sys.executable
    if debug:
        debug = ' --debug'
    else:
        debug = ''
    if worker_params:
        params = encode_params(worker_params)

    broker_cmd = [
        python, '-m', 'powerhose.broker', '--logfile', logfile, debug,
        '--frontend', frontend, '--backend', backend, '--heartbeat', heartbeat
    ]

    worker_cmd = [
        python, '-m', 'powerhose.worker', target, '--logfile', logfile, debug,
        '--backend', backend, '--heartbeat', heartbeat, '--timeout',
        str(timeout), '--max-age',
        str(max_age), '--max-age-delta',
        str(max_age_delta)
    ]

    if worker_params:
        worker_cmd += ['--params', params]

    if logfile == 'stdout':
        stream = {'class': 'StdoutStream'}
    else:
        stream = {'class': 'FileStream', 'filename': logfile}

    watchers = [{
        'name': 'broker',
        'cmd': ' '.join(broker_cmd),
        'working_dir': working_dir,
        'executable': python,
        'stderr_stream': stream,
        'stdout_stream': stream
    }, {
        'name': 'workers',
        'cmd': ' '.join(worker_cmd),
        'numprocesses': numprocesses,
        'working_dir': working_dir,
        'executable': python,
        'stderr_stream': stream,
        'stdout_stream': stream
    }]

    # XXX add more options
    arbiter = get_arbiter(watchers, background=background)

    # give a chance to all processes to start
    # XXX this should be in Circus
    if background:
        start = time.clock()
        while time.clock() - start < 5:
            statuses = [
                status == 'active' for status in arbiter.statuses().values()
            ]
            if all(statuses):
                break

    return arbiter
Example #29
 def __init__(self):
     self._arbiter = get_arbiter([], background=True)
     self._arbiter.start()
     self._client = create_circus_client()
Example #30
                                                  debug=args.debug)

    with ZeroMQHandler(log_uri, multi=True):
        logger = Logger("Onitu")

        ioloop.install()
        loop = ioloop.IOLoop.instance()

        arbiter = circus.get_arbiter([
            {
                'cmd': 'redis-server',
                'args': 'redis/redis.conf',
                'copy_env': True,
                'priority': 1,
            },
            {
                'cmd': sys.executable,
                'args': ['-m', 'onitu.referee', log_uri],
                'copy_env': True,
            },
        ],
                                     proc_name="Onitu",
                                     loop=loop)

        for s in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
            signal.signal(s, lambda *args, **kwargs: loop.stop())

        try:
            future = arbiter.start()
            loop.add_future(future, load_drivers)
            arbiter.start_io_loop()
Example #31
def main(profile, loggerconfig, foreground, pidfile):
    """Run an aiida daemon"""
    import zmq
    try:
        zmq_version = [int(part) for part in zmq.__version__.split('.')[:2]]
        if len(zmq_version) < 2:
            raise ValueError()
    except (AttributeError, ValueError):
        print('Unknown PyZMQ version - aborting...')
        sys.exit(0)

    if zmq_version[0] < 13 or (zmq_version[0] == 13 and zmq_version[1] < 1):
        print('circusd needs PyZMQ >= 13.1.0 to run - aborting...')
        sys.exit(0)

    loglevel = 'INFO'
    logoutput = '-'

    if not foreground:
        logoutput = 'balrog-{}.log'.format(profile)
        daemonize()

    # Create the arbiter
    profile_config = ProfileConfig(profile)

    arbiter = get_arbiter(
        controller=profile_config.get_endpoint(0),
        pubsub_endpoint=profile_config.get_endpoint(1),
        stats_endpoint=profile_config.get_endpoint(2),
        logoutput=logoutput,
        loglevel=loglevel,
        debug=False,
        statsd=True,
        pidfile='balrog-{}.pid'.format(
            profile
        ),  # aiida.common.setup.AIIDA_CONFIG_FOLDER + '/daemon/aiida-{}.pid'.format(uuid)
        watchers=[{
            'name': profile_config.daemon_name,
            'cmd': profile_config.cmd_string,
            'virtualenv': VIRTUALENV,
            'copy_env': True,
            'stdout_stream': {
                'class': 'FileStream',
                'filename': '{}.log'.format(profile_config.daemon_name)
            },
            'env': {
                'PYTHONUNBUFFERED': 'True'
            }
        }])

    # go ahead and set umask early if it is in the config
    if arbiter.umask is not None:
        os.umask(arbiter.umask)

    pidfile = pidfile or arbiter.pidfile or None
    if pidfile:
        pidfile = Pidfile(pidfile)

        try:
            pidfile.create(os.getpid())
        except RuntimeError as e:
            print(str(e))
            sys.exit(1)

    # configure the logger
    loglevel = loglevel or arbiter.loglevel or 'info'
    logoutput = logoutput or arbiter.logoutput or '-'
    loggerconfig = loggerconfig or arbiter.loggerconfig or None
    configure_logger(logger, loglevel, logoutput, loggerconfig)

    # Main loop
    restart = True
    while restart:
        try:
            arbiter = arbiter
            future = arbiter.start()
            restart = False
            if check_future_exception_and_log(future) is None:
                restart = arbiter._restarting
        except Exception as e:
            # emergency stop
            arbiter.loop.run_sync(arbiter._emergency_stop)
            raise (e)
        except KeyboardInterrupt:
            pass
        finally:
            arbiter = None
            if pidfile is not None:
                pidfile.unlink()
    sys.exit(0)
Example #32
 def __init__(self, cmd, num_workers=5, timeout=1.,
              check=5., controller='tcp://127.0.0.1:5555', **kw):
     self.arbiter = get_arbiter(cmd, num_workers, timeout,
                                controller=controller,
                                check_flapping=True, **kw)
Example #33
from circus import get_arbiter

myprogram = {"cmd": "sleep 30", "numprocesses": 4}

print('Running...')
arbiter = get_arbiter([myprogram])
try:
    arbiter.start()
finally:
    arbiter.stop()

Example #34
def _start_circus(foreground):
    """
    This will actually launch the circus daemon, either daemonized in the background
    or in the foreground, printing all logs to stdout.

    .. note:: this should not be called directly from the commandline!
    """
    from circus import get_arbiter
    from circus import logger as circus_logger
    from circus.circusd import daemonize
    from circus.pidfile import Pidfile
    from circus.util import check_future_exception_and_log, configure_logger

    client = DaemonClient()

    loglevel = client.loglevel
    logoutput = '-'

    if not foreground:
        logoutput = client.circus_log_file

    arbiter_config = {
        'controller': client.get_controller_endpoint(),
        'pubsub_endpoint': client.get_pubsub_endpoint(),
        'stats_endpoint': client.get_stats_endpoint(),
        'logoutput': logoutput,
        'loglevel': loglevel,
        'debug': False,
        'statsd': True,
        'pidfile': client.circus_pid_file,
        'watchers': [{
            'name': client.daemon_name,
            'cmd': client.cmd_string,
            'virtualenv': client.virtualenv,
            'copy_env': True,
            'stdout_stream': {
                'class': 'FileStream',
                'filename': client.daemon_log_file,
            },
            'env': get_env_with_venv_bin(),
        }]
    } # yapf: disable

    if not foreground:
        daemonize()

    arbiter = get_arbiter(**arbiter_config)
    pidfile = Pidfile(arbiter.pidfile)

    try:
        pidfile.create(os.getpid())
    except RuntimeError as exception:
        click.echo(str(exception))
        sys.exit(1)

    # Configure the logger
    loggerconfig = None
    loggerconfig = loggerconfig or arbiter.loggerconfig or None
    configure_logger(circus_logger, loglevel, logoutput, loggerconfig)

    # Main loop
    should_restart = True

    while should_restart:
        try:
            arbiter = arbiter
            future = arbiter.start()
            should_restart = False
            if check_future_exception_and_log(future) is None:
                should_restart = arbiter._restarting  # pylint: disable=protected-access
        except Exception as exception:
            # Emergency stop
            arbiter.loop.run_sync(arbiter._emergency_stop)  # pylint: disable=protected-access
            raise exception
        except KeyboardInterrupt:
            pass
        finally:
            arbiter = None
            if pidfile is not None:
                pidfile.unlink()

    sys.exit(0)
Example #35
    with ZeroMQHandler(log_uri, multi=True):
        logger = Logger("Onitu")

        ioloop.install()
        loop = ioloop.IOLoop.instance()

        arbiter = circus.get_arbiter(
            [
                {
                    'cmd': 'redis-server',
                    'args': 'redis/redis.conf',
                    'copy_env': True,
                    'priority': 1,
                },
                {
                    'cmd': sys.executable,
                    'args': ['-m', 'onitu.referee', log_uri],
                    'copy_env': True,
                },
            ],
            proc_name="Onitu",
            loop=loop
        )

        for s in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
            signal.signal(s, lambda *args, **kwargs: loop.stop())

        try:
            future = arbiter.start()
            loop.add_future(future, load_drivers)
Example #36
 def make_arbiter(self):
     print(self.arbiter_config)
     print(self.watcher_config)
     return get_arbiter([self.watcher_config], **self.arbiter_config)
Example #37
from circus import get_arbiter


myprogram = {
    "cmd": "python",
    "args": "-u dummy_fly.py $(circus.wid)",
    "numprocesses": 3,
}


arbiter = get_arbiter([myprogram], debug=True)
try:
    arbiter.start()
finally:
    arbiter.stop()
Example #38
#!/usr/bin/env python

"""
This is an addressable worker pool using circus
"""

from circus import get_arbiter

arbiter = get_arbiter([
    {'cmd': 'python foo.py'},
    {'cmd': 'python bar.py'},
    ])
try:
    arbiter.start()
finally:
    arbiter.stop()
Example #39
#!/usr/bin/env python3
from circus import get_arbiter

sprogram = {
    "cmd": "python3 stream2db.py",
    "numprocesses": 1,
    "copy_env": True,
    "copy_path": True
}

arbiter = get_arbiter([sprogram])

try:
    arbiter.start()
finally:
    arbiter.stop()
Example #40
myprogram = {
    "cmd": "python",
    "args": "-u dummy_fly.py $(circus.wid)",
    "numprocesses": 3,
}

from circus import get_arbiter

arbiter = get_arbiter([myprogram], debug=True)
try:
    arbiter.start()
finally:
    arbiter.stop()