Example #1
File: watcher.py Project: ionrock/circus
    def initialize(self, evpub_socket, sockets, arbiter):
        self.evpub_socket = evpub_socket
        self.sockets = sockets
        self.arbiter = arbiter

        # default refresh_time
        refresh_time = float(0.3)

        self.stdout = Redirector(
            Publisher(self.notify_event, topic='stdout', **self.publish),
            float(self.publish.get('refresh_time', refresh_time)),
            loop=self.loop)

        self.stderr = Redirector(
            Publisher(self.notify_event, topic='stderr', **self.publish),
            float(self.publish.get('refresh_time', refresh_time)),
            loop=self.loop)
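
A minimal sketch, with made-up values, of the refresh_time lookup performed twice above: the Redirector receives the value from the publish mapping when one was supplied, and the 0.3 second default otherwise.

# Hypothetical publish mappings, for illustration only.
default_refresh_time = 0.3

publish = {'refresh_time': '1.5'}   # override supplied in the publish options
print(float(publish.get('refresh_time', default_refresh_time)))   # -> 1.5

publish = {}                        # no override supplied
print(float(publish.get('refresh_time', default_refresh_time)))   # -> 0.3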
Example #2
File: watcher.py Project: ionrock/circus
class Watcher(object):
    """
    Class managing a list of processes for a given command.

    Options:

    - **name**: name given to the watcher. Used to uniquely identify it.

    - **cmd**: the command to run. May contain *$WID*, which will be
      replaced by **wid**.

    - **args**: the arguments for the command to run. Can be a list or
      a string. If **args** is a string, it's split using
      :func:`shlex.split`. Defaults to None.

    - **numprocesses**: Number of processes to run.

    - **working_dir**: the working directory to run the command in. If
      not provided, will default to the current working directory.

    - **shell**: if *True*, will run the command in the shell
      environment. *False* by default. **warning: this is a
      security hazard**.

    - **uid**: if given, is the user id or name the command should run
      with. The current uid is the default.

    - **gid**: if given, is the group id or name the command should run
      with. The current gid is the default.

    - **send_hup**: if True, a process reload will be done by sending
      the SIGHUP signal. Defaults to False.

    - **env**: a mapping containing the environment variables the command
      will run with. Optional.

    - **rlimits**: a mapping containing rlimit names and values that will
      be set before the command runs.

    - **stdout_stream**: a mapping that defines the stream for
      the process stdout. Defaults to None.

      Optional. When provided, *stdout_stream* is a mapping that may contain
      the following keys:

      - **class**: the stream class. Defaults to
        `circus.stream.FileStream`
      - **filename**: the filename, if using a FileStream
      - **refresh_time**: the delay between two stream checks. Defaults
        to 0.3 seconds.
      - **max_bytes**: maximum file size, after which a new output file is
        opened. Defaults to 0, which means no maximum size.
      - **backup_count**: how many backups to retain when rotating files
        according to the max_bytes parameter. Defaults to 0, which means
        no backups are made.

      This mapping will be used to create a stream callable of the specified
      class.
      Each entry received by the callable is a mapping containing:

      - **pid** - the process pid
      - **name** - the stream name (*stderr* or *stdout*)
      - **data** - the data

    - **stderr_stream**: a mapping that defines the stream for
      the process stderr. Defaults to None.

      Optional. When provided, *stderr_stream* is a mapping that may contain
      the following keys:

      - **class**: the stream class. Defaults to `circus.stream.FileStream`
      - **filename**: the filename, if using a FileStream
      - **refresh_time**: the delay between two stream checks. Defaults
        to 0.3 seconds.
      - **max_bytes**: maximum file size, after which a new output file is
        opened. Defaults to 0, which means no maximum size.
      - **backup_count**: how many backups to retain when rotating files
        according to the max_bytes parameter. Defaults to 0, which means
        no backups are made.

      This mapping will be used to create a stream callable of the specified
      class.

      Each entry received by the callable is a mapping containing:

      - **pid** - the process pid
      - **name** - the stream name (*stderr* or *stdout*)
      - **data** - the data

    - **priority** -- integer that defines a priority for the watcher. When
      the Arbiter performs operations on all watchers, it sorts them
      by this field, from the largest number to the smallest.
      (default: 0)

    - **singleton** -- If True, this watcher has a single process.
      (default: False)

    - **use_sockets** -- If True, the processes will inherit the file
      descriptors, thus can reuse the sockets opened by circusd.
      (default: False)

    - **copy_env** -- If True, the environment in which circus is running
      will be reproduced for the workers. (default: False)

    - **copy_path** -- If True, circusd *sys.path* is sent to the
      process through *PYTHONPATH*. You must activate **copy_env** for
      **copy_path** to work. (default: False)

    - **max_age**: If set, the process is replaced with a new one after
      around max_age seconds. (default: 0, Disabled)

    - **max_age_variance**: The maximum number of seconds that can be added to
      max_age. This extra value is to avoid restarting all processes at the
      same time.  A process will live between max_age and
      max_age + max_age_variance seconds.

    - **hooks**: callback functions for hooking into the watcher startup
      and shutdown process. **hooks** is a dict where each key is the hook
      name and each value is a 2-tuple with the name of the callable
      or the callable itself and a boolean flag indicating whether an
      exception occurring in the hook should be ignored.
      Possible values for the hook name: *before_start*, *after_start*,
      *before_stop*, *after_stop*.

    - **options** -- extra options for the worker. All options
      found in the configuration file, for instance, are passed
      in this mapping -- this can be used by plugins for watcher-specific
      options.

    - **respawn** -- If set to False, the processes handled by a watcher will
      not be respawned automatically. (default: True)
    """
    def __init__(self, name, cmd, args=None, numprocesses=1, warmup_delay=0.,
                 working_dir=None, shell=False, uid=None, max_retry=5,
                 gid=None, send_hup=False, env=None, stopped=True,
                 graceful_timeout=30., prereload_fn=None, rlimits=None,
                 executable=None, publish=None, stream_stdout=None,
                 stream_stderr=None, priority=0, loop=None,
                 singleton=False, use_sockets=False, copy_env=False,
                 copy_path=False, max_age=0, max_age_variance=30,
                 hooks=None, respawn=True, autostart=True, **options):
        self.name = name
        self.use_sockets = use_sockets
        self.res_name = name.lower().replace(" ", "_")
        self.numprocesses = int(numprocesses)
        self.warmup_delay = warmup_delay
        self.cmd = cmd
        self.args = args
        self._process_counter = 0
        self.stopped = stopped
        self.graceful_timeout = float(graceful_timeout)
        self.prereload_fn = prereload_fn
        self.executable = executable
        self.priority = priority
        self.publish = publish or {}
        self.stream_stdout = stream_stdout
        self.stream_stderr = stream_stderr
        self.stdout = self.stderr = None
        self.max_retry = max_retry
        self._options = options
        self.singleton = singleton
        self.copy_env = copy_env
        self.copy_path = copy_path
        self.max_age = int(max_age)
        self.max_age_variance = int(max_age_variance)
        self.ignore_hook_failure = ['before_stop', 'after_stop']
        self.hooks = self._resolve_hooks(hooks)
        self.respawn = respawn
        self.autostart = autostart
        self.loop = loop or ioloop.IOLoop.instance()

        if singleton and self.numprocesses not in (0, 1):
            raise ValueError("Cannot have %d processes with a singleton "
                             "watcher" % self.numprocesses)

        self.optnames = (("numprocesses", "warmup_delay", "working_dir",
                          "uid", "gid", "send_hup", "shell", "env",
                          "max_retry", "cmd", "args", "graceful_timeout",
                          "executable", "use_sockets", "priority", "copy_env",
                          "singleton", "publish", "max_age",
                          "max_age_variance")
                         + tuple(options.keys()))

        if not working_dir:
            # working dir hasn't been set
            working_dir = util.get_working_dir()

        self.working_dir = working_dir
        self.processes = {}
        self.shell = shell
        self.uid = uid
        self.gid = gid

        if self.copy_env:
            self.env = os.environ.copy()
            if self.copy_path:
                path = os.pathsep.join(sys.path)
                self.env['PYTHONPATH'] = path
            if env is not None:
                self.env.update(env)
        else:
            if self.copy_path:
                raise ValueError(('copy_env and copy_path must have the '
                                  'same value'))
            self.env = env

        self.rlimits = rlimits
        self.send_hup = send_hup
        self.sockets = self.evpub_socket = None
        self.arbiter = None

    def _resolve_hooks(self, hooks):
        """Check the supplied hooks argument to make sure we can find
        callables"""
        if not hooks:
            return {}

        resolved_hooks = {}

        for hook_name, hook_value in hooks.items():
            callable_or_name, ignore_failure = hook_value

            if callable(callable_or_name):
                resolved_hooks[hook_name] = callable_or_name
            else:
                # will raise ImportError on failure
                resolved_hook = resolve_name(callable_or_name)
                resolved_hooks[hook_name] = resolved_hook

            if ignore_failure:
                self.ignore_hook_failure.append(hook_name)

        return resolved_hooks

    @classmethod
    def load_from_config(cls, config, **extras):
        if 'env' in config:
            config['env'] = parse_env_dict(config['env'])
        config.update(extras)
        return cls(name=config.pop('name'), cmd=config.pop('cmd'), **config)

    @util.debuglog
    def initialize(self, evpub_socket, sockets, arbiter):
        self.evpub_socket = evpub_socket
        self.sockets = sockets
        self.arbiter = arbiter

        # default refresh_time
        refresh_time = float(0.3)

        self.stdout = Redirector(
            Publisher(self.notify_event, topic='stdout', **self.publish),
            float(self.publish.get('refresh_time', refresh_time)),
            loop=self.loop)

        self.stderr = Redirector(
            Publisher(self.notify_event, topic='stderr', **self.publish),
            float(self.publish.get('refresh_time', refresh_time)),
            loop=self.loop)

    def __len__(self):
        return len(self.processes)

    def notify_event(self, topic, msg):
        """Publish a message on the event publisher channel"""

        json_msg = json.dumps(msg)
        if isinstance(json_msg, unicode):
            json_msg = json_msg.encode('utf8')

        if isinstance(self.res_name, unicode):
            name = self.res_name.encode('utf8')
        else:
            name = self.res_name

        multipart_msg = ["watcher.%s.%s" % (name, topic), json_msg]

        if self.evpub_socket is not None and not self.evpub_socket.closed:
            self.evpub_socket.send_multipart(multipart_msg)

    @util.debuglog
    def reap_process(self, pid, status=None):
        """ensure that the process is killed (and not a zombie)"""
        process = self.processes.pop(pid)

        if not status:
            while True:
                try:
                    _, status = os.waitpid(pid, os.WNOHANG)
                except OSError as e:
                    if e.errno == errno.EAGAIN:
                        time.sleep(0.001)
                        continue
                    elif e.errno == errno.ECHILD:
                        # nothing to do here, we do not have any child
                        # process running
                        return
                    else:
                        raise
                else:
                    # waitpid succeeded: stop polling and fall through to
                    # inspect the exit status below.
                    break

        # get return code
        if os.WIFSIGNALED(status):
            os.WTERMSIG(status)
        # process exited using exit(2) system call; return the
        # integer exit(2) system call has been called with
        elif os.WIFEXITED(status):
            os.WEXITSTATUS(status)
        else:
            # should never happen
            raise RuntimeError("Unknown process exit status")

        # if the process is dead or a zombie, make sure it is fully stopped.
        if process.status in (DEAD_OR_ZOMBIE, UNEXISTING):
            process.stop()

        logger.debug('reaping process %s [%s]' % (pid, self.name))
        self.notify_event("reap", {"process_pid": pid, "time": time.time()})

    @util.debuglog
    def reap_processes(self):
        """Reap all the processes for this watcher.
        """
        if self.stopped:
            logger.debug('do not reap processes as the watcher is stopped')
            return

        # reap_process changes our dict, look through the copy of keys
        for pid in self.processes.keys():
            self.reap_process(pid)

    @util.debuglog
    def manage_processes(self):
        """Manage processes."""
        if self.stopped:
            return

        if self.max_age:
            for process in self.processes.itervalues():
                max_age = self.max_age + randint(0, self.max_age_variance)
                if process.age() > max_age:
                    logger.debug('%s: expired, respawning', self.name)
                    self.notify_event("expired",
                                      {"process_pid": process.pid,
                                       "time": time.time()})
                    self.kill_process(process)

        if self.respawn and len(self.processes) < self.numprocesses:
            self.spawn_processes()

        processes = self.processes.values()
        processes.sort()
        while len(processes) > self.numprocesses:
            process = processes.pop(0)
            if process.status == DEAD_OR_ZOMBIE:
                self.processes.pop(process.pid)
            else:
                self.processes.pop(process.pid)
                self.kill_process(process)

    @util.debuglog
    def reap_and_manage_processes(self):
        """Reap & manage processes."""
        if self.stopped:
            return
        self.reap_processes()
        self.manage_processes()

    @util.debuglog
    def spawn_processes(self):
        """Spawn processes.
        """
        for i in range(self.numprocesses - len(self.processes)):
            self.spawn_process()
            time.sleep(self.warmup_delay)

    def _get_sockets_fds(self):
        # XXX should be cached
        if self.sockets is None:
            return {}
        fds = {}
        for name, sock in self.sockets.items():
            fds[name] = sock.fileno()
        return fds

    def spawn_process(self):
        """Spawn process.
        """
        if self.stopped:
            return

        cmd = util.replace_gnu_args(self.cmd, sockets=self._get_sockets_fds())
        self._process_counter += 1
        nb_tries = 0
        while nb_tries < self.max_retry or self.max_retry == -1:
            process = None
            try:
                process = Process(self._process_counter, cmd,
                                  args=self.args, working_dir=self.working_dir,
                                  shell=self.shell, uid=self.uid, gid=self.gid,
                                  env=self.env, rlimits=self.rlimits,
                                  executable=self.executable,
                                  use_fds=self.use_sockets, watcher=self)

                self.stdout.add_redirection('stdout',
                                            process,
                                            process.stdout)

                self.stderr.add_redirection('stderr',
                                            process,
                                            process.stderr)

                self.processes[process.pid] = process
                logger.debug('running %s process [pid %d]', self.name,
                             process.pid)
            except OSError as e:
                logger.warning('error in %r: %s', self.name, str(e))

            if process is None:
                nb_tries += 1
                continue
            else:
                self.notify_event("spawn", {"process_pid": process.pid,
                                            "time": time.time()})
                time.sleep(self.warmup_delay)
                return

        self.stop()
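
A usage sketch, not taken from the project: it only illustrates the constructor options documented in the class docstring above, in particular the 2-tuple **hooks** format and load_from_config. The watcher name, command, and hook function are hypothetical, and the import path assumes the standard circus package layout.

import time

from circus.watcher import Watcher


def log_start(*args, **kwargs):
    # A before_start hook; hooks may also be given as dotted names
    # such as "mypackage.hooks.log_start".
    print("watcher starting at %s" % time.time())
    return True


watcher = Watcher(
    name="sleeper",
    cmd="sleep 120",
    numprocesses=2,
    warmup_delay=0.5,
    # Each hook value is a 2-tuple: (callable or dotted name,
    # ignore-failure flag), as described in the docstring.
    hooks={"before_start": (log_start, False)},
)
print(watcher.res_name)   # "sleeper"
print(len(watcher))       # 0 -- nothing is spawned until the arbiter starts it

# Equivalent construction from a config mapping, as load_from_config does:
config = {"name": "sleeper2", "cmd": "sleep 60", "numprocesses": 1}
watcher2 = Watcher.load_from_config(config)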